diff --git a/.ci/gitlab-ci.yml b/.ci/gitlab-ci.yml new file mode 100644 index 00000000000000..9f5528692ccdb5 --- /dev/null +++ b/.ci/gitlab-ci.yml @@ -0,0 +1,44 @@ +# Gitlab configuration for spack/spack + +stages: + - packages + +variables: + SPACK_PACKAGES_CHECKOUT_VERSION: develop + +.clone_packages: &clone_packages + - mkdir -p ${REPO_DESTINATION} + - cd ${REPO_DESTINATION} + - git init + - git remote add origin https://github.com/spack/spack-packages.git + - git fetch --depth 1 origin ${SPACK_PACKAGES_CHECKOUT_VERSION} + - git checkout FETCH_HEAD + - cd - + +dotenv: + stage: .pre + image: ghcr.io/spack/e4s-ubuntu-18.04:v2021-10-18 + tags: [ spack, service ] + script: + - export REPO_DESTINATION=etc/spack-packages + - *clone_packages + - repo_commit=$(git -C ${REPO_DESTINATION} rev-parse FETCH_HEAD) + - echo "SPACK_CHECKOUT_VERSION=${repo_commit}" >> ${CI_PROJECT_DIR}/env + - echo "SPACK_CHECKOUT_REPO=spack/spack-packages" >> ${CI_PROJECT_DIR}/env + - cat ${CI_PROJECT_DIR}/env + - python3 ${CI_PROJECT_DIR}/.ci/gitlab/forward_dotenv_variables.py + ${CI_PROJECT_DIR}/env + ${REPO_DESTINATION}/.ci/gitlab/.gitlab-ci.yml + + artifacts: + paths: + - etc/spack-packages/.ci/gitlab/.gitlab-ci.yml + +spack-packages: + stage: packages + trigger: + strategy: depend + include: + - artifact: etc/spack-packages/.ci/gitlab/.gitlab-ci.yml + job: dotenv + diff --git a/.ci/gitlab/forward_dotenv_variables.py b/.ci/gitlab/forward_dotenv_variables.py new file mode 100644 index 00000000000000..829cd6f98329c2 --- /dev/null +++ b/.ci/gitlab/forward_dotenv_variables.py @@ -0,0 +1,36 @@ +import sys +from typing import Dict + +import yaml + + +def read_dotenv(file_name: str) -> Dict[str, str]: + result = [] + with open(file_name, "r", encoding="utf-8") as fd: + for field in fd: + if not field.strip() or field.strip().startswith("#"): + continue + + data = field.strip("\n").split("=", 1) + try: + result.append((data[0], data[1])) + except IndexError: + print(f"Skipping bad value: {field}") + + return dict(result) + + +if __name__ == "__main__": + dotenv = read_dotenv(sys.argv[1]) + if not dotenv: + exit(0) + + with open(sys.argv[2], "r", encoding="utf-8") as fd: + conf = yaml.load(fd, Loader=yaml.Loader) + + if "variables" not in conf: + conf["variables"] = {} + conf["variables"].update(dotenv) + + with open(sys.argv[2], "w", encoding="utf-8") as fd: + yaml.dump(conf, fd, Dumper=yaml.Dumper) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 4a32ed118715c1..6ade0a6b804245 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -3,3 +3,5 @@ 603569e321013a1a63a637813c94c2834d0a0023 # Formatted entire codebase with black 22 f52f6e99dbf1131886a80112b8c79dfc414afb7c +# Formatted all rst files +1377d42c16c6912faa77259c0a1f665210ccfd85 diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000000000..e2bf3aa7e86375 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,74 @@ +bootstrap: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/bootstrap/** + +binary-caches: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/binary_distribution.py + - any-glob-to-any-file: lib/spack/spack/cmd/buildcache.py + +ci: +- changed-files: + - any-glob-to-any-file: .ci/** + - any-glob-to-any-file: .github/** + - any-glob-to-any-file: lib/spack/spack/ci/** + +commands: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/cmd/** + +config: +- changed-files: + - any-glob-to-any-file: etc/spack/** + - any-glob-to-any-file: lib/spack/spack/cmd/config.py + - any-glob-to-any-file: 
lib/spack/spack/config.py + - any-glob-to-any-file: lib/spack/spack/schema/** + +docs: +- changed-files: + - any-glob-to-any-file: .readthedocs.yml + - any-glob-to-any-file: lib/spack/docs/** + +environments: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/cmd/env.py + - any-glob-to-any-file: lib/spack/spack/environment/** + +mirrors: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/cmd/mirror.py + - any-glob-to-any-file: lib/spack/spack/mirrors/** + +modules: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/cmd/module.py + - any-glob-to-any-file: lib/spack/spack/modules/** + +solver: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/solver/** + +style: +- changed-files: + - any-glob-to-any-file: .flake8 + - any-glob-to-any-file: .github/workflows/prechecks.yml + - any-glob-to-any-file: .github/workflows/requirements/style/** + - any-glob-to-any-file: lib/spack/spack/cmd/style.py + - any-glob-to-any-file: pyproject.toml + +unit-tests: +- changed-files: + - any-glob-to-any-file: .codecov.yml + - any-glob-to-any-file: lib/spack/spack/cmd/unit_test.py + - any-glob-to-any-file: lib/spack/spack/test/** + - any-glob-to-any-file: pyproject.toml + - any-glob-to-any-file: pytest.ini + - any-glob-to-any-file: var/spack/test_repos/** + +vendor: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/vendor/** + +versions: +- changed-files: + - any-glob-to-any-file: lib/spack/spack/version/** diff --git a/.github/workflows/bin/bootstrap-test.sh b/.github/workflows/bin/bootstrap-test.sh deleted file mode 100755 index 0d774c248c3a6a..00000000000000 --- a/.github/workflows/bin/bootstrap-test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e -source share/spack/setup-env.sh -$PYTHON bin/spack bootstrap disable github-actions-v0.5 -$PYTHON bin/spack bootstrap disable spack-install -$PYTHON bin/spack $SPACK_FLAGS solve zlib -tree $BOOTSTRAP/store -exit 0 diff --git a/.github/workflows/bin/canonicalize.py b/.github/workflows/bin/canonicalize.py new file mode 100755 index 00000000000000..8ca6106a10aec9 --- /dev/null +++ b/.github/workflows/bin/canonicalize.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +import argparse +import ast +import os +import subprocess +import sys +from itertools import product +from typing import List + + +def run_git_command(*args: str, dir: str) -> None: + """Run a git command in the output directory.""" + subprocess.run( + [ + "git", + "-c", + "user.email=example@example.com", + "-c", + "user.name=Example", + "-c", + "init.defaultBranch=main", + "-c", + "color.ui=always", + "-C", + dir, + *args, + ], + check=True, + stdout=sys.stdout, + stderr=sys.stderr, + ) + + +def run(root: str, output_dir: str) -> None: + """Recurse over a directory and canonicalize all Python files.""" + from spack.util.package_hash import RemoveDocstrings, unparse + + count = 0 + stack = [root] + + while stack: + current = stack.pop() + for entry in os.scandir(current): + if entry.is_dir(follow_symlinks=False): + stack.append(entry.path) + elif entry.is_file(follow_symlinks=False) and entry.name.endswith(".py"): + try: + with open(entry.path, "r") as f: + src = f.read() + except OSError: + continue + + canonical_dir = os.path.join(output_dir, os.path.relpath(current, root)) + os.makedirs(canonical_dir, exist_ok=True) + with open(os.path.join(canonical_dir, entry.name), "w") as f: + f.write( + unparse(RemoveDocstrings().visit(ast.parse(src)), py_ver_consistent=True) + ) + count += 1 + + assert count > 0, "No Python files found in the specified directory." + + +def compare( + input_dir: str, output_dir: str, python_versions: List[str], spack_versions: List[str] +) -> None: + """Compare canonicalized files across different Python versions and error if they differ.""" + # Create a git repo in output_dir to track changes + os.makedirs(output_dir, exist_ok=True) + run_git_command("init", dir=output_dir) + + pairs = list(product(spack_versions, python_versions)) + + if len(pairs) < 2: + raise ValueError("At least two Python or two Spack versions must be given for comparison.") + + changes_with_previous: List[int] = [] + + for i, (spack_dir, python_exe) in enumerate(pairs): + print(f"\033[1;97mCanonicalizing with {python_exe} and {spack_dir}...\033[0m", flush=True) + + # Point PYTHONPATH to the given Spack library for the subprocess + if not os.path.isdir(spack_dir): + raise ValueError(f"Invalid Spack dir: {spack_dir}") + env = os.environ.copy() + spack_pythonpath = os.path.join(spack_dir, "lib", "spack") + if "PYTHONPATH" in env and env["PYTHONPATH"]: + env["PYTHONPATH"] = f"{spack_pythonpath}{os.pathsep}{env['PYTHONPATH']}" + else: + env["PYTHONPATH"] = spack_pythonpath + + subprocess.run( + [python_exe, __file__, "--run", "--input-dir", input_dir, "--output-dir", output_dir], + check=True, + stdout=sys.stdout, + stderr=sys.stderr, + env=env, + ) + if i > 0: + try: + run_git_command("diff", "--exit-code", "HEAD", dir=output_dir) + except subprocess.CalledProcessError: + changes_with_previous.append(i) + + # The first run creates a commit for reference + run_git_command("add", ".", dir=output_dir) + run_git_command( + "commit", + "--quiet", + "--allow-empty", # makes this idempotent when running locally + "-m", + f"Canonicalized with {python_exe} and {spack_dir}", + dir=output_dir, + ) + + for i in changes_with_previous: + previous_spack, previous_python = pairs[i - 1] + current_spack, current_python = pairs[i] + print( + f"\033[1;31mChanges detected between {previous_python} ({previous_spack}) and " + f"{current_python} ({current_spack})\033[0m" + ) + + if changes_with_previous: + exit(1) + + +if __name__ == "__main__": + parser = 
argparse.ArgumentParser(description="Canonicalize Spack package files.") + parser.add_argument("--run", action="store_true", help="Generate canonicalized sources.") + parser.add_argument("--spack", nargs="+", help="Specify one or more Spack versions.") + parser.add_argument("--python", nargs="+", help="Specify one or more Python versions.") + parser.add_argument("--input-dir", type=str, required=True, help="A repo's packages dir.") + parser.add_argument( + "--output-dir", + type=str, + required=True, + help="The output directory for canonicalized package files.", + ) + args = parser.parse_args() + + if args.run: + run(args.input_dir, args.output_dir) + else: + compare(args.input_dir, args.output_dir, args.python, args.spack) diff --git a/.github/workflows/bin/format-rst.py b/.github/workflows/bin/format-rst.py new file mode 100755 index 00000000000000..d092c3e1ccd493 --- /dev/null +++ b/.github/workflows/bin/format-rst.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +"""This script formats reStructuredText files to ensure one sentence per line and no trailing +whitespace. It exits with a non-zero status if any files were modified.""" + +import difflib +import importlib +import io +import json +import os +import re +import subprocess +import sys +from typing import List + +import black +from docutils import nodes +from docutils.core import publish_doctree +from docutils.parsers.rst import Directive, directives +from ruamel.yaml import YAML + +from spack.vendor import jsonschema + +import spack.schema + +#: Map Spack config sections to their corresponding JSON schema +SECTION_AND_SCHEMA = [ + # The first property's key is the config section name + (next(iter(m.schema["properties"])), m.schema) + # Dynamically load all modules in spack.schema to be future-proof + for m in ( + importlib.import_module(f"spack.schema.{f[:-3]}") + for f in os.listdir(os.path.dirname(spack.schema.__file__)) + if f.endswith(".py") and f != "__init__.py" + ) + if hasattr(m, "schema") and len(m.schema.get("properties", {})) == 1 +] + +assert SECTION_AND_SCHEMA, "no schemas found" + +END_OF_SENTENCE = re.compile( + r""" +( + (?: + (? str: + return f"\033[1;33mwarning:\033[0m {msg}" + + +class Warning: + def __init__(self, path: str, line: int, message: str) -> None: + self.path = path + self.line = line + self.message = message + + def __str__(self) -> str: + return _warning(f"{self.path}:{self.line}: {self.message}") + + +class CodeBlockWarning(Warning): + def __init__(self, path: str, line: int, message: str, diff: str): + super().__init__(path, line, f"{message}\n{diff}") + + def __str__(self) -> str: + return _warning(f"{self.path}:{self.line}: {self.message}") + + +class ValidationWarning(Warning): + pass + + +class SphinxCodeBlock(Directive): + """Defines a code-block directive with the options Sphinx supports.""" + + has_content = True + optional_arguments = 1 # language + required_arguments = 0 + option_spec = { + "force": directives.unchanged, + "linenos": directives.unchanged, + "dedent": directives.unchanged, + "lineno-start": directives.unchanged, + "emphasize-lines": directives.unchanged, + "caption": directives.unchanged, + "class": directives.unchanged, + "name": directives.unchanged, + } + + def run(self) -> List[nodes.Node]: + # Produce a literal block with block.attributes["language"] set. 
+ language = self.arguments[0] if self.arguments else "python" + literal = nodes.literal_block("\n".join(self.content), "\n".join(self.content)) + literal["language"] = language + return [literal] + + +directives.register_directive("code-block", SphinxCodeBlock) + + +class ParagraphInfo: + lineno: int + end_lineno: int + src: str + lines: List[str] + + def __init__(self, line: int, src: str) -> None: + self.lineno = line + self.src = src + self.lines = src.splitlines() + self.end_lineno = line + len(self.lines) - 1 + + +def _is_node_in_table(node: nodes.Node) -> bool: + """Check if a node is inside a table by walking up the parent chain.""" + while node.parent: + node = node.parent + if isinstance(node, nodes.table): + return True + return False + + +def _validate_schema(data: object) -> None: + if not isinstance(data, dict): + return + for section, schema in SECTION_AND_SCHEMA: + if section in data: + jsonschema.validate(data, schema) + + +def _format_code_blocks(document: nodes.document, path: str) -> List[Warning]: + """Try to parse and format Python, YAML, and JSON code blocks. This does *not* update the + sources, but collects issues for later reporting. Returns a list of warnings.""" + issues: List[Warning] = [] + for code_block in document.findall(nodes.literal_block): + language = code_block.attributes.get("language", "") + if language not in ("python", "yaml", "json"): + continue + original = code_block.astext() + line = code_block.line if code_block.line else 0 + possible_config_data = None + + try: + if language == "python": + formatted = black.format_str(original, mode=black.FileMode(line_length=99)) + elif language == "yaml": + yaml = YAML(pure=True) + yaml.width = 10000 # do not wrap lines + yaml.preserve_quotes = True # do not force particular quotes + buf = io.BytesIO() + possible_config_data = yaml.load(original) + yaml.dump(possible_config_data, buf) + formatted = buf.getvalue().decode("utf-8") + elif language == "json": + formatted = json.dumps(json.loads(original), indent=2) + else: + assert False + except Exception as e: + issues.append(Warning(path, line, f"formatting failed: {e}: {original!r}")) + continue + + try: + _validate_schema(possible_config_data) + except jsonschema.ValidationError as e: + issues.append(ValidationWarning(path, line, f"schema validation failed: {e.message}")) + + if formatted == original: + continue + diff = "\n".join( + difflib.unified_diff( + original.splitlines(), + formatted.splitlines(), + lineterm="", + fromfile=f"{path}:{line} (original)", + tofile=f"{path}:{line} (suggested, NOT required)", + ) + ) + + # ignore suggestions to quote double colons like this: + # + # - build_stage:: + # + 'build_stage:': + # + if diff and not DOUBLE_COLON_WARNING.search(diff): + issues.append(CodeBlockWarning(path, line, "formatting suggested:", diff)) + return issues + + +def _format_paragraphs(document: nodes.document, path: str, src_lines: List[str]) -> bool: + """Format paragraphs in the document. Returns True if ``src_lines`` was modified.""" + + paragraphs = [ + ParagraphInfo(line=p.line, src=p.rawsource) + for p in document.findall(nodes.paragraph) + if p.line is not None and p.rawsource and not _is_node_in_table(p) + ] + + # Work from bottom to top to avoid messing up line numbers + paragraphs.sort(key=lambda p: p.lineno, reverse=True) + modified = False + + for p in paragraphs: + # docutils does not give us the column offset, so we'll find it ourselves. 
+ col_offset = src_lines[p.lineno - 1].rfind(p.lines[0]) + assert col_offset >= 0, f"{path}:{p.lineno}: rst parsing error." + prefix = lambda i: " " * col_offset if i > 0 else src_lines[p.lineno - 1][:col_offset] + + # Defensive check to ensure the source paragraph matches the docutils paragraph + for i, line in enumerate(p.lines): + line_lhs = f"{prefix(i)}{line}" + line_rhs = src_lines[p.lineno - 1 + i].rstrip() # docutils trims trailing whitespace + assert line_lhs == line_rhs, f"{path}:{p.lineno + i}: rst parsing error." + + # Replace current newlines with whitespace, and then split sentences. + new_paragraph_src = END_OF_SENTENCE.sub(r"\1\n", p.src.replace("\n", " ")) + new_paragraph_lines = [ + f"{prefix(i)}{line.lstrip()}" for i, line in enumerate(new_paragraph_src.splitlines()) + ] + + if new_paragraph_lines != src_lines[p.lineno - 1 : p.end_lineno]: + modified = True + src_lines[p.lineno - 1 : p.end_lineno] = new_paragraph_lines + + return modified + + +def reformat_rst_file(path: str, warnings: List[Warning]) -> bool: + """Reformat a reStructuredText file "in-place". Returns True if modified, False otherwise.""" + with open(path, "r", encoding="utf-8") as f: + src = f.read() + + src_lines = src.splitlines() + document: nodes.document = publish_doctree(src, settings_overrides=DOCUTILS_SETTING) + + warnings.extend(_format_code_blocks(document, path)) + + if not _format_paragraphs(document, path, src_lines): + return False + + with open(f"{path}.tmp", "w", encoding="utf-8") as f: + f.write("\n".join(src_lines)) + f.write("\n") + os.rename(f"{path}.tmp", path) + print(f"Fixed reStructuredText formatting: {path}", flush=True) + return True + + +def main(*files: str) -> None: + modified = False + warnings: List[Warning] = [] + for f in files: + modified |= reformat_rst_file(f, warnings) + + if modified: + subprocess.run(["git", "--no-pager", "diff", "--color=always", "--", *files]) + + for warning in sorted(warnings, key=lambda w: isinstance(w, ValidationWarning)): + print(warning, flush=True, file=sys.stderr) + + if warnings: + print( + _warning(f"completed with {len(warnings)} potential issues"), + flush=True, + file=sys.stderr, + ) + sys.exit(1 if modified else 0) + + +if __name__ == "__main__": + main(*sys.argv[1:]) diff --git a/.github/workflows/bootstrap.yml b/.github/workflows/bootstrap.yml index cbb98553ef3158..a4952804d01298 100644 --- a/.github/workflows/bootstrap.yml +++ b/.github/workflows/bootstrap.yml @@ -6,7 +6,7 @@ on: workflow_call: schedule: # nightly at 2:16 AM - - cron: '16 2 * * *' + - cron: "16 2 * * *" concurrency: group: bootstrap-${{github.ref}}-${{github.event.pull_request.number || github.run_number}} @@ -43,10 +43,9 @@ jobs: fetch-depth: 0 - name: Bootstrap clingo run: | - source share/spack/setup-env.sh + . 
share/spack/setup-env.sh + spack bootstrap disable github-actions-v2 spack bootstrap disable github-actions-v0.6 - spack bootstrap disable github-actions-v0.5 - spack external find cmake bison spack -d solve zlib tree ~/.spack/bootstrap/store/ @@ -55,12 +54,11 @@ jobs: runs-on: ${{ matrix.runner }} strategy: matrix: - runner: ['macos-13', 'macos-14', "ubuntu-latest"] + runner: ["macos-15-intel", "macos-latest", "ubuntu-latest"] steps: - name: Setup macOS if: ${{ matrix.runner != 'ubuntu-latest' }} - run: | - brew install cmake bison tree + run: brew install bison tree - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: @@ -70,19 +68,19 @@ jobs: python-version: "3.12" - name: Bootstrap clingo run: | - source share/spack/setup-env.sh + . share/spack/setup-env.sh + spack bootstrap disable github-actions-v2 spack bootstrap disable github-actions-v0.6 - spack bootstrap disable github-actions-v0.5 - spack external find --not-buildable cmake bison + export PATH="$(brew --prefix bison)/bin:$(brew --prefix cmake)/bin:$PATH" spack -d solve zlib - tree $HOME/.spack/bootstrap/store/ + tree ~/.spack/bootstrap/store/ gnupg-sources: if: github.repository == 'spack/spack' runs-on: ${{ matrix.runner }} strategy: matrix: - runner: [ 'macos-13', 'macos-14', "ubuntu-latest" ] + runner: ["macos-15-intel", "macos-latest", "ubuntu-latest"] steps: - name: Setup macOS if: ${{ matrix.runner != 'ubuntu-latest' }} @@ -98,10 +96,10 @@ jobs: fetch-depth: 0 - name: Bootstrap GnuPG run: | - source share/spack/setup-env.sh + . share/spack/setup-env.sh spack solve zlib + spack bootstrap disable github-actions-v2 spack bootstrap disable github-actions-v0.6 - spack bootstrap disable github-actions-v0.5 spack -d gpg list tree ~/.spack/bootstrap/store/ @@ -110,7 +108,7 @@ jobs: runs-on: ${{ matrix.runner }} strategy: matrix: - runner: ['macos-13', 'macos-14', "ubuntu-latest"] + runner: ["macos-15-intel", "macos-latest", "ubuntu-latest"] steps: - name: Setup macOS if: ${{ matrix.runner != 'ubuntu-latest' }} @@ -133,39 +131,30 @@ jobs: 3.11 3.12 3.13 + 3.14 - name: Set bootstrap sources run: | - source share/spack/setup-env.sh - spack bootstrap disable github-actions-v0.5 + . share/spack/setup-env.sh + spack bootstrap disable github-actions-v0.6 spack bootstrap disable spack-install - name: Bootstrap clingo run: | - set -e - for ver in '3.8' '3.9' '3.10' '3.11' '3.12' '3.13'; do - not_found=1 - ver_dir="$(find $RUNNER_TOOL_CACHE/Python -wholename "*/${ver}.*/*/bin" | grep . || true)" - if [[ -d "$ver_dir" ]] ; then - echo "Testing $ver_dir" - if $ver_dir/python --version ; then - export PYTHON="$ver_dir/python" - not_found=0 - old_path="$PATH" - export PATH="$ver_dir:$PATH" - ./bin/spack-tmpconfig -b ./.github/workflows/bin/bootstrap-test.sh - export PATH="$old_path" - fi - fi - if (($not_found)) ; then - echo Required python version $ver not found in runner! + . share/spack/setup-env.sh + for ver in 3.8 3.9 3.10 3.11 3.12 3.13 3.14; do + ver_dir="$(find "$RUNNER_TOOL_CACHE/Python" -wholename "*/${ver}.*/*/bin" | grep . || true)" + export SPACK_PYTHON="$ver_dir/python3" + if [ ! -d "$ver_dir" ] || ! "$SPACK_PYTHON" --version; then + echo "Python $ver not found" exit 1 fi + spack solve zlib done + tree ~/.spack/bootstrap/store - name: Bootstrap GnuPG run: | - source share/spack/setup-env.sh + . 
share/spack/setup-env.sh spack -d gpg list - tree $HOME/.spack/bootstrap/store/ - + tree ~/.spack/bootstrap/store/ windows: if: github.repository == 'spack/spack' @@ -185,9 +174,8 @@ jobs: - name: Bootstrap clingo run: | ./share/spack/setup-env.ps1 + spack bootstrap disable github-actions-v2 spack bootstrap disable github-actions-v0.6 - spack bootstrap disable github-actions-v0.5 - spack external find --not-buildable cmake bison spack -d solve zlib ./share/spack/qa/validate_last_exit.ps1 tree $env:userprofile/.spack/bootstrap/store/ diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1f3cbdc600f03e..a59c628e19d424 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -48,6 +48,7 @@ jobs: - '.github/workflows/ci.yaml' core: - './!(var/**)/**' + - 'var/spack/test_repos/**' packages: - 'var/**' # Some links for easier reference: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 28638e5c6be24b..a8d0f6dbc5de5e 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b with: - python-version: '3.11' + python-version: '3.14' - name: Install python dependencies run: pip install -r .github/workflows/requirements/coverage/requirements.txt diff --git a/.github/workflows/import-check.yaml b/.github/workflows/import-check.yaml index d2a3e48e53079b..d282b7ea88133f 100644 --- a/.github/workflows/import-check.yaml +++ b/.github/workflows/import-check.yaml @@ -40,11 +40,14 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: repository: haampie/circular-import-fighter - ref: 6645c9d4e81ac278f4b3970f221dbead699204fe + ref: f1c56367833f3c82f6a85dc58595b2cd7995ad48 path: circular-import-fighter - name: Install dependencies working-directory: circular-import-fighter run: make -j dependencies - - name: Circular import check + - name: Circular import check (without inline imports) working-directory: circular-import-fighter run: make -j compare "SPACK_ROOT=../old ../new" + - name: Circular import check (with inline imports) + working-directory: circular-import-fighter + run: make clean-graph && make -j compare "SPACK_ROOT=../old ../new" IMPORTS_FLAGS=--inline diff --git a/.github/workflows/nightly-win-builds.yml b/.github/workflows/nightly-win-builds.yml deleted file mode 100644 index 11f19c9244c288..00000000000000 --- a/.github/workflows/nightly-win-builds.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Windows Paraview Nightly - -on: - schedule: - - cron: '0 2 * * *' # Run at 2 am - -defaults: - run: - shell: - powershell Invoke-Expression -Command "./share/spack/qa/windows_test_setup.ps1"; {0} - - -jobs: - build-paraview-deps: - runs-on: windows-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - with: - fetch-depth: 0 - - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b - with: - python-version: 3.9 - - name: Install Python packages - run: | - python -m pip install --upgrade pip six pywin32 setuptools coverage - - name: Build Test - run: | - spack compiler find - spack external find cmake ninja win-sdk win-wdk wgl msmpi - spack -d install -y --cdash-upload-url https://cdash.spack.io/submit.php?project=Spack+on+Windows --cdash-track Nightly --only dependencies paraview - exit 0 \ No newline at end of file diff --git a/.github/workflows/prechecks.yml b/.github/workflows/prechecks.yml 
index 323e421f284e3c..66be75742c0e3c 100644 --- a/.github/workflows/prechecks.yml +++ b/.github/workflows/prechecks.yml @@ -7,16 +7,12 @@ on: - releases/** - spack-stack-dev - release/** - # DH* TEMPORARY - - feature/update_to_spack_v1 pull_request: branches: - develop - releases/** - spack-stack-dev - release/** - # DH* TEMPORARY - - feature/update_to_spack_v1 workflow_call: inputs: with_coverage: @@ -32,7 +28,6 @@ concurrency: jobs: - # Validate that the code can be run on all the Python versions supported by Spack validate: runs-on: ubuntu-latest steps: @@ -45,6 +40,7 @@ jobs: - name: Install Python Packages run: | pip install -r .github/workflows/requirements/style/requirements.txt + # Validate that the code can be run on all the Python versions supported by Spack - name: vermin run: | vermin --backport importlib \ @@ -55,57 +51,31 @@ jobs: -vvv \ --exclude-regex lib/spack/spack/vendor \ lib/spack/spack/ lib/spack/llnl/ bin/ var/spack/test_repos + # Check that __slots__ are used properly + - name: slotscheck + run: | + ./bin/spack python -m slotscheck --exclude-modules="spack.test|spack.vendor" lib/spack/spack/ -# DH* TURN OFF UNTIL NEXT MAJOR UPDATE FROM SPACK - SEE https://github.com/JCSDA/spack/pull/563 -# # Run style checks on the files that have been changed -# style: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 -# with: -# fetch-depth: 2 -# - name: Revert spack-stack modifications -# run: ./REMOVE_SPACK_STACK_MODS_FOR_CI.sh -# - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b -# with: -# python-version: '3.13' -# - name: Install Python packages -# run: | -# pip install -r .github/workflows/requirements/style/requirements.txt -# - name: Run style tests -# run: | -# bin/spack style --base HEAD^1 -# bin/spack license verify -# pylint -j $(nproc) --disable=all --enable=unspecified-encoding --ignore-paths=lib/spack/spack/vendor lib -# -# -# # Check that spack can bootstrap the development environment on Python 3.6 - RHEL8 -# bootstrap-dev-rhel8: -# runs-on: ubuntu-latest -# container: registry.access.redhat.com/ubi8/ubi -# steps: -# - name: Install dependencies -# run: | -# dnf install -y \ -# bzip2 curl file gcc-c++ gcc gcc-gfortran git gnupg2 gzip \ -# make patch tcl unzip which xz -# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 -# - name: Revert spack-stack modifications -# run: ./REMOVE_SPACK_STACK_MODS_FOR_CI.sh -# - name: Setup repo and non-root user -# run: | -# git --version -# git config --global --add safe.directory '*' -# git fetch --unshallow -# . .github/workflows/bin/setup_git.sh -# useradd spack-test -# chown -R spack-test . 
-# - name: Bootstrap Spack development environment -# shell: runuser -u spack-test -- bash {0} -# run: | -# source share/spack/setup-env.sh -# spack debug report -# spack -d bootstrap now --dev -# spack -d style -t black -# spack unit-test -V -# *DH \ No newline at end of file + # Run style checks on the files that have been changed + style: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + fetch-depth: 2 + - name: Revert spack-stack modifications + run: ./REMOVE_SPACK_STACK_MODS_FOR_CI.sh + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b + with: + python-version: '3.13' + - name: Install Python packages + run: | + pip install -r .github/workflows/requirements/style/requirements.txt + echo "PYTHONPATH=$PWD/lib/spack" >> $GITHUB_ENV + - name: Run style tests (code) + run: | + bin/spack style --base HEAD^1 + bin/spack license verify + pylint -j $(nproc) --disable=all --enable=unspecified-encoding --ignore-paths=lib/spack/spack/vendor lib + - name: Run style tests (docs) + run: .github/workflows/bin/format-rst.py $(git ls-files 'lib/spack/docs/*.rst') diff --git a/.github/workflows/requirements/coverage/requirements.txt b/.github/workflows/requirements/coverage/requirements.txt index 1bf0f9a76bf272..3ee65e02e420a8 100644 --- a/.github/workflows/requirements/coverage/requirements.txt +++ b/.github/workflows/requirements/coverage/requirements.txt @@ -1 +1 @@ -coverage==7.6.1 +coverage==7.11.0 diff --git a/.github/workflows/requirements/style/requirements.txt b/.github/workflows/requirements/style/requirements.txt index a0139df5fbd9ea..afd9066accadf0 100644 --- a/.github/workflows/requirements/style/requirements.txt +++ b/.github/workflows/requirements/style/requirements.txt @@ -1,8 +1,11 @@ -black==25.1.0 +black==25.12.0 clingo==5.8.0 flake8==7.3.0 -isort==6.0.1 -mypy==1.17.0 -types-six==1.17.0.20250515 -vermin==1.6.0 -pylint==3.3.7 +isort==7.0.0 +mypy==1.19.1 +types-six==1.17.0.20251009 +vermin==1.8.0 +pylint==4.0.4 +docutils==0.22.4 +ruamel.yaml==0.19.1 +slotscheck==0.19.1 diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 233424f0bae7cd..8a5c93bcb35344 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -16,23 +16,41 @@ jobs: - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 with: # Issues configuration - stale-issue-message: | - This issue has been automatically marked as stale because it has not had - any activity in the last 6 months. It will be closed if there is no further activity. + stale-issue-message: > + This issue has been automatically marked as stale because it has not had any activity in the last 6 months. + It will be closed in 30 days if there is no further activity. + + + If the issue is waiting for a reply from maintainers, feel free to ping them as a reminder. + If it is waiting and has no comments yet, feel free to ping `@spack/spack-releasers` or simply leave a comment saying this should not be marked stale. + This will also reset the issue's stale state. + + Thank you for your contributions! - close-issue-message: | + close-issue-message: > This issue was closed because it had no activity for 30 days after being marked stale. + If you feel this is in error, please feel free to reopen this issue. 
stale-issue-label: 'stale' - any-of-issue-labels: 'build-error' - exempt-issue-labels: 'pinned,bug' + any-of-issue-labels: 'build-error,unreproducible,question,documentation,environments' + exempt-issue-labels: 'pinned,triage,impact-low,impact-medium,impact-high' # Pull requests configuration - stale-pr-message: | - This pull request has been automatically marked as stale because it has not had - any activity in the last 6 months. It will be closed if there is no further activity. + stale-pr-message: > + This pull request has been automatically marked as stale because it has not had any activity in the last 6 months. + It will be closed in 30 days if there is no further activity. + + + If the pull request is waiting for a reply from reviewers, feel free to ping them as a reminder. + If it is waiting and has no assigned reviewer, feel free to ping `@spack/spack-releasers` or simply leave a comment saying this should not be marked stale. + This will reset the pull request's stale state. + + + To get more eyes on your pull request, you can post a link in the #pull-requests channel of the Spack Slack. + Thank you for your contributions! - close-pr-message: | + close-pr-message: > This pull request was closed because it had no activity for 30 days after being marked stale. + If you feel this is in error, please feel free to reopen this pull request. stale-pr-label: 'stale' any-of-pr-labels: 'new-package,update-package' exempt-pr-labels: 'pinned' diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml new file mode 100644 index 00000000000000..9fbc25c8e62069 --- /dev/null +++ b/.github/workflows/triage.yml @@ -0,0 +1,22 @@ +#----------------------------------------------------------------------- +# DO NOT modify unless you really know what you are doing. +# +# See https://stackoverflow.com/a/74959635 for more info. +# Talk to @alecbcs if you have questions/are not sure of a change's +# possible impact on security. 
+#----------------------------------------------------------------------- +name: triage +on: + pull_request_target: + branches: + - develop + +jobs: + pr: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + issues: write + steps: + - uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b diff --git a/.github/workflows/unit_tests.yaml b/.github/workflows/unit_tests.yaml index 48677933548b5b..4ff117dd6699b8 100644 --- a/.github/workflows/unit_tests.yaml +++ b/.github/workflows/unit_tests.yaml @@ -5,15 +5,13 @@ on: branches: - develop - releases/** - # DH* TURN OFF UNTIL NEXT MAJOR UPDATE FROM SPACK - SEE https://github.com/JCSDA/spack/pull/563 - #- spack-stack-dev + - spack-stack-dev - release/** pull_request: branches: - develop - releases/** - # DH* TURN OFF UNTIL NEXT MAJOR UPDATE FROM SPACK - SEE https://github.com/JCSDA/spack/pull/563 - #- spack-stack-dev + - spack-stack-dev - release/** workflow_dispatch: workflow_call: @@ -29,7 +27,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.14'] on_develop: - ${{ github.ref == 'refs/heads/develop' }} include: @@ -74,7 +72,8 @@ jobs: run: "brew install kcov" - name: Install Python packages run: | - pip install --upgrade pip setuptools pytest pytest-xdist pytest-cov + # See https://github.com/coveragepy/coveragepy/issues/2082 + pip install --upgrade pip pytest pytest-xdist pytest-cov "coverage<=7.11.0" pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black" - name: Setup git configuration run: | @@ -93,10 +92,10 @@ jobs: - name: Run unit tests env: SPACK_PYTHON: python - SPACK_TEST_PARALLEL: 2 + SPACK_TEST_PARALLEL: 4 COVERAGE: true COVERAGE_FILE: coverage/.coverage-${{ matrix.os }}-python${{ matrix.python-version }} - UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.11' }} + UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.14' }} run: | share/spack/qa/run-unit-tests - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b @@ -129,7 +128,7 @@ jobs: run: "brew install kcov" - name: Install Python packages run: | - pip install --upgrade pip setuptools pytest coverage[toml] pytest-xdist + pip install --upgrade pip pytest coverage[toml] pytest-xdist - name: Setup git configuration run: | # Need this for the git tests to succeed. @@ -166,14 +165,18 @@ jobs: git config --global --add safe.directory '*' git fetch --unshallow . .github/workflows/bin/setup_git.sh - useradd spack-test - chown -R spack-test . 
- - name: Run unit tests - shell: runuser -u spack-test -- bash {0} + - name: Setup a virtual environment with platform-python + run: | + /usr/libexec/platform-python -m venv ~/platform-spack + source ~/platform-spack/bin/activate + pip install --upgrade pip pytest coverage[toml] pytest-xdist + - name: Bootstrap Spack development environment and run unit tests run: | + source ~/platform-spack/bin/activate source share/spack/setup-env.sh + spack debug report spack -d bootstrap now --dev - spack unit-test -k 'not cvs and not svn and not hg' -x --verbose + pytest --verbose -x -n3 --dist loadfile -k 'not cvs and not svn and not hg' # Test for the clingo based solver (using clingo-cffi) clingo-cffi: runs-on: ubuntu-latest @@ -192,7 +195,7 @@ jobs: sudo apt-get -y install coreutils gfortran graphviz gnupg2 - name: Install Python packages run: | - pip install --upgrade pip setuptools pytest coverage[toml] pytest-cov clingo + pip install --upgrade pip pytest coverage[toml] pytest-cov clingo pytest-xdist pip install --upgrade flake8 "isort>=4.3.5" "mypy>=0.900" "click" "black" - name: Run unit tests (full suite with coverage) env: @@ -201,11 +204,11 @@ jobs: run: | . share/spack/setup-env.sh spack bootstrap disable spack-install - spack bootstrap disable github-actions-v0.5 spack bootstrap disable github-actions-v0.6 + spack bootstrap disable github-actions-v2 spack bootstrap status spack solve zlib - spack unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml lib/spack/spack/test/concretization/core.py + pytest --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml -x -n3 lib/spack/spack/test/concretization/core.py - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b with: name: coverage-clingo-cffi @@ -216,8 +219,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-13, macos-14] - python-version: ["3.11"] + os: [macos-15-intel, macos-latest] + python-version: ["3.14"] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: @@ -229,23 +232,22 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Python packages run: | - pip install --upgrade pip setuptools - pip install --upgrade pytest coverage[toml] pytest-xdist pytest-cov + pip install --upgrade pip + # See https://github.com/coveragepy/coveragepy/issues/2082 + pip install --upgrade pytest coverage[toml] pytest-xdist pytest-cov "coverage<=7.11.0" - name: Setup Homebrew packages run: | brew install dash fish gcc gnupg kcov - name: Run unit tests env: - SPACK_TEST_PARALLEL: 4 COVERAGE_FILE: coverage/.coverage-${{ matrix.os }}-python${{ matrix.python-version }} run: | git --version . .github/workflows/bin/setup_git.sh . 
share/spack/setup-env.sh - $(which spack) bootstrap disable spack-install - $(which spack) solve zlib - common_args=(--dist loadfile --tx '4*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python' -x) - $(which spack) unit-test --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml "${common_args[@]}" + spack bootstrap disable spack-install + spack solve zlib + python3 -m pytest --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml --dist loadfile -x -n4 - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b with: name: coverage-${{ matrix.os }}-python${{ matrix.python-version }} @@ -253,7 +255,7 @@ jobs: include-hidden-files: true # Run unit tests on Windows windows: - # Can't run this on other repos than spack/spack + # Don't run this on other repos than spack/spack if: github.repository == 'spack/spack' defaults: run: @@ -268,10 +270,10 @@ jobs: run: ./REMOVE_SPACK_STACK_MODS_FOR_CI.sh - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b with: - python-version: 3.9 + python-version: '3.14' - name: Install Python packages run: | - python -m pip install --upgrade pip pywin32 setuptools pytest-cov clingo + python -m pip install --upgrade pip pywin32 pytest-cov clingo "coverage<=7.11.0" - name: Create local develop run: | ./.github/workflows/bin/setup_git.ps1 @@ -279,10 +281,40 @@ jobs: env: COVERAGE_FILE: coverage/.coverage-windows run: | - spack unit-test -x --verbose --cov --cov-config=pyproject.toml + python -m pytest -x --verbose --cov --cov-config=pyproject.toml ./share/spack/qa/validate_last_exit.ps1 - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b with: name: coverage-windows path: coverage include-hidden-files: true + + canonicalization: + # Don't run this on other repos than spack/spack + if: github.repository == 'spack/spack' + name: package.py canonicalization + runs-on: ubuntu-latest + container: + image: ghcr.io/spack/all-pythons:2025-10-10 + + steps: + - name: Checkout Spack (current) + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + path: spack-current + - name: Checkout Spack (previous) + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + path: spack-previous + ref: ${{ github.event.pull_request.base.sha || github.event.before }} + - name: Checkout Spack Packages + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + repository: spack/spack-packages + path: spack-packages + - name: Test package.py canonicalization + run: spack-current/.github/workflows/bin/canonicalize.py + --spack $PWD/spack-previous $PWD/spack-current + --python python3.6 python3.7 python3.8 python3.9 python3.10 python3.11 python3.12 python3.13 python3.14 + --input-dir spack-packages/repos/spack_repo/builtin/packages/ + --output-dir canonicalized diff --git a/.gitignore b/.gitignore index e53fc3de111637..276119d7131bb5 100644 --- a/.gitignore +++ b/.gitignore @@ -6,9 +6,6 @@ /var/spack/cache /var/spack/environments /opt -# Ignore everything in /etc/spack except /etc/spack/defaults -/etc/spack/* -!/etc/spack/defaults /share/spack/modules /share/spack/lmod # Debug logs @@ -16,6 +13,12 @@ spack-db.* *.in.log *.out.log +# Configuration: Ignore everything in /etc/spack, +# except defaults and site scopes that ship with spack +/etc/spack/* +!/etc/spack/defaults +!/etc/spack/site/README.md + ########################### # Python-specific ignores # ########################### diff --git a/.readthedocs.yml b/.readthedocs.yml index 
b8d7726716c003..7f005d272e9535 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,37 +1,72 @@ version: 2 build: - os: "ubuntu-22.04" + os: "ubuntu-24.04" apt_packages: - graphviz + - inkscape + - xindy tools: - python: "3.11" + python: "3.14" + jobs: + post_checkout: + - git fetch --unshallow || true # get accurate "Last updated on" info sphinx: configuration: lib/spack/docs/conf.py fail_on_warning: true +formats: + - pdf + python: install: - requirements: lib/spack/docs/requirements.txt search: ranking: - spack.html: -10 - spack.*.html: -10 _modules/*: -10 - command_index.html: -9 - basic_usage.html: 5 - configuration.html: 5 - config_yaml.html: 5 - packages_yaml.html: 5 + spack_repo.*.html: -10 + spack_repo.html: -10 + spack.*.html: -10 + spack.html: -10 + command_index.html: 4 + advanced_topics.html: 5 + binary_caches.html: 5 + bootstrapping.html: 5 build_settings.html: 5 - environments.html: 5 + build_systems.html: 5 + build_systems/*.html: 5 + chain.html: 5 + config_yaml.html: 5 + configuring_compilers.html: 5 containers.html: 5 + roles_and_responsibilities.html: 5 + contribution_guide.html: 5 + developer_guide.html: 5 + package_review_guide.html: 5 + env_vars_yaml.html: 5 + environments.html: 5 + extensions.html: 5 + features.html: 5 + getting_help.html: 5 + getting_started.html: 5 + gpu_configuration.html: 5 + include_yaml.html: 5 + installing_prerequisites.html: 5 mirrors.html: 5 module_file_support.html: 5 - repositories.html: 5 - binary_caches.html: 5 - chain.html: 5 + package_api.html: 5 + package_fundamentals.html: 5 + packages_yaml.html: 5 + packaging_guide_advanced.html: 5 + packaging_guide_build.html: 5 + packaging_guide_creation.html: 5 + packaging_guide_testing.html: 5 pipelines.html: 5 - packaging_guide.html: 5 + replace_conda_homebrew.html: 5 + repositories.html: 5 + signing.html: 5 + spec_syntax.html: 5 + windows.html: 5 + frequently_asked_questions.html: 6 diff --git a/CHANGELOG.md b/CHANGELOG.md index 06775c407acb8e..c18dbee05d75a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,199 @@ +# v1.1.0 (2025-11-14) + +`v1.1.0` features major improvements to **compiler handling** and **configuration management**, a significant refactoring of **externals**, and exciting new **experimental features** like a console UI for parallel installations and concretization caching. + +## Major new features + +1. **Enhanced Compiler Control and Unmixing** + + * Compiler unmixing (#51135) + * Propagated compiler preferences (#51383) + + In Spack v1.0, support for compilers as nodes made it much easier to mix compilers for the same language on different packages in a Spec. This increased flexibility, but did not offer options to constrain compiler selection when needed. + + * #51135 introduces the `concretizer:compiler_mixing` config option. When disabled, all specs in the "root unification set" (root specs and their transitive link/run deps) will be assigned a single compiler for each language. You can also specify a list of packages to be excepted from the restriction. + + * #51383 introduces the `%%` sigil in the spec syntax. While `%` specifies a direct dependency for a single node, `%%` specifies a dependency for that node and a preference for its transitive link/run dependencies (at the same priority as the `prefer` key in `packages.yaml` config). + +2. 
**Customizable configuration** (#51162) + + All configuration now stems from `$spack/etc/spack` and `$spack/etc/spack/defaults`, so the owner of a Spack instance can have full control over what configuration scopes exist. + + * Scopes included in configuration can be named, and the builtin `site`, `user`, `system`, etc. scopes are now defined in configuration rather than hard-coded. + * `$spack/etc/spack/defaults` is the lowest priority. + * `$spack/etc/spack` *includes* the other scopes at lower precedence than itself. + * You can override any scopes *except* the defaults with `include::`. For example, `include::[]` in an environment allows you to ignore everything but the defaults entirely. + + Here is `$spack/etc/spack/include.yaml`: + + ```yaml + include: + # user configuration scope + - name: "user" + path: "~/.spack" + optional: true + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' + + # site configuration scope + - name: "site" + path: "$spack/etc/spack/site" + optional: true + + # system configuration scope + - name: "system" + path: "/etc/spack" + optional: true + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' + ``` + + NOTE: This change inverts the priority order of configuration in `$spack/etc/spack` and `~/.spack`. + + See the [configuration docs](https://spack.readthedocs.io/en/latest/configuration.html) and + [include docs](https://spack.readthedocs.io/en/latest/include_yaml.html) for + more information. + +3. **Git includes** (#51191) + + Configuration files can now be included directly from a **remote Git repository**. This allows for easier sharing and versioning of complex configurations across teams or projects. These entries accept the same syntax as remote repository configuration, and can likewise be conditional with `when:`. + + ```yaml + include: + - git: https://github.com/spack/spack-configs + branch: main + when: os == "centos7" + paths: + - USC/config/config.yaml + - USC/config/packages.yaml + ``` + + See [the docs](https://spack.readthedocs.io/en/latest/include_yaml.html#git-repository-files) for details. + +4. **Externals Can Now Have Dependencies** (#51118) + + Externals are treated as concrete specs, so there is a 1:1 mapping between an entry in `packages.yaml` and any installed external spec (for a fixed repository). + + Their YAML specification has been extended to allow modeling dependencies of external specs. This can be quite useful to better capture, e.g., ROCm installations that are already present on a given system, and similar cases. + + To be backward compatible with external specs specifying a compiler, for instance `mpich %gcc@9`, Spack will match the compiler specification to an existing external. It will fail when the specification is ambiguous, or if it does not match any of the existing externals. + + +## Experimental Features + +5. **New installer UI** (experimental, see #51434) + + New, experimental console UI for the Spack installer that allows: + + * Spack to show progress on multiple parallel processes concurrently; + * Users to view logs for different installations independently; and + * Spack to share a jobserver among multiple parallel builds. + + Demo: https://asciinema.org/a/755827 + + Usage: + + * Run this to enable by default (and persist across runs): + ``` + spack config add config:installer:new + ``` + or use: + ``` + spack -c config:installer:new install ... + ``` + to try one run with the new UI. + * The `-j` flag in `spack install -j ...` is all you need; it will build packages in parallel. 
There is no need to set `-p`; the installer spawns as many builds as it can and shares work by default. + * Use `n` for next logs and `p/N` for previous logs + * Use `v` to toggle between logs and overview + * Use `q` or `Esc` to go from logs back to overview. + * Use `/` to enter search mode: filters the overview as you type; press `Enter` to follow logs or `Esc` to exit search mode. + + > [!WARNING] + > This feature is experimental because it is not yet feature-complete compared to the existing installer. See issue #51515 for a list of features that are not yet complete. In particular, note that the new installer locks the entire database, so other Spack instances will not install concurrently while it is running. + +6. **Concretization Caching** (experimental, see #50905, #51448) + + Spack can cache concretization outputs for performance. With caching, Spack will still set up the concretization problem, but it can look up the solve result and avoid long solve times. This feature is currently off by default, but you can enable it with: + + ``` + spack config add concretizer:concretization_cache:enable:true + ``` + + > [!WARNING] + > Currently there is a bug where the cache can return results that do not properly reflect changes in the `package_hash` (that is, changes in the `package.py` source code). We will enable caching by default in a future release, when this bug is fixed. + +## Potentially breaking changes +* Configurable configuration changes the precedence of the `site` scope. + * The `spack` scope (in `etc/spack` within the Spack installation) is now the highest precedence scope. + * The `site` scope is now *lower* precedence than `spack` and `user`. + * If you previously had configuration files in `$spack/etc/spack`, they will take precedence over configuration in `~/.spack`. If you do not want that, move them to `$spack/etc/spack/site`. + * See #51162 for details. +* Fixed a bug with command-line and environment scope ordering. The environment scope could previously override custom command-line scopes. Now, the active environment is *always* lower precedence than any configuration scopes provided on the command line. (#51461) + +## Other notable improvements + +### Improved error messages +* solver: catch invalid dependencies during concretization (#51176) +* improved errors for requirements (#45800) + +### Performance Improvements +* `spack mirror create --all` now runs in parallel (#50901) +* `spack develop`: fast automatic reconcretization (#51140) +* Don't spawn a process for `--fake` installs (#51491) +* Use `gethostname` instead of `getfqdn` (#51481) +* Check for `commit` variant only if not developing (#51507) +* Concretization performance improvements (#51160, #51152, #51416) +* spack diff: fix performance bug (#51270) + +### Concretizer improvements +* concretizer: fix direct dep w/ virtuals issue (#51037) +* solver: reduce items in edge optimizations (#51503) + +### UI and Commands +* Managed environments can now be organized into folders (#50994) +* `spack info` shows full info about conditional dependencies and can filter by spec. 
(#51137) +* `spack help` is now reorganized and has color sections (#51484) +* `spack clean --all` means all (no exception for bootstrap cache) (#50984) +* `--variants-by-name` no longer used (#51450) +* `spack env create`: allow creation from env or env dir (#51433) + +## Notable Bugfixes +* mirror: clean up stage when retrying (#43519) +* Many smaller concretization fixes (#51361, #51355, #51341, #51347, #51282, #51190, #51226, #51065, #51064, #51074) +* Bugfix for failed multi-node parallel installations (#50933) + +## Spack community stats + +* 1,681 commits +* 8,611 packages in the 2025.11.0 release, 112 new since 2025.07.0 +* 276 people contributed to this release +* 265 committers to packages +* 31 committers to core + +See the [2025.11.0 release](https://github.com/spack/spack-packages/releases/tag/v2025.11.0) of [spack-packages](https://github.com/spack/spack-packages/) for more details. + + +# v1.0.2 (2025-09-11) + +## Bug Fixes + +* `spack config edit` can now open malformed YAML files. (#51088) +* `spack edit -b` supports specifying the repository path or its namespace. (#51084) +* `spack repo list` escapes the color code for paths that contain `@g`. (#51178) +* Fixed various issues on the solver: + * Improved the error message when an invalid dependency is specified in the input. (#51176) + * Build the preferred compiler with itself by default. (#51201) + * Fixed a performance regression when using `unify:when_possible`. (#51226) + * Fixed an issue with strong preferences, when provider details are given. (#51263) + * Fixed an issue when specifying flags on a package that appears multiple times in the DAG. (#51218) +* Fixed a regression for `zsh` in `spack env activate --prompt`. (#51258) +* Fix a few cases where the `when` context manager was not dealing with direct dependencies correctly. (#51259) +* Various fixes to string representations of specs. (#51207) + +## Enhancements + +* Various improvements to the documentation (#51145, #51151, #51147, #51181, #51172, #51188, #51195) +* Greatly improve the performance of `spack diff`. (#51270) +* `spack solve` highlights optimization weights in a more intuitive way. (#51198) + # v1.0.1 (2025-08-11) ## Bug Fixes diff --git a/NEWS.md b/NEWS.md index 6488c2bc04b939..0224d3779d3648 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,6 @@ +## Package API v2.4 +- Added the `%%` sigil to spec syntax, to propagate compiler preferences. + ## Spack v1.0.0 Deprecated the implicit attributes: - `PackageBase.legacy_buildsystem` @@ -5,6 +8,9 @@ Deprecated the implicit attributes: - `Builder.legacy_attributes` - `Builder.legacy_long_methods` +## Package API v2.3 +- `spack.package.version` directive: added `git_sparse_paths` parameter. 
+ ## Package API v2.2 Added to `spack.package`: - `BuilderWithDefaults` diff --git a/REMOVE_SPACK_STACK_MODS_FOR_CI.sh b/REMOVE_SPACK_STACK_MODS_FOR_CI.sh index e9947b203136eb..b0656f786220de 100755 --- a/REMOVE_SPACK_STACK_MODS_FOR_CI.sh +++ b/REMOVE_SPACK_STACK_MODS_FOR_CI.sh @@ -8,14 +8,14 @@ else SED_PARAM="" fi -sed -i ${SED_PARAM} 's/extensions:/#extensions:/g' ./etc/spack/defaults/config.yaml -sed -i ${SED_PARAM} 's/- ${SPACK_STACK_DIR}/#- ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/config.yaml +sed -i ${SED_PARAM} 's/extensions:/#extensions:/g' ./etc/spack/defaults/base/config.yaml +sed -i ${SED_PARAM} 's/- ${SPACK_STACK_DIR}/#- ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/base/config.yaml -sed -i ${SED_PARAM} 's/#builtin:/builtin:/g' ./etc/spack/defaults/repos.yaml -sed -i ${SED_PARAM} 's/# git: https/ git: https/g' ./etc/spack/defaults/repos.yaml -sed -i ${SED_PARAM} 's/# branch: / branch: /g' ./etc/spack/defaults/repos.yaml -sed -i ${SED_PARAM} 's/builtin: ${SPACK_STACK_DIR}/#builtin: ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/repos.yaml -sed -i ${SED_PARAM} 's/spack_stack: ${SPACK_STACK_DIR}/#spack_stack: ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/repos.yaml +sed -i ${SED_PARAM} 's/#builtin:/builtin:/g' ./etc/spack/defaults/base/repos.yaml +sed -i ${SED_PARAM} 's/# git: https/ git: https/g' ./etc/spack/defaults/base/repos.yaml +sed -i ${SED_PARAM} 's/# branch: / branch: /g' ./etc/spack/defaults/base/repos.yaml +sed -i ${SED_PARAM} 's/builtin: ${SPACK_STACK_DIR}/#builtin: ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/base/repos.yaml +sed -i ${SED_PARAM} 's/spack_stack: ${SPACK_STACK_DIR}/#spack_stack: ${SPACK_STACK_DIR}/g' ./etc/spack/defaults/base/repos.yaml set +e diff --git a/etc/spack/README.md b/etc/spack/README.md new file mode 100644 index 00000000000000..c23ea064812bff --- /dev/null +++ b/etc/spack/README.md @@ -0,0 +1,6 @@ +# Spack main config scope + +This is Spack's top-level configuration scope. Everything except Spack's bare-bones +default configuration is included from here. + +The main configuration scope defines other scopes. diff --git a/etc/spack/defaults/concretizer.yaml b/etc/spack/defaults/base/concretizer.yaml similarity index 81% rename from etc/spack/defaults/concretizer.yaml rename to etc/spack/defaults/base/concretizer.yaml index 0d1042dee258f3..51c8a20013af9f 100644 --- a/etc/spack/defaults/concretizer.yaml +++ b/etc/spack/defaults/base/concretizer.yaml @@ -60,6 +60,7 @@ concretizer: py-flit-core: 2 py-pip: 2 py-setuptools: 2 + py-versioneer: 2 py-wheel: 2 xcb-proto: 2 # Compilers @@ -70,7 +71,10 @@ concretizer: # it can reuse. Note this is a directional compatibility so mutual compatibility between two OS's # requires two entries i.e. os_compatible: {sonoma: [monterey], monterey: [sonoma]} os_compatible: {} - + # If false, force all link/run dependencies of root to match c/c++/Fortran compiler. If this is + # a list, then the listed packages are allowed to use a different compiler, but all others must + # match. + compiler_mixing: true # Option to specify whether to support splicing. Splicing allows for # the relinking of concrete package dependencies in order to better # reuse already built packages with ABI compatible dependencies @@ -89,3 +93,17 @@ concretizer: # Static analysis may reduce the concretization time by generating smaller ASP problems, in # cases where there are requirements that prevent part of the search space to be explored. static_analysis: false + + # If enabled, concretizations are cached in the misc_cache. 
The cache is keyed by the hash + # of solver inputs, so we only need to run setup (not solve) if there is a cache hit. + # This feature is experimental: enabling it may result in invalid concretizations + # if package recipes are changed, see: https://github.com/spack/spack/issues/51553 + concretization_cache: + enable: false + + # Options to control the behavior of the concretizer with external specs + externals: + # Either 'architecture_only', to complete external specs with just the architecture of the + # current host, or 'default_variants' to complete external specs also with missing variants, + # using their default value. + completion: default_variants diff --git a/etc/spack/defaults/config.yaml b/etc/spack/defaults/base/config.yaml similarity index 96% rename from etc/spack/defaults/config.yaml rename to etc/spack/defaults/base/config.yaml index ab71a210aa1e9b..858eae96275824 100644 --- a/etc/spack/defaults/config.yaml +++ b/etc/spack/defaults/base/config.yaml @@ -5,13 +5,13 @@ # sensible defaults out of the box. Spack maintainers should edit this # file to keep it current. # -# Users can override these settings by editing the following files. +# Users can override these settings by editing files in: # -# Per-spack-instance settings (overrides defaults): -# $SPACK_ROOT/etc/spack/config.yaml +# * $SPACK_ROOT/etc/spack/ - Spack instance settings +# * ~/.spack/ - user settings +# * $SPACK_ROOT/etc/spack/site - Spack "site" settings. Like instance settings +# but lower priority than user settings. # -# Per-user settings (overrides default and site settings): -# ~/.spack/config.yaml # ------------------------------------------------------------------------- config: # This is the path to the root of the Spack install tree. @@ -173,6 +173,8 @@ config: # windows due to a lack of filesystem locks. concurrent_packages: 1 + # Which installer to use: "old" or "new". The new installer is experimental. + installer: old # If set to true, Spack will use ccache to cache C compiles. ccache: false diff --git a/etc/spack/defaults/mirrors.yaml b/etc/spack/defaults/base/mirrors.yaml similarity index 100% rename from etc/spack/defaults/mirrors.yaml rename to etc/spack/defaults/base/mirrors.yaml diff --git a/etc/spack/defaults/modules.yaml b/etc/spack/defaults/base/modules.yaml similarity index 100% rename from etc/spack/defaults/modules.yaml rename to etc/spack/defaults/base/modules.yaml diff --git a/etc/spack/defaults/base/packages.yaml b/etc/spack/defaults/base/packages.yaml index 6b0a4a517aadc7..63580f6006531a 100644 --- a/etc/spack/defaults/base/packages.yaml +++ b/etc/spack/defaults/base/packages.yaml @@ -21,7 +21,6 @@ packages: blas: [openblas, amdblis] c: [gcc, llvm, intel-oneapi-compilers] cxx: [gcc, llvm, intel-oneapi-compilers] - D: [ldc] daal: [intel-oneapi-daal] elf: [elfutils] fftw-api: [fftw, amdfftw] diff --git a/etc/spack/defaults/repos.yaml b/etc/spack/defaults/base/repos.yaml similarity index 100% rename from etc/spack/defaults/repos.yaml rename to etc/spack/defaults/base/repos.yaml diff --git a/etc/spack/defaults/bootstrap.yaml b/etc/spack/defaults/bootstrap.yaml index b2e2c0f37385ed..c2a2f25b016e85 100644 --- a/etc/spack/defaults/bootstrap.yaml +++ b/etc/spack/defaults/bootstrap.yaml @@ -9,15 +9,15 @@ bootstrap: # may not be able to bootstrap all the software that Spack needs, # depending on its type. 
sources: + - name: github-actions-v2 + metadata: $spack/share/spack/bootstrap/github-actions-v2 - name: github-actions-v0.6 metadata: $spack/share/spack/bootstrap/github-actions-v0.6 - - name: github-actions-v0.5 - metadata: $spack/share/spack/bootstrap/github-actions-v0.5 - name: spack-install metadata: $spack/share/spack/bootstrap/spack-install trusted: # By default we trust bootstrapping from sources and from binaries # produced on Github via the workflow + github-actions-v2: true github-actions-v0.6: true - github-actions-v0.5: true spack-install: true diff --git a/etc/spack/defaults/include.yaml b/etc/spack/defaults/include.yaml index 40b7cddcb60096..d03fb044b8be19 100644 --- a/etc/spack/defaults/include.yaml +++ b/etc/spack/defaults/include.yaml @@ -1,4 +1,7 @@ include: + # default platform-specific configuration - path: "${platform}" optional: true + + # base packages.yaml overridable by platform-specific settings - path: base diff --git a/etc/spack/include.yaml b/etc/spack/include.yaml new file mode 100644 index 00000000000000..4bdc37ccb41895 --- /dev/null +++ b/etc/spack/include.yaml @@ -0,0 +1,20 @@ +include: + # user configuration scope + - name: "user" + path_override_env_var: SPACK_USER_CONFIG_PATH + path: "~/.spack" + optional: true + prefer_modify: true + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' + + # site configuration scope + - name: "site" + path: "$spack/etc/spack/site" + optional: true + + # system configuration scope + - name: "system" + path_override_env_var: SPACK_SYSTEM_CONFIG_PATH + path: "/etc/spack" + optional: true + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' diff --git a/etc/spack/site/README.md b/etc/spack/site/README.md new file mode 100644 index 00000000000000..71dbb31648af92 --- /dev/null +++ b/etc/spack/site/README.md @@ -0,0 +1,5 @@ +# Spack site configuration scope + +This is the "site" configuration scope for Spack. By default, it sits +at *lower* priority than the spack scope in `etc/spack` within the Spack +directory, and *lower* priority than user configuration. diff --git a/lib/spack/docs/_pygments/style.py b/lib/spack/docs/_pygments/style.py deleted file mode 100644 index 6f377fc3c6751c..00000000000000 --- a/lib/spack/docs/_pygments/style.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -# The name of the Pygments (syntax highlighting) style to use. 
-# We use our own extension of the default style with a few modifications -from pygments.styles.default import DefaultStyle -from pygments.token import Generic - - -class SpackStyle(DefaultStyle): - styles = DefaultStyle.styles.copy() - background_color = "#f4f4f8" - styles[Generic.Output] = "#355" - styles[Generic.Prompt] = "bold #346ec9" diff --git a/lib/spack/docs/_static/css/custom.css b/lib/spack/docs/_static/css/custom.css index a1f672568e8c40..615fb1be8c844c 100644 --- a/lib/spack/docs/_static/css/custom.css +++ b/lib/spack/docs/_static/css/custom.css @@ -1,9 +1,57 @@ div.versionadded { - border-left: 3px solid #0c731f; - color: #0c731f; - padding-left: 1rem; + border-left: 3px solid #0c731f; + color: #0c731f; + padding-left: 1rem; } .py.property { display: block !important; +} + +div.version-switch { + text-align: center; + min-height: 2em; +} + +div.version-switch>select { + display: inline-block; + text-align-last: center; + background: none; + border: none; + border-radius: 0.5em; + box-shadow: none; + color: var(--color-foreground-primary); + cursor: pointer; + appearance: none; + padding: 0.2em; + -webkit-appearance: none; + -moz-appearance: none; +} + +div.version-switch select:active, +div.version-switch select:focus, +div.version-switch select:hover { + color: var(--color-foreground-secondary); + background: var(--color-background-hover); +} + +.toc-tree li.scroll-current>.reference { + font-weight: normal; +} + +.search-results span { + background-color: #fff3cd; + padding: 0.1rem 0.2rem; + border-radius: 2px; +} + +@media (prefers-color-scheme: dark) { + body:not([data-theme="light"]) .search-results span { + background-color: #664d03; + color: #fff3cd; + } +} + +.highlight .go { + color: #333; } \ No newline at end of file diff --git a/lib/spack/docs/_static/js/versions.js b/lib/spack/docs/_static/js/versions.js new file mode 100644 index 00000000000000..27399f851d6cb7 --- /dev/null +++ b/lib/spack/docs/_static/js/versions.js @@ -0,0 +1,134 @@ +// based on https://github.com/readthedocs/sphinx_rtd_theme/blob/3.0.2/sphinx_rtd_theme/static/js/versions.js_t + +function onSelectorSwitch(event) { + const option = event.target.selectedIndex; + const item = event.target.options[option]; + window.location.href = item.dataset.url; +} + +function initVersionSelector(config) { + const versionSwitch = document.querySelector(".version-switch"); + if (!versionSwitch) { return; } + let versions = config.versions.active; + if (config.versions.current.hidden || config.versions.current.type === "external") { + versions.unshift(config.versions.current); + } + const versionSelect = ` + +`; + + versionSwitch.innerHTML = versionSelect; + versionSwitch.firstElementChild.addEventListener("change", onSelectorSwitch); +} + +function initSearch(currentVersion) { + // Numeric versions are PRs which have no search results; use latest instead. + const searchVersion = /^\d+$/.test(currentVersion) ? 
'latest' : currentVersion;
+    let searchTimeout;
+    let originalContent;
+    const searchInput = document.querySelector(".sidebar-search");
+    const mainContent = document.getElementById("furo-main-content");
+    const searchForm = document.querySelector(".sidebar-search-container");
+
+    if (!searchInput || !mainContent || !searchForm) { return; }
+
+    // Store original content so it can be restored when the query is cleared
+    originalContent = mainContent.innerHTML;
+
+    searchInput.addEventListener("input", handleSearchInput);
+    searchInput.addEventListener("keydown", handleTabNavigation);
+    searchForm.addEventListener("submit", handleFormSubmit);
+
+    function handleSearchInput(e) {
+        const query = e.target.value.trim();
+        clearTimeout(searchTimeout);
+        if (query.length === 0) {
+            mainContent.innerHTML = originalContent;
+            return;
+        }
+        // Debounce: only search after the user pauses typing for 300 ms
+        searchTimeout = setTimeout(function () {
+            performSearch(query);
+        }, 300);
+    }
+
+    function handleFormSubmit(e) {
+        e.preventDefault();
+        const query = searchInput.value.trim();
+        if (query) {
+            performSearch(query);
+        }
+    }
+
+    function handleTabNavigation(e) {
+        // Check if we're tabbing through search results
+        if (e.key !== 'Tab' || e.shiftKey) { return; }
+        const searchResults = document.querySelector(".search-results");
+        if (!searchResults) { return; }
+
+        // Focus on the first link in search results instead of default behavior
+        e.preventDefault();
+        const firstLink = searchResults.querySelector("a");
+        if (firstLink) {
+            firstLink.focus();
+        }
+    }
+
+    function performSearch(query) {
+        const fullQuery = `project:spack/${searchVersion} ${query}`;
+        const searchUrl = `/_/api/v3/search/?q=${encodeURIComponent(fullQuery)}`;
+
+        fetch(searchUrl)
+            .then(function (response) {
+                if (!response.ok) { throw new Error("HTTP error! status: " + response.status); }
+                return response.json();
+            })
+            .then(function (data) {
+                displaySearchResults(data, query);
+            })
+            .catch(function (error) {
+                mainContent.innerHTML = "<p class='search-error'>Error performing search.</p>";
+            });
+    }
+
+    function displaySearchResults(data, query) {
+        if (!data.results?.length) {
+            mainContent.innerHTML = `<section><h1>No Results Found</h1><p>No results found for "${query}".</p></section>`;
+            return;
+        }
+
+        // Markup is kept deliberately simple; highlight <span>s come from the API.
+        // result.path and block.id follow the Read the Docs search API v3 response shape.
+        let html = '<section class="search-results"><h1>Search Results</h1>';
+
+        data.results.forEach((result, index) => {
+            const title = result.highlights?.title?.[0] ?? result.title;
+            html += `<h2><a href="${result.path}">${title}</a></h2>`;
+
+            result.blocks?.forEach(block => {
+                const blockTitle = block.highlights?.title?.[0] ?? block.title;
+                html += `<h3><a href="${result.path}#${block.id}">${blockTitle}</a></h3>`;
+                html += block.highlights?.content?.map(content => `<p>${content}</p>`).join('') ?? '';
+            });
+
+            if (index < data.results.length - 1) {
+                html += `<hr>`;
+            }
+        });
+
+        html += "</section>
"; + mainContent.innerHTML = html; + } +} + +document.addEventListener("readthedocs-addons-data-ready", function (event) { + const config = event.detail.data(); + initVersionSelector(config); + initSearch(config.versions.current.slug); +}); diff --git a/lib/spack/docs/_static/spack-logo-text.svg b/lib/spack/docs/_static/spack-logo-text.svg new file mode 120000 index 00000000000000..bc79bed01e36ad --- /dev/null +++ b/lib/spack/docs/_static/spack-logo-text.svg @@ -0,0 +1 @@ +../_spack_root/share/spack/logo/spack-logo-text.svg \ No newline at end of file diff --git a/lib/spack/docs/_static/spack-logo-white-text.svg b/lib/spack/docs/_static/spack-logo-white-text.svg new file mode 120000 index 00000000000000..edc00ebe0f1202 --- /dev/null +++ b/lib/spack/docs/_static/spack-logo-white-text.svg @@ -0,0 +1 @@ +../_spack_root/share/spack/logo/spack-logo-white-text.svg \ No newline at end of file diff --git a/lib/spack/docs/_templates/layout.html b/lib/spack/docs/_templates/base.html similarity index 63% rename from lib/spack/docs/_templates/layout.html rename to lib/spack/docs/_templates/base.html index 393cb1ebc16639..79bdac8db67bd4 100644 --- a/lib/spack/docs/_templates/layout.html +++ b/lib/spack/docs/_templates/base.html @@ -1,4 +1,4 @@ -{% extends "!layout.html" %} +{% extends "!base.html" %} {%- block extrahead %} @@ -9,4 +9,8 @@ gtag('js', new Date()); gtag('config', 'G-S0PQ7WV75K'); + {%- if READTHEDOCS %} + + + {%- endif %} {% endblock %} diff --git a/lib/spack/docs/_templates/sidebar/brand.html b/lib/spack/docs/_templates/sidebar/brand.html new file mode 100644 index 00000000000000..ee32c92ead7874 --- /dev/null +++ b/lib/spack/docs/_templates/sidebar/brand.html @@ -0,0 +1,23 @@ + + +{%- if READTHEDOCS %} +
+{%- endif %} diff --git a/lib/spack/docs/advanced_topics.rst b/lib/spack/docs/advanced_topics.rst index 270d2c1b900328..1058c99ba03646 100644 --- a/lib/spack/docs/advanced_topics.rst +++ b/lib/spack/docs/advanced_topics.rst @@ -1,78 +1,14 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) .. meta:: :description lang=en: - Explore advanced topics in Spack, including defining and using toolchains, auditing packages and configuration, and verifying installations. + Explore advanced topics in Spack, including auditing packages and configuration, and verifying installations. -.. _toolchains: +.. _cmd-spack-audit: -============================= -Defining and Using Toolchains -============================= - -Spack lets you specify compilers on the CLI with, e.g., ``%gcc`` or ``%c,cxx=clang %fortran=gcc``, and you can specify flags with ``cflags``, ``cxxflags``, and ``fflags``. -Depending on how complex your compiler setup is, it can be cumbersome to specify all of your preferences on the CLI. -Spack has a special type of configuration called ``toolchains``, which let you encapsulate the configuration for compilers, other libraries, and flags into a single name that you can reference as though it were one option. - -Toolchains are referenced by name like a direct dependency, using the ``%`` sigil. -They are defined under the ``toolchains`` section of the configuration: - -.. code-block:: yaml - - toolchains: - llvm_gfortran: - - spec: cflags=-O3 - - spec: '%c=llvm' - when: '%c' - - spec: '%cxx=llvm' - when: '%cxx' - - spec: '%fortran=gcc' - when: '%fortran' - -A toolchain that uses *conditional dependencies*, when constraining a virtual provider, can be applied to any node - regardless of whether it *needs* that virtual dependency. -The *guarantee* that the toolchain gives is that *if* the virtual is needed, then the constraint is applied. - -The ``llvm_gfortran`` toolchain, for instance, enforces using ``llvm`` for the C and C++ languages, and ``gcc`` for Fortran, when these languages are needed. -It also adds ``cflags=-O3`` unconditionally. - -Toolchains can be used to simplify the construction of a list of specs using :ref:`environment-spec-matrices`, when the list includes packages with different language requirements: - -.. code-block:: yaml - - specs: - - matrix: - - [kokkos, hdf5~cxx+fortran, py-scipy] - - ["%llvm_gfortran"] - -Note that in this case we can use a single matrix, and the user doesn't need to know exactly which package requires which language. -If we had to enforce compilers directly, we would need 3 matrices, since: - -* ``kokkos`` depends on C and C++, but not Fortran -* ``hdf5~cxx+fortran`` depends on C and Fortran, but not C++ -* ``py-scipy`` depends on C, C++, and Fortran - -Different toolchains could be used independently or even in the same spec. -If we had a toolchain named ``gcc_all`` that enforces using ``gcc`` for C, C++ and Fortran, we could write: - -.. code-block:: - - spack install hdf5+fortran%llvm_gfortran ^mpich %gcc_all - -to install: - -* An ``hdf5`` compiled with ``llvm`` for the C/C++ components, but with its Fortran components compiled with ``gfortran``, -* Built against an MPICH installation compiled entirely with ``gcc`` for C, C++, and Fortran. - -.. note:: - - Toolchains are currently limited to using only direct dependencies (``%``) in their definition. - Transitive dependencies are not allowed. - -.. 
_audit-packages-and-configuration: - -=================================== Auditing Packages and Configuration =================================== @@ -85,8 +21,8 @@ A detailed list of the checks currently implemented for each subcommand can be p .. command-output:: spack -v audit list -Depending on the use case, users might run the appropriate subcommands to obtain diagnostics. -Issues, if found, are reported to stdout: +Depending on the use case, users can run the appropriate subcommands to obtain diagnostics. +If issues are found, they are reported to stdout: .. code-block:: console @@ -96,73 +32,51 @@ Issues, if found, are reported to stdout: the variant 'adios' does not exist in spack_repo/builtin/packages/lammps/package.py -.. _verify-installations: +.. _cmd-spack-verify: -======================= Verifying Installations ======================= -The ``spack verify`` command can be used to verify the validity of -Spack-installed packages any time after installation. +The ``spack verify`` command can be used to verify the validity of Spack-installed packages any time after installation. -------------------------- ``spack verify manifest`` ------------------------- -At installation time, Spack creates a manifest of every file in the -installation prefix. For links, Spack tracks the mode, ownership, and -destination. For directories, Spack tracks the mode and -ownership. For files, Spack tracks the mode, ownership, modification -time, hash, and size. The ``spack verify manifest`` command will check, -for every file in each package, whether any of those attributes have -changed. It will also check for newly added files or deleted files from -the installation prefix. Spack can either check all installed packages -using the ``-a,--all`` option or accept specs listed on the command line to -verify. - -The ``spack verify manifest`` command can also verify for individual files -that they haven't been altered since installation time. If the given file -is not in a Spack installation prefix, Spack will report that it is -not owned by any package. To check individual files instead of specs, -use the ``-f,--files`` option. - -Spack installation manifests are part of the tarball signed by Spack -for binary package distribution. When installed from a binary package, -Spack uses the packaged installation manifest instead of creating one -at install time. - -The ``spack verify`` command also accepts the ``-l,--local`` option to -check only local packages (as opposed to those used transparently from -``upstream`` Spack instances) and the ``-j,--json`` option to output -machine-readable JSON data for any errors. +At installation time, Spack creates a manifest of every file in the installation prefix. +For links, Spack tracks the mode, ownership, and destination. +For directories, Spack tracks the mode and ownership. +For files, Spack tracks the mode, ownership, modification time, hash, and size. +The ``spack verify manifest`` command will check, for every file in each package, whether any of those attributes have changed. +It will also check for newly added files or deleted files from the installation prefix. +Spack can either check all installed packages using the ``-a,--all`` option or accept specs listed on the command line to verify. + +The ``spack verify manifest`` command can also verify that individual files haven't been altered since installation time. +If the given file is not in a Spack installation prefix, Spack will report that it is not owned by any package. 
+To check individual files instead of specs, use the ``-f,--files`` option. + +Spack installation manifests are included in the tarball signed by Spack for binary package distribution. +When installed from a binary package, Spack uses the packaged installation manifest instead of creating one at install time. + +The ``spack verify`` command also accepts the ``-l,--local`` option to check only local packages (as opposed to those used transparently from ``upstream`` Spack instances) and the ``-j,--json`` option to output machine-readable JSON data for any errors. --------------------------- ``spack verify libraries`` -------------------------- -The ``spack verify libraries`` command can be used to verify that packages -do not have accidental system dependencies. This command scans the install -prefixes of packages for executables and shared libraries, and resolves -their needed libraries in their RPATHs. When needed libraries cannot be -located, an error is reported. This typically indicates that a package -was linked against a system library instead of a library provided by -a Spack package. +The ``spack verify libraries`` command can be used to verify that packages do not have accidental system dependencies. +This command scans the install prefixes of packages for executables and shared libraries, and resolves their needed libraries in their RPATHs. +When needed libraries cannot be located, an error is reported. +This typically indicates that a package was linked against a system library instead of a library provided by a Spack package. -This verification can also be enabled as a post-install hook by setting -``config:shared_linking:missing_library_policy`` to ``error`` or ``warn`` -in :ref:`config.yaml `. +This verification can also be enabled as a post-install hook by setting ``config:shared_linking:missing_library_policy`` to ``error`` or ``warn`` in :ref:`config.yaml `. -======================= Filesystem Requirements ======================= -By default, Spack needs to be run from a filesystem that supports -``flock`` locking semantics. Nearly all local filesystems and recent -versions of NFS support this, but parallel filesystems or NFS volumes may -be configured without ``flock`` support enabled. You can determine how -your filesystems are mounted with ``mount``. The output for a Lustre -filesystem might look like this: +By default, Spack needs to be run from a filesystem that supports ``flock`` locking semantics. +Nearly all local filesystems and recent versions of NFS support this, but parallel filesystems or NFS volumes may be configured without ``flock`` support enabled. +You can determine how your filesystems are mounted with ``mount``. +The output for a Lustre filesystem might look like this: .. code-block:: console @@ -172,28 +86,22 @@ filesystem might look like this: Note the ``flock`` option on both Lustre mounts. -If you do not see this or a similar option for your filesystem, you have -a few options. First, you can move your Spack installation to a -filesystem that supports locking. Second, you could ask your system -administrator to enable ``flock`` for your filesystem. +If you do not see this or a similar option for your filesystem, you have a few options. +First, you can move your Spack installation to a filesystem that supports locking. +Second, you could ask your system administrator to enable ``flock`` for your filesystem. If none of those work, you can disable locking in one of two ways: - 1. 
Run Spack with the ``-L`` or ``--disable-locks`` option to disable - locks on a call-by-call basis. - 2. Edit :ref:`config.yaml ` and set the ``locks`` option - to ``false`` to always disable locking. +1. Run Spack with the ``-L`` or ``--disable-locks`` option to disable locks on a call-by-call basis. +2. Edit :ref:`config.yaml ` and set the ``locks`` option to ``false`` to always disable locking. .. warning:: - If you disable locking, concurrent instances of Spack will have no way - to avoid stepping on each other. You must ensure that there is only - **one** instance of Spack running at a time. Otherwise, Spack may end - up with a corrupted database file, or you may not be able to see all - installed packages in commands like ``spack find``. + If you disable locking, concurrent instances of Spack will have no way to avoid stepping on each other. + You must ensure that there is only **one** instance of Spack running at a time. + Otherwise, Spack may end up with a corrupted database, or you may not be able to see all installed packages when running commands like ``spack find``. - If you are unfortunate enough to run into this situation, you may be - able to fix it by running ``spack reindex``. + If you are unfortunate enough to run into this situation, you may be able to fix it by running ``spack reindex``. This issue typically manifests with the error below: @@ -203,7 +111,7 @@ This issue typically manifests with the error below: Traceback (most recent call last): File "./spack", line 176, in main() - File "./spack", line 154,' in main + File "./spack", line 154, in main return_val = command(parser, args) File "./spack/lib/spack/spack/cmd/find.py", line 170, in find specs = set(spack.installed_db.query(\**q_args)) diff --git a/lib/spack/docs/binary_caches.rst b/lib/spack/docs/binary_caches.rst index 6c93bcab56c457..5a0a6c6e03a288 100644 --- a/lib/spack/docs/binary_caches.rst +++ b/lib/spack/docs/binary_caches.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,30 +9,21 @@ .. _binary_caches: -============ Build Caches ============ -Some sites may encourage users to set up their own test environments -before carrying out central installations, or some users may prefer to set -up these environments on their own motivation. To reduce the load of -recompiling otherwise identical package specs in different installations, -installed packages can be put into build cache tarballs, pushed to -your Spack mirror, and then downloaded and installed by others. +To avoid recompilation of Spack packages, installed packages can be pushed to a build cache, and then downloaded and installed by others. -Whenever a mirror provides prebuilt packages, Spack will take these packages -into account during concretization and installation, making ``spack install`` -significantly faster. +Whenever a mirror provides prebuilt packages, Spack will take these packages into account during concretization and installation, making ``spack install`` significantly faster. .. note:: - We use the terms "build cache" and "mirror" often interchangeably. Mirrors - are used during installation for both sources and prebuilt packages. Build - caches refer to mirrors that provide prebuilt packages. + We use the terms "build cache" and "mirror" often interchangeably. + Mirrors are used during installation for both sources and prebuilt packages. 
+ Build caches refer to mirrors that provide prebuilt packages. ----------------------- Creating a Build Cache ---------------------- @@ -41,15 +33,11 @@ Build caches are created via: $ spack buildcache push -This command takes the locally installed spec and its dependencies and -creates tarballs of their install prefixes. It also generates metadata files, -signed with GPG. These tarballs and metadata files are then pushed to the -provided build cache, which can be a local directory or a remote URL. - -Here is an example where a build cache is created in a local directory named -"spack-cache", to which we push the "ninja" spec: +This command takes the locally installed spec and its dependencies, and creates tarballs of their install prefixes. +It also generates metadata files, signed with GPG. +These tarballs and metadata files are then pushed to the provided build cache, which can be a local directory or a remote URL. -ninja-1.12.1-vmvycib6vmiofkdqgrblo7zsvp7odwut +Here is an example where a build cache is created in a local directory named "spack-cache", to which we push the "ninja" spec: .. code-block:: console @@ -62,20 +50,18 @@ Note that ``ninja`` must be installed locally for this to work. Once you have a build cache, you can add it as a mirror, as discussed next. ---------------------------------------- Finding or Installing Build Cache Files --------------------------------------- -To find or install build cache files, a Spack mirror must be configured -with: +To find or install build cache files, a Spack mirror must be configured with: .. code-block:: console $ spack mirror add -Both web URLs and local paths on the filesystem can be specified. In the previous -example, you might add the directory "spack-cache" and call it ``mymirror``: +Both URLs and local paths on the filesystem can be specified. +In the previous example, you might add the directory "spack-cache" and call it ``mymirror``: .. code-block:: console @@ -93,9 +79,8 @@ You can see that the mirror is added with ``spack mirror list`` as follows: spack-public https://spack-llnl-mirror.s3-us-west-2.amazonaws.com/ -At this point, you've created a build cache, but Spack hasn't indexed it, so if -you run ``spack buildcache list``, you won't see any results. You need to index -this new build cache as follows: +At this point, you've created a build cache, but Spack hasn't indexed it, so if you run ``spack buildcache list``, you won't see any results. +You need to index this new build cache as follows: .. code-block:: console @@ -111,12 +96,11 @@ Now you can use ``list``: [ ... ] ninja@1.12.1 -With ``mymirror`` configured and an index available, Spack will automatically -use it during concretization and installation. That means that you can expect -``spack install ninja`` to fetch prebuilt packages from the mirror. Let's -verify by reinstalling ninja: +With ``mymirror`` configured and an index available, Spack will automatically use it during concretization and installation. +That means that you can expect ``spack install ninja`` to fetch prebuilt packages from the mirror. +Let's verify by reinstalling ninja: -.. code-block:: console +.. code-block:: spec $ spack uninstall ninja $ spack install ninja @@ -132,20 +116,17 @@ verify by reinstalling ninja: Search: 0.00s. Fetch: 0.11s. Install: 0.11s. Extract: 0.10s. Relocate: 0.00s. Total: 0.22s [+] /home/spackuser/spack/opt/spack/linux-ubuntu22.04-sapphirerapids/gcc-12.3.0/ninja-1.12.1-ngldn2kpvb6lqc44oqhhow7fzg7xu7lh -It worked! 
You've just completed a full example of creating a build cache with -a spec of interest, adding it as a mirror, updating its index, listing the contents, -and finally, installing from it. +It worked! +You've just completed a full example of creating a build cache with a spec of interest, adding it as a mirror, updating its index, listing the contents, and finally, installing from it. -By default, Spack falls back to building from sources when the mirror is not available -or when the package is simply not already available. To force Spack to only install -prebuilt packages, you can use: +By default, Spack falls back to building from sources when the mirror is not available or when the package is simply not already available. +To force Spack to install only prebuilt packages, you can use: .. code-block:: console $ spack install --use-buildcache only -For example, to combine all of the commands above to add the E4S build cache -and then install from it exclusively, you would do: +For example, to combine all of the commands above to add the E4S build cache and then install from it exclusively, you would do: .. code-block:: console @@ -153,31 +134,97 @@ and then install from it exclusively, you would do: $ spack buildcache keys --install --trust $ spack install --use-buildcache only -We use ``--install`` and ``--trust`` to say that we are installing keys to our -keyring and trusting all downloaded keys. +The ``--install`` and ``--trust`` flags install keys to the keyring and trust all downloaded keys. + + +Build Cache Index Views +^^^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + Introduced in Spack v1.2. + The addition of this feature does not increment the build cache version (v3). + +.. note:: + Build cache index views are not supported in OCI build caches. + +Build caches can quickly become large and inefficient to search as binaries are added over time. +A common workaround to this problem is to break the build cache into stacks that target specific applications or workflows. +This allows binaries to be curated as smaller collections of packages, each pushed to its own mirror with a smaller search area. +However, this approach comes with the tradeoff of requiring much larger storage and computational footprints due to duplication of common dependencies between stacks. +Splitting build caches can also reduce direct fetch hits by reducing the breadth of binaries available in a single mirror. + +To better address the issues with large search areas, build cache index views (or just "views" in this section) were introduced. +A view is a named index that provides a curated view into a larger build cache. +This allows build cache maintainers to provide the same granularity of build caches split by stacks without having to pay for the extra storage and compute required for the duplicated dependencies. + +Views can be created or updated using an active environment, or a list of environment names or paths. +The ``spack buildcache`` commands for views are aliases of ``spack buildcache update-index``. + +View indices are stored similarly to the top-level build cache index, but add the view name as a prefix, e.g. ``/v3/manifests/index/my-stack/index.manifest.json``. + +.. _cmd-spack-buildcache-create-view: + +Creating a Build Cache Index View +""""""""""""""""""""""""""""""""" + +Here is an example of creating a view using an active environment: + +.. 
code-block:: console + + $ spack env activate my-stack + $ spack install + $ spack buildcache push my-mirror + $ spack buildcache update-index --name my-view my-mirror + +It is also possible to create a view from a list of one or more environments by passing the environment names or paths. +If a list of environments is passed while inside an active environment, the active environment is ignored and only the passed environments are considered. + +.. code-block:: console + + $ spack buildcache update-index --name my-view my-mirror my-stack /path/to/environment/my-other-stack + +.. _cmd-spack-buildcache-update-view: + +Updating a Build Cache Index View +""""""""""""""""""""""""""""""""" + +To prevent accidentally overwriting an existing view, you must specify how a view should be updated. +It is possible to use one of two options for updating a view index: ``--force`` or ``--append``. +Using the ``--force`` option will replace the index as if the previous one did not exist. +The ``--append`` option will first read the existing index, and then add the new specs to it. + +.. code-block:: console + + $ spack buildcache push my-mirror + $ spack buildcache update-index --append --name my-view my-mirror my-stack + + +.. warning:: + + Using the ``--append`` option with build cache index views is a non-atomic operation. + If multiple writers append to the same view, the result will only include the state of the last writer. + When using ``--append`` for build cache workflows, it is up to the user to correctly serialize the update operations. + -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ List of Popular Build Caches ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -* `Extreme-scale Scientific Software Stack (E4S) `_: `build cache `_ +* `Spack Public Build Cache `_: `spack build cache `_ +* `Extreme-scale Scientific Software Stack (E4S) `_: `e4s build cache `_ ------------------------------- Creating and Trusting GPG keys ------------------------------ .. _cmd-spack-gpg: -^^^^^^^^^^^^^ ``spack gpg`` ^^^^^^^^^^^^^ Spack has support for signing and verifying packages using GPG keys. A separate keyring is used for Spack, so any keys available in the user's home directory are not used. -^^^^^^^^^^^^^^^^^^ ``spack gpg init`` ^^^^^^^^^^^^^^^^^^ @@ -186,7 +233,6 @@ Keys stored in :file:`var/spack/gpg` are the default keys for a Spack installati These keys may be imported by running ``spack gpg init``. This will import the default keys into the keyring as trusted keys. -^^^^^^^^^^^^^ Trusting keys ^^^^^^^^^^^^^ @@ -198,15 +244,14 @@ Additional keys may be added to the keyring using: .. code-block:: console $ spack gpg trust Once a key is trusted, packages signed by the owner of the key may be installed. -If you would like to remove keys from your keyring, use instead: +To remove keys from your keyring, use: .. code-block:: console $ spack gpg untrust -Key IDs can be email addresses, names, or (best) fingerprints. +Key IDs can be email addresses, names, or (preferably) fingerprints. -^^^^^^^^^^^^^ Creating keys ^^^^^^^^^^^^^ @@ -214,7 +259,7 @@ You may also create your own key so that you may sign your own packages using .. code-block:: console - spack gpg create + $ spack gpg create By default, the key has no expiration, but it may be set with the ``--expires `` flag. It is also recommended to add a comment describing the intended use of the key using the ``--comment `` flag. @@ -223,35 +268,26 @@ Secret keys may also be later exported using the ``spack gpg export [ .. 
admonition:: Key creation speed :class: tip - :collapsible: - The creation of a new GPG key requires generating a lot of random numbers. - Depending on the entropy produced on your system, the entire process may take a long time (*even appearing to hang*). - Virtual machines and cloud instances are particularly likely to display this behavior. + The creation of a new GPG key requires generating a lot of random numbers. + Depending on the entropy produced on your system, the entire process may take a long time (and may even appear to hang). + Virtual machines and cloud instances are particularly likely to display this behavior. - To speed it up, you may install tools like ``rngd``, which is usually available as a package in the host OS. - Another alternative is ``haveged``, which can be installed on RHEL/CentOS machines. + To speed it up, you may install tools like ``rngd``, which is usually available as a package in the host OS. + Another alternative is ``haveged``, which can be installed on RHEL/CentOS machines. - `This Digital Ocean tutorial - `_ - provides a good overview of sources of randomness. + `This Digital Ocean tutorial `_ provides a good overview of sources of randomness. -------------------- Build Cache Signing ------------------- -By default, Spack will add a cryptographic signature to each package pushed to -a build cache and verify the signature when installing from a build cache. +By default, Spack will add a cryptographic signature to each package pushed to a build cache and verify the signature when installing from a build cache. -Keys for signing can be managed with the :ref:`spack gpg ` command, -as well as ``spack buildcache keys``, as mentioned above. +Keys for signing can be managed with the :ref:`spack gpg ` command, as well as ``spack buildcache keys``, as mentioned above. -You can disable signing when pushing with ``spack buildcache push --unsigned`` -and disable verification when installing from any build cache with -``spack install --no-check-signature``. +You can disable signing when pushing with ``spack buildcache push --unsigned`` and disable verification when installing from any build cache with ``spack install --no-check-signature``. -Alternatively, signing and verification can be enabled or disabled on a per-build-cache -basis: +Alternatively, signing and verification can be enabled or disabled on a per-build-cache basis: .. code-block:: console @@ -261,7 +297,7 @@ basis: $ spack mirror set --signed # enable signing and verification for an existing mirror $ spack mirror set --unsigned # disable signing and verification for an existing mirror -Or you can directly edit the ``mirrors.yaml`` configuration file: +Alternatively, you can edit the ``mirrors.yaml`` configuration file directly: .. code-block:: yaml @@ -272,24 +308,17 @@ Or you can directly edit the ``mirrors.yaml`` configuration file: See also :ref:`mirrors`. ----------- Relocation ---------- -When using build caches across different machines, it is likely that the install -root will be different from the one used to build the binaries. +When using build caches across different machines, it is likely that the install root is different from the one used to build the binaries. -To address this issue, Spack automatically relocates all paths encoded in binaries -and scripts to their new location upon installation. +To address this issue, Spack automatically relocates all paths encoded in binaries and scripts to their new location upon installation. 
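+As an illustrative check (the spec and output below are hypothetical), you can print the RPATH of a library installed from a build cache and confirm it points at the new install root rather than the build machine's:
+
+.. code-block:: console
+
+   $ patchelf --print-rpath $(spack location -i zlib)/lib/libz.so
+   /new/install/root/opt/spack/...
+
+The exact output depends on the package and platform; on ELF systems it should list directories under the new install tree.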
-Note that there are some cases where this is not possible: if binaries are built in -a relatively short path and then installed to a longer path, there may not be enough -space in the binary to encode the new path. In this case, Spack will fail to install -the package from the build cache, and a source build is required. +Note that there are some cases where this is not possible: if binaries are built in a relatively short path and then installed to a longer path, there may not be enough space in the binary to encode the new path. +In this case, Spack will fail to install the package from the build cache, and a source build is required. -To reduce the likelihood of this happening, it is highly recommended to add padding to -the install root during the build, as specified in the :ref:`config ` -section of the configuration: +To reduce the likelihood of this happening, it is highly recommended to add padding to the install root during the build, as specified in the :ref:`config ` section of the configuration: .. code-block:: yaml @@ -299,13 +328,11 @@ section of the configuration: padded_length: 128 -.. _binary_caches_oci: - ---------------------------------- Automatic Push to a Build Cache --------------------------------- -Sometimes it is convenient to push packages to a build cache as soon as they are installed. Spack can do this by setting the autopush flag when adding a mirror: +Sometimes it is convenient to push packages to a build cache immediately after they are installed. +Spack can do this by setting the autopush flag when adding a mirror: .. code-block:: console @@ -318,7 +345,8 @@ Or the autopush flag can be set for an existing mirror: $ spack mirror set --autopush # enable automatic push for an existing mirror $ spack mirror set --no-autopush # disable automatic push for an existing mirror -Then, after installing a package, it is automatically pushed to all mirrors with ``autopush: true``. The command +Then, after installing a package, it is automatically pushed to all mirrors with ``autopush: true``. +The command .. code-block:: console @@ -335,19 +363,16 @@ will have the same effect as Packages are automatically pushed to a build cache only if they are built from source. ------------------------------------------ +.. _binary_caches_oci: + OCI / Docker V2 Registries as Build Cache ----------------------------------------- -Spack can also use OCI or Docker V2 registries such as Docker Hub, Quay.io, -GitHub Packages, GitLab Container Registry, JFrog Artifactory, and others -as build caches. This is a convenient way to share binaries using public -infrastructure or to cache Spack-built binaries in GitHub Actions and -GitLab CI. +Spack can also use OCI or Docker V2 registries such as Docker Hub, Quay.io, Amazon ECR, GitHub Packages, GitLab Container Registry, JFrog Artifactory, and others as build caches. +This is a convenient way to share binaries using public infrastructure or to cache Spack-built binaries in GitHub Actions and GitLab CI. +These registries can be used not only to share Spack binaries but also to create and distribute runnable container images. -To get started, configure an OCI mirror using ``oci://`` as the scheme -and optionally specify variables that hold the username and password (or -personal access token) for the registry: +To get started, configure an OCI mirror using ``oci://`` as the scheme and optionally specify variables that hold the username and password (or personal access token) for the registry: .. 
code-block:: console @@ -355,8 +380,19 @@ personal access token) for the registry: --oci-password-variable REGISTRY_TOKEN \ my_registry oci://example.com/my_image -Spack follows the naming conventions of Docker, with Docker Hub as the default -registry. To use Docker Hub, you can omit the registry domain: +This registers a mirror in your ``mirrors.yaml`` configuration file that looks as follows: + +.. code-block:: yaml + + mirrors: + my_registry: + url: oci://example.com/my_image + access_pair: + id_variable: REGISTRY_USER + secret_variable: REGISTRY_TOKEN + +Spack follows the naming conventions of Docker, with Docker Hub as the default registry. +To use Docker Hub, you can omit the registry domain: .. code-block:: console @@ -371,14 +407,79 @@ From here, you can use the mirror as any other build cache: $ spack buildcache push my_registry # push to the registry $ spack install # or install from the registry -A unique feature of build caches on top of OCI registries is that it's incredibly -easy to generate a runnable container image with the binaries installed. This -is a great way to make applications available to users without requiring them to -install Spack -- all you need is Docker, Podman, or any other OCI-compatible container -runtime. +.. note:: + + Spack defaults to ``https`` for OCI registries, and does not fall back to ``http`` in case of failure. + For local registries which use ``http`` instead of ``https``, you can specify ``oci+http://localhost:5000/my_image``. + +.. _oci-authentication: + +Authentication with popular Container Registries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To produce container images, all you need to do is add the ``--base-image`` flag -when pushing to the build cache: +Below are instructions for authenticating with some of the most popular container registries. +In all cases, you need to generate a (temporary) token to use as the password -- this is not the same as your account password. + +GHCR +"""""" + +To authenticate with GitHub Container Registry (GHCR), you can use your GitHub username as the username. +For the password, you can use either: + +#. A personal access token (PAT) with ``write:packages`` scope. +#. A GitHub Actions token (``GITHUB_TOKEN``) with ``packages:write`` permission. + +See also `GitHub's documentation `_ and :ref:`github-actions-build-cache` below. + +Docker Hub +"""""""""" + +To authenticate with Docker Hub, you can use your Docker Hub username as the username. +For the password, you need to generate a personal access token (PAT) on the Docker Hub website. +See `Docker's documentation `_ for more information. + +Amazon ECR +"""""""""" + +To authenticate with Amazon ECR, you can use the AWS CLI to generate a temporary password. +The username is always ``AWS``. + +.. code-block:: console + + $ export AWS_ECR_PASSWORD=$(aws ecr get-login-password --region ) + $ spack mirror add \ + --oci-username AWS \ + --oci-password-variable AWS_ECR_PASSWORD \ + my_registry \ + oci://XXX.dkr.ecr..amazonaws.com/my/image + +See also `AWS's documentation `_. + +Azure Container Registry +"""""""""""""""""""""""" + +To authenticate with an Azure Container Registry that has RBAC enabled, you can use the Azure CLI to generate a temporary password for your managed identity. +The username is always ``00000000-0000-0000-0000-000000000000``. + +.. 
code-block:: console + + $ export AZURE_ACR_PASSWORD=$(az acr login --name --expose-token --output tsv --query accessToken) + $ spack mirror add \ + --oci-username 00000000-0000-0000-0000-000000000000 \ + --oci-password-variable AZURE_ACR_PASSWORD \ + my_registry \ + oci://.azurecr.io/my/image + +See also `Azure's documentation `_. + + +Build Cache and Container Images +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A unique feature of build caches on top of OCI registries is that it's incredibly easy to generate a runnable container image with the binaries installed. +This is a great way to make applications available to users without requiring them to install Spack -- all you need is Docker, Podman, or any other OCI-compatible container runtime. + +To produce container images, all you need to do is add the ``--base-image`` flag when pushing to the build cache: .. code-block:: console @@ -389,39 +490,31 @@ when pushing to the build cache: root@e4c2b6f6b3f4:/# ninja --version 1.11.1 -If ``--base-image`` is not specified, distroless images are produced. In practice, -you won't be able to run these as containers because they don't come with libc and -other system dependencies. However, they are still compatible with tools like -``skopeo``, ``podman``, and ``docker`` for pulling and pushing. +If ``--base-image`` is not specified, Spack produces distroless images. +In practice, you won't be able to run these as containers because they don't come with libc and other system dependencies. +However, they are still compatible with tools like ``skopeo``, ``podman``, and ``docker`` for pulling and pushing. -.. note:: - The Docker ``overlayfs2`` storage driver is limited to 128 layers, above which a - ``max depth exceeded`` error may be produced when pulling the image. There - are `alternative drivers `_. +See the section :ref:`exporting-images` for more details on how to create container images with Spack. + +.. _github-actions-build-cache: ------------------------------------- Spack Build Cache for GitHub Actions ------------------------------------ -To significantly speed up Spack in GitHub Actions, binaries can be cached in -GitHub Packages. This service is an OCI registry that can be linked to a GitHub -repository. +To significantly speed up Spack in GitHub Actions, binaries can be cached in GitHub Packages. +This service is an OCI registry that can be linked to a GitHub repository. -Spack offers a public build cache for GitHub Actions with a set of common packages, -which lets you get started quickly. See the following resources for more information: +Spack offers a public build cache for GitHub Actions with a set of common packages, which lets you get started quickly. +See the following resources for more information: -* `spack/setup-spack `_ for setting up Spack in GitHub - Actions -* `spack/github-actions-buildcache `_ for - more details on the public build cache +* `spack/setup-spack `_ for setting up Spack in GitHub Actions +* `spack/github-actions-buildcache `_ for more details on the public build cache .. 
_cmd-spack-buildcache: --------------------- ``spack buildcache`` -------------------- -^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``spack buildcache push`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -440,7 +533,6 @@ Arguments Description ``-y`` answer yes to all questions about creating unsigned build caches ============== ======================================================================================================================== -^^^^^^^^^^^^^^^^^^^^^^^^^ ``spack buildcache list`` ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -454,12 +546,10 @@ Arguments Description E.g., ``spack buildcache list gcc`` will print only commands to install ``gcc`` package(s). -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``spack buildcache install`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Retrieves all specs for build caches available on a Spack mirror and installs build caches -with specs matching the input specs. +Retrieves all specs for build caches available on a Spack mirror and installs build caches with specs matching the input specs. ============== ============================================================================================== Arguments Description ============== ============================================================================================== ``-y`` answer yes to all questions about skipping GPG verification of packages ============== ============================================================================================== -^^^^^^^^^^^^^^^^^^^^^^^^^ ``spack buildcache keys`` ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -484,31 +573,25 @@ Arguments Description .. _build_cache_layout: ------------------- Build Cache Layout ------------------ -This section describes the structure and content of URL-style build caches, as -distinguished from OCI-style build caches. +This section describes the structure and content of URL-style build caches, as distinguished from OCI-style build caches. -The entry point for a binary package is a manifest JSON file that points to at -least two other files stored as content-addressed blobs. These files include a spec -metadata file, as well as the installation directory of the package stored as -a compressed archive file. Binary package manifest files are named to indicate -the package name and version, as well as the hash of the concrete spec. For -example: +The entry point for a binary package is a manifest JSON file that references at least two other files stored as content-addressed blobs. +These files include a spec metadata file, as well as the installation directory of the package stored as a compressed archive file. +Binary package manifest files are named to indicate the package name and version, as well as the hash of the concrete spec. +For example: .. code-block:: text gcc-runtime-12.3.0-qyu2lvgt3nxh7izxycugdbgf5gsdpkjt.spec.manifest.json would contain the manifest for a binary package of ``gcc-runtime@12.3.0``. -The ID of the built package is defined to be the DAG hash of the concrete spec -and exists in the name of the file as well. The ID distinguishes a particular -binary package from all other binary packages with the same package name and -version. Below is an example binary package manifest file. Such a file would -live in the versioned spec manifests directory of a binary mirror, for example, -``v3/manifests/spec/``: +The ID of the built package is defined to be the DAG hash of the concrete spec and exists in the name of the file as well. +The ID distinguishes a particular binary package from all other binary packages with the same package name and version. +Below is an example binary package manifest file. 
+Such a file would live in the versioned spec manifests directory of a binary mirror, for example, ``v3/manifests/spec/``: .. code-block:: json @@ -532,17 +615,11 @@ live in the versioned spec manifests directory of a binary mirror, for example, ] } -The manifest points to both the compressed tar file as well as the compressed -spec metadata file and contains the checksum of each. This checksum -is also used as the address of the associated file and, hence, must be -known in order to locate the tarball or spec file within the mirror. Once the -tarball or spec metadata file is downloaded, the checksum should be computed locally -and compared to the checksum in the manifest to ensure the contents have not changed -since the binary package was pushed. Spack stores all data files (including compressed -tar files, spec metadata, indices, public keys, etc.) within a ``blobs//`` -directory, using the first two characters of the checksum as a subdirectory -to reduce the number of files in a single folder. Here is a depiction of the -organization of binary mirror contents: +The manifest references both the compressed tar file as well as the compressed spec metadata file, and contains the checksum of each. +This checksum is also used as the address of the associated file and, hence, must be known in order to locate the tarball or spec file within the mirror. +Once the tarball or spec metadata file is downloaded, the checksum should be computed locally and compared to the checksum in the manifest to ensure the contents have not changed since the binary package was pushed. +Spack stores all data files (including compressed tar files, spec metadata, indices, public keys, etc.) within a ``blobs//`` directory, using the first two characters of the checksum as a subdirectory to reduce the number of files in a single folder. +Here is a depiction of the organization of binary mirror contents: .. code-block:: text @@ -572,25 +649,16 @@ organization of binary mirror contents: 2a21836d206ccf0df780ab0be63fdf76d24501375306a35daa6683c409b7922f ... -Files within the ``manifests`` directory are organized into subdirectories by -the type of entity they represent. Binary package manifests live in the ``spec/`` -directory, build cache index manifests live in the ``index/`` directory, and -manifests for public keys and their indices live in the ``key/`` subdirectory. -Regardless of the type of entity they represent, all manifest files are named -with an extension ``.manifest.json``. - -Every manifest contains a ``data`` array, each element of which refers to an -associated file stored as a content-addressed blob. Considering the example spec -manifest shown above, the compressed installation archive can be found by -picking out the data blob with the appropriate ``mediaType``, which in this -case would be ``application/vnd.spack.install.v1.tar+gzip``. The associated -file is found by looking in the blobs directory under ``blobs/sha256/fb/`` for -the file named with the complete checksum value. - -As mentioned above, every entity in a binary mirror (aka build cache) is stored -as a content-addressed blob pointed to by a manifest. While an example spec -manifest (i.e., a manifest for a binary package) is shown above, here is what -the manifest of a build cache index looks like: +Files within the ``manifests`` directory are organized into subdirectories by the type of entity they represent. 
+Binary package manifests live in the ``spec/`` directory, build cache index manifests live in the ``index/`` directory, and manifests for public keys and their indices live in the ``key/`` subdirectory. +Regardless of the type of entity they represent, all manifest files are named with an extension ``.manifest.json``. + +Every manifest contains a ``data`` array, each element of which refers to an associated file stored as a content-addressed blob. +Considering the example spec manifest shown above, the compressed installation archive can be found by picking out the data blob with the appropriate ``mediaType``, which in this case would be ``application/vnd.spack.install.v2.tar+gzip``. +The associated file is found by looking in the blobs directory under ``blobs/sha256/fb/`` for the file named with the complete checksum value. + +As mentioned above, every entity in a build cache is stored as a content-addressed blob pointed to by a manifest. +While an example spec manifest (i.e., a manifest for a binary package) is shown above, here is what the manifest of a build cache index looks like: .. code-block:: json @@ -607,15 +675,12 @@ the manifest of a build cache index looks like: ] } -Some things to note about this manifest are that it points to a blob that is not -compressed (``compression: "none"``) and that the ``mediaType`` is one we have -not seen yet, ``application/vnd.spack.db.v8+json``. The decision not to compress -build cache indices stems from the fact that Spack does not yet sign build cache -index manifests. Once that changes, you may start to see these indices stored as -compressed blobs. +Some things to note about this manifest are that it points to a blob that is not compressed (``compression: "none"``) and that the ``mediaType`` is one we have not seen yet, ``application/vnd.spack.db.v8+json``. +The decision not to compress build cache indices stems from the fact that Spack does not yet sign build cache index manifests. +Once that changes, you may start to see these indices stored as compressed blobs. -For completeness, here are examples of manifests for the other two types of entities -you might find in a Spack build cache. First, a public key manifest: +For completeness, here are examples of manifests for the other two types of entities you might find in a Spack build cache. +First, a public key manifest: .. code-block:: json @@ -632,7 +697,8 @@ you might find in a Spack build cache. First, a public key manifest: ] } -Note the ``mediaType`` of ``application/pgp-keys``. Finally, a public key index manifest: +Note the ``mediaType`` of ``application/pgp-keys``. +Finally, a public key index manifest: .. code-block:: json @@ -649,6 +715,5 @@ Note the ``mediaType`` of ``application/pgp-keys``. Finally, a public key index ] } -Again, note the ``mediaType`` of ``application/vnd.spack.keyindex.v1+json``. Also, note -that both the above manifest examples refer to uncompressed blobs; this is for the same -reason Spack does not yet compress build cache index blobs. +Again, note the ``mediaType`` of ``application/vnd.spack.keyindex.v1+json``. +Also, note that both the above manifest examples refer to uncompressed blobs; this is for the same reason Spack does not yet compress build cache index blobs. diff --git a/lib/spack/docs/bootstrapping.rst b/lib/spack/docs/bootstrapping.rst index d84a689c9f1098..dde4cfbe0f56a9 100644 --- a/lib/spack/docs/bootstrapping.rst +++ b/lib/spack/docs/bootstrapping.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. 
+.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -7,19 +8,20 @@ Learn how Spack's bootstrapping feature automatically fetches and installs essential build tools when they are not available on the host system. .. _bootstrapping: +.. _cmd-spack-bootstrap: +.. _cmd-spack-bootstrap-status: +.. _cmd-spack-bootstrap-now: -============= Bootstrapping ============= -In the :ref:`Getting started <getting_started>` section, we already mentioned that -Spack can bootstrap some of its dependencies, including ``clingo``. In fact, there -is an entire command dedicated to the management of every aspect of bootstrapping: +In the :ref:`Getting started <getting_started>` section, we already mentioned that Spack can bootstrap some of its dependencies, including ``clingo``. +In fact, there is an entire command dedicated to the management of every aspect of bootstrapping: .. command-output:: spack bootstrap --help -Spack is configured to bootstrap its dependencies lazily by default (i.e., the first time they are needed and -cannot be found). You can readily check if any prerequisite for using Spack is missing by running: +Spack bootstraps its dependencies automatically the first time they are needed. +You can readily check if any prerequisite for using Spack is missing by running: .. code-block:: console @@ -38,11 +40,9 @@ cannot be found). You can readily check if any prerequisite for using Spack is m % echo $? 1 -In the case of the output shown above, Spack detected that both ``clingo`` and ``gnupg`` -are missing, and it's giving detailed information on why they are needed and whether -they can be bootstrapped. The return code of this command summarizes the results; if any -dependencies are missing, the return code is ``1``, otherwise ``0``. Running a command that -concretizes a spec, like: +In the case of the output shown above, Spack detected that both ``clingo`` and ``gnupg`` are missing, and it gives detailed information on why they are needed and whether they can be bootstrapped. +The return code of this command summarizes the results; if any dependencies are missing, the return code is ``1``, otherwise ``0``. +Running a command that concretizes a spec, like: .. code-block:: console @@ -54,8 +54,7 @@ concretizes a spec, like: automatically triggers the bootstrapping of clingo from pre-built binaries as expected. -Users can also bootstrap all the dependencies needed by Spack in a single command, which -might be useful to set up containers or other similar environments: +Users can also bootstrap all Spack's dependencies in a single command, which is useful to set up containers or other similar environments: .. code-block:: console @@ -69,18 +68,19 @@ might be useful to set up containers or other similar environments: ==> Fetching https://mirror.spack.io/bootstrap/github-actions/v0.3/build_cache/linux-centos7-x86_64/gcc-10.2.1/patchelf-0.15.0/linux-centos7-x86_64-gcc-10.2.1-patchelf-0.15.0-htk62k7efo2z22kh6kmhaselru7bfkuc.spack ==> Installing "patchelf@0.15.0%gcc@10.2.1 ldflags="-static-libstdc++ -static-libgcc" arch=linux-centos7-x86_64" from a buildcache ------------------------ +.. _cmd-spack-bootstrap-root: + The Bootstrapping Store ----------------------- The software installed for bootstrapping purposes is deployed in a separate store. -Its location can be checked with the following command: +You can check its location with the following command: ..
code-block:: console % spack bootstrap root -It can also be changed with the same command by just specifying the newly desired path: +You can also change it by specifying the desired path: .. code-block:: console @@ -97,7 +97,7 @@ You can check what is installed in the bootstrapping store at any time using: clingo-bootstrap@spack libassuan@2.5.5 libgpg-error@1.42 libksba@1.5.1 pinentry@1.1.1 zlib@1.2.11 gnupg@2.3.1 libgcrypt@1.9.3 libiconv@1.16 npth@1.6 python@3.8 -In case it is needed, you can remove all the software in the current bootstrapping store with: +If needed, you can remove all the software in the current bootstrapping store with: .. code-block:: console @@ -108,41 +108,43 @@ In case it is needed, you can remove all the software in the current bootstrappi ==> Showing internal bootstrap store at "/Users/spack/.spack/bootstrap/store" ==> 0 installed packages --------------------------------------------- +.. _cmd-spack-bootstrap-list: +.. _cmd-spack-bootstrap-disable: +.. _cmd-spack-bootstrap-enable: +.. _cmd-spack-bootstrap-reset: + Enabling and Disabling Bootstrapping Methods -------------------------------------------- -Bootstrapping is always performed by trying the methods listed by: +Bootstrapping is performed by trying the methods listed by: .. command-output:: spack bootstrap list -in the order they appear, from top to bottom. By default, Spack is -configured to try bootstrapping from pre-built binaries first and to -fall back to bootstrapping from sources if that fails. +in the order they appear, from top to bottom. +By default, Spack is configured to try bootstrapping from pre-built binaries first and to fall back to bootstrapping from sources if that fails. -If need be, you can disable bootstrapping altogether by running: +If needed, you can disable bootstrapping altogether by running: .. code-block:: console % spack bootstrap disable -in which case, it's your responsibility to ensure Spack runs in an -environment where all its prerequisites are installed. You can -also configure Spack to skip certain bootstrapping methods by disabling -them specifically: +in which case, it's your responsibility to ensure Spack runs in an environment where all its prerequisites are installed. +You can also configure Spack to skip certain bootstrapping methods by disabling them specifically: .. code-block:: console % spack bootstrap disable github-actions ==> "github-actions" is now disabled and will not be used for bootstrapping -tells Spack to skip trying to bootstrap from binaries. To add the "github-actions" method back, you can: +tells Spack to skip trying to bootstrap from binaries. +To add the "github-actions" method back, you can: .. code-block:: console % spack bootstrap enable github-actions -There is also an option to reset the bootstrapping configuration to Spack's defaults: +You can also reset the bootstrapping configuration to Spack's defaults: .. code-block:: console @@ -151,17 +153,16 @@ There is also an option to reset the bootstrapping configuration to Spack's defa Do you want to continue? [Y/n] % ----------------------------------------- +.. _cmd-spack-bootstrap-mirror: +.. _cmd-spack-bootstrap-add: + Creating a Mirror for Air-Gapped Systems ---------------------------------------- -Spack's default configuration for bootstrapping relies on the user having -access to the internet, either to fetch precompiled binaries or source tarballs. +Spack's default bootstrapping configuration requires an internet connection to fetch precompiled binaries or source tarballs.
Sometimes, though, Spack is deployed on air-gapped systems where such access is denied. -To help with similar situations, Spack has a command that recreates, in a local folder -of choice, a mirror containing the source tarballs and/or binary packages needed for -bootstrapping. +To help in these situations, Spack provides a command to create a local mirror containing the source tarballs and/or binary packages needed for bootstrapping. .. code-block:: console @@ -176,6 +177,5 @@ bootstrapping. % spack bootstrap add --trust local-binaries /opt/bootstrap/metadata/binaries % spack buildcache update-index /opt/bootstrap/bootstrap_cache -This command needs to be run on a machine with internet access, and the resulting folder -has to be moved over to the air-gapped system. Once the local sources are added using the -commands suggested at the prompt, they can be used to bootstrap Spack. +Run this command on a machine with internet access, then move the resulting folder to the air-gapped system. +Once the local sources are added using the commands suggested at the prompt, they can be used to bootstrap Spack. diff --git a/lib/spack/docs/build_settings.rst b/lib/spack/docs/build_settings.rst index c985c7001b4dba..6bae1995f4eb72 100644 --- a/lib/spack/docs/build_settings.rst +++ b/lib/spack/docs/build_settings.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,23 +9,39 @@ .. _concretizer-options: -========================================== Concretization Settings (concretizer.yaml) ========================================== -The ``concretizer.yaml`` configuration file allows users to customize aspects of the -algorithm used to select the dependencies they install. The default configuration -is the following: +The ``concretizer.yaml`` configuration file allows users to customize aspects of the algorithm used to select the dependencies they install. +The default configuration is the following: -.. literalinclude:: _spack_root/etc/spack/defaults/concretizer.yaml +.. literalinclude:: _spack_root/etc/spack/defaults/base/concretizer.yaml :language: yaml --------------------------------- + +Completion of external nodes +---------------------------- + +:ref:`The external packages ` available from the ``packages.yaml`` configuration file usually report only a few of the variants defined in the corresponding recipe. +Users can configure how Spack deals with missing information for externals via the ``concretizer:externals:completion`` attribute: + +.. code-block:: yaml + + concretizer: + externals: + completion: default_variants + +This attribute currently allows two possible values: + +- ``architecture_only``: only the mandatory architectural information is completed on externals +- ``default_variants``: external specs are also completed with missing variants, using their default values + + Reuse Already Installed Packages -------------------------------- -The ``reuse`` attribute controls how aggressively Spack reuses binary packages during concretization. The -attribute can be either a single value or an object for more complex configurations. +The ``reuse`` attribute controls how aggressively Spack reuses binary packages during concretization. +The attribute can be either a single value or an object for more complex configurations.
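+A single-value configuration is just one line in ``concretizer.yaml``; for instance, a minimal sketch using one of the values described next:
+
+.. code-block:: yaml
+
+   concretizer:
+     reuse: dependencies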
In the former case ("single value"), it allows Spack to: @@ -32,13 +49,12 @@ In the former case ("single value"), it allows Spack to: 2. Reuse installed packages and build caches only for the dependencies of the root specs, when ``dependencies``. 3. Disregard reusing installed packages and build caches, when ``false``. -In case finer control over which specs are reused is needed, then the value of this attribute can be -an object with the following keys: +In case finer control over which specs are reused is needed, the value of this attribute can be an object with the following keys: 1. ``roots``: if ``true`` root specs are reused, if ``false`` only dependencies of root specs are reused 2. ``from``: list of sources from which reused specs are taken -Each source in ``from`` is itself an object: +Each source in ``from`` is itself an object with the following attributes: .. list-table:: Attributes for a source or reusable specs :header-rows: 1 @@ -65,11 +81,10 @@ For instance, the following configuration: - "%gcc" - "%clang" -tells the concretizer to reuse all specs compiled with either ``gcc`` or ``clang`` that are installed -in the local store. Any spec from remote build caches is disregarded. +tells the concretizer to reuse all specs compiled with either ``gcc`` or ``clang`` that are installed in the local store. +Any spec from remote build caches is disregarded. -To reduce the boilerplate in configuration files, default values for the ``include`` and -``exclude`` options can be pushed up one level: +To reduce the boilerplate in configuration files, default values for the ``include`` and ``exclude`` options can be pushed up one level: .. code-block:: yaml @@ -85,12 +100,11 @@ To reduce the boilerplate in configuration files, default values for the ``inclu include: - "foo %oneapi" -In the example above, we reuse all specs compiled with ``gcc`` from the local store -and remote build caches, and we also reuse ``foo %oneapi``. Note that the last source of -specs overrides the default ``include`` attribute. +In the example above, we reuse all specs compiled with ``gcc`` from the local store and remote build caches, and we also reuse ``foo %oneapi``. +Note that the last source of specs overrides the default ``include`` attribute. -For one-off concretizations, there are command-line arguments for each of the simple "single value" -configurations. This means a user can: +For one-off concretizations, there are command-line arguments for each of the simple "single value" configurations. +This means a user can: .. code-block:: console @@ -100,7 +114,7 @@ to enable reuse for a single installation, or: .. code-block:: console - spack install --fresh + $ spack install --fresh to do a fresh install if ``reuse`` is enabled by default. @@ -108,13 +122,11 @@ to do a fresh install if ``reuse`` is enabled by default. FAQ: :ref:`Why does Spack pick particular versions and variants? ` ------------------------------------------- Selection of Target Microarchitectures ------------------------------------------ The options under the ``targets`` attribute control which targets are considered during a solve. -Currently, the options in this section are only configurable from the ``concretizer.yaml`` file, -and there are no corresponding command-line arguments to enable them for a single solve. +Currently, the options in this section are only configurable from the ``concretizer.yaml`` file, and there are no corresponding command-line arguments to enable them for a single solve. 
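+As a sketch, the two options discussed below can be combined in a single ``targets`` block:
+
+.. code-block:: yaml
+
+   concretizer:
+     targets:
+       granularity: generic
+       host_compatible: true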
The ``granularity`` option can take two possible values: ``microarchitectures`` and ``generic``. If set to: @@ -125,8 +137,8 @@ If set to: targets: granularity: microarchitectures -Spack will consider all the microarchitectures known to ``archspec`` to label nodes for -compatibility. If instead the option is set to: +Spack will consider all the microarchitectures known to ``archspec`` to label nodes for compatibility. +If instead the option is set to: .. code-block:: yaml @@ -134,78 +146,59 @@ compatibility. If instead the option is set to: targets: granularity: generic -Spack will consider only generic microarchitectures. For instance, when running on a -Haswell node, Spack will consider ``haswell`` as the best target in the former case and -``x86_64_v3`` as the best target in the latter case. +Spack will consider only generic microarchitectures. +For instance, when running on a Haswell machine, Spack will consider ``haswell`` as the best target in the former case and ``x86_64_v3`` as the best target in the latter case. -The ``host_compatible`` option is a Boolean option that determines whether or not the -microarchitectures considered during the solve are constrained to be compatible with the -host Spack is currently running on. For instance, if this option is set to ``true``, a -user cannot concretize for ``target=icelake`` while running on a Haswell node. +The ``host_compatible`` option is a Boolean option that determines whether or not the microarchitectures considered during the solve are constrained to be compatible with the host Spack is currently running on. +For instance, if this option is set to ``true``, a user cannot concretize for ``target=icelake`` while running on a Haswell machine. ---------------- Duplicate Nodes --------------- -The ``duplicates`` attribute controls whether the DAG can contain multiple configurations of -the same package. This is mainly relevant for build dependencies, which may have their version -pinned by some nodes and thus be required at different versions by different nodes in the same -DAG. +The ``duplicates`` attribute controls whether the DAG can contain multiple configurations of the same package. +This is mainly relevant for build dependencies, which may have their version pinned by some nodes and thus be required at different versions by different nodes in the same DAG. -The ``strategy`` option controls how the solver deals with duplicates. If the value is ``none``, -then a single configuration per package is allowed in the DAG. This means, for instance, that only -a single ``cmake`` or a single ``py-setuptools`` version is allowed. The result would be a slightly -faster concretization at the expense of making a few specs unsolvable. +The ``strategy`` option controls how the solver deals with duplicates. +If the value is ``none``, then a single configuration per package is allowed in the DAG. +This means, for instance, that only a single ``cmake`` or a single ``py-setuptools`` version is allowed. +The result would be a slightly faster concretization at the expense of making a few specs unsolvable. If the value is ``minimal``, Spack will allow packages tagged as ``build-tools`` to have duplicates. -This allows, for instance, to concretize specs whose nodes require different and incompatible ranges -of some build tool. 
For instance, in the figure below, the latest `py-shapely` requires a newer `py-setuptools`, -while `py-numpy` still needs an older version: +This makes it possible, for instance, to concretize specs whose nodes require different and incompatible ranges of some build tool. +For instance, in the figure below, the latest ``py-shapely`` requires a newer ``py-setuptools``, while ``py-numpy`` still needs an older version: -.. figure:: images/shapely_duplicates.svg - :width: 100% - :align: center +.. figure:: images/shapely_duplicates.svg + :width: 5580 + :height: 1842 -Up to Spack v0.20, ``duplicates:strategy:none`` was the default (and only) behavior. From Spack v0.21, the -default behavior is ``duplicates:strategy:minimal``. +Up to Spack v0.20, ``duplicates:strategy:none`` was the default (and only) behavior. +From Spack v0.21, the default behavior is ``duplicates:strategy:minimal``. --------- Splicing -------- The ``splice`` key covers configuration attributes for splicing specs in the solver. -"Splicing" is a method for replacing a dependency with another spec -that provides the same package or virtual. There are two types of -splices, referring to different behaviors for shared dependencies -between the root spec and the new spec replacing a dependency: -"transitive" and "intransitive". A "transitive" splice is one that -resolves all conflicts by taking the dependency from the new node. An -"intransitive" splice is one that resolves all conflicts by taking the -dependency from the original root. From a theory perspective, hybrid -splices are possible but are not modeled by Spack. +"Splicing" is a method for replacing a dependency with another spec that provides the same package or virtual. +There are two types of splices, referring to different behaviors for shared dependencies between the root spec and the new spec replacing a dependency: "transitive" and "intransitive". +A "transitive" splice is one that resolves all conflicts by taking the dependency from the new node. +An "intransitive" splice is one that resolves all conflicts by taking the dependency from the original root. +From a theory perspective, hybrid splices are possible but are not modeled by Spack. -All spliced specs retain a ``build_spec`` attribute that points to the -original spec before any splice occurred. The ``build_spec`` for a -non-spliced spec is itself. +All spliced specs retain a ``build_spec`` attribute that points to the original spec before any splice occurred. +The ``build_spec`` for a non-spliced spec is itself. The figure below shows examples of transitive and intransitive splices: .. figure:: images/splices.png - :align: center - -The concretizer can be configured to explicitly splice particular -replacements for a target spec. Splicing will allow the user to make -use of generically built public binary caches while swapping in -highly optimized local builds for performance-critical components -and/or components that interact closely with the specific hardware -details of the system. The most prominent candidate for splicing is -MPI providers. MPI packages have relatively well-understood ABI -characteristics, and most High Performance Computing facilities deploy -highly optimized MPI packages tailored to their particular -hardware. The following configuration block configures Spack to replace -whatever MPI provider each spec was concretized to use with the -particular package of ``mpich`` with the hash that begins ``abcdef``.
+ :width: 2308 + :height: 1248 + +The concretizer can be configured to explicitly splice particular replacements for a target spec. +Splicing will allow the user to make use of generically built public binary caches while swapping in highly optimized local builds for performance-critical components and/or components that interact closely with the specific hardware details of the system. +The most prominent candidate for splicing is MPI providers. +MPI packages have relatively well-understood ABI characteristics, and most High Performance Computing facilities deploy highly optimized MPI packages tailored to their particular hardware. +The following configuration block configures Spack to replace whatever MPI provider each spec was concretized to use with the particular package of ``mpich`` with the hash that begins ``abcdef``. .. code-block:: yaml @@ -218,56 +211,39 @@ particular package of ``mpich`` with the hash that begins ``abcdef``. .. warning:: - When configuring an explicit splice, you as the user take on the - responsibility for ensuring ABI compatibility between the specs - matched by the target and the replacement you provide. If they are - not compatible, Spack will not warn you, and your application will - fail to run. + When configuring an explicit splice, you as the user take on the responsibility for ensuring ABI compatibility between the specs matched by the target and the replacement you provide. + If they are not compatible, Spack will not warn you, and your application will fail to run. -The ``target`` field of an explicit splice can be any abstract -spec. The ``replacement`` field must be a spec that includes the hash -of a concrete spec, and the replacement must either be the same -package as the target, provide the virtual that is the target, or -provide a virtual that the target provides. The ``transitive`` field -is optional -- by default, splices will be transitive. +The ``target`` field of an explicit splice can be any abstract spec. +The ``replacement`` field must be a spec that includes the hash of a concrete spec, and the replacement must either be the same package as the target, provide the virtual that is the target, or provide a virtual that the target provides. +The ``transitive`` field is optional -- by default, splices will be transitive. .. note:: - With explicit splices configured, it is possible for Spack to - concretize to a spec that does not satisfy the input. For example, - with the configuration above, ``hdf5 ^mvapich2`` will concretize to use - ``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. Spack - will warn the user in this case, but will not fail the - concretization. + With explicit splices configured, it is possible for Spack to concretize to a spec that does not satisfy the input. + For example, with the configuration above, ``hdf5 ^mvapich2`` will concretize to use ``mpich/abcdef`` instead of ``mvapich2`` as the MPI provider. + Spack will warn the user in this case, but will not fail the concretization. .. _automatic_splicing: -^^^^^^^^^^^^^^^^^^ Automatic Splicing ^^^^^^^^^^^^^^^^^^ -The Spack solver can be configured to do automatic splicing for -ABI-compatible packages. Automatic splices are enabled in the concretizer -configuration section: +The Spack solver can be configured to do automatic splicing for ABI-compatible packages. +Automatic splices are enabled in the concretizer configuration section: .. 
code-block:: yaml concretizer: splice: - automatic: True + automatic: true -Packages can include ABI-compatibility information using the -``can_splice`` directive. See :ref:`the packaging -guide ` for instructions on specifying ABI -compatibility using the ``can_splice`` directive. +Packages can include ABI-compatibility information using the ``can_splice`` directive. +See :ref:`the packaging guide ` for instructions on specifying ABI compatibility using the ``can_splice`` directive. .. note:: - The ``can_splice`` directive is experimental and may be changed in - future versions. + The ``can_splice`` directive is experimental and may be changed in future versions. -When automatic splicing is enabled, the concretizer will combine any -number of ABI-compatible specs if possible to reuse installed packages -and packages available from binary caches. The end result of these -specs is equivalent to a series of transitive/intransitive splices, -but the series may be non-obvious. +When automatic splicing is enabled, the concretizer will combine any number of ABI-compatible specs if possible to reuse installed packages and packages available from binary caches. +The end result of these specs is equivalent to a series of transitive/intransitive splices, but the series may be non-obvious. diff --git a/lib/spack/docs/build_systems.rst b/lib/spack/docs/build_systems.rst index 3fd479ee9d44e8..033b1c702609f1 100644 --- a/lib/spack/docs/build_systems.rst +++ b/lib/spack/docs/build_systems.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,14 +9,11 @@ .. _build-systems: -============= Build Systems ============= -Spack defines a number of classes that understand how to use common -`build systems `_ -(Makefiles, CMake, etc.). Spack package definitions can inherit these -classes in order to streamline their builds. +Spack defines a number of classes that understand how to use common `build systems `_ (Makefiles, CMake, etc.). +Spack package definitions can inherit these classes in order to streamline their builds. This guide provides information specific to each particular build system. It assumes that you've read the Packaging Guide :doc:`part 1 ` and :doc:`part 2 ` and expands on these ideas for each distinct build system that Spack supports: @@ -68,20 +66,17 @@ It assumes that you've read the Packaging Guide :doc:`part 1 ` -provide a list of build systems and methods/attributes that can be -overridden. If you are curious about the implementation of a particular -build system, you can view the source code by running: +For reference, the :py:mod:`Build System API docs ` provide a list of build systems and methods/attributes that can be overridden. +If you are curious about the implementation of a particular build system, you can view the source code by running: .. code-block:: console $ spack edit --build-system autotools -This will open up the ``AutotoolsPackage`` definition in your favorite -editor. In addition, if you are working with a less common build system -like QMake, SCons, or Waf, it may be useful to see examples of other -packages. You can quickly find examples by running: +This will open up the ``AutotoolsPackage`` definition in your favorite editor. +In addition, if you are working with a less common build system like QMake, SCons, or Waf, it may be useful to see examples of other packages. 
+You can quickly find examples by running: .. code-block:: console @@ -91,8 +86,5 @@ packages. You can quickly find examples by running: You can then view these packages with ``spack edit``. -This guide is intended to supplement the -:py:mod:`Build System API docs ` with examples of -how to override commonly used methods. It also provides rules of thumb -and suggestions for package developers who are unfamiliar with a -particular build system. +This guide is intended to supplement the :py:mod:`Build System API docs ` with examples of how to override commonly used methods. +It also provides rules of thumb and suggestions for package developers who are unfamiliar with a particular build system. diff --git a/lib/spack/docs/build_systems/autotoolspackage.rst b/lib/spack/docs/build_systems/autotoolspackage.rst index 4033ac5acea5a5..d5f340e6eac3c6 100644 --- a/lib/spack/docs/build_systems/autotoolspackage.rst +++ b/lib/spack/docs/build_systems/autotoolspackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,15 +9,12 @@ .. _autotoolspackage: ---------- Autotools --------- Autotools is a GNU build system that provides a build-script generator. -By running the platform-independent ``./configure`` script that comes -with the package, you can generate a platform-dependent Makefile. +By running the platform-independent ``./configure`` script that comes with the package, you can generate a platform-dependent Makefile. -^^^^^^ Phases ^^^^^^ @@ -27,9 +25,7 @@ The ``AutotoolsBuilder`` and ``AutotoolsPackage`` base classes come with the fol #. ``build`` - build the package #. ``install`` - install the package -Most of the time, the ``autoreconf`` phase will do nothing, but if the -package is missing a ``configure`` script, ``autoreconf`` will generate -one for you. +Most of the time, the ``autoreconf`` phase will do nothing, but if the package is missing a ``configure`` script, ``autoreconf`` will generate one for you. The other phases run: @@ -42,23 +38,19 @@ The other phases run: $ make installcheck # optional -Of course, you may need to add a few arguments to the ``./configure`` -line. +Of course, you may need to add a few arguments to the ``./configure`` line. -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -The most important file for an Autotools-based package is the ``configure`` -script. This script is automatically generated by Autotools and generates -the appropriate Makefile when run. +The most important file for an Autotools-based package is the ``configure`` script. +This script is automatically generated by Autotools and generates the appropriate Makefile when run. .. warning:: Watch out for fake Autotools packages! - Autotools is a very popular build system, and many people are used to the - classic steps to install a package: + Autotools is a very popular build system, and many people are used to the classic steps to install a package: .. code-block:: console @@ -67,13 +59,10 @@ the appropriate Makefile when run. $ make install - For this reason, some developers will write their own ``configure`` - scripts that have nothing to do with Autotools. These packages may - not accept the same flags as other Autotools packages, so it is - better to use the ``Package`` base class and create a - :ref:`custom build system `. 
You can tell if a package - uses Autotools by running ``./configure --help`` and comparing the output - to other known Autotools packages. You should also look for files like: + For this reason, some developers will write their own ``configure`` scripts that have nothing to do with Autotools. + These packages may not accept the same flags as other Autotools packages, so it is better to use the ``Package`` base class and create a :ref:`custom build system `. + You can tell if a package uses Autotools by running ``./configure --help`` and comparing the output to other known Autotools packages. + You should also look for files like: * ``configure.ac`` * ``configure.in`` @@ -81,26 +70,19 @@ the appropriate Makefile when run. Packages that don't use Autotools aren't likely to have these files. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -Whether or not your package requires Autotools to install depends on -how the source code is distributed. Most of the time, when developers -distribute tarballs, they will already contain the ``configure`` script -necessary for installation. If this is the case, your package does not -require any Autotools dependencies. - -However, a basic rule of version control systems is to never commit -code that can be generated. The source code repository itself likely -does not have a ``configure`` script. Developers typically write -(or auto-generate) a ``configure.ac`` script that contains configuration -preferences and a ``Makefile.am`` script that contains build instructions. -Then, ``autoconf`` is used to convert ``configure.ac`` into ``configure``, -while ``automake`` is used to convert ``Makefile.am`` into ``Makefile.in``. -``Makefile.in`` is used by ``configure`` to generate a platform-dependent -``Makefile`` for you. The following diagram provides a high-level overview -of the process: +Whether or not your package requires Autotools to install depends on how the source code is distributed. +Most of the time, when developers distribute tarballs, they will already contain the ``configure`` script necessary for installation. +If this is the case, your package does not require any Autotools dependencies. + +However, a basic rule of version control systems is to never commit code that can be generated. +The source code repository itself likely does not have a ``configure`` script. +Developers typically write (or auto-generate) a ``configure.ac`` script that contains configuration preferences and a ``Makefile.am`` script that contains build instructions. +Then, ``autoconf`` is used to convert ``configure.ac`` into ``configure``, while ``automake`` is used to convert ``Makefile.am`` into ``Makefile.in``. +``Makefile.in`` is used by ``configure`` to generate a platform-dependent ``Makefile`` for you. +The following diagram provides a high-level overview of the process: .. figure:: Autoconf-automake-process.* :target: https://commons.wikimedia.org/w/index.php?curid=15581407 @@ -108,86 +90,68 @@ of the process: `GNU autoconf and automake process for generating makefiles `_ by `Jdthood` under `CC BY-SA 3.0 `_ -If a ``configure`` script is not present in your tarball, you will -need to generate one yourself. Luckily, Spack already has an ``autoreconf`` -phase to do most of the work for you. By default, the ``autoreconf`` -phase runs: +If a ``configure`` script is not present in your tarball, you will need to generate one yourself. +Luckily, Spack already has an ``autoreconf`` phase to do most of the work for you. 
+By default, the ``autoreconf`` phase runs: .. code-block:: console $ autoreconf --install --verbose --force -I <aclocal-prefix>/share/aclocal -In case you need to add more arguments, override ``autoreconf_extra_args`` -in your ``package.py`` on class scope like this: +In case you need to add more arguments, override ``autoreconf_extra_args`` in your ``package.py`` on class scope like this: .. code-block:: python autoreconf_extra_args = ["-Im4"] All you need to do is add a few Autotools dependencies to the package. -Most stable releases will come with a ``configure`` script, but if you -check out a commit from the ``master`` branch, you would want to add: +Most stable releases will come with a ``configure`` script, but if you check out a commit from the ``master`` branch, you would want to add: .. code-block:: python depends_on("autoconf", type="build", when="@master") depends_on("automake", type="build", when="@master") - depends_on("libtool", type="build", when="@master") + depends_on("libtool", type="build", when="@master") -It is typically redundant to list the ``m4`` macro processor package as a -dependency, since ``autoconf`` already depends on it. +It is typically redundant to list the ``m4`` macro processor package as a dependency, since ``autoconf`` already depends on it. -""""""""""""""""""""""""""""""" Using a custom autoreconf phase """"""""""""""""""""""""""""""" -In some cases, it might be needed to replace the default implementation -of the autoreconf phase with one running a script interpreter. In this -example, the ``bash`` shell is used to run the ``autogen.sh`` script. +In some cases, it might be necessary to replace the default implementation of the autoreconf phase with one running a script interpreter. +In this example, the ``bash`` shell is used to run the ``autogen.sh`` script. .. code-block:: python def autoreconf(self, spec, prefix): which("bash")("autogen.sh") -If the ``package.py`` has build instructions in a separate -:ref:`builder class `, the signature for a phase changes slightly: +If the ``package.py`` has build instructions in a separate :ref:`builder class `, the signature for a phase changes slightly: .. code-block:: python class AutotoolsBuilder(AutotoolsBuilder): - def autoreconf(self, pkg, spec, prefix): - which("bash")("autogen.sh") + def autoreconf(self, pkg, spec, prefix): + which("bash")("autogen.sh") -""""""""""""""""""""""""""""""""""""""" patching configure or Makefile.in files """"""""""""""""""""""""""""""""""""""" -In some cases, developers might need to distribute a patch that modifies -one of the files used to generate ``configure`` or ``Makefile.in``. -In this case, these scripts will need to be regenerated. It is -preferable to regenerate these manually using the patch, and then -create a new patch that directly modifies ``configure``. That way, -Spack can use the secondary patch and additional build system -dependencies aren't necessary. +In some cases, developers might need to distribute a patch that modifies one of the files used to generate ``configure`` or ``Makefile.in``. +In this case, these scripts will need to be regenerated. +It is preferable to regenerate these manually using the patch, and then create a new patch that directly modifies ``configure``. +That way, Spack can use the secondary patch and additional build system dependencies aren't necessary. -"""""""""""""""""""""""""""" Old Autotools helper scripts """""""""""""""""""""""""""" -Autotools based tarballs come with helper scripts such as ``config.sub`` and -``config.guess``.
It is the responsibility of the developers to keep these files -up to date so that they run on every platform, but for very old software -releases this is impossible. In these cases Spack can help to replace these -files with newer ones, without having to add the heavy dependency on -``automake``. +Autotools based tarballs come with helper scripts such as ``config.sub`` and ``config.guess``. +It is the responsibility of the developers to keep these files up to date so that they run on every platform, but for very old software releases this is impossible. +In these cases Spack can help to replace these files with newer ones, without having to add the heavy dependency on ``automake``. -Automatic helper script replacement is currently enabled by default on -``ppc64le`` and ``aarch64``, as these are the known cases where old scripts fail. -On these targets, ``AutotoolsPackage`` adds a build dependency on ``gnuconfig``, -which is a very lightweight package with newer versions of the helper files. -Spack then tries to run all the helper scripts it can find in the release, and -replaces them on failure with the helper scripts from ``gnuconfig``. +Automatic helper script replacement is currently enabled by default on ``ppc64le`` and ``aarch64``, as these are the known cases where old scripts fail. +On these targets, ``AutotoolsPackage`` adds a build dependency on ``gnuconfig``, which is a very lightweight package with newer versions of the helper files. +Spack then tries to run all the helper scripts it can find in the release, and replaces them on failure with the helper scripts from ``gnuconfig``. To opt out of this feature, use the following setting: @@ -195,23 +159,21 @@ To opt out of this feature, use the following setting: patch_config_files = False -To enable it conditionally on different architectures, define a property and -make the package depend on ``gnuconfig`` as a build dependency: +To enable it conditionally on different architectures, define a property and make the package depend on ``gnuconfig`` as a build dependency: .. code-block:: python depends_on("gnuconfig", when="@1.0:") + @property def patch_config_files(self): - return self.spec.satisfies("@1.0:") + return self.spec.satisfies("@1.0:") .. note:: - On some exotic architectures it is necessary to use system provided - ``config.sub`` and ``config.guess`` files. In this case, the most - transparent solution is to mark the ``gnuconfig`` package as external and - non-buildable, with a prefix set to the directory containing the files: + On some exotic architectures it is necessary to use system provided ``config.sub`` and ``config.guess`` files. + In this case, the most transparent solution is to mark the ``gnuconfig`` package as external and non-buildable, with a prefix set to the directory containing the files: .. code-block:: yaml @@ -222,21 +184,17 @@ make the package depend on ``gnuconfig`` as a build dependency: prefix: /usr/share/configure_files/ -"""""""""""""""" force_autoreconf """""""""""""""" -If for whatever reason you really want to add the original patch -and tell Spack to regenerate ``configure``, you can do so using the -following setting: +If for whatever reason you really want to add the original patch and tell Spack to regenerate ``configure``, you can do so using the following setting: .. code-block:: python force_autoreconf = True -This line tells Spack to wipe away the existing ``configure`` script -and generate a new one. 
If you only need to do this for a single -version, this can be done like so: +This line tells Spack to wipe away the existing ``configure`` script and generate a new one. +If you only need to do this for a single version, this can be done like so: .. code-block:: python @@ -244,20 +202,17 @@ version, this can be done like so: def force_autoreconf(self): return self.version == Version("1.2.3") -^^^^^^^^^^^^^^^^^^^^^^^ Finding configure flags ^^^^^^^^^^^^^^^^^^^^^^^ -Once you have a ``configure`` script present, the next step is to -determine what option flags are available. These flags can be found -by running: +Once you have a ``configure`` script present, the next step is to determine what option flags are available. +These flags can be found by running: .. code-block:: console $ ./configure --help -``configure`` will display a list of valid flags separated into -some or all of the following sections: +``configure`` will display a list of valid flags separated into some or all of the following sections: * Configuration * Installation directories @@ -270,20 +225,15 @@ some or all of the following sections: * **Some influential environment variables** For the most part, you can ignore all but the last 3 sections. -The "Optional Features" section lists flags that enable/disable -features you may be interested in. The "Optional Packages" section -often lists dependencies and the flags needed to locate them. The -"environment variables" section lists environment variables that the -build system uses to pass flags to the compiler and linker. +The "Optional Features" section lists flags that enable/disable features you may be interested in. +The "Optional Packages" section often lists dependencies and the flags needed to locate them. +The "environment variables" section lists environment variables that the build system uses to pass flags to the compiler and linker. -^^^^^^^^^^^^^^^^^^^^^^^^^ Adding flags to configure ^^^^^^^^^^^^^^^^^^^^^^^^^ -For most of the flags you encounter, you will want a variant to -optionally enable/disable them. You can then optionally pass these -flags to the ``configure`` call by overriding the ``configure_args`` -function like so: +For most of the flags you encounter, you will want a variant to optionally enable/disable them. +You can then optionally pass these flags to the ``configure`` call by overriding the ``configure_args`` function like so: .. code-block:: python @@ -309,52 +259,44 @@ Alternatively, you can use the :ref:`enable_or_disable `__ -and `here `__ -for a rationale as to why these so-called "automagic" dependencies -are a problem. +Note that we are explicitly disabling MPI support if it is not requested. +This is important, as many Autotools packages will enable options by default if the dependencies are found, and disable them otherwise. +We want Spack installations to be as deterministic as possible. +If two users install a package with the same variants, the goal is that both installations work the same way. +See `here `__ and `here `__ for a rationale as to why these so-called "automagic" dependencies are a problem. .. note:: - By default, Autotools installs packages to ``/usr``. We don't want this, - so Spack automatically adds ``--prefix=/path/to/installation/prefix`` - to your list of ``configure_args``. You don't need to add this yourself. + By default, Autotools installs packages to ``/usr``. + We don't want this, so Spack automatically adds ``--prefix=/path/to/installation/prefix`` to your list of ``configure_args``. 
+ You don't need to add this yourself. .. _autotools_helper_functions: -^^^^^^^^^^^^^^^^ Helper functions ^^^^^^^^^^^^^^^^ -You may have noticed that most of the Autotools flags are of the form -``--enable-foo``, ``--disable-bar``, ``--with-baz=``, or -``--without-baz``. Since these flags are so common, Spack provides a -couple of helper functions to make your life easier. +You may have noticed that most of the Autotools flags are of the form ``--enable-foo``, ``--disable-bar``, ``--with-baz=``, or ``--without-baz``. +Since these flags are so common, Spack provides a couple of helper functions to make your life easier. .. _autotools_enable_or_disable: -""""""""""""""""" -enable_or_disable -""""""""""""""""" +``enable_or_disable`` +""""""""""""""""""""" -Autotools flags for simple boolean variants can be automatically -generated by calling the ``enable_or_disable`` method. This is -typically used to enable or disable some feature within the package. +Autotools flags for simple boolean variants can be automatically generated by calling the ``enable_or_disable`` method. +This is typically used to enable or disable some feature within the package. .. code-block:: python variant( "memchecker", default=False, - description="Memchecker support for debugging [degrades performance]" + description="Memchecker support for debugging [degrades performance]", ) ... + def configure_args(self): args = [] ... @@ -362,20 +304,16 @@ typically used to enable or disable some feature within the package. return args -In this example, specifying the variant ``+memchecker`` will generate -the following configuration options: +In this example, specifying the variant ``+memchecker`` will generate the following configuration options: .. code-block:: console --enable-memchecker -""""""""""""""" -with_or_without -""""""""""""""" +``with_or_without`` +""""""""""""""""""" -Autotools flags for more complex variants, including boolean variants -and multi-valued variants, can be automatically generated by calling -the ``with_or_without`` method. +Autotools flags for more complex variants, including boolean variants and multi-valued variants, can be automatically generated by calling the ``with_or_without`` method. .. code-block:: python @@ -390,59 +328,43 @@ the ``with_or_without`` method. if not spec.satisfies("schedulers=auto"): config_args.extend(self.with_or_without("schedulers")) -In this example, specifying the variant ``schedulers=slurm,sge`` will -generate the following configuration options: +In this example, specifying the variant ``schedulers=slurm,sge`` will generate the following configuration options: .. code-block:: console --with-slurm --with-sge -``enable_or_disable`` is actually functionally equivalent to -``with_or_without``, and accepts the same arguments and variant types; -but idiomatic Autotools packages often follow these naming -conventions. +``enable_or_disable`` is actually functionally equivalent to ``with_or_without``, and accepts the same arguments and variant types; but idiomatic Autotools packages often follow these naming conventions. -"""""""""""""""" -activation_value -"""""""""""""""" +``activation_value`` +"""""""""""""""""""" -Autotools parameters that require an option can still be automatically -generated, using the ``activation_value`` argument to -``with_or_without`` (or, rarely, ``enable_or_disable``). +Autotools parameters that require an option can still be automatically generated, using the ``activation_value`` argument to ``with_or_without`` (or, rarely, ``enable_or_disable``). 
.. code-block:: python variant( - "fabrics", + "fabrics", values=disjoint_sets( ("auto",), ("psm", "psm2", "verbs", "mxm", "ucx", "libfabric") ).with_non_feature_values("auto", "none"), - description="List of fabrics that are enabled; " - "'auto' lets openmpi determine", + description="List of fabrics that are enabled; 'auto' lets openmpi determine", ) if not spec.satisfies("fabrics=auto"): - config_args.extend(self.with_or_without("fabrics", - activation_value="prefix")) + config_args.extend(self.with_or_without("fabrics", activation_value="prefix")) -``activation_value`` accepts a callable that generates the configure -parameter value given the variant value; but the special value -``prefix`` tells Spack to automatically use the dependency's -installation prefix, which is the most common use for such -parameters. In this example, specifying the variant -``fabrics=libfabric`` will generate the following configuration -options: +``activation_value`` accepts a callable that generates the configure parameter value given the variant value; but the special value ``prefix`` tells Spack to automatically use the dependency's installation prefix, which is the most common use for such parameters. +In this example, specifying the variant ``fabrics=libfabric`` will generate the following configuration options: .. code-block:: console --with-libfabric= -""""""""""""""""""""""" The ``variant`` keyword """"""""""""""""""""""" -When Spack variants and configure flags do not correspond one-to-one, the -``variant`` keyword can be passed to ``with_or_without`` and -``enable_or_disable``. For example: +When Spack variants and configure flags do not correspond one-to-one, the ``variant`` keyword can be passed to ``with_or_without`` and ``enable_or_disable``. +For example: .. code-block:: python @@ -458,12 +380,10 @@ Or when one variant controls multiple flags: config_args += self.with_or_without("profiler", variant="debug_tools") -"""""""""""""""""""" Conditional variants """""""""""""""""""" -When a variant is conditional and its condition is not met on the concrete spec, the -``with_or_without`` and ``enable_or_disable`` methods will simply return an empty list. +When a variant is conditional and its condition is not met on the concrete spec, the ``with_or_without`` and ``enable_or_disable`` methods will simply return an empty list. For example: @@ -472,17 +392,13 @@ For example: variant("profiler", when="@2.0:") config_args += self.with_or_without("profiler") -will neither add ``--with-profiler`` nor ``--without-profiler`` when the version is -below ``2.0``. +will neither add ``--with-profiler`` nor ``--without-profiler`` when the version is below ``2.0``. -"""""""""""""""""""" Activation overrides """""""""""""""""""" -Finally, the behavior of either ``with_or_without`` or -``enable_or_disable`` can be overridden for specific variant -values. This is most useful for multi-value variants where some of -the variant values require atypical behavior. +Finally, the behavior of either ``with_or_without`` or ``enable_or_disable`` can be overridden for specific variant values. +This is most useful for multi-value variants where some of the variant values require atypical behavior. .. code-block:: python @@ -494,46 +410,36 @@ the variant values require atypical behavior. 
return f"--without-{opt}" return f"--with-{opt}={self.spec['rdma-core'].prefix}" -Defining ``with_or_without_verbs`` overrides the behavior of a -``fabrics=verbs`` variant, changing the configure-time option to -``--with-openib`` for older versions of the package and specifying an -alternative dependency name: +Defining ``with_or_without_verbs`` overrides the behavior of a ``fabrics=verbs`` variant, changing the configure-time option to ``--with-openib`` for older versions of the package and specifying an alternative dependency name: .. code-block:: text --with-openib= -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure script in a sub-directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Occasionally, developers will hide their source code and ``configure`` -script in a subdirectory like ``src``. If this happens, Spack won't -be able to automatically detect the build system properly when running -``spack create``. You will have to manually change the package base -class and tell Spack where the ``configure`` script resides. You can -do this like so: +Occasionally, developers will hide their source code and ``configure`` script in a subdirectory like ``src``. +If this happens, Spack won't be able to automatically detect the build system properly when running ``spack create``. +You will have to manually change the package base class and tell Spack where the ``configure`` script resides. +You can do this like so: .. code-block:: python configure_directory = "src" -^^^^^^^^^^^^^^^^^^^^^^ Building out of source ^^^^^^^^^^^^^^^^^^^^^^ -Some packages like ``gcc`` recommend building their software in a -different directory than the source code to prevent build pollution. +Some packages like ``gcc`` recommend building their software in a different directory than the source code to prevent build pollution. This can be done using the ``build_directory`` variable: .. code-block:: python build_directory = "spack-build" -By default, Spack will build the package in the same directory that -contains the ``configure`` script. +By default, Spack will build the package in the same directory that contains the ``configure`` script. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build and install targets ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -545,29 +451,22 @@ For most Autotools packages, the usual: $ make $ make install -is sufficient to install the package. However, if you need to run -make with any other targets, for example, to build an optional -library or build the documentation, you can add these like so: +is sufficient to install the package. +However, if you need to run make with any other targets, for example, to build an optional library or build the documentation, you can add these like so: .. code-block:: python build_targets = ["all", "docs"] install_targets = ["install", "docs"] -^^^^^^^ Testing ^^^^^^^ -Autotools-based packages typically provide unit testing via the -``check`` and ``installcheck`` targets. If you build your software -with ``spack install --test=root``, Spack will check for the presence -of a ``check`` or ``test`` target in the Makefile and run -``make check`` for you. After installation, it will check for an -``installcheck`` target and run ``make installcheck`` if it finds one. +Autotools-based packages typically provide unit testing via the ``check`` and ``installcheck`` targets. +If you build your software with ``spack install --test=root``, Spack will check for the presence of a ``check`` or ``test`` target in the Makefile and run ``make check`` for you. 
+After installation, it will check for an ``installcheck`` target and run ``make installcheck`` if it finds one. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the Autotools build system, see: -https://www.gnu.org/software/automake/manual/html_node/Autotools-Introduction.html +For more information on the Autotools build system, see: https://www.gnu.org/software/automake/manual/html_node/Autotools-Introduction.html diff --git a/lib/spack/docs/build_systems/bundlepackage.rst b/lib/spack/docs/build_systems/bundlepackage.rst index 015aba55bffee7..6db4cf0b6f124d 100644 --- a/lib/spack/docs/build_systems/bundlepackage.rst +++ b/lib/spack/docs/build_systems/bundlepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,22 +9,18 @@ .. _bundlepackage: ------- Bundle ------ -``BundlePackage`` represents a set of packages that are expected to work -well together, such as a collection of commonly used software libraries. +``BundlePackage`` represents a set of packages that are expected to work well together, such as a collection of commonly used software libraries. The associated software is specified as dependencies. -If it makes sense, variants, conflicts, and requirements can be added to -the package. :ref:`Variants ` ensure that common build options -are consistent across the packages supporting them. +If it makes sense, variants, conflicts, and requirements can be added to the package. +:ref:`Variants ` ensure that common build options are consistent across the packages supporting them. :ref:`Conflicts ` prevent attempts to build with known bugs and limitations. :ref:`Requirements ` prevent attempts to build without critical options. -For example, if ``MyBundlePackage`` is known to only build on ``linux``, -it could use the ``require`` directive as follows: +For example, if ``MyBundlePackage`` is known to only build on ``linux``, it could use the ``require`` directive as follows: .. code-block:: python @@ -36,17 +33,14 @@ Spack has a number of built-in bundle packages, such as: * `Libc `_ * `Xsdk `_ -where ``Xsdk`` also inherits from ``CudaPackage`` and ``RocmPackage`` and -``Libc`` is a virtual bundle package for the C standard library. +where ``Xsdk`` also inherits from ``CudaPackage`` and ``RocmPackage`` and ``Libc`` is a virtual bundle package for the C standard library. -^^^^^^^^ Creation ^^^^^^^^ -Be sure to specify the ``bundle`` template if you are using ``spack create`` -to generate a package from the template. For example, use the following -command to create a bundle package whose class name will be ``Mybundle``: +Be sure to specify the ``bundle`` template if you are using ``spack create`` to generate a package from the template. +For example, use the following command to create a bundle package whose class name will be ``Mybundle``: .. code-block:: console @@ -54,25 +48,19 @@ command to create a bundle package whose class name will be ``Mybundle``: -^^^^^^ Phases ^^^^^^ -The ``BundlePackage`` base class does not provide any phases by default -since the bundle does not represent a build system. +The ``BundlePackage`` base class does not provide any phases by default since the bundle does not represent a build system. -^^^^^^ URL ^^^^^^ -The ``url`` property does not have meaning since there is no package-specific -code to fetch. 
+The ``url`` property does not have meaning since there is no package-specific code to fetch. -^^^^^^^ Version ^^^^^^^ -At least one ``version`` must be specified in order for the package to -build. +At least one ``version`` must be specified in order for the package to build. diff --git a/lib/spack/docs/build_systems/cachedcmakepackage.rst b/lib/spack/docs/build_systems/cachedcmakepackage.rst index 4eb5bc0db8c7c8..c7a8f73b03e227 100644 --- a/lib/spack/docs/build_systems/cachedcmakepackage.rst +++ b/lib/spack/docs/build_systems/cachedcmakepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,20 +9,15 @@ .. _cachedcmakepackage: ------------ CachedCMake ----------- -The CachedCMakePackage base class is used for CMake-based workflows -that create a CMake cache file prior to running ``cmake``. This is -useful for packages with arguments longer than the system limit, and -for reproducibility. +The CachedCMakePackage base class is used for CMake-based workflows that create a CMake cache file prior to running ``cmake``. +This is useful for packages with arguments longer than the system limit, and for reproducibility. -The documentation for this class assumes that the user is familiar with -the ``CMakePackage`` class from which it inherits. See the documentation -for :ref:`CMakePackage `. +The documentation for this class assumes that the user is familiar with the ``CMakePackage`` class from which it inherits. +See the documentation for :ref:`CMakePackage `. -^^^^^^ Phases ^^^^^^ @@ -46,45 +42,33 @@ By default, these phases run: $ make test # optional $ make install -The ``CachedCMakePackage`` class inherits from the ``CMakePackage`` -class, and accepts all of the same options and adds all of the same -flags to the ``cmake`` command. Similar to the ``CMakePackage`` class, -you may need to add a few arguments yourself, and the -``CachedCMakePackage`` provides the same interface to add those -flags. +The ``CachedCMakePackage`` class inherits from the ``CMakePackage`` class, and accepts all of the same options and adds all of the same flags to the ``cmake`` command. +Similar to the ``CMakePackage`` class, you may need to add a few arguments yourself, and the ``CachedCMakePackage`` provides the same interface to add those flags. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Adding entries to the CMake cache ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In addition to adding flags to the ``cmake`` command, you may need to -add entries to the CMake cache in the ``initconfig`` phase. This can -be done by overriding one of four methods: +In addition to adding flags to the ``cmake`` command, you may need to add entries to the CMake cache in the ``initconfig`` phase. +This can be done by overriding one of four methods: #. ``CachedCMakePackage.initconfig_compiler_entries`` #. ``CachedCMakePackage.initconfig_mpi_entries`` #. ``CachedCMakePackage.initconfig_hardware_entries`` #. ``CachedCMakePackage.initconfig_package_entries`` -Each of these methods returns a list of CMake cache strings. The -distinction between these methods is merely to provide a -well-structured and legible CMake cache file -- otherwise, entries -from each of these methods are handled identically. +Each of these methods returns a list of CMake cache strings. 
+The distinction between these methods is merely to provide a well-structured and legible CMake cache file -- otherwise, entries from each of these methods are handled identically. -Spack also provides convenience methods for generating CMake cache -entries. These methods are available at module scope in every Spack -package. Because CMake parses boolean options, strings, and paths -differently, there are three such methods: +Spack also provides convenience methods for generating CMake cache entries. +These methods are available at module scope in every Spack package. +Because CMake parses boolean options, strings, and paths differently, there are three such methods: #. ``cmake_cache_option`` #. ``cmake_cache_string`` #. ``cmake_cache_path`` -These methods each accept three parameters -- the name of the CMake -variable associated with the entry, the value of the entry, and an -optional comment -- and return strings in the appropriate format to be -returned from any of the ``initconfig*`` methods. Additionally, these -methods may return comments beginning with the ``#`` character. +These methods each accept three parameters -- the name of the CMake variable associated with the entry, the value of the entry, and an optional comment -- and return strings in the appropriate format to be returned from any of the ``initconfig*`` methods. +Additionally, these methods may return comments beginning with the ``#`` character. A typical usage of these methods may look something like this: @@ -103,6 +87,7 @@ A typical usage of these methods may look something like this: else: entries.append(cmake_cache_option("FOO_MPI", False, "disable mpi")) + def initconfig_package_entries(self): # Package specific options entries = [] @@ -118,9 +103,7 @@ A typical usage of these methods may look something like this: entries.append(cmake_cache_string("FOO_BLAS", "baz", "Use baz")) entries.append(cmake_cache_path("BAZ_PREFIX", self.spec["baz"].prefix)) -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on CMake cache files, see: -https://cmake.org/cmake/help/latest/manual/cmake.1.html +For more information on CMake cache files, see: https://cmake.org/cmake/help/latest/manual/cmake.1.html diff --git a/lib/spack/docs/build_systems/cmakepackage.rst b/lib/spack/docs/build_systems/cmakepackage.rst index d41475828c49c7..575f6846475791 100644 --- a/lib/spack/docs/build_systems/cmakepackage.rst +++ b/lib/spack/docs/build_systems/cmakepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,19 +9,15 @@ .. _cmakepackage: ------- CMake ------ -Like Autotools, CMake is a widely-used build-script generator. Designed -by Kitware, CMake is the most popular build system for new C, C++, and -Fortran projects, and many older projects are switching to it as well. +Like Autotools, CMake is a widely-used build-script generator. +Designed by Kitware, CMake is the most popular build system for new C, C++, and Fortran projects, and many older projects are switching to it as well. -Unlike Autotools, CMake can generate build scripts for builders other -than Make: Ninja, Visual Studio, etc. It is therefore cross-platform, -whereas Autotools is Unix-only. +Unlike Autotools, CMake can generate build scripts for builders other than Make: Ninja, Visual Studio, etc. +It is therefore cross-platform, whereas Autotools is Unix-only. 
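+As a quick orientation, a minimal CMake-based recipe can be sketched as follows; the package name ``foo``, its URL, and its checksum are hypothetical placeholders rather than a real package:
+
+.. code-block:: python
+
+   class Foo(CMakePackage):
+       """Hypothetical CMake-based package, shown only for illustration."""
+
+       homepage = "https://example.com/foo"
+       url = "https://example.com/foo-1.0.tar.gz"
+
+       version("1.0", sha256="...")  # placeholder checksum
+
+       def cmake_args(self):
+           # Extra -D flags would go here; none are needed for this sketch
+           return []
+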
-^^^^^^ Phases ^^^^^^ @@ -42,19 +39,15 @@ By default, these phases run: $ make install -A few more flags are passed to ``cmake`` by default, including flags -for setting the build type and flags for locating dependencies. Of -course, you may need to add a few arguments yourself. +A few more flags are passed to ``cmake`` by default, including flags for setting the build type and flags for locating dependencies. +Of course, you may need to add a few arguments yourself. -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -A CMake-based package can be identified by the presence of a -``CMakeLists.txt`` file. This file defines the build flags that can be -passed to the CMake invocation, as well as linking instructions. If -you are familiar with CMake, it can prove very useful for determining -dependencies and dependency version requirements. +A CMake-based package can be identified by the presence of a ``CMakeLists.txt`` file. +This file defines the build flags that can be passed to the CMake invocation, as well as linking instructions. +If you are familiar with CMake, it can prove very useful for determining dependencies and dependency version requirements. One thing to look for is the ``cmake_minimum_required`` function: @@ -67,76 +60,60 @@ This means that CMake 2.8.12 is the earliest release that will work. You should specify this in a ``depends_on`` statement. CMake-based packages may also contain ``CMakeLists.txt`` in subdirectories. -This modularization helps to manage complex builds in a hierarchical -fashion. Sometimes these nested ``CMakeLists.txt`` require additional -dependencies not mentioned in the top-level file. +This modularization helps to manage complex builds in a hierarchical fashion. +Sometimes these nested ``CMakeLists.txt`` require additional dependencies not mentioned in the top-level file. -There's also usually a ``cmake`` or ``CMake`` directory containing -additional macros, find scripts, etc. These may prove useful in -determining dependency version requirements. +There's also usually a ``cmake`` or ``CMake`` directory containing additional macros, find scripts, etc. +These may prove useful in determining dependency version requirements. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -Every package that uses the CMake build system requires a ``cmake`` -dependency. Since this is always the case, the ``CMakePackage`` base -class already contains: +Every package that uses the CMake build system requires a ``cmake`` dependency. +Since this is always the case, the ``CMakePackage`` base class already contains: .. code-block:: python depends_on("cmake", type="build") -If you need to specify a particular version requirement, you can -override this in your package: +If you need to specify a particular version requirement, you can override this in your package: .. code-block:: python depends_on("cmake@2.8.12:", type="build") -^^^^^^^^^^^^^^^^^^^ Finding cmake flags ^^^^^^^^^^^^^^^^^^^ -To get a list of valid flags that can be passed to ``cmake``, run the -following command in the directory that contains ``CMakeLists.txt``: +To get a list of valid flags that can be passed to ``cmake``, run the following command in the directory that contains ``CMakeLists.txt``: .. code-block:: console $ cmake . -LAH -CMake will start by checking for compilers and dependencies. Eventually -it will begin to list build options. You'll notice that most of the -build options at the top are prefixed with ``CMAKE_``. 
You can safely -ignore most of these options as Spack already sets them for you. This -includes flags needed to locate dependencies, RPATH libraries, set the -installation directory, and set the build type. - -The rest of the flags are the ones you should consider adding to your -package. They often include flags to enable/disable support for certain -features and locate specific dependencies. One thing you'll notice that -makes CMake different from Autotools is that CMake has an understanding -of build flag hierarchy. That is, certain flags will not display unless -their parent flag has been selected. For example, flags to specify the -``lib`` and ``include`` directories for a package might not appear -unless CMake found the dependency it was looking for. You may need to -manually specify certain flags to explore the full depth of supported -build flags, or check the ``CMakeLists.txt`` yourself. +CMake will start by checking for compilers and dependencies. +Eventually it will begin to list build options. +You'll notice that most of the build options at the top are prefixed with ``CMAKE_``. +You can safely ignore most of these options as Spack already sets them for you. +This includes flags needed to locate dependencies, RPATH libraries, set the installation directory, and set the build type. + +The rest of the flags are the ones you should consider adding to your package. +They often include flags to enable/disable support for certain features and locate specific dependencies. +One thing you'll notice that makes CMake different from Autotools is that CMake has an understanding of build flag hierarchy. +That is, certain flags will not display unless their parent flag has been selected. +For example, flags to specify the ``lib`` and ``include`` directories for a package might not appear unless CMake found the dependency it was looking for. +You may need to manually specify certain flags to explore the full depth of supported build flags, or check the ``CMakeLists.txt`` yourself. .. _cmake_args: -^^^^^^^^^^^^^^^^^^^^^ Adding flags to cmake ^^^^^^^^^^^^^^^^^^^^^ -To add additional flags to the ``cmake`` call, simply override the -``cmake_args`` function. The following example defines values for the flags -``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with -and without the :meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define` and -:meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions: +To add additional flags to the ``cmake`` call, simply override the ``cmake_args`` function. +The following example defines values for the flags ``WHATEVER``, ``ENABLE_BROKEN_FEATURE``, ``DETECT_HDF5``, and ``THREADS`` with and without the :meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define` and :meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.define_from_variant` helper functions: .. code-block:: python @@ -145,26 +122,25 @@ and without the :meth:`~spack_repo.builtin.build_systems.cmake.CMakeBuilder.defi "-DWHATEVER:STRING=somevalue", self.define("ENABLE_BROKEN_FEATURE", False), self.define_from_variant("DETECT_HDF5", "hdf5"), - self.define_from_variant("THREADS"), # True if +threads + self.define_from_variant("THREADS"), # True if +threads ] return args -Spack supports CMake defines from conditional variants too. Whenever the condition on -the variant is not met, ``define_from_variant()`` will simply return an empty string, -and CMake simply ignores the empty command line argument. 
For example, the following +Spack supports CMake defines from conditional variants too. +Whenever the condition on the variant is not met, ``define_from_variant()`` will simply return an empty string, and CMake simply ignores the empty command line argument. +For example, the following .. code-block:: python variant("example", default=True, when="@2.0:") + def cmake_args(self): - return [self.define_from_variant("EXAMPLE", "example")] + return [self.define_from_variant("EXAMPLE", "example")] -will generate ``'cmake' '-DEXAMPLE=ON' ...`` when `@2.0: +example` is met, but will -result in ``'cmake' '' ...`` when the spec version is below ``2.0``. +will generate ``'cmake' '-DEXAMPLE=ON' ...`` when `@2.0: +example` is met, but will result in ``'cmake' '' ...`` when the spec version is below ``2.0``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ CMake arguments provided by Spack ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -172,102 +148,91 @@ The following default arguments are controlled by Spack: ``CMAKE_INSTALL_PREFIX`` ------------------------- +"""""""""""""""""""""""" Is set to the package's install directory. ``CMAKE_PREFIX_PATH`` ---------------------- +""""""""""""""""""""" -CMake finds dependencies through calls to ``find_package()``, ``find_program()``, -``find_library()``, ``find_file()``, and ``find_path()``, which use a list of search -paths from ``CMAKE_PREFIX_PATH``. Spack sets this variable to a list of prefixes of the -spec's transitive dependencies. +CMake finds dependencies through calls to ``find_package()``, ``find_program()``, ``find_library()``, ``find_file()``, and ``find_path()``, which use a list of search paths from ``CMAKE_PREFIX_PATH``. +Spack sets this variable to a list of prefixes of the spec's transitive dependencies. -For troubleshooting cases where CMake fails to find a dependency, add the -``--debug-find`` flag to ``cmake_args``. +For troubleshooting cases where CMake fails to find a dependency, add the ``--debug-find`` flag to ``cmake_args``. ``CMAKE_BUILD_TYPE`` --------------------- +"""""""""""""""""""" -Every CMake-based package accepts a ``-DCMAKE_BUILD_TYPE`` flag to -dictate which level of optimization to use. In order to ensure -uniformity across packages, the ``CMakePackage`` base class adds -a variant to control this: +Every CMake-based package accepts a ``-DCMAKE_BUILD_TYPE`` flag to dictate which level of optimization to use. +In order to ensure uniformity across packages, the ``CMakePackage`` base class adds a variant to control this: .. code-block:: python - variant("build_type", default="RelWithDebInfo", - description="CMake build type", - values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel")) + variant( + "build_type", + default="RelWithDebInfo", + description="CMake build type", + values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"), + ) However, not every CMake package accepts all four of these options. -Grep the ``CMakeLists.txt`` file to see if the default values are -missing or replaced. For example, the -`dealii `_ -package overrides the default variant with: +Grep the ``CMakeLists.txt`` file to see if the default values are missing or replaced. +For example, the `dealii `_ package overrides the default variant with: .. 
code-block:: python - variant("build_type", default="DebugRelease", - description="The build type to build", - values=("Debug", "Release", "DebugRelease")) + variant( + "build_type", + default="DebugRelease", + description="The build type to build", + values=("Debug", "Release", "DebugRelease"), + ) -For more information on ``CMAKE_BUILD_TYPE``, see: -https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html +For more information on ``CMAKE_BUILD_TYPE``, see: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html ``CMAKE_INSTALL_RPATH`` and ``CMAKE_INSTALL_RPATH_USE_LINK_PATH=ON`` --------------------------------------------------------------------- +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" -CMake uses different RPATHs during the build and after installation, so that executables -can locate the libraries they're linked to during the build, and installed executables -do not have RPATHs to build directories. In Spack, we have to make sure that RPATHs are -set properly after installation. +CMake uses different RPATHs during the build and after installation, so that executables can locate the libraries they're linked to during the build, and installed executables do not have RPATHs to build directories. +In Spack, we have to make sure that RPATHs are set properly after installation. -Spack sets ``CMAKE_INSTALL_RPATH`` to a list of ``/lib`` or ``/lib64`` -directories of the spec's link-type dependencies. Apart from that, it sets -``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, which should add RPATHs for directories of -linked libraries not in the directories covered by ``CMAKE_INSTALL_RPATH``. +Spack sets ``CMAKE_INSTALL_RPATH`` to a list of ``/lib`` or ``/lib64`` directories of the spec's link-type dependencies. +Apart from that, it sets ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, which should add RPATHs for directories of linked libraries not in the directories covered by ``CMAKE_INSTALL_RPATH``. -Usually it's enough to set only ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, but the -reason to provide both options is that packages may dynamically open shared libraries, -which CMake cannot detect. In those cases, the RPATHs from ``CMAKE_INSTALL_RPATH`` are -used as search paths. +Usually it's enough to set only ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON``, but the reason to provide both options is that packages may dynamically open shared libraries, which CMake cannot detect. +In those cases, the RPATHs from ``CMAKE_INSTALL_RPATH`` are used as search paths. .. note:: - Some packages provide stub libraries, which contain an interface for linking without - an implementation. When using such libraries, it's best to override the option - ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=OFF`` in ``cmake_args``, so that stub libraries - are not used at runtime. + Some packages provide stub libraries, which contain an interface for linking without an implementation. + When using such libraries, it's best to override the option ``-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=OFF`` in ``cmake_args``, so that stub libraries are not used at runtime. -^^^^^^^^^^ Generators ^^^^^^^^^^ -CMake and Autotools are build-script generation tools; they "generate" -the Makefiles that are used to build a software package. CMake actually -supports multiple generators, not just Makefiles. Another common -generator is Ninja. To switch to the Ninja generator, simply add: +CMake and Autotools are build-script generation tools; they "generate" the Makefiles that are used to build a software package. 
+CMake actually supports multiple generators, not just Makefiles. +Another common generator is Ninja. +To switch to the Ninja generator, simply add: .. code-block:: python generator("ninja") -``CMakePackage`` defaults to "Unix Makefiles". If you switch to the -Ninja generator, make sure to add: +``CMakePackage`` defaults to "Unix Makefiles". +If you switch to the Ninja generator, make sure to add: .. code-block:: python depends_on("ninja", type="build") -to the package as well. Aside from that, you shouldn't need to do -anything else. Spack will automatically detect that you are using -Ninja and run: +to the package as well. +Aside from that, you shouldn't need to do anything else. +Spack will automatically detect that you are using Ninja and run: .. code-block:: console @@ -275,43 +240,35 @@ Ninja and run: $ ninja $ ninja install -Spack currently only supports "Unix Makefiles" and "Ninja" as valid -generators, but it should be simple to add support for alternative -generators. For more information on CMake generators, see: -https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html +Spack currently only supports "Unix Makefiles" and "Ninja" as valid generators, but it should be simple to add support for alternative generators. +For more information on CMake generators, see: https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ CMakeLists.txt in a sub-directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Occasionally, developers will hide their source code and ``CMakeLists.txt`` -in a subdirectory like ``src``. If this happens, Spack won't -be able to automatically detect the build system properly when running -``spack create``. You will have to manually change the package base -class and tell Spack where ``CMakeLists.txt`` resides. You can do this -like so: +Occasionally, developers will hide their source code and ``CMakeLists.txt`` in a subdirectory like ``src``. +If this happens, Spack won't be able to automatically detect the build system properly when running ``spack create``. +You will have to manually change the package base class and tell Spack where ``CMakeLists.txt`` resides. +You can do this like so: .. code-block:: python root_cmakelists_dir = "src" -Note that this path is relative to the root of the extracted tarball, -not to the ``build_directory``. It defaults to the current directory. +Note that this path is relative to the root of the extracted tarball, not to the ``build_directory``. +It defaults to the current directory. -^^^^^^^^^^^^^^^^^^^^^^ Building out of source ^^^^^^^^^^^^^^^^^^^^^^ -By default, Spack builds every ``CMakePackage`` in a ``spack-build`` -sub-directory. If, for whatever reason, you would like to build in a -different sub-directory, simply override ``build_directory`` like so: +By default, Spack builds every ``CMakePackage`` in a ``spack-build`` sub-directory. +If, for whatever reason, you would like to build in a different sub-directory, simply override ``build_directory`` like so: .. code-block:: python build_directory = "my-build" -^^^^^^^^^^^^^^^^^^^^^^^^^ Build and install targets ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -323,28 +280,22 @@ For most CMake packages, the usual: $ make $ make install -is sufficient to install the package. However, if you need to run -make with any other targets, for example, to build an optional -library or build the documentation, you can add these like so: +is sufficient to install the package. 
+However, if you need to run make with any other targets, for example, to build an optional library or build the documentation, you can add these like so: .. code-block:: python build_targets = ["all", "docs"] install_targets = ["install", "docs"] -^^^^^^^ Testing ^^^^^^^ -CMake-based packages typically provide unit testing via the -``test`` target. If you build your software with ``--test=root``, -Spack will check for the presence of a ``test`` target in the -Makefile and run ``make test`` for you. If you want to run a -different test instead, simply override the ``check`` method. +CMake-based packages typically provide unit testing via the ``test`` target. +If you build your software with ``--test=root``, Spack will check for the presence of a ``test`` target in the Makefile and run ``make test`` for you. +If you want to run a different test instead, simply override the ``check`` method. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the CMake build system, see: -https://cmake.org/cmake/help/latest/ +For more information on the CMake build system, see: https://cmake.org/cmake/help/latest/ diff --git a/lib/spack/docs/build_systems/cudapackage.rst b/lib/spack/docs/build_systems/cudapackage.rst index c27f28f40a31ac..dc840df3a98e63 100644 --- a/lib/spack/docs/build_systems/cudapackage.rst +++ b/lib/spack/docs/build_systems/cudapackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,17 +9,14 @@ .. _cudapackage: ------- Cuda ------ Different from other packages, ``CudaPackage`` does not represent a build system. Instead its goal is to simplify and unify usage of ``CUDA`` in other packages by providing a `mixin-class `_. -You can find source for the package at -``__. +You can find source for the package at ``__. -^^^^^^^^ Variants ^^^^^^^^ @@ -26,109 +24,86 @@ This package provides the following variants: * **cuda** - This variant is used to enable/disable building with ``CUDA``. The default - is disabled (or ``False``). + This variant is used to enable/disable building with ``CUDA``. + The default is disabled (or ``False``). * **cuda_arch** This variant supports the optional specification of one or more architectures. - Valid values are maintained in the ``cuda_arch_values`` property and - are the numeric character equivalent of the compute capability version - (e.g., '10' for version 1.0). Each provided value affects associated - ``CUDA`` dependencies and compiler conflicts. - - The variant builds both PTX code for the _virtual_ architecture - (e.g. ``compute_10``) and binary code for the _real_ architecture (e.g. ``sm_10``). + Valid values are maintained in the ``cuda_arch_values`` property and are the numeric character equivalent of the compute capability version (e.g., '10' for version 1.0). + Each provided value affects associated ``CUDA`` dependencies and compiler conflicts. - GPUs and their compute capability versions are listed at - https://developer.nvidia.com/cuda-gpus. + The variant builds both PTX code for the *virtual* architecture (e.g. ``compute_10``) and binary code for the *real* architecture (e.g. ``sm_10``). + + GPUs and their compute capability versions are listed at https://developer.nvidia.com/cuda-gpus. -^^^^^^^^^ Conflicts ^^^^^^^^^ -Conflicts are used to prevent builds with known bugs or issues. 
While -base ``CUDA`` conflicts have been included with this package, you may -want to add more for your software. +Conflicts are used to prevent builds with known bugs or issues. +While base ``CUDA`` conflicts have been included with this package, you may want to add more for your software. -For example, if your package requires ``cuda_arch`` to be specified when -``cuda`` is enabled, you can add the following conflict to your package -to terminate such build attempts with a suitable message: +For example, if your package requires ``cuda_arch`` to be specified when ``cuda`` is enabled, you can add the following conflict to your package to terminate such build attempts with a suitable message: .. code-block:: python - conflicts("cuda_arch=none", when="+cuda", - msg="CUDA architecture is required") + conflicts("cuda_arch=none", when="+cuda", msg="CUDA architecture is required") -Similarly, if your software does not support all versions of the property, -you could add ``conflicts`` to your package for those versions. For example, -suppose your software does not work with CUDA compute capability versions -prior to SM 5.0 (``50``). You can add the following code to display a -custom message should a user attempt such a build: +Similarly, if your software does not support all versions of the property, you could add ``conflicts`` to your package for those versions. +For example, suppose your software does not work with CUDA compute capability versions prior to SM 5.0 (``50``). +You can add the following code to display a custom message should a user attempt such a build: .. code-block:: python - unsupported_cuda_archs = [ - "10", "11", "12", "13", - "20", "21", - "30", "32", "35", "37" - ] + unsupported_cuda_archs = ["10", "11", "12", "13", "20", "21", "30", "32", "35", "37"] for value in unsupported_cuda_archs: - conflicts(f"cuda_arch={value}", when="+cuda", - msg=f"CUDA architecture {value} is not supported") + conflicts( + f"cuda_arch={value}", when="+cuda", msg=f"CUDA architecture {value} is not supported" + ) -^^^^^^^ Methods ^^^^^^^ -This package provides one custom helper method, which is used to build -standard CUDA compiler flags. +This package provides one custom helper method, which is used to build standard CUDA compiler flags. **cuda_flags** + This built-in static method returns a list of command line flags for the chosen ``cuda_arch`` value(s). + The flags are intended to be passed to the CUDA compiler driver (i.e., ``nvcc``). - This built-in static method returns a list of command line flags - for the chosen ``cuda_arch`` value(s). The flags are intended to - be passed to the CUDA compiler driver (i.e., ``nvcc``). - - This method must be explicitly called when you are creating the - arguments for your build in order to use the values. + This method must be explicitly called when you are creating the arguments for your build in order to use the values. -^^^^^^ Usage ^^^^^^ -This helper package can be added to your package by adding it as a base -class of your package. For example, you can add it to your -:ref:`CMakePackage `-based package as follows: +This helper package can be added to your package by adding it as a base class of your package. +For example, you can add it to your :ref:`CMakePackage `-based package as follows: .. code-block:: python - :emphasize-lines: 1,7-16 - - class MyCudaPackage(CMakePackage, CudaPackage): - ... - def cmake_args(self): - spec = self.spec - args = [] - ... 
- if spec.satisfies("+cuda"): - # Set up the CUDA macros needed by the build - args.append("-DWITH_CUDA=ON") - cuda_arch_list = spec.variants["cuda_arch"].value - cuda_arch = cuda_arch_list[0] - if cuda_arch != "none": - args.append(f"-DCUDA_FLAGS=-arch=sm_{cuda_arch}") - else: - # Ensure build with CUDA is disabled - args.append("-DWITH_CUDA=OFF") - ... - return args + :emphasize-lines: 1,8-17 + + class MyCudaPackage(CMakePackage, CudaPackage): + ... + + def cmake_args(self): + spec = self.spec + args = [] + ... + if spec.satisfies("+cuda"): + # Set up the CUDA macros needed by the build + args.append("-DWITH_CUDA=ON") + cuda_arch_list = spec.variants["cuda_arch"].value + cuda_arch = cuda_arch_list[0] + if cuda_arch != "none": + args.append(f"-DCUDA_FLAGS=-arch=sm_{cuda_arch}") + else: + # Ensure build with CUDA is disabled + args.append("-DWITH_CUDA=OFF") + ... + return args assuming only the ``WITH_CUDA`` and ``CUDA_FLAGS`` flags are required. You will need to customize options as needed for your build. -This example also illustrates how to check for the ``cuda`` variant using -``self.spec`` and how to retrieve the ``cuda_arch`` variant's value, which -is a list, using ``self.spec.variants["cuda_arch"].value``. +This example also illustrates how to check for the ``cuda`` variant using ``self.spec`` and how to retrieve the ``cuda_arch`` variant's value, which is a list, using ``self.spec.variants["cuda_arch"].value``. -With over 70 packages using ``CudaPackage`` as of January 2021 there are -lots of examples to choose from to get more ideas for using this package. +With over 70 packages using ``CudaPackage`` as of January 2021 there are lots of examples to choose from to get more ideas for using this package. diff --git a/lib/spack/docs/build_systems/custompackage.rst b/lib/spack/docs/build_systems/custompackage.rst index 501b9a68b580ea..01f5e0c1ff705b 100644 --- a/lib/spack/docs/build_systems/custompackage.rst +++ b/lib/spack/docs/build_systems/custompackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,53 +9,39 @@ .. _custompackage: --------------------- Custom Build Systems -------------------- -While the built-in build systems should meet your needs for the -vast majority of packages, some packages provide custom build scripts. +While the built-in build systems should meet your needs for the vast majority of packages, some packages provide custom build scripts. This guide is intended for the following use cases: * Packaging software with its own custom build system * Adding support for new build systems -If you want to add support for a new build system, a good place to -start is to look at the definitions of other build systems. This guide -focuses mostly on how Spack's build systems work. +If you want to add support for a new build system, a good place to start is to look at the definitions of other build systems. +This guide focuses mostly on how Spack's build systems work. -In this guide, we will be using the -`perl `_ and -`cmake `_ -packages as examples. ``perl``'s build system is a hand-written -``Configure`` shell script, while ``cmake`` bootstraps itself during -installation. Both of these packages require custom build systems. +In this guide, we will be using the `perl `_ and `cmake `_ packages as examples. 
+``perl``'s build system is a hand-written ``Configure`` shell script, while ``cmake`` bootstraps itself during installation. +Both of these packages require custom build systems. -^^^^^^^^^^ Base class ^^^^^^^^^^ -If your package does not belong to any of the built-in build -systems that Spack already supports, you should inherit from the -``Package`` base class. ``Package`` is a simple base class with a -single phase: ``install``. If your package is simple, you may be able -to simply write an ``install`` method that gets the job done. However, -if your package is more complex and installation involves multiple -steps, you should add separate phases as mentioned in the next section. +If your package does not belong to any of the built-in build systems that Spack already supports, you should inherit from the ``Package`` base class. +``Package`` is a simple base class with a single phase: ``install``. +If your package is simple, you may be able to simply write an ``install`` method that gets the job done. +However, if your package is more complex and installation involves multiple steps, you should add separate phases as mentioned in the next section. -If you are creating a new build system base class, you should inherit -from ``PackageBase``. This is the superclass for all build systems in -Spack. +If you are creating a new build system base class, you should inherit from ``PackageBase``. +This is the superclass for all build systems in Spack. -^^^^^^ Phases ^^^^^^ -The most important concept in Spack's build system support is the idea -of phases. Each build system defines a set of phases that are necessary -to install the package. They usually follow some sort of "configure", -"build", "install" guideline, but any of those phases may be missing -or combined with another phase. +The most important concept in Spack's build system support is the idea of phases. +Each build system defines a set of phases that are necessary to install the package. +They usually follow some sort of "configure", "build", "install" guideline, but any of those phases may be missing or combined with another phase. If you look at the ``perl`` package, you'll see: @@ -68,11 +55,9 @@ Similarly, ``cmake`` defines: phases = ("bootstrap", "build", "install") -If we look at the ``cmake`` example, this tells Spack's ``PackageBase`` -class to run the ``bootstrap``, ``build``, and ``install`` functions -in that order. It is now up to you to define these methods. +If we look at the ``cmake`` example, this tells Spack's ``PackageBase`` class to run the ``bootstrap``, ``build``, and ``install`` functions in that order. +It is now up to you to define these methods. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Phase and phase_args functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -84,21 +69,19 @@ If we look at ``perl``, we see that it defines a ``configure`` method: configure = Executable("./Configure") configure(*self.configure_args()) -There is also a corresponding ``configure_args`` function that handles -all of the arguments to pass to ``Configure``, just like in -``AutotoolsPackage``. Comparatively, the ``build`` and ``install`` -phases are pretty simple: +There is also a corresponding ``configure_args`` function that handles all of the arguments to pass to ``Configure``, just like in ``AutotoolsPackage``. +Comparatively, the ``build`` and ``install`` phases are pretty simple: .. 
code-block:: python def build(self, spec, prefix): make() + def install(self, spec, prefix): make("install") -The ``cmake`` package looks very similar, but with a ``bootstrap`` -function instead of ``configure``: +The ``cmake`` package looks very similar, but with a ``bootstrap`` function instead of ``configure``: .. code-block:: python @@ -106,78 +89,68 @@ function instead of ``configure``: bootstrap = Executable("./bootstrap") bootstrap(*self.bootstrap_args()) + def build(self, spec, prefix): make() + def install(self, spec, prefix): make("install") -Again, there is a ``bootstrap_args`` function that determines the -correct bootstrap flags to use. +Again, there is a ``bootstrap_args`` function that determines the correct bootstrap flags to use. -^^^^^^^^^^^^^^^^^^^^ -run_before/run_after -^^^^^^^^^^^^^^^^^^^^ +``run_before`` / ``run_after`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Occasionally, you may want to run extra steps either before or after -a given phase. This applies not just to custom build systems, but to -existing build systems as well. You may need to patch a file that is -generated by configure, or install extra files in addition to what -``make install`` copies to the installation prefix. This is where -``@run_before`` and ``@run_after`` come in. +Occasionally, you may want to run extra steps either before or after a given phase. +This applies not just to custom build systems, but to existing build systems as well. +You may need to patch a file that is generated by configure, or install extra files in addition to what ``make install`` copies to the installation prefix. +This is where ``@run_before`` and ``@run_after`` come in. -These Python decorators allow you to write functions that are called -before or after a particular phase. For example, in ``perl``, we see: +These Python decorators allow you to write functions that are called before or after a particular phase. +For example, in ``perl``, we see: .. code-block:: python @run_after("install") def install_cpanm(self): - spec = self.spec - maker = make - cpan_dir = join_path("cpanm", "cpanm") - if sys.platform == "win32": - maker = nmake - cpan_dir = join_path(self.stage.source_path, cpan_dir) - cpan_dir = windows_sfn(cpan_dir) - if "+cpanm" in spec: - with working_dir(cpan_dir): - perl = spec["perl"].command - perl("Makefile.PL") - maker() - maker("install") - -This extra step automatically installs ``cpanm`` in addition to the -base Perl installation. - -^^^^^^^^^^^^^^^^^^^^^ -on_package_attributes -^^^^^^^^^^^^^^^^^^^^^ - -The ``run_before``/``run_after`` logic discussed above becomes -particularly powerful when combined with the ``@on_package_attributes`` -decorator. This decorator allows you to conditionally run certain -functions depending on the attributes of that package. The most -common example is conditional testing. Many unit tests are prone to -failure, even when there is nothing wrong with the installation. -Unfortunately, non-portable unit tests and tests that are -"supposed to fail" are more common than we would like. Instead of -always running unit tests on installation, Spack lets users -conditionally run tests with the ``--test=root`` flag. 
- -If we wanted to define a function that would conditionally run -if and only if this flag is set, we would use the following line: + spec = self.spec + maker = make + cpan_dir = join_path("cpanm", "cpanm") + if sys.platform == "win32": + maker = nmake + cpan_dir = join_path(self.stage.source_path, cpan_dir) + cpan_dir = windows_sfn(cpan_dir) + if "+cpanm" in spec: + with working_dir(cpan_dir): + perl = spec["perl"].command + perl("Makefile.PL") + maker() + maker("install") + +This extra step automatically installs ``cpanm`` in addition to the base Perl installation. + +``on_package_attributes`` +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``run_before`` / ``run_after`` logic discussed above becomes particularly powerful when combined with the ``@on_package_attributes`` decorator. +This decorator allows you to conditionally run certain functions depending on the attributes of that package. +The most common example is conditional testing. +Many unit tests are prone to failure, even when there is nothing wrong with the installation. +Unfortunately, non-portable unit tests and tests that are "supposed to fail" are more common than we would like. +Instead of always running unit tests on installation, Spack lets users conditionally run tests with the ``--test=root`` flag. + +If we wanted to define a function that would conditionally run if and only if this flag is set, we would use the following: .. code-block:: python @on_package_attributes(run_tests=True) def my_test_function(self): ... -^^^^^^^ Testing ^^^^^^^ -Let's put everything together and add unit tests to be optionally run -during the installation of our package. +Let's put everything together and add unit tests to be optionally run during the installation of our package. In the ``perl`` package, we can see: .. code-block:: python @@ -185,43 +158,42 @@ In the ``perl`` package, we can see: @run_after("build") @on_package_attributes(run_tests=True) def build_test(self): - if sys.platform == "win32": - win32_dir = os.path.join(self.stage.source_path, "win32") - win32_dir = windows_sfn(win32_dir) - with working_dir(win32_dir): - nmake("test", ignore_quotes=True) - else: - make("test") - -As you can guess, this runs ``make test`` *after* building the package, -if and only if testing is requested. Again, this is not specific to -custom build systems, it can be added to existing build systems as well. + if sys.platform == "win32": + win32_dir = os.path.join(self.stage.source_path, "win32") + win32_dir = windows_sfn(win32_dir) + with working_dir(win32_dir): + nmake("test", ignore_quotes=True) + else: + make("test") + +As you can guess, this runs ``make test`` *after* building the package, if and only if testing is requested. +Again, this is not specific to custom build systems; it can be added to existing build systems as well. .. warning:: - The order of decorators matters. The following ordering: + The order of decorators matters. + The following ordering: .. code-block:: python @run_after("install") @on_package_attributes(run_tests=True) def my_test_function(self): ... - works as expected. However, if you reverse the ordering: + works as expected. + However, if you reverse the ordering: .. code-block:: python @on_package_attributes(run_tests=True) @run_after("install") def my_test_function(self): ... - the tests will always be run regardless of whether or not - ``--test=root`` is requested. 
See https://github.com/spack/spack/issues/3833 - for more information + the tests will always be run regardless of whether or not ``--test=root`` is requested. + See https://github.com/spack/spack/issues/3833 for more information -Ideally, every package in Spack will have some sort of test to ensure -that it was built correctly. It is up to the package authors to make -sure this happens. If you are adding a package for some software and -the developers list commands to test the installation, please add these -tests to your ``package.py``. +Ideally, every package in Spack will have some sort of test to ensure that it was built correctly. +It is up to the package authors to make sure this happens. +If you are adding a package for some software and the developers list commands to test the installation, please add these tests to your ``package.py``. -For more information on other forms of package testing, refer to -:ref:`Checking an installation `. +For more information on other forms of package testing, refer to :ref:`Checking an installation `. diff --git a/lib/spack/docs/build_systems/inteloneapipackage.rst b/lib/spack/docs/build_systems/inteloneapipackage.rst index 42d4f84f15dbf3..29722973f32490 100644 --- a/lib/spack/docs/build_systems/inteloneapipackage.rst +++ b/lib/spack/docs/build_systems/inteloneapipackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -9,71 +10,71 @@ .. _inteloneapipackage: -=========== IntelOneapi =========== -.. contents:: - +Spack can install and use the Intel oneAPI products. +You may either use Spack to install the oneAPI tools or use the `Intel installers`_. +After installation, you may use the tools directly, or use Spack to build packages with the tools. -oneAPI packages in Spack -======================== +The Spack Python class ``IntelOneapiPackage`` is a base class that is used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``, ``IntelOneapiTbb`` and other classes to implement the oneAPI packages. +Search for ``oneAPI`` at `packages.spack.io `_ for the full list of available oneAPI packages, or use: -Spack can install and use the Intel oneAPI products. You may either -use Spack to install the oneAPI tools or use the `Intel -installers`_. After installation, you may use the tools directly, or -use Spack to build packages with the tools. +.. code-block:: console -The Spack Python class ``IntelOneapiPackage`` is a base class that is -used by ``IntelOneapiCompilers``, ``IntelOneapiMkl``, -``IntelOneapiTbb`` and other classes to implement the oneAPI -packages. Search for ``oneAPI`` at `packages.spack.io `_ for the full -list of available oneAPI packages, or use:: + $ spack list -d oneAPI - spack list -d oneAPI +For more information on a specific package, do: -For more information on a specific package, do:: +.. code-block:: console - spack info --all + $ spack info --all -Examples -======== - Building a Package With icx --------------------------- -In this example, we build patchelf with ``icc`` and ``icx``. The -compilers are installed with Spack. +In this example, we build patchelf with ``icc`` and ``icx``. +The compilers are installed with Spack. + +Install the oneAPI compilers: -Install the oneAPI compilers:: +.. 
code-block:: spec - spack install intel-oneapi-compilers -To build the ``patchelf`` Spack package with ``icx``, do:: - spack install patchelf%oneapi +.. code-block:: spec + + $ spack install patchelf%oneapi Using oneAPI Spack environment ------------------------------- -In this example, we build LAMMPS with ``icx`` using Spack environment for oneAPI packages created by Intel. The -compilers are installed with Spack like in example above. +In this example, we build LAMMPS with ``icx`` using a Spack environment for oneAPI packages created by Intel. +The compilers are installed with Spack as in the example above. + +Install the oneAPI compilers: -Install the oneAPI compilers:: +.. code-block:: spec - spack install intel-oneapi-compilers + $ spack install intel-oneapi-compilers -Clone `spack-configs `_ repo and activate Intel oneAPI CPU environment:: +Clone `spack-configs `_ repo and activate Intel oneAPI CPU environment: - git clone https://github.com/spack/spack-configs - spack env activate spack-configs/INTEL/CPU - spack concretize -f +.. code-block:: console -`Intel oneAPI CPU environment `_ contains applications tested and validated by Intel. This list is constantly extended. Currently, it supports: + $ git clone https://github.com/spack/spack-configs + $ spack env activate spack-configs/INTEL/CPU + $ spack concretize -f + +The `Intel oneAPI CPU environment `_ contains applications tested and validated by Intel. +This list is constantly extended. +Currently, it supports: - `Devito `_ - `GROMACS `_ @@ -85,86 +86,96 @@ Clone `spack-configs `_ repo and activat - `STREAM `_ - `WRF `_ -To build LAMMPS with oneAPI compiler from this environment just run:: +To build LAMMPS with the oneAPI compiler from this environment, just run: + +.. code-block:: spec - spack install lammps + $ spack install lammps -Compiled binaries can be found using:: +Compiled binaries can be found using: - spack cd -i lammps +.. code-block:: console + + $ spack cd -i lammps You can do the same for all other applications from this environment. Using oneAPI MPI to Satisfy a Virtual Dependence ------------------------------------------------------- +------------------------------------------------ + +The ``hdf5`` package works with any compatible MPI implementation. +To build ``hdf5`` with Intel oneAPI MPI, do: -The ``hdf5`` package works with any compatible MPI implementation. To -build ``hdf5`` with Intel oneAPI MPI do:: +.. code-block:: spec - spack install hdf5 +mpi ^intel-oneapi-mpi + $ spack install hdf5 +mpi ^intel-oneapi-mpi Using Externally Installed oneAPI Tools ======================================= +--------------------------------------- -Spack can also use oneAPI tools that are manually installed with -`Intel Installers`_. The procedures for configuring Spack to use -external compilers and libraries are different. +Spack can also use oneAPI tools that are manually installed with `Intel Installers`_. +The procedures for configuring Spack to use external compilers and libraries are different. Compilers ---------- +^^^^^^^^^ + +To use the compilers, add some information about the installation to ``packages.yaml``. +For most users, it is sufficient to do: + +.. code-block:: console -To use the compilers, add some information about the installation to -``packages.yaml``. 
For most users, it is sufficient to do:: + $ spack compiler add /opt/intel/oneapi/compiler/latest/bin - spack compiler add /opt/intel/oneapi/compiler/latest/bin +Adapt the paths above if you did not install the tools in the default location. +After adding the compilers, using them is the same as if you had installed the ``intel-oneapi-compilers`` package. +Another option is to manually add the configuration to ``packages.yaml`` as described in :ref:`Compiler configuration `. -Adapt the paths above if you did not install the tools in the default -location. After adding the compilers, using them is the same -as if you had installed the ``intel-oneapi-compilers`` package. -Another option is to manually add the configuration to -``packages.yaml`` as described in :ref:`Compiler configuration -`. +Before 2024, the directory structure was different: -Before 2024, the directory structure was different:: +.. code-block:: console - spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64 - spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin + $ spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin/intel64 + $ spack compiler add /opt/intel/oneapi/compiler/latest/linux/bin Libraries ---------- +^^^^^^^^^ -If you want Spack to use oneMKL that you have installed without Spack in -the default location, then add the following to -``~/.spack/packages.yaml``, adjusting the version as appropriate:: +If you want Spack to use oneMKL that you have installed without Spack in the default location, then add the following to ``~/.spack/packages.yaml``, adjusting the version as appropriate: - intel-oneapi-mkl: - externals: - - spec: intel-oneapi-mkl@2021.1.1 - prefix: /opt/intel/oneapi/ +.. code-block:: yaml + + intel-oneapi-mkl: + externals: + - spec: intel-oneapi-mkl@2021.1.1 + prefix: /opt/intel/oneapi/ Using oneAPI Tools Installed by Spack -===================================== +------------------------------------- + +Spack can be a convenient way to install and configure compilers and libraries, even if you do not intend to build a Spack package. +If you want to build a Makefile project using Spack-installed oneAPI compilers, then use Spack to configure your environment: + +.. code-block:: spec + + $ spack load intel-oneapi-compilers -Spack can be a convenient way to install and configure compilers and -libraries, even if you do not intend to build a Spack package. If you -want to build a Makefile project using Spack-installed oneAPI compilers, -then use Spack to configure your environment:: +And then you can build with: - spack load intel-oneapi-compilers +.. code-block:: console -And then you can build with:: + $ CXX=icpx make - CXX=icpx make +You can also use Spack-installed libraries. +For example: -You can also use Spack-installed libraries. For example:: +.. code-block:: spec - spack load intel-oneapi-mkl + $ spack load intel-oneapi-mkl -This updates your environment CPATH, LIBRARY_PATH, and other -environment variables for building an application with oneMKL. +This updates your environment CPATH, LIBRARY_PATH, and other environment variables for building an application with oneMKL. .. 
_`Intel installers`: https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top.html diff --git a/lib/spack/docs/build_systems/luapackage.rst b/lib/spack/docs/build_systems/luapackage.rst index 74e25ca178fcfd..6c21468c4c4915 100644 --- a/lib/spack/docs/build_systems/luapackage.rst +++ b/lib/spack/docs/build_systems/luapackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -9,17 +10,13 @@ .. _luapackage: ------- Lua ------ -The ``Lua`` build system is a helper for the common case of Lua packages that provide -a rockspec file. This is not meant to take a rock archive, but to build -a source archive or repository that provides a rockspec, which should cover -most Lua packages. In the case a Lua package builds by Make rather than -LuaRocks, prefer MakefilePackage. +The ``Lua`` build system is a helper for the common case of Lua packages that provide a rockspec file. +This is not meant to take a rock archive, but to build a source archive or repository that provides a rockspec, which should cover most Lua packages. +In the case a Lua package builds by Make rather than LuaRocks, prefer MakefilePackage. -^^^^^^ Phases ^^^^^^ @@ -41,20 +38,16 @@ By default, these phases run: Any of these phases can be overridden in your package as necessary. -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -Packages that use the Lua/LuaRocks build system can be identified by the -presence of a ``*.rockspec`` file in their source tree, or can be fetched as -a source rock archive (``.src.rock``). This file declares things like build -instructions and dependencies. The ``.src.rock`` also contains all code. +Packages that use the Lua/LuaRocks build system can be identified by the presence of a ``*.rockspec`` file in their source tree, or can be fetched as a source rock archive (``.src.rock``). +This file declares things like build instructions and dependencies. +The ``.src.rock`` also contains all code. -It is common for the rockspec file to list the Lua version required in -a dependency. The LuaPackage class adds appropriate dependencies on a Lua -implementation, but it is a good idea to specify the version required with -a ``depends_on`` statement. The block normally will be a table definition like -this: +It is common for the rockspec file to list the Lua version required in a dependency. +The LuaPackage class adds appropriate dependencies on a Lua implementation, but it is a good idea to specify the version required with a ``depends_on`` statement. +The block normally will be a table definition like this: .. code-block:: lua @@ -62,32 +55,23 @@ this: "lua >= 5.1", } -The LuaPackage class supports source repositories and archives containing -a rockspec and directly downloading source rock files. It *does not* support -downloading dependencies listed inside a rockspec, and thus does not support -directly downloading a rockspec as an archive. +The LuaPackage class supports source repositories and archives containing a rockspec and directly downloading source rock files. +It *does not* support downloading dependencies listed inside a rockspec, and thus does not support directly downloading a rockspec as an archive. 
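+
+As an illustration, a minimal recipe for a hypothetical rockspec-providing package might look like the following sketch (the class name, URL, and checksum are placeholders, not a real package):
+
+.. code-block:: python
+
+   class LuaExample(LuaPackage):
+       """Hypothetical Lua package built from a source archive that provides a rockspec."""
+
+       homepage = "https://example.org/lua-example"
+       url = "https://example.org/lua-example-1.0.0.tar.gz"
+
+       version("1.0.0", sha256="...")  # placeholder checksum
+
+       # Mirrors a rockspec table entry such as 'dependencies = { "lua >= 5.1" }'
+       depends_on("lua-lang@5.1:5.1.99")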
-^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -All base dependencies are added by the build system, but LuaRocks is run to -avoid downloading extra Lua dependencies during build. If the package needs -Lua libraries outside the standard set, they should be added as dependencies. +All base dependencies are added by the build system, but LuaRocks is run to avoid downloading extra Lua dependencies during build. +If the package needs Lua libraries outside the standard set, they should be added as dependencies. -To specify a Lua version constraint but allow all Lua implementations, prefer -to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible -version. If the package requires LuaJit rather than Lua, -a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is -used instead of the Lua interpreter. Alternately, if only interpreted Lua will -work, ``depends_on("lua")`` will express that. +To specify a Lua version constraint but allow all Lua implementations, prefer to use ``depends_on("lua-lang@5.1:5.1.99")`` to express any 5.1 compatible version. +If the package requires LuaJit rather than Lua, a ``depends_on("luajit")`` should be used to ensure a LuaJit distribution is used instead of the Lua interpreter. +Alternately, if only interpreted Lua will work, ``depends_on("lua")`` will express that. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to luarocks make ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you need to pass any arguments to the ``luarocks make`` call, you can -override the ``luarocks_args`` method like so: +If you need to pass any arguments to the ``luarocks make`` call, you can override the ``luarocks_args`` method like so: .. code-block:: python @@ -101,9 +85,7 @@ One common use of this is to override warnings or flags for newer compilers, as def luarocks_args(self): return ["CFLAGS='-Wno-error=implicit-function-declaration'"] -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the LuaRocks build system, see: -https://luarocks.org/ +For more information on the LuaRocks build system, see: https://luarocks.org/ diff --git a/lib/spack/docs/build_systems/makefilepackage.rst b/lib/spack/docs/build_systems/makefilepackage.rst index ff9aa539ac400f..9eb0ba11bf714b 100644 --- a/lib/spack/docs/build_systems/makefilepackage.rst +++ b/lib/spack/docs/build_systems/makefilepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,16 +9,12 @@ .. _makefilepackage: --------- Makefile -------- The most primitive build system a package can use is a plain Makefile. -Makefiles are simple to write for small projects, but they usually -require you to edit the Makefile to set platform and compiler-specific -variables. +Makefiles are simple to write for small projects, but they usually require you to edit the Makefile to set platform and compiler-specific variables. -^^^^^^ Phases ^^^^^^ @@ -27,9 +24,8 @@ The ``MakefileBuilder`` and ``MakefilePackage`` base classes come with 3 phases: #. ``build`` - build the project #. ``install`` - install the project -By default, ``edit`` does nothing, but you can override it to replace -hardcoded Makefile variables. The ``build`` and ``install`` phases -run: +By default, ``edit`` does nothing, but you can override it to replace hardcoded Makefile variables. 
+The ``build`` and ``install`` phases run: .. code-block:: console @@ -37,7 +33,6 @@ run: $ make install -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ @@ -48,50 +43,38 @@ This file will be named one of the following ways: * Makefile (most common) * makefile -Some Makefiles also *include* other configuration files. Check for an -``include`` directive in the Makefile. +Some Makefiles also *include* other configuration files. +Check for an ``include`` directive in the Makefile. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -Spack assumes that the operating system will have a valid ``make`` utility -installed already, so you don't need to add a dependency on ``make``. -However, if the package uses a ``GNUmakefile`` or the developers recommend -using GNU Make, you should add a dependency on ``gmake``: +Spack assumes that the operating system will have a valid ``make`` utility installed already, so you don't need to add a dependency on ``make``. +However, if the package uses a ``GNUmakefile`` or the developers recommend using GNU Make, you should add a dependency on ``gmake``: .. code-block:: python depends_on("gmake", type="build") -^^^^^^^^^^^^^^^^^^^^^^^^^^ Types of Makefile packages ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Most of the work involved in packaging software that uses Makefiles -involves overriding or replacing hard-coded variables. Many packages -make the mistake of hard-coding compilers, usually for GCC or Intel. -This is fine if you happen to be using that particular compiler, but -Spack is designed to work with *any* compiler, and you need to ensure -that this is the case. +Most of the work involved in packaging software that uses Makefiles involves overriding or replacing hard-coded variables. +Many packages make the mistake of hard-coding compilers, usually for GCC or Intel. +This is fine if you happen to be using that particular compiler, but Spack is designed to work with *any* compiler, and you need to ensure that this is the case. -Depending on how the Makefile is designed, there are 4 common strategies -that can be used to set or override the appropriate variables: +Depending on how the Makefile is designed, there are 4 common strategies that can be used to set or override the appropriate variables: -""""""""""""""""""""" Environment variables """"""""""""""""""""" -Make has multiple types of -`assignment operators `_. -Some Makefiles use ``=`` to assign variables. The only way to override -these variables is to edit the Makefile or override them on the -command-line. However, Makefiles that use ``?=`` for assignment honor -environment variables. Since Spack already sets ``CC``, ``CXX``, ``F77``, -and ``FC``, you won't need to worry about setting these variables. If -there are any other variables you need to set, you can do this in the -``setup_build_environment`` method: +Make has multiple types of `assignment operators `_. +Some Makefiles use ``=`` to assign variables. +The only way to override these variables is to edit the Makefile or override them on the command-line. +However, Makefiles that use ``?=`` for assignment honor environment variables. +Since Spack already sets ``CC``, ``CXX``, ``F77``, and ``FC``, you won't need to worry about setting these variables. +If there are any other variables you need to set, you can do this in the ``setup_build_environment`` method: .. 
code-block:: python @@ -100,19 +83,14 @@ there are any other variables you need to set, you can do this in the env.set("BLASLIB", spec["blas"].libs.ld_flags) -`cbench `_ -is a good example of a simple package that does this, while -`esmf `_ -is a good example of a more complex package. +`cbench `_ is a good example of a simple package that does this, while `esmf `_ is a good example of a more complex package. -"""""""""""""""""""""" Command-line arguments """""""""""""""""""""" -If the Makefile ignores environment variables, the next thing to try -is command-line arguments. You can do this by overriding the -``build_targets`` attribute. If you don't need access to the spec, -you can do this like so: +If the Makefile ignores environment variables, the next thing to try is command-line arguments. +You can do this by overriding the ``build_targets`` attribute. +If you don't need access to the spec, you can do this like so: .. code-block:: python @@ -133,52 +111,41 @@ If you do need access to the spec, you can create a property like so: ] -`cloverleaf `_ -is a good example of a package that uses this strategy. +`cloverleaf `_ is a good example of a package that uses this strategy. -""""""""""""" Edit Makefile """"""""""""" -Some Makefiles are just plain stubborn and will ignore command-line -variables. The only way to ensure that these packages build correctly -is to directly edit the Makefile. Spack provides a ``FileFilter`` class -and a ``filter`` method to help with this. For example: +Some Makefiles are just plain stubborn and will ignore command-line variables. +The only way to ensure that these packages build correctly is to directly edit the Makefile. +Spack provides a ``FileFilter`` class and a ``filter`` method to help with this. +For example: .. code-block:: python def edit(self, spec, prefix): makefile = FileFilter("Makefile") - makefile.filter(r"^\s*CC\s*=.*", f"CC = {spack_cc}") + makefile.filter(r"^\s*CC\s*=.*", f"CC = {spack_cc}") makefile.filter(r"^\s*CXX\s*=.*", f"CXX = {spack_cxx}") makefile.filter(r"^\s*F77\s*=.*", f"F77 = {spack_f77}") - makefile.filter(r"^\s*FC\s*=.*", f"FC = {spack_fc}") + makefile.filter(r"^\s*FC\s*=.*", f"FC = {spack_fc}") -`stream `_ -is a good example of a package that involves editing a Makefile to set -the appropriate variables. +`stream `_ is a good example of a package that involves editing a Makefile to set the appropriate variables. -""""""""""" Config file """"""""""" -More complex packages often involve Makefiles that *include* a -configuration file. These configuration files are primarily composed -of variables relating to the compiler, platform, and the location of -dependencies or names of libraries. Since these config files are -dependent on the compiler and platform, you will often see entire -directories of examples for common compilers and architectures. Use -these examples to help determine what possible values to use. +More complex packages often involve Makefiles that *include* a configuration file. +These configuration files are primarily composed of variables relating to the compiler, platform, and the location of dependencies or names of libraries. +Since these config files are dependent on the compiler and platform, you will often see entire directories of examples for common compilers and architectures. +Use these examples to help determine what possible values to use. -If the config file is long and only contains one or two variables -that need to be modified, you can use the technique above to edit -the config file. 
However, if you end up needing to modify most of
-the variables, it may be easier to write a new file from scratch.
+However, if you end up needing to modify most of the variables, it may be easier to write a new file from scratch.

-If each variable is independent of each other, a dictionary works
-well for storing variables:
+If the variables are independent of each other, a dictionary works well for storing variables:

 .. code-block:: python

@@ -196,12 +163,9 @@ well for storing variables:
             inc.write(f"{key} = {config[key]}\n")

-`elk `_
-is a good example of a package that uses a dictionary to store
-configuration variables.
+`elk `_ is a good example of a package that uses a dictionary to store configuration variables.

-If the order of variables is important, it may be easier to store
-them in a list:
+If the order of variables is important, it may be easier to store them in a list:

 .. code-block:: python

@@ -217,86 +181,65 @@ them in a list:
             inc.write(f"{var}\n")

-`hpl `_
-is a good example of a package that uses a list to store
-configuration variables.
+`hpl `_ is a good example of a package that uses a list to store configuration variables.

-^^^^^^^^^^^^^^^^^^^^^^^^^^
 Variables to watch out for
 ^^^^^^^^^^^^^^^^^^^^^^^^^^

-The following is a list of common variables to watch out for. The first
-two sections are
-`implicit variables `_
-defined by Make and will always use the same name, while the rest are
-user-defined variables and may vary from package to package.
+The following is a list of common variables to watch out for.
+The first two sections are `implicit variables `_ defined by Make and will always use the same name, while the rest are user-defined variables and may vary from package to package.

 * **Compilers**

-  This includes variables such as ``CC``, ``CXX``, ``F77``, ``F90``,
-  and ``FC``, as well as variables related to MPI compiler wrappers,
-  like ``MPICC`` and friends.
+  This includes variables such as ``CC``, ``CXX``, ``F77``, ``F90``, and ``FC``, as well as variables related to MPI compiler wrappers, like ``MPICC`` and friends.

 * **Compiler flags**

-  This includes variables for compiler flags, such as ``CFLAGS``,
-  ``CXXFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FCFLAGS``, and ``CPPFLAGS``.
-  These variables are often hard-coded to contain flags specific to a
-  certain compiler. If these flags don't work for every compiler,
-  you may want to consider filtering them.
+  This includes variables for compiler flags, such as ``CFLAGS``, ``CXXFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FCFLAGS``, and ``CPPFLAGS``.
+  These variables are often hard-coded to contain flags specific to a certain compiler.
+  If these flags don't work for every compiler, you may want to consider filtering them.

 * **Variables that enable or disable features**

-  This includes variables like ``MPI``, ``OPENMP``, ``PIC``, and
-  ``DEBUG``. These flags often require you to create a variant
-  so that you can either build with or without MPI support, for
-  example.
+  This includes variables like ``MPI``, ``OPENMP``, ``PIC``, and ``DEBUG``.
+  These flags often require you to create a variant so that you can either build with or without MPI support, for example.
+ These flags are often compiler-dependent. + You should replace them with the appropriate compiler flags, such as ``self.compiler.openmp_flag`` or ``self.compiler.pic_flag``. * **Platform flags** - These flags control the type of architecture that the executable - is compiled for. Watch out for variables like ``PLAT`` or ``ARCH``. + These flags control the type of architecture that the executable is compiled for. + Watch out for variables like ``PLAT`` or ``ARCH``. * **Dependencies** - Look out for variables that sound like they could be used to - locate dependencies, such as ``JAVA_HOME``, ``JPEG_ROOT``, or - ``ZLIBDIR``. Also watch out for variables that control linking, - such as ``LIBS``, ``LDFLAGS``, and ``INCLUDES``. These variables - need to be set to the installation prefix of a dependency, or - to the correct linker flags to link to that dependency. + Look out for variables that sound like they could be used to locate dependencies, such as ``JAVA_HOME``, ``JPEG_ROOT``, or ``ZLIBDIR``. + Also watch out for variables that control linking, such as ``LIBS``, ``LDFLAGS``, and ``INCLUDES``. + These variables need to be set to the installation prefix of a dependency, or to the correct linker flags to link to that dependency. * **Installation prefix** - If your Makefile has an ``install`` target, it needs some way of - knowing where to install. By default, many packages install to - ``/usr`` or ``/usr/local``. Since many Spack users won't have - sudo privileges, it is imperative that each package is installed - to the proper prefix. Look for variables like ``PREFIX`` or - ``INSTALL``. + If your Makefile has an ``install`` target, it needs some way of knowing where to install. + By default, many packages install to ``/usr`` or ``/usr/local``. + Since many Spack users won't have sudo privileges, it is imperative that each package is installed to the proper prefix. + Look for variables like ``PREFIX`` or ``INSTALL``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Makefiles in a sub-directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Not every package places their Makefile in the root of the package -tarball. If the Makefile is in a sub-directory like ``src``, you -can tell Spack where to locate it like so: +Not every package places their Makefile in the root of the package tarball. +If the Makefile is in a sub-directory like ``src``, you can tell Spack where to locate it like so: .. code-block:: python build_directory = "src" -^^^^^^^^^^^^^^^^^^^ Manual installation ^^^^^^^^^^^^^^^^^^^ -Not every Makefile includes an ``install`` target. If this is the -case, you can override the default ``install`` method to manually -install the package: +Not every Makefile includes an ``install`` target. +If this is the case, you can override the default ``install`` method to manually install the package: .. code-block:: python @@ -306,9 +249,7 @@ install the package: install_tree("lib", prefix.lib) -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on reading and writing Makefiles, see: -https://www.gnu.org/software/make/manual/make.html +For more information on reading and writing Makefiles, see: https://www.gnu.org/software/make/manual/make.html diff --git a/lib/spack/docs/build_systems/mavenpackage.rst b/lib/spack/docs/build_systems/mavenpackage.rst index 7948393604c977..17583c62c3ba93 100644 --- a/lib/spack/docs/build_systems/mavenpackage.rst +++ b/lib/spack/docs/build_systems/mavenpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. 
+ Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,15 +9,12 @@ .. _mavenpackage: ------- Maven ------ -Apache Maven is a general-purpose build system that does not rely -on Makefiles to build software. It is designed for building and -managing Java-based projects. +Apache Maven is a general-purpose build system that does not rely on Makefiles to build software. +It is designed for building and managing Java-based projects. -^^^^^^ Phases ^^^^^^ @@ -33,7 +31,6 @@ By default, these phases run: $ install . -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ @@ -41,13 +38,12 @@ Maven packages can be identified by the presence of a ``pom.xml`` file. This file lists dependencies and other metadata about the project. There may also be configuration files in the ``.mvn`` directory. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -Maven requires the ``mvn`` executable to build the project. It also -requires Java at both build- and run-time. Because of this, the base -class automatically adds the following dependencies: +Maven requires the ``mvn`` executable to build the project. +It also requires Java at both build- and run-time. +Because of this, the base class automatically adds the following dependencies: .. code-block:: python @@ -67,11 +63,9 @@ In the ``pom.xml`` file, you may see sections like: -This specifies the versions of Java and Maven that are required to -build the package. See -https://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402 -for a description of this version range syntax. In this case, you -should add: +This specifies the versions of Java and Maven that are required to build the package. +See https://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402 for a description of this version range syntax. +In this case, you should add: .. code-block:: python @@ -79,27 +73,20 @@ should add: depends_on("maven@3.5.4:", type="build") -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to the build phase ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The default build and install phases should be sufficient to install -most packages. However, you may want to pass additional flags to -the build phase. For example: +The default build and install phases should be sufficient to install most packages. +However, you may want to pass additional flags to the build phase. +For example: .. code-block:: python def build_args(self): - return [ - "-Pdist,native", - "-Dtar", - "-Dmaven.javadoc.skip=true" - ] + return ["-Pdist,native", "-Dtar", "-Dmaven.javadoc.skip=true"] -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the Maven build system, see: -https://maven.apache.org/index.html +For more information on the Maven build system, see: https://maven.apache.org/index.html diff --git a/lib/spack/docs/build_systems/mesonpackage.rst b/lib/spack/docs/build_systems/mesonpackage.rst index 0cf35951f7ee0e..3c434a5ef8b539 100644 --- a/lib/spack/docs/build_systems/mesonpackage.rst +++ b/lib/spack/docs/build_systems/mesonpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,15 +9,13 @@ .. _mesonpackage: ------- Meson ------ -Much like Autotools and CMake, Meson is a build system. But it is -meant to be both fast and as user friendly as possible. 
GNOME's goal -is to port modules to use the Meson build system. +Much like Autotools and CMake, Meson is a build system. +But it is meant to be both fast and as user friendly as possible. +GNOME's goal is to port modules to use the Meson build system. -^^^^^^ Phases ^^^^^^ @@ -39,8 +38,8 @@ By default, these phases run: Any of these phases can be overridden in your package as necessary. -There is also a ``check`` method that looks for a ``test`` target -in the build file. If a ``test`` target exists and the user runs: +There is also a ``check`` method that looks for a ``test`` target in the build file. +If a ``test`` target exists and the user runs: .. code-block:: console @@ -49,16 +48,13 @@ in the build file. If a ``test`` target exists and the user runs: Spack will run ``ninja test`` after the build phase. -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -Packages that use the Meson build system can be identified by the -presence of a ``meson.build`` file. This file declares things -like build instructions and dependencies. +Packages that use the Meson build system can be identified by the presence of a ``meson.build`` file. +This file declares things like build instructions and dependencies. -One thing to look for is the ``meson_version`` key that gets passed -to the ``project`` function: +One thing to look for is the ``meson_version`` key that gets passed to the ``project`` function: .. code-block:: none :emphasize-lines: 10 @@ -79,13 +75,11 @@ to the ``project`` function: This means that Meson 0.43.0 is the earliest release that will work. You should specify this in a ``depends_on`` statement. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -At the bare minimum, packages that use the Meson build system need -``meson`` and ``ninja`` dependencies. Since this is always the case, -the ``MesonPackage`` base class already contains: +At the bare minimum, packages that use the Meson build system need ``meson`` and ``ninja`` dependencies. +Since this is always the case, the ``MesonPackage`` base class already contains: .. code-block:: python @@ -93,8 +87,7 @@ the ``MesonPackage`` base class already contains: depends_on("ninja", type="build") -If you need to specify a particular version requirement, you can -override this in your package: +If you need to specify a particular version requirement, you can override this in your package: .. code-block:: python @@ -102,24 +95,20 @@ override this in your package: depends_on("ninja", type="build") -^^^^^^^^^^^^^^^^^^^ Finding meson flags ^^^^^^^^^^^^^^^^^^^ -To get a list of valid flags that can be passed to ``meson``, run the -following command in the directory that contains ``meson.build``: +To get a list of valid flags that can be passed to ``meson``, run the following command in the directory that contains ``meson.build``: .. code-block:: console $ meson setup --help -^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to meson ^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you need to pass any arguments to the ``meson`` call, you can -override the ``meson_args`` method like so: +If you need to pass any arguments to the ``meson`` call, you can override the ``meson_args`` method like so: .. code-block:: python @@ -129,13 +118,9 @@ override the ``meson_args`` method like so: This method can be used to pass flags as well as variables. 
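+
+As a sketch, such an override could translate a Spack variant into a Meson option (the ``docs`` variant and the ``-Ddocumentation`` option here are hypothetical, project-specific names):
+
+.. code-block:: python
+
+   def meson_args(self):
+       # Map a hypothetical "docs" variant onto a project-defined Meson option
+       if self.spec.satisfies("+docs"):
+           return ["-Ddocumentation=enabled"]
+       return ["-Ddocumentation=disabled"]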
-Note that the ``MesonPackage`` base class already defines variants for -``buildtype``, ``default_library`` and ``strip``, which are mapped to default -Meson arguments, meaning that you don't have to specify these. +Note that the ``MesonPackage`` base class already defines variants for ``buildtype``, ``default_library`` and ``strip``, which are mapped to default Meson arguments, meaning that you don't have to specify these. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the Meson build system, see: -https://mesonbuild.com/index.html +For more information on the Meson build system, see: https://mesonbuild.com/index.html diff --git a/lib/spack/docs/build_systems/octavepackage.rst b/lib/spack/docs/build_systems/octavepackage.rst index 78745dd1c216f5..473fb1ba859274 100644 --- a/lib/spack/docs/build_systems/octavepackage.rst +++ b/lib/spack/docs/build_systems/octavepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,13 +9,11 @@ .. _octavepackage: ------- Octave ------ Octave has its own build system for installing packages. -^^^^^^ Phases ^^^^^^ @@ -29,27 +28,21 @@ By default, this phase runs the following command: $ octave '--eval' 'pkg prefix ; pkg install ' -Beware that uninstallation is not implemented at the moment. After uninstalling -a package via Spack, you also need to manually uninstall it from Octave via -``pkg uninstall ``. +Beware that uninstallation is not implemented at the moment. +After uninstalling a package via Spack, you also need to manually uninstall it from Octave via ``pkg uninstall ``. -^^^^^^^^^^^^^^^^^^^^^^^ Finding Octave packages ^^^^^^^^^^^^^^^^^^^^^^^ Most Octave packages are listed at https://octave.sourceforge.io/packages.php. -^^^^^^^^^^^^ Dependencies ^^^^^^^^^^^^ -Usually, the homepage of a package will list dependencies, i.e., -``Dependencies: Octave >= 3.6.0 struct >= 1.0.12``. The same information should -be available in the ``DESCRIPTION`` file in the root of each archive. +Usually, the homepage of a package will list dependencies, i.e., ``Dependencies: Octave >= 3.6.0 struct >= 1.0.12``. +The same information should be available in the ``DESCRIPTION`` file in the root of each archive. -^^^^^^^^^^^^^^^^^^^^^^ External Documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the Octave build system, see: -https://octave.org/doc/v4.4.0/Installing-and-Removing-Packages.html +For more information on the Octave build system, see: https://octave.org/doc/v4.4.0/Installing-and-Removing-Packages.html diff --git a/lib/spack/docs/build_systems/perlpackage.rst b/lib/spack/docs/build_systems/perlpackage.rst index de97322bc40542..8d2e261ef424f3 100644 --- a/lib/spack/docs/build_systems/perlpackage.rst +++ b/lib/spack/docs/build_systems/perlpackage.rst @@ -1,21 +1,53 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) .. meta:: :description lang=en: - A guide to using the Perl build system in Spack for installing Perl modules. + A guide to packaging Perl modules with Spack, covering when to add a package and build system integration. .. _perlpackage: ------- Perl ------ -Much like Octave, Perl has its own language-specific -build system. +Much like Octave, Perl has its own language-specific build system. 
+This documentation includes information on when **not** to add a Spack package for a Perl module.
+
+
+.. _suitable_perl_modules:
+
+Suitable Modules
+^^^^^^^^^^^^^^^^
+
+In general, modules that are part of the standard Perl installation should not be added to Spack.
+A possible exception is if the module was not part of the standard installation for earlier versions of ``perl`` that are still listed in the package, which you can check by running ``spack info perl``.
+
+How do you know if the module is in the standard Perl installation?
+You check if it is included in the ``CORE`` by entering the following on the command line:
+
+.. code-block:: console
+
+   $ corelist <module>
+
+where ``<module>`` is case sensitive.
+
+Examples of outputs for modules that are and are not in the ``CORE`` using perl v5.42.0 are:
+
+.. code-block:: console
+
+   $ corelist Carp
+
+   Data for 2025-07-02
+   Carp was first released with perl 5
+
+   $ corelist XML::Writer
+
+   Data for 2025-07-02
+   XML::Writer was not in CORE (or so I think)
+

-^^^^^^
 Phases
 ^^^^^^
@@ -27,13 +59,11 @@ The ``PerlBuilder`` and ``PerlPackage`` base classes come with three phases that

 Perl packages have two common modules used for module installation:

-"""""""""""""""""""""""
 ``ExtUtils::MakeMaker``
 """""""""""""""""""""""

-The ``ExtUtils::MakeMaker`` module is just what it sounds like, a module
-designed to generate Makefiles. It can be identified by the presence of
-a ``Makefile.PL`` file, and has the following installation steps:
+The ``ExtUtils::MakeMaker`` module is just what it sounds like, a module designed to generate Makefiles.
+It can be identified by the presence of a ``Makefile.PL`` file, and has the following installation steps:

 .. code-block:: console

@@ -43,13 +73,11 @@ a ``Makefile.PL`` file, and has the following installation steps:
    $ make install

-"""""""""""""""""
 ``Module::Build``
 """""""""""""""""

-The ``Module::Build`` module is a pure-Perl build system, and can be
-identified by the presence of a ``Build.PL`` file. It has the following
-installation steps:
+The ``Module::Build`` module is a pure-Perl build system, and can be identified by the presence of a ``Build.PL`` file.
+It has the following installation steps:

 .. code-block:: console

@@ -59,79 +87,63 @@ installation steps:
    $ ./Build install

-If both ``Makefile.PL`` and ``Build.PL`` files exist in the package,
-Spack will use ``Makefile.PL`` by default. If your package uses a
-different module, ``PerlPackage`` will need to be extended to support
-it.
+If both ``Makefile.PL`` and ``Build.PL`` files exist in the package, Spack will use ``Makefile.PL`` by default.
+If your package uses a different module, ``PerlPackage`` will need to be extended to support it.

-``PerlPackage`` automatically detects which build steps to use, so there
-shouldn't be much work on the package developer's side to get things
-working.
+``PerlPackage`` automatically detects which build steps to use, so there shouldn't be much work on the package developer's side to get things working.

-^^^^^^^^^^^^^^^^^^^^^
 Finding Perl packages
 ^^^^^^^^^^^^^^^^^^^^^

-Most Perl modules are hosted on CPAN, the Comprehensive Perl Archive
-Network. If you need to find a package for ``XML::Parser``, for example,
-you should search for "CPAN XML::Parser".
+Most Perl modules are hosted on CPAN, the Comprehensive Perl Archive Network.
+If you need to find a package for ``XML::Parser``, for example, you should search for "CPAN XML::Parser".
+Just make sure that the module is not included in the ``CORE`` (see :ref:`suitable_perl_modules`). + +Some CPAN pages are versioned. +Check for a link to the "Latest Release" to make sure you have the latest version. -Some CPAN pages are versioned. Check for a link to the -"Latest Release" to make sure you have the latest version. -^^^^^^^^^^^^ Package name ^^^^^^^^^^^^ -When you use ``spack create`` to create a new Perl package, Spack will -automatically prepend ``perl-`` to the front of the package name. This -helps to keep Perl modules separate from other packages. The same -naming scheme is used for other language extensions, like Python and R. +When you use ``spack create`` to create a new Perl package, Spack will automatically prepend ``perl-`` to the front of the package name. +This helps to keep Perl modules separate from other packages. +The same naming scheme is used for other language extensions, like Python and R. +See :ref:`creating-and-editing-packages` for more information on the command. -^^^^^^^^^^^ Description ^^^^^^^^^^^ -Most CPAN pages have a short description under "NAME" and a longer -description under "DESCRIPTION". Use whichever you think is more -useful while still being succinct. +Most CPAN pages have a short description under "NAME" and a longer description under "DESCRIPTION". +Use whichever you think is more useful while still being succinct. -^^^^^^^^ Homepage ^^^^^^^^ -In the top-right corner of the CPAN page, you'll find a "permalink" -for the package. This should be used instead of the current URL, as -it doesn't contain the version number and will always link to the -latest release. +In the top-right corner of the CPAN page, you'll find a "permalink" for the package. +This should be used instead of the current URL, as it doesn't contain the version number and will always link to the latest release. -^^^^^^ URL ^^^^^^ -If you haven't found it already, the download URL is on the right -side of the page below the permalink. Search for "Download". +If you haven't found it already, the download URL is on the right side of the page below the permalink. +Search for "Download". -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -Every ``PerlPackage`` obviously depends on Perl at build and run-time, -so ``PerlPackage`` contains: +Every ``PerlPackage`` obviously depends on Perl at build and run-time, so ``PerlPackage`` contains: .. code-block:: python extends("perl") -If your package requires a specific version of Perl, you should -specify this. +If your package requires a specific version of Perl, you should specify this. -Although newer versions of Perl include ``ExtUtils::MakeMaker`` and -``Module::Build`` as "core" modules, you may want to add dependencies -on ``perl-extutils-makemaker`` and ``perl-module-build`` anyway. Many -people add Perl as an external package, and we want the build to work -properly. If your package uses ``Makefile.PL`` to build, add: +Although newer versions of Perl include ``ExtUtils::MakeMaker`` and ``Module::Build`` as "core" modules, you may want to add dependencies on ``perl-extutils-makemaker`` and ``perl-module-build`` anyway. +Many people add Perl as an external package, and we want the build to work properly. +If your package uses ``Makefile.PL`` to build, add: .. 
code-block:: python @@ -145,25 +157,19 @@ If your package uses ``Build.PL`` to build, add: depends_on("perl-module-build", type="build") -^^^^^^^^^^^^^^^^^ Perl dependencies ^^^^^^^^^^^^^^^^^ -Below the download URL, you will find a "Dependencies" link, which -takes you to a page listing all of the dependencies of the package. -Packages listed as "Core module" don't need to be added as dependencies, -but all direct dependencies should be added. Don't add dependencies of -dependencies. These should be added as dependencies to the dependency, -not to your package. +Below the download URL, you will find a "Dependencies" link, which takes you to a page listing all of the dependencies of the package. +Packages listed as "Core module" don't need to be added as dependencies, but all direct dependencies should be added. +Don't add dependencies of dependencies. +These should be added as dependencies to the dependency, not to your package. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to configure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Packages that have non-Perl dependencies often use command-line -variables to specify their installation directory. You can pass -arguments to ``Makefile.PL`` or ``Build.PL`` by overriding -``configure_args`` like so: +Packages that have non-Perl dependencies often use command-line variables to specify their installation directory. +You can pass arguments to ``Makefile.PL`` or ``Build.PL`` by overriding ``configure_args`` like so: .. code-block:: python @@ -176,19 +182,15 @@ arguments to ``Makefile.PL`` or ``Build.PL`` by overriding ] -^^^^^^^ Testing ^^^^^^^ -``PerlPackage`` provides a simple stand-alone test of the successfully -installed package to confirm that installed Perl module(s) can be used. -These tests can be performed any time after the installation using -``spack -v test run``. (For more information on the command, see -:ref:`cmd-spack-test-run`.) +``PerlPackage`` provides a simple stand-alone test of the successfully installed package to confirm that installed Perl module(s) can be used. +These tests can be performed any time after the installation using ``spack -v test run``. +(For more information on the command, see :ref:`cmd-spack-test-run`.) -The base class automatically detects Perl modules based on the presence -of ``*.pm`` files under the package's library directory. For example, -the files under ``perl-bignum``'s Perl library are: +The base class automatically detects Perl modules based on the presence of ``*.pm`` files under the package's library directory. +For example, the files under ``perl-bignum``'s Perl library are: .. code-block:: console @@ -220,36 +222,31 @@ which results in the package having the ``use_modules`` property containing: This list can often be used to catch missing dependencies. -If the list is somehow wrong, you can provide the names of the modules -yourself by overriding ``use_modules`` like so: +If the list is somehow wrong, you can provide the names of the modules yourself by overriding ``use_modules`` like so: - .. code-block:: python +.. code-block:: python - use_modules = ["bigfloat", "bigrat", "bigint", "bignum"] + use_modules = ["bigfloat", "bigrat", "bigint", "bignum"] -If you only want a subset of the automatically detected modules to be -tested, you could instead define the ``skip_modules`` property on the -package. 
So, instead of overriding ``use_modules`` as shown above, you -could define the following: +If you only want a subset of the automatically detected modules to be tested, you could instead define the ``skip_modules`` property on the package. +So, instead of overriding ``use_modules`` as shown above, you could define the following: - .. code-block:: python +.. code-block:: python - skip_modules = [ - "Math::BigFloat::Trace", - "Math::BigInt::Trace", - "Math::BigRat::Trace", - ] + skip_modules = [ + "Math::BigFloat::Trace", + "Math::BigInt::Trace", + "Math::BigRat::Trace", + ] for the same use tests. -^^^^^^^^^^^^^^^^^^^^^ Alternatives to Spack ^^^^^^^^^^^^^^^^^^^^^ -If you need to maintain a stack of Perl modules for a user and don't -want to add all of them to Spack, a good alternative is ``cpanm``. -If Perl is already installed on your system, it should come with a -``cpan`` executable. To install ``cpanm``, run the following command: +If you need to maintain a stack of Perl modules for a user and don't want to add all of them to Spack, a good alternative is ``cpanm``. +If Perl is already installed on your system, it should come with a ``cpan`` executable. +To install ``cpanm``, run the following command: .. code-block:: console @@ -265,15 +262,11 @@ Now, you can install any Perl module you want by running: Obviously, these commands can only be run if you have root privileges. Furthermore, ``cpanm`` is not capable of installing non-Perl dependencies. -If you need to install to your home directory or need to install a module -with non-Perl dependencies, Spack is a better option. +If you need to install to your home directory or need to install a module with non-Perl dependencies, Spack is a better option. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -You can find more information on installing Perl modules from source -at: http://www.perlmonks.org/?node_id=128077 +You can find more information on installing Perl modules from source at: http://www.perlmonks.org/?node_id=128077 -More generic Perl module installation instructions can be found at: -http://www.cpan.org/modules/INSTALL.html +More generic Perl module installation instructions can be found at: http://www.cpan.org/modules/INSTALL.html diff --git a/lib/spack/docs/build_systems/pythonpackage.rst b/lib/spack/docs/build_systems/pythonpackage.rst index 752c30b95784f1..18ce8576acf6a3 100644 --- a/lib/spack/docs/build_systems/pythonpackage.rst +++ b/lib/spack/docs/build_systems/pythonpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,78 +9,59 @@ .. _pythonpackage: ------- Python ------ -Python packages and modules have their own special build system. This -documentation covers everything you'll need to know in order to write -a Spack build recipe for a Python library. +Python packages and modules have their own special build system. +This documentation covers everything you'll need to know in order to write a Spack build recipe for a Python library. -^^^^^^^^^^^ Terminology ^^^^^^^^^^^ -In the Python ecosystem, there are a number of terms that are -important to understand. +In the Python ecosystem, there are a number of terms that are important to understand. **PyPI** - The `Python Package Index `_, where most Python - libraries are hosted. + The `Python Package Index `_, where most Python libraries are hosted. 
**sdist** - Source distributions, distributed as tarballs (.tar.gz) and zip - files (.zip). Contain the source code of the package. + Source distributions, distributed as tarballs (.tar.gz) and zip files (.zip). + Contain the source code of the package. **bdist** - Built distributions, distributed as wheels (.whl). Contain the - pre-built library. + Built distributions, distributed as wheels (.whl). + Contain the pre-built library. **wheel** - A binary distribution format common in the Python ecosystem. This - file is actually just a zip file containing specific metadata and - code. See the - `documentation `_ - for more details. + A binary distribution format common in the Python ecosystem. + This file is actually just a zip file containing specific metadata and code. + See the `documentation `_ for more details. **build frontend** - Command-line tools used to build and install wheels. Examples - include `pip `_, - `build `_, and - `installer `_. + Command-line tools used to build and install wheels. + Examples include `pip `_, `build `_, and `installer `_. **build backend** - Libraries used to define how to build a wheel. Examples - include `setuptools `__, - `flit `_, - `poetry `_, - `hatchling `_, - `meson `_, and - `pdm `_. + Libraries used to define how to build a wheel. + Examples include `setuptools `__, `flit `_, `poetry `_, `hatchling `_, `meson `_, and `pdm `_. -^^^^^^^^^^^ Downloading ^^^^^^^^^^^ -The first step in packaging a Python library is to figure out where -to download it from. The vast majority of Python packages are hosted -on `PyPI `_, which is -:ref:`preferred over GitHub ` for downloading -packages. Search for the package name on PyPI to find the project -page. The project page is usually located at: +The first step in packaging a Python library is to figure out where to download it from. +The vast majority of Python packages are hosted on `PyPI `_, which is :ref:`preferred over GitHub ` for downloading packages. +Search for the package name on PyPI to find the project page. +The project page is usually located at: .. code-block:: text https://pypi.org/project/ -On the project page, there is a "Download files" tab containing -download URLs. Whenever possible, we prefer to build Spack packages -from source. If PyPI only has wheels, check to see if the project is -hosted on GitHub and see if GitHub has source distributions. The -project page usually has a "Homepage" and/or "Source code" link for -this. If the project is closed-source, it may only have wheels -available. For example, ``py-azureml-sdk`` is closed-source and can -be downloaded from: +On the project page, there is a "Download files" tab containing download URLs. +Whenever possible, we prefer to build Spack packages from source. +If PyPI only has wheels, check to see if the project is hosted on GitHub and see if GitHub has source distributions. +The project page usually has a "Homepage" and/or "Source code" link for this. +If the project is closed-source, it may only have wheels available. +For example, ``py-azureml-sdk`` is closed-source and can be downloaded from: .. code-block:: text @@ -96,71 +78,53 @@ to create a new package template. .. _pypi-vs-github: -""""""""""""""" PyPI vs. GitHub """"""""""""""" -Many packages are hosted on PyPI, but are developed on GitHub or -another version control system hosting service. 
The source code can -be downloaded from either location, but PyPI is preferred for the -following reasons: +Many packages are hosted on PyPI, but are developed on GitHub or another version control system hosting service. +The source code can be downloaded from either location, but PyPI is preferred for the following reasons: -#. PyPI contains the bare minimum number of files needed to install - the package. +#. PyPI contains the bare minimum number of files needed to install the package. - You may notice that the tarball you download from PyPI does not - have the same checksum as the tarball you download from GitHub. - When a developer uploads a new release to PyPI, it doesn't contain - every file in the repository, only the files necessary to install - the package. PyPI tarballs are therefore smaller. + You may notice that the tarball you download from PyPI does not have the same checksum as the tarball you download from GitHub. + When a developer uploads a new release to PyPI, it doesn't contain every file in the repository, only the files necessary to install the package. + PyPI tarballs are therefore smaller. #. PyPI is the official source for package managers like ``pip``. - Let's be honest, ``pip`` is much more popular than Spack. If the - GitHub tarball contains a file not present in the PyPI tarball that - causes a bug, the developers may not realize this for quite some - time. If the bug was in a file contained in the PyPI tarball, users - would notice the bug much more quickly. + Let's be honest, ``pip`` is much more popular than Spack. + If the GitHub tarball contains a file not present in the PyPI tarball that causes a bug, the developers may not realize this for quite some time. + If the bug was in a file contained in the PyPI tarball, users would notice the bug much more quickly. #. GitHub release may be a beta version. - When a developer releases a new version of a package on GitHub, - it may not be intended for most users. Until that release also - makes its way to PyPI, it should be assumed that the release is - not yet ready for general use. + When a developer releases a new version of a package on GitHub, it may not be intended for most users. + Until that release also makes its way to PyPI, it should be assumed that the release is not yet ready for general use. #. The checksum for a GitHub release may change. - Unfortunately, some developers have a habit of patching releases - without incrementing the version number. This results in a change - in tarball checksum. Package managers like Spack that use checksums - to verify the integrity of a download tarball grind to a halt when - the checksum for a known version changes. Most of the time, the - change is intentional, and contains a needed bug fix. However, - sometimes the change indicates a download source that has been - compromised, and a tarball that contains a virus. If this happens, - you must contact the developers to determine which is the case. - PyPI is nice because it makes it physically impossible to - re-release the same version of a package with a different checksum. - -The only reason to use GitHub instead of PyPI is if PyPI only has -wheels or if the PyPI sdist is missing a file needed to build the -package. If this is the case, please add a comment above the ``url`` -explaining this. + Unfortunately, some developers have a habit of patching releases without incrementing the version number. + This results in a change in tarball checksum. 
+ Package managers like Spack that use checksums to verify the integrity of a download tarball grind to a halt when the checksum for a known version changes. + Most of the time, the change is intentional, and contains a needed bug fix. + However, sometimes the change indicates a download source that has been compromised, and a tarball that contains a virus. + If this happens, you must contact the developers to determine which is the case. + PyPI is nice because it makes it physically impossible to re-release the same version of a package with a different checksum. + +The only reason to use GitHub instead of PyPI is if PyPI only has wheels or if the PyPI sdist is missing a file needed to build the package. +If this is the case, please add a comment above the ``url`` explaining this. -^^^^^^ PyPI ^^^^^^ -Since PyPI is so commonly used to host Python libraries, the -``PythonPackage`` base class has a ``pypi`` attribute that can be -set. Once set, ``pypi`` will be used to define the ``homepage``, -``url``, and ``list_url``. For example, the following: +Since PyPI is so commonly used to host Python libraries, the ``PythonPackage`` base class has a ``pypi`` attribute that can be set. +Once set, ``pypi`` will be used to define the ``homepage``, ``url``, and ``list_url``. +For example, the following: .. code-block:: python homepage = "https://pypi.org/project/setuptools/" - url = "https://pypi.org/packages/source/s/setuptools/setuptools-49.2.0.zip" + url = "https://pypi.org/packages/source/s/setuptools/setuptools-49.2.0.zip" list_url = "https://pypi.org/simple/setuptools/" @@ -171,32 +135,25 @@ is equivalent to: pypi = "setuptools/setuptools-49.2.0.zip" -If a package has a different homepage listed on PyPI, you can -override it by setting your own ``homepage``. +If a package has a different homepage listed on PyPI, you can override it by setting your own ``homepage``. -^^^^^^^^^^^ Description ^^^^^^^^^^^ -The top of the PyPI project page contains a short description of the -package. The "Project description" tab may also contain a longer -description of the package. Either of these can be used to populate -the package docstring. +The top of the PyPI project page contains a short description of the package. +The "Project description" tab may also contain a longer description of the package. +Either of these can be used to populate the package docstring. -^^^^^^^^^^^^ Dependencies ^^^^^^^^^^^^ -Once you've determined the basic metadata for a package, the next -step is to determine the build backend. ``PythonPackage`` uses -`pip `_ to install the package, but pip -requires a backend to actually build the package. +Once you've determined the basic metadata for a package, the next step is to determine the build backend. +``PythonPackage`` uses `pip `_ to install the package, but pip requires a backend to actually build the package. To determine the build backend, look for a ``pyproject.toml`` file. -If there is no ``pyproject.toml`` file and only a ``setup.py`` or -``setup.cfg`` file, you can assume that the project uses -:ref:`setuptools`. If there is a ``pyproject.toml`` file, see if it -contains a ``[build-system]`` section. For example: +If there is no ``pyproject.toml`` file and only a ``setup.py`` or ``setup.cfg`` file, you can assume that the project uses :ref:`setuptools`. +If there is a ``pyproject.toml`` file, see if it contains a ``[build-system]`` section. +For example: .. code-block:: toml @@ -208,28 +165,20 @@ contains a ``[build-system]`` section. 
For example: build-backend = "setuptools.build_meta" -This section does two things: the ``requires`` key lists build -dependencies of the project, and the ``build-backend`` key defines -the build backend. All of these build dependencies should be added as -dependencies to your package: +This section does two things: the ``requires`` key lists build dependencies of the project, and the ``build-backend`` key defines the build backend. +All of these build dependencies should be added as dependencies to your package: .. code-block:: python depends_on("py-setuptools@42:", type="build") -Note that ``py-wheel`` is already listed as a build dependency in the -``PythonPackage`` base class, so you don't need to add it unless you -need to specify a specific version requirement or change the -dependency type. +Note that ``py-wheel`` is already listed as a build dependency in the ``PythonPackage`` base class, so you don't need to add it unless you need to specify a specific version requirement or change the dependency type. -See `PEP 517 `__ and -`PEP 518 `_ for more -information on the design of ``pyproject.toml``. +See `PEP 517 `__ and `PEP 518 `_ for more information on the design of ``pyproject.toml``. -Depending on which build backend a project uses, there are various -places that run-time dependencies can be listed. Most modern build -backends support listing dependencies directly in ``pyproject.toml``. +Depending on which build backend a project uses, there are various places that run-time dependencies can be listed. +Most modern build backends support listing dependencies directly in ``pyproject.toml``. Look for dependencies under the following keys: * ``requires-python`` under ``[project]`` @@ -238,46 +187,36 @@ Look for dependencies under the following keys: * ``dependencies`` under ``[project]`` - These packages are required for building and installation. You can - add them with ``type=("build", "run")``. + These packages are required for building and installation. + You can add them with ``type=("build", "run")``. * ``[project.optional-dependencies]`` - This section includes keys with lists of optional dependencies - needed to enable those features. You should add a variant that - optionally adds these dependencies. This variant should be ``False`` - by default. + This section includes keys with lists of optional dependencies needed to enable those features. + You should add a variant that optionally adds these dependencies. + This variant should be ``False`` by default. -Some build backends may have additional locations where dependencies -can be found. +Some build backends may have additional locations where dependencies can be found. -""""""""" distutils """"""""" -Before the introduction of setuptools and other build backends, -Python packages had to rely on the built-in distutils library. -Distutils is missing many of the features that setuptools and other -build backends offer, and users are encouraged to use setuptools -instead. In fact, distutils was deprecated in Python 3.10 and will be -removed in Python 3.12. Because of this, pip actually replaces all -imports of distutils with setuptools. If a package uses distutils, -you should instead add a build dependency on setuptools. Check for a -``requirements.txt`` file that may list dependencies of the project. +Before the introduction of setuptools and other build backends, Python packages had to rely on the built-in distutils library. 
+Distutils is missing many of the features that setuptools and other build backends offer, and users are encouraged to use setuptools instead. +In fact, distutils was deprecated in Python 3.10 and removed in Python 3.12. +Because of this, pip actually replaces all imports of distutils with setuptools. +If a package uses distutils, you should instead add a build dependency on setuptools. +Check for a ``requirements.txt`` file that may list dependencies of the project. .. _setuptools: -"""""""""" setuptools """""""""" -If the ``pyproject.toml`` lists ``setuptools.build_meta`` as a -``build-backend``, or if the package has a ``setup.py`` that imports -``setuptools``, or if the package has a ``setup.cfg`` file, then it -uses setuptools to build. Setuptools is a replacement for the -distutils library, and has almost the exact same API. In addition to -``pyproject.toml``, dependencies can be listed in the ``setup.py`` or -``setup.cfg`` file. Look for the following arguments: +If the ``pyproject.toml`` lists ``setuptools.build_meta`` as a ``build-backend``, or if the package has a ``setup.py`` that imports ``setuptools``, or if the package has a ``setup.cfg`` file, then it uses setuptools to build. +Setuptools is a replacement for the distutils library, and has almost exactly the same API. +In addition to ``pyproject.toml``, dependencies can be listed in the ``setup.py`` or ``setup.cfg`` file. +Look for the following arguments: * ``python_requires`` @@ -285,123 +224,98 @@ distutils library, and has almost the exact same API. In addition to * ``setup_requires`` - These packages are usually only needed at build-time, so you can - add them with ``type="build"``. + These packages are usually only needed at build-time, so you can add them with ``type="build"``. * ``install_requires`` - These packages are required for building and installation. You can - add them with ``type=("build", "run")``. + These packages are required for building and installation. + You can add them with ``type=("build", "run")``. * ``extras_require`` - These packages are optional dependencies that enable additional - functionality. You should add a variant that optionally adds these - dependencies. This variant should be ``False`` by default. + These packages are optional dependencies that enable additional functionality. + You should add a variant that optionally adds these dependencies. + This variant should be ``False`` by default. * ``tests_require`` - These are packages that are required to run the unit tests for the - package. These dependencies can be specified using the - ``type="test"`` dependency type. However, the PyPI tarballs rarely - contain unit tests, so there is usually no reason to add these. + These are packages that are required to run the unit tests for the package. + These dependencies can be specified using the ``type="test"`` dependency type. + However, the PyPI tarballs rarely contain unit tests, so there is usually no reason to add these. -See https://setuptools.pypa.io/en/latest/userguide/dependency_management.html -for more information on how setuptools handles dependency management. -See `PEP 440 <https://peps.python.org/pep-0440/>`_ -for documentation on version specifiers in setuptools. +See https://setuptools.pypa.io/en/latest/userguide/dependency_management.html for more information on how setuptools handles dependency management. +See `PEP 440 <https://peps.python.org/pep-0440/>`_ for documentation on version specifiers in setuptools. -"""""" flit """""" -There are actually two possible build backends for flit, ``flit`` -and ``flit_core``.
If you see these in the ``pyproject.toml``, add a -build dependency to your package. With flit, all dependencies are -listed directly in the ``pyproject.toml`` file. Older versions of -flit used to store this info in a ``flit.ini`` file, so check for -this too. +There are actually two possible build backends for flit, ``flit`` and ``flit_core``. +If you see these in the ``pyproject.toml``, add a build dependency to your package. +With flit, all dependencies are listed directly in the ``pyproject.toml`` file. +Older versions of flit used to store this info in a ``flit.ini`` file, so check for this too. -In addition to the default ``pyproject.toml`` keys listed above, -older versions of flit may use the following keys: +In addition to the default ``pyproject.toml`` keys listed above, older versions of flit may use the following keys: * ``requires`` under ``[tool.flit.metadata]`` - These packages are required for building and installation. You can - add them with ``type=("build", "run")``. + These packages are required for building and installation. + You can add them with ``type=("build", "run")``. * ``[tool.flit.metadata.requires-extra]`` - This section includes keys with lists of optional dependencies - needed to enable those features. You should add a variant that - optionally adds these dependencies. This variant should be ``False`` - by default. + This section includes keys with lists of optional dependencies needed to enable those features. + You should add a variant that optionally adds these dependencies. + This variant should be ``False`` by default. -See https://flit.pypa.io/en/latest/pyproject_toml.html for -more information. +See https://flit.pypa.io/en/latest/pyproject_toml.html for more information. -"""""" poetry """""" -Like flit, poetry also has two possible build backends, ``poetry`` -and ``poetry_core``. If you see these in the ``pyproject.toml``, add -a build dependency to your package. With poetry, all dependencies are -listed directly in the ``pyproject.toml`` file. Dependencies are -listed in a ``[tool.poetry.dependencies]`` section, and use a -`custom syntax `_ -for specifying the version requirements. Note that ``~=`` works -differently in poetry than in setuptools and flit for versions that -start with a zero. +Like flit, poetry also has two possible build backends, ``poetry`` and ``poetry_core``. +If you see these in the ``pyproject.toml``, add a build dependency to your package. +With poetry, all dependencies are listed directly in the ``pyproject.toml`` file. +Dependencies are listed in a ``[tool.poetry.dependencies]`` section, and use a `custom syntax `_ for specifying the version requirements. +Note that ``~=`` works differently in poetry than in setuptools and flit for versions that start with a zero. -""""""""" hatchling """"""""" -If the ``pyproject.toml`` lists ``hatchling.build`` as the -``build-backend``, it uses the hatchling build system. Hatchling -uses the default ``pyproject.toml`` keys to list dependencies. +If the ``pyproject.toml`` lists ``hatchling.build`` as the ``build-backend``, it uses the hatchling build system. +Hatchling uses the default ``pyproject.toml`` keys to list dependencies. -See https://hatch.pypa.io/latest/config/dependency/ for more -information. +See https://hatch.pypa.io/latest/config/dependency/ for more information. -"""""" meson """""" -If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``, -it uses the meson build system. Meson uses the default -``pyproject.toml`` keys to list dependencies. 
+If the ``pyproject.toml`` lists ``mesonpy`` as the ``build-backend``, it uses the meson build system. +Meson uses the default ``pyproject.toml`` keys to list dependencies. -See https://meson-python.readthedocs.io/en/latest/tutorials/introduction.html -for more information. +See https://meson-python.readthedocs.io/en/latest/tutorials/introduction.html for more information. -"""""" pdm """""" -If the ``pyproject.toml`` lists ``pdm.pep517.api`` as the ``build-backend``, -it uses the PDM build system. PDM uses the default ``pyproject.toml`` -keys to list dependencies. +If the ``pyproject.toml`` lists ``pdm.pep517.api`` as the ``build-backend``, it uses the PDM build system. +PDM uses the default ``pyproject.toml`` keys to list dependencies. See https://pdm.fming.dev/latest/ for more information. -"""""" wheels """""" -Some Python packages are closed-source and are distributed as Python -wheels. For example, ``py-azureml-sdk`` downloads a ``.whl`` file. This -file is simply a zip file, and can be extracted using: +Some Python packages are closed-source and are distributed as Python wheels. +For example, ``py-azureml-sdk`` downloads a ``.whl`` file. +This file is simply a zip file, and can be extracted using: .. code-block:: console $ unzip *.whl -The zip file will not contain a ``setup.py``, but it will contain a -``METADATA`` file which contains all the information you need to -write a ``package.py`` build recipe. Check for lines like: +The zip file will not contain a ``setup.py``, but it will contain a ``METADATA`` file which contains all the information you need to write a ``package.py`` build recipe. +Check for lines like: .. code-block:: text @@ -417,34 +331,27 @@ write a ``package.py`` build recipe. Check for lines like: Requires-Dist: azureml-train-automl (~=1.11.0); extra == 'automl' -``Requires-Python`` is equivalent to ``python_requires`` and -``Requires-Dist`` is equivalent to ``install_requires``. -``Provides-Extra`` is used to name optional features (variants) and -a ``Requires-Dist`` with ``extra == 'foo'`` will list any -dependencies needed for that feature. +``Requires-Python`` is equivalent to ``python_requires`` and ``Requires-Dist`` is equivalent to ``install_requires``. +``Provides-Extra`` is used to name optional features (variants) and a ``Requires-Dist`` with ``extra == 'foo'`` will list any dependencies needed for that feature. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to setup.py ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The default install phase should be sufficient to install most -packages. However, the installation instructions for a package may -suggest passing certain flags to the ``setup.py`` call. The -``PythonPackage`` class has two techniques for doing this. +The default install phase should be sufficient to install most packages. +However, the installation instructions for a package may suggest passing certain flags to the ``setup.py`` call. +The ``PythonPackage`` class has two techniques for doing this. -""""""""""""""" Config settings """"""""""""""" -These settings are passed to -`PEP 517 <https://peps.python.org/pep-0517/>`__ build backends. -For example, ``py-scipy`` package allows you to specify the name of -the BLAS/LAPACK library you want pkg-config to search for: +These settings are passed to `PEP 517 <https://peps.python.org/pep-0517/>`__ build backends. +For example, the ``py-scipy`` package allows you to specify the name of the BLAS/LAPACK library you want pkg-config to search for: ..
code-block:: python depends_on("py-pip@22.1:", type="build") + def config_settings(self, spec, prefix): return { "blas": spec["blas"].libs.names[0], @@ -454,19 +361,16 @@ the BLAS/LAPACK library you want pkg-config to search for: .. note:: - This flag only works for packages that define a ``build-backend`` - in ``pyproject.toml``. Also, it is only supported by pip 22.1+, - which requires Python 3.7+. For packages that still support Python - 3.6 and older, ``install_options`` should be used instead. + This flag only works for packages that define a ``build-backend`` in ``pyproject.toml``. + Also, it is only supported by pip 22.1+, which requires Python 3.7+. + For packages that still support Python 3.6 and older, ``install_options`` should be used instead. -"""""""""""""" Global options """""""""""""" -These flags are added directly after ``setup.py`` when pip runs -``python setup.py install``. For example, the ``py-pyyaml`` package -has an optional dependency on ``libyaml`` that can be enabled like so: +These flags are added directly after ``setup.py`` when pip runs ``python setup.py install``. +For example, the ``py-pyyaml`` package has an optional dependency on ``libyaml`` that can be enabled like so: .. code-block:: python @@ -481,73 +385,60 @@ has an optional dependency on ``libyaml`` that can be enabled like so: .. note:: - Direct invocation of ``setup.py`` is - `deprecated `_. + Direct invocation of ``setup.py`` is `deprecated `_. This flag forces pip to use a deprecated installation procedure. - It should only be used in packages that don't define a - ``build-backend`` in ``pyproject.toml`` or packages that still - support Python 3.6 and older. + It should only be used in packages that don't define a ``build-backend`` in ``pyproject.toml`` or packages that still support Python 3.6 and older. -""""""""""""""" Install options """"""""""""""" -These flags are added directly after ``install`` when pip runs -``python setup.py install``. For example, the ``py-pyyaml`` package -allows you to specify the directories to search for ``libyaml``: +These flags are added directly after ``install`` when pip runs ``python setup.py install``. +For example, the ``py-pyyaml`` package allows you to specify the directories to search for ``libyaml``: .. code-block:: python def install_options(self, spec, prefix): options = [] if spec.satisfies("+libyaml"): - options.extend([ - spec["libyaml"].libs.search_flags, - spec["libyaml"].headers.include_flags, - ]) + options.extend( + [ + spec["libyaml"].libs.search_flags, + spec["libyaml"].headers.include_flags, + ] + ) return options .. note:: - Direct invocation of ``setup.py`` is - `deprecated `_. + Direct invocation of ``setup.py`` is `deprecated `_. This flag forces pip to use a deprecated installation procedure. - It should only be used in packages that don't define a - ``build-backend`` in ``pyproject.toml`` or packages that still - support Python 3.6 and older. + It should only be used in packages that don't define a ``build-backend`` in ``pyproject.toml`` or packages that still support Python 3.6 and older. -^^^^^^^ Testing ^^^^^^^ -``PythonPackage`` provides a couple of options for testing packages -both during and after the installation process. +``PythonPackage`` provides a couple of options for testing packages both during and after the installation process. -"""""""""""" Import tests """""""""""" -Just because a package successfully built does not mean that it built -correctly. 
The most reliable test of whether or not the package was -correctly installed is to attempt to import all of the modules that -get installed. To get a list of modules, run the following command -in the source directory: +Just because a package successfully built does not mean that it built correctly. +The most reliable test of whether or not the package was correctly installed is to attempt to import all of the modules that get installed. +To get a list of modules, run the following command in the source directory: -.. code-block:: console +.. code-block:: pycon - $ python >>> import setuptools >>> setuptools.find_packages() ['numpy', 'numpy._build_utils', 'numpy.compat', 'numpy.core', 'numpy.distutils', 'numpy.doc', 'numpy.f2py', 'numpy.fft', 'numpy.lib', 'numpy.linalg', 'numpy.ma', 'numpy.matrixlib', 'numpy.polynomial', 'numpy.random', 'numpy.testing', 'numpy.core.code_generators', 'numpy.distutils.command', 'numpy.distutils.fcompiler'] -Large, complex packages like ``numpy`` will return a long list of -packages, while other packages like ``six`` will return an empty list. -``py-six`` installs a single ``six.py`` file. In Python packaging lingo, -a "package" is a directory containing files like: +Large, complex packages like ``numpy`` will return a long list of packages, while other packages like ``six`` will return an empty list. +``py-six`` installs a single ``six.py`` file. +In Python packaging lingo, a "package" is a directory containing files like: .. code-block:: none @@ -558,22 +449,18 @@ a "package" is a directory containing files like: whereas a "module" is a single Python file. -The ``PythonPackage`` base class automatically detects these package -and module names for you. If, for whatever reason, the module names -detected are wrong, you can provide the names yourself by overriding -``import_modules`` like so: +The ``PythonPackage`` base class automatically detects these package and module names for you. +If, for whatever reason, the module names detected are wrong, you can provide the names yourself by overriding ``import_modules`` like so: .. code-block:: python import_modules = ["six"] -Sometimes the list of module names to import depends on how the -package was built. For example, the ``py-pyyaml`` package has a -``+libyaml`` variant that enables the build of a faster optimized -version of the library. If the user chooses ``~libyaml``, only the -``yaml`` library will be importable. If the user chooses ``+libyaml``, -both the ``yaml`` and ``yaml.cyaml`` libraries will be available. +Sometimes the list of module names to import depends on how the package was built. +For example, the ``py-pyyaml`` package has a ``+libyaml`` variant that enables the build of a faster optimized version of the library. +If the user chooses ``~libyaml``, only the ``yaml`` library will be importable. +If the user chooses ``+libyaml``, both the ``yaml`` and ``yaml.cyaml`` libraries will be available. This can be expressed like so: .. code-block:: python @@ -586,17 +473,11 @@ This can be expressed like so: return modules -These tests often catch missing dependencies and non-RPATHed -libraries. Make sure not to add modules/packages containing the word -"test", as these likely won't end up in the installation directory, -or may require test dependencies like pytest to be installed. +These tests often catch missing dependencies and non-RPATHed libraries. 
+Make sure not to add modules/packages containing the word "test", as these likely won't end up in the installation directory, or may require test dependencies like pytest to be installed. -Instead of defining the ``import_modules`` explicitly, only the subset -of module names to be skipped can be defined by using ``skip_modules``. -If a defined module has submodules, they are skipped as well, e.g., -in case the ``plotting`` modules should be excluded from the -automatically detected ``import_modules`` ``["nilearn", "nilearn.surface", -"nilearn.plotting", "nilearn.plotting.data"]`` set: +Instead of defining the ``import_modules`` explicitly, only the subset of module names to be skipped can be defined by using ``skip_modules``. +If a defined module has submodules, they are skipped as well, e.g., if the ``plotting`` modules should be excluded from the automatically detected ``import_modules`` set ``["nilearn", "nilearn.surface", "nilearn.plotting", "nilearn.plotting.data"]``: .. code-block:: python @@ -604,20 +485,15 @@ automatically detected ``import_modules`` ``["nilearn", "nilearn.surface", This will set ``import_modules`` to ``["nilearn", "nilearn.surface"]``. -Import tests can be run during the installation using ``spack install ---test=root`` or at any time after the installation using -``spack test run``. +Import tests can be run during the installation using ``spack install --test=root`` or at any time after the installation using ``spack test run``. -"""""""""" Unit tests """""""""" -The package may have its own unit or regression tests. Spack can -run these tests during the installation by adding test methods after -installation. +The package may have its own unit or regression tests. +Spack can run these tests during the installation by adding test methods after installation. -For example, ``py-numpy`` adds the following as a check to run -after the ``install`` phase: +For example, ``py-numpy`` adds the following as a check to run after the ``install`` phase: .. code-block:: python @@ -628,58 +504,46 @@ after the ``install`` phase: python("-c", "import numpy; numpy.test('full', verbose=2)") -when testing is enabled during the installation (i.e., ``spack install ---test=root``). +when testing is enabled during the installation (i.e., ``spack install --test=root``). .. note:: - Additional information is available on :ref:`install phase tests - `. + Additional information is available on :ref:`install phase tests `. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setup file in a sub-directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Many C/C++ libraries provide optional Python bindings in a -subdirectory. To tell pip which directory to build from, you can -override the ``build_directory`` attribute. For example, if a package -provides Python bindings in a ``python`` directory, you can use: +Many C/C++ libraries provide optional Python bindings in a subdirectory. +To tell pip which directory to build from, you can override the ``build_directory`` attribute. +For example, if a package provides Python bindings in a ``python`` directory, you can use: .. code-block:: python build_directory = "python" -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PythonPackage vs. packages that use Python ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are many packages that make use of Python, but packages that depend -on Python are not necessarily ``PythonPackage``'s. +There are many packages that make use of Python, but packages that depend on Python are not necessarily ``PythonPackage`` subclasses.
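+For instance, here is a minimal sketch of the distinction (``Libfoo`` and ``PyBar`` are hypothetical packages, not real Spack recipes):
+
+.. code-block:: python
+
+   # A C library that merely runs a Python script at build time:
+   # an ordinary package with a build-only Python dependency.
+   class Libfoo(CMakePackage):
+       depends_on("python", type="build")
+
+
+   # A library that installs modules into site-packages: a PythonPackage.
+   # (The base class already declares extends("python") for you.)
+   class PyBar(PythonPackage):
+       pypi = "bar/bar-1.0.tar.gz"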
-""""""""""""""""""""""" Choosing a build system """"""""""""""""""""""" -First of all, you need to select a build system. ``spack create`` -usually does this for you, but if for whatever reason you need to do -this manually, choose ``PythonPackage`` if and only if the package -contains one of the following files: +First of all, you need to select a build system. +``spack create`` usually does this for you, but if for whatever reason you need to do this manually, choose ``PythonPackage`` if and only if the package contains one of the following files: * ``pyproject.toml`` * ``setup.py`` * ``setup.cfg`` -""""""""""""""""""""""" Choosing a package name """"""""""""""""""""""" -Selecting the appropriate package name is a little more complicated -than choosing the build system. By default, ``spack create`` will -prepend ``py-`` to the beginning of the package name if it detects -that the package uses the ``PythonPackage`` build system. However, there -are occasionally packages that use ``PythonPackage`` that shouldn't -start with ``py-``. For example: +Selecting the appropriate package name is a little more complicated than choosing the build system. +By default, ``spack create`` will prepend ``py-`` to the beginning of the package name if it detects that the package uses the ``PythonPackage`` build system. +However, there are occasionally packages that use ``PythonPackage`` that shouldn't start with ``py-``. +For example: * awscli * aws-parallelcluster @@ -690,15 +554,11 @@ start with ``py-``. For example: * scons * snakemake -The thing these packages have in common is that they are command-line -tools that just so happen to be written in Python. Someone who wants -to install ``mercurial`` with Spack isn't going to realize that it is -written in Python, and they certainly aren't going to assume the package -is called ``py-mercurial``. For this reason, we manually renamed the -package to ``mercurial``. +The thing these packages have in common is that they are command-line tools that just so happen to be written in Python. +Someone who wants to install ``mercurial`` with Spack isn't going to realize that it is written in Python, and they certainly aren't going to assume the package is called ``py-mercurial``. +For this reason, we manually renamed the package to ``mercurial``. -Likewise, there are occasionally packages that don't use the -``PythonPackage`` build system but should still be prepended with ``py-``. +Likewise, there are occasionally packages that don't use the ``PythonPackage`` build system but should still be prepended with ``py-``. For example: * py-genders @@ -710,9 +570,8 @@ For example: * py-sip * py-xpyb -These packages are primarily used as Python libraries, not as -command-line tools. You may see C/C++ packages that have optional -Python language bindings, such as: +These packages are primarily used as Python libraries, not as command-line tools. +You may see C/C++ packages that have optional Python language bindings, such as: * antlr * cantera @@ -720,40 +579,29 @@ Python language bindings, such as: * pagmo * vtk -Don't prepend these kinds of packages with ``py-``. When in doubt, -think about how this package will be used. Is it primarily a Python -library that will be imported in other Python scripts? Or is it a -command-line tool, or C/C++/Fortran program with optional Python -modules? The former should be prepended with ``py-``, while the -latter should not. +Don't prepend these kinds of packages with ``py-``. +When in doubt, think about how this package will be used. 
+Is it primarily a Python library that will be imported in other Python scripts? +Or is it a command-line tool, or C/C++/Fortran program with optional Python modules? +The former should be prepended with ``py-``, while the latter should not. -"""""""""""""""""""""""""""""" ``extends`` vs. ``depends_on`` """""""""""""""""""""""""""""" -As mentioned in the :ref:`Packaging Guide `, -``extends`` and ``depends_on`` are very similar, but ``extends`` ensures -that the extension and extendee share the same prefix in views. -This allows the user to import a Python module without -having to add that module to ``PYTHONPATH``. +As mentioned in the :ref:`Packaging Guide `, ``extends`` and ``depends_on`` are very similar, but ``extends`` ensures that the extension and extendee share the same prefix in views. +This allows the user to import a Python module without having to add that module to ``PYTHONPATH``. -Additionally, ``extends("python")`` adds a dependency on the package -``python-venv``. This improves isolation from the system, whether -it's during the build or at runtime: user and system site packages -cannot accidentally be used by any package that ``extends("python")``. +Additionally, ``extends("python")`` adds a dependency on the package ``python-venv``. +This improves isolation from the system, whether it's during the build or at runtime: user and system site packages cannot accidentally be used by any package that ``extends("python")``. -As a rule of thumb: if a package does not install any Python modules -of its own, and merely puts a Python script in the ``bin`` directory, -then there is no need for ``extends``. If the package installs modules -in the ``site-packages`` directory, it requires ``extends``. +As a rule of thumb: if a package does not install any Python modules of its own, and merely puts a Python script in the ``bin`` directory, then there is no need for ``extends``. +If the package installs modules in the ``site-packages`` directory, it requires ``extends``. -""""""""""""""""""""""""""""""""""""" Executing ``python`` during the build """"""""""""""""""""""""""""""""""""" -Whenever you need to execute a Python command or pass the path of the -Python interpreter to the build system, it is best to use the global -variable ``python`` directly. For example: +Whenever you need to execute a Python command or pass the path of the Python interpreter to the build system, it is best to use the global variable ``python`` directly. +For example: .. code-block:: python @@ -761,37 +609,24 @@ variable ``python`` directly. For example: def recythonize(self): python("setup.py", "clean") # use the `python` global -As mentioned in the previous section, ``extends("python")`` adds an -automatic dependency on ``python-venv``, which is a virtual environment -that guarantees build isolation. The ``python`` global always refers to -the correct Python interpreter, whether the package uses ``extends("python")`` -or ``depends_on("python")``. +As mentioned in the previous section, ``extends("python")`` adds an automatic dependency on ``python-venv``, which is a virtual environment that guarantees build isolation. +The ``python`` global always refers to the correct Python interpreter, whether the package uses ``extends("python")`` or ``depends_on("python")``. -^^^^^^^^^^^^^^^^^^^^^ Alternatives to Spack ^^^^^^^^^^^^^^^^^^^^^ -PyPI has hundreds of thousands of packages that are not yet in Spack, -and ``pip`` may be a perfectly valid alternative to using Spack. 
The -main advantage of Spack over ``pip`` is its ability to compile -non-Python dependencies. It can also build cythonized versions of a -package or link to an optimized BLAS/LAPACK library like MKL, -resulting in calculations that run orders of magnitude faster. -Spack does not offer a significant advantage over other Python-management -systems for installing and using tools like flake8 and sphinx. -But if you need packages with non-Python dependencies like -numpy and scipy, Spack will be very valuable to you. - -Anaconda is another great alternative to Spack, and comes with its own -``conda`` package manager. Like Spack, Anaconda is capable of compiling -non-Python dependencies. Anaconda contains many Python packages that -are not yet in Spack, and Spack contains many Python packages that are -not yet in Anaconda. The main advantage of Spack over Anaconda is its -ability to choose a specific compiler and BLAS/LAPACK or MPI library. -Spack also has better platform support for supercomputers, and can build -optimized binaries for your specific microarchitecture. +PyPI has hundreds of thousands of packages that are not yet in Spack, and ``pip`` may be a perfectly valid alternative to using Spack. +The main advantage of Spack over ``pip`` is its ability to compile non-Python dependencies. +It can also build cythonized versions of a package or link to an optimized BLAS/LAPACK library like MKL, resulting in calculations that run orders of magnitude faster. +Spack does not offer a significant advantage over other Python-management systems for installing and using tools like flake8 and sphinx. +But if you need packages with non-Python dependencies like numpy and scipy, Spack will be very valuable to you. + +Anaconda is another great alternative to Spack, and comes with its own ``conda`` package manager. +Like Spack, Anaconda is capable of compiling non-Python dependencies. +Anaconda contains many Python packages that are not yet in Spack, and Spack contains many Python packages that are not yet in Anaconda. +The main advantage of Spack over Anaconda is its ability to choose a specific compiler and BLAS/LAPACK or MPI library. +Spack also has better platform support for supercomputers, and can build optimized binaries for your specific microarchitecture. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/lib/spack/docs/build_systems/qmakepackage.rst b/lib/spack/docs/build_systems/qmakepackage.rst index 730c6d75a8be4f..ce5ca5e7a80893 100644 --- a/lib/spack/docs/build_systems/qmakepackage.rst +++ b/lib/spack/docs/build_systems/qmakepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,13 +9,11 @@ .. _qmakepackage: ------- QMake ------ -Much like Autotools and CMake, QMake is a build-script generator -designed by the developers of Qt. In its simplest form, Spack's -``QMakePackage`` runs the following steps: +Much like Autotools and CMake, QMake is a build-script generator designed by the developers of Qt. +In its simplest form, Spack's ``QMakePackage`` runs the following steps: .. code-block:: console @@ -24,19 +23,12 @@ designed by the developers of Qt. In its simplest form, Spack's $ make install -QMake does not appear to have a standardized way of specifying -the installation directory, so you may have to set environment -variables or edit ``*.pro`` files to get things working properly. 
+QMake does not appear to have a standardized way of specifying the installation directory, so you may have to set environment variables or edit ``*.pro`` files to get things working properly. -QMake packages will depend on the virtual ``qmake`` package which -is provided by multiple versions of Qt: ``qt`` provides Qt up to -Qt5, and ``qt-base`` provides Qt from version Qt6 onwards. This -split was motivated by the desire to split the single Qt package -into its components to allow for more fine-grained installation. -To depend on a specific version, refer to the documentation on -:ref:`virtual-dependencies`. +QMake packages will depend on the virtual ``qmake`` package which is provided by multiple versions of Qt: ``qt`` provides Qt up to Qt5, and ``qt-base`` provides Qt from version Qt6 onwards. +This split was motivated by the desire to split the single Qt package into its components to allow for more fine-grained installation. +To depend on a specific version, refer to the documentation on :ref:`virtual-dependencies`. -^^^^^^ Phases ^^^^^^ @@ -56,8 +48,8 @@ By default, these phases run: Any of these phases can be overridden in your package as necessary. -There is also a ``check`` method that looks for a ``check`` target -in the Makefile. If a ``check`` target exists and the user runs: +There is also a ``check`` method that looks for a ``check`` target in the Makefile. +If a ``check`` target exists and the user runs: .. code-block:: console @@ -66,13 +58,11 @@ in the Makefile. If a ``check`` target exists and the user runs: Spack will run ``make check`` after the build phase. -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -Packages that use the QMake build system can be identified by the -presence of a ``.pro`` file. This file declares things -like build instructions and dependencies. +Packages that use the QMake build system can be identified by the presence of a ``.pro`` file. +This file declares things like build instructions and dependencies. One thing to look for is the ``minQtVersion`` function: @@ -84,32 +74,27 @@ One thing to look for is the ``minQtVersion`` function: This means that Qt 5.6.0 is the earliest release that will work. You should specify this in a ``depends_on`` statement. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -At the bare minimum, packages that use the QMake build system need a -``qt`` dependency. Since this is always the case, the ``QMakePackage`` -base class already contains: +At the bare minimum, packages that use the QMake build system need a ``qt`` dependency. +Since this is always the case, the ``QMakePackage`` base class already contains: .. code-block:: python depends_on("qt", type="build") -If you want to specify a particular version requirement, or need to -link to the ``qt`` libraries, you can override this in your package: +If you want to specify a particular version requirement, or need to link to the ``qt`` libraries, you can override this in your package: .. code-block:: python depends_on("qt@5.6.0:") -^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to qmake ^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you need to pass any arguments to the ``qmake`` call, you can -override the ``qmake_args`` method like so: +If you need to pass any arguments to the ``qmake`` call, you can override the ``qmake_args`` method like so: .. code-block:: python @@ -119,22 +104,17 @@ override the ``qmake_args`` method like so: This method can be used to pass flags as well as variables. 
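+As a minimal sketch (the flag and variable below are illustrative, not taken from a real package):
+
+.. code-block:: python
+
+   def qmake_args(self):
+       # Pass a configuration flag and a qmake variable (illustrative values)
+       return ["-config", "release", f"PREFIX={self.prefix}"]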
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``*.pro`` file in a sub-directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If the ``*.pro`` file used to tell QMake how to build the package is -found in a sub-directory, you can tell Spack to run all phases in this -sub-directory by adding the following to the package: +If the ``*.pro`` file used to tell QMake how to build the package is found in a sub-directory, you can tell Spack to run all phases in this sub-directory by adding the following to the package: .. code-block:: python build_directory = "src" -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the QMake build system, see: -http://doc.qt.io/qt-5/qmake-manual.html +For more information on the QMake build system, see: http://doc.qt.io/qt-5/qmake-manual.html diff --git a/lib/spack/docs/build_systems/racketpackage.rst b/lib/spack/docs/build_systems/racketpackage.rst index 37820b093335d2..b7469537896825 100644 --- a/lib/spack/docs/build_systems/racketpackage.rst +++ b/lib/spack/docs/build_systems/racketpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,42 +9,30 @@ .. _racketpackage: ------- Racket ------ Much like Python, Racket packages and modules have their own special build system. -To learn more about the specifics of the Racket package system, please refer to the -`Racket Docs `_. +To learn more about the specifics of the Racket package system, please refer to the `Racket Docs `_. -^^^^^^ Phases ^^^^^^ -The ``RacketBuilder`` and ``RacketPackage`` base classes provide an ``install`` phase that -can be overridden, corresponding to the use of: +The ``RacketBuilder`` and ``RacketPackage`` base classes provide an ``install`` phase that can be overridden, corresponding to the use of: .. code-block:: console $ raco pkg install -^^^^^^^ Caveats ^^^^^^^ -In principle, ``raco`` supports a second, ``setup`` phase; however, we have not -implemented this separately, as in normal circumstances, ``install`` also handles -running ``setup`` automatically. +In principle, ``raco`` supports a second, ``setup`` phase; however, we have not implemented this separately, as in normal circumstances, ``install`` also handles running ``setup`` automatically. -Unlike Python, Racket currently only supports two installation scopes for packages, user -or system, and keeps a registry of installed packages at each scope in its configuration files. -This means we can't simply compose a "``RACKET_PATH``" environment variable listing all of the -places packages are installed, and update this at will. +Unlike Python, Racket currently only supports two installation scopes for packages, user or system, and keeps a registry of installed packages at each scope in its configuration files. +This means we can't simply compose a "``RACKET_PATH``" environment variable listing all of the places packages are installed, and update this at will. -Unfortunately, this means that all currently installed packages which extend Racket via ``raco pkg install`` -are accessible whenever Racket is accessible. +Unfortunately, this means that all currently installed packages which extend Racket via ``raco pkg install`` are accessible whenever Racket is accessible. 
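+To see which packages ``raco`` currently knows about, and at which scope, you can ask it directly:
+
+.. code-block:: console
+
+   $ raco pkg show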
-Additionally, because Spack does not implement uninstall hooks, uninstalling a Spack ``rkt-`` package -will have no effect on the ``raco`` installed packages visible to your Racket installation. -Instead, you must manually run ``raco pkg remove`` to keep the two package managers in a mutually -consistent state. +Additionally, because Spack does not implement uninstall hooks, uninstalling a Spack ``rkt-`` package will have no effect on the ``raco`` installed packages visible to your Racket installation. +Instead, you must manually run ``raco pkg remove`` to keep the two package managers in a mutually consistent state. diff --git a/lib/spack/docs/build_systems/rocmpackage.rst b/lib/spack/docs/build_systems/rocmpackage.rst index e3317c68a2b345..a591f68756d0f9 100644 --- a/lib/spack/docs/build_systems/rocmpackage.rst +++ b/lib/spack/docs/build_systems/rocmpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,18 +9,14 @@ .. _rocmpackage: ------- ROCm ------ -The ``ROCmPackage`` is not a build system but a helper package. Like ``CudaPackage``, -it provides standard variants, dependencies, and conflicts to facilitate building -packages targeting AMD GPUs. +The ``ROCmPackage`` is not a build system but a helper package. +Like ``CudaPackage``, it provides standard variants, dependencies, and conflicts to facilitate building packages targeting AMD GPUs. -You can find the source for this package (and suggestions for setting up your ``packages.yaml`` file) at -``__. +You can find the source for this package (and suggestions for setting up your ``packages.yaml`` file) at ``__. -^^^^^^^^ Variants ^^^^^^^^ @@ -33,91 +30,71 @@ This package provides the following variants: * **amdgpu_target** This variant supports the optional specification of the AMD GPU architecture. - Valid values are the names of the GPUs (e.g., ``gfx701``), which are maintained - in the ``amdgpu_targets`` property. + Valid values are the names of the GPUs (e.g., ``gfx701``), which are maintained in the ``amdgpu_targets`` property. -^^^^^^^^^^^^ Dependencies ^^^^^^^^^^^^ This package defines basic ROCm dependencies, including ``llvm`` and ``hip``. -^^^^^^^^^ Conflicts ^^^^^^^^^ -Conflicts are used to prevent builds with known bugs or issues. This package -already requires that the ``amdgpu_target`` always be specified for ROCm -builds. It also defines a conflict that prevents builds with an ``amdgpu_target`` -when ``rocm`` is disabled. +Conflicts are used to prevent builds with known bugs or issues. +This package already requires that the ``amdgpu_target`` always be specified for ROCm builds. +It also defines a conflict that prevents builds with an ``amdgpu_target`` when ``rocm`` is disabled. -Refer to `Conflicts `__ -for more information on package conflicts. +Refer to :ref:`packaging_conflicts` for more information on package conflicts. -^^^^^^^ Methods ^^^^^^^ -This package provides one custom helper method, which is used to build -standard AMD HIP compiler flags. +This package provides one custom helper method, which is used to build standard AMD HIP compiler flags. **hip_flags** + This built-in static method returns the appropriately formatted ``--amdgpu-target`` build option for ``hipcc``. - This built-in static method returns the appropriately formatted - ``--amdgpu-target`` build option for ``hipcc``. 
- - This method must be explicitly called when you are creating the - arguments for your build in order to use the values. + This method must be explicitly called when you are creating the arguments for your build in order to use the values. -^^^^^^ Usage ^^^^^^ -This helper package can be added to your package by adding it as a base -class of your package. For example, you can add it to your -:ref:`CMakePackage `-based package as follows: +This helper package can be added to your package by adding it as a base class of your package. +For example, you can add it to your :ref:`CMakePackage `-based package as follows: .. code-block:: python - :emphasize-lines: 1,3-7,14-25 - - class MyRocmPackage(CMakePackage, ROCmPackage): - ... - # Ensure +rocm and amdgpu_targets are passed to dependencies - depends_on("mydeppackage", when="+rocm") - for val in ROCmPackage.amdgpu_targets: - depends_on(f"mydeppackage amdgpu_target={val}", - when=f"amdgpu_target={val}") - ... - - def cmake_args(self): - spec = self.spec - args = [] - ... - if spec.satisfies("+rocm"): - # Set up the HIP macros needed by the build - args.extend([ - "-DENABLE_HIP=ON", - f"-DHIP_ROOT_DIR={spec['hip'].prefix}"]) - rocm_archs = spec.variants["amdgpu_target"].value - if "none" not in rocm_archs: - args.append(f"-DHIP_HIPCC_FLAGS=--amdgpu-target={','.join(rocm_archs)}") - else: - # Ensure build with HIP is disabled - args.append("-DENABLE_HIP=OFF") - ... - return args - ... - -assuming only the ``ENABLE_HIP``, ``HIP_ROOT_DIR``, and ``HIP_HIPCC_FLAGS`` -macros are required to be set and the only dependency needing ROCm options -is ``mydeppackage``. You will need to customize the flags as needed for your -build. - -This example also illustrates how to check for the ``rocm`` variant using -``self.spec`` and how to retrieve the ``amdgpu_target`` variant's value -using ``self.spec.variants["amdgpu_target"].value``. - -All five packages using ``ROCmPackage`` as of January 2021 also use the -:ref:`CudaPackage `. So, it is worth looking at those packages -to get ideas for creating a package that can support both ``cuda`` and -``rocm``. + :emphasize-lines: 1,3-6,13-21 + + class MyRocmPackage(CMakePackage, ROCmPackage): + ... + # Ensure +rocm and amdgpu_targets are passed to dependencies + depends_on("mydeppackage", when="+rocm") + for val in ROCmPackage.amdgpu_targets: + depends_on(f"mydeppackage amdgpu_target={val}", when=f"amdgpu_target={val}") + ... + + def cmake_args(self): + spec = self.spec + args = [] + ... + if spec.satisfies("+rocm"): + # Set up the HIP macros needed by the build + args.extend(["-DENABLE_HIP=ON", f"-DHIP_ROOT_DIR={spec['hip'].prefix}"]) + rocm_archs = spec.variants["amdgpu_target"].value + if "none" not in rocm_archs: + args.append(f"-DHIP_HIPCC_FLAGS=--amdgpu-target={','.join(rocm_archs)}") + else: + # Ensure build with HIP is disabled + args.append("-DENABLE_HIP=OFF") + ... + return args + + ... + +assuming only the ``ENABLE_HIP``, ``HIP_ROOT_DIR``, and ``HIP_HIPCC_FLAGS`` macros are required to be set and the only dependency needing ROCm options is ``mydeppackage``. +You will need to customize the flags as needed for your build. + +This example also illustrates how to check for the ``rocm`` variant using ``self.spec`` and how to retrieve the ``amdgpu_target`` variant's value using ``self.spec.variants["amdgpu_target"].value``. + +All five packages using ``ROCmPackage`` as of January 2021 also use the :ref:`CudaPackage `. 
+So, it is worth looking at those packages to get ideas for creating a package that can support both ``cuda`` and ``rocm``. diff --git a/lib/spack/docs/build_systems/rpackage.rst b/lib/spack/docs/build_systems/rpackage.rst index 37f4b3e951bfae..8e0273d251e492 100644 --- a/lib/spack/docs/build_systems/rpackage.rst +++ b/lib/spack/docs/build_systems/rpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,17 +9,14 @@ .. _rpackage: ------- R ------ Like Python, R has its own built-in build system. The R build system is remarkably uniform and well-tested. -This makes it one of the easiest build systems to create -new Spack packages for. +This makes it one of the easiest build systems to create new Spack packages for. -^^^^^^ Phases ^^^^^^ @@ -33,36 +31,27 @@ By default, this phase runs the following command: $ R CMD INSTALL --library=/path/to/installation/prefix/rlib/R/library . -^^^^^^^^^^^^^^^^^^ Finding R packages ^^^^^^^^^^^^^^^^^^ -The vast majority of R packages are hosted on CRAN - The Comprehensive -R Archive Network. If you are looking for a particular R package, search -for "CRAN <package name>" and you should quickly find what you want. +The vast majority of R packages are hosted on CRAN, the Comprehensive R Archive Network. +If you are looking for a particular R package, search for "CRAN <package name>" and you should quickly find what you want. If it isn't on CRAN, try Bioconductor, another common R repository. -For the purposes of this tutorial, we will be walking through -`r-caret <https://cloud.r-project.org/package=caret>`_ -as an example. If you search for "CRAN caret", you will quickly find what -you are looking for at https://cran.r-project.org/package=caret. -https://cran.r-project.org is the main CRAN website. However, CRAN also -has a https://cloud.r-project.org site that automatically redirects to -`mirrors around the world <https://cloud.r-project.org/mirrors.html>`_. +For the purposes of this tutorial, we will be walking through `r-caret <https://cloud.r-project.org/package=caret>`_ as an example. +If you search for "CRAN caret", you will quickly find what you are looking for at https://cran.r-project.org/package=caret. https://cran.r-project.org is the main CRAN website. +However, CRAN also has a https://cloud.r-project.org site that automatically redirects to `mirrors around the world <https://cloud.r-project.org/mirrors.html>`_. For stability and performance reasons, we will use https://cloud.r-project.org/package=caret. -If you search for "Package source", you will find the download URL for -the latest release. Use this URL with ``spack create`` to create a new -package. +If you search for "Package source", you will find the download URL for the latest release. +Use this URL with ``spack create`` to create a new package. -^^^^^^^^^^^^ Package name ^^^^^^^^^^^^ -The first thing you'll notice is that Spack prepends ``r-`` to the front -of the package name. This is how Spack separates R extensions -from the rest of the packages in Spack. Without this, we would end up -with package name collisions more frequently than we would like. For -instance, there are already packages for both: +The first thing you'll notice is that Spack prepends ``r-`` to the front of the package name. +This is how Spack separates R extensions from the rest of the packages in Spack. +Without this, we would end up with package name collisions more frequently than we would like.
+For instance, there are already packages for both: * ``ape`` and ``r-ape`` * ``curl`` and ``r-curl`` @@ -72,10 +61,8 @@ instance, there are already packages for both: * ``uuid`` and ``r-uuid`` * ``xts`` and ``r-xts`` -Many popular programs written in C/C++ are later ported to R as a -separate project. +Many popular programs written in C/C++ are later ported to R as a separate project. -^^^^^^^^^^^ Description ^^^^^^^^^^^ @@ -86,12 +73,10 @@ The top of the homepage for ``caret`` lists the following description: Misc functions for training and plotting classification and regression models. -The first line is a short description (title) and the second line is a long -description. In this case the description is only one line but often the -description is several lines. Spack makes use of both short and long -descriptions and convention is to use both when creating an R package. +The first line is a short description (title) and the second line is a long description. +In this case the description is only one line but often the description is several lines. +Spack makes use of both short and long descriptions and convention is to use both when creating an R package. -^^^^^^^^ Homepage ^^^^^^^^ @@ -101,68 +86,46 @@ If you look at the bottom of the page, you'll see: Please use the canonical form https://CRAN.R-project.org/package=caret to link to this page. -Please uphold the wishes of the CRAN admins and use -https://cloud.r-project.org/package=caret as the homepage instead of -https://cloud.r-project.org/web/packages/caret/index.html. The latter may -change without notice. +Please uphold the wishes of the CRAN admins and use https://cloud.r-project.org/package=caret as the homepage instead of https://cloud.r-project.org/web/packages/caret/index.html. +The latter may change without notice. -^^^^^^ URL ^^^^^^ -As previously mentioned, the download URL for the latest release can be -found by searching "Package source" on the homepage. +As previously mentioned, the download URL for the latest release can be found by searching "Package source" on the homepage. -^^^^^^^^ List URL ^^^^^^^^ -CRAN maintains a single webpage containing the latest release of every -single package: https://cloud.r-project.org/src/contrib/ +CRAN maintains a single webpage containing the latest release of every single package: https://cloud.r-project.org/src/contrib/ -Of course, as soon as a new release comes out, the version you were using -in your package is no longer available at that URL. It is moved to an -archive directory. If you search for "Old sources", you will find: -https://cloud.r-project.org/src/contrib/Archive/caret +Of course, as soon as a new release comes out, the version you were using in your package is no longer available at that URL. +It is moved to an archive directory. +If you search for "Old sources", you will find: https://cloud.r-project.org/src/contrib/Archive/caret -If you only specify the URL for the latest release, your package will -no longer be able to fetch that version as soon as a new release comes -out. To get around this, add the archive directory as a ``list_url``. +If you only specify the URL for the latest release, your package will no longer be able to fetch that version as soon as a new release comes out. +To get around this, add the archive directory as a ``list_url``. -^^^^^^^^^^^^^^^^^^^^^ Bioconductor packages ^^^^^^^^^^^^^^^^^^^^^ -Bioconductor packages are set up in a similar way to CRAN packages, but there -are some very important distinctions. 
Bioconductor packages can be found at: -https://bioconductor.org/. Bioconductor packages are R packages and so follow -the same packaging scheme as CRAN packages. What is different is that -Bioconductor itself is versioned and released. This scheme, using the -Bioconductor package installer, allows further specification of the minimum -version of R as well as further restrictions on the dependencies between -packages than what is possible with the native R packaging system. Spack cannot -replicate these extra features and thus Bioconductor packages in Spack need -to be managed as a group during updates in order to maintain package -consistency with Bioconductor itself. - -Another key difference is that, while previous versions of packages are -available, they are not available from a site that can be programmatically set, -thus a ``list_url`` attribute cannot be used. However, each package is also -available in a git repository, with branches corresponding to each Bioconductor -release. Thus, it is always possible to retrieve the version of any package -corresponding to a Bioconductor release simply by fetching the branch that -corresponds to the Bioconductor release of the package repository. For this -reason, Spack Bioconductor R packages use the git repository, with the commit -of the respective branch used in the ``version()`` attribute of the package. +Bioconductor packages are set up in a similar way to CRAN packages, but there are some very important distinctions. +Bioconductor packages can be found at: https://bioconductor.org/. +Bioconductor packages are R packages and so follow the same packaging scheme as CRAN packages. +What is different is that Bioconductor itself is versioned and released. +This scheme, using the Bioconductor package installer, allows further specification of the minimum version of R as well as further restrictions on the dependencies between packages than what is possible with the native R packaging system. +Spack cannot replicate these extra features and thus Bioconductor packages in Spack need to be managed as a group during updates in order to maintain package consistency with Bioconductor itself. + +Another key difference is that, while previous versions of packages are available, they are not available from a site that can be programmatically set, thus a ``list_url`` attribute cannot be used. +However, each package is also available in a git repository, with branches corresponding to each Bioconductor release. +Thus, it is always possible to retrieve the version of any package corresponding to a Bioconductor release simply by fetching the branch that corresponds to the Bioconductor release of the package repository. +For this reason, Spack Bioconductor R packages use the git repository, with the commit of the respective branch used in the ``version()`` attribute of the package. -^^^^^^^^^^^^^^^^^^^^^^^^ cran and bioc attributes ^^^^^^^^^^^^^^^^^^^^^^^^ -Much like the ``pypi`` attribute for Python packages, due to the fact that R -packages are obtained from specific repositories, it is possible to set up shortcut -attributes that can be used to set ``homepage``, ``url``, ``list_url``, and -``git``. For example, the following ``cran`` attribute: +Much like the ``pypi`` attribute for Python packages, due to the fact that R packages are obtained from specific repositories, it is possible to set up shortcut attributes that can be used to set ``homepage``, ``url``, ``list_url``, and ``git``. +For example, the following ``cran`` attribute: .. 
code-block:: python @@ -173,7 +136,7 @@ is equivalent to: .. code-block:: python homepage = "https://cloud.r-project.org/package=caret" - url = "https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz" + url = "https://cloud.r-project.org/src/contrib/caret_6.0-86.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/caret" Likewise, the following ``bioc`` attribute: @@ -187,27 +150,24 @@ is equivalent to: .. code-block:: python homepage = "https://bioconductor.org/packages/BiocVersion/" - git = "https://git.bioconductor.org/packages/BiocVersion" + git = "https://git.bioconductor.org/packages/BiocVersion" -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -As an extension of the R ecosystem, your package will obviously depend -on R to build and run. Normally, we would use ``depends_on`` to express -this, but for R packages, we use ``extends``. This implies a special -dependency on R, which is used to set environment variables such as -``R_LIBS`` uniformly. Since every R package needs this, the ``RPackage`` -base class contains: +As an extension of the R ecosystem, your package will obviously depend on R to build and run. +Normally, we would use ``depends_on`` to express this, but for R packages, we use ``extends``. +This implies a special dependency on R, which is used to set environment variables such as ``R_LIBS`` uniformly. +Since every R package needs this, the ``RPackage`` base class contains: .. code-block:: python extends("r") -Take a close look at the homepage for ``caret``. If you look at the -"Depends" section, you'll notice that ``caret`` depends on "R (≥ 3.2.0)". +Take a close look at the homepage for ``caret``. +If you look at the "Depends" section, you'll notice that ``caret`` depends on "R (≥ 3.2.0)". You should add this to your package like so: .. code-block:: python @@ -215,39 +175,30 @@ You should add this to your package like so: depends_on("r@3.2.0:", type=("build", "run")) -^^^^^^^^^^^^^^ R dependencies ^^^^^^^^^^^^^^ -R packages are often small and follow the classic Unix philosophy -of doing one thing well. They are modular and usually depend on -several other packages. You may find a single package with over a -hundred dependencies. Luckily, R packages are well-documented -and list all of their dependencies in the following sections: +R packages are often small and follow the classic Unix philosophy of doing one thing well. +They are modular and usually depend on several other packages. +You may find a single package with over a hundred dependencies. +Luckily, R packages are well-documented and list all of their dependencies in the following sections: * Depends * Imports * LinkingTo -As far as Spack is concerned, all three of these dependency types -correspond to ``type=("build", "run")``, so you don't have to worry -about the details. If you are curious what they mean, -https://github.com/spack/spack/issues/2951 has a pretty good summary: +As far as Spack is concerned, all three of these dependency types correspond to ``type=("build", "run")``, so you don't have to worry about the details. +If you are curious what they mean, https://github.com/spack/spack/issues/2951 has a pretty good summary: - ``Depends`` is required and will cause those R packages to be *attached*, - that is, their APIs are exposed to the user. ``Imports`` *loads* packages - so that *the package* importing these packages can access their APIs, - while *not* being exposed to the user. 
When a user calls ``library(foo)`` - s/he *attaches* package ``foo`` and all of the packages under ``Depends``. + ``Depends`` is required and will cause those R packages to be *attached*, that is, their APIs are exposed to the user. + ``Imports`` *loads* packages so that *the package* importing these packages can access their APIs, while *not* being exposed to the user. + When a user calls ``library(foo)`` s/he *attaches* package ``foo`` and all of the packages under ``Depends``. Any function in one of these packages can be called directly as ``bar()``. - If there are conflicts, a user can also specify ``pkgA::bar()`` and - ``pkgB::bar()`` to distinguish between them. Historically, there was only - ``Depends`` and ``Suggests``, hence the confusing names. Today, maybe - ``Depends`` would have been named ``Attaches``. + If there are conflicts, a user can also specify ``pkgA::bar()`` and ``pkgB::bar()`` to distinguish between them. + Historically, there was only ``Depends`` and ``Suggests``, hence the confusing names. + Today, maybe ``Depends`` would have been named ``Attaches``. - The ``LinkingTo`` is not perfect and there was recently an extensive - discussion about API/ABI among other things on the R-devel mailing - list among very skilled R developers: + The ``LinkingTo`` is not perfect and there was recently an extensive discussion about API/ABI among other things on the R-devel mailing list among very skilled R developers: * https://stat.ethz.ch/pipermail/r-devel/2016-December/073505.html * https://stat.ethz.ch/pipermail/r-devel/2017-January/073647.html @@ -256,110 +207,86 @@ Some packages also have a fourth section: * Suggests -These are optional, rarely-used dependencies that a user might find -useful. You should **NOT** add these dependencies to your package. -R packages already have enough dependencies as it is, and adding -optional dependencies can really slow down the concretization -process. They can also introduce circular dependencies. +These are optional, rarely-used dependencies that a user might find useful. +You should **NOT** add these dependencies to your package. +R packages already have enough dependencies as it is, and adding optional dependencies can really slow down the concretization process. +They can also introduce circular dependencies. A fifth rarely used section is: * Enhances -This means that the package can be used as an optional dependency -for another package. Again, these packages should **NOT** be listed -as dependencies. +This means that the package can be used as an optional dependency for another package. +Again, these packages should **NOT** be listed as dependencies. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Core, recommended, and non-core packages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you look at "Depends", "Imports", and "LinkingTo", you will notice -3 different types of packages: +If you look at "Depends", "Imports", and "LinkingTo", you will notice 3 different types of packages: -""""""""""""" Core packages """"""""""""" -If you look at the ``caret`` homepage, you'll notice a few dependencies -that don't have a link to the package, like ``methods``, ``stats``, and -``utils``. These packages are part of the core R distribution and are -tied to the R version installed. You can basically consider these to be -"R itself". These are so essential to R that it would not make sense for -them to be updated via CRAN. If you did, you would basically get a different -version of R. Thus, they're updated when R is updated. 
+If you look at the ``caret`` homepage, you'll notice a few dependencies that don't have a link to the package, like ``methods``, ``stats``, and ``utils``. +These packages are part of the core R distribution and are tied to the R version installed. +You can basically consider these to be "R itself". +These are so essential to R that it would not make sense for them to be updated via CRAN. +If you did, you would basically get a different version of R. +Thus, they're updated when R is updated. -You can find a list of these core libraries at: -https://github.com/wch/r-source/tree/trunk/src/library +You can find a list of these core libraries at: https://github.com/wch/r-source/tree/trunk/src/library -"""""""""""""""""""" Recommended packages """""""""""""""""""" When you install R, there is an option called ``--with-recommended-packages``. -This flag causes the R installation to include a few "Recommended" packages -(legacy term). They are for historical reasons quite tied to the core R -distribution, developed by the R core team or people closely related to it. -The R core distribution "knows" about these packages, but they are indeed -distributed via CRAN. Because they're distributed via CRAN, they can also be -updated between R version releases. - -Spack explicitly adds the ``--without-recommended-packages`` flag to prevent -the installation of these packages. Due to the way Spack handles package -activation (symlinking packages to the R installation directory), -pre-existing recommended packages will cause conflicts for already-existing -files. We could either not include these recommended packages in Spack and -require them to be installed through ``--with-recommended-packages``, or -we could not install them with R and let users choose the version of the -package they want to install. We chose the latter. - -Since these packages are so commonly distributed with the R system, many -developers may assume these packages exist and fail to list them as -dependencies. Watch out for this. - -You can find a list of these recommended packages at: -https://github.com/wch/r-source/blob/trunk/share/make/vars.mk +This flag causes the R installation to include a few "Recommended" packages (legacy term). +They are for historical reasons quite tied to the core R distribution, developed by the R core team or people closely related to it. +The R core distribution "knows" about these packages, but they are indeed distributed via CRAN. +Because they're distributed via CRAN, they can also be updated between R version releases. + +Spack explicitly adds the ``--without-recommended-packages`` flag to prevent the installation of these packages. +Due to the way Spack handles package activation (symlinking packages to the R installation directory), pre-existing recommended packages will cause conflicts for already-existing files. +We could either not include these recommended packages in Spack and require them to be installed through ``--with-recommended-packages``, or we could not install them with R and let users choose the version of the package they want to install. +We chose the latter. + +Since these packages are so commonly distributed with the R system, many developers may assume these packages exist and fail to list them as dependencies. +Watch out for this. + +You can find a list of these recommended packages at: https://github.com/wch/r-source/blob/trunk/share/make/vars.mk -""""""""""""""""" Non-core packages """"""""""""""""" -These are packages that are neither "core" nor "recommended". 
There are more -than 10,000 of these packages hosted on CRAN alone. +These are packages that are neither "core" nor "recommended". +There are more than 10,000 of these packages hosted on CRAN alone. -For each of these package types, if you see that a specific version is -required, for example, "lattice (≥ 0.20)", please add this information to -the dependency: +For each of these package types, if you see that a specific version is required, for example, "lattice (≥ 0.20)", please add this information to the dependency: .. code-block:: python depends_on("r-lattice@0.20:", type=("build", "run")) -^^^^^^^^^^^^^^^^^^ Non-R dependencies ^^^^^^^^^^^^^^^^^^ -Some packages depend on non-R libraries for linking. Check out the -`r-stringi `_ -package for an example: https://cloud.r-project.org/package=stringi. +Some packages depend on non-R libraries for linking. +Check out the `r-stringi `_ package for an example: https://cloud.r-project.org/package=stringi. If you search for the text "SystemRequirements", you will see: ICU4C (>= 52, optional) -This is how non-R dependencies are listed. Make sure to add these -dependencies. The default dependency type should suffice. +This is how non-R dependencies are listed. +Make sure to add these dependencies. +The default dependency type should suffice. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to the installation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Some R packages provide additional flags that can be passed to -``R CMD INSTALL``, often to locate non-R dependencies. -`r-rmpi `_ -is an example of this, as it uses flags for linking to an MPI library. To pass -these to the installation command, you can override ``configure_args`` -like so: +Some R packages provide additional flags that can be passed to ``R CMD INSTALL``, often to locate non-R dependencies. +`r-rmpi `_ is an example of this, as it uses flags for linking to an MPI library. +To pass these to the installation command, you can override ``configure_args`` like so: .. code-block:: python @@ -381,16 +308,13 @@ like so: ] -There is a similar ``configure_vars`` function that can be overridden -to pass variables to the build. +There is a similar ``configure_vars`` function that can be overridden to pass variables to the build. -^^^^^^^^^^^^^^^^^^^^^ Alternatives to Spack ^^^^^^^^^^^^^^^^^^^^^ -CRAN hosts over 10,000 R packages, most of which are not in Spack. Many -users may not need the advanced features of Spack, and may prefer to -install R packages the normal way: +CRAN hosts over 10,000 R packages, most of which are not in Spack. +Many users may not need the advanced features of Spack, and may prefer to install R packages the normal way: .. code-block:: console @@ -398,34 +322,23 @@ install R packages the normal way: > install.packages("ggplot2") -R will search CRAN for the ``ggplot2`` package and install all necessary -dependencies for you. If you want to update all installed R packages to -the latest release, you can use: +R will search CRAN for the ``ggplot2`` package and install all necessary dependencies for you. +If you want to update all installed R packages to the latest release, you can use: .. code-block:: console > update.packages(ask = FALSE) -This works great for users who have internet access, but those on an -air-gapped cluster will find it easier to let Spack build a download -mirror and install these packages for you. 
+This works great for users who have internet access, but those on an air-gapped cluster will find it easier to let Spack build a download mirror and install these packages for you. -Where Spack really shines is its ability to install non-R dependencies -and link to them properly, something the R installation mechanism -cannot handle. +Where Spack really shines is its ability to install non-R dependencies and link to them properly, something the R installation mechanism cannot handle. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on installing R packages, see: -https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html +For more information on installing R packages, see: https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html -For more information on writing R packages, see: -https://cloud.r-project.org/doc/manuals/r-release/R-exts.html +For more information on writing R packages, see: https://cloud.r-project.org/doc/manuals/r-release/R-exts.html -In particular, -https://cloud.r-project.org/doc/manuals/r-release/R-exts.html#Package-Dependencies -has a great explanation of the difference between Depends, Imports, -and LinkingTo. +In particular, https://cloud.r-project.org/doc/manuals/r-release/R-exts.html#Package-Dependencies has a great explanation of the difference between Depends, Imports, and LinkingTo. diff --git a/lib/spack/docs/build_systems/rubypackage.rst b/lib/spack/docs/build_systems/rubypackage.rst index 13d13a6393eacf..07dc520f2d85ee 100644 --- a/lib/spack/docs/build_systems/rubypackage.rst +++ b/lib/spack/docs/build_systems/rubypackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,19 +9,15 @@ .. _rubypackage: ------- Ruby ------ -Like Perl, Python, and R, Ruby has its own build system for -installing Ruby gems. +Like Perl, Python, and R, Ruby has its own build system for installing Ruby gems. -^^^^^^ Phases ^^^^^^ -The ``RubyBuilder`` and ``RubyPackage`` base classes provide the following phases that -can be overridden: +The ``RubyBuilder`` and ``RubyPackage`` base classes provide the following phases that can be overridden: #. ``build`` - build everything needed to install #. ``install`` - install everything from build directory @@ -41,8 +38,7 @@ For packages that come with a ``Rakefile`` file, these phases run: $ gem install *.gem -For packages that come pre-packaged as a ``*.gem`` file, the build -phase is skipped and the install phase runs: +For packages that come pre-packaged as a ``*.gem`` file, the build phase is skipped and the install phase runs: .. code-block:: console @@ -56,30 +52,27 @@ These are all standard ``gem`` commands and can be found by running: $ gem help commands -For packages that only distribute ``*.gem`` files, these files can be -downloaded with the ``expand=False`` option in the ``version`` directive. +For packages that only distribute ``*.gem`` files, these files can be downloaded with the ``expand=False`` option in the ``version`` directive. The build phase will be automatically skipped. 
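+As a minimal sketch of such a package (the class name, URL, and checksum here are hypothetical), the ``version`` directive would look like:
+
+.. code-block:: python
+
+   class RubyFoo(RubyPackage):
+       """Hypothetical package distributed only as a pre-built *.gem file."""
+
+       homepage = "https://example.com/foo"
+       url = "https://rubygems.org/downloads/foo-1.0.0.gem"
+
+       # expand=False keeps the fetched *.gem file intact instead of extracting
+       # it, so the install phase can run gem install on it directly.
+       version("1.0.0", sha256="...", expand=False)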
-^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -When building from source, Ruby packages can be identified by the -presence of any of the following files: +When building from source, Ruby packages can be identified by the presence of any of the following files: * ``*.gemspec`` * ``Rakefile`` * ``setup.rb`` (not yet supported) -However, not all Ruby packages are released as source code. Some are only -released as ``*.gem`` files. These files can be extracted using: +However, not all Ruby packages are released as source code. +Some are only released as ``*.gem`` files. +These files can be extracted using: .. code-block:: console $ gem unpack *.gem -^^^^^^^^^^^ Description ^^^^^^^^^^^ @@ -93,7 +86,6 @@ The ``*.gemspec`` file may contain something like: Either of these can be used for the description of the Spack package. -^^^^^^^^ Homepage ^^^^^^^^ @@ -106,12 +98,11 @@ The ``*.gemspec`` file may contain something like: This should be used as the official homepage of the Spack package. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -All Ruby packages require Ruby at build and run-time. For this reason, -the base class contains: +All Ruby packages require Ruby at build and run-time. +For this reason, the base class contains: .. code-block:: python @@ -132,36 +123,27 @@ This can be added to the Spack package using: depends_on("ruby@2.3.0:", type=("build", "run")) -^^^^^^^^^^^^^^^^^ Ruby dependencies ^^^^^^^^^^^^^^^^^ -When you install a package with ``gem``, it reads the ``*.gemspec`` -file in order to determine the dependencies of the package. -If the dependencies are not yet installed, ``gem`` downloads them -and installs them for you. This may sound convenient, but Spack -cannot rely on this behavior for two reasons: +When you install a package with ``gem``, it reads the ``*.gemspec`` file in order to determine the dependencies of the package. +If the dependencies are not yet installed, ``gem`` downloads them and installs them for you. +This may sound convenient, but Spack cannot rely on this behavior for two reasons: #. Spack needs to be able to install packages on air-gapped networks. - If there is no internet connection, ``gem`` can't download the - package dependencies. By explicitly listing every dependency in - the ``package.py``, Spack knows what to download ahead of time. + If there is no internet connection, ``gem`` can't download the package dependencies. + By explicitly listing every dependency in the ``package.py``, Spack knows what to download ahead of time. #. Duplicate installations of the same dependency may occur. - Spack supports *activation* of Ruby extensions, which involves - symlinking the package installation prefix to the Ruby installation - prefix. If your package is missing a dependency, that dependency - will be installed to the installation directory of the same package. - If you try to activate the package + dependency, it may cause a - problem if that package has already been activated. + Spack supports *activation* of Ruby extensions, which involves symlinking the package installation prefix to the Ruby installation prefix. + If your package is missing a dependency, that dependency will be installed to the installation directory of the same package. + If you try to activate the package + dependency, it may cause a problem if that package has already been activated. For these reasons, you must always explicitly list all dependencies. 
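+For instance, if a ``*.gemspec`` declared a runtime dependency such as ``spec.add_runtime_dependency "rake", ">= 12.0"`` (a made-up example), the Spack package would mirror it explicitly:
+
+.. code-block:: python
+
+   # Mirror of the gemspec's runtime dependency; gems are needed both to
+   # build the package and to use it afterwards.
+   depends_on("ruby-rake@12.0:", type=("build", "run"))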
-Although the documentation may list the package's dependencies, -often the developers assume people will use ``gem`` and won't have to -worry about it. Always check the ``*.gemspec`` file to find the true -dependencies. +Although the documentation may list the package's dependencies, often the developers assume people will use ``gem`` and won't have to worry about it. +Always check the ``*.gemspec`` file to find the true dependencies. Check for the following clues in the ``*.gemspec`` file: @@ -178,9 +160,7 @@ Check for the following clues in the ``*.gemspec`` file: These packages are optional dependencies used for development. They should not be added as dependencies of the package. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on Ruby packaging, see: -https://guides.rubygems.org/ +For more information on Ruby packaging, see: https://guides.rubygems.org/ diff --git a/lib/spack/docs/build_systems/sconspackage.rst b/lib/spack/docs/build_systems/sconspackage.rst index d7354d0d39810b..f4aef2adaca680 100644 --- a/lib/spack/docs/build_systems/sconspackage.rst +++ b/lib/spack/docs/build_systems/sconspackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,18 +9,15 @@ .. _sconspackage: ------- SCons ------ -SCons is a general-purpose build system that does not rely on -Makefiles to build software. SCons is written in Python, and handles -all building and linking itself. +SCons is a general-purpose build system that does not rely on Makefiles to build software. +SCons is written in Python, and handles all building and linking itself. -As far as build systems go, SCons is very non-uniform. It provides a -common framework for developers to write build scripts, but the build -scripts themselves can vary drastically. Some developers add subcommands -like: +As far as build systems go, SCons is very non-uniform. +It provides a common framework for developers to write build scripts, but the build scripts themselves can vary drastically. +Some developers add subcommands like: .. code-block:: console @@ -29,15 +27,14 @@ like: $ scons install -Others don't add any subcommands. Some have configuration options that -can be specified through variables on the command line. Others don't. +Others don't add any subcommands. +Some have configuration options that can be specified through variables on the command line. +Others don't. -^^^^^^ Phases ^^^^^^ -As previously mentioned, SCons allows developers to add subcommands like -``build`` and ``install``, but by default, installation usually looks like: +As previously mentioned, SCons allows developers to add subcommands like ``build`` and ``install``, but by default, installation usually looks like: .. code-block:: console @@ -45,17 +42,14 @@ As previously mentioned, SCons allows developers to add subcommands like $ scons install -To facilitate this, the ``SConsBuilder`` and ``SConsPackage`` base classes provide the -following phases: +To facilitate this, the ``SConsBuilder`` and ``SConsPackage`` base classes provide the following phases: #. ``build`` - build the package #. ``install`` - install the package -Package developers often add unit tests that can be invoked with -``scons test`` or ``scons check``. Spack provides a ``build_test`` method -to handle this. 
Since we don't know which one the package developer -chose, the ``build_test`` method does nothing by default, but can be easily -overridden like so: +Package developers often add unit tests that can be invoked with ``scons test`` or ``scons check``. +Spack provides a ``build_test`` method to handle this. +Since we don't know which one the package developer chose, the ``build_test`` method does nothing by default, but can be easily overridden like so: .. code-block:: python @@ -63,13 +57,11 @@ overridden like so: scons("check") -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -SCons packages can be identified by their ``SConstruct`` files. These -files handle everything from setting up subcommands and command-line -options to linking and compiling. +SCons packages can be identified by their ``SConstruct`` files. +These files handle everything from setting up subcommands and command-line options to linking and compiling. One thing to look for is the ``EnsureSConsVersion`` function: @@ -81,38 +73,29 @@ One thing to look for is the ``EnsureSConsVersion`` function: This means that SCons 2.3.0 is the earliest release that will work. You should specify this in a ``depends_on`` statement. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -At the bare minimum, packages that use the SCons build system need a -``scons`` dependency. Since this is always the case, the ``SConsPackage`` -base class already contains: +At the bare minimum, packages that use the SCons build system need a ``scons`` dependency. +Since this is always the case, the ``SConsPackage`` base class already contains: .. code-block:: python depends_on("scons", type="build") -If you want to specify a particular version requirement, you can override -this in your package: +If you want to specify a particular version requirement, you can override this in your package: .. code-block:: python depends_on("scons@2.3.0:", type="build") -^^^^^^^^^^^^^^^^^^^^^^^^^ Finding available options ^^^^^^^^^^^^^^^^^^^^^^^^^ -The first place to start when looking for a list of valid options to -build a package is ``scons --help``. Some packages like -`kahip `_ -don't bother overwriting the default SCons help message, so this isn't -very useful, but other packages like -`serf `_ -print a list of valid command-line variables: +The first place to start when looking for a list of valid options to build a package is ``scons --help``. +Some packages like `kahip `_ don't bother overwriting the default SCons help message, so this isn't very useful, but other packages like `serf `_ print a list of valid command-line variables: .. code-block:: console @@ -180,9 +163,7 @@ print a list of valid command-line variables: Use scons -H for help about command-line options. -More advanced packages like -`cantera `_ -use ``scons --help`` to print a list of subcommands: +More advanced packages like `cantera `_ use ``scons --help`` to print a list of subcommands: .. code-block:: console @@ -227,22 +208,21 @@ use ``scons --help`` to print a list of subcommands: 'scons doxygen' - Build the Doxygen documentation -You'll notice that cantera provides a ``scons help`` subcommand. Running -``scons help`` prints a list of valid command-line variables. +You'll notice that cantera provides a ``scons help`` subcommand. +Running ``scons help`` prints a list of valid command-line variables. 
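+If you want to inspect these options yourself before writing the package, one possible approach (assuming ``scons`` is available in your shell) is to stage the source and run the help command from the expanded sources:
+
+.. code-block:: console
+
+   $ spack stage cantera
+   $ cd $(spack location --stage-dir cantera)/spack-src
+   $ scons --help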
-^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to SCons ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Now that you know what arguments the project accepts, you can add them to -the package build phase. This is done by overriding ``build_args`` like so: +Now that you know what arguments the project accepts, you can add them to the package build phase. +This is done by overriding ``build_args`` like so: .. code-block:: python def build_args(self, spec, prefix): args = [ - f"PREFIX={prefix}", - f"ZLIB={spec['zlib'].prefix}", + f"PREFIX={prefix}", + f"ZLIB={spec['zlib'].prefix}", ] if spec.satisfies("+debug"): @@ -253,25 +233,19 @@ the package build phase. This is done by overriding ``build_args`` like so: return args -``SConsPackage`` also provides an ``install_args`` function that you can -override to pass additional arguments to ``scons install``. +``SConsPackage`` also provides an ``install_args`` function that you can override to pass additional arguments to ``scons install``. -^^^^^^^^^^^^^^^^^ Compiler wrappers ^^^^^^^^^^^^^^^^^ -By default, SCons builds all packages in a separate execution environment, -and doesn't pass any environment variables from the user environment. -Even changes to ``PATH`` are not propagated unless the package developer -does so. +By default, SCons builds all packages in a separate execution environment, and doesn't pass any environment variables from the user environment. +Even changes to ``PATH`` are not propagated unless the package developer does so. -This is particularly troublesome for Spack's compiler wrappers, which depend -on environment variables to manage dependencies and linking flags. In many -cases, SCons packages are not compatible with Spack's compiler wrappers, -and linking must be done manually. +This is particularly troublesome for Spack's compiler wrappers, which depend on environment variables to manage dependencies and linking flags. +In many cases, SCons packages are not compatible with Spack's compiler wrappers, and linking must be done manually. -First of all, check the list of valid options for anything relating to -environment variables. For example, cantera has the following option: +First of all, check the list of valid options for anything relating to environment variables. +For example, cantera has the following option: .. code-block:: none @@ -282,28 +256,20 @@ environment variables. For example, cantera has the following option: - default: "LD_LIBRARY_PATH,PYTHONPATH" -In the case of cantera, using ``env_vars=all`` allows us to use -Spack's compiler wrappers. If you don't see an option related to -environment variables, try using Spack's compiler wrappers by passing -``spack_cc``, ``spack_cxx``, and ``spack_fc`` via the ``CC``, ``CXX``, -and ``FC`` arguments, respectively. If you pass them to the build and -you see an error message like: +In the case of cantera, using ``env_vars=all`` allows us to use Spack's compiler wrappers. +If you don't see an option related to environment variables, try using Spack's compiler wrappers by passing ``spack_cc``, ``spack_cxx``, and ``spack_fc`` via the ``CC``, ``CXX``, and ``FC`` arguments, respectively. +If you pass them to the build and you see an error message like: .. code-block:: none Spack compiler must be run from Spack! Input 'SPACK_PREFIX' is missing. -you'll know that the package isn't compatible with Spack's compiler -wrappers. In this case, you'll have to use the path to the actual -compilers, which are stored in ``self.compiler.cc`` and friends. 
-Note that this may involve passing additional flags to the build to -locate dependencies, a task normally done by the compiler wrappers. -serf is an example of a package with this limitation. +you'll know that the package isn't compatible with Spack's compiler wrappers. +In this case, you'll have to use the path to the actual compilers, which are stored in ``self.compiler.cc`` and friends. +Note that this may involve passing additional flags to the build to locate dependencies, a task normally done by the compiler wrappers. serf is an example of a package with this limitation. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the SCons build system, see: -http://scons.org/documentation.html +For more information on the SCons build system, see: http://scons.org/documentation.html diff --git a/lib/spack/docs/build_systems/sippackage.rst b/lib/spack/docs/build_systems/sippackage.rst index e336d475396e61..ec80ced62defda 100644 --- a/lib/spack/docs/build_systems/sippackage.rst +++ b/lib/spack/docs/build_systems/sippackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,20 +9,16 @@ .. _sippackage: ------- SIP ------ -SIP is a tool that makes it very easy to create Python bindings for C and C++ -libraries. It was originally developed to create PyQt, the Python bindings for -the Qt toolkit, but can be used to create bindings for any C or C++ library. +SIP is a tool that makes it very easy to create Python bindings for C and C++ libraries. +It was originally developed to create PyQt, the Python bindings for the Qt toolkit, but can be used to create bindings for any C or C++ library. -SIP comprises a code generator and a Python module. The code generator -processes a set of specification files and generates C or C++ code which is -then compiled to create the bindings extension module. The SIP Python module -provides support functions to the automatically generated code. +SIP comprises a code generator and a Python module. +The code generator processes a set of specification files and generates C or C++ code which is then compiled to create the bindings extension module. +The SIP Python module provides support functions to the automatically generated code. -^^^^^^ Phases ^^^^^^ @@ -40,24 +37,21 @@ By default, these phases run: $ make install -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ Each SIP package comes with a custom configuration file written in Python. -For newer packages, this is called ``project.py``, while in older packages, -it may be called ``configure.py``. This script contains instructions to build -the project. +For newer packages, this is called ``project.py``, while in older packages, it may be called ``configure.py``. +This script contains instructions to build the project. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -``SIPPackage`` requires several dependencies. Python and SIP are needed at build-time -to run the aforementioned configure script. Python is also needed at run-time to -actually use the installed Python library. And as we are building Python bindings -for C/C++ libraries, Python is also needed as a link dependency. All of these -dependencies are automatically added via the base class. +``SIPPackage`` requires several dependencies. +Python and SIP are needed at build-time to run the aforementioned configure script. 
+Python is also needed at run-time to actually use the installed Python library. +And as we are building Python bindings for C/C++ libraries, Python is also needed as a link dependency. +All of these dependencies are automatically added via the base class. .. code-block:: python @@ -65,13 +59,11 @@ dependencies are automatically added via the base class. depends_on("py-sip", type="build") -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to ``sip-build`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Each phase comes with a ```` function that can be used to pass -arguments to that particular phase. For example, if you need to pass -arguments to the configure phase, you can use: +Each phase comes with a ```` function that can be used to pass arguments to that particular phase. +For example, if you need to pass arguments to the configure phase, you can use: .. code-block:: python @@ -81,19 +73,15 @@ arguments to the configure phase, you can use: A list of valid options can be found by running ``sip-build --help``. -^^^^^^^ Testing ^^^^^^^ -Just because a package successfully built does not mean that it built -correctly. The most reliable test of whether or not the package was -correctly installed is to attempt to import all of the modules that -get installed. To get a list of modules, run the following command -in the site-packages directory: +Just because a package successfully built does not mean that it built correctly. +The most reliable test of whether or not the package was correctly installed is to attempt to import all of the modules that get installed. +To get a list of modules, run the following command in the site-packages directory: -.. code-block:: console +.. code-block:: pycon - $ python >>> import setuptools >>> setuptools.find_packages() [ @@ -106,10 +94,9 @@ in the site-packages directory: ] -Large, complex packages like ``py-pyqt5`` will return a long list of -packages, while other packages may return an empty list. These packages -only install a single ``foo.py`` file. In Python packaging lingo, -a "package" is a directory containing files like: +Large, complex packages like ``py-pyqt5`` will return a long list of packages, while other packages may return an empty list. +These packages only install a single ``foo.py`` file. +In Python packaging lingo, a "package" is a directory containing files like: .. code-block:: none @@ -120,25 +107,19 @@ a "package" is a directory containing files like: whereas a "module" is a single Python file. -The ``SIPPackage`` base class automatically detects these module -names for you. If, for whatever reason, the module names detected -are wrong, you can provide the names yourself by overriding -``import_modules`` like so: +The ``SIPPackage`` base class automatically detects these module names for you. +If, for whatever reason, the module names detected are wrong, you can provide the names yourself by overriding ``import_modules`` like so: .. code-block:: python import_modules = ["PyQt5"] -These tests often catch missing dependencies and non-RPATHed -libraries. Make sure not to add modules/packages containing the word -"test", as these likely won't end up in the installation directory, -or may require test dependencies like pytest to be installed. +These tests often catch missing dependencies and non-RPATHed libraries. +Make sure not to add modules/packages containing the word "test", as these likely won't end up in the installation directory, or may require test dependencies like pytest to be installed. 
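+A quick manual spot-check after installation mimics what these tests do (``py-foo`` and ``foo`` are placeholders for your package and its top-level module):
+
+.. code-block:: console
+
+   $ spack load py-foo
+   $ python -c "import foo"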
-These tests can be triggered by running ``spack install --test=root`` -or by running ``spack test run`` after the installation has finished. +These tests can be triggered by running ``spack install --test=root`` or by running ``spack test run`` after the installation has finished. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/lib/spack/docs/build_systems/sourceforgepackage.rst b/lib/spack/docs/build_systems/sourceforgepackage.rst index befb7ff0f0b3e7..77dd445060b52b 100644 --- a/lib/spack/docs/build_systems/sourceforgepackage.rst +++ b/lib/spack/docs/build_systems/sourceforgepackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,51 +9,36 @@ .. _sourceforgepackage: ------------ SourceForge ----------- -``SourceforgePackage`` is a -`mixin-class `_. It automatically -sets the URL based on a list of SourceForge mirrors listed in -`sourceforge_mirror_path`, which defaults to a half dozen known mirrors. -Refer to the package source -(``__) for the current list of mirrors used by Spack. +``SourceforgePackage`` is a `mixin-class `_. +It automatically sets the URL based on a list of SourceForge mirrors listed in ``sourceforge_mirror_path``, which defaults to a half dozen known mirrors. +Refer to the `package source `__ for the current list of mirrors used by Spack. -^^^^^^^ Methods ^^^^^^^ This package provides a method for populating mirror URLs. **urls** - This method returns a list of possible URLs for package source. - It is decorated with `property` so its results are treated as - a package attribute. + It is decorated with `property` so its results are treated as a package attribute. - Refer to - ``__ - for information on how Spack uses the `urls` attribute during - fetching. + Refer to :ref:`mirrors-of-the-main-url` for information on how Spack uses the ``urls`` attribute during fetching. -^^^^^^ Usage ^^^^^^ -This helper package can be added to your package by adding it as a base -class of your package and defining the relative location of an archive -file for one version of your software. +This helper package can be added to your package by adding it as a base class of your package and defining the relative location of an archive file for one version of your software. .. code-block:: python :emphasize-lines: 1,3 - class MyPackage(AutotoolsPackage, SourceforgePackage): - ... - sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz" - ... + class MyPackage(AutotoolsPackage, SourceforgePackage): + ... + sourceforge_mirror_path = "my-package/mypackage.1.0.0.tar.gz" + ... -Over 40 packages are using ``SourceforgePackage`` this mix-in as of -July 2022 so there are multiple packages to choose from if you want -to see a real example. +Over 40 packages use this ``SourceforgePackage`` mix-in as of July 2022, so there are multiple packages to choose from if you want to see a real example. diff --git a/lib/spack/docs/build_systems/wafpackage.rst b/lib/spack/docs/build_systems/wafpackage.rst index 827f939c9c6fcd..46fc7e8715b680 100644 --- a/lib/spack/docs/build_systems/wafpackage.rst +++ b/lib/spack/docs/build_systems/wafpackage.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,14 +9,11 @@ ..
_wafpackage: ------- Waf ------ -Like SCons, Waf is a general-purpose build system that does not rely -on Makefiles to build software. +Like SCons, Waf is a general-purpose build system that does not rely on Makefiles to build software. -^^^^^^ Phases ^^^^^^ @@ -48,20 +46,16 @@ Each phase provides a ```` function that runs: $ python waf -j -where ```` is the number of parallel jobs to build with. Each phase -also has a ```` function that can pass arguments to this call. -All of these functions are empty. The ``configure`` phase -automatically adds ``--prefix=/path/to/installation/prefix``, so you -don't need to add that in the ``configure_args``. +where ```` is the number of parallel jobs to build with. +Each phase also has a ```` function that can pass arguments to this call. +All of these functions are empty. +The ``configure`` phase automatically adds ``--prefix=/path/to/installation/prefix``, so you don't need to add that in the ``configure_args``. -^^^^^^^ Testing ^^^^^^^ -``WafPackage`` also provides ``test`` and ``installtest`` methods, -which are run after the ``build`` and ``install`` phases, respectively. -By default, these phases do nothing, but you can override them to -run package-specific unit tests. +``WafPackage`` also provides ``test`` and ``installtest`` methods, which are run after the ``build`` and ``install`` phases, respectively. +By default, these phases do nothing, but you can override them to run package-specific unit tests. .. code-block:: python @@ -71,28 +65,24 @@ run package-specific unit tests. pytest() -^^^^^^^^^^^^^^^ Important files ^^^^^^^^^^^^^^^ -Each Waf package comes with a custom ``waf`` build script, written in -Python. This script contains instructions to build the project. +Each Waf package comes with a custom ``waf`` build script, written in Python. +This script contains instructions to build the project. -The package also comes with a ``wscript`` file. This file is used to -override the default ``configure``, ``build``, and ``install`` phases -to customize the Waf project. It also allows developers to override -the default ``./waf --help`` message. Check this file to find useful -information about dependencies and the minimum versions that are -supported. +The package also comes with a ``wscript`` file. +This file is used to override the default ``configure``, ``build``, and ``install`` phases to customize the Waf project. +It also allows developers to override the default ``./waf --help`` message. +Check this file to find useful information about dependencies and the minimum versions that are supported. -^^^^^^^^^^^^^^^^^^^^^^^^^ Build system dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^ -``WafPackage`` does not require ``waf`` to build. ``waf`` is only -needed to create the ``./waf`` script. Since ``./waf`` is a Python -script, Python is needed to build the project. ``WafPackage`` adds -the following dependency automatically: +``WafPackage`` does not require ``waf`` to build. +``waf`` is only needed to create the ``./waf`` script. +Since ``./waf`` is a Python script, Python is needed to build the project. +``WafPackage`` adds the following dependency automatically: .. code-block:: python @@ -101,14 +91,11 @@ the following dependency automatically: Waf only supports Python 2.5 and up. -^^^^^^^^^^^^^^^^^^^^^^^^ Passing arguments to Waf ^^^^^^^^^^^^^^^^^^^^^^^^ -As previously mentioned, each phase comes with a ```` -function that can be used to pass arguments to that particular -phase. 
For example, if you need to pass arguments to the build -phase, you can use: +As previously mentioned, each phase comes with a ```` function that can be used to pass arguments to that particular phase. +For example, if you need to pass arguments to the build phase, you can use: .. code-block:: python @@ -123,9 +110,7 @@ phase, you can use: A list of valid options can be found by running ``./waf --help``. -^^^^^^^^^^^^^^^^^^^^^^ External documentation ^^^^^^^^^^^^^^^^^^^^^^ -For more information on the Waf build system, see: -https://waf.io/book/ +For more information on the Waf build system, see: https://waf.io/book/ diff --git a/lib/spack/docs/chain.rst b/lib/spack/docs/chain.rst index 3be4d45132e723..f4b6a74d9eb634 100644 --- a/lib/spack/docs/chain.rst +++ b/lib/spack/docs/chain.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,14 +7,11 @@ :description lang=en: Learn how to chain Spack installations by pointing one Spack instance to another to use its installed packages. -============================================= Chaining Spack Installations (upstreams.yaml) ============================================= -You can point your Spack installation to another Spack installation to use any -packages that are installed there. To register the other Spack instance, -you can add it as an entry to ``upstreams.yaml`` at any of the -:ref:`configuration-scopes`: +You can point your Spack installation to another Spack installation to use any packages that are installed there. +To register the other Spack instance, you can add it as an entry to ``upstreams.yaml`` at any of the :ref:`configuration-scopes`: .. code-block:: yaml @@ -23,62 +21,41 @@ you can add it as an entry to ``upstreams.yaml`` at any of the spack-instance-2: install_tree: /path/to/another/spack/opt/spack -The ``install_tree`` must point to the ``opt/spack`` directory inside of the -Spack base directory, or the location of the ``install_tree`` defined -in :ref:`config.yaml `. +The ``install_tree`` must point to the ``opt/spack`` directory inside of the Spack base directory, or the location of the ``install_tree`` defined in :ref:`config.yaml `. -Once the upstream Spack instance has been added, ``spack find`` will -automatically check the upstream instance when querying installed packages, -and new package installations for the local Spack installation will use any -dependencies that are installed in the upstream instance. +Once the upstream Spack instance has been added, ``spack find`` will automatically check the upstream instance when querying installed packages, and new package installations for the local Spack installation will use any dependencies that are installed in the upstream instance. -This other instance of Spack has no knowledge of the local Spack instance -and may not have the same permissions or ownership as the local Spack instance. +The upstream Spack instance has no knowledge of the local Spack instance and may not have the same permissions or ownership as the local Spack instance. This has the following consequences: -#. Upstream Spack instances are not locked. Therefore, it is up to users to - make sure that the local instance is not using an upstream instance when it - is being modified. +#. Upstream Spack instances are not locked. 
+ Therefore, it is up to users to make sure that the local instance is not using an upstream instance when it is being modified. -#. Users should not uninstall packages from the upstream instance. Since the - upstream instance does not know about the local instance, it cannot prevent - the uninstallation of packages that the local instance depends on. +#. Users should not uninstall packages from the upstream instance. + Since the upstream instance does not know about the local instance, it cannot prevent the uninstallation of packages that the local instance depends on. Other details about upstream Spack installations: -#. If a package is installed both locally and upstream, the local installation - will always be used as a dependency. This can occur if the local Spack - installs a package which is not present in the upstream, but later on the - upstream Spack instance also installs that package. +#. If a package is installed both locally and upstream, the local installation will always be used as a dependency. + This can occur if the local Spack installs a package which is not present in the upstream, but later on the upstream Spack instance also installs that package. -#. If an upstream Spack instance registers and installs an external package, - the local Spack instance will treat this the same as a Spack-installed - package. This feature will only work if the upstream Spack instance - includes the upstream functionality (i.e., if its commit is after March - 27, 2019). +#. If an upstream Spack instance registers and installs an external package, the local Spack instance will treat this the same as a Spack-installed package. + This feature will only work if the upstream Spack instance includes the upstream functionality (i.e., if its commit is after March 27, 2019). ---------------------------------------- Using Multiple Upstream Spack Instances --------------------------------------- -A single Spack instance can use multiple upstream Spack installations. Spack -will search upstream instances in the order that you list them in your -configuration. If your Spack installation refers to instances X and Y, in that order, -then instance X must list Y as an upstream in its own ``upstreams.yaml``. +A single Spack instance can use multiple upstream Spack installations. +Spack will search upstream instances in the order that you list them in your configuration. +If your Spack installation refers to instances X and Y, in that order, then instance X must list Y as an upstream in its own ``upstreams.yaml``. ------------------------------------ Using Modules for Upstream Packages ----------------------------------- -The local Spack instance does not generate modules for packages that are -installed upstream. The local Spack instance can be configured to use the -modules generated by the upstream Spack instance. +The local Spack instance does not generate modules for packages that are installed upstream. +The local Spack instance can be configured to use the modules generated by the upstream Spack instance. 
-There are two requirements to use the modules created by an upstream Spack -instance: firstly, the upstream instance must do a ``spack module tcl refresh``, -which generates an index file that maps installed packages to their modules; -secondly, the local Spack instance must add a ``modules`` entry to the -configuration: +There are two requirements to use the modules created by an upstream Spack instance: firstly, the upstream instance must do a ``spack module tcl refresh``, which generates an index file that maps installed packages to their modules; secondly, the local Spack instance must add a ``modules`` entry to the configuration: .. code-block:: yaml @@ -88,12 +65,9 @@ configuration: modules: tcl: /path/to/other/spack/share/spack/modules -Each time new packages are installed in the upstream Spack instance, the -upstream Spack maintainer should run ``spack module tcl refresh`` (or the -corresponding command for the type of module that they intend to use). +Each time new packages are installed in the upstream Spack instance, the upstream Spack maintainer should run ``spack module tcl refresh`` (or the corresponding command for the type of module that they intend to use). .. note:: - Spack can generate modules that :ref:`automatically load - ` the modules of dependency packages. Spack cannot - currently do this for modules in upstream packages. + Spack can generate modules that :ref:`automatically load ` the modules of dependency packages. + Spack cannot currently do this for modules in upstream packages. diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py index f348c8b769c988..97724413a500c7 100644 --- a/lib/spack/docs/conf.py +++ b/lib/spack/docs/conf.py @@ -23,8 +23,12 @@ from typing import List from docutils.statemachine import StringList +from pygments.formatters.html import HtmlFormatter +from pygments.lexer import RegexLexer, default +from pygments.token import * from sphinx.domains.python import PythonDomain from sphinx.ext.apidoc import main as sphinx_apidoc +from sphinx.highlighting import PygmentsBridge from sphinx.parsers import RSTParser # -- Spack customizations ----------------------------------------------------- @@ -50,9 +54,17 @@ os.path.abspath(".spack/spack-packages/repos"), ] -subprocess.call(["spack", "list"], stdout=subprocess.DEVNULL) +# Init the package repo with all git history, so "Last updated on" is accurate. +subprocess.call(["spack", "repo", "update"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) +if os.path.exists(".spack/spack-packages/.git/shallow"): + subprocess.call( + ["git", "fetch", "--unshallow"], + cwd=".spack/spack-packages", + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) -# Generate a command index if an update is needed -- this also clones the package repository. +# Generate a command index if an update is needed subprocess.call( [ "spack", @@ -95,6 +107,101 @@ ] ) + +class NoWhitespaceHtmlFormatter(HtmlFormatter): + """HTML formatter that suppresses redundant span elements for Text.Whitespace tokens.""" + + def _get_css_classes(self, ttype): + # For Text.Whitespace return an empty string, which avoids + # elements from being generated. + return "" if ttype is Text.Whitespace else super()._get_css_classes(ttype) + + +class CustomPygmentsBridge(PygmentsBridge): + def get_formatter(self, **options): + return NoWhitespaceHtmlFormatter(**options) + + +# Use custom HTML formatter to avoid redundant elements. +# See https://github.com/pygments/pygments/issues/1905#issuecomment-3170486995. 
+PygmentsBridge.html_formatter = NoWhitespaceHtmlFormatter + + +from spack.llnl.util.lang import classproperty +from spack.spec_parser import SpecTokens + +# replace classproperty.__get__ to return `self` so Sphinx can document it correctly. Otherwise +# it evaluates the callback, and it documents the result, which is not what we want. +classproperty.__get__ = lambda self, instance, owner: self + + +class SpecLexer(RegexLexer): + """A custom lexer for Spack spec strings and spack commands.""" + + name = "Spack spec" + aliases = ["spec"] + filenames = [] + tokens = { + "root": [ + # Looks for `$ command`, which may need spec highlighting. + (r"^\$\s+", Generic.Prompt, "command"), + (r"#.*?\n", Comment.Single), + # Alternatively, we just get a literal spec string, so we move to spec mode. We just + # look ahead here, without consuming the spec string. + (r"(?=\S+)", Generic.Prompt, "spec"), + ], + "command": [ + # A spack install command is followed by a spec string, which we highlight. + ( + r"spack(?:\s+(?:-[eC]\s+\S+|--?\S+))*\s+(?:install|uninstall|spec|load|unload|find|info|list|versions|providers|mark|diff|add)(?: +(?:--?\S+)?)*", + Text, + "spec", + ), + # Comment + (r"\s+#.*?\n", Comment.Single, "command_output"), + # Escaped newline should leave us in this mode + (r".*?\\\n", Text), + # Otherwise, it's the end of the command + (r".*?\n", Text, "command_output"), + ], + "command_output": [ + (r"^\$\s+", Generic.Prompt, "#pop"), # new command + (r"#.*?\n", Comment.Single), # comments + (r".*?\n", Generic.Output), # command output + ], + "spec": [ + # New line terminates the spec string + (r"\s*?$", Text, "#pop"), + # Dependency, with optional virtual assignment specifier + (SpecTokens.START_EDGE_PROPERTIES.regex, Name.Variable, "edge_properties"), + (SpecTokens.DEPENDENCY.regex, Name.Variable), + # versions + (SpecTokens.VERSION_HASH_PAIR.regex, Keyword.Pseudo), + (SpecTokens.GIT_VERSION.regex, Keyword.Pseudo), + (SpecTokens.VERSION.regex, Keyword.Pseudo), + # variants + (SpecTokens.PROPAGATED_BOOL_VARIANT.regex, Name.Function), + (SpecTokens.BOOL_VARIANT.regex, Name.Function), + (SpecTokens.PROPAGATED_KEY_VALUE_PAIR.regex, Name.Function), + (SpecTokens.KEY_VALUE_PAIR.regex, Name.Function), + # filename + (SpecTokens.FILENAME.regex, Text), + # Package name + (SpecTokens.FULLY_QUALIFIED_PACKAGE_NAME.regex, Name.Class), + (SpecTokens.UNQUALIFIED_PACKAGE_NAME.regex, Name.Class), + # DAG hash + (SpecTokens.DAG_HASH.regex, Text), + (SpecTokens.WS.regex, Text), + # Also stop at unrecognized tokens (without consuming them) + default("#pop"), + ], + "edge_properties": [ + (SpecTokens.KEY_VALUE_PAIR.regex, Name.Function), + (SpecTokens.END_EDGE_PROPERTIES.regex, Name.Variable, "#pop"), + ], + } + + # Enable todo items todo_include_todos = True @@ -146,6 +253,7 @@ def setup(sphinx): sphinx.connect("autodoc-skip-member", skip_member) sphinx.add_domain(PatchedPythonDomain, override=True) sphinx.add_source_parser(NoTabExpansionRSTParser, override=True) + sphinx.add_lexer("spec", SpecLexer) # -- General configuration ----------------------------------------------------- @@ -163,7 +271,9 @@ def setup(sphinx): "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx_copybutton", - "sphinx_design", + "sphinx_last_updated_by_git", + "sphinx_sitemap", + "sphinxcontrib.inkscapeconverter", "sphinxcontrib.programoutput", ] @@ -228,52 +338,46 @@ def setup(sphinx): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
-exclude_patterns = ["_build", "_spack_root", ".spack-env", ".spack"] +exclude_patterns = ["_build", "_spack_root", ".spack-env", ".spack", ".venv"] autodoc_mock_imports = ["llnl"] +autodoc_default_options = {"no-value": True} +autodoc_preserve_defaults = True nitpicky = True nitpick_ignore = [ # Python classes that intersphinx is unable to resolve ("py:class", "argparse.HelpFormatter"), - ("py:class", "contextlib.contextmanager"), - ("py:class", "module"), - ("py:class", "_io.BufferedReader"), - ("py:class", "_io.BytesIO"), - ("py:class", "unittest.case.TestCase"), - ("py:class", "_frozen_importlib_external.SourceFileLoader"), - ("py:class", "clingo.Control"), - ("py:class", "six.moves.urllib.parse.ParseResult"), - ("py:class", "TextIO"), - ("py:class", "hashlib._Hash"), ("py:class", "concurrent.futures._base.Executor"), - ("py:class", "multiprocessing.context.Process"), + ("py:class", "hashlib._Hash"), + ("py:class", "multiprocessing.context.BaseContext"), + ("py:class", "posix.DirEntry"), # Spack classes that are private and we don't want to expose - ("py:class", "spack.provider_index._IndexBase"), - ("py:class", "spack.repo._PrependFileLoader"), ("py:class", "spack_repo.builtin.build_systems._checks.BuilderWithDefaults"), + ("py:class", "spack.repo._PrependFileLoader"), # Spack classes that intersphinx is unable to resolve - ("py:class", "spack.version.StandardVersion"), - ("py:class", "spack.spec.DependencySpec"), + ("py:class", "GitOrStandardVersion"), + ("py:class", "spack.bootstrap._common.QueryInfo"), + ("py:class", "spack.filesystem_view.SimpleFilesystemView"), ("py:class", "spack.spec.ArchSpec"), + ("py:class", "spack.spec.DependencySpec"), ("py:class", "spack.spec.InstallStatus"), ("py:class", "spack.spec.SpecfileReaderBase"), - ("py:class", "spack.filesystem_view.SimpleFilesystemView"), ("py:class", "spack.traverse.EdgeAndDepth"), ("py:class", "spack.vendor.archspec.cpu.microarchitecture.Microarchitecture"), - ("py:class", "spack.compiler.CompilerCache"), + ("py:class", "spack.vendor.jinja2.Environment"), # TypeVar that is not handled correctly - ("py:class", "spack.llnl.util.lang.T"), - ("py:class", "spack.llnl.util.lang.KT"), - ("py:class", "spack.llnl.util.lang.VT"), + ("py:class", "spack.llnl.util.lang.ClassPropertyType"), ("py:class", "spack.llnl.util.lang.K"), + ("py:class", "spack.llnl.util.lang.KT"), + ("py:class", "spack.llnl.util.lang.T"), ("py:class", "spack.llnl.util.lang.V"), - ("py:class", "spack.llnl.util.lang.ClassPropertyType"), - ("py:obj", "spack.llnl.util.lang.KT"), - ("py:obj", "spack.llnl.util.lang.VT"), + ("py:class", "spack.llnl.util.lang.VT"), ("py:obj", "spack.llnl.util.lang.ClassPropertyType"), ("py:obj", "spack.llnl.util.lang.K"), + ("py:obj", "spack.llnl.util.lang.KT"), ("py:obj", "spack.llnl.util.lang.V"), + ("py:obj", "spack.llnl.util.lang.VT"), ] # The reST default role (used for this markup: `text`) to use for all documents. @@ -289,8 +393,6 @@ def setup(sphinx): # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False -sys.path.append("./_pygments") -pygments_style = "style.SpackStyle" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -300,16 +402,14 @@ def setup(sphinx): # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "sphinx_rtd_theme" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -html_theme_options = {"logo_only": True} +html_theme = "furo" # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = ["_themes"] +# Google Search Console verification file +html_extra_path = ["google5fda5f94b4ffb8de.html"] + # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None @@ -319,7 +419,11 @@ def setup(sphinx): # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = "_spack_root/share/spack/logo/spack-logo-white-text.svg" +html_theme_options = { + "sidebar_hide_name": True, + "light_logo": "spack-logo-text.svg", + "dark_logo": "spack-logo-white-text.svg", +} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 @@ -334,6 +438,8 @@ def setup(sphinx): # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = "%b %d, %Y" +pygments_style = "default" +pygments_dark_style = "monokai" # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. @@ -359,7 +465,7 @@ def setup(sphinx): # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = False +html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True @@ -378,8 +484,15 @@ def setup(sphinx): # Output file base name for HTML help builder. htmlhelp_basename = "Spackdoc" +# Sitemap settings +sitemap_show_lastmod = True +sitemap_url_scheme = "{link}" +sitemap_excludes = ["search.html", "_modules/*"] + # -- Options for LaTeX output -------------------------------------------------- +latex_engine = "lualatex" + latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', @@ -391,7 +504,7 @@ def setup(sphinx): # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [("index", "Spack.tex", "Spack Documentation", "Todd Gamblin", "manual")] +latex_documents = [("index", "Spack.tex", "Spack Documentation", "", "manual")] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -464,3 +577,7 @@ def setup(sphinx): html_static_path = ["_static"] html_css_files = ["css/custom.css"] +html_context = {} + +if os.environ.get("READTHEDOCS", "") == "True": + html_context["READTHEDOCS"] = True diff --git a/lib/spack/docs/config_yaml.rst b/lib/spack/docs/config_yaml.rst index b57a8398b9f90b..e2d8daf158c026 100644 --- a/lib/spack/docs/config_yaml.rst +++ b/lib/spack/docs/config_yaml.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,28 +9,24 @@ :description lang=en: A detailed guide to the config.yaml file in Spack, which allows you to set core configuration options like installation paths, build parallelism, and trusted sources. -============================ Spack Settings (config.yaml) ============================ -Spack's basic configuration options are set in ``config.yaml``. 
You can -see the default settings by looking at -``etc/spack/defaults/config.yaml``: +Spack's basic configuration options are set in ``config.yaml``. +You can see the default settings by looking at ``etc/spack/defaults/config.yaml``: -.. literalinclude:: _spack_root/etc/spack/defaults/config.yaml +.. literalinclude:: _spack_root/etc/spack/defaults/base/config.yaml :language: yaml -These settings can be overridden in ``etc/spack/config.yaml`` or -``~/.spack/config.yaml``. See :ref:`configuration-scopes` for details. +These settings can be overridden in ``etc/spack/config.yaml`` or ``~/.spack/config.yaml``. +See :ref:`configuration-scopes` for details. ---------------------- ``install_tree:root`` --------------------- The location where Spack will install packages and their dependencies. The default is ``$spack/opt/spack``. ---------------- ``projections`` --------------- @@ -37,361 +34,282 @@ The default is ``$spack/opt/spack``. Modifying projections of the install tree is strongly discouraged. -By default, Spack installs all packages into a unique directory relative to the install -tree root with the following layout: +By default, Spack installs all packages into a unique directory relative to the install tree root with the following layout: .. code-block:: text - {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash} + {architecture.platform}-{architecture.target}/{name}-{version}-{hash} -In very rare cases, it may be necessary to reduce the length of this path. For example, -very old versions of the Intel compiler are known to segfault when input paths are too long: +In very rare cases, it may be necessary to reduce the length of this path. +For example, very old versions of the Intel compiler are known to segfault when input paths are too long: - .. code-block:: console +.. code-block:: console - : internal error: ** The compiler has encountered an unexpected problem. - ** Segmentation violation signal raised. ** - Access violation or stack overflow. Please contact Intel Support for assistance. + : internal error: ** The compiler has encountered an unexpected problem. + ** Segmentation violation signal raised. ** + Access violation or stack overflow. Please contact Intel Support for assistance. -Another case is Python and R packages with many runtime dependencies, which can result -in very large ``PYTHONPATH`` and ``R_LIBS`` environment variables. This can cause the -``execve`` system call to fail with ``E2BIG``, preventing processes from starting. +Another case is Python and R packages with many runtime dependencies, which can result in very large ``PYTHONPATH`` and ``R_LIBS`` environment variables. +This can cause the ``execve`` system call to fail with ``E2BIG``, preventing processes from starting. -For this reason, Spack allows users to modify the installation layout through custom -projections. For example: +For this reason, Spack allows users to modify the installation layout through custom projections. +For example: - .. code-block:: yaml +.. code-block:: yaml - config: - install_tree: - root: $spack/opt/spack - projections: - all: "{name}/{version}/{hash:16}" + config: + install_tree: + root: $spack/opt/spack + projections: + all: "{name}/{version}/{hash:16}" -would install packages into subdirectories using only the package name, version, and a -hash length of 16 characters. +would install packages into subdirectories using only the package name, version, and a hash length of 16 characters. 
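+For example, with the projection above, ``mypackage`` at version 1.7 would be installed to a path shaped like the following (the hash shown here is hypothetical):
+
+.. code-block:: text
+
+   mypackage/1.7/4ash3qmkfgxbm7h6
+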
Notice that reducing the hash length increases the likelihood of hash collisions. --------------------- ``build_stage`` -------------------- -Spack is designed to run from a user home directory, and on many -systems, the home directory is a (slow) network file system. On most systems, -building in a temporary file system is faster. Usually, there is also more -space available in the temporary location than in the home directory. If the -username is not already in the path, Spack will append the value of ``$user`` to -the selected ``build_stage`` path. - -.. warning:: We highly recommend specifying ``build_stage`` paths that - distinguish between staging and other activities to ensure - ``spack clean`` does not inadvertently remove unrelated files. - Spack prepends ``spack-stage-`` to temporary staging directory names to - reduce this risk. Using a combination of ``spack`` and or ``stage`` in - each specified path, as shown in the default settings and documented - examples, will add another layer of protection. +Spack is designed to run from a user home directory, and on many systems, the home directory is a (slow) network file system. +On most systems, building in a temporary file system is faster. +Usually, there is also more space available in the temporary location than in the home directory. +If the username is not already in the path, Spack will append the value of ``$user`` to the selected ``build_stage`` path. + +.. warning:: + We highly recommend specifying ``build_stage`` paths that distinguish between staging and other activities to ensure ``spack clean`` does not inadvertently remove unrelated files. + Spack prepends ``spack-stage-`` to temporary staging directory names to reduce this risk. + Using a combination of ``spack`` and/or ``stage`` in each specified path, as shown in the default settings and documented examples, will add another layer of protection. By default, Spack's ``build_stage`` is configured like this: .. code-block:: yaml build_stage: - - $tempdir/$user/spack-stage - - ~/.spack/stage + - $tempdir/$user/spack-stage + - ~/.spack/stage -This can be an ordered list of paths that Spack should search when trying to -find a temporary directory for the build stage. The list is searched in -order, and Spack will use the first directory to which it has write access. +This can be an ordered list of paths that Spack should search when trying to find a temporary directory for the build stage. +The list is searched in order, and Spack will use the first directory to which it has write access. -Specifying `~/.spack/stage` first will ensure each user builds in their home -directory. The historic Spack stage path `$spack/var/spack/stage` will build -directly inside the Spack instance. See :ref:`config-file-variables` for more -on ``$tempdir`` and ``$spack``. +Specifying ``~/.spack/stage`` first will ensure each user builds in their home directory. +The historic Spack stage path ``$spack/var/spack/stage`` will build directly inside the Spack instance. +See :ref:`config-file-variables` for more on ``$tempdir`` and ``$spack``. -When Spack builds a package, it creates a temporary directory within the -``build_stage``. After the package is successfully installed, Spack deletes -the temporary directory it used to build. Unsuccessful builds are not -deleted, but you can manually purge them with ``spack clean --stage``. +When Spack builds a package, it creates a temporary directory within the ``build_stage``.
+After the package is successfully installed, Spack deletes the temporary directory it used to build. +Unsuccessful builds are not deleted, but you can manually purge them with ``spack clean --stage``. .. note:: - The build will fail if there is no writable directory in the ``build_stage`` - list, where any user- and site-specific setting will be searched first. + The build will fail if there is no writable directory in the ``build_stage`` list, where any user- and site-specific setting will be searched first. --------------------- ``source_cache`` -------------------- -Location to cache downloaded tarballs and repositories. By default, these -are stored in ``$spack/var/spack/cache``. These are stored indefinitely -by default and can be purged with ``spack clean --downloads``. +Location to cache downloaded tarballs and repositories. +By default, these are stored in ``$spack/var/spack/cache``. +These are stored indefinitely by default and can be purged with ``spack clean --downloads``. .. _Misc Cache: --------------------- ``misc_cache`` -------------------- -Temporary directory to store long-lived cache files, such as indices of -packages available in repositories. Defaults to ``~/.spack/cache``. Can -be purged with ``spack clean --misc-cache``. +Temporary directory to store long-lived cache files, such as indices of packages available in repositories. +Defaults to ``~/.spack/cache``. +Can be purged with ``spack clean --misc-cache``. -In some cases, e.g., if you work with many Spack instances or many different -versions of Spack, it makes sense to have a cache per instance or per version. +In some cases, e.g., if you work with many Spack instances or many different versions of Spack, it makes sense to have a cache per instance or per version. You can do that by changing the value to either: * ``~/.spack/$spack_instance_id/cache`` for per-instance caches, or * ``~/.spack/$spack_short_version/cache`` for per-spack-version caches. --------------------- ``verify_ssl`` -------------------- -When set to ``true`` (default), Spack will verify certificates of remote -hosts when making ``ssl`` connections. Set to ``false`` to disable, and -tools like ``curl`` will use their ``--insecure`` options. Disabling -this can expose you to attacks. Use at your own risk. +When set to ``true`` (default), Spack will verify certificates of remote hosts when making ``ssl`` connections. +Set to ``false`` to disable, and tools like ``curl`` will use their ``--insecure`` options. +Disabling this can expose you to attacks. +Use at your own risk. --------------------- ``ssl_certs`` -------------------- -Path to custom certificates for SSL verification. The value can be a -filesystem path, or an environment variable that expands to an absolute file path. -The default value is set to the environment variable ``SSL_CERT_FILE`` -to use the same syntax used by many other applications that automatically -detect custom certificates. -When ``url_fetch_method:curl``, the ``config:ssl_certs`` should resolve to -a single file. Spack will then set the environment variable ``CURL_CA_BUNDLE`` -in the subprocess calling ``curl``. If additional ``curl`` arguments are required, -they can be set in the config, e.g., ``url_fetch_method:'curl -k -q'``. -If ``url_fetch_method:urllib``, then files and directories are supported, i.e., -``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR`` -will work. +Path to custom certificates for SSL verification. 
+The value can be a filesystem path, or an environment variable that expands to an absolute file path. +The default value is set to the environment variable ``SSL_CERT_FILE`` to use the same syntax used by many other applications that automatically detect custom certificates. +When ``url_fetch_method:curl``, the ``config:ssl_certs`` should resolve to a single file. +Spack will then set the environment variable ``CURL_CA_BUNDLE`` in the subprocess calling ``curl``. +If additional ``curl`` arguments are required, they can be set in the config, e.g., ``url_fetch_method:'curl -k -q'``. +If ``url_fetch_method:urllib``, then files and directories are supported, i.e., ``config:ssl_certs:$SSL_CERT_FILE`` or ``config:ssl_certs:$SSL_CERT_DIR`` will work. In all cases, the expanded path must be absolute for Spack to use the certificates. -Certificates relative to an environment can be created by prepending the path variable -with the Spack configuration variable ``$env``. +Certificates relative to an environment can be created by prepending the path variable with the Spack configuration variable ``$env``. --------------------- ``checksum`` -------------------- -When set to ``true``, Spack verifies downloaded source code using a -checksum and will refuse to build packages that it cannot verify. Set -to ``false`` to disable these checks. Disabling this can expose you to -attacks. Use at your own risk. +When set to ``true``, Spack verifies downloaded source code using a checksum and will refuse to build packages that it cannot verify. +Set to ``false`` to disable these checks. +Disabling this can expose you to attacks. +Use at your own risk. --------------------- ``locks`` -------------------- -When set to ``true``, concurrent instances of Spack will use locks to -avoid modifying the install tree, database file, etc. If ``false``, Spack -will disable all locking, but you must **not** run concurrent instances -of Spack. For file systems that do not support locking, you should set -this to ``false`` and run one Spack instance at a time; otherwise, we recommend -enabling locks. +When set to ``true``, concurrent instances of Spack will use locks to avoid modifying the install tree, database file, etc. +If ``false``, Spack will disable all locking, but you must **not** run concurrent instances of Spack. +For file systems that do not support locking, you should set this to ``false`` and run one Spack instance at a time; otherwise, we recommend enabling locks. --------------------- ``dirty`` -------------------- -By default, Spack unsets variables in your environment that can change -the way packages build. This includes ``LD_LIBRARY_PATH``, ``CPATH``, -``LIBRARY_PATH``, ``DYLD_LIBRARY_PATH``, and others. +By default, Spack unsets variables in your environment that can change the way packages build. +This includes ``LD_LIBRARY_PATH``, ``CPATH``, ``LIBRARY_PATH``, ``DYLD_LIBRARY_PATH``, and others. -By default, builds are ``clean``, but on some machines, compilers and -other tools may need custom ``LD_LIBRARY_PATH`` settings to run. You can -set ``dirty`` to ``true`` to skip the cleaning step and make all builds -"dirty" by default. Be aware that this will reduce the reproducibility -of builds. +By default, builds are ``clean``, but on some machines, compilers and other tools may need custom ``LD_LIBRARY_PATH`` settings to run. +You can set ``dirty`` to ``true`` to skip the cleaning step and make all builds "dirty" by default. +Be aware that this will reduce the reproducibility of builds. .. 
_build-jobs: --------------- ``build_jobs`` -------------- -Unless overridden in a package or on the command line, Spack builds all -packages in parallel. The default parallelism is equal to the number of -cores available to the process, up to 16 (the default of ``build_jobs``). +Unless overridden in a package or on the command line, Spack builds all packages in parallel. +The default parallelism is equal to the number of cores available to the process, up to 16 (the default of ``build_jobs``). For a build system that uses Makefiles, ``spack install`` runs: -- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of - cores available -- ``make -j<ncores>``, when ``build_jobs`` is greater or equal to the - number of cores available +- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of cores available +- ``make -j<ncores>``, when ``build_jobs`` is greater than or equal to the number of cores available -If you work on a shared login node or have a strict ulimit, it may be -necessary to set the default to a lower value. By setting ``build_jobs`` -to 4, for example, commands like ``spack install`` will run ``make -j4`` -instead of using every core. To build all software in serial, -set ``build_jobs`` to 1. +If you work on a shared login node or have a strict ulimit, it may be necessary to set the default to a lower value. +By setting ``build_jobs`` to 4, for example, commands like ``spack install`` will run ``make -j4`` instead of using every core. +To build all software in serial, set ``build_jobs`` to 1. -Note that specifying the number of jobs on the command line always takes -priority, so that ``spack install -j<njobs>`` always runs ``make -j<njobs>``, even -when that exceeds the number of cores available. +Note that specifying the number of jobs on the command line always takes priority, so that ``spack install -j<njobs>`` always runs ``make -j<njobs>``, even when that exceeds the number of cores available. --------------------- ``ccache`` -------------------- -When set to ``true``, Spack will use ccache to cache compiles. This is -useful specifically in two cases: (1) when using ``spack dev-build`` and (2) -when building the same package with many different variants. The default is -``false``. - -When enabled, Spack will look inside your ``PATH`` for a ``ccache`` -executable and stop if it is not found. Some systems come with -``ccache``, but it can also be installed using ``spack install -ccache``. ``ccache`` comes with reasonable defaults for cache size -and location. (See the *Configuration settings* section of ``man -ccache`` to learn more about the default settings and how to change -them.) Please note that we currently disable ccache's ``hash_dir`` -feature to avoid an issue with the stage directory (see -https://github.com/spack/spack/pull/3761#issuecomment-294352232). +When set to ``true``, Spack will use ccache to cache compiles. +This is useful specifically in two cases: (1) when using ``spack dev-build`` and (2) when building the same package with many different variants. +The default is ``false``. + +When enabled, Spack will look inside your ``PATH`` for a ``ccache`` executable and stop if it is not found. +Some systems come with ``ccache``, but it can also be installed using ``spack install ccache``. +``ccache`` comes with reasonable defaults for cache size and location. +(See the *Configuration settings* section of ``man ccache`` to learn more about the default settings and how to change them.)
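+For example, enabling this in ``config.yaml`` is a single setting:
+
+.. code-block:: yaml
+
+   config:
+     ccache: true
+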
+Please note that we currently disable ccache's ``hash_dir`` feature to avoid an issue with the stage directory (see https://github.com/spack/spack/pull/3761#issuecomment-294352232). ------------------------ ``shared_linking:type`` ----------------------- -Controls whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries -so that they can find their dependencies. This has no effect on macOS. +Controls whether Spack embeds ``RPATH`` or ``RUNPATH`` attributes in ELF binaries so that they can find their dependencies. +This has no effect on macOS. Two options are allowed: - 1. ``rpath`` uses ``RPATH`` and forces the ``--disable-new-tags`` flag to be passed to the linker. - 2. ``runpath`` uses ``RUNPATH`` and forces the ``--enable-new-tags`` flag to be passed to the linker. +1. ``rpath`` uses ``RPATH`` and forces the ``--disable-new-tags`` flag to be passed to the linker. +2. ``runpath`` uses ``RUNPATH`` and forces the ``--enable-new-tags`` flag to be passed to the linker. -``RPATH`` search paths have higher precedence than ``LD_LIBRARY_PATH``, -and ``ld.so`` will search for libraries in transitive RPATHs of -parent objects. +``RPATH`` search paths have higher precedence than ``LD_LIBRARY_PATH``, and ``ld.so`` will search for libraries in transitive RPATHs of parent objects. -``RUNPATH`` search paths have lower precedence than ``LD_LIBRARY_PATH``, -and ``ld.so`` will ONLY search for dependencies in the ``RUNPATH`` of -the loading object. +``RUNPATH`` search paths have lower precedence than ``LD_LIBRARY_PATH``, and ``ld.so`` will ONLY search for dependencies in the ``RUNPATH`` of the loading object. DO NOT MIX the two options within the same install tree. ------------------------ ``shared_linking:bind`` ----------------------- -This is an *experimental option* that controls whether Spack embeds absolute paths -to needed shared libraries in ELF executables and shared libraries on Linux. Setting -this option to ``true`` has two advantages: +This is an *experimental option* that controls whether Spack embeds absolute paths to needed shared libraries in ELF executables and shared libraries on Linux. +Setting this option to ``true`` has two advantages: -1. **Improved startup time**: when running an executable, the dynamic loader does not - have to perform a search for needed libraries, they are loaded directly. -2. **Reliability**: libraries loaded at runtime are those that were linked to. This - minimizes the risk of accidentally picking up system libraries. +1. **Improved startup time**: when running an executable, the dynamic loader does not have to search for needed libraries. + They are loaded directly. +2. **Reliability**: libraries loaded at runtime are those that were linked during the build. + This minimizes the risk of accidentally picking up system libraries. -In the current implementation, Spack sets the soname (shared object name) of -libraries to their install path upon installation. This has two implications: +In the current implementation, Spack sets the soname (shared object name) of libraries to their install path upon installation. +This has two implications: 1. Binding does not apply to libraries installed *before* the option was enabled. -2. Toggling the option off does *not* prevent binding of libraries installed when - the option was still enabled. +2. Disabling the option does *not* prevent binding of libraries installed when the option was still enabled. It is also worth noting that: -1. 
Applications relying on ``dlopen(3)`` will continue to work, even when they open - a library by name. This is because RPATHs are retained in binaries also - when ``bind`` is enabled. -2. ``LD_PRELOAD`` continues to work for the typical use case of overriding - symbols, such as preloading a library with a more efficient ``malloc``. - However, the preloaded library will be loaded *additionally to*, instead of - *in place of* another library with the same name --- this can be problematic - in very rare cases where libraries rely on a particular ``init`` or ``fini`` - order. +1. Applications relying on ``dlopen(3)`` will continue to work, even when they open a library by name. + This is because RPATHs are retained in binaries also when ``bind`` is enabled. +2. ``LD_PRELOAD`` continues to work for the typical use case of overriding symbols, such as preloading a library with a more efficient ``malloc``. + However, the preloaded library will be loaded *in addition to*, rather than *in place of*, another library with the same name -- which can be problematic in rare cases where libraries rely on a particular ``init`` or ``fini`` order. .. note:: - In some cases, packages provide *stub libraries* that only contain an interface - for linking but lack an implementation for runtime. An example of this is - ``libcuda.so``, provided by the CUDA toolkit; it can be used to link against, - but the library needed at runtime is the one installed with the CUDA driver. - To avoid binding those libraries, they can be marked as non-bindable using - a property in the package: + In some cases, packages provide *stub libraries* that only contain an interface for linking but lack an implementation for runtime. + An example of this is ``libcuda.so``, provided by the CUDA toolkit; it can be used to link against, but the library needed at runtime is the one installed with the CUDA driver. + To avoid binding those libraries, they can be marked as non-bindable using a property in the package: .. code-block:: python class Example(Package): - non_bindable_shared_objects = ["libinterface.so"] + non_bindable_shared_objects = ["libinterface.so"] ----------------------- ``install_status`` ---------------------- -When set to ``true``, Spack will show information about its current progress -as well as the current and total package numbers. Progress is shown both -in the terminal title and inline. Setting it to ``false`` will not show any -progress information. +When set to ``true``, Spack will show information about its current progress as well as the current and total package numbers. +Progress is shown both in the terminal title and inline. +Setting it to ``false`` will not show any progress information. -To work properly, this requires your terminal to reset its title after -Spack has finished its work; otherwise, Spack's status information will -remain in the terminal's title indefinitely. Most terminals should already -be set up this way and clear Spack's status information. +To work properly, this requires your terminal to reset its title after Spack has finished its work; otherwise, Spack's status information will remain in the terminal's title indefinitely. +Most terminals should already be set up this way and clear Spack's status information. ------------ ``aliases`` ----------- -Aliases can be used to define new Spack commands. They can be either shortcuts -for longer commands or include specific arguments for convenience. 
For instance, -if users want to use ``spack install``'s ``-v`` argument all the time, they can -create a new alias called ``inst`` that will always call ``install -v``: +Aliases can be used to define new Spack commands. +They can be either shortcuts for longer commands or include specific arguments for convenience. +For instance, if users want to use ``spack install``'s ``-v`` argument all the time, they can create a new alias called ``inst`` that will always call ``install -v``: .. code-block:: yaml aliases: inst: install -v -------------------------------- ``concretization_cache:enable`` ------------------------------- -When set to ``true``, Spack will utilize a cache of solver outputs from -successful concretization runs. When enabled, Spack will check the concretization -cache prior to running the solver. If a previous request to solve a given -problem is present in the cache, Spack will load the concrete specs and other -solver data from the cache rather than running the solver. Specs not previously -concretized will be added to the cache on a successful solve. The cache additionally -holds solver statistics, so commands like ``spack solve`` will still return information -about the run that produced a given solver result. +When set to ``true``, Spack will utilize a cache of solver outputs from successful concretization runs. +When enabled, Spack will check the concretization cache prior to running the solver. +If a previous request to solve a given problem is present in the cache, Spack will load the concrete specs and other solver data from the cache rather than running the solver. +Specs not previously concretized will be added to the cache on a successful solve. +The cache additionally holds solver statistics, so commands like ``spack solve`` will still return information about the run that produced a given solver result. -This cache is a subcache of the :ref:`Misc Cache` and as such will be cleaned when the Misc -Cache is cleaned. +This cache is a subcache of the :ref:`Misc Cache` and as such will be cleaned when the Misc Cache is cleaned. When ``false`` or omitted, all concretization requests will be performed from scratch. ----------------------------- ``concretization_cache:url`` ---------------------------- -Path to the location where Spack will root the concretization cache. Currently this only supports -paths on the local filesystem. +Path to the location where Spack will root the concretization cache. +Currently this only supports paths on the local filesystem. Default location is under the :ref:`Misc Cache` at: ``$misc_cache/concretization`` ------------------------------------- ``concretization_cache:entry_limit`` ------------------------------------ -Sets a limit on the number of concretization results that Spack will cache. The limit is evaluated -after each concretization run; if Spack has stored more results than the limit allows, the -oldest concretization results are pruned until 10% of the limit has been removed. +Sets a limit on the number of concretization results that Spack will cache. +The limit is evaluated after each concretization run; if Spack has stored more results than the limit allows, the oldest concretization results are pruned until 10% of the limit has been removed. -Setting this value to 0 disables automatic pruning. It is expected that users will be -responsible for maintaining this cache. +Setting this value to 0 disables automatic pruning. +It is expected that users will be responsible for maintaining this cache.
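+Putting the options above together, a minimal sketch of a cache configuration that enables caching and caps it at 1000 entries (an illustrative limit) would be:
+
+.. code-block:: yaml
+
+   config:
+     concretization_cache:
+       enable: true
+       entry_limit: 1000
+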
------------------------------------ ``concretization_cache:size_limit`` ----------------------------------- -Sets a limit on the size of the concretization cache in bytes. The limit is evaluated -after each concretization run; if Spack has stored more results than the limit allows, the -oldest concretization results are pruned until 10% of the limit has been removed. +Sets a limit on the size of the concretization cache in bytes. +The limit is evaluated after each concretization run; if Spack has stored more results than the limit allows, the oldest concretization results are pruned until 10% of the limit has been removed. -Setting this value to 0 disables automatic pruning. It is expected that users will be -responsible for maintaining this cache. +Setting this value to 0 disables automatic pruning. +It is expected that users will be responsible for maintaining this cache. diff --git a/lib/spack/docs/configuration.rst b/lib/spack/docs/configuration.rst index 847446fe654b16..7ed6ad392e4507 100644 --- a/lib/spack/docs/configuration.rst +++ b/lib/spack/docs/configuration.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,12 +9,11 @@ .. _configuration: -=================== Configuration Files =================== -Spack has many configuration files. Here is a quick list of them, in -case you want to skip directly to specific docs: +Spack has many configuration files. +Here is a quick list of them, in case you want to skip directly to specific docs: * :ref:`concretizer.yaml ` * :ref:`config.yaml ` @@ -22,210 +22,205 @@ case you want to skip directly to specific docs: * :ref:`modules.yaml ` * :ref:`packages.yaml ` (including :ref:`compiler configuration `) * :ref:`repos.yaml ` +* :ref:`toolchains.yaml ` -You can also add any of these as inline configuration in the YAML -manifest file (``spack.yaml``) describing an :ref:`environment -`. +You can also add any of these as inline configuration in the YAML manifest file (``spack.yaml``) describing an :ref:`environment `. ------------ YAML Format ----------- -Spack configuration files are written in YAML. We chose YAML because -it's human-readable but also versatile in that it supports dictionaries, -lists, and nested sections. For more details on the format, see `yaml.org -`_ and `libyaml `_. +Spack configuration files are written in YAML. +We chose YAML because it's human-readable but also versatile in that it supports dictionaries, lists, and nested sections. +For more details on the format, see `yaml.org `_. Here is an example ``config.yaml`` file: .. code-block:: yaml config: - install_tree: $spack/opt/spack + install_tree: + root: $spack/opt/spack build_stage: - - $tempdir/$user/spack-stage - - ~/.spack/stage + - $tempdir/$user/spack-stage + - ~/.spack/stage -Each Spack configuration file is nested under a top-level section -corresponding to its name. So, ``config.yaml`` starts with ``config:``, -``mirrors.yaml`` starts with ``mirrors:``, etc. +Each Spack configuration file is nested under a top-level section corresponding to its name. +So, ``config.yaml`` starts with ``config:``, ``mirrors.yaml`` starts with ``mirrors:``, etc. .. tip:: - Validation and autocompletion of Spack config files can be enabled in - your editor using `JSON Schema Store `_. + Validation and autocompletion of Spack config files can be enabled in your editor using `JSON Schema Store `_. .. 
_configuration-scopes: --------------------- Configuration Scopes -------------------- -Spack pulls configuration data from files in several directories. There -are multiple configuration scopes. From lowest to highest precedence: +Spack pulls configuration data from files in several directories. +There are multiple configuration scopes. +From lowest to highest precedence: + +#. **defaults**: Stored in ``$(prefix)/etc/spack/defaults/``. + These are the "factory" settings. + Users should generally not modify the settings here, but should override them in other configuration scopes. + The defaults here will change from version to version of Spack. + +#. **system**: Stored in ``/etc/spack/``. + These are settings for this machine or for all machines on which this file system is mounted. + The system scope overrides the defaults scope. + It can be used for settings idiosyncratic to a particular machine, such as the locations of compilers or external packages. + Be careful when modifying this scope, as changes here affect all Spack users on a machine. + Before putting configuration here, instead consider using the ``site`` scope, which only affects the Spack instance it's part of. -#. **defaults**: Stored in ``$(prefix)/etc/spack/defaults/``. These are - the "factory" settings. Users should generally not modify the settings - here, but should override them in other configuration scopes. The - defaults here will change from version to version of Spack. +#. **site**: Stored in ``$(prefix)/etc/spack/site/``. + Settings here affect only *this instance* of Spack, and they override the defaults and system scopes. + The site scope is intended for site-wide settings on multi-user machines (e.g., for a common Spack instance). -#. **system**: Stored in ``/etc/spack/``. These are settings for this - machine or for all machines on which this file system is - mounted. The system scope can be used for settings idiosyncratic to a - particular machine, such as the locations of compilers or external - packages. These settings are presumably controlled by someone with - root access on the machine. They override the defaults scope. +#. **plugin**: Read from a Python package's entry points. + Settings here affect all instances of Spack running with the same Python installation. + This scope takes higher precedence than site, system, and default scopes. -#. **site**: Stored in ``$(prefix)/etc/spack/``. Settings here affect - only *this instance* of Spack, and they override the defaults and system - scopes. The site scope can be used for per-project settings (one - Spack instance per project) or for site-wide settings on a multi-user - machine (e.g., for a common Spack instance). +#. **user**: Stored in the home directory: ``~/.spack/``. + These settings affect all instances of Spack and take higher precedence than site, system, plugin, or defaults scopes. -#. **plugin**: Read from a Python package's entry points. Settings here affect - all instances of Spack running with the same Python installation. This scope takes higher precedence than site, system, and default scopes. +#. **spack**: Stored in ``$(prefix)/etc/spack/``. + Settings here affect only *this instance* of Spack, and they override ``user`` and lower configuration scopes. + This is intended for project-specific or single-user Spack installations. + This is the topmost built-in Spack scope, and modifying it gives you full control over configuration scopes.
+ For example, it defines the ``user``, ``site``, and ``system`` scopes, so you can use it to remove them completely if you want. -#. **user**: Stored in the home directory: ``~/.spack/``. These settings - affect all instances of Spack and take higher precedence than site, - system, plugin, or defaults scopes. +#. **environment**: When using Spack :ref:`environments`, Spack reads additional configuration from the environment file. + See :ref:`environment-configuration` for further details on these scopes. + Environment scopes can be referenced from the command line as ``env:name`` (e.g., to reference environment ``foo``, use ``env:foo``). #. **custom**: Stored in a custom directory specified by ``--config-scope``. - If multiple scopes are listed on the command line, they are ordered - from lowest to highest precedence. + If multiple scopes are listed on the command line, they are ordered from lowest to highest precedence. -#. **environment**: When using Spack :ref:`environments`, Spack reads - additional configuration from the environment file. See - :ref:`environment-configuration` for further details on these - scopes. Environment scopes can be referenced from the command line - as ``env:name`` (e.g., to reference environment ``foo``, use - ``env:foo``). +#. **command line**: Build settings specified on the command line take precedence over all other scopes. -#. **command line**: Build settings specified on the command line take - precedence over all other scopes. +Each configuration directory may contain several configuration files, such as ``config.yaml``, ``packages.yaml``, or ``mirrors.yaml``. +When configurations conflict, settings from higher-precedence scopes override lower-precedence settings. -Each configuration directory may contain several configuration files, -such as ``config.yaml``, ``packages.yaml``, or ``mirrors.yaml``. When -configurations conflict, settings from higher-precedence scopes override -lower-precedence settings. +All of these except ``spack`` and ``defaults`` are initially empty, so you don't have to think about the others unless you need them. +The most commonly used scopes are ``environment``, ``user``, and ``spack``. +If you forget, you can always see the available configuration scopes in order of precedence with the ``spack config scopes`` command:: -Commands that modify scopes (e.g., ``spack compilers``, ``spack repo``, -etc.) take a ``--scope=<scope>`` parameter that you can use to control -which scope is modified. By default, they modify the highest-precedence -available scope that is not read-only (like `defaults`). + > spack config scopes -p + Scope Path + command_line + spack /home/username/spack/etc/spack + user /home/username/.spack/ + site /home/username/spack/etc/spack/site/ + defaults /home/username/spack/etc/spack/defaults/ + defaults:darwin /home/username/spack/etc/spack/defaults/darwin/ + defaults:base /home/username/spack/etc/spack/defaults/base/ + _builtin + +Commands that modify scopes (e.g., ``spack compilers``, ``spack repo``, ``spack external find``, etc.) take a ``--scope=<scope>`` parameter that you can use to control which scope is modified. +By default, they modify the highest-precedence available scope that is not read-only (like ``defaults``). .. _custom-scopes: -^^^^^^^^^^^^^ Custom scopes ^^^^^^^^^^^^^ -In addition to the ``defaults``, ``system``, ``site``, and ``user`` -scopes, you may add configuration scopes directly on the command -line with the ``--config-scope`` argument, or ``-C`` for short. +You may add configuration scopes directly on the command line with the ``--config-scope`` argument, or ``-C`` for short.
+You may add configuration scopes directly on the command line with the ``--config-scope`` argument, or ``-C`` for short. +Custom command-line scopes override any active environments, as well as the ``defaults``, ``system``, ``site``, ``user``, and ``spack`` scopes, -For example, the following adds two configuration scopes, named -``scopea`` and ``scopeb``, to a ``spack spec`` command: +For example, the following adds two configuration scopes, named ``scope-a`` and ``scope-b``, to a ``spack spec`` command: -.. code-block:: console +.. code-block:: spec - $ spack -C ~/myscopes/scopea -C ~/myscopes/scopeb spec ncurses + $ spack -C ~/myscopes/scope-a -C ~/myscopes/scope-b spec ncurses -Custom scopes come *after* the ``spack`` command and *before* the -subcommand, and they specify a single path to a directory containing -configuration files. You can add the same configuration files to that -directory that you can add to any other scope (e.g., ``config.yaml``, -``packages.yaml``, etc.). +Custom scopes come *after* the ``spack`` command and *before* the subcommand, and they specify a single path to a directory containing configuration files. +You can add the same configuration files to that directory that you can add to any other scope (e.g., ``config.yaml``, ``packages.yaml``, etc.). If multiple scopes are provided: #. Each must be preceded with the ``--config-scope`` or ``-C`` flag. #. They must be ordered from lowest to highest precedence. -""""""""""""""""""""""""""""""""""""""""""" Example: scopes for release and development """"""""""""""""""""""""""""""""""""""""""" -Suppose that you need to support simultaneous building of release and -development versions of ``mypackage``, where ``mypackage`` depends on ``A``, which in turn depends on ``B``. +Suppose that you need to support simultaneous building of release and development versions of ``mypackage``, where ``mypackage`` depends on ``pkg-a``, which in turn depends on ``pkg-b``. You could create the following files: .. code-block:: yaml - :caption: ~/myscopes/release/packages.yaml + :caption: ``~/myscopes/release/packages.yaml`` + :name: code-example-release-packages-yaml packages: - mypackage: - version: [1.7] - A: - version: [2.3] - B: - version: [0.8] + mypackage: + prefer: ["@1.7"] + pkg-a: + prefer: ["@2.3"] + pkg-b: + prefer: ["@0.8"] .. code-block:: yaml - :caption: ~/myscopes/develop/packages.yaml + :caption: ``~/myscopes/develop/packages.yaml`` + :name: code-example-develop-packages-yaml packages: - mypackage: - version: [develop] - A: - version: [develop] - B: - version: [develop] - -You can switch between ``release`` and ``develop`` configurations using -configuration arguments. You would type ``spack -C ~/myscopes/release`` -when you want to build the designated release versions of ``mypackage``, -``A``, and ``B``, and you would type ``spack -C ~/myscopes/develop`` when -you want to build all of these packages at the ``develop`` version. + mypackage: + prefer: ["@develop"] + pkg-a: + prefer: ["@develop"] + pkg-b: + prefer: ["@develop"] + +You can switch between ``release`` and ``develop`` configurations using configuration arguments. +You would type ``spack -C ~/myscopes/release`` when you want to build the designated release versions of ``mypackage``, ``pkg-a``, and ``pkg-b``, and you would type ``spack -C ~/myscopes/develop`` when you want to build all of these packages at the ``develop`` version. 
-""""""""""""""""""""""""""""""" Example: swapping MPI providers """"""""""""""""""""""""""""""" -Suppose that you need to build two software packages, ``packagea`` and -``packageb``. ``packagea`` is Python 2-based, and ``packageb`` is Python -3-based. ``packagea`` only builds with OpenMPI, and ``packageb`` only builds -with MPICH. You can create different configuration scopes for use with -``packagea`` and ``packageb``: +Suppose that you need to build two software packages, ``pkg-a`` and ``pkg-b``. +For ``pkg-b`` you want a newer Python version and a different MPI implementation than for ``pkg-a``. +You can create different configuration scopes for use with ``pkg-a`` and ``pkg-b``: .. code-block:: yaml - :caption: ~/myscopes/packgea/packages.yaml + :caption: ``~/myscopes/pkg-a/packages.yaml`` + :name: code-example-pkg-a-packages-yaml packages: - python: - version: [2.7.11] - all: - providers: - mpi: [openmpi] + python: + require: ["@3.11"] + mpi: + require: [openmpi] .. code-block:: yaml - :caption: ~/myscopes/packageb/packages.yaml + :caption: ``~/myscopes/pkg-b/packages.yaml`` + :name: code-example-pkg-b-packages-yaml packages: - python: - version: [3.5.2] - all: - providers: - mpi: [mpich] + python: + require: ["@3.13"] + mpi: + require: [mpich] .. _plugin-scopes: -^^^^^^^^^^^^^ Plugin scopes ^^^^^^^^^^^^^ .. note:: Python version >= 3.8 is required to enable plugin configuration. -Spack can be made aware of configuration scopes that are installed as part of a Python package. To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. Consider the Python package ``my_package`` that includes Spack configurations: +Spack can be made aware of configuration scopes that are installed as part of a Python package. +To do so, register a function that returns the scope's path to the ``"spack.config"`` entry point. +Consider the Python package ``my_package`` that includes Spack configurations: .. code-block:: console my-package/ ├── src - │   ├── my_package - │   │   ├── __init__.py - │   │   └── spack/ - │   │   │   └── config.yaml + │ ├── my_package + │ │ ├── __init__.py + │ │ └── spack/ + │ │ │ └── config.yaml └── pyproject.toml Adding the following to ``my_package``'s ``pyproject.toml`` will make ``my_package``'s ``spack/`` configurations visible to Spack when ``my_package`` is installed: @@ -241,6 +236,7 @@ The function ``my_package.get_config_path`` (matching the entry point definition import importlib.resources + def get_config_path(): dirname = importlib.resources.files("my_package").joinpath("spack") if dirname.exists(): @@ -248,35 +244,19 @@ The function ``my_package.get_config_path`` (matching the entry point definition .. _platform-scopes: -------------------------------- Platform-specific Configuration ------------------------------- .. warning:: - Prior to v1.0, each scope above -- except environment scopes -- had a - corresponding platform-specific scope (e.g., ``defaults/linux``, - ``system/windows``). This can now be accomplished through a suitably - placed :ref:`include.yaml ` file. + Prior to v1.0, each scope above -- except environment scopes -- had a corresponding platform-specific scope (e.g., ``defaults/linux``, ``system/windows``). + This can now be accomplished through a suitably placed :ref:`include.yaml ` file. There is often a need for platform-specific configuration settings. -For example, on most platforms, GCC is the preferred compiler. 
However, -on macOS (darwin), Clang often works for more packages, and is set as -the default compiler. This configuration is set in -``$(prefix)/etc/spack/defaults/darwin/packages.yaml``, which is included -as by ``$(prefix)/etc/spack/defaults/include.yaml``. Since it is an included -configuration of the ``defaults`` scope, settings in the ``defaults`` scope -will take precedence. You can override the values by specifying settings in -``system``, ``site``, ``user``, or ``custom``, where scope precedence is: - -#. ``defaults`` -#. ``system`` -#. ``site`` -#. ``user`` -#. ``custom`` - -and settings in each scope taking precedence over those found in configuration -files listed in the corresponding ``include.yaml`` files. +For example, on most platforms, GCC is the preferred compiler. +However, on macOS (darwin), Clang often works for more packages, and is set as the default compiler. +This configuration is set in ``$(prefix)/etc/spack/defaults/darwin/packages.yaml``, which is included by ``$(prefix)/etc/spack/defaults/include.yaml``. +Since it is an included configuration of the ``defaults`` scope, settings in the ``defaults`` scope will take precedence. For example, if ``$(prefix)/etc/spack/defaults/include.yaml`` contains: @@ -285,216 +265,210 @@ For example, if ``$(prefix)/etc/spack/defaults/include.yaml`` contains: include: - path: "${platform}" optional: true + - path: base -then, on macOS (``darwin``), configuration settings for files under the -``$(prefix)/etc/spack/defaults/darwin`` directory would be picked up. +then, on macOS (``darwin``), configuration settings for files under the ``$(prefix)/etc/spack/defaults/darwin`` directory would be picked up if they are present. +Because ``${platform}`` is above the ``base`` include in the list, ``${platform}`` settings will override anything in ``base`` if there are conflicts. .. note:: - You can get the name to use for ```` by running ``spack arch - --platform``. - -Platform-specific configuration files can similarly be set up for the -``system``, ``site``, and ``user`` scopes by creating an ``include.yaml`` -similar to the one above for ``defaults`` -- under the appropriate -configuration paths (see :ref:`config-overrides`) and creating a subdirectory -with the platform name that contains the configuration files. - -.. note:: + You can get the name to use for ```` by running ``spack arch --platform``. - Site-specific settings are located in configuration files under the - ``$(prefix)/etc/spack/`` directory. +Platform-specific configuration files can similarly be set up for any other scope by creating an ``include.yaml`` similar to the one above for ``defaults`` -- under the appropriate configuration paths (see :ref:`config-overrides`) and creating a subdirectory with the platform name that contains the configurations. .. _config-scope-precedence: ----------------- Scope Precedence ---------------- -When Spack queries for configuration parameters, it searches in -higher-precedence scopes first. So, settings in a higher-precedence file -can override those with the same key in a lower-precedence one. -For list-valued settings, Spack merges lists by *prepending* items from higher-precedence configurations -to items from lower-precedence configurations by default. -Completely ignoring lower-precedence configuration -options is supported with the ``::`` notation for keys (see -:ref:`config-overrides` below). +When Spack queries for configuration parameters, it searches in higher-precedence scopes first. 
+So, settings in a higher-precedence file can override those with the same key in a lower-precedence one. +For list-valued settings, Spack merges lists by *prepending* items from higher-precedence configurations to items from lower-precedence configurations by default. +Completely ignoring lower-precedence configuration options is supported with the ``::`` notation for keys (see :ref:`config-overrides` below). .. note:: - Settings in a scope take precedence over those provided in any included - configuration files (i.e., files listed in :ref:`include.yaml ` or - an ``include:`` section in ``spack.yaml``). + Settings in a scope take precedence over those provided in any included configuration files (i.e., files listed in :ref:`include.yaml ` or an ``include:`` section in ``spack.yaml``). There are also special notations for string concatenation and precedence override: -* ``+:`` will force *prepending* strings or lists. For lists, this is the default behavior. +* ``+:`` will force *prepending* strings or lists. + For lists, this is the default behavior. * ``-:`` works similarly, but for *appending* values. See :ref:`config-prepend-append` for more details. -^^^^^^^^^^^ Simple keys ^^^^^^^^^^^ -Let's look at an example of overriding a single key in a Spack configuration file. If -your configurations look like this: +Let's look at an example of overriding a single key in a Spack configuration file. +If your configurations look like this: .. code-block:: yaml - :caption: $(prefix)/etc/spack/defaults/config.yaml + :caption: ``$(prefix)/etc/spack/defaults/config.yaml`` + :name: code-example-defaults-config-yaml config: - install_tree: $spack/opt/spack + install_tree: + root: $spack/opt/spack build_stage: - - $tempdir/$user/spack-stage - - ~/.spack/stage + - $tempdir/$user/spack-stage + - ~/.spack/stage .. code-block:: yaml - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-user-config-yaml config: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory -Spack will only override ``install_tree`` in the ``config`` section, and -will take the site preferences for other settings. You can see the -final, combined configuration with the ``spack config get <section>`` -command: +Spack will only override ``install_tree`` in the ``config`` section, and will take the site preferences for other settings. +You can see the final, combined configuration with the ``spack config get <section>`` command: .. code-block:: console :emphasize-lines: 3 $ spack config get config config: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory build_stage: - - $tempdir/$user/spack-stage - - ~/.spack/stage + - $tempdir/$user/spack-stage + - ~/.spack/stage .. _config-prepend-append: -^^^^^^^^^^^^^^^^^^^^ String Concatenation ^^^^^^^^^^^^^^^^^^^^ -Above, the user ``config.yaml`` *completely* overrides specific settings in the -default ``config.yaml``. Sometimes, it is useful to add a suffix/prefix -to a path or name. To do this, you can use the ``-:`` notation for *append* -string concatenation at the end of a key in a configuration file. For example: +Above, the user ``config.yaml`` *completely* overrides specific settings in the default ``config.yaml``. +Sometimes, it is useful to add a suffix/prefix to a path or name. +To do this, you can use the ``-:`` notation for *append* string concatenation at the end of a key in a configuration file. +For example: ..
code-block:: yaml :emphasize-lines: 1 - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-append-install-tree config: - install_tree-: /my/custom/suffix/ + install_tree: + root-: /my/custom/suffix/ -Spack will then append to the lower-precedence configuration under the -``install_tree-:`` section: +Spack will then append to the lower-precedence configuration under the ``root`` key: .. code-block:: console $ spack config get config config: - install_tree: /some/other/directory/my/custom/suffix + install_tree: + root: /some/other/directory/my/custom/suffix build_stage: - - $tempdir/$user/spack-stage - - ~/.spack/stage + - $tempdir/$user/spack-stage + - ~/.spack/stage Similarly, ``+:`` can be used to *prepend* to a path or name: .. code-block:: yaml :emphasize-lines: 1 - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-prepend-install-tree config: - install_tree+: /my/custom/suffix/ + install_tree: + root+: /my/custom/suffix/ .. _config-overrides: -^^^^^^^^^^^^^^^^^^^^^^^^^^ Overriding entire sections ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Above, the user ``config.yaml`` only overrides specific settings in the -default ``config.yaml``. Sometimes, it is useful to *completely* -override lower-precedence settings. To do this, you can use *two* colons -at the end of a key in a configuration file. For example: +Above, the user ``config.yaml`` only overrides specific settings in the default ``config.yaml``. +Sometimes, it is useful to *completely* override lower-precedence settings. +To do this, you can use *two* colons at the end of a key in a configuration file. +For example: .. code-block:: yaml :emphasize-lines: 1 - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-override-config-section config:: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory -Spack will ignore all lower-precedence configuration under the -``config::`` section: +Spack will ignore all lower-precedence configuration under the ``config::`` section: .. code-block:: console $ spack config get config config: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory -^^^^^^^^^^^^^^^^^^^^ List-valued settings ^^^^^^^^^^^^^^^^^^^^ -Let's revisit the ``config.yaml`` example one more time. The -``build_stage`` setting's value is an ordered list of directories: +Let's revisit the ``config.yaml`` example one more time. +The ``build_stage`` setting's value is an ordered list of directories: .. code-block:: yaml - :caption: $(prefix)/etc/spack/defaults/config.yaml + :caption: ``$(prefix)/etc/spack/defaults/config.yaml`` + :name: code-example-defaults-build-stage - build_stage: + config: + build_stage: - $tempdir/$user/spack-stage - ~/.spack/stage -Suppose the user configuration adds its *own* list of ``build_stage`` -paths: +Suppose the user configuration adds its *own* list of ``build_stage`` paths: .. code-block:: yaml - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-user-build-stage - build_stage: + config: + build_stage: - /lustre-scratch/$user/spack - ~/mystage -Spack will first look at the paths in the defaults ``config.yaml``, then the -paths in the user's ``~/.spack/config.yaml``. The list in the -higher-precedence scope is *prepended* to the defaults. ``spack config -get config`` shows the result: +Spack will first look at the paths in the defaults ``config.yaml``, then the paths in the user's ``~/.spack/config.yaml``. 
+The list in the higher-precedence scope is *prepended* to the defaults. +``spack config get config`` shows the result: .. code-block:: console :emphasize-lines: 5-8 $ spack config get config config: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory build_stage: - - /lustre-scratch/$user/spack - - ~/mystage - - $tempdir/$user/spack-stage - - ~/.spack/stage + - /lustre-scratch/$user/spack + - ~/mystage + - $tempdir/$user/spack-stage + - ~/.spack/stage -As in :ref:`config-overrides`, the higher-precedence scope can -*completely* override the lower-precedence scope using ``::``. +As in :ref:`config-overrides`, the higher-precedence scope can *completely* override the lower-precedence scope using ``::``. +So if the user config looked like this: .. code-block:: yaml :emphasize-lines: 1 - :caption: ~/.spack/config.yaml + :caption: ``~/.spack/config.yaml`` + :name: code-example-override-build-stage - build_stage:: + config: + build_stage:: - /lustre-scratch/$user/spack - ~/mystage @@ -506,7 +480,8 @@ The merged configuration would look like this: $ spack config get config config: - install_tree: /some/other/directory + install_tree: + root: /some/other/directory build_stage: - /lustre-scratch/$user/spack - ~/mystage @@ -514,119 +489,96 @@ The merged configuration would look like this: .. _config-file-variables: ---------------------- Config File Variables --------------------- -Spack understands several variables which can be used in config file -paths wherever they appear. There are three sets of these variables: -Spack-specific variables, environment variables, and user path -variables. Spack-specific variables and environment variables are both -indicated by prefixing the variable name with ``$``. User path variables -are indicated at the start of the path with ``~`` or ``~user``. +Spack understands several variables that can be used in config file paths wherever they appear. +There are three sets of these variables: Spack-specific variables, environment variables, and user path variables. +Spack-specific variables and environment variables are both indicated by prefixing the variable name with ``$``. +User path variables are indicated at the start of the path with ``~`` or ``~user``. -^^^^^^^^^^^^^^^^^^^^^^^^ Spack-specific variables ^^^^^^^^^^^^^^^^^^^^^^^^ -Spack understands over a dozen special variables. These are: +Spack understands over a dozen special variables. +These are: * ``$env``: name of the currently active :ref:`environment <environments>` * ``$spack``: path to the prefix of this Spack installation -* ``$tempdir``: default system temporary directory (as specified in - Python's `tempfile.tempdir - <https://docs.python.org/3/library/tempfile.html#tempfile.tempdir>`_ - variable. +* ``$tempdir``: default system temporary directory (as specified in Python's `tempfile.tempdir <https://docs.python.org/3/library/tempfile.html#tempfile.tempdir>`_ variable). * ``$user``: name of the current user -* ``$user_cache_path``: user cache directory (``~/.spack`` unless - :ref:`overridden <local-config-overrides>`) +* ``$user_cache_path``: user cache directory (``~/.spack`` unless :ref:`overridden <local-config-overrides>`) * ``$architecture``: the architecture triple of the current host, as detected by Spack. * ``$arch``: alias for ``$architecture``. * ``$platform``: the platform of the current host, as detected by Spack. -* ``$operating_system``: the operating system of the current host, as - detected by the ``distro`` Python module.
+* ``$operating_system``: the operating system of the current host, as detected by the ``distro`` Python module. * ``$os``: alias for ``$operating_system``. -* ``$target``: the ISA target for the current host, as detected by - ArchSpec. E.g. ``skylake`` or ``neoverse-n1``. -* ``$target_family``. The target family for the current host, as - detected by ArchSpec. E.g. ``x86_64`` or ``aarch64``. +* ``$target``: the ISA target for the current host, as detected by ArchSpec, e.g. ``skylake`` or ``neoverse-n1``. +* ``$target_family``: the target family for the current host, as detected by ArchSpec, e.g. ``x86_64`` or ``aarch64``. * ``$date``: the current date in the format YYYY-MM-DD * ``$spack_short_version``: the Spack version truncated to the first two components. -Note that, as with shell variables, you can write these as ``$varname`` -or with braces to distinguish the variable from surrounding characters: -``${varname}``. Their names are also case insensitive, meaning that -``$SPACK`` works just as well as ``$spack``. These special variables are -substituted first, so any environment variables with the same name will -not be used. +Note that, as with shell variables, you can write these as ``$varname`` or with braces to distinguish the variable from surrounding characters: ``${varname}``. +Their names are also case-insensitive, meaning that ``$SPACK`` works just as well as ``$spack``. +These special variables are substituted first, so any environment variables with the same name will not be used. -^^^^^^^^^^^^^^^^^^^^^ Environment variables ^^^^^^^^^^^^^^^^^^^^^ -After Spack-specific variables are evaluated, environment variables are -expanded. These are formatted like Spack-specific variables, e.g., -``${varname}``. You can use this to insert environment variables in your -Spack configuration. +After Spack-specific variables are evaluated, environment variables are expanded. +These are formatted like Spack-specific variables, e.g., ``${varname}``. +You can use this to insert environment variables in your Spack configuration. -^^^^^^^^^^^^^^^^^^^^^ User home directories ^^^^^^^^^^^^^^^^^^^^^ -Spack performs Unix-style tilde expansion on paths in configuration -files. This means that tilde (``~``) will expand to the current user's -home directory, and ``~user`` will expand to a specified user's home -directory. The ``~`` must appear at the beginning of the path, or Spack -will not expand it. +Spack performs Unix-style tilde expansion on paths in configuration files. +This means that tilde (``~``) will expand to the current user's home directory, and ``~user`` will expand to a specified user's home directory. +The ``~`` must appear at the beginning of the path, or Spack will not expand it. .. _configuration_environment_variables: -------------------------- Environment Modifications ------------------------- -Spack allows users to prescribe custom environment modifications in a few places -within its configuration files. Every time these modifications are allowed, -they are specified as a dictionary, like in the following example: +Spack allows users to prescribe custom environment modifications in a few places within its configuration files. +Wherever these modifications are allowed, they are specified as a dictionary, like in the following example: ..
code-block:: yaml environment: set: - LICENSE_FILE: '/path/to/license' + LICENSE_FILE: "/path/to/license" unset: - CPATH - LIBRARY_PATH append_path: - PATH: '/new/bin/dir' + PATH: "/new/bin/dir" -The possible actions that are permitted are ``set``, ``unset``, ``append_path``, -``prepend_path``, and finally ``remove_path``. They all require a dictionary -of variable names mapped to the values used for the modification, -with the exception of ``unset``, which requires just a list of variable names. +The possible actions that are permitted are ``set``, ``unset``, ``append_path``, ``prepend_path``, and finally ``remove_path``. +They all require a dictionary of variable names mapped to the values used for the modification, with the exception of ``unset``, which requires just a list of variable names. No particular order is ensured for the execution of each of these modifications. ----------------------------- Seeing Spack's Configuration ---------------------------- -With so many scopes overriding each other, it can sometimes be difficult -to understand what Spack's final configuration looks like. +With so many scopes overriding each other, it can sometimes be difficult to understand what Spack's final configuration looks like. -Spack provides two useful ways to view the final "merged" version of any -configuration file: ``spack config get`` and ``spack config blame``. +Spack provides two useful ways to view the final "merged" version of any configuration file: ``spack config get`` and ``spack config blame``. .. _cmd-spack-config-get: -^^^^^^^^^^^^^^^^^^^^ ``spack config get`` ^^^^^^^^^^^^^^^^^^^^ -``spack config get`` shows a fully merged configuration file, taking into -account all scopes. For example, to see the fully merged -``config.yaml``, you can type: +``spack config get`` shows a fully merged configuration file, taking into account all scopes. +For example, to see the fully merged ``config.yaml``, you can type: .. code-block:: console @@ -637,7 +589,8 @@ account all scopes. For example, to see the fully merged verify_ssl: true dirty: false build_jobs: 8 - install_tree: $spack/opt/spack + install_tree: + root: $spack/opt/spack template_dirs: - $spack/templates directory_layout: {architecture}/{compiler.name}-{compiler.version}/{name}-{version}-{hash} @@ -655,8 +608,7 @@ Likewise, this will show the fully merged ``packages.yaml``: $ spack config get packages -You can use this in conjunction with the ``-C`` / ``--config-scope`` argument to -see how your scope will affect Spack's configuration: +You can use this in conjunction with the ``-C`` / ``--config-scope`` argument to see how your scope will affect Spack's configuration: .. code-block:: console @@ -665,14 +617,11 @@ see how your scope will affect Spack's configuration: .. _cmd-spack-config-blame: -^^^^^^^^^^^^^^^^^^^^^^ ``spack config blame`` ^^^^^^^^^^^^^^^^^^^^^^ -``spack config blame`` functions much like ``spack config get``, but it -shows exactly which configuration file each setting came from. If you -do not know why Spack is behaving a certain way, this command can help you track -down the source of the configuration: +``spack config blame`` functions much like ``spack config get``, but it shows exactly which configuration file each setting came from. +If you do not know why Spack is behaving a certain way, this command can help you track down the source of the configuration: .. 
code-block:: console @@ -696,43 +645,30 @@ down the source of the configuration: /home/myuser/spack/etc/spack/defaults/config.yaml:62 misc_cache: ~/.spack/cache /home/myuser/spack/etc/spack/defaults/config.yaml:86 locks: True -You can see above that the ``build_jobs`` and ``debug`` settings are -built-in and are not overridden by a configuration file. The -``verify_ssl`` setting comes from the ``--insecure`` option on the -command line. The ``dirty`` and ``install_tree`` settings come from the custom -scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration -options come from the default configuration files that ship with Spack. +You can see above that the ``build_jobs`` and ``debug`` settings are built-in and are not overridden by a configuration file. +The ``verify_ssl`` setting comes from the ``--insecure`` option on the command line. +The ``dirty`` and ``install_tree`` settings come from the custom scopes ``./my-scope`` and ``./my-scope-2``, and all other configuration options come from the default configuration files that ship with Spack. .. _local-config-overrides: ------------------------------- Overriding Local Configuration ------------------------------ -Spack's ``system`` and ``user`` scopes provide ways for administrators and users to set -global defaults for all Spack instances, but for use cases where one wants a clean Spack -installation, these scopes can be undesirable. For example, users may want to opt out of -global system configuration, or they may want to ignore their own home directory -settings when running in a continuous integration environment. +Spack's ``system`` and ``user`` scopes provide ways for administrators and users to set global defaults for all Spack instances, but for use cases where one wants a clean Spack installation, these scopes can be undesirable. +For example, users may want to opt out of global system configuration, or they may want to ignore their own home directory settings when running in a continuous integration environment. -Spack also, by default, keeps various caches and user data in ``~/.spack``, but -users may want to override these locations. +Spack also, by default, keeps various caches and user data in ``~/.spack``, but users may want to override these locations. -Spack provides three environment variables that allow you to override or opt out of -configuration locations: +Spack provides three environment variables that allow you to override or opt out of configuration locations: -* ``SPACK_USER_CONFIG_PATH``: Override the path to use for the - ``user`` scope (``~/.spack`` by default). -* ``SPACK_SYSTEM_CONFIG_PATH``: Override the path to use for the - ``system`` scope (``/etc/spack`` by default). -* ``SPACK_DISABLE_LOCAL_CONFIG``: Set this environment variable to completely disable - **both** the system and user configuration directories. Spack will then only consider its - own defaults and ``site`` configuration locations. +* ``SPACK_USER_CONFIG_PATH``: Override the path to use for the ``user`` scope (``~/.spack`` by default). +* ``SPACK_SYSTEM_CONFIG_PATH``: Override the path to use for the ``system`` scope (``/etc/spack`` by default). +* ``SPACK_DISABLE_LOCAL_CONFIG``: Set this environment variable to completely disable **both** the system and user configuration directories. + Spack will then only consider its own defaults and ``site`` configuration locations. 
And one that allows you to move the default cache location: -* ``SPACK_USER_CACHE_PATH``: Override the default path to use for user data - (misc_cache, tests, reports, etc.) +* ``SPACK_USER_CACHE_PATH``: Override the default path to use for user data (misc_cache, tests, reports, etc.) With these settings, if you want to isolate Spack in a CI environment, you can do this: diff --git a/lib/spack/docs/configuring_compilers.rst b/lib/spack/docs/configuring_compilers.rst index 85d8164586e83b..973ec8211029a5 100644 --- a/lib/spack/docs/configuring_compilers.rst +++ b/lib/spack/docs/configuring_compilers.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,7 +9,6 @@ .. _compiler-config: -===================== Configuring Compilers ===================== @@ -23,13 +23,12 @@ For convenience, Spack will automatically detect compilers as externals the firs .. _cmd-spack-compilers: ------------------------ ``spack compiler list`` ----------------------- You can see which compilers are available to Spack by running ``spack compiler list``: -.. code-block:: console +.. code-block:: spec $ spack compiler list ==> Available compilers @@ -50,12 +49,11 @@ To see them you need a specific option: -- gcc ubuntu20.04-x86_64 --------------------------------------- - gcc@12.4.0 -Any of these compilers can be used to build Spack packages. +Any of these compilers can be used to build Spack packages. More details on how this is done can be found in :ref:`sec-specs`. .. _cmd-spack-compiler-find: ------------------------ ``spack compiler find`` ----------------------- @@ -65,8 +63,8 @@ If you do not see a compiler in the list shown by: $ spack compiler list -but you want to use it with Spack, you can simply run ``spack compiler find`` with the -path to where the compiler is installed. For example: +but you want to use it with Spack, you can simply run ``spack compiler find`` with the path to where the compiler is installed. +For example: .. code-block:: console @@ -76,10 +74,9 @@ path to where the compiler is installed. For example: ==> Compilers are defined in the following files: /home/user/.spack/packages.yaml -Or you can run ``spack compiler find`` with no arguments to force -auto-detection. This is useful if you do not know where compilers are -installed, but you know that new compilers have been added to your -``PATH``. For example, you might load a module, like this: +Or you can run ``spack compiler find`` with no arguments to force auto-detection. +This is useful if you do not know where compilers are installed, but you know that new compilers have been added to your ``PATH``. +For example, you might load a module, like this: .. code-block:: console @@ -88,24 +85,20 @@ installed, but you know that new compilers have been added to your ==> Added 1 new compiler to /home/user/.spack/packages.yaml gcc@4.9.0 -This loads the environment module for gcc-4.9.0 to add it to -``PATH``, and then it adds the compiler to Spack. +This loads the environment module for gcc-4.9.0 to add it to ``PATH``, and then it adds the compiler to Spack. .. note:: - By default, Spack does not fill in the ``modules:`` field in the - ``packages.yaml`` file. If you are using a compiler from a - module, then you should add this field manually. + By default, Spack does not fill in the ``modules:`` field in the ``packages.yaml`` file. 
+ If you are using a compiler from a module, then you should add this field manually. See the section on :ref:`compilers-requiring-modules`. .. _cmd-spack-compiler-info: ------------------------ ``spack compiler info`` ----------------------- -If you want to see additional information about specific compilers, you can run -``spack compiler info``: +If you want to see additional information about specific compilers, you can run ``spack compiler info``: .. code-block:: console @@ -132,10 +125,9 @@ If you want to see additional information about specific compilers, you can run fortran: /usr/bin/gfortran-10 This shows the details of the compilers that were detected by Spack. -Notice also that we didn't have to be too specific about the version. We just said ``gcc``, and we got information -about all the matching compilers. +Notice also that we didn't have to be too specific about the version. +We just said ``gcc``, and we got information about all the matching compilers. ------------------------------------------- Manual configuration of external compilers ------------------------------------------ @@ -166,9 +158,10 @@ Each compiler has an "external" entry in the file with ``extra_attributes``: The compiler executables are listed under ``extra_attributes:compilers``, and are keyed by language. Once you save the file, the configured compilers will show up in the list displayed by ``spack compilers``. -You can also add compiler flags to manually configured compilers. These flags should be specified in the -``flags`` section of the compiler specification. The valid flags are ``cflags``, ``cxxflags``, ``fflags``, -``cppflags``, ``ldflags``, and ``ldlibs``. For example: +You can also add compiler flags to manually configured compilers. +These flags should be specified in the ``flags`` section of the compiler specification. +The valid flags are ``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and ``ldlibs``. +For example: .. code-block:: yaml @@ -187,18 +180,15 @@ You can also add compiler flags to manually configured compilers. These flags sh cxxflags: -O3 -fPIC cppflags: -O3 -fPIC -These flags will be treated by Spack as if they were entered from -the command line each time this compiler is used. The compiler wrappers -then inject those flags into the compiler command. Compiler flags -entered from the command line will be discussed in more detail in the -following section. +These flags will be treated by Spack as if they were entered from the command line each time this compiler is used. +The compiler wrappers then inject those flags into the compiler command. +Compiler flags entered from the command line will be discussed in more detail in the following section. Some compilers also require additional environment configuration. -Examples include Intel's oneAPI and AMD's AOCC compiler suites, -which have custom scripts for loading environment variables and setting paths. -These variables should be specified in the ``environment`` section of the compiler -specification. The operations available to modify the environment are ``set``, ``unset``, -``prepend_path``, ``append_path``, and ``remove_path``. For example: +Examples include Intel's oneAPI and AMD's AOCC compiler suites, which have custom scripts for loading environment variables and setting paths. +These variables should be specified in the ``environment`` section of the compiler specification. +The operations available to modify the environment are ``set``, ``unset``, ``prepend_path``, ``append_path``, and ``remove_path``. 
+For example: .. code-block:: yaml @@ -216,7 +206,7 @@ specification. The operations available to modify the environment are ``set``, ` set: MKL_ROOT: "/path/to/mkl/root" unset: # A list of environment variables to unset - - CC + - CC prepend_path: # Similar for append|remove_path LD_LIBRARY_PATH: /ld/paths/added/by/setvars/sh @@ -241,7 +231,6 @@ This is useful for forcing certain compilers to RPATH their own runtime librarie .. _compilers-requiring-modules: ---------------------------- Compilers Requiring Modules --------------------------- @@ -265,31 +254,50 @@ In such a case, you should tell Spack which module(s) to load in order to run th fortran: /opt/compilers/bin/gfortran-10 modules: [gcc/10.5.0] -Some compilers require special environment settings to be loaded not just -to run, but also to execute the code they build, breaking packages that -need to execute code they just compiled. If it's not possible or -practical to use a better compiler, you'll need to ensure that -environment settings are preserved for compilers like this (i.e., you'll -need to load the module or source the compiler's shell script). +Some compilers require special environment settings to be loaded not just to run, but also to execute the code they build, breaking packages that need to execute code they just compiled. +If it's not possible or practical to use a better compiler, you'll need to ensure that environment settings are preserved for compilers like this (i.e., you'll need to load the module or source the compiler's shell script). -By default, Spack tries to ensure that builds are reproducible by -cleaning the environment before building. If this interferes with your -compiler settings, you CAN use ``spack install --dirty`` as a workaround. +By default, Spack tries to ensure that builds are reproducible by cleaning the environment before building. +If this interferes with your compiler settings, you CAN use ``spack install --dirty`` as a workaround. Note that this MAY interfere with package builds. ------------------------ Build Your Own Compiler ----------------------- If you require a specific compiler and version, you can have Spack build it for you. For example: -.. code-block:: console +.. code-block:: spec $ spack install gcc@14+binutils Once the compiler is installed, you can start using it without additional configuration: -.. code-block:: console +.. code-block:: spec $ spack install hdf5~mpi %gcc@14 + +Mixing Compilers +---------------- + +For more options on configuring Spack to mix different compilers for different languages, see :ref:`the toolchains configuration docs `. + +To disable mixing (e.g. if you have multiple compilers defined, but want each concretized DAG to use one of them consistently), you can set: + +.. code-block:: yaml + + concretizer: + compiler_mixing: false + +This affects root specs and any (transitive) link or run dependencies. +Build-only dependencies are allowed to use different compilers (even when this is set). + +Some packages are difficult to build with high performance compilers, and it may be necessary to enable compiler mixing just for those packages. +To enable mixing for specific packages, specify an allow-list in the ``compiler_mixing`` config: + +.. code-block:: yaml + + concretizer: + compiler_mixing: ["openssl"] + +Adding ``openssl`` to the compiler mixing allow-list does not allow mixing for dependencies of ``openssl``. 
\ No newline at end of file diff --git a/lib/spack/docs/containers.rst b/lib/spack/docs/containers.rst index e16a6e968f27c1..6086fd8549c2a7 100644 --- a/lib/spack/docs/containers.rst +++ b/lib/spack/docs/containers.rst @@ -1,94 +1,249 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) .. meta:: :description lang=en: - Learn how to turn Spack environments into container images, either by copying existing installations or by generating recipes for Docker and Singularity. + Learn how to turn Spack packages and Spack environments into OCI-compatible container images, either by exporting existing installations or by generating recipes for Docker and Singularity. .. _containers: -================ Container Images ================ -Spack :ref:`environments` can easily be turned into container images. This page -outlines two ways in which this can be done: +Whether you want to share applications with others who do not use Spack, deploy on cloud services that run container images, or move workloads to HPC clusters, containers are an effective way to package and distribute software. + +Spack offers two fundamentally different paradigms for creating container images, each with distinct advantages. +You can either export software packages already built on your host system as a container image, or you can generate a traditional recipe file (``Dockerfile`` or Singularity Definition File) to build the software from scratch inside the container. + +.. list-table:: Comparison of Spack container image creation methods + :widths: 15 42 43 + :header-rows: 1 + + * - + - :ref:`Container Image Export <exporting-images>` + - :ref:`Recipe Generation <generating-recipes>` + * - **Purpose** + - Exports existing installations from the host system as a container image + - Runs ``spack install`` to build software from source *inside* the container build process + * - **Spack Command** + - ``spack buildcache push`` + - ``spack containerize`` + * - **Reproducibility** + - Limited: depends on the host system + - High: controlled build environment + * - **Input** + - Installed Spack packages or environments + - A ``spack.yaml`` file + * - **Speed** + - Faster: copies existing binaries + - Slower: typically builds from source + * - **Troubleshooting** + - Build issues are resolved on the host, where debugging is simpler + - Build issues must be resolved inside the container build process + * - **Build Tools** + - None + - Docker, Podman, Singularity, or similar + * - **Privileges** + - None (rootless) + - May require elevated privileges (root), depending on the container build tool + * - **Output destination** + - OCI-compatible registry + - Local Docker or Singularity image + + +.. _exporting-images: + +Exporting Spack installations as Container Images +------------------------------------------------- + +The command + +.. code-block:: text + + spack buildcache push [--base-image BASE_IMAGE] [--tag TAG] mirror [specs...] + +creates and pushes a container image to an OCI-compatible container registry, with the ``mirror`` argument specifying a registry (see below). + +Think of this command less as "building a container" and more as archiving a working software stack into a portable image. + +Container images created this way are **minimal**: they contain only runtime dependencies of the specified specs, the base image, and nothing else. +Spack itself is *not* included in the resulting image.
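+For instance, the following invocation is a minimal sketch, where ``zlib`` stands in for any package you have already installed and ``localhost:5000/my-image`` is a placeholder for your registry and image name:
+
+.. code-block:: console
+
+   $ spack buildcache push --base-image ubuntu:24.04 --tag latest oci+http://localhost:5000/my-image zlib
+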
+ +The arguments are as follows: + +``--base-image BASE_IMAGE`` + Specifies the base image to use for the container. + This should be a minimal Linux distribution with a libc that is compatible with the host system. + For example, if your host system is Ubuntu 22.04, you can use ``ubuntu:22.04``, ``ubuntu:24.04``, or newer: the libc in the container image must be at least the version of the host system, assuming ABI compatibility. + It is also perfectly fine to use a completely different Linux distribution as long as the libc is compatible. + +``--tag TAG`` + Specifies a container image tag to use. + This tag is used for the image consisting of all specs specified in the command line together. + +``mirror`` argument + Either the name of a configured OCI registry image (in ``mirrors.yaml``), or a URL specifying the registry and image name. + + * When pushing to remote registries, you will typically :ref:`specify the name of a registry <configuring-container-registries>` from your Spack configuration. + * When pushing to a local registry, you can simply specify a URL like ``oci+http://localhost:5000/[image]``, where ``[image]`` is the name of the image to create, and ``oci+http://`` indicates that the registry does not support HTTPS. + +``specs...`` arguments + A list of Spack specs to include in the image. + These are packages that have already been installed by Spack. + When a Spack environment is active, only packages from the environment can be included in the image. + If no specs are given, and a Spack environment is active, all packages in the environment are included. + +Spack publishes every individual dependency as a separate image layer, which allows for efficient storage and transfer of images with overlapping dependencies. -1. By installing the environment on the host system and copying the installations - into the container image. This approach does not require any tools like Docker - or Singularity to be installed. -2. By generating a Docker or Singularity recipe that can be used to build the - container image. In this approach, Spack builds the software inside the - container runtime, not on the host system. +.. note:: + The Docker ``overlay2`` storage driver is limited to 128 layers, above which a ``max depth exceeded`` error may be produced when pulling the image. + You can hit this limit when exporting container images from larger environments or packages with many dependencies. + There are `alternative drivers `_ to work around this limitation. + +The ``spack buildcache push --base-image ...`` command serves a **dual purpose**: + +1. It makes container images available for container runtimes like Docker and Podman. +2. It makes the *same* binaries available :ref:`as a build cache <binary_caches>` for ``spack install``. + +.. _configuring-container-registries: + +Container registries +^^^^^^^^^^^^^^^^^^^^ + +The ``spack buildcache push`` command exports container images directly to an OCI-compatible container registry, such as Docker Hub, GitHub Container Registry (GHCR), Amazon ECR, Google GCR, Azure ACR, or a private registry. + +These services require authentication, which is configured with the ``spack mirror add`` command: + +.. code-block:: spec
+ $ spack mirror add \ + --oci-username-variable REGISTRY_USER \ + --oci-password-variable REGISTRY_TOKEN \ + example-registry \ + oci://example.com/name/image ---------------------------- -From Existing Installations ---------------------------- +This registers a mirror named ``example-registry`` in your ``mirrors.yaml`` configuration file that is associated with a container registry and image ``example.com/name/image``. +The registry can then be referred to by its name, e.g. ``spack buildcache push example-registry ...``. -If you already have a Spack environment installed on your system, you can -share the binaries as an OCI-compatible container image. To get started, you -just have to configure an OCI registry and run ``spack buildcache push``. +The ``oci://`` scheme in the URL indicates that this is an OCI-compatible registry with HTTPS support. +If you only specify ``oci://name/image``, Spack will assume the registry is hosted on Docker Hub. + +The ``--oci-username-variable`` and ``--oci-password-variable`` options specify the names of *environment variables* that will be used to authenticate with the registry. +Spack does not store your credentials in configuration files; it expects you to set the corresponding environment variables in your shell before running the ``spack buildcache push`` command: .. code-block:: console - # Create and install an environment in the current directory - spack env create -d . - spack -e . add pkg-a pkg-b - spack -e . install + $ REGISTRY_USER=user REGISTRY_TOKEN=token spack buildcache push ... - # Configure the registry - spack -e . mirror add --oci-username-variable REGISTRY_USER \ - --oci-password-variable REGISTRY_TOKEN \ - container-registry oci://example.com/name/image +.. seealso:: - # Push the image (do set REGISTRY_USER and REGISTRY_TOKEN) - spack -e . buildcache push --update-index --base-image ubuntu:22.04 --tag my_env container-registry + The registry password is typically a *personal access token* (PAT) generated on the registry website or a command line tool. + In the section :ref:`oci-authentication` we list specific examples for popular registries. -The resulting container image can then be run as follows: +If you don't have access to a remote registry, or wish to experiment with container images locally, you can run a *local registry* on your machine and let Spack push to it. +This is as simple as running the `official registry image `_ in the background: .. code-block:: console - $ docker run -it example.com/name/image:my_env + $ docker run -d -p 5000:5000 --name registry registry -The image generated by Spack consists of the specified base image with each package from the -environment as a separate layer on top. The image is minimal by construction, it only contains the -environment roots and its runtime dependencies. +In this case, it is not necessary to configure a named mirror, you can simply refer to it by URL using ``oci+http://localhost:5000/[image]``, where ``[image]`` is the name of the image to create, and ``oci+http://`` indicates that the registry does not support HTTPS. -.. note:: +.. _local-registry-example: - When using registries like GHCR and Docker Hub, the ``--oci-password`` flag specifies not - the password for your account but rather a personal access token that you need to generate separately. +Example 1: pushing selected specs as container images +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The specified ``--base-image`` should have a libc that is compatible with the host system. 
-For example, if your host system is Ubuntu 20.04, you can use ``ubuntu:20.04``, ``ubuntu:22.04``, -or newer: the libc in the container image must be at least the version of the host system, -assuming ABI compatibility. It is also perfectly fine to use a completely different -Linux distribution as long as the libc is compatible. +Assume we have ``python@3.13`` and ``cmake@3`` already installed by Spack, and we want to push them as a combined container image ``software_stack:latest`` to a local registry. -For convenience, Spack also turns the OCI registry into a :ref:`build cache `, -so that future ``spack install`` of the environment will simply pull the binaries from the -registry instead of doing source builds. The flag ``--update-index`` is needed to make Spack -take the build cache into account when concretizing. +First we verify that the specs are indeed installed: -.. note:: +.. code-block:: spec - When generating container images in CI, the approach above is recommended when CI jobs - already run in a sandboxed environment. You can simply use Spack directly - in the CI job and push the resulting image to a registry. Subsequent CI jobs should - run faster because Spack can install from the same registry instead of rebuilding from - sources. + $ spack find --long python@3.13 cmake@3 + + -- linux-ubuntu24.04-zen2 / %c,cxx=gcc@13.3.0 ------------------- + scpgv2h cmake@3.31.8 n54tvjw python@3.13.5 + +Since these are the only installations on our system, we can simply refer to them by their spec strings. +In case there are multiple installations, we could use ``python/n54tvjw`` and ``cmake/scpgv2h`` to uniquely refer to them by hashes. + +We now use ``spack buildcache push`` to publish these packages as a container image with ``ubuntu:24.04`` as a base image: + +.. code-block:: console + + $ spack buildcache push \ + --base-image ubuntu:24.04 \ + --tag latest \ + oci+http://localhost:5000/software_stack \ + python@3.13 cmake@3 + +They can now be pulled and run with Docker or any other OCI-compatible container runtime: + +.. code-block:: console + + $ docker run -it localhost:5000/software_stack:latest + root@container-id:/# python3 --version + Python 3.13.5 + root@container-id:/# cmake --version + cmake version 3.31.8 + +.. _installed-environments-as-containers: + +Example 2: pushing entire Spack environments as container images +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In this example we show how to export an installed :ref:`Spack environment ` as a container image and push it to a remote registry. + +.. code-block:: spec + + # Create and install an environment + $ spack env create . + $ spack -e . add python@3.13 cmake@3 + $ spack -e . install + + # Configure a remote registry + $ spack -e . mirror add \ + --oci-username-variable REGISTRY_USER \ + --oci-password-variable REGISTRY_TOKEN \ + container-registry \ + oci://example.com/name/image + + # Push the image + $ REGISTRY_USER=user REGISTRY_TOKEN=token \ + spack -e . buildcache push \ + --update-index \ + --base-image ubuntu:24.04 \ + --tag my_env \ + container-registry + +The resulting container image can then be run as follows: + +.. code-block:: console + + $ docker run -it example.com/name/image:my_env + root@container-id:/# python3 --version + Python 3.13.5 + root@container-id:/# cmake --version + cmake version 3.31.8 + +The advantage of using a Spack environment is that we do not have to specify the individual specs on the command line when pushing the image. 
+With environments, all root specs and their runtime dependencies are included in the container image. + +If you do specify specs in ``spack buildcache push`` with an environment active, only those matching specs from the environment are included in the image. + + +.. _generating-recipes: ---------------------------------------------- Generating recipes for Docker and Singularity --------------------------------------------- -Apart from copying existing installations into container images, Spack can also -generate recipes for container images. This is useful if you want to run Spack -itself in a sandboxed environment instead of on the host system. +Apart from exporting existing installations into container images, Spack can also generate recipes for container images. +This is useful if you want to run Spack itself in a sandboxed environment instead of on the host system. + +This approach requires you to have a container runtime like Docker or Singularity installed on your system, and can only be used with Spack environments. Since recipes need a little more boilerplate than: @@ -97,13 +252,13 @@ Since recipes need a little more boilerplate than: COPY spack.yaml /environment RUN spack -e /environment install -Spack provides a command to generate customizable recipes for container images. Customizations -include minimizing the size of the image, installing packages in the base image using the system -package manager, and setting up a proper entrypoint to run the image. +Spack provides a command to generate customizable recipes for container images. +Customizations include minimizing the size of the image, installing packages in the base image using the system package manager, and setting up a proper entrypoint to run the image. + +.. _cmd-spack-containerize: -~~~~~~~~~~~~~~~~~~~~ A Quick Introduction -~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^ Consider having a Spack environment like the following: @@ -114,60 +269,72 @@ Consider having a Spack environment like the following: - gromacs+mpi - mpich -Producing a ``Dockerfile`` from it is as simple as changing directories to -where the ``spack.yaml`` file is stored and running the following command: +Producing a ``Dockerfile`` from it is as simple as changing directories to where the ``spack.yaml`` file is stored and running the following command: .. code-block:: console $ spack containerize > Dockerfile -The ``Dockerfile`` that gets created uses multi-stage builds and -other techniques to minimize the size of the final image: +The ``Dockerfile`` that gets created uses multi-stage builds and other techniques to minimize the size of the final image: ..
code-block:: docker # Build stage with Spack pre-installed and ready to be used - FROM spack/ubuntu-noble:latest as builder + FROM spack/ubuntu-jammy:develop AS builder + # What we want to install and how we want to install it # is specified in a manifest file (spack.yaml) - RUN mkdir /opt/spack-environment \ - && (echo "spack:" \ - && echo " specs:" \ - && echo " - gromacs+mpi" \ - && echo " - mpich" \ - && echo " concretizer:" \ - && echo " unify: true" \ - && echo " config:" \ - && echo " install_tree: /opt/software" \ - && echo " view: /opt/view") > /opt/spack-environment/spack.yaml + RUN mkdir -p /opt/spack-environment && \ + set -o noclobber \ + && (echo spack: \ + && echo ' specs:' \ + && echo ' - gromacs+mpi' \ + && echo ' - mpich' \ + && echo ' concretizer:' \ + && echo ' unify: true' \ + && echo ' config:' \ + && echo ' install_tree:' \ + && echo ' root: /opt/software' \ + && echo ' view: /opt/views/view') > /opt/spack-environment/spack.yaml # Install the software, remove unnecessary deps RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y # Strip all the binaries - RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \ + RUN find -L /opt/views/view/* -type f -exec readlink -f '{}' \; | \ xargs file -i | \ grep 'charset=binary' | \ grep 'x-executable\|x-archive\|x-sharedlib' | \ - awk -F: '{print $1}' | xargs strip -s + awk -F: '{print $1}' | xargs strip # Modifications to the environment that are necessary to run RUN cd /opt/spack-environment && \ - spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh + spack env activate --sh -d . > activate.sh + # Bare OS image to run the installed executables - FROM ubuntu:18.04 + FROM ubuntu:22.04 COPY --from=builder /opt/spack-environment /opt/spack-environment COPY --from=builder /opt/software /opt/software - COPY --from=builder /opt/view /opt/view - COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh + COPY --from=builder /opt/views /opt/views - ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"] + RUN { \ + echo '#!/bin/sh' \ + && echo '.' /opt/spack-environment/activate.sh \ + && echo 'exec "$@"'; \ + } > /entrypoint.sh \ + && chmod a+x /entrypoint.sh \ + && ln -s /opt/views/view /opt/view -The image itself can then be built and run in the usual way with any of the -tools suitable for the task. For instance, if we decided to use Docker: + + ENTRYPOINT [ "/entrypoint.sh" ] + CMD [ "/bin/bash" ] + + +The image itself can then be built and run in the usual way with any of the tools suitable for the task. +For instance, if we decided to use Docker: .. code-block:: bash @@ -176,27 +343,24 @@ tools suitable for the task. For instance, if we decided to use Docker: [ ... ] $ docker run -it myimage -The various components involved in the generation of the recipe and their -configuration are discussed in detail in the sections below. +The various components involved in the generation of the recipe and their configuration are discussed in detail in the sections below. .. _container_spack_images: -~~~~~~~~~~~~~~~~~~~~~~~~~~ -Spack Images on Docker Hub -~~~~~~~~~~~~~~~~~~~~~~~~~~ +Official Container Images for Spack +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Container images with Spack preinstalled are available on `Docker Hub `_ and `GitHub Container Registry `_. +These images are based on popular distributions and are named accordingly (e.g. ``spack/ubuntu-noble`` for Spack on top of ``ubuntu:24.04``). 
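+A quick way to try one of these images is a plain pull and run; the following is a usage sketch, assuming Docker is available locally:
+
+.. code-block:: console
+
+   $ docker pull spack/ubuntu-noble:latest
+   $ docker run -it spack/ubuntu-noble:latest
+
+Here the ``latest`` tag resolves to the most recent stable release of Spack, as summarized in the tags table below.
+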
-Docker images with Spack preinstalled and ready to be used are -built when a release is tagged, or nightly on ``develop``. The images -are then pushed both to `Docker Hub `_ -and to `GitHub Container Registry `_. -The OSes that are currently supported are summarized in the table below: +The table below summarizes the available base images and their corresponding Spack images: .. _containers-supported-os: -.. list-table:: Supported operating systems +.. list-table:: Supported base container images :header-rows: 1 - * - Operating System + * - Base Distribution - Base Image - Spack Image * - Ubuntu 20.04 @@ -236,29 +400,33 @@ The OSes that are currently supported are summarized in the table below: - ``fedora:40`` - ``spack/fedora40`` +All container images are tagged with the version of Spack they contain. +.. list-table:: Spack container image tags + :header-rows: 1 -All the images are tagged with the corresponding release of Spack: - -.. image:: images/ghcr_spack.png - -with the exception of the ``latest`` tag that points to the HEAD -of the ``develop`` branch. These images are available for anyone -to use and take care of all the repetitive tasks that are necessary -to set up Spack within a container. The container recipes generated -by Spack use them as default base images for their ``build`` stage, -even though options to use custom base images provided by users are -available to accommodate complex use cases. + * - Tag + - Meaning + * - ``:latest`` + - Latest *stable* release of Spack + * - ``:1`` + - Latest ``1.x.y`` release of Spack + * - ``:1.0`` + - Latest ``1.0.y`` release of Spack + * - ``:1.0.2`` + - Specific ``1.0.2`` release of Spack + * - ``:develop`` + - Latest *development* version of Spack + +These images are available for anyone to use and take care of all the repetitive tasks that are necessary to set up Spack within a container. +The container recipes generated by Spack use them as default base images for their ``build`` stage, even though options to use custom base images provided by users are available to accommodate complex use cases. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configuring the Container Recipe -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Any Spack environment can be used for the automatic generation of container -recipes. Sensible defaults are provided for things like the base image or the -version of Spack used in the image. -If finer tuning is needed, it can be obtained by adding the relevant metadata -under the ``container`` attribute of environments: +Any Spack environment can be used for the automatic generation of container recipes. +Sensible defaults are provided for things like the base image or the version of Spack used in the image. +If finer tuning is needed, it can be obtained by adding the relevant metadata under the ``container`` attribute of environments: .. code-block:: yaml @@ -273,7 +441,7 @@ under the ``container`` attribute of environments: format: docker # Sets the base images for the stages where Spack builds the - # software or where the software gets installed after being built.. + # software or where the software gets installed after being built. images: os: "almalinux:9" spack: develop @@ -293,29 +461,22 @@ under the ``container`` attribute of environments: A detailed description of the options available can be found in the :ref:`container_config_options` section. 
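+With this metadata in place, the recipe is regenerated with the same command shown in the quick introduction; as a usage reminder, run from the directory containing the ``spack.yaml`` above:
+
+.. code-block:: console
+
+   $ spack containerize > Dockerfile
+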
-~~~~~~~~~~~~~~~~~~~ Setting Base Images -~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^ -The ``images`` subsection is used to select both the image where -Spack builds the software and the image where the built software -is installed. This attribute can be set in different ways and -which one to use depends on the use case at hand. +The ``images`` subsection is used to select both the image where Spack builds the software and the image where the built software is installed. +This attribute can be set in different ways and which one to use depends on the use case at hand. -"""""""""""""""""""""""""""""""""""""""" Use Official Spack Images From Dockerhub """""""""""""""""""""""""""""""""""""""" -To generate a recipe that uses an official Docker image from the -Spack organization to build the software and the corresponding official OS image -to install the built software, all the user has to do is specify: +To generate a recipe that uses an official Docker image from the Spack organization to build the software and the corresponding official OS image to install the built software, all the user has to do is specify: 1. An operating system under ``images:os`` 2. A Spack version under ``images:spack`` -Any combination of these two values that can be mapped to one of the images -discussed in :ref:`container_spack_images` is allowed. For instance, the -following ``spack.yaml``: +Any combination of these two values that can be mapped to one of the images discussed in :ref:`container_spack_images` is allowed. +For instance, the following ``spack.yaml``: .. code-block:: yaml @@ -327,15 +488,15 @@ following ``spack.yaml``: container: images: os: almalinux:9 - spack: 0.22.0 + spack: "1.0" -uses ``spack/almalinux9:0.22.0`` and ``almalinux:9`` for the stages where the -software is respectively built and installed: +uses ``spack/almalinux9:1.0`` and ``almalinux:9`` for the stages where the software is respectively built and installed: .. code-block:: docker # Build stage with Spack pre-installed and ready to be used - FROM spack/almalinux9:0.22.0 AS builder + FROM spack/almalinux9:1.0 AS builder + # What we want to install and how we want to install it # is specified in a manifest file (spack.yaml) @@ -348,34 +509,43 @@ software is respectively built and installed: && echo ' concretizer:' \ && echo ' unify: true' \ && echo ' config:' \ - && echo ' install_tree: /opt/software' \ + && echo ' install_tree:' \ + && echo ' root: /opt/software' \ && echo ' view: /opt/views/view') > /opt/spack-environment/spack.yaml - [ ... ] + + # ... + # Bare OS image to run the installed executables FROM quay.io/almalinuxorg/almalinux:9 COPY --from=builder /opt/spack-environment /opt/spack-environment COPY --from=builder /opt/software /opt/software - COPY --from=builder /opt/view /opt/view - COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh + COPY --from=builder /opt/views /opt/views + + RUN { \ + echo '#!/bin/sh' \ + && echo '.' /opt/spack-environment/activate.sh \ + && echo 'exec "$@"'; \ + } > /entrypoint.sh \ + && chmod a+x /entrypoint.sh \ + && ln -s /opt/views/view /opt/view + - ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"] + ENTRYPOINT [ "/entrypoint.sh" ] + CMD [ "/bin/bash" ] + + + +This is the simplest available method of selecting base images, and we advise its use whenever possible. +There are cases, though, where using Spack official images is not enough to fit production needs. 
+In these situations, users can extend the recipe to start with the bootstrapping of Spack at a certain pinned version or manually select which base image to start from in the recipe, as we'll see next. -This is the simplest available method of selecting base images, and we advise -its use whenever possible. There are cases, though, where using Spack official -images is not enough to fit production needs. In these situations, users can -extend the recipe to start with the bootstrapping of Spack at a certain pinned -version or manually select which base image to start from in the recipe, -as we'll see next. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use a Bootstrap Stage for Spack -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""" -In some cases, users may want to pin the commit SHA that is used for Spack to ensure later -reproducibility or start from a fork of the official Spack repository to try a bugfix or -a feature in an early stage of development. This is possible by being just a little more -verbose when specifying information about Spack in the ``spack.yaml`` file: +In some cases, users may want to pin the commit SHA that is used for Spack to ensure later reproducibility or start from a fork of the official Spack repository to try a bugfix or a feature in an early stage of development. +This is possible by being just a little more verbose when specifying information about Spack in the ``spack.yaml`` file: .. code-block:: yaml @@ -391,36 +561,28 @@ verbose when specifying information about Spack in the ``spack.yaml`` file: resolve_sha: ``url`` specifies the URL from which to clone Spack and defaults to https://github.com/spack/spack. -The ``ref`` attribute can be either a commit SHA, a branch name, or a tag. The default value in -this case is to use the ``develop`` branch, but it may change in the future to point to the latest stable -release. Finally, ``resolve_sha`` transforms branch names or tags into the corresponding commit -SHAs at the time of recipe generation to allow for greater reproducibility of the results -at a later time. +The ``ref`` attribute can be either a commit SHA, a branch name, or a tag. +The default value in this case is to use the ``develop`` branch, but it may change in the future to point to the latest stable release. +Finally, ``resolve_sha`` transforms branch names or tags into the corresponding commit SHAs at the time of recipe generation to allow for greater reproducibility of the results at a later time. -The list of operating systems that can be used to bootstrap Spack can be -obtained with: +The list of operating systems that can be used to bootstrap Spack can be obtained with: .. command-output:: spack containerize --list-os .. note:: - The ``resolve_sha`` option uses ``git rev-parse`` under the hood and thus requires - checking out the corresponding Spack repository in a temporary folder before generating - the recipe. Recipe generation may take longer when this option is set to true because - of this additional step. + The ``resolve_sha`` option uses ``git rev-parse`` under the hood and thus requires checking out the corresponding Spack repository in a temporary folder before generating the recipe. + Recipe generation may take longer when this option is set to true because of this additional step. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use Custom Images Provided by Users -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +""""""""""""""""""""""""""""""""""" -Consider, as an example, building a production-grade image for a CUDA -application. 
The best strategy would probably be to build on top of -images provided by the vendor and regard CUDA as an external package. +Consider, as an example, building a production-grade image for a CUDA application. +The best strategy would probably be to build on top of images provided by the vendor and regard CUDA as an external package. -Spack does not currently provide an official image with CUDA configured -this way, but users can build it on their own and then configure the -environment to explicitly pull it. This requires users to: +Spack does not currently provide an official image with CUDA configured this way, but users can build it on their own and then configure the environment to explicitly pull it. +This requires users to: 1. Specify the image used to build the software under ``images:build`` 2. Specify the image used to install the built software under ``images:final`` @@ -436,87 +598,96 @@ A ``spack.yaml`` like the following: - fftw precision=float packages: cuda: - buildable: False + buildable: false externals: - spec: cuda%gcc prefix: /usr/local/cuda container: images: - build: custom/cuda-10.1-ubuntu18.04:latest - final: nvidia/cuda:10.1-base-ubuntu18.04 + build: custom/cuda-13.0.1-ubuntu22.04:latest + final: nvidia/cuda:13.0.1-base-ubuntu22.04 produces, for instance, the following ``Dockerfile``: .. code-block:: docker # Build stage with Spack pre-installed and ready to be used - FROM custom/cuda-10.1-ubuntu18.04:latest as builder + FROM custom/cuda-13.0.1-ubuntu22.04:latest AS builder + # What we want to install and how we want to install it # is specified in a manifest file (spack.yaml) - RUN mkdir /opt/spack-environment \ - && (echo "spack:" \ - && echo " specs:" \ - && echo " - gromacs@2019.4+cuda build_type=Release" \ - && echo " - mpich" \ - && echo " - fftw precision=float" \ - && echo " packages:" \ - && echo " cuda:" \ - && echo " buildable: false" \ - && echo " externals:" \ - && echo " - spec: cuda%gcc" \ - && echo " prefix: /usr/local/cuda" \ - && echo " concretizer:" \ - && echo " unify: true" \ - && echo " config:" \ - && echo " install_tree: /opt/software" \ - && echo " view: /opt/view") > /opt/spack-environment/spack.yaml + RUN mkdir -p /opt/spack-environment && \ + set -o noclobber \ + && (echo spack: \ + && echo ' specs:' \ + && echo ' - gromacs@2019.4+cuda build_type=Release' \ + && echo ' - mpich' \ + && echo ' - fftw precision=float' \ + && echo ' packages:' \ + && echo ' cuda:' \ + && echo ' buildable: false' \ + && echo ' externals:' \ + && echo ' - spec: cuda%gcc' \ + && echo ' prefix: /usr/local/cuda' \ + && echo '' \ + && echo ' concretizer:' \ + && echo ' unify: true' \ + && echo ' config:' \ + && echo ' install_tree:' \ + && echo ' root: /opt/software' \ + && echo ' view: /opt/views/view') > /opt/spack-environment/spack.yaml # Install the software, remove unnecessary deps RUN cd /opt/spack-environment && spack env activate . && spack install --fail-fast && spack gc -y # Strip all the binaries - RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \ + RUN find -L /opt/views/view/* -type f -exec readlink -f '{}' \; | \ xargs file -i | \ grep 'charset=binary' | \ grep 'x-executable\|x-archive\|x-sharedlib' | \ - awk -F: '{print $1}' | xargs strip -s + awk -F: '{print $1}' | xargs strip # Modifications to the environment that are necessary to run RUN cd /opt/spack-environment && \ - spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh + spack env activate --sh -d . 
> activate.sh + # Bare OS image to run the installed executables - FROM nvidia/cuda:10.1-base-ubuntu18.04 + FROM nvidia/cuda:13.0.1-base-ubuntu22.04 COPY --from=builder /opt/spack-environment /opt/spack-environment COPY --from=builder /opt/software /opt/software - COPY --from=builder /opt/view /opt/view - COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh + COPY --from=builder /opt/views /opt/views + + RUN { \ + echo '#!/bin/sh' \ + && echo '.' /opt/spack-environment/activate.sh \ + && echo 'exec "$@"'; \ + } > /entrypoint.sh \ + && chmod a+x /entrypoint.sh \ + && ln -s /opt/views/view /opt/view + + + ENTRYPOINT [ "/entrypoint.sh" ] + CMD [ "/bin/bash" ] - ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l"] where the base images for both stages are completely custom. -This second mode of selection for base images is more flexible than just -choosing an operating system and a Spack version but is also more demanding. -Users may need to generate their base images themselves, and it's also their -responsibility to ensure that: +This second mode of selection for base images is more flexible than just choosing an operating system and a Spack version but is also more demanding. +Users may need to generate their base images themselves, and it's also their responsibility to ensure that: 1. Spack is available in the ``build`` stage and set up correctly to install the required software 2. The artifacts produced in the ``build`` stage can be executed in the ``final`` stage -Therefore, we do not recommend its use in cases that can be otherwise -covered by the simplified mode shown first. +Therefore, we do not recommend its use in cases that can be otherwise covered by the simplified mode shown first. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Singularity Definition Files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In addition to producing recipes in ``Dockerfile`` format, Spack can produce -Singularity Definition Files by just changing the value of the ``format`` -attribute: +In addition to producing recipes in ``Dockerfile`` format, Spack can produce Singularity Definition Files by just changing the value of the ``format`` attribute: .. code-block:: console @@ -530,21 +701,16 @@ attribute: $ spack containerize > hdf5.def $ sudo singularity build hdf5.sif hdf5.def -The minimum version of Singularity required to build a SIF (Singularity Image Format) -image from the recipes generated by Spack is ``3.5.3``. +The minimum version of Singularity required to build a SIF (Singularity Image Format) image from the recipes generated by Spack is ``3.5.3``. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Extending the Jinja2 Templates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``Dockerfile`` and the Singularity definition file that Spack can generate are based on -a few Jinja2 templates that are rendered according to the Spack environment being containerized. -Even though Spack allows a great deal of customization by just setting appropriate values for -the configuration options, sometimes that is not enough. +The ``Dockerfile`` and the Singularity definition file that Spack can generate are based on a few Jinja2 templates that are rendered according to the Spack environment being containerized. +Even though Spack allows a great deal of customization by just setting appropriate values for the configuration options, sometimes that is not enough. 
-In those cases, a user can directly extend the template that Spack uses to render the image -to, e.g., set additional environment variables or perform specific operations either before or -after a given stage of the build. Let's consider as an example the following structure: +In those cases, a user can directly extend the template that Spack uses to render the image to, e.g., set additional environment variables or perform specific operations either before or after a given stage of the build. +Let's consider as an example the following structure: .. code-block:: console @@ -558,9 +724,8 @@ after a given stage of the build. Let's consider as an example the following str └── container └── CustomDockerfile -containing both the custom template extension and the Spack environment manifest file. To use a custom -template, the Spack environment must register the directory containing it and declare its use under the -``container`` configuration: +containing both the custom template extension and the Spack environment manifest file. +To use a custom template, the Spack environment must register the directory containing it and declare its use under the ``container`` configuration: .. code-block:: yaml :emphasize-lines: 7-8,12 @@ -578,10 +743,10 @@ template, the Spack environment must register the directory containing it and de depfile: true template: container/CustomDockerfile -The template extension can override two blocks, named ``build_stage`` and ``final_stage``, similarly to -the example below: +The template extension can override two blocks, named ``build_stage`` and ``final_stage``, similarly to the example below: .. code-block:: text + :caption: /opt/environment/templates/container/CustomDockerfile :emphasize-lines: 3,8 {% extends "container/Dockerfile" %} @@ -604,64 +769,73 @@ Note that the Spack environment must be active for Spack to read the template. The recipe that gets generated contains the two extra instructions that we added in our template extension: .. code-block:: Dockerfile - :emphasize-lines: 4,43 + :emphasize-lines: 4,55 # Build stage with Spack pre-installed and ready to be used - FROM spack/ubuntu-jammy:latest as builder + FROM spack/ubuntu-jammy:develop AS builder RUN echo "Start building" # What we want to install and how we want to install it # is specified in a manifest file (spack.yaml) - RUN mkdir /opt/spack-environment \ - && (echo "spack:" \ - && echo " specs:" \ - && echo " - hdf5~mpi" \ - && echo " concretizer:" \ - && echo " unify: true" \ - && echo " config:" \ - && echo " template_dirs:" \ - && echo " - /tmp/environment/templates" \ - && echo " install_tree: /opt/software" \ - && echo " view: /opt/view") > /opt/spack-environment/spack.yaml + RUN mkdir -p /opt/spack-environment && \ + set -o noclobber \ + && (echo spack: \ + && echo ' specs:' \ + && echo ' - hdf5~mpi' \ + && echo ' concretizer:' \ + && echo ' unify: true' \ + && echo ' config:' \ + && echo ' template_dirs:' \ + && echo ' - /tmp/tmp.xvyLqAZpZg' \ + && echo ' install_tree:' \ + && echo ' root: /opt/software' \ + && echo ' view: /opt/views/view') > /opt/spack-environment/spack.yaml # Install the software, remove unnecessary deps RUN cd /opt/spack-environment && spack env activate . 
&& spack concretize && spack env depfile -o Makefile && make -j $(nproc) && spack gc -y # Strip all the binaries - RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \ + RUN find -L /opt/views/view/* -type f -exec readlink -f '{}' \; | \ xargs file -i | \ grep 'charset=binary' | \ grep 'x-executable\|x-archive\|x-sharedlib' | \ - awk -F: '{print $1}' | xargs strip -s + awk -F: '{print $1}' | xargs strip # Modifications to the environment that are necessary to run RUN cd /opt/spack-environment && \ - spack env activate --sh -d . >> /etc/profile.d/z10_spack_environment.sh + spack env activate --sh -d . > activate.sh + + # Bare OS image to run the installed executables FROM ubuntu:22.04 COPY --from=builder /opt/spack-environment /opt/spack-environment COPY --from=builder /opt/software /opt/software - COPY --from=builder /opt/._view /opt/._view - COPY --from=builder /opt/view /opt/view - COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh + COPY --from=builder /opt/views /opt/views - COPY data /share/myapp/data + RUN { \ + echo '#!/bin/sh' \ + && echo '.' /opt/spack-environment/activate.sh \ + && echo 'exec "$@"'; \ + } > /entrypoint.sh \ + && chmod a+x /entrypoint.sh \ + && ln -s /opt/views/view /opt/view - ENTRYPOINT ["/bin/bash", "--rcfile", "/etc/profile", "-l", "-c", "$*", "--" ] + + + COPY data /share/myapp/data + ENTRYPOINT [ "/entrypoint.sh" ] CMD [ "/bin/bash" ] .. _container_config_options: -~~~~~~~~~~~~~~~~~~~~~~~ Configuration Reference -~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^ -The tables below describe all the configuration options that are currently supported -to customize the generation of container recipes: +The tables below describe all the configuration options that are currently supported to customize the generation of container recipes: .. list-table:: General configuration options for the ``container`` section of ``spack.yaml`` :header-rows: 1 @@ -755,28 +929,24 @@ to customize the generation of container recipes: - Description string - No -~~~~~~~~~~~~~~ Best Practices -~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^ -"""""" MPI """""" -Due to the dependency on Fortran for OpenMPI, which is the Spack default -implementation, consider adding ``gfortran`` to the ``apt-get install`` list. -Recent versions of OpenMPI will require you to pass ``--allow-run-as-root`` -to your ``mpirun`` calls if started as root user inside Docker. +Because OpenMPI, the default MPI implementation in Spack, depends on Fortran, consider adding ``gfortran`` to the ``apt-get install`` list. -For execution on HPC clusters, it can be helpful to import the Docker -image into Singularity in order to start a program with an *external* -MPI. Otherwise, also add ``openssh-server`` to the ``apt-get install`` list. +Recent versions of OpenMPI will require you to pass ``--allow-run-as-root`` to your ``mpirun`` calls if they are started as the root user inside Docker. + +For execution on HPC clusters, it can be helpful to import the Docker image into Singularity in order to start a program with an *external* MPI. +Otherwise, also add ``openssh-server`` to the ``apt-get install`` list. -"""""" CUDA """""" -Starting from CUDA 9.0, NVIDIA provides minimal CUDA images based on -Ubuntu. Please see `their instructions `_. + +Starting from CUDA 9.0, NVIDIA provides minimal CUDA images based on Ubuntu. +Please see `their instructions `_. Avoid double-installing CUDA by adding, e.g.: ..
code-block:: yaml @@ -784,24 +954,20 @@ Avoid double-installing CUDA by adding, e.g.: packages: cuda: externals: - - spec: "cuda@9.0.176%gcc@5.4.0 arch=linux-ubuntu16-x86_64" + - spec: "cuda@9.0.176 arch=linux-ubuntu16-x86_64 %gcc@5.4.0" prefix: /usr/local/cuda - buildable: False + buildable: false to your ``spack.yaml``. -Users will either need ``nvidia-docker`` or, e.g., Singularity to *execute* -device kernels. +Users will either need ``nvidia-docker`` or, e.g., Singularity to *execute* device kernels. -""""""""""""""""""""""""""" Docker on Windows and macOS """"""""""""""""""""""""""" -On macOS and Windows, Docker runs on a hypervisor that is not allocated much -memory by default, and some Spack packages may fail to build due to lack of -memory. To work around this issue, consider configuring your Docker installation -to use more of your host memory. In some cases, you can also ease the memory -pressure on parallel builds by limiting the parallelism in your ``config.yaml``. +On macOS and Windows, Docker runs on a hypervisor that is not allocated much memory by default, and some Spack packages may fail to build due to lack of memory. +To work around this issue, consider configuring your Docker installation to use more of your host memory. +In some cases, you can also ease the memory pressure on parallel builds by limiting the parallelism in your ``config.yaml``. .. code-block:: yaml diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst index 6ced415012a81d..fffacfd001ad9e 100644 --- a/lib/spack/docs/contribution_guide.rst +++ b/lib/spack/docs/contribution_guide.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,92 +9,73 @@ .. _contribution-guide: -================== Contribution Guide ================== -This guide is intended for developers or administrators who want to -contribute a new package, feature, or bug fix to Spack. +This guide is intended for developers or administrators who want to contribute a new package, feature, or bug fix to Spack. It assumes that you have at least some familiarity with Git and GitHub. -The guide will show a few examples of contributing workflows and discuss -the granularity of pull requests (PRs). It will also discuss the tests your -PR must pass in order to be accepted into Spack. +The guide will show a few examples of contributing workflows and discuss the granularity of pull requests (PRs). +It will also discuss the tests your PR must pass in order to be accepted into Spack. -First, what is a PR? Quoting `Bitbucket's tutorials `_: +First, what is a PR? +Quoting `Bitbucket's tutorials `_: - Pull requests are a mechanism for a developer to notify team members that - they have **completed a feature**. The pull request is more than just a - notification -- it's a dedicated forum for discussing the proposed feature. + Pull requests are a mechanism for a developer to notify team members that they have **completed a feature**. + The pull request is more than just a notification -- it's a dedicated forum for discussing the proposed feature. -Important is **completed feature**. The changes one proposes in a PR should -correspond to one feature, bug fix, extension, etc. One can create PRs with -changes relevant to different ideas; however, reviewing such PRs becomes tedious -and error-prone. If possible, try to follow the **one-PR-one-package/feature** rule. 
+The important part is **completed feature**. +The changes one proposes in a PR should correspond to one feature, bug fix, extension, etc. +One can create PRs with changes relevant to different ideas; however, reviewing such PRs becomes tedious and error-prone. +If possible, try to follow the **one-PR-one-package/feature** rule. --------- Branches -------- -Spack's ``develop`` branch has the latest contributions. Nearly all pull -requests should start from ``develop`` and target ``develop``. +Spack's ``develop`` branch has the latest contributions. +Nearly all pull requests should start from ``develop`` and target ``develop``. -There is a branch for each major release series. Release branches -originate from ``develop`` and have tags for each point release in the -series. For example, ``releases/v0.14`` has tags for ``v0.14.0``, -``v0.14.1``, ``v0.14.2``, etc., versions of Spack. We backport important bug -fixes to these branches, but we do not advance the package versions or -make other changes that would change the way Spack concretizes -dependencies. Currently, the maintainers manage these branches by -cherry-picking from ``develop``. See :ref:`releases` for more -information. +There is a branch for each major release series. +Release branches originate from ``develop`` and have tags for each point release in the series. +For example, ``releases/v0.14`` has tags for ``v0.14.0``, ``v0.14.1``, ``v0.14.2``, etc., versions of Spack. +We backport important bug fixes to these branches, but we do not advance the package versions or make other changes that would change the way Spack concretizes dependencies. +Currently, the maintainers manage these branches by cherry-picking from ``develop``. +See :ref:`releases` for more information. ----------------------- Continuous Integration ---------------------- -Spack uses `GitHub Actions `_ for Continuous Integration -(CI) testing. This means that every time you submit a pull request, a series of tests will -be run to make sure you did not accidentally introduce any bugs into Spack. **Your PR -will not be accepted until it passes all of these tests.** While you can certainly wait -for the results of these tests after submitting a PR, we recommend that you run them -locally to speed up the review process. +Spack uses `GitHub Actions `_ for Continuous Integration (CI) testing. +This means that every time you submit a pull request, a series of tests will be run to make sure you did not accidentally introduce any bugs into Spack. +**Your PR will not be accepted until it passes all of these tests.** +While you can certainly wait for the results of these tests after submitting a PR, we recommend that you run them locally to speed up the review process. .. note:: Oftentimes, CI will fail for reasons other than a problem with your PR. - For example, ``apt-get``, ``pip``, or ``brew`` (Homebrew) might fail to download one of the - dependencies for the test suite, or a transient bug might cause the unit tests - to timeout. If any job fails, click the "Details" link and click on the test(s) - that is failing. If it does not look like it is failing for reasons related to - your PR, you have two options. If you have write permissions for the Spack - repository, you should see a "Restart workflow" button on the right-hand side. If - not, you can close and reopen your PR to rerun all of the tests. If the same - test keeps failing, there may be a problem with your PR.
If you notice that - every recent PR is failing with the same error message, it may be that an issue - occurred with the CI infrastructure, or one of Spack's dependencies put out a - new release that is causing problems. If this is the case, please file an issue. - - -We currently test against Python 2.7 and 3.6-3.10 on both macOS and Linux and -perform three types of tests: + For example, ``apt-get``, ``pip``, or ``brew`` (Homebrew) might fail to download one of the dependencies for the test suite, or a transient bug might cause the unit tests to time out. + If any job fails, click the "Details" link and click on the test(s) that is failing. + If it does not look like it is failing for reasons related to your PR, you have two options. + If you have write permissions for the Spack repository, you should see a "Restart workflow" button on the right-hand side. + If not, you can close and reopen your PR to rerun all of the tests. + If the same test keeps failing, there may be a problem with your PR. + If you notice that every recent PR is failing with the same error message, it may be that an issue occurred with the CI infrastructure, or one of Spack's dependencies put out a new release that is causing problems. + If this is the case, please file an issue. + + +We currently test against Python 3.6 and up on both macOS and Linux and perform three types of tests: .. _cmd-spack-unit-test: -^^^^^^^^^^ Unit Tests ^^^^^^^^^^ -Unit tests ensure that core Spack features like fetching or spec resolution are -working as expected. If your PR only adds new packages or modifies existing ones, -there's very little chance that your changes could cause the unit tests to fail. -However, if you make changes to Spack's core libraries, you should run the unit -tests to make sure you didn't break anything. +Unit tests ensure that core Spack features like fetching or spec resolution are working as expected. +If your PR only adds new packages or modifies existing ones, there's very little chance that your changes could cause the unit tests to fail. +However, if you make changes to Spack's core libraries, you should run the unit tests to make sure you didn't break anything. -Since they test things like fetching from VCS repos, the unit tests require -`git `_, `mercurial `_, -and `subversion `_ to run. Make sure these are -installed on your system and can be found in your ``PATH``. All of these can be -installed with Spack or with your system package manager. +Since they test things like fetching from VCS repos, the unit tests require `git `_, `mercurial `_, and `subversion `_ to run. +Make sure these are installed on your system and can be found in your ``PATH``. +All of these can be installed with Spack or with your system package manager. To run *all* of the unit tests, use: @@ -101,10 +83,9 @@ To run *all* of the unit tests, use: $ spack unit-test -These tests may take several minutes to complete. If you know you are -only modifying a single Spack feature, you can run subsets of tests at a -time. For example, this would run all the tests in -``lib/spack/spack/test/architecture.py``: +These tests may take several minutes to complete. +If you know you are only modifying a single Spack feature, you can run subsets of tests at a time. +For example, this would run all the tests in ``lib/spack/spack/test/architecture.py``: ..
code-block:: console @@ -116,23 +97,17 @@ And this would run the ``test_platform`` test from that file: $ spack unit-test lib/spack/spack/test/architecture.py::test_platform -This allows you to develop iteratively: make a change, test that change, -make another change, test that change, etc. We use `pytest -`_ as our tests framework, and these types of -arguments are just passed to the ``pytest`` command underneath. See `the -pytest docs -`_ -for more details on test selection syntax. +This allows you to develop iteratively: make a change, test that change, make another change, test that change, etc. +We use `pytest `_ as our test framework, and these types of arguments are just passed to the ``pytest`` command underneath. +See `the pytest docs `_ for more details on test selection syntax. -``spack unit-test`` has a few special options that can help you -understand what tests are available. To get a list of all available -unit test files, run: +``spack unit-test`` has a few special options that can help you understand what tests are available. +To get a list of all available unit test files, run: .. command-output:: spack unit-test --list :ellipsis: 5 -To see a more detailed list of available unit tests, use ``spack -unit-test --list-long``: +To see a more detailed list of available unit tests, use ``spack unit-test --list-long``: .. command-output:: spack unit-test --list-long :ellipsis: 10 @@ -142,60 +117,47 @@ And to see the fully qualified names of all tests, use ``--list-names``: .. command-output:: spack unit-test --list-names :ellipsis: 5 -You can combine these with ``pytest`` arguments to restrict which tests -you want to know about. For example, to see just the tests in -``architecture.py``: +You can combine these with ``pytest`` arguments to restrict which tests you want to know about. +For example, to see just the tests in ``architecture.py``: .. command-output:: spack unit-test --list-long lib/spack/spack/test/architecture.py -You can also combine any of these options with a ``pytest`` keyword -search. See the `pytest usage documentation -`_ -for more details on test selection syntax. For example, to see the names of all tests that have "spec" -or "concretize" somewhere in their names: +You can also combine any of these options with a ``pytest`` keyword search. +See the `pytest usage documentation `_ for more details on test selection syntax. +For example, to see the names of all tests that have "spec" or "concretize" somewhere in their names: .. command-output:: spack unit-test --list-names -k "spec and concretize" -By default, ``pytest`` captures the output of all unit tests, and it will -print any captured output for failed tests. Sometimes it is helpful to see -your output interactively while the tests run (e.g., if you add print -statements to unit tests). To see the output *live*, use the ``-s`` -argument to ``pytest``: +By default, ``pytest`` captures the output of all unit tests, and it will print any captured output for failed tests. +Sometimes it is helpful to see your output interactively while the tests run (e.g., if you add print statements to unit tests). +To see the output *live*, use the ``-s`` argument to ``pytest``: .. code-block:: console $ spack unit-test -s --list-long lib/spack/spack/test/architecture.py::test_platform -Unit tests are crucial to making sure bugs are not introduced into -Spack. If you are modifying core Spack libraries or adding new -functionality, please add new unit tests for your feature and consider -strengthening existing tests.
You will likely be asked to do this if you -submit a pull request to the Spack project on GitHub. Check out the -`pytest documentation `_ and feel free to ask for guidance on -how to write tests! +Unit tests are crucial to making sure bugs are not introduced into Spack. +If you are modifying core Spack libraries or adding new functionality, please add new unit tests for your feature and consider strengthening existing tests. +You will likely be asked to do this if you submit a pull request to the Spack project on GitHub. +Check out the `pytest documentation `_ and feel free to ask for guidance on how to write tests! .. note:: - You may notice the ``share/spack/qa/run-unit-tests`` script in the - repository. This script is designed for CI. It runs the unit - tests and reports coverage statistics back to Codecov. If you want to - run the unit tests yourself, we suggest you use ``spack unit-test``. + You may notice the ``share/spack/qa/run-unit-tests`` script in the repository. + This script is designed for CI. + It runs the unit tests and reports coverage statistics back to Codecov. + If you want to run the unit tests yourself, we suggest you use ``spack unit-test``. -^^^^^^^^^^^^ Style Tests ^^^^^^^^^^^^ -Spack uses `Flake8 `_ to test for -`PEP 8 `_ conformance and -`mypy `_ for type checking. PEP 8 is -a series of style guides for Python that provide suggestions for everything -from variable naming to indentation. In order to limit the number of PRs that -were mostly style changes, we decided to enforce PEP 8 conformance. Your PR -needs to comply with PEP 8 in order to be accepted, and if it modifies the -Spack library, it needs to successfully type-check with mypy as well. +Spack uses `Flake8 `_ to test for `PEP 8 `_ conformance and `mypy `_ for type checking. +PEP 8 is a series of style guides for Python that provide suggestions for everything from variable naming to indentation. +In order to limit the number of PRs that were mostly style changes, we decided to enforce PEP 8 conformance. +Your PR needs to comply with PEP 8 in order to be accepted, and if it modifies the Spack library, it needs to successfully type-check with mypy as well. -Testing for compliance with Spack's style is easy. Simply run the ``spack style`` -command: +Testing for compliance with Spack's style is easy. +Simply run the ``spack style`` command: .. code-block:: console @@ -203,21 +165,16 @@ command: ``spack style`` has a couple advantages over running the tools by hand: -#. It only tests files that you have modified since branching off of - ``develop``. +#. It only tests files that you have modified since branching off of ``develop``. #. It works regardless of what directory you are in. -#. It automatically adds approved exemptions from the ``flake8`` - checks. For example, URLs are often longer than 80 characters, so we - exempt them from line length checks. We also exempt lines that start - with ``homepage =``, ``url =``, ``version()``, ``variant()``, ``depends_on()``, and - ``extends()`` in ``package.py`` files. This is now also possible when directly - running Flake8 if you can use the ``spack`` formatter plugin included with - Spack. +#. It automatically adds approved exemptions from the ``flake8`` checks. + For example, URLs are often longer than 80 characters, so we exempt them from line length checks. + We also exempt lines that start with ``homepage =``, ``url =``, ``version()``, ``variant()``, ``depends_on()``, and ``extends()`` in ``package.py`` files. 
+ This is now also possible when directly running Flake8 if you can use the ``spack`` formatter plugin included with Spack. -More approved Flake8 exemptions can be found -`here `_. +More approved Flake8 exemptions can be found `here `_. If all is well, you'll see something like this: @@ -245,26 +202,20 @@ However, if you are not compliant with PEP 8, Flake8 will complain: var/spack/repos/spack_repo/builtin/packages/netcdf/package.py:106: [E501] line too long (92 > 79 characters) Flake8 found errors. -Most of the error messages are straightforward, but if you do not understand what -they mean, just ask questions about them when you submit your PR. The line numbers -will change if you add or delete lines, so simply run ``spack style`` again -to update them. +Most of the error messages are straightforward, but if you do not understand what they mean, just ask questions about them when you submit your PR. +The line numbers will change if you add or delete lines, so simply run ``spack style`` again to update them. .. tip:: - Try fixing Flake8 errors in reverse order. This eliminates the need for - multiple runs of ``spack style`` just to re-compute line numbers and - makes it much easier to fix errors directly off of the CI output. + Try fixing Flake8 errors in reverse order. + This eliminates the need for multiple runs of ``spack style`` just to re-compute line numbers and makes it much easier to fix errors directly off of the CI output. -^^^^^^^^^^^^^^^^^^^ Documentation Tests ^^^^^^^^^^^^^^^^^^^ -Spack uses `Sphinx `_ to build its -documentation. In order to prevent things like broken links and missing imports, -we added documentation tests that build the documentation and fail if there -are any warning or error messages. +Spack uses `Sphinx `_ to build its documentation. +In order to prevent things like broken links and missing imports, we added documentation tests that build the documentation and fail if there are any warning or error messages. Building the documentation requires several dependencies: @@ -285,16 +236,15 @@ All of these can be installed with Spack, e.g.: .. warning:: Sphinx has `several required dependencies `_. - If you are using a Python from Spack and you installed - ``py-sphinx`` and friends, you need to make them available to your - Python interpreter. The easiest way to do this is to run: + If you are using a Python from Spack and you installed ``py-sphinx`` and friends, you need to make them available to your Python interpreter. + The easiest way to do this is to run: .. code-block:: console $ spack load py-sphinx py-sphinx-rtd-theme py-sphinxcontrib-programoutput - so that all of the dependencies are added to ``PYTHONPATH``. If you see an error message - like: + so that all of the dependencies are added to ``PYTHONPATH``. + If you see an error message like: .. code-block:: console @@ -302,8 +252,7 @@ All of these can be installed with Spack, e.g.: Could not import extension sphinxcontrib.programoutput (exception: No module named sphinxcontrib.programoutput) make: *** [html] Error 1 - that means Sphinx could not find ``py-sphinxcontrib-programoutput`` in your - ``PYTHONPATH``. + that means Sphinx could not find ``py-sphinxcontrib-programoutput`` in your ``PYTHONPATH``. 
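+Before attempting a full build, a quick way to confirm that all three extensions are visible to your interpreter is an import check; the following is a sketch, and the module names (``sphinx``, ``sphinx_rtd_theme``, ``sphinxcontrib.programoutput``) are assumed to be the import names of the packages above:
+
+.. code-block:: console
+
+   $ spack load py-sphinx py-sphinx-rtd-theme py-sphinxcontrib-programoutput
+   $ python -c "import sphinx, sphinx_rtd_theme, sphinxcontrib.programoutput"
+
+If the second command exits without an error, the documentation build should be able to find all three modules.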
Once all of the dependencies are installed, you can try building the documentation: @@ -313,48 +262,40 @@ Once all of the dependencies are installed, you can try building the documentati $ make clean $ make -If you see any warning or error messages, you will have to correct those before your PR -is accepted. If you are editing the documentation, you should be running the -documentation tests to make sure there are no errors. Documentation changes can result -in some obfuscated warning messages. If you do not understand what they mean, feel free -to ask when you submit your PR. +If you see any warning or error messages, you will have to correct those before your PR is accepted. +If you are editing the documentation, you should be running the documentation tests to make sure there are no errors. +Documentation changes can result in some obfuscated warning messages. +If you do not understand what they mean, feel free to ask when you submit your PR. .. _spack-builders-and-pipelines: -^^^^^^^^^ GitLab CI ^^^^^^^^^ -"""""""""""""""""" Build Cache Stacks """""""""""""""""" -Spack welcomes the contribution of software stacks of interest to the community. These -stacks are used to test package recipes and generate publicly available build caches. +Spack welcomes the contribution of software stacks of interest to the community. +These stacks are used to test package recipes and generate publicly available build caches. Spack uses GitLab CI for managing the orchestration of build jobs. GitLab Entry Point ~~~~~~~~~~~~~~~~~~ -Add a stack entrypoint to ``share/spack/gitlab/cloud_pipelines/.gitlab-ci.yml``. There -are two stages required for each new stack: the generation stage and the build stage. +Add a stack entrypoint to ``share/spack/gitlab/cloud_pipelines/.gitlab-ci.yml``. +There are two stages required for each new stack: the generation stage and the build stage. -The generate stage is defined using the job template ``.generate`` configured with -environment variables defining the name of the stack in ``SPACK_CI_STACK_NAME``, the -platform (``SPACK_TARGET_PLATFORM``) and architecture (``SPACK_TARGET_ARCH``) configuration, -and the tags associated with the class of runners to build on. +The generate stage is defined using the job template ``.generate`` configured with environment variables defining the name of the stack in ``SPACK_CI_STACK_NAME``, the platform (``SPACK_TARGET_PLATFORM``) and architecture (``SPACK_TARGET_ARCH``) configuration, and the tags associated with the class of runners to build on. .. note:: - The ``SPACK_CI_STACK_NAME`` must match the name of the directory containing the - stack's ``spack.yaml`` file. + The ``SPACK_CI_STACK_NAME`` must match the name of the directory containing the stack's ``spack.yaml`` file. .. note:: - The platform and architecture variables are specified in order to select the - correct configurations from the generic configurations used in Spack CI. The - configurations currently available are: + The platform and architecture variables are specified in order to select the correct configurations from the generic configurations used in Spack CI. + The configurations currently available are: * ``.cray_rhel_zen4`` * ``.cray_sles_zen4`` @@ -372,34 +313,33 @@ and the tags associated with the class of runners to build on. New configurations can be added to accommodate new platforms and architectures. -The build stage is defined as a trigger job that consumes the GitLab CI pipeline generated in -the generate stage for this stack. 
Build stage jobs use the ``.build`` job template, which -handles the basic configuration. +The build stage is defined as a trigger job that consumes the GitLab CI pipeline generated in the generate stage for this stack. +Build stage jobs use the ``.build`` job template, which handles the basic configuration. An example entry point for a new stack called ``my-super-cool-stack`` .. code-block:: yaml .my-super-cool-stack: - extends: [ ".linux_x86_64_v3" ] + extends: [".linux_x86_64_v3"] variables: SPACK_CI_STACK_NAME: my-super-cool-stack - tags: [ "all", "tags", "your", "job", "needs"] + tags: ["all", "tags", "your", "job", "needs"] my-super-cool-stack-generate: - extends: [ ".generate", ".my-super-cool-stack" ] + extends: [".generate", ".my-super-cool-stack"] image: my-super-cool-stack-image:0.0.1 my-super-cool-stack-build: - extends: [ ".build", ".my-super-cool-stack" ] + extends: [".build", ".my-super-cool-stack"] trigger: include: - - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml - job: my-super-cool-stack-generate + - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml + job: my-super-cool-stack-generate strategy: depend needs: - - artifacts: True - job: my-super-cool-stack-generate + - artifacts: true + job: my-super-cool-stack-generate Stack Configuration @@ -411,11 +351,9 @@ Stack configurations should be located in ``share/spack/gitlab/cloud_pipelines/s The ``ci`` section is generally used to define stack-specific mappings such as image or tags. For more information on what can go into the ``ci`` section, refer to the docs on pipelines. -The ``cdash`` section is used for defining where to upload the results of builds. Spack configures -most of the details for posting pipeline results to -`cdash.spack.io `_. The only -requirement in the stack configuration is to define a ``build-group`` that is unique; -this is usually the long name of the stack. +The ``cdash`` section is used for defining where to upload the results of builds. +Spack configures most of the details for posting pipeline results to `cdash.spack.io `_. +The only requirement in the stack configuration is to define a ``build-group`` that is unique; this is usually the long name of the stack. An example stack that builds ``zlib``. @@ -430,7 +368,7 @@ An example stack that builds ``zlib``. - zlib ci: - pipeline-gen + pipeline-gen: - build-job: image: my-super-cool-stack-image:0.0.1 @@ -443,57 +381,49 @@ An example stack that builds ``zlib``. When the images do not match, the build job may fail. -""""""""""""""""""" Registering Runners """"""""""""""""""" -Contributing computational resources to Spack's CI build farm is one way to help expand the -capabilities and offerings of the public Spack build caches. Currently, Spack utilizes Linux runners -from AWS, Google, and the University of Oregon (UO). +Contributing computational resources to Spack's CI build farm is one way to help expand the capabilities and offerings of the public Spack build caches. +Currently, Spack utilizes Linux runners from AWS, Google, and the University of Oregon (UO). + +Runners require four key pieces: -Runners require three key pieces: * Runner Registration Token * Accurate tags * OIDC Authentication script * GPG keys - -Minimum GitLab Runner Version: ``16.1.0`` -`Installation instructions `_ +Minimum GitLab Runner Version: ``16.1.0`` `Installation instructions `_ Registration Token ~~~~~~~~~~~~~~~~~~ -The first step to contribute new runners is to open an issue in the `Spack infrastructure `_ -project. 
This will be reported to the Spack infrastructure team, who will guide users through the process -of registering new runners for Spack CI. +The first step to contribute new runners is to open an issue in the `Spack infrastructure `_ project. +This will be reported to the Spack infrastructure team, who will guide users through the process of registering new runners for Spack CI. -The information needed to register a runner is the motivation for the new resources, a semi-detailed description of -the runner, and finally the point of contact for maintaining the software on the runner. +The information needed to register a runner is the motivation for the new resources, a semi-detailed description of the runner, and finally the point of contact for maintaining the software on the runner. -The point of contact will then work with the infrastructure team to obtain runner registration token(s) for interacting -with Spack's GitLab instance. Once the runner is active, this point of contact will also be responsible for updating the -GitLab runner software to keep pace with Spack's GitLab. +The point of contact will then work with the infrastructure team to obtain runner registration token(s) for interacting with Spack's GitLab instance. +Once the runner is active, this point of contact will also be responsible for updating the GitLab runner software to keep pace with Spack's GitLab. Tagging ~~~~~~~ -In the initial stages of runner registration, it is important to **exclude** the special tag ``spack``. This will prevent -the new runner(s) from being picked up for production CI jobs while it is configured and evaluated. Once it is determined -that the runner is ready for production use, the ``spack`` tag will be added. +In the initial stages of runner registration, it is important to **exclude** the special tag ``spack``. +This will prevent the new runner(s) from being picked up for production CI jobs while it is configured and evaluated. +Once it is determined that the runner is ready for production use, the ``spack`` tag will be added. Because GitLab has no concept of tag exclusion, runners that provide specialized resources also require specialized tags. -For example, a basic CPU-only x86_64 runner may have a tag ``x86_64`` associated with it. However, a runner containing a -CUDA-capable GPU may have the tag ``x86_64-cuda`` to denote that it should only be used for packages that will benefit from -a CUDA-capable resource. +For example, a basic CPU-only x86_64 runner may have a tag ``x86_64`` associated with it. +However, a runner containing a CUDA-capable GPU may have the tag ``x86_64-cuda`` to denote that it should only be used for packages that will benefit from a CUDA-capable resource. OIDC ~~~~ -Spack runners use OIDC authentication for connecting to the appropriate AWS bucket, -which is used for coordinating the communication of binaries between build jobs. In -order to configure OIDC authentication, Spack CI runners use a Python script with minimal -dependencies. This script can be configured for runners as seen here using the ``pre_build_script``. +Spack runners use OIDC authentication for connecting to the appropriate AWS bucket, which is used for coordinating the communication of binaries between build jobs. +In order to configure OIDC authentication, Spack CI runners use a Python script with minimal dependencies. +This script can be configured for runners as seen here using the ``pre_build_script``. .. code-block:: toml @@ -524,56 +454,46 @@ dependencies. 
This script can be configured for runners as seen here using the ` GPG Keys ~~~~~~~~ -Runners that may be utilized for ``protected`` CI require the registration of an intermediate signing key that -can be used to sign packages. For more information on package signing, read :ref:`key_architecture`. +Runners that may be utilized for ``protected`` CI require the registration of an intermediate signing key that can be used to sign packages. +For more information on package signing, read :ref:`key_architecture`. --------- Coverage -------- -Spack uses `Codecov `_ to generate and report unit test -coverage. This helps us tell what percentage of lines of code in Spack are -covered by unit tests. Although code covered by unit tests can still contain -bugs, it is much less error-prone than code that is not covered by unit tests. +Spack uses `Codecov `_ to generate and report unit test coverage. +This helps us tell what percentage of lines of code in Spack are covered by unit tests. +Although code covered by unit tests can still contain bugs, it is much less error-prone than code that is not covered by unit tests. -Codecov provides `browser extensions `_ -for Google Chrome and Firefox. These extensions integrate with GitHub -and allow you to see coverage line-by-line when viewing the Spack repository. -If you are new to Spack, a great way to get started is to write unit tests to -increase coverage! +Codecov provides `browser extensions `_ for Google Chrome and Firefox. +These extensions integrate with GitHub and allow you to see coverage line-by-line when viewing the Spack repository. +If you are new to Spack, a great way to get started is to write unit tests to increase coverage! -Unlike with CI on GitHub Actions, Codecov tests are not required to pass in order for your -PR to be merged. If you modify core Spack libraries, we would greatly -appreciate unit tests that cover these changed lines. Otherwise, we have no -way of knowing whether or not your changes introduce a bug. If you make -substantial changes to the core, we may request unit tests to increase coverage. +Unlike with CI on GitHub Actions, Codecov tests are not required to pass in order for your PR to be merged. +If you modify core Spack libraries, we would greatly appreciate unit tests that cover these changed lines. +Otherwise, we have no way of knowing whether or not your changes introduce a bug. +If you make substantial changes to the core, we may request unit tests to increase coverage. .. note:: - If the only files you modified are package files, we do not care about - coverage on your PR. You may notice that the Codecov tests fail even though - you did not modify any core files. This means that Spack's overall coverage - has increased since you branched off of ``develop``. This is a good thing! - If you really want to get the Codecov tests to pass, you can rebase off of - the latest ``develop``, but again, this is not required. + If the only files you modified are package files, we do not care about coverage on your PR. + You may notice that the Codecov tests fail even though you did not modify any core files. + This means that Spack's overall coverage has increased since you branched off of ``develop``. + This is a good thing! + If you really want to get the Codecov tests to pass, you can rebase off of the latest ``develop``, but again, this is not required. -------------- Git Workflows ------------- -Spack is still in the beta stages of development. 
Most of our users run off of -the ``develop`` branch, and fixes and new features are constantly being merged. So, -how do you keep up-to-date with upstream while maintaining your own local -differences and contributing PRs to Spack? +Spack is still in the beta stages of development. +Most of our users run off of the ``develop`` branch, and fixes and new features are constantly being merged. +So, how do you keep up-to-date with upstream while maintaining your own local differences and contributing PRs to Spack? -^^^^^^^^^ Branching ^^^^^^^^^ -The easiest way to contribute a pull request is to make all of your changes on -new branches. Make sure your ``develop`` branch is up-to-date and create a new branch -off of it: +The easiest way to contribute a pull request is to make all of your changes on new branches. +Make sure your ``develop`` branch is up-to-date and create a new branch off of it: .. code-block:: console @@ -582,15 +502,12 @@ off of it: $ git branch <descriptive_branch_name> $ git checkout <descriptive_branch_name> -Here we assume that the local ``develop`` branch tracks the upstream ``develop`` -branch of Spack. This is not a requirement, and you could also do the same with -remote branches. But for some, it is more convenient to have a local branch that -tracks upstream. +Here we assume that the local ``develop`` branch tracks the upstream ``develop`` branch of Spack. +This is not a requirement, and you could also do the same with remote branches. +But for some, it is more convenient to have a local branch that tracks upstream. -Normally, we prefer that commits pertaining to a package ``<package-name>`` have -a message in the format ``<package-name>: descriptive message``. It is important to add a -descriptive message so that others who might be looking at your changes later -(in a year or maybe two) can understand the rationale behind them. +Normally, we prefer that commits pertaining to a package ``<package-name>`` have a message in the format ``<package-name>: descriptive message``. +It is important to add a descriptive message so that others who might be looking at your changes later (in a year or maybe two) can understand the rationale behind them. Now, you can make your changes while keeping the ``develop`` branch clean. Edit a few files and commit them by running: @@ -606,14 +523,11 @@ Next, push it to your remote fork and create a PR: $ git push origin <descriptive_branch_name> --set-upstream -GitHub provides a `tutorial `_ -on how to file a pull request. When you send the request, make ``develop`` the -destination branch. +GitHub provides a `tutorial `_ on how to file a pull request. +When you send the request, make ``develop`` the destination branch. -If you need this change immediately and do not have time to wait for your PR to -be merged, you can always work on this branch. But if you have multiple PRs, -another option is to maintain a "Frankenstein" branch that combines all of your -other branches: +If you need this change immediately and do not have time to wait for your PR to be merged, you can always work on this branch. +But if you have multiple PRs, another option is to maintain a "Frankenstein" branch that combines all of your other branches: .. code-block:: console @@ -622,16 +536,14 @@ other branches: $ git checkout <combined_branch_name> $ git merge <descriptive_branch_name> -This can be done with each new PR you submit. Just make sure to keep this local -branch up-to-date with the upstream ``develop`` branch too. +This can be done with each new PR you submit. +Just make sure to keep this local branch up-to-date with the upstream ``develop`` branch too.
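+For example, a sketch of that refresh step, reusing the hypothetical ``<combined_branch_name>`` placeholder from above:
+
+.. code-block:: console
+
+   $ git checkout develop
+   $ git pull upstream develop
+   $ git checkout <combined_branch_name>
+   $ git merge develop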
-^^^^^^^^^^^^^^ Cherry-Picking ^^^^^^^^^^^^^^ -What if you made some changes to your local modified ``develop`` branch and already -committed them, but later decided to contribute them to Spack? You can use -cherry-picking to create a new branch with only these commits. +What if you made some changes to your local modified ``develop`` branch and already committed them, but later decided to contribute them to Spack? +You can use cherry-picking to create a new branch with only these commits. First, check out your local modified ``develop`` branch: @@ -645,8 +557,7 @@ Now, get the hashes of the commits you want from the output of ``git log``: $ git log -Next, create a new branch off of the upstream ``develop`` branch and copy the commits -that you want in your PR: +Next, create a new branch off of the upstream ``develop`` branch and copy the commits that you want in your PR: .. code-block:: console @@ -657,34 +568,29 @@ that you want in your PR: $ git cherry-pick <hash> $ git push origin <descriptive_branch_name> --set-upstream -Now you can create a PR from the web interface of GitHub. The net result is as -follows: +Now you can create a PR from the web interface of GitHub. +The net result is as follows: #. You patched your local version of Spack and can use it further. -#. You "cherry-picked" these changes into a standalone branch and submitted it - as a PR upstream. +#. You "cherry-picked" these changes into a standalone branch and submitted it as a PR upstream. -Should you have several commits to contribute, you could follow the same -procedure by getting hashes of all of them and cherry-picking them to the PR branch. +Should you have several commits to contribute, you could follow the same procedure by getting hashes of all of them and cherry-picking them to the PR branch. .. note:: - It is important that whenever you change something that might be of - importance upstream, create a pull request as soon as possible. Do not wait - for weeks or months to do this, because: + It is important that whenever you change something that might be of importance upstream, you create a pull request as soon as possible. + Do not wait for weeks or months to do this, because: #. you might forget why you modified certain files. #. it could get difficult to isolate this change into a standalone, clean PR. -^^^^^^^^ Rebasing ^^^^^^^^ -Other developers are constantly making contributions to Spack, possibly on the -same files that your PR changed. If their PR is merged before yours, it can -create a merge conflict. This means that your PR can no longer be automatically -merged without a chance of breaking your changes. In this case, you will be -asked to rebase on top of the latest upstream ``develop`` branch. +Other developers are constantly making contributions to Spack, possibly on the same files that your PR changed. +If their PR is merged before yours, it can create a merge conflict. +This means that your PR can no longer be automatically merged without a chance of breaking your changes. +In this case, you will be asked to rebase on top of the latest upstream ``develop`` branch. First, make sure your ``develop`` branch is up-to-date: @@ -693,16 +599,16 @@ First, make sure your ``develop`` branch is up-to-date: $ git checkout develop $ git pull upstream develop
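+If your clone does not yet have an ``upstream`` remote, one way to set it up is the following sketch, which assumes that ``origin`` points at your fork:
+
+.. code-block:: console
+
+   $ git remote add upstream https://github.com/spack/spack.git
+   $ git fetch upstream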
-Now, we need to switch to the branch you submitted for your PR and rebase it -on top of ``develop``: +Now, we need to switch to the branch you submitted for your PR and rebase it on top of ``develop``: .. code-block:: console $ git checkout <descriptive_branch_name> $ git rebase develop -Git will likely ask you to resolve conflicts. Edit the file that it says cannot -be merged automatically and resolve the conflict. Then, run: +Git will likely ask you to resolve conflicts. +Edit the file that it says cannot be merged automatically and resolve the conflict. +Then, run: .. code-block:: console @@ -716,12 +622,11 @@ Once this is done, simply force push your rebased branch to your remote fork: $ git push --force origin <descriptive_branch_name> -^^^^^^^^^^^^^^^^^^^^^^^^^ Rebasing with cherry-pick ^^^^^^^^^^^^^^^^^^^^^^^^^ -You can also perform a rebase using ``cherry-pick``. First, create a temporary -backup branch: +You can also perform a rebase using ``cherry-pick``. +First, create a temporary backup branch: .. code-block:: console @@ -736,8 +641,7 @@ Now, look at the logs and save the hashes of any commits you would like to keep: $ git log Next, go back to the original branch and reset it to ``develop``. -Before doing so, make sure that your local ``develop`` branch is up-to-date -with upstream: +Before doing so, make sure that your local ``develop`` branch is up-to-date with upstream: .. code-block:: console @@ -765,27 +669,24 @@ If everything looks good, delete the backup branch: $ git branch --delete --force tmp -^^^^^^^^^^^^^^^^^^ Re-writing History ^^^^^^^^^^^^^^^^^^ -Sometimes you may end up on a branch that has diverged so much from ``develop`` -that it cannot easily be rebased. If the current commit history is more of -an experimental nature and only the net result is important, you may rewrite -the history. +Sometimes you may end up on a branch that has diverged so much from ``develop`` that it cannot easily be rebased. +If the current commit history is more of an experimental nature and only the net result is important, you may rewrite the history. -First, merge upstream ``develop`` and reset your branch to it. On the branch -in question, run: +First, merge upstream ``develop`` and reset your branch to it. +On the branch in question, run: .. code-block:: console $ git merge develop $ git reset develop -At this point, your branch will point to the same commit as ``develop``, and -thereby the two are indistinguishable. However, all the files that were -previously modified will stay as such. In other words, you do not lose the -changes you made. Changes can be reviewed by looking at diffs: +At this point, your branch will point to the same commit as ``develop``, and thereby the two are indistinguishable. +However, all the files that were previously modified will stay as such. +In other words, you do not lose the changes you made. +Changes can be reviewed by looking at diffs: .. code-block:: console @@ -799,8 +700,7 @@ The next step is to rewrite the history by adding files and creating commits: $ git add <files> $ git commit --message "<descriptive message>" -After all changed files are committed, you can push the branch to your fork -and create a PR: +After all changed files are committed, you can push the branch to your fork and create a PR: .. code-block:: console diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst index 3a9e47b3a65e0e..0a7b8bba74751b 100644 --- a/lib/spack/docs/developer_guide.rst +++ b/lib/spack/docs/developer_guide.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,7 +9,6 @@ ..
_developer_guide: -=============== Developer Guide =============== @@ -17,72 +17,46 @@ If you just want to develop packages, see the :doc:`Packaging Guide <packaging_guide>` sections and that you are familiar with the concepts discussed there. --------- Overview -------- Spack is designed with three separate roles in mind: -#. **Users**, who need to install software *without* knowing all the - details about how it is built. -#. **Packagers**, who know how a particular software package is - built and encode this information in package files. -#. **Developers**, who work on Spack, add new features, and try to - make the jobs of packagers and users easier. - -Users could be end-users installing software in their home directory -or administrators installing software to a shared directory on a -shared machine. Packagers could be administrators who want to -automate software builds or application developers who want to make -their software more accessible to users. - -As you might expect, there are many types of users with different -levels of sophistication, and Spack is designed to accommodate both -simple and complex use cases for packages. A user who only knows that -they need a certain package should be able to type something simple, -like ``spack install <package-name>``, and get the package that they -want. If a user wants to ask for a specific version, use particular -compilers, or build several versions with different configurations, -then that should be possible with a minimal amount of additional -specification. +#. **Users**, who need to install software *without* knowing all the details about how it is built. +#. **Packagers**, who know how a particular software package is built and encode this information in package files. +#. **Developers**, who work on Spack, add new features, and try to make the jobs of packagers and users easier. + +Users could be end-users installing software in their home directory or administrators installing software to a shared directory on a shared machine. +Packagers could be administrators who want to automate software builds or application developers who want to make their software more accessible to users. + +As you might expect, there are many types of users with different levels of sophistication, and Spack is designed to accommodate both simple and complex use cases for packages. +A user who only knows that they need a certain package should be able to type something simple, like ``spack install <package-name>``, and get the package that they want. +If a user wants to ask for a specific version, use particular compilers, or build several versions with different configurations, then that should be possible with a minimal amount of additional specification. This gets us to the two key concepts in Spack's software design: #. **Specs**: expressions for describing builds of software, and -#. **Packages**: Python modules that build software according to a - spec. - -A package is a template for building particular software, and a spec -is a descriptor for one or more instances of that template. Users -express the configuration they want using a spec, and a package turns -the spec into a complete build. - -The obvious difficulty with this design is that users underspecify -what they want. To build a software package, the package object needs -a *complete* specification. In Spack, if a spec describes only one -instance of a package, then we say it is **concrete**. If a spec -could describe many instances (i.e., it is underspecified in one way -or another), then we say it is **abstract**.
- -Spack's job is to take an *abstract* spec from the user, find a -*concrete* spec that satisfies the constraints, and hand the task of -building the software off to the package object. - -Packages are managed through Spack's **package repositories**, which allow -packages to be stored in multiple repositories with different namespaces. -The built-in packages are hosted in a separate Git repository and -automatically managed by Spack, while custom repositories can be added -for organization-specific or experimental packages. - -The rest of this document describes all the pieces that come together to make that -happen. +#. **Packages**: Python modules that build software according to a spec. + +A package is a template for building particular software, and a spec is a descriptor for one or more instances of that template. +Users express the configuration they want using a spec, and a package turns the spec into a complete build. + +The obvious difficulty with this design is that users underspecify what they want. +To build a software package, the package object needs a *complete* specification. +In Spack, if a spec describes only one instance of a package, then we say it is **concrete**. +If a spec could describe many instances (i.e., it is underspecified in one way or another), then we say it is **abstract**. + +Spack's job is to take an *abstract* spec from the user, find a *concrete* spec that satisfies the constraints, and hand the task of building the software off to the package object. + +Packages are managed through Spack's **package repositories**, which allow packages to be stored in multiple repositories with different namespaces. +The built-in packages are hosted in a separate Git repository and automatically managed by Spack, while custom repositories can be added for organization-specific or experimental packages. + +The rest of this document describes all the pieces that come together to make that happen. -------------------- Directory Structure ------------------- -So that you can familiarize yourself with the project, we will start -with a high-level view of Spack's directory structure: +So that you can familiarize yourself with the project, we will start with a high-level view of Spack's directory structure: .. code-block:: none @@ -123,80 +97,59 @@ with a high-level view of Spack's directory structure: test/ <- unit test modules util/ <- common code -Spack is designed so that it could live within a `standard UNIX -directory hierarchy `_, so ``lib``, -``var``, and ``opt`` all contain a ``spack`` subdirectory in case -Spack is installed alongside other software. Most of the interesting -parts of Spack live in ``lib/spack``. +Spack is designed so that it could live within a `standard UNIX directory hierarchy `_, so ``lib``, ``var``, and ``opt`` all contain a ``spack`` subdirectory in case Spack is installed alongside other software. +Most of the interesting parts of Spack live in ``lib/spack``. .. note:: - **Package Repositories**: Built-in packages are hosted - in a separate Git repository at `spack/spack-packages `_ - and are automatically cloned to ``~/.spack/package_repos/`` when needed. + **Package Repositories**: Built-in packages are hosted in a separate Git repository at `spack/spack-packages `_ and are automatically cloned to ``~/.spack/package_repos/`` when needed. The ``var/spack/test_repos/`` directory is used for unit tests only. See :ref:`repositories` for details on package repositories. Spack has *one* directory layout, and there is no installation process. 
-Most Python programs do not look like this (they use ``distutils``, ``setup.py``, -etc.), but we wanted to make Spack *very* easy to use. The simple layout -spares users from the need to install Spack into a Python environment. -Many users do not have write access to a Python installation, and installing -an entire new instance of Python to bootstrap Spack would be very complicated. -Users should not have to install a big, complicated package to -use the thing that is supposed to spare them from the details of big, -complicated packages. The end result is that Spack works out of the -box: clone it and add ``bin`` to your ``PATH``, and you are ready to go. +Most Python programs do not look like this (they use ``distutils``, ``setup.py``, etc.), but we wanted to make Spack *very* easy to use. +The simple layout spares users from the need to install Spack into a Python environment. +Many users do not have write access to a Python installation, and installing an entire new instance of Python to bootstrap Spack would be very complicated. +Users should not have to install a big, complicated package to use the thing that is supposed to spare them from the details of big, complicated packages. +The end result is that Spack works out of the box: clone it and add ``bin`` to your ``PATH``, and you are ready to go. --------------- Code Structure -------------- -This section gives an overview of the various Python modules in Spack, -grouped by functionality. +This section gives an overview of the various Python modules in Spack, grouped by functionality. -^^^^^^^^^^^^^^^^^^^^^^^ Package-related modules ^^^^^^^^^^^^^^^^^^^^^^^ :mod:`spack.package_base` - Contains the :class:`~spack.package_base.PackageBase` class, which - is the superclass for all packages in Spack. + Contains the :class:`~spack.package_base.PackageBase` class, which is the superclass for all packages in Spack. :mod:`spack.util.naming` - Contains functions for mapping between Spack package names, - Python module names, and Python class names. + Contains functions for mapping between Spack package names, Python module names, and Python class names. :mod:`spack.directives` - *Directives* are functions that can be called inside a package definition - to modify the package, like :func:`~spack.directives.depends_on` - and :func:`~spack.directives.provides`. See :ref:`dependencies` - and :ref:`virtual-dependencies`. + *Directives* are functions that can be called inside a package definition to modify the package, like :func:`~spack.directives.depends_on` and :func:`~spack.directives.provides`. + See :ref:`dependencies` and :ref:`virtual-dependencies`. :mod:`spack.multimethod` - Implementation of the :func:`@when ` - decorator, which allows :ref:`multimethods ` in - packages. + Implementation of the :func:`@when ` decorator, which allows :ref:`multimethods ` in packages. -^^^^^^^^^^^^^^^^^^^^ Spec-related modules ^^^^^^^^^^^^^^^^^^^^ :mod:`spack.spec` - Contains :class:`~spack.spec.Spec`. Also implements most of the logic for concretization - of specs. + Contains :class:`~spack.spec.Spec`. + Also implements most of the logic for concretization of specs. :mod:`spack.spec_parser` Contains :class:`~spack.spec_parser.SpecParser` and functions related to parsing specs. :mod:`spack.version` - Implements a simple :class:`~spack.version.Version` class with simple - comparison semantics. It also implements :class:`~spack.version.VersionRange` - and :class:`~spack.version.VersionList`. 
All three are comparable with each - other and offer union and intersection operations. Spack uses these classes - to compare versions and to manage version constraints on specs. Comparison - semantics are similar to the ``LooseVersion`` class in ``distutils`` and to - the way RPM compares version strings. + Implements a simple :class:`~spack.version.Version` class with simple comparison semantics. + It also implements :class:`~spack.version.VersionRange` and :class:`~spack.version.VersionList`. + All three are comparable with each other and offer union and intersection operations. + Spack uses these classes to compare versions and to manage version constraints on specs. + Comparison semantics are similar to the ``LooseVersion`` class in ``distutils`` and to the way RPM compares version strings. :mod:`spack.compilers` Submodules contains descriptors for all valid compilers in Spack. @@ -204,11 +157,9 @@ Spec-related modules .. warning:: - Not yet implemented. Currently has two compiler descriptions, - but compilers aren't fully integrated with the build process - yet. + Not yet implemented. + Currently has two compiler descriptions, but compilers aren't fully integrated with the build process yet. -^^^^^^^^^^^^^^^^^ Build environment ^^^^^^^^^^^^^^^^^ @@ -216,57 +167,47 @@ Build environment Handles creating temporary directories for builds. :mod:`spack.build_environment` - This contains utility functions used by the compiler wrapper script, - ``cc``. + This contains utility functions used by the compiler wrapper script, ``cc``. :mod:`spack.directory_layout` Classes that control the way an installation directory is laid out. - Create more implementations of this to change the hierarchy and - naming scheme in ``$spack_prefix/opt`` + Create more implementations of this to change the hierarchy and naming scheme in ``$spack_prefix/opt`` -^^^^^^^^^^^^^^^^^ Spack Subcommands ^^^^^^^^^^^^^^^^^ :mod:`spack.cmd` - Each module in this package implements a Spack subcommand. See - :ref:`writing commands ` for details. + Each module in this package implements a Spack subcommand. + See :ref:`writing commands ` for details. -^^^^^^^^^^ Unit tests ^^^^^^^^^^ ``spack.test`` - Implements Spack's test suite. Add a module and put its name in - the test suite in ``__init__.py`` to add more unit tests. + Implements Spack's test suite. + Add a module and put its name in the test suite in ``__init__.py`` to add more unit tests. -^^^^^^^^^^^^^ Other Modules ^^^^^^^^^^^^^ :mod:`spack.url` - URL parsing, for deducing names and versions of packages from - tarball URLs. + URL parsing, for deducing names and versions of packages from tarball URLs. :mod:`spack.error` - :class:`~spack.error.SpackError`, the base class for - Spack's exception hierarchy. + :class:`~spack.error.SpackError`, the base class for Spack's exception hierarchy. :mod:`spack.llnl.util.tty` - Basic output functions for all of the messages Spack writes to the - terminal. + Basic output functions for all of the messages Spack writes to the terminal. :mod:`spack.llnl.util.tty.color` Implements a color formatting syntax used by ``spack.tty``. :mod:`spack.llnl.util` - In this package are a number of utility modules for the rest of - Spack. + In this package are a number of utility modules for the rest of Spack. .. 
_package-repositories:

-^^^^^^^^^^^^^^^^^^^^
Package Repositories
^^^^^^^^^^^^^^^^^^^^

@@ -274,13 +215,11 @@ Spack's package repositories allow developers to manage packages from multiple s
Understanding this system is important for developing Spack itself.

:mod:`spack.repo`
-  The core module for managing package repositories. Contains the ``Repo`` and ``RepoPath``
-  classes that handle loading and searching packages from multiple repositories.
+  The core module for managing package repositories.
+  Contains the ``Repo`` and ``RepoPath`` classes that handle loading and searching packages from multiple repositories.

-Built-in packages are stored in a separate Git repository (`spack/spack-packages
-`_) rather than being included directly in
-the Spack source tree. This repository is automatically cloned to ``~/.spack/package_repos/``
-when needed.
+Built-in packages are stored in a separate Git repository (`spack/spack-packages `_) rather than being included directly in the Spack source tree.
+This repository is automatically cloned to ``~/.spack/package_repos/`` when needed.

Key concepts:

@@ -293,43 +232,34 @@ See :ref:`repositories` for complete details on configuring and managing package

.. _package_class_structure:

---------------------------
Package class architecture
--------------------------

.. note::

-   This section aims to provide a high-level knowledge of how the package class architecture evolved
-   in Spack, and provides some insights on the current design.
+   This section aims to provide high-level knowledge of how the package class architecture evolved in Spack, and some insight into the current design.

-Packages in Spack were originally designed to support only a single build system. The overall
-class structure for a package looked like:
+Packages in Spack were originally designed to support only a single build system.
+The overall class structure for a package looked like:

.. image:: images/original_package_architecture.png
   :scale: 60 %
   :align: center

-In this architecture the base class ``AutotoolsPackage`` was responsible for both the metadata
-related to the ``autotools`` build system (e.g. dependencies or variants common to all packages
-using it), and for encoding the default installation procedure.
+In this architecture the base class ``AutotoolsPackage`` was responsible for both the metadata related to the ``autotools`` build system (e.g. dependencies or variants common to all packages using it), and for encoding the default installation procedure.

-In reality, a non-negligible number of packages are either changing their build system during the evolution of the
-project, or using different build systems for different platforms. An architecture based on a single class
-requires hacks or other workarounds to deal with these cases.
+In reality, a non-negligible number of packages either change their build system during the evolution of the project, or use different build systems for different platforms.
+An architecture based on a single class requires hacks or other workarounds to deal with these cases.

-To support a model more adherent to reality, Spack v0.19 changed its internal design by extracting
-the attributes and methods related to building a software into a separate hierarchy:
+To support a model closer to reality, Spack v0.19 changed its internal design by extracting the attributes and methods related to building software into a separate hierarchy:

.. image:: images/builder_package_architecture.png
   :scale: 60 %
   :align: center

-In this new format each ``package.py`` contains one ``*Package`` class that gathers all the metadata,
-and one or more ``*Builder`` classes that encode the installation procedure. A specific builder object
-is created just before the software is built, so at a time where Spack knows which build system needs
-to be used for the current installation, and receives a ``package`` object during initialization.
+In this new format each ``package.py`` contains one ``*Package`` class that gathers all the metadata, and one or more ``*Builder`` classes that encode the installation procedure.
+A specific builder object is created just before the software is built, at a time when Spack knows which build system needs to be used for the current installation, and it receives a ``package`` object during initialization.

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Compatibility with single-class format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -339,88 +269,69 @@ The builders are created in the ``spack.builder.create`` function.

.. literalinclude:: _spack_root/lib/spack/spack/builder.py
   :pyobject: create

-To achieve backward compatibility with the single-class format Spack creates in this function a special
-"adapter builder", if no custom builder is detected in the recipe:
+To achieve backward compatibility with the single-class format, Spack creates a special "adapter builder" in this function if no custom builder is detected in the recipe:

.. image:: images/adapter.png
   :scale: 60 %
   :align: center

-Overall the role of the adapter is to route access to attributes of methods first through the ``*Package``
-hierarchy, and then back to the base class builder. This is schematically shown in the diagram above, where
-the adapter role is to "emulate" a method resolution order like the one represented by the red arrows.
+Overall the role of the adapter is to route access to attributes or methods first through the ``*Package`` hierarchy, and then back to the base class builder.
+This is shown schematically in the diagram above, where the adapter's role is to "emulate" a method resolution order like the one represented by the red arrows.

.. _writing-commands:

-----------------
Writing commands
----------------

-Adding a new command to Spack is easy. Simply add a ``.py`` file to
-``lib/spack/spack/cmd/``, where ```` is the name of the subcommand.
+Adding a new command to Spack is easy.
+Simply add a ``.py`` file to ``lib/spack/spack/cmd/``, where ```` is the name of the subcommand.
At a bare minimum, two functions are required in this file:

-^^^^^^^^^^^^^^^^^^
``setup_parser()``
^^^^^^^^^^^^^^^^^^

-Unless your command does not accept any arguments, a ``setup_parser()``
-function is required to define what arguments and flags your command takes.
-See the `Argparse documentation `_
-for more details on how to add arguments.
+If your command accepts any arguments, a ``setup_parser()`` function is required to define what arguments and flags your command takes.
+See the `Argparse documentation `_ for more details on how to add arguments.

-Some commands have a set of subcommands, like ``spack compiler find`` or
-``spack module lmod refresh``. You can add subparsers to your parser to handle
-this. Check out ``spack edit --command compiler`` for an example of this.
+Some commands have a set of subcommands, like ``spack compiler find`` or ``spack module lmod refresh``.
+You can add subparsers to your parser to handle this, as shown in the sketch below.
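+For instance, a minimal sketch of a ``setup_parser()`` that defines subcommands might look like the following (the subcommand names and the ``dest`` attribute here are hypothetical, for illustration only):
+
+.. code-block:: python
+
+   def setup_parser(subparser):
+       # One sub-parser per subcommand; the "dest" attribute records which
+       # subcommand was invoked, so the command function can dispatch on it.
+       sp = subparser.add_subparsers(metavar="SUBCOMMAND", dest="demo_command")
+
+       find_parser = sp.add_parser("find", help="find and report things")
+       find_parser.add_argument("--all", action="store_true", help="report everything")
+
+       sp.add_parser("list", help="list known things")
+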
+Check out ``spack edit --command compiler`` for an example of this. -Many commands take the same arguments and flags. These arguments should -be defined in ``lib/spack/spack/cmd/common/arguments.py`` so that they do not -need to be redefined in multiple commands. +Many commands take the same arguments and flags. +These arguments should be defined in ``lib/spack/spack/cmd/common/arguments.py`` so that they do not need to be redefined in multiple commands. -^^^^^^^^^^^^ ``()`` ^^^^^^^^^^^^ -In order to run your command, Spack searches for a function with the same -name as your command in ``.py``. This is the main method for your -command and can call other helper methods to handle common tasks. +In order to run your command, Spack searches for a function with the same name as your command in ``.py``. +This is the main method for your command and can call other helper methods to handle common tasks. -Remember, before adding a new command, think to yourself whether or not this -new command is actually necessary. Sometimes, the functionality you desire -can be added to an existing command. Also, remember to add unit tests for -your command. If it is not used very frequently, changes to the rest of -Spack can cause your command to break without sufficient unit tests to -prevent this from happening. +Remember, before adding a new command, think to yourself whether or not this new command is actually necessary. +Sometimes, the functionality you desire can be added to an existing command. +Also, remember to add unit tests for your command. +If it is not used very frequently, changes to the rest of Spack can cause your command to break without sufficient unit tests to prevent this from happening. -Whenever you add/remove/rename a command or flags for an existing command, -make sure to update Spack's `Bash tab completion script -`_. +Whenever you add/remove/rename a command or flags for an existing command, make sure to update Spack's `Bash tab completion script `_. -------------- Writing Hooks ------------- -A hook is a callback that makes it easy to design functions that run -for different events. We do this by defining hook types and then -inserting them at different places in the Spack codebase. Whenever a hook -type triggers by way of a function call, we find all the hooks of that type -and run them. +A hook is a callback that makes it easy to design functions that run for different events. +We do this by defining hook types and then inserting them at different places in the Spack codebase. +Whenever a hook type triggers by way of a function call, we find all the hooks of that type and run them. Spack defines hooks by way of a module in the ``lib/spack/spack/hooks`` directory. This module has to be registered in ``lib/spack/spack/hooks/__init__.py`` so that Spack is aware of it. This section will cover the basic kind of hooks and how to write them. -^^^^^^^^^^^^^^ Types of Hooks ^^^^^^^^^^^^^^ -The following hooks are currently implemented to make it easy for you, -the developer, to add hooks at different stages of a Spack install or similar. +The following hooks are currently implemented to make it easy for you, the developer, to add hooks at different stages of a Spack install or similar. If there is a hook that you would like and it is missing, you can propose to add a new one. -""""""""""""""""""""" ``pre_install(spec)`` """"""""""""""""""""" @@ -428,45 +339,39 @@ A ``pre_install`` hook is run within the install subprocess, directly before the It expects a single argument of a spec. 
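+For example, a hook module is just a Python file in ``lib/spack/spack/hooks`` (registered in its ``__init__.py``) that defines a callback with the hook's name; a minimal sketch, with a hypothetical message, might look like:
+
+.. code-block:: python
+
+   def pre_install(spec):
+       # Called in the install subprocess right before the installation runs.
+       # A real hook would do something useful with the spec here.
+       print(f"pre_install: about to build {spec.name}")
+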
-""""""""""""""""""""""""""""""""""""" ``post_install(spec, explicit=None)`` """"""""""""""""""""""""""""""""""""" -A ``post_install`` hook is run within the install subprocess, directly after the installation finishes, -but before the build stage is removed and the spec is registered in the database. It expects two -arguments: the spec and an optional boolean indicating whether this spec is being installed explicitly. +A ``post_install`` hook is run within the install subprocess, directly after the installation finishes, but before the build stage is removed and the spec is registered in the database. +It expects two arguments: the spec and an optional boolean indicating whether this spec is being installed explicitly. -"""""""""""""""""""""""""""""""""""""""""""""""""""" ``pre_uninstall(spec)`` and ``post_uninstall(spec)`` """""""""""""""""""""""""""""""""""""""""""""""""""" These hooks are currently used for cleaning up module files after uninstall. -^^^^^^^^^^^^^^^^^^^^^^ Adding a New Hook Type ^^^^^^^^^^^^^^^^^^^^^^ -Adding a new hook type is very simple! In ``lib/spack/spack/hooks/__init__.py``, -you can simply create a new ``HookRunner`` that is named to match your new hook. -For example, let's say you want to add a new hook called ``post_log_write`` -to trigger after anything is written to a logger. You would add it as follows: +Adding a new hook type is very simple! +In ``lib/spack/spack/hooks/__init__.py``, you can simply create a new ``HookRunner`` that is named to match your new hook. +For example, let's say you want to add a new hook called ``post_log_write`` to trigger after anything is written to a logger. +You would add it as follows: .. code-block:: python # pre/post install and run by the install subprocess - pre_install = HookRunner('pre_install') - post_install = HookRunner('post_install') + pre_install = HookRunner("pre_install") + post_install = HookRunner("post_install") # hooks related to logging - post_log_write = HookRunner('post_log_write') # <- here is my new hook! + post_log_write = HookRunner("post_log_write") # <- here is my new hook! -You then need to decide what arguments your hook would expect. Since this is -related to logging, let's say that you want a message and level. That means -that when you add a Python file to the ``lib/spack/spack/hooks`` -folder with one or more callbacks intended to be triggered by this hook, you might -use your new hook as follows: +You then need to decide what arguments your hook would expect. +Since this is related to logging, let's say that you want a message and level. +That means that when you add a Python file to the ``lib/spack/spack/hooks`` folder with one or more callbacks intended to be triggered by this hook, you might use your new hook as follows: .. code-block:: python @@ -474,7 +379,7 @@ use your new hook as follows: """Do something custom with the message and level every time we write to the log """ - print('running post_log_write!') + print("running post_log_write!") To use the hook, we would call it as follows somewhere in the logic to do logging. @@ -488,32 +393,102 @@ In this example, we use it outside of a logger that is already defined: spack.hooks.post_log_write(message, logger.level) -This is not to say that this would be the best way to implement an integration -with the logger (you would probably want to write a custom logger, or you could -have the hook defined within the logger), but it serves as an example of writing a hook. 
+This is not to say that this would be the best way to implement an integration with the logger (you would probably want to write a custom logger, or you could have the hook defined within the logger), but it serves as an example of writing a hook.

-----------
-Unit tests
-----------

-------------
Unit testing
------------

----------------------
+--------------------------
+Debugging Unit Tests in CI
+--------------------------
+
+Spack runs its CI for unit tests via GitHub Actions from the Spack repo.
+The unit tests are run for each platform Spack supports: Windows, Linux, and macOS.
+It may be the case that a unit test fails or passes on just one of these platforms.
+When the platform is one the PR author does not have access to, it can be difficult to reproduce, diagnose, and fix a CI failure.
+Thankfully, PR authors can take advantage of a GitHub Actions action to gain temporary access to the failing platform from the context of their PRs.
+Simply copy the following GitHub Actions YAML stanza into `the GHA workflow file `__ in the ``steps`` section of whatever unit test needs debugging.
+
+.. code-block:: yaml
+
+   - name: Setup tmate session
+     uses: mxschmitt/action-tmate@c0afd6f790e3a5564914980036ebf83216678101
+
+Ideally this would be inserted somewhere after GHA checks out Spack and does any setup, but before the unit tests themselves are run.
+You can of course put this stanza after the unit tests, but then you'll be stuck waiting for the unit tests to complete (potentially up to ~30m) and will need to add additional logic to the YAML for the case where the unit tests fail.
+
+For example, if you were to add this step to the Linux unit test CI, it would look something like:
+
+.. code-block:: yaml
+
+   - name: Bootstrap clingo
+     if: ${{ matrix.concretizer == 'clingo' }}
+     env:
+       SPACK_PYTHON: python
+     run: |
+       . share/spack/setup-env.sh
+       spack bootstrap disable spack-install
+       spack bootstrap now
+       spack -v solve zlib
+   - name: Setup tmate session
+     uses: mxschmitt/action-tmate@c0afd6f790e3a5564914980036ebf83216678101
+   - name: Run unit tests
+     env:
+       SPACK_PYTHON: python
+       SPACK_TEST_PARALLEL: 4
+       COVERAGE: true
+       COVERAGE_FILE: coverage/.coverage-${{ matrix.os }}-python${{ matrix.python-version }}
+       UNIT_TEST_COVERAGE: ${{ matrix.python-version == '3.14' }}
+     run: |-
+       share/spack/qa/run-unit-tests
+
+Note that the SSH session comes after Spack does its setup but before it runs the unit tests.
+
+Once this step is present in the job definition, it will be triggered for each CI run.
+This action provides access to an SSH server running on the GHA runner that is hosting a given CI run.
+As the action runs, you should observe output similar to:
+
+.. code-block:: console
+
+   ssh 5RjFs7LPdtwGG8cwSPkGrdMNg@sfo2.tmate.io
+   https://tmate.io/t/5RjFs7LPdtwGG8cwSPkGrdMNg
+
+The first line is the SSH command necessary to connect to the server; the second line is a tmate web UI that also provides access to the SSH server on the runner.
+
+.. note:: The web UI has occasionally been unresponsive; if it does not respond within ~10s, you'll need to use your local SSH utility.
+
+Once connected via SSH, you have the same level of access to the machine that the CI job's user does.
+Spack's source should be available already (depending on where the step was inserted).
+So you can just set up the shell to run Spack via the setup scripts and then debug as needed.
+
+.. note:: If you have configured your GitHub profile with SSH keys, the action will be aware of this and require those keys to access the SSH session.
+
+.. note:: If you are on Windows, you'll be dropped into an MSYS shell; Spack is not supported inside MSYS, so it is strongly recommended to drop into a CMD or PowerShell prompt.
+
+You will have access to this SSH session for as long as GitHub allows a job to be alive.
+
+Once you have finished debugging, remove this action from the GitHub Actions workflow.
+
+If you want to continue a workflow while you are inside a session, just create an empty file named ``continue`` either in the root directory or in the project directory.
+
+This action has a few options to configure behavior, such as SSH key handling, the tmate server, and detached mode.
+For more on how to use those options, see the action's docs at https://github.com/mxschmitt/action-tmate.
+
Developer environment
---------------------

.. warning::

-   This is an experimental feature. It is expected to change and you should
-   not use it in a production environment.
+   This is an experimental feature.
+   It is expected to change and you should not use it in a production environment.

-When installing a package, we currently have support to export environment
-variables to specify adding debug flags to the build. By default, a package
-installation will build without any debug flags. However, if you want to add them,
-you can export:
+When installing a package, we currently support exporting environment variables to specify debug flags for the build.
+By default, a package installation will build without any debug flags.
+However, if you want to add them, you can export:

.. code-block:: console

@@ -529,27 +504,23 @@ If you want to add custom flags, you should export an additional variable:

   export SPACK_DEBUG_FLAGS="-g"
   spack install zlib

-These environment variables will eventually be integrated into Spack so
-they are set from the command line.
+These environment variables will eventually be integrated into Spack so they are set from the command line.

-------------------
Developer commands
------------------

.. _cmd-spack-doc:

-^^^^^^^^^^^^^
``spack doc``
^^^^^^^^^^^^^

.. _cmd-spack-style:

-^^^^^^^^^^^^^^^
``spack style``
^^^^^^^^^^^^^^^

-``spack style`` exists to help the developer check imports and style with
-mypy, Flake8, isort, and (soon) Black. To run all style checks, simply do:
+``spack style`` exists to help the developer check imports and style with mypy, Flake8, isort, and (soon) Black.
+To run all style checks, simply do:

.. code-block:: console

@@ -561,57 +532,40 @@ To run automatic fixes for isort, you can do:

   $ spack style --fix

-You do not need any of these Python packages installed on your system for
-the checks to work! Spack will bootstrap install them from packages for
-your use.
+You do not need any of these Python packages installed on your system for the checks to work!
+Spack will bootstrap install them from packages for your use.

-^^^^^^^^^^^^^^^^^^^
``spack unit-test``
^^^^^^^^^^^^^^^^^^^

-See the :ref:`contributor guide section ` on
-``spack unit-test``.
+See the :ref:`contributor guide section ` on ``spack unit-test``.

.. _cmd-spack-python:

-^^^^^^^^^^^^^^^^
``spack python``
^^^^^^^^^^^^^^^^

-``spack python`` is a command that lets you import and debug things as if
-you were in a Spack interactive shell. Without any arguments, it is similar
-to a normal interactive Python shell, except you can import ``spack`` and any
-other Spack modules:
+``spack python`` is a command that lets you import and debug things as if you were in a Spack interactive shell.
+Without any arguments, it is similar to a normal interactive Python shell, except you can import ``spack`` and any other Spack modules: .. code-block:: console $ spack python - Spack version 0.10.0 - Python 2.7.13, Linux x86_64 >>> from spack.version import Version - >>> a = Version('1.2.3') - >>> b = Version('1_2_3') + >>> a = Version("1.2.3") + >>> b = Version("1_2_3") >>> a == b True - >>> c = Version('1.2.3b') + >>> c = Version("1.2.3b") >>> c > a True >>> -If you prefer using an IPython interpreter, given that IPython is installed, -you can specify the interpreter with ``-i``: +If you prefer using an IPython interpreter, given that IPython is installed, you can specify the interpreter with ``-i``: .. code-block:: console $ spack python -i ipython - Python 3.8.3 (default, May 19 2020, 18:47:26) - Type 'copyright', 'credits' or 'license' for more information - IPython 7.17.0 -- An enhanced Interactive Python. Type '?' for help. - - - Spack version 0.16.0 - Python 3.8.3, Linux x86_64 - In [1]: @@ -635,18 +589,15 @@ or a file: just like you would with the normal Python command. -.. _cmd-spack-url: +.. _cmd-spack-blame: - -^^^^^^^^^^^^^^^ ``spack blame`` ^^^^^^^^^^^^^^^ -``spack blame`` is a way to quickly see contributors to packages or files -in Spack's source tree. For built-in packages, this shows contributors to the package -files in the separate ``spack/spack-packages`` repository. You should provide a target -package name or file name to the command. Here is an example asking to see contributions -for the package "python": +``spack blame`` is a way to quickly see contributors to packages or files in Spack's source tree. +For built-in packages, this shows contributors to the package files in the separate ``spack/spack-packages`` repository. +You should provide a target package name or file name to the command. +Here is an example asking to see contributions for the package "python": .. code-block:: console @@ -658,9 +609,8 @@ for the package "python": 2 weeks ago 930 100.0 -By default, you will get a table view (shown above) sorted by date of contribution, -with the most recent contribution at the top. If you want to sort instead -by percentage of code contribution, then add ``-p``: +By default, you will get a table view (shown above) sorted by date of contribution, with the most recent contribution at the top. +If you want to sort instead by percentage of code contribution, then add ``-p``: .. code-block:: console @@ -682,23 +632,23 @@ Finally, to get a JSON export of the data, add ``--json``: $ spack blame --json python -^^^^^^^^^^^^^ +.. _cmd-spack-url: + ``spack url`` ^^^^^^^^^^^^^ -A package containing a single URL can be used to download several different -versions of the package. If you have ever wondered how this works, all of the -magic is in :mod:`spack.url`. This module contains methods for extracting -the name and version of a package from its URL. The name is used by -``spack create`` to guess the name of the package. By determining the version -from the URL, Spack can replace it with other versions to determine where to -download them from. +A package containing a single URL can be used to download several different versions of the package. +If you have ever wondered how this works, all of the magic is in :mod:`spack.url`. +This module contains methods for extracting the name and version of a package from its URL. +The name is used by ``spack create`` to guess the name of the package. 
+By determining the version from the URL, Spack can replace it with other versions to determine where to download them from. -The regular expressions in ``parse_name_offset`` and ``parse_version_offset`` -are used to extract the name and version, but they are not perfect. In order -to debug Spack's URL parsing support, the ``spack url`` command can be used. +The regular expressions in ``parse_name_offset`` and ``parse_version_offset`` are used to extract the name and version, but they are not perfect. +In order to debug Spack's URL parsing support, the ``spack url`` command can be used. + + +.. _cmd-spack-url-parse: -""""""""""""""""""" ``spack url parse`` """"""""""""""""""" @@ -706,53 +656,45 @@ If you need to debug a single URL, you can use the following command: .. command-output:: spack url parse http://cache.ruby-lang.org/pub/ruby/2.2/ruby-2.2.0.tar.gz -You will notice that the name and version of this URL are correctly detected, -and you can even see which regular expressions it was matched to. However, -you will notice that when it substitutes the version number in, it does not -replace the ``2.2`` with ``9.9`` where we would expect ``9.9.9b`` to live. -This particular package may require a ``list_url`` or ``url_for_version`` -function. +You will notice that the name and version of this URL are correctly detected, and you can even see which regular expressions it was matched to. +However, you will notice that when it substitutes the version number in, it does not replace the ``2.2`` with ``9.9`` where we would expect ``9.9.9b`` to live. +This particular package may require a ``list_url`` or ``url_for_version`` function. -This command also accepts a ``--spider`` flag. If provided, Spack searches -for other versions of the package and prints the matching URLs. +This command also accepts a ``--spider`` flag. +If provided, Spack searches for other versions of the package and prints the matching URLs. + + +.. _cmd-spack-url-list: -"""""""""""""""""" ``spack url list`` """""""""""""""""" -This command lists every URL in every package in Spack. If given the -``--color`` and ``--extrapolation`` flags, it also colors the part of -the string that it detected to be the name and version. The -``--incorrect-name`` and ``--incorrect-version`` flags can be used to -print URLs that were not being parsed correctly. +This command lists every URL in every package in Spack. +If given the ``--color`` and ``--extrapolation`` flags, it also colors the part of the string that it detected to be the name and version. +The ``--incorrect-name`` and ``--incorrect-version`` flags can be used to print URLs that were not being parsed correctly. + + +.. _cmd-spack-url-summary: -""""""""""""""""""""" ``spack url summary`` """"""""""""""""""""" -This command attempts to parse every URL for every package in Spack -and prints a summary of how many of them are being correctly parsed. -It also prints a histogram showing which regular expressions are being -matched and how frequently: +This command attempts to parse every URL for every package in Spack and prints a summary of how many of them are being correctly parsed. +It also prints a histogram showing which regular expressions are being matched and how frequently: .. command-output:: spack url summary -This command is essential for anyone adding or changing the regular -expressions that parse names and versions. By running this command -before and after the change, you can make sure that your regular -expression fixes more packages than it breaks. 
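+One simple way to use it, sketched below with assumed file names, is to capture the summary before and after editing the regular expressions and compare the two:
+
+.. code-block:: console
+
+   $ spack url summary > before.txt
+   # ... edit parse_name_offset / parse_version_offset in spack.url ...
+   $ spack url summary > after.txt
+   $ diff before.txt after.txt
+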
+This command is essential for anyone adding or changing the regular expressions that parse names and versions. +By running this command before and after the change, you can make sure that your regular expression fixes more packages than it breaks. ---------- Profiling --------- -Spack has some limited built-in support for profiling, and can report -statistics using standard Python timing tools. To use this feature, -supply ``--profile`` to Spack on the command line, before any subcommands. +Spack has some limited built-in support for profiling, and can report statistics using standard Python timing tools. +To use this feature, supply ``--profile`` to Spack on the command line, before any subcommands. .. _spack-p: -^^^^^^^^^^^^^^^^^^^ ``spack --profile`` ^^^^^^^^^^^^^^^^^^^ @@ -761,139 +703,110 @@ supply ``--profile`` to Spack on the command line, before any subcommands. .. command-output:: spack --profile graph hdf5 :ellipsis: 25 -The bottom of the output shows the most time-consuming functions, -slowest on top. The profiling support is from Python's built-in tool, -`cProfile -`_. +The bottom of the output shows the most time-consuming functions, slowest on top. +The profiling support is from Python's built-in tool, `cProfile `_. .. _releases: --------- Releases -------- -This section documents Spack's release process. It is intended for -project maintainers, as the tasks described here require maintainer -privileges on the Spack repository. For others, we hope this section at -least provides some insight into how the Spack project works. +This section documents Spack's release process. +It is intended for project maintainers, as the tasks described here require maintainer privileges on the Spack repository. +For others, we hope this section at least provides some insight into how the Spack project works. .. _release-branches: -^^^^^^^^^^^^^^^^ Release branches ^^^^^^^^^^^^^^^^ -There are currently two types of Spack releases: :ref:`major releases -` (``0.21.0``, ``0.22.0``, etc.) and :ref:`patch releases -` (``0.22.1``, ``0.22.2``, ``0.22.3``, etc.). Here is a -diagram of how Spack release branches work: +There are currently two types of Spack releases: :ref:`minor releases ` (``1.1.0``, ``1.2.0``, etc.) and :ref:`patch releases ` (``1.1.1``, ``1.1.2``, ``1.1.3``, etc.). +Here is a diagram of how Spack release branches work: .. code-block:: text - o branch: develop (latest version, v0.23.0.dev0) + o branch: develop (latest version, v1.2.0.dev0) | o - | o branch: releases/v0.22, tag: v0.22.1 + | o branch: releases/v1.1, tag: v1.1.1 o | - | o tag: v0.22.0 + | o tag: v1.1.0 o | | o |/ o | o - | o branch: releases/v0.21, tag: v0.21.2 + | o branch: releases/v1.0, tag: v1.0.2 o | - | o tag: v0.21.1 + | o tag: v1.0.1 o | - | o tag: v0.21.0 + | o tag: v1.0.0 o | | o |/ o -The ``develop`` branch has the latest contributions, and nearly all pull -requests target ``develop``. The ``develop`` branch will report that its -version is that of the next **major** release with a ``.dev0`` suffix. - -Each Spack release series also has a corresponding branch, e.g., -``releases/v0.22`` has ``v0.22.x`` versions of Spack, and -``releases/v0.21`` has ``v0.21.x`` versions. A major release is the first -tagged version on a release branch. Minor releases are back-ported from -develop onto release branches. This is typically done by cherry-picking -bugfix commits off of ``develop``. +The ``develop`` branch has the latest contributions, and nearly all pull requests target ``develop``. 
+The ``develop`` branch will report that its version is that of the next **minor** release with a ``.dev0`` suffix. -To avoid version churn for users of a release series, minor releases -**should not** make changes that would change the concretization of -packages. They should generally only contain fixes to the Spack core. -However, sometimes priorities are such that new functionality needs to -be added to a minor release. +Each Spack release series also has a corresponding branch, e.g., ``releases/v1.1`` has ``v1.1.x`` versions of Spack, and ``releases/v1.0`` has ``v1.0.x`` versions. +A minor release is the first tagged version on a release branch. +Patch releases are back-ported from develop onto release branches. +This is typically done by cherry-picking bugfix commits off of ``develop``. -Both major and minor releases are tagged. As a convenience, we also tag -the latest release as ``releases/latest``, so that users can easily check -it out to get the latest stable version. See :ref:`updating-latest-release` -for more details. +To avoid version churn for users of a release series, patch releases **should not** make changes that would change the concretization of packages. +They should generally only contain fixes to the Spack core. +However, sometimes priorities are such that new functionality needs to be added to a patch release. -.. note:: +Both minor and patch releases are tagged. +As a convenience, we also tag the latest release as ``releases/latest``, so that users can easily check it out to get the latest stable version. +See :ref:`updating-latest-release` for more details. - Older spack releases were merged **back** into develop so that we could - do fancy things with tags, but since tarballs and many git checkouts do - not have tags, this proved overly complex and confusing. +.. admonition:: PEP 440 compliance + :class: note - We have since converted to using `PEP 440 `_ - compliant versions. `See here `_ for - details. + Spack releases up to ``v0.17`` were merged back into the ``develop`` branch to ensure that release tags would appear among its ancestors. + Since ``v0.18`` we opted to have a linear history of the ``develop`` branch, for reasons explained `here `_. + At the same time, we converted to using `PEP 440 `_ compliant versions. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Scheduling work for releases ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We schedule work for **major releases** through `milestones -`_ and `GitHub Projects -`_, while **patch releases** use `labels -`_. +We schedule work for **minor releases** through `milestones `_ and `GitHub Projects `_, while **patch releases** use `labels `_. -There is only one milestone open at a time. Its name corresponds to the next major version, for -example ``v0.23``. Important issues and pull requests should be assigned to this milestone by -core developers, so that they are not forgotten at the time of release. The milestone is closed -when the release is made, and a new milestone is created for the next major release. +While there can be multiple milestones open at a given time, only one is usually active. +Its name corresponds to the next major/minor version, for example ``v1.1.0``. +Important issues and pull requests should be assigned to this milestone by core developers, so that they are not forgotten at the time of release. +The milestone is closed when the release is made, and a new milestone is created for the next major/minor release, if not already there. -Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. 
Spack developers -assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. This will make the -issue appear in the `Triaged bugs `_ project board. -Important issues should be assigned to the next milestone as well, so they appear at the top of -the project board. +Bug reports in GitHub issues are automatically labelled ``bug`` and ``triage``. +Spack developers assign one of the labels ``impact-low``, ``impact-medium`` or ``impact-high``. +This will make the issue appear in the `Triaged bugs `_ project board. +Important issues should be assigned to the next milestone as well, so they appear at the top of the project board. -Spack's milestones are not firm commitments so we move work between releases frequently. If we -need to make a release and some tasks are not yet done, we will simply move them to the next major -release milestone, rather than delaying the release to complete them. +Spack's milestones are not firm commitments so we move work between releases frequently. +If we need to make a release and some tasks are not yet done, we will simply move them to the next minor release milestone, rather than delaying the release to complete them. -^^^^^^^^^^^^^^^^^^^^^ Backporting bug fixes ^^^^^^^^^^^^^^^^^^^^^ -When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one -(or more) of the ``releases/vX.Y`` branches. Only the release manager is responsible for doing -backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug -fix is available yet) with ``vX.Y.Z`` labels. The label should correspond to the next patch version -that the bug fix should be backported to. +When a bug is fixed in the ``develop`` branch, it is often necessary to backport the fix to one (or more) of the ``releases/vX.Y`` branches. +Only the release manager is responsible for doing backports, but Spack maintainers are responsible for labelling pull requests (and issues if no bug fix is available yet) with ``vX.Y.Z`` labels. +The labels should correspond to the future patch versions that the bug fix should be backported to. Backports are done publicly by the release manager using a pull request named ``Backports vX.Y.Z``. -This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y`` -branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch. +This pull request is opened from the ``backports/vX.Y.Z`` branch, targets the ``releases/vX.Y`` branch and contains a (growing) list of cherry-picked commits from the ``develop`` branch. Typically there are one or two backport pull requests open at any given time. -.. _major-releases: +.. _minor-releases: -^^^^^^^^^^^^^^^^^^^^^ -Making major releases +Making minor releases ^^^^^^^^^^^^^^^^^^^^^ -Assuming all required work from the milestone is completed, the steps to make the major release -are: +Assuming all required work from the milestone is completed, the steps to make the minor release are: -#. `Create a new milestone `_ for the next major - release. +#. `Create a new milestone `_ for the next major/minor release. #. `Create a new label `_ for the next patch release. @@ -903,22 +816,18 @@ are: .. code-block:: console - $ git checkout -b releases/v0.23 develop + $ git checkout -b releases/v1.1 develop - For a version ``vX.Y.Z``, the branch's name should be - ``releases/vX.Y``. That is, you should create a ``releases/vX.Y`` - branch if you are preparing the ``X.Y.0`` release. 
+ For a version ``vX.Y.Z``, the branch's name should be ``releases/vX.Y``. + That is, you should create a ``releases/vX.Y`` branch if you are preparing the ``X.Y.0`` release. -#. Remove the ``dev0`` development release segment from the version tuple in - ``lib/spack/spack/__init__.py``. +#. Remove the ``dev0`` development release segment from the version tuple in ``lib/spack/spack/__init__.py``. - The version number itself should already be correct and should not be - modified. + The version number itself should already be correct and should not be modified. #. Update ``CHANGELOG.md`` with major highlights in bullet form. - Use proper Markdown formatting, like `this example from v0.15.0 - `_. + Use proper Markdown formatting, like `this example from v1.0.0 `_. #. Push the release branch to GitHub. @@ -928,19 +837,15 @@ are: * Build tests * The E4S pipeline at `gitlab.spack.io `_ - If CI is not passing, submit pull requests to ``develop`` as normal - and keep rebasing the release branch on ``develop`` until CI passes. + If CI is not passing, submit pull requests to ``develop`` as normal and keep rebasing the release branch on ``develop`` until CI passes. -#. Make sure the entire documentation is up to date. If documentation - is outdated, submit pull requests to ``develop`` as normal - and keep rebasing the release branch on ``develop``. +#. Make sure the entire documentation is up to date. + If documentation is outdated, submit pull requests to ``develop`` as normal and keep rebasing the release branch on ``develop``. -#. Bump the major version in the ``develop`` branch. +#. Bump the minor version in the ``develop`` branch. - Create a pull request targeting the ``develop`` branch, bumping the major - version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment. - For instance, when you have just released ``v0.23.0``, set the version - to ``(0, 24, 0, 'dev0')`` on ``develop``. + Create a pull request targeting the ``develop`` branch, bumping the minor version in ``lib/spack/spack/__init__.py`` with a ``dev0`` release segment. + For instance, when you have just released ``v1.1.0``, set the version to ``(1, 2, 0, 'dev0')`` on ``develop``. #. Follow the steps in :ref:`publishing-releases`. @@ -951,57 +856,44 @@ are: .. _patch-releases: -^^^^^^^^^^^^^^^^^^^^^ Making patch releases ^^^^^^^^^^^^^^^^^^^^^ -To make the patch release process both efficient and transparent, we use a *backports pull request* -which contains cherry-picked commits from the ``develop`` branch. The majority of the work is to -cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``; -this ensures cherry-picking happens in order and makes conflicts easier to resolve since the -changes are fresh in the mind of the developer. +To make the patch release process both efficient and transparent, we use a *backports pull request* which contains cherry-picked commits from the ``develop`` branch. +The majority of the work is to cherry-pick the bug fixes, which ideally should be done as soon as they land on ``develop``; this ensures cherry-picking happens in order and makes conflicts easier to resolve since the changes are fresh in the mind of the developer. -The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. It -is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch. +The backports pull request is always titled ``Backports vX.Y.Z`` and is labelled ``backports``. 
+It is opened from a branch named ``backports/vX.Y.Z`` and targets the ``releases/vX.Y`` branch. -Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit -on ``develop`` to the ``backports/vX.Y.Z`` branch. For pull requests that were rebased (or not -squashed), cherry-pick each associated commit individually. Never force-push to the -``backports/vX.Y.Z`` branch. +Whenever a pull request labelled ``vX.Y.Z`` is merged, cherry-pick the associated squashed commit on ``develop`` to the ``backports/vX.Y.Z`` branch. +For pull requests that were rebased (or not squashed), cherry-pick each associated commit individually. +Never force-push to the ``backports/vX.Y.Z`` branch. .. warning:: - Sometimes you may **still** get merge conflicts even if you have - cherry-picked all the commits in order. This generally means there - is some other intervening pull request that the one you are trying - to pick depends on. In these cases, you will need to make a judgment - call regarding those pull requests. Consider the number of affected - files and/or the resulting differences. + Sometimes you may **still** get merge conflicts even if you have cherry-picked all the commits in order. + This generally means there is some other intervening pull request that the one you are trying to pick depends on. + In these cases, you will need to make a judgment call regarding those pull requests. + Consider the number of affected files and/or the resulting differences. 1. If the changes are small, you might just cherry-pick it. - 2. If the changes are large, then you may decide that this fix is not - worth including in a patch release, in which case you should remove - the label from the pull request. Remember that large, manual backports - are seldom the right choice for a patch release. + 2. If the changes are large, then you may decide that this fix is not worth including in a patch release, in which case you should remove the label from the pull request. + Remember that large, manual backports are seldom the right choice for a patch release. -When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch -release as follows: +When all commits are cherry-picked in the ``backports/vX.Y.Z`` branch, make the patch release as follows: -#. `Create a new label `_ ``vX.Y.{Z+1}`` for the next patch - release. +#. `Create a new label `_ ``vX.Y.{Z+1}`` for the next patch release. #. Replace the label ``vX.Y.Z`` with ``vX.Y.{Z+1}`` for all PRs and issues that are not yet done. -#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the - ``backports/vX.Y.Z`` branch, that both bumps the Spack version number and updates the changelog: +#. Manually push a single commit with commit message ``Set version to vX.Y.Z`` to the ``backports/vX.Y.Z`` branch, that both bumps the Spack version number and updates the changelog: 1. Bump the version in ``lib/spack/spack/__init__.py``. 2. Update ``CHANGELOG.md`` with a list of the changes. - This is typically a summary of the commits you cherry-picked onto the - release branch. See `the changelog from v0.14.1 - `_. + This is typically a summary of the commits you cherry-picked onto the release branch. + See `the changelog from v1.0.2 `_. #. Make sure CI passes on the **backports pull request**, including: @@ -1009,16 +901,13 @@ release as follows: * Build tests * The E4S pipeline at `gitlab.spack.io `_ -#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. 
This - is needed to keep track in the release branch of all the commits that were - cherry-picked. +#. Merge the ``Backports vX.Y.Z`` PR with the **Rebase and merge** strategy. + This is needed to keep track in the release branch of all the commits that were cherry-picked. #. Make sure CI passes on the last commit of the **release branch**. -#. In the rare case you need to include additional commits in the patch release after the backports - PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release - branch with a single force-push, open a new backports PR named ``Backports vX.Y.Z (2)``, and - repeat the process. Avoid repeated force-pushes to the release branch. +#. In the rare case you need to include additional commits in the patch release after the backports PR is merged, it is best to delete the last commit ``Set version to vX.Y.Z`` from the release branch with a single force-push, open a new backports PR named ``Backports vX.Y.Z (2)``, and repeat the process. + Avoid repeated force-pushes to the release branch. #. Follow the steps in :ref:`publishing-releases`. @@ -1026,29 +915,24 @@ release as follows: #. Follow the steps in :ref:`announcing-releases`. -#. Submit a PR to update the ``CHANGELOG.md`` in the ``develop`` branch - with the addition of this patch release. +#. Submit a PR to update the ``CHANGELOG.md`` in the ``develop`` branch with the addition of this patch release. .. _publishing-releases: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Publishing a release on GitHub ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. Create the release in GitHub. - * Go to - `github.com/spack/spack/releases `_ - and click ``Draft a new release``. + * Go to `github.com/spack/spack/releases `_ and click ``Draft a new release``. * Set ``Tag version`` to the name of the tag that will be created. - The name should start with ``v`` and contain *all three* - parts of the version (e.g., ``v0.15.0`` or ``v0.15.1``). + The name should start with ``v`` and contain *all three* parts of the version (e.g., ``v1.1.0`` or ``v1.1.1``). - * Set ``Target`` to the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``). + * Set ``Target`` to the ``releases/vX.Y`` branch (e.g., ``releases/v1.0``). - * Set ``Release title`` to ``vX.Y.Z`` to match the tag (e.g., ``v0.15.1``). + * Set ``Release title`` to ``vX.Y.Z`` to match the tag (e.g., ``v1.0.1``). * Paste the latest release Markdown from your ``CHANGELOG.md`` file as the text. @@ -1056,57 +940,43 @@ Publishing a release on GitHub #. When you are ready to finalize the release, click ``Publish release``. -#. Immediately after publishing, go back to - `github.com/spack/spack/releases - `_ and download the - auto-generated ``.tar.gz`` file for the release. It is the ``Source - code (tar.gz)`` link. +#. Immediately after publishing, go back to `github.com/spack/spack/releases `_ and download the auto-generated ``.tar.gz`` file for the release. + It is the ``Source code (tar.gz)`` link. -#. Click ``Edit`` on the release you just made and attach the downloaded - release tarball as a binary. This does two things: +#. Click ``Edit`` on the release you just made and attach the downloaded release tarball as a binary. + This does two things: #. Makes sure that the hash of our releases does not change over time. - GitHub sometimes annoyingly changes the way they generate tarballs - that can result in the hashes changing if you rely on the - auto-generated tarball links. 
+ GitHub sometimes annoyingly changes the way they generate tarballs that can result in the hashes changing if you rely on the auto-generated tarball links. #. Gets download counts on releases visible through the GitHub API. - GitHub tracks downloads of artifacts, but *not* the source - links. See the `releases - page `_ and search - for ``download_count`` to see this. + GitHub tracks downloads of artifacts, but *not* the source links. + See the `releases page `_ and search for ``download_count`` to see this. -#. Go to `readthedocs.org `_ and - activate the release tag. +#. Go to `readthedocs.org `_ and activate the release tag. - This builds the documentation and makes the released version - selectable in the versions menu. + This builds the documentation and makes the released version selectable in the versions menu. .. _updating-latest-release: -^^^^^^^^^^^^^^^^^^^^^^^^^^ Updating `releases/latest` ^^^^^^^^^^^^^^^^^^^^^^^^^^ -If the new release is the **highest** Spack release yet, you should -also tag it as ``releases/latest``. For example, suppose the highest -release is currently ``v0.22.3``: +If the new release is the **highest** Spack release yet, you should also tag it as ``releases/latest``. +For example, suppose the highest release is currently ``v1.1.3``: -* If you are releasing ``v0.22.4`` or ``v0.23.0``, then you should tag - it with ``releases/latest``, as these are higher than ``v0.22.3``. +* If you are releasing ``v1.1.4`` or ``v1.2.0``, then you should tag it with ``releases/latest``, as these are higher than ``v1.1.3``. -* If you are making a new release of an **older** major version of - Spack, e.g., ``v0.21.4``, then you should not tag it as - ``releases/latest`` (as there are newer major versions). +* If you are making a new release of an **older** minor version of Spack, e.g., ``v1.0.5``, then you should not tag it as ``releases/latest`` (as there are newer major/minor versions). To do so, first fetch the latest tag created on GitHub, since you may not have it locally: .. code-block:: console - $ git fetch --force git@github.com:spack/spack vX.Y.Z + $ git fetch --force git@github.com:spack/spack tag vX.Y.Z Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub. @@ -1115,43 +985,37 @@ Then tag ``vX.Y.Z`` as ``releases/latest`` and push the individual tag to GitHub $ git tag --force releases/latest vX.Y.Z $ git push --force git@github.com:spack/spack releases/latest -The ``--force`` argument to ``git tag`` makes Git overwrite the existing ``releases/latest`` -tag with the new one. Do **not** use the ``--tags`` flag when pushing, as this will push *all* -local tags. +The ``--force`` argument to ``git tag`` makes Git overwrite the existing ``releases/latest`` tag with the new one. +Do **not** use the ``--tags`` flag when pushing, as this will push *all* local tags. .. _announcing-releases: -^^^^^^^^^^^^^^^^^^^^ Announcing a release ^^^^^^^^^^^^^^^^^^^^ We announce releases in all of the major Spack communication channels. -Publishing the release takes care of GitHub. The remaining channels are -X, Slack, and the mailing list. Here are the steps: +Publishing the release takes care of GitHub. +The remaining channels are X, Slack, and the mailing list. +Here are the steps: #. Announce the release on X. - * Compose the tweet on the ``@spackpm`` account per the - ``spack-twitter`` slack channel. + * Compose the tweet on the ``@spackpm`` account per the ``spack-twitter`` slack channel. * Be sure to include a link to the release's page on GitHub. 
- You can base the tweet on `this - example `_. + You can base the tweet on `this example `_. #. Announce the release on Slack. - * Compose a message in the ``#general`` Slack channel - (`spackpm.slack.com `_). + * Compose a message in the ``#announcements`` Slack channel (`spackpm.slack.com `_). - * Preface the message with ``@channel`` to notify even those - people not currently logged in. + * Preface the message with ``@channel`` to notify even those people not currently logged in. * Be sure to include a link to the tweet above. - The tweet will be shown inline so that you do not have to retype - your release announcement. + The tweet will be shown inline so that you do not have to retype your release announcement. #. Announce the release on the Spack mailing list. @@ -1159,11 +1023,9 @@ X, Slack, and the mailing list. Here are the steps: * Be sure to include a link to the release's page on GitHub. - * It is also helpful to include some information directly in the - email. + * It is also helpful to include some information directly in the email. - You can base your announcement on this `example - email `_. + You can base your announcement on this `example email `_. Once you have completed the above steps, congratulations, you are done! You have finished making the release! diff --git a/lib/spack/docs/env_vars_yaml.rst b/lib/spack/docs/env_vars_yaml.rst index 6e29956c75df55..5cd60f641f8469 100644 --- a/lib/spack/docs/env_vars_yaml.rst +++ b/lib/spack/docs/env_vars_yaml.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,16 +9,12 @@ .. _env-vars-yaml: -============================================= Environment Variable Settings (env_vars.yaml) ============================================= -Spack allows you to include shell environment variable modifications -for a Spack environment by including an ``env_vars.yaml`` file. Environment -variables can be modified by setting, unsetting, appending, and prepending -variables in the shell environment. -The changes to the shell environment will take effect when the Spack -environment is activated. +Spack allows you to include shell environment variable modifications for a Spack environment by including an ``env_vars.yaml`` file. +Environment variables can be modified by setting, unsetting, appending, and prepending variables in the shell environment. +The changes to the shell environment will take effect when the Spack environment is activated. For example: @@ -27,7 +24,7 @@ For example: set: ENVAR_TO_SET_IN_ENV_LOAD: "FOO" unset: - ENVAR_TO_UNSET_IN_ENV_LOAD: + - ENVAR_TO_UNSET_IN_ENV_LOAD prepend_path: PATH_LIST: "path/to/prepend" append_path: diff --git a/lib/spack/docs/environments.rst b/lib/spack/docs/environments.rst index ba41802d7ad927..9ccba530b01ba7 100644 --- a/lib/spack/docs/environments.rst +++ b/lib/spack/docs/environments.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,67 +9,51 @@ .. _environments: -===================================== Environments (spack.yaml, spack.lock) ===================================== -An environment is used to group a set of specs intended for some purpose -to be built, rebuilt, and deployed in a coherent fashion. 
Environments -define aspects of the installation of the software, such as: +An environment is used to group a set of specs intended for some purpose to be built, rebuilt, and deployed in a coherent fashion. +Environments define aspects of the installation of the software, such as: #. *which* specs to install; #. *how* those specs are configured; and #. *where* the concretized software will be installed. -Aggregating this information into an environment for processing has advantages -over the *à la carte* approach of building and loading individual Spack modules. +Aggregating this information into an environment for processing has advantages over the *à la carte* approach of building and loading individual Spack modules. -With environments, you concretize, install, or load (activate) all of the -specs with a single command. Concretization fully configures the specs -and dependencies of the environment in preparation for installing the -software. This is a more robust solution than ad-hoc installation scripts. +With environments, you concretize, install, or load (activate) all of the specs with a single command. +Concretization fully configures the specs and dependencies of the environment in preparation for installing the software. +This is a more robust solution than ad-hoc installation scripts. And you can share an environment or even re-use it on a different computer. -Environment definitions, especially *how* specs are configured, allow the -software to remain stable and repeatable even when Spack packages are upgraded. Changes are only picked up when the environment is explicitly re-concretized. +Environment definitions, especially *how* specs are configured, allow the software to remain stable and repeatable even when Spack packages are upgraded. +Changes are only picked up when the environment is explicitly re-concretized. -Defining *where* specs are installed supports a filesystem view of the -environment. Yet Spack maintains a single installation of the software that -can be re-used across multiple environments. +Defining *where* specs are installed supports a filesystem view of the environment. +Yet Spack maintains a single installation of the software that can be re-used across multiple environments. -Activating an environment determines *when* all of the associated (and -installed) specs are loaded so limits the software loaded to those specs -actually needed by the environment. Spack can even generate a script to -load all modules related to an environment. +Activating an environment determines *when* all of the associated (and installed) specs are loaded so limits the software loaded to those specs actually needed by the environment. +Spack can even generate a script to load all modules related to an environment. -Other packaging systems also provide environments that are similar in -some ways to Spack environments; for example, `Conda environments -`_ or -`Python Virtual Environments -`_. Spack environments -provide some distinctive features though: +Other packaging systems also provide environments that are similar in some ways to Spack environments; for example, `Conda environments `_ or `Python Virtual Environments `_. +Spack environments provide some distinctive features though: -#. A spec installed "in" an environment is no different from the same - spec installed anywhere else in Spack. -#. Spack environments may contain more than one spec of the same - package. +#. A spec installed "in" an environment is no different from the same spec installed anywhere else in Spack. 
+#. Spack environments may contain more than one spec of the same package. -Spack uses a "manifest and lock" model similar to `Bundler gemfiles -`_ and other package managers. +Spack uses a "manifest and lock" model similar to `Bundler gemfiles `_ and other package managers. The environment's user input file (or manifest) is named ``spack.yaml``. -The lock file, which contains the fully configured and concretized specs, -is named ``spack.lock``. +The lock file, which contains the fully configured and concretized specs, is named ``spack.lock``. .. _environments-using: ------------------- Using Environments ------------------ -Here we follow a typical use case of creating, concretizing, -installing and loading an environment. +Here we follow a typical use case of creating, concretizing, installing, and loading an environment. + +.. _cmd-spack-env-create: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Creating a managed Environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -78,56 +63,44 @@ An environment is created by: .. code-block:: console $ spack env create myenv -The directory ``$SPACK_ROOT/var/spack/environments/myenv`` is created -to manage the environment. +The directory ``$SPACK_ROOT/var/spack/environments/myenv`` is created to manage the environment. .. note:: - By default, all managed environments are stored in the - ``$SPACK_ROOT/var/spack/environments`` folder. This location can be changed - by setting the ``environments_root`` variable in ``config.yaml``. + By default, all managed environments are stored in the ``$SPACK_ROOT/var/spack/environments`` folder. + This location can be changed by setting the ``environments_root`` variable in ``config.yaml``. -Spack creates the file ``spack.yaml``, hidden directory ``.spack-env``, and -``spack.lock`` file under ``$SPACK_ROOT/var/spack/environments/myenv``. User -interaction occurs through the ``spack.yaml`` file and the Spack commands -that affect it. Metadata and, by default, the view are stored in the -``.spack-env`` directory. When the environment is concretized, Spack creates -the ``spack.lock`` file with the fully configured specs and dependencies for -the environment. +Spack creates the file ``spack.yaml``, hidden directory ``.spack-env``, and ``spack.lock`` file under ``$SPACK_ROOT/var/spack/environments/myenv``. +User interaction occurs through the ``spack.yaml`` file and the Spack commands that affect it. +Metadata and, by default, the view are stored in the ``.spack-env`` directory. +When the environment is concretized, Spack creates the ``spack.lock`` file with the fully configured specs and dependencies for the environment. The ``.spack-env`` subdirectory also contains: - * ``repo/``: A subdirectory acting as the repo consisting of the Spack - packages used in the environment. It allows the environment to build - the same, in theory, even on different versions of Spack with different - packages! - * ``logs/``: A subdirectory containing the build logs for the packages - in this environment. +* ``repo/``: A subdirectory acting as the repo consisting of the Spack packages used in the environment. + It allows the environment to build the same, in theory, even on different versions of Spack with different packages! +* ``logs/``: A subdirectory containing the build logs for the packages in this environment. -Spack Environments can also be created from either the user input, or -manifest, file or the lockfile. Create an environment from a manifest using: +Spack Environments can also be created from another environment.
+Environments can be created from the manifest file (the user input), the lockfile, or the entire environment at once. +Create an environment from a manifest using: .. code-block:: console $ spack env create myenv spack.yaml -The resulting environment is guaranteed to have the same root specs as -the original but may concretize differently in the presence of different -explicit or default configuration settings (e.g., a different version of -Spack or for a different user account). +The resulting environment is guaranteed to have the same root specs as the original but may concretize differently in the presence of different explicit or default configuration settings (e.g., a different version of Spack or for a different user account). -Environments created from a manifest will copy any included configs -from relative paths inside the environment. Relative paths from -outside the environment will cause errors, and absolute paths will be -kept absolute. For example, if ``spack.yaml`` includes: +Environments created from a manifest will copy any included configs from relative paths inside the environment. +Relative paths from outside the environment will cause errors, and absolute paths will be kept absolute. +For example, if ``spack.yaml`` includes: .. code-block:: yaml spack: include: [./config.yaml] -then the created environment will have its own copy of the file -``config.yaml`` copied from the location in the original environment. +then the created environment will have its own copy of the file ``config.yaml`` copied from the location in the original environment. Create an environment from a ``spack.lock`` file using: @@ -135,18 +108,36 @@ Create an environment from a ``spack.lock`` file using: $ spack env create myenv spack.lock -The resulting environment, when on the same or a compatible machine, is -guaranteed to initially have the same concrete specs as the original. +The resulting environment, when on the same or a compatible machine, is guaranteed to initially have the same concrete specs as the original. + +Create an environment from an entire environment using either the environment name or path: + +.. code-block:: console + + $ spack env create myenv /path/to/env + $ spack env create myenv2 myenv + +The resulting environment will include the concrete specs from the original if the original is concretized (as when created from a lockfile) and all of the config options and abstract specs specified in the original (as when created from a manifest file). +It will also include any other files included in the environment directory, such as repos or source code, as they could be referenced in the environment by relative path. .. note:: Environment creation also accepts a full path to the file. - If the path is not under the ``$SPACK_ROOT/var/spack/environments`` - directory then the source is referred to as an - :ref:`independent environment `. + If the path is not under the ``$SPACK_ROOT/var/spack/environments`` directory then the source is referred to as an :ref:`independent environment `. + +The name of an environment can be a nested path to help organize environments via subdirectories. + +.. code-block:: console + + $ spack env create projectA/configA/myenv + +This will create a managed environment under ``$environments_root/projectA/configA/myenv``. +Changing ``environments_root`` can therefore also be used to make a whole group of nested environments available. + +.. _cmd-spack-env-activate: +..
_cmd-spack-env-deactivate: -^^^^^^^^^^^^^^^^^^^^^^^^^ Activating an Environment ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -156,25 +147,19 @@ To activate an environment, use the following command: $ spack env activate myenv -By default, the ``spack env activate`` will load the view associated -with the environment into the user environment. The ``-v, ---with-view`` argument ensures this behavior, and the ``-V, ---without-view`` argument activates the environment without changing -the user environment variables. +By default, the ``spack env activate`` command will load the view associated with the environment into the user environment. +The ``-v, --with-view`` argument ensures this behavior, and the ``-V, --without-view`` argument activates the environment without changing the user environment variables. -The ``-p`` option to the ``spack env activate`` command modifies the -user's prompt to begin with the environment name in brackets. +The ``-p`` option to the ``spack env activate`` command modifies the user's prompt to begin with the environment name in brackets. .. code-block:: console $ spack env activate -p myenv [myenv] $ ... -The ``activate`` command can also be used to create a new environment, if it is -not already defined, by adding the ``--create`` flag. Managed and independent -environments can both be created using the same flags that `spack env create` -accepts. If an environment already exists then Spack will simply activate it -and ignore the create-specific flags. +The ``activate`` command can also be used to create a new environment, if it is not already defined, by adding the ``--create`` flag. +Managed and independent environments can both be created using the same flags that ``spack env create`` accepts. +If an environment already exists then Spack will simply activate it and ignore the create-specific flags. .. code-block:: console @@ -196,12 +181,10 @@ or the shortcut alias $ despacktivate -If the environment was activated with its view, deactivating the -environment will remove the view from the user environment. +If the environment was activated with its view, deactivating the environment will remove the view from the user environment. .. _independent_environments: -^^^^^^^^^^^^^^^^^^^^^^^^ Independent Environments ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -209,9 +192,8 @@ Independent environments can be located in any directory outside of Spack. .. note:: - When uninstalling packages, Spack asks the user to confirm the removal of packages - that are still used in a managed environment. This is not the case for independent - environments. + When uninstalling packages, Spack asks the user to confirm the removal of packages that are still used in a managed environment. + This is not the case for independent environments. To create an independent environment, use one of the following commands: @@ -220,8 +202,7 @@ To create an independent environment, use one of the following commands: $ spack env create --dir my_env $ spack env create ./my_env -As a shorthand, you can also create an independent environment upon activation if it does not -already exist: +As a shorthand, you can also create an independent environment upon activation if it does not already exist: .. code-block:: console @@ -234,17 +215,15 @@ For convenience, Spack can also place an independent environment in a temporary $ spack env activate --temp -^^^^^^^^^^^^^^^^^^^^^^^^^^ Environment-Aware Commands ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Spack commands are environment-aware.
For example, the ``find`` -command shows only the specs in the active environment if an -environment has been activated. Otherwise it shows all specs in -the Spack instance. The same rule applies to the ``install`` and -``uninstall`` commands. +Spack commands are environment-aware. +For example, the ``find`` command shows only the specs in the active environment if an environment has been activated. +Otherwise it shows all specs in the Spack instance. +The same rule applies to the ``install`` and ``uninstall`` commands. -.. code-block:: console +.. code-block:: spec $ spack find ==> 0 installed packages @@ -286,84 +265,67 @@ the Spack instance. The same rule applies to the ``install`` and zlib@1.2.8 zlib@1.2.11 -Note that when we installed the abstract spec ``zlib@1.2.8``, it was -presented as a root of the environment. All explicitly installed -packages will be listed as roots of the environment. +Note that when we installed the abstract spec ``zlib@1.2.8``, it was presented as a root of the environment. +All explicitly installed packages will be listed as roots of the environment. -All of the Spack commands that act on the list of installed specs are -environment-aware in this way, including ``install``, -``uninstall``, ``find``, ``extensions``, etc. In the -:ref:`environment-configuration` section we will discuss -environment-aware commands further. +All of the Spack commands that act on the list of installed specs are environment-aware in this way, including ``install``, ``uninstall``, ``find``, ``extensions``, etc. +In the :ref:`environment-configuration` section we will discuss environment-aware commands further. + +.. _cmd-spack-add: -^^^^^^^^^^^^^^^^^^^^^ Adding Abstract Specs ^^^^^^^^^^^^^^^^^^^^^ -An abstract spec is the user-specified spec before Spack applies -defaults or dependency information. +An abstract spec is the user-specified spec before Spack applies defaults or dependency information. -Users can add abstract specs to an environment using the ``spack add`` -command. The most important component of an environment is a list of -abstract specs. +Users can add abstract specs to an environment using the ``spack add`` command. +The most important component of an environment is a list of abstract specs. -Adding a spec adds it as a root spec of the environment in the user -input file (``spack.yaml``). It does not affect the concrete specs -in the lock file (``spack.lock``) and it does not install the spec. +Adding a spec adds it as a root spec of the environment in the user input file (``spack.yaml``). +It does not affect the concrete specs in the lock file (``spack.lock``) and it does not install the spec. -The ``spack add`` command is environment-aware. It adds the spec to the -currently active environment. An error is generated if there isn't an -active environment. All environment-aware commands can also -be called using the ``spack -e`` flag to specify the environment. +The ``spack add`` command is environment-aware. +It adds the spec to the currently active environment. +An error is generated if there isn't an active environment. +All environment-aware commands can also be called using the ``spack -e`` flag to specify the environment. -.. code-block:: console +.. code-block:: spec $ spack env activate myenv $ spack add mpileaks or -.. code-block:: console +.. code-block:: spec $ spack -e myenv add python -.. _environments_concretization: +.. 
_cmd-spack-concretize: -^^^^^^^^^^^^ Concretizing ^^^^^^^^^^^^ Once user specs have been added to an environment, they can be concretized. -There are three different modes of operation to concretize an environment, -explained in detail in :ref:`environments_concretization_config`. -Regardless of which mode of operation is chosen, the following -command will ensure all of the root specs are concretized according to the -constraints that are prescribed in the configuration: +There are three different modes of operation to concretize an environment, explained in detail in :ref:`environments_concretization_config`. +Regardless of which mode of operation is chosen, the following command will ensure all of the root specs are concretized according to the constraints that are prescribed in the configuration: .. code-block:: console [myenv]$ spack concretize -In the case of specs that are not concretized together, the command -above will concretize only the specs that were added and not yet -concretized. Forcing a re-concretization of all of the specs can be done -by adding the ``-f`` option: +In the case of specs that are not concretized together, the command above will concretize only the specs that were added and not yet concretized. +Forcing a re-concretization of all of the specs can be done by adding the ``-f`` option: .. code-block:: console [myenv]$ spack concretize -f -Without the option, Spack guarantees that already concretized specs are -unchanged in the environment. +Without the option, Spack guarantees that already concretized specs are unchanged in the environment. -The ``concretize`` command does not install any packages. For packages -that have already been installed outside of the environment, the -process of adding the spec and concretizing is identical to installing -the spec assuming it concretizes to the exact spec that was installed -outside of the environment. +The ``concretize`` command does not install any packages. +For packages that have already been installed outside of the environment, the process of adding the spec and concretizing is identical to installing the spec assuming it concretizes to the exact spec that was installed outside of the environment. -The ``spack find`` command can show concretized specs separately from -installed specs using the ``-c`` (``--concretized``) flag. +The ``spack find`` command can show concretized specs separately from installed specs using the ``-c`` (``--concretized``) flag. .. code-block:: console @@ -383,159 +345,172 @@ installed specs using the ``-c`` (``--concretized``) flag. .. _installing-environment: -^^^^^^^^^^^^^^^^^^^^^^^^^ Installing an Environment ^^^^^^^^^^^^^^^^^^^^^^^^^ -In addition to adding individual specs to an environment, one -can install the entire environment at once using the command +In addition to adding individual specs to an environment, one can install the entire environment at once using the command .. code-block:: console [myenv]$ spack install -If the environment has been concretized, Spack will install the -concretized specs. Otherwise, ``spack install`` will concretize -the environment before installing the concretized specs. +If the environment has been concretized, Spack will install the concretized specs. +Otherwise, ``spack install`` will concretize the environment before installing the concretized specs. .. 
note:: - Every ``spack install`` process builds one package at a time with multiple build - jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option - (see :ref:`build-jobs`). To speed up environment builds further, independent - packages can be installed in parallel by launching more Spack instances. For - example, the following will build at most four packages in parallel using - three background jobs: + Every ``spack install`` process builds one package at a time with multiple build jobs, controlled by the ``-j`` flag and the ``config:build_jobs`` option (see :ref:`build-jobs`). + To speed up environment builds further, independent packages can be installed in parallel by launching more Spack instances. + For example, the following will build at most four packages in parallel using three background jobs: .. code-block:: console [myenv]$ spack install & spack install & spack install & spack install - Another option is to generate a ``Makefile`` and run ``make -j`` to control - the number of parallel install processes. See :ref:`env-generate-depfile` - for details. + Another option is to generate a ``Makefile`` and run ``make -j`` to control the number of parallel install processes. + See :ref:`cmd-spack-env-depfile` for details. -As it installs, ``spack install`` creates symbolic links in the -``logs/`` directory in the environment, allowing for easy inspection -of build logs related to that environment. The ``spack install`` -command also stores a Spack repo containing the ``package.py`` file -used at install time for each package in the ``repos/`` directory in -the environment. +As it installs, ``spack install`` creates symbolic links in the ``logs/`` directory in the environment, allowing for easy inspection of build logs related to that environment. +The ``spack install`` command also stores a Spack repo containing the ``package.py`` file used at install time for each package in the ``repos/`` directory in the environment. -The ``--no-add`` option can be used in a concrete environment to tell -Spack to install specs already present in the environment but not to -add any new root specs to the environment. For root specs provided -to ``spack install`` on the command line, ``--no-add`` is the default, -while for dependency specs, it is optional. In other -words, if there is an unambiguous match in the active concrete environment -for a root spec provided to ``spack install`` on the command line, Spack -does not require you to specify the ``--no-add`` option to prevent the spec -from being added again. At the same time, a spec that already exists in the -environment, but only as a dependency, will be added to the environment as a -root spec without the ``--no-add`` option. +The ``--no-add`` option can be used in a concrete environment to tell Spack to install specs already present in the environment but not to add any new root specs to the environment. +For root specs provided to ``spack install`` on the command line, ``--no-add`` is the default, while for dependency specs, it is optional. +In other words, if there is an unambiguous match in the active concrete environment for a root spec provided to ``spack install`` on the command line, Spack does not require you to specify the ``--no-add`` option to prevent the spec from being added again. +At the same time, a spec that already exists in the environment, but only as a dependency, will be added to the environment as a root spec without the ``--no-add`` option. -.. _develop-specs: +.. 
_cmd-spack-develop: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Developing Packages in a Spack Environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``spack develop`` command allows one to develop Spack packages in -an environment. It requires a spec containing a concrete version, and -will configure Spack to install the package from local source. -If a version is not provided from the command line interface then Spack -will automatically pick the highest version the package has defined. -This means any infinity versions (``develop``, ``main``, ``stable``) will be -preferred in this selection process. -By default, ``spack develop`` will also clone the package to a subdirectory in the -environment for the local source. This package will have a special variant ``dev_path`` -set, and Spack will ensure the package and its dependents are rebuilt -any time the environment is installed if the package's local source -code has been modified. Spack's native implementation to check for modifications -is to check if ``mtime`` is newer than the installation. -A custom check can be created by overriding the ``detect_dev_src_change`` method -in your package class. This is particularly useful for projects using custom Spack repos -to drive development and want to optimize performance. - -Spack ensures that all instances of a -developed package in the environment are concretized to match the -version (and other constraints) passed as the spec argument to the -``spack develop`` command. - -When working deep in the graph it is often desirable to have multiple specs marked -as ``develop`` so you don't have to restage and/or do full rebuilds each time you -call ``spack install``. The ``--recursive`` flag can be used in these scenarios -to ensure that all the dependents of the initial spec you provide are also marked -as develop specs. The ``--recursive`` flag requires a pre-concretized environment -so the graph can be traversed from the supplied spec all the way to the root specs. - -For packages with ``git`` attributes, git branches, tags, and commits can -also be used as valid concrete versions (see :ref:`version-specifier`). -This means that for a package ``foo``, ``spack develop foo@git.main`` will clone -the ``main`` branch of the package, and ``spack install`` will install from -that git clone if ``foo`` is in the environment. -Further development on ``foo`` can be tested by re-installing the environment, -and eventually committed and pushed to the upstream git repo. - -If the package being developed supports out-of-source builds then users can use the -``--build_directory`` flag to control the location and name of the build directory. -This is a shortcut to set the ``package_attributes:build_directory`` in the -``packages`` configuration (see :ref:`assigning-package-attributes`). +The ``spack develop`` command allows one to develop Spack packages in an environment. +It will configure Spack to install the package from local source. +By default, ``spack develop`` will also clone the package to a subdirectory in the environment for the local source. +These choices can be overridden with the ``--path`` argument, and the ``--no-clone`` argument. +Relative paths provided to the ``--path`` argument will be resolved relative to the environment directory. +All of these options are recorded in the environment manifest, although default values may be left implied. + +.. code-block:: console + + $ spack develop --path src/foo foo@develop + $ cat `spack location -e`/spack.yaml + spack: + ... 
+ develop: + foo: + spec: foo@develop + path: src/foo + +When ``spack develop`` is run in a concretized environment, Spack will modify the concrete specs in the environment to reflect the modified provenance. +Any package built from local source will have a ``dev_path`` variant, and the hash of any dependent of those packages will be modified to reflect the change. +The value of the ``dev_path`` variant will be the absolute path to the package source directory. +If the develop spec conflicts with the concrete specs in the environment, Spack will raise an exception and require the ``spack develop --no-modify-concrete-specs`` option, followed by a ``spack concretize --force`` to apply the ``dev_path`` variant and constraints from the develop spec. + +When concretizing an environment with develop specs, the version, variants, and other attributes of the spec provided to the ``spack develop`` command will be treated as constraints by the concretizer (in addition to any constraints from the environment's ``specs`` list). +If the ``develop`` configuration for the package does not include a spec version, Spack will choose the **highest** version of the package. +This means that any "infinity" versions (``develop``, ``main``, etc.) will be preferred for specs marked with the ``spack develop`` command, which is different from the standard Spack behavior to prefer the highest **numeric** version. +These packages will have an automatic ``dev_path`` variant added by the concretizer, with a value of the absolute path to the local source Spack is building from. + +Spack will ensure the package and its dependents are rebuilt any time the environment is installed if the package's local source code has been modified. +Spack's native implementation is to check if ``mtime`` is newer than the installation. +A custom check can be created by overriding the ``detect_dev_src_change`` method in your package class. +This is particularly useful for projects using custom Spack repos to drive development and want to optimize performance. + +When ``spack develop`` is run without any arguments, Spack will clone any develop specs in the environment for which the specified path does not exist. + +When working deep in the graph it is often desirable to have multiple specs marked as ``develop`` so you don't have to restage and/or do full rebuilds each time you call ``spack install``. +The ``--recursive`` flag can be used in these scenarios to ensure that all the dependents of the initial spec you provide are also marked as develop specs. +The ``--recursive`` flag requires a pre-concretized environment so the graph can be traversed from the supplied spec all the way to the root specs. + +For packages with ``git`` attributes, git branches, tags, and commits can also be used as valid concrete versions (see :ref:`version-specifier`). +This means that for a package ``foo``, ``spack develop foo@git.main`` will clone the ``main`` branch of the package, and ``spack install`` will install from that git clone if ``foo`` is in the environment. +Further development on ``foo`` can be tested by re-installing the environment, and eventually committed and pushed to the upstream git repo. + +If the package being developed supports out-of-source builds then users can use the ``--build_directory`` flag to control the location and name of the build directory. +This is a shortcut to set the ``package_attributes:build_directory`` in the ``packages`` configuration (see :ref:`assigning-package-attributes`).
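+For example, to redirect the build of a hypothetical package ``foo`` into a scratch directory (the spec and path below are placeholders, not defaults):
+
+.. code-block:: console
+
+   $ spack develop --build_directory /path/to/scratch/foo-build foo@develop
+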
The supplied location will become the build-directory for that package in all future builds. +.. admonition:: Potential pitfalls of setting the build directory + :class: warning + + Spack does not check for out-of-source build compatibility with the packages and so the onus of making sure the package supports out-of-source builds is on the user. + For example, most ``autotool`` and ``makefile`` packages do not support out-of-source builds while all ``CMake`` packages do. + Understanding these nuances is up to the software developers and we strongly encourage developers to only redirect the build directory if they understand their package's build-system. + +Modifying Specs in an Environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``spack change`` command allows the user to change individual specs in a Spack environment. + +By default, ``spack change`` operates on the abstract specs of an environment. +The command takes a list of spec arguments. +For each argument, the root spec with the same name as the provided spec is modified to satisfy the provided spec. +For example, in an environment with the root spec ``hdf5+mpi+fortran``, then + +.. code-block:: console + + $ spack change hdf5~mpi+cxx + +will change the root spec to ``hdf5~mpi+cxx+fortran``. + +When more complex matching semantics are necessary, the ``--match-spec`` argument replaces the spec name as the selection criterion. +When using the ``--match-spec`` argument, the spec name is not required. +In the same environment, + +.. code-block:: console + + $ spack change --match-spec "+fortran" +hl + +will constrain the ``hdf5`` spec to ``+hl``. + +By default, the ``spack change`` command will result in an error and no change to the environment if it will modify more than one abstract spec. +Use the ``--all`` option to allow ``spack change`` to modify multiple abstract specs. + +The ``--concrete`` option allows ``spack change`` to modify the concrete specs of an environment as well as the abstract specs. +Multiple concrete specs may be modified, even for a change that modifies only a single abstract spec. +The ``--all`` option does not affect how many concrete specs may be modified. + .. warning:: - Potential pitfalls of setting the build directory - Spack does not check for out-of-source build compatibility with the packages and - so the onus of making sure the package supports out-of-source builds is on - the user. - For example, most ``autotool`` and ``makefile`` packages do not support out-of-source builds - while all ``CMake`` packages do. - Understanding these nuances is up to the software developers and we strongly encourage - developers to only redirect the build directory if they understand their package's - build-system. -^^^^^^^ + Concrete specs are modified without any constraints from the packages. + The ``spack change --concrete`` command may create invalid specs that will not build properly if applied without caution. + +The ``--concrete-only`` option allows for modifying concrete specs without modifying abstract specs. +It allows changes to be applied to non-root nodes in the environment, and other changes that do not modify any root specs. + Loading ^^^^^^^
-Sourcing that file in Bash will make the environment available to the -user, and can be included in ``.bashrc`` files, etc. The ``loads`` -file may also be copied out of the environment, renamed, etc. +Sourcing that file in Bash will make the environment available to the user, and can be included in ``.bashrc`` files, etc. +The ``loads`` file may also be copied out of the environment, renamed, etc. .. _environment_include_concrete: ------------------------------- Included Concrete Environments ------------------------------ -Spack environments can create an environment based off of information in already -established environments. You can think of it as a combination of existing -environments. It will gather information from the existing environment's -``spack.lock`` and use that during the creation of this included concrete -environment. When an included concrete environment is created it will generate -a ``spack.lock`` file for the newly created environment. +Spack environments can create an environment based off of information in already established environments. +You can think of it as a combination of existing environments. +It will gather information from the existing environment's ``spack.lock`` and use that during the creation of this included concrete environment. +When an included concrete environment is created it will generate a ``spack.lock`` file for the newly created environment. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Creating included environments ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To create a combined concrete environment, you must have at least one existing -concrete environment. You will use the command ``spack env create`` with the -argument ``--include-concrete`` followed by the name or path of the environment -you'd like to include. Here is an example of how to create a combined environment -from the command line. +To create a combined concrete environment, you must have at least one existing concrete environment. +You will use the command ``spack env create`` with the argument ``--include-concrete`` followed by the name or path of the environment you'd like to include. +Here is an example of how to create a combined environment from the command line. -.. code-block:: console +.. code-block:: spec $ spack env create myenv $ spack -e myenv add python @@ -543,34 +518,29 @@ from the command line. $ spack env create --include-concrete myenv included_env -You can also include an environment directly in the ``spack.yaml`` file. It -involves adding the ``include_concrete`` heading in the yaml followed by the -absolute path to the independent environments. Note that you may use Spack -config variables such as ``$spack`` or environment variables as long as the -expression expands to an absolute path. +You can also include an environment directly in the ``spack.yaml`` file. +It involves adding the ``include_concrete`` heading in the yaml followed by the absolute path to the independent environments. +Note that you may use Spack config variables such as ``$spack`` or environment variables as long as the expression expands to an absolute path. .. code-block:: yaml spack: specs: [] concretizer: - unify: true + unify: true include_concrete: - /absolute/path/to/environment1 - $spack/../path/to/environment2 -Once the ``spack.yaml`` has been updated you must concretize the environment to -get the concrete specs from the included environments. +Once the ``spack.yaml`` has been updated you must concretize the environment to get the concrete specs from the included environments. 
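+For example, using the ``included_env`` created in the earlier example:
+
+.. code-block:: console
+
+   $ spack -e included_env concretize
+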
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Updating an included environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If changes were made to the base environment and you want that reflected in the -included environment you will need to re-concretize both the base environment and the -included environment for the change to be implemented. For example: +If changes were made to the base environment and you want that reflected in the included environment you will need to re-concretize both the base environment and the included environment for the change to be implemented. +For example: -.. code-block:: console +.. code-block:: spec $ spack env create myenv $ spack -e myenv add python @@ -594,11 +564,10 @@ included environment for the change to be implemented. For example: ==> 0 installed packages -Here we see that ``included_env`` has access to the python package through -the ``myenv`` environment. But if we were to add another spec to ``myenv``, -``included_env`` will not be able to access the new information. +Here we see that ``included_env`` has access to the python package through the ``myenv`` environment. +But if we were to add another spec to ``myenv``, ``included_env`` will not be able to access the new information. -.. code-block:: console +.. code-block:: spec $ spack -e myenv add perl $ spack -e myenv concretize @@ -618,8 +587,7 @@ the ``myenv`` environment. But if we were to add another spec to ``myenv``, ==> 0 installed packages -It isn't until you run the ``spack concretize`` command that the combined -environment will get the updated information from the re-concretized base environment. +It isn't until you run the ``spack concretize`` command that the combined environment will get the updated information from the re-concretized base environment. .. code-block:: console @@ -634,17 +602,12 @@ environment will get the updated information from the re-concretized base enviro .. _environment-configuration: ------------------------- Configuring Environments ------------------------ -A variety of Spack behaviors are changed through Spack configuration -files, covered in more detail in the :ref:`configuration` -section. +A variety of Spack behaviors are changed through Spack configuration files, covered in more detail in the :ref:`configuration` section. -Spack Environments provide an additional level of configuration scope -between the custom scope and the user scope discussed in the -configuration documentation. +Spack Environments provide an additional level of configuration scope between the custom scope and the user scope discussed in the configuration documentation. There are two ways to include configuration information in a Spack Environment: @@ -652,22 +615,16 @@ There are two ways to include configuration information in a Spack Environment: #. Included in the ``spack.yaml`` file from another file. -Many Spack commands also affect configuration information in files -automatically. Those commands take a ``--scope`` argument, and the -environment can be specified by ``env:NAME`` (to affect environment -``foo``, set ``--scope env:foo``). These commands will automatically -manipulate configuration inline in the ``spack.yaml`` file. +Many Spack commands also affect configuration information in files automatically. +Those commands take a ``--scope`` argument, and the environment can be specified by ``env:NAME`` (to affect environment ``foo``, set ``--scope env:foo``). +These commands will automatically manipulate configuration inline in the ``spack.yaml`` file. 
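+As a sketch of how this looks in practice (``foo`` is a placeholder environment name and ``config:build_jobs`` an arbitrary setting chosen for illustration):
+
+.. code-block:: console
+
+   $ spack config --scope env:foo add config:build_jobs:8
+
+This records the setting inline in ``foo``'s ``spack.yaml`` rather than in a global configuration file.
+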
-^^^^^^^^^^^^^^^^^^^^^ Inline configurations ^^^^^^^^^^^^^^^^^^^^^ -Inline environment-scope configuration is done using the same yaml -format as standard Spack configuration scopes, covered in the -:ref:`configuration` section. Each section is contained under a -top-level yaml object with its name. For example, a ``spack.yaml`` -manifest file containing some package preference configuration (as in -a ``packages.yaml`` file) could contain: +Inline environment-scope configuration is done using the same yaml format as standard Spack configuration scopes, covered in the :ref:`configuration` section. +Each section is contained under a top-level yaml object with its name. +For example, a ``spack.yaml`` manifest file containing some package preference configuration (as in a ``packages.yaml`` file) could contain: .. code-block:: yaml @@ -681,13 +638,11 @@ a ``packages.yaml`` file) could contain: This configuration sets the default mpi provider to be openmpi. -^^^^^^^^^^^^^^^^^^^^^^^ Included configurations ^^^^^^^^^^^^^^^^^^^^^^^ Spack environments allow an ``include`` heading in their yaml schema. -This heading pulls in external configuration files and applies them to -the environment. +This heading pulls in external configuration files and applies them to the environment. .. code-block:: yaml @@ -702,152 +657,122 @@ the environment. - path: /path/to/os-specific/config-dir when: os == "ventura" -Included configuration files are required *unless* they are explicitly optional -or the entry's condition evaluates to ``false``. Optional includes are specified -with the ``optional`` clause and conditional with the ``when`` clause. (See -:ref:`include-yaml` for more information on optional and conditional entries.) +Included configuration files are required *unless* they are explicitly optional or the entry's condition evaluates to ``false``. +Optional includes are specified with the ``optional`` clause and conditional with the ``when`` clause. +(See :ref:`include-yaml` for more information on optional and conditional entries.) Files are listed using paths to individual files or directories containing them. -Path entries may be absolute or relative to the environment or specified as -URLs. URLs to individual files must link to the **raw** form of the file's -contents (e.g., `GitHub -`_ -or `GitLab -`_) **and** include a valid sha256 for the file. -Only the ``file``, ``ftp``, ``http`` and ``https`` protocols (or schemes) are -supported. Spack-specific, environment and user path variables can be used. +Path entries may be absolute or relative to the environment or specified as URLs. +URLs to individual files must link to the **raw** form of the file's contents (e.g., `GitHub `_ or `GitLab `_) **and** include a valid sha256 for the file. +Only the ``file``, ``ftp``, ``http`` and ``https`` protocols (or schemes) are supported. +Spack-specific, environment and user path variables can be used. (See :ref:`config-file-variables` for more information.) .. warning:: - Recursive includes are not currently processed in a breadth-first manner - so the value of a configuration option that is altered by multiple included - files may not be what you expect. This will be addressed in a future - update. + Recursive includes are not currently processed in a breadth-first manner so the value of a configuration option that is altered by multiple included files may not be what you expect. + This will be addressed in a future update. 
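+For illustration, an include entry pointing at a remote file might look like the following sketch (the URL and ``sha256`` value here are placeholders, not a real file):
+
+.. code-block:: yaml
+
+   spack:
+     include:
+     - path: https://example.com/raw/path/to/packages.yaml
+       sha256: 26e871804a92cd07bb3d611b31b4156ae93d35b6a6d6e0ef3a67871fcb1d258b
+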
-^^^^^^^^^^^^^^^^^^^^^^^^ Configuration precedence ^^^^^^^^^^^^^^^^^^^^^^^^ -Inline configurations take precedence over included configurations, so -you don't have to change shared configuration files to make small changes -to an individual environment. Included configurations listed earlier will -have higher precedence, as the included configs are applied in reverse order. +Inline configurations take precedence over included configurations, so you don't have to change shared configuration files to make small changes to an individual environment. +Included configurations listed earlier will have higher precedence, as the included configs are applied in reverse order. -------------------------------- Manually Editing the Specs List ------------------------------- -The list of abstract/root specs in the environment is maintained in -the ``spack.yaml`` manifest under the heading ``specs``. +The list of abstract/root specs in the environment is maintained in the ``spack.yaml`` manifest under the heading ``specs``. .. code-block:: yaml spack: - specs: - - ncview - - netcdf - - nco - - py-sphinx + specs: + - ncview + - netcdf + - nco + - py-sphinx -Appending to this list in the yaml is identical to using the ``spack -add`` command from the command line. However, there is more power -available from the yaml file. +Appending to this list in the yaml is identical to using the ``spack add`` command from the command line. +However, there is more power available from the yaml file. .. _environments_concretization_config: -^^^^^^^^^^^^^^^^^^^ Spec concretization ^^^^^^^^^^^^^^^^^^^ -An environment can be concretized in three different modes and the behavior active under -any environment is determined by the ``concretizer:unify`` configuration option. +An environment can be concretized in three different modes and the behavior active under any environment is determined by the ``concretizer:unify`` configuration option. The *default* mode is to unify all specs: .. code-block:: yaml spack: - specs: - - hdf5+mpi - - zlib@1.2.8 - concretizer: - unify: true - -This means that any package in the environment corresponds to a single concrete spec. In -the above example, when ``hdf5`` depends down the line of ``zlib``, it is required to -take ``zlib@1.2.8`` instead of a newer version. This mode of concretization is -particularly useful when environment views are used: if every package occurs in -only one flavor, it is usually possible to merge all install directories into a view. - -A downside of unified concretization is that it can be overly strict. For example, a -concretization error would happen when both ``hdf5+mpi`` and ``hdf5~mpi`` are specified -in an environment. - -The second mode is to *unify when possible*: this makes concretization of root specs -more independent. Instead of requiring reuse of dependencies across different root -specs, it is only maximized: + specs: + - hdf5+mpi + - zlib@1.2.8 + concretizer: + unify: true + +This means that any package in the environment corresponds to a single concrete spec. +In the above example, when ``hdf5`` depends down the line of ``zlib``, it is required to take ``zlib@1.2.8`` instead of a newer version. +This mode of concretization is particularly useful when environment views are used: if every package occurs in only one flavor, it is usually possible to merge all install directories into a view. + +A downside of unified concretization is that it can be overly strict. 
+For example, a concretization error would happen when both ``hdf5+mpi`` and ``hdf5~mpi`` are specified in an environment. + +The second mode is to *unify when possible*: this makes concretization of root specs more independent. +Instead of requiring reuse of dependencies across different root specs, it is only maximized: .. code-block:: yaml spack: - specs: - - hdf5~mpi - - hdf5+mpi - - zlib@1.2.8 - concretizer: - unify: when_possible + specs: + - hdf5~mpi + - hdf5+mpi + - zlib@1.2.8 + concretizer: + unify: when_possible -This means that both ``hdf5`` installations will use ``zlib@1.2.8`` as a dependency even -if newer versions of that library are available. +This means that both ``hdf5`` installations will use ``zlib@1.2.8`` as a dependency even if newer versions of that library are available. -The third mode of operation is to concretize root specs entirely independently by -disabling unified concretization: +The third mode of operation is to concretize root specs entirely independently by disabling unified concretization: .. code-block:: yaml spack: - specs: - - hdf5~mpi - - hdf5+mpi - - zlib@1.2.8 - concretizer: - unify: false + specs: + - hdf5~mpi + - hdf5+mpi + - zlib@1.2.8 + concretizer: + unify: false -In this example ``hdf5`` is concretized separately, and does not consider ``zlib@1.2.8`` -as a constraint or preference. Instead, it will take the latest possible version. +In this example ``hdf5`` is concretized separately, and does not consider ``zlib@1.2.8`` as a constraint or preference. +Instead, it will take the latest possible version. -The last two concretization options are typically useful for system administrators and -user support groups providing a large software stack for their HPC center. +The last two concretization options are typically useful for system administrators and user support groups providing a large software stack for their HPC center. .. note:: - The ``concretizer:unify`` config option was introduced in Spack 0.18 to - replace the ``concretization`` property. For reference, - ``concretization: together`` is replaced by ``concretizer:unify:true``, - and ``concretization: separately`` is replaced by ``concretizer:unify:false``. + The ``concretizer:unify`` config option was introduced in Spack 0.18 to replace the ``concretization`` property. + For reference, ``concretization: together`` is replaced by ``concretizer:unify:true``, and ``concretization: separately`` is replaced by ``concretizer:unify:false``. .. admonition:: Re-concretization of user specs - The ``spack concretize`` command without additional arguments will *not* change any - previously concretized specs. This may prevent it from finding a solution when using - ``unify: true``, and it may prevent it from finding a minimal solution when using - ``unify: when_possible``. You can force Spack to ignore the existing concrete environment - with ``spack concretize -f``. + The ``spack concretize`` command without additional arguments will *not* change any previously concretized specs. + This may prevent it from finding a solution when using ``unify: true``, and it may prevent it from finding a minimal solution when using ``unify: when_possible``. + You can force Spack to ignore the existing concrete environment with ``spack concretize -f``. .. _environment-spec-matrices: -^^^^^^^^^^^^^ Spec Matrices ^^^^^^^^^^^^^ -Entries in the ``specs`` list can be individual abstract specs or a -spec matrix. +Entries in the ``specs`` list can be individual abstract specs or a spec matrix. 
-A spec matrix is a yaml object containing multiple lists of specs, and -evaluates to the cross-product of those specs. Spec matrices also -contain an ``excludes`` directive, which eliminates certain -combinations from the evaluated result. +A spec matrix is a yaml object containing multiple lists of specs, and evaluates to the cross-product of those specs. +Spec matrices also contain an ``excludes`` directive, which eliminates certain combinations from the evaluated result. The following two environment manifests are identical: @@ -855,139 +780,130 @@ The following two environment manifests are identical: spack: specs: - - zlib %gcc@7.1.0 - - zlib %gcc@4.9.3 - - libelf %gcc@7.1.0 - - libelf %gcc@4.9.3 - - libdwarf %gcc@7.1.0 - - cmake + - zlib %gcc@7.1.0 + - zlib %gcc@4.9.3 + - libelf %gcc@7.1.0 + - libelf %gcc@4.9.3 + - libdwarf %gcc@7.1.0 + - cmake + +.. code-block:: yaml spack: specs: - - matrix: - - [zlib, libelf, libdwarf] - - ['%gcc@7.1.0', '%gcc@4.9.3'] - exclude: - - libdwarf%gcc@4.9.3 - - cmake + - matrix: + - [zlib, libelf, libdwarf] + - ["%gcc@7.1.0", "%gcc@4.9.3"] + exclude: + - libdwarf%gcc@4.9.3 + - cmake -Spec matrices can be used to install swaths of software across various -toolchains. +Spec matrices can be used to install swaths of software across various toolchains. + +.. _spec-list-references: -^^^^^^^^^^^^^^^^^^^^ Spec List References ^^^^^^^^^^^^^^^^^^^^ The last type of possible entry in the specs list is a reference. -The Spack Environment manifest yaml schema contains an additional -heading ``definitions``. Under definitions is an array of yaml -objects. Each object has one or two fields. The one required field is -a name, and the optional field is a ``when`` clause. +The Spack Environment manifest yaml schema contains an additional heading ``definitions``. +Under definitions is an array of yaml objects. +Each object has one or two fields. +The one required field is a name, and the optional field is a ``when`` clause. -The named field is a spec list. The spec list uses the same syntax as -the ``specs`` entry. Each entry in the spec list can be a spec, a spec -matrix, or a reference to an earlier named list. References are -specified using the ``$`` sigil, and are "splatted" into place -(i.e. the elements of the referent are at the same level as the -elements listed separately). As an example, the following two manifest -files are identical. +The named field is a spec list. +The spec list uses the same syntax as the ``specs`` entry. +Each entry in the spec list can be a spec, a spec matrix, or a reference to an earlier named list. +References are specified using the ``$`` sigil, and are "splatted" into place (i.e. the elements of the referent are at the same level as the elements listed separately). +As an example, the following two manifest files are identical. .. code-block:: yaml spack: definitions: - - first: [libelf, libdwarf] - - compilers: ['%gcc', '%intel'] - - second: - - $first - - matrix: - - [zlib] - - [$compilers] + - first: [libelf, libdwarf] + - compilers: ["%gcc", "%intel"] + - second: + - $first + - matrix: + - [zlib] + - [$compilers] specs: - - $second - - cmake + - $second + - cmake + +.. code-block:: yaml spack: specs: - - libelf - - libdwarf - - zlib%gcc - - zlib%intel - - cmake + - libelf + - libdwarf + - zlib%gcc + - zlib%intel + - cmake .. note:: - Named spec lists in the definitions section may only refer - to a named list defined above itself. Order matters. 
+ Named spec lists in the definitions section may only refer to a named list defined above itself. + Order matters. -In short files like the example, it may be easier to simply list the -included specs. However for more complicated examples involving many -packages across many toolchains, separately factored lists make -environments substantially more manageable. +In short files like the example, it may be easier to simply list the included specs. +However, for more complicated examples involving many packages across many toolchains, separately factored lists make environments substantially more manageable. -Additionally, the ``-l`` option to the ``spack add`` command allows -one to add to named lists in the definitions section of the manifest -file directly from the command line. +Additionally, the ``-l`` option to the ``spack add`` command allows one to add to named lists in the definitions section of the manifest file directly from the command line. -The ``when`` directive can be used to conditionally add specs to a -named list. The ``when`` directive takes a string of Python code -referring to a restricted set of variables, and evaluates to a -boolean. The specs listed are appended to the named list if the -``when`` string evaluates to ``True``. In the following snippet, the -named list ``compilers`` is ``['%gcc', '%clang', '%intel']`` on -``x86_64`` systems and ``['%gcc', '%clang']`` on all other systems. +The ``when`` directive can be used to conditionally add specs to a named list. +The ``when`` directive takes a string of Python code referring to a restricted set of variables, and evaluates to a boolean. +The specs listed are appended to the named list if the ``when`` string evaluates to ``True``. +In the following snippet, the named list ``compilers`` is ``["%gcc", "%clang", "%intel"]`` on ``x86_64`` systems and ``["%gcc", "%clang"]`` on all other systems. .. code-block:: yaml spack: definitions: - - compilers: ['%gcc', '%clang'] - - when: arch.satisfies('target=x86_64:') - compilers: ['%intel'] + - compilers: ["%gcc", "%clang"] + - when: arch.satisfies("target=x86_64:") + compilers: ["%intel"] .. note:: - Any definitions with the same named list with true ``when`` - clauses (or absent ``when`` clauses) will be appended together + Any definitions for the same named list whose ``when`` clauses are true (or absent) will be appended together. The valid variables for a ``when`` clause are: -#. ``platform``. The platform string of the default Spack - architecture on the system. +#. ``platform``. + The platform string of the default Spack architecture on the system. -#. ``os``. The os string of the default Spack architecture on - the system. +#. ``os``. + The os string of the default Spack architecture on the system. -#. ``target``. The target string of the default Spack - architecture on the system. +#. ``target``. + The target string of the default Spack architecture on the system. -#. ``architecture`` or ``arch``. A Spack spec satisfying the default Spack - architecture on the system. This supports querying via the ``satisfies`` - method, as shown above. +#. ``architecture`` or ``arch``. + A Spack spec satisfying the default Spack architecture on the system. + This supports querying via the ``satisfies`` method, as shown above. -#. ``arch_str``. The architecture string of the default Spack architecture - on the system. +#. ``arch_str``. + The architecture string of the default Spack architecture on the system. -#. ``re``. The standard regex module in Python. +#. ``re``. 
+ The standard regex module in Python. -#. ``env``. The user environment (usually ``os.environ`` in Python). +#. ``env``. + The user environment (usually ``os.environ`` in Python). -#. ``hostname``. The hostname of the system (if ``hostname`` is an - executable in the user's PATH). +#. ``hostname``. + The hostname of the system (if ``hostname`` is an executable in the user's PATH). -^^^^^^^^^^^^^^^^^^^^^^^^ SpecLists as Constraints ^^^^^^^^^^^^^^^^^^^^^^^^ -Dependencies and compilers in Spack can be both packages in an -environment and constraints on other packages. References to SpecLists -allow a shorthand to treat packages in a list as either a compiler or -a dependency using the ``$%`` or ``$^`` syntax respectively. +Dependencies and compilers in Spack can be both packages in an environment and constraints on other packages. +References to SpecLists allow a shorthand to treat packages in a list as either a compiler or a dependency using the ``$%`` or ``$^`` syntax respectively. -For example, the following environment has three root packages: -``gcc@8.1.0``, ``mvapich2@2.3.1 %gcc@8.1.0``, and ``hdf5+mpi -%gcc@8.1.0 ^mvapich2@2.3.1``. +For example, the following environment has three root packages: ``gcc@8.1.0``, ``mvapich2@2.3.1 %gcc@8.1.0``, and ``hdf5+mpi %gcc@8.1.0 ^mvapich2@2.3.1``. .. code-block:: yaml @@ -1007,15 +923,13 @@ For example, the following environment has three root packages: - [$^mpis] - [$%compilers] -This allows for a much-needed reduction in redundancy between packages -and constraints. +This allows for a much-needed reduction in redundancy between packages and constraints. -------------------------------- Modifying Environment Variables ------------------------------- -Spack Environments can modify the active shell's environment variables when activated. The environment can be -configured to set, unset, prepend, or append using ``env_vars`` configuration in ``spack.yaml``: +Spack Environments can modify the active shell's environment variables when activated. +The environment can be configured to set, unset, prepend, or append using ``env_vars`` configuration in ``spack.yaml``: .. code-block:: yaml @@ -1024,7 +938,7 @@ configured to set, unset, prepend, or append using ``env_vars`` configuration in set: ENVAR_TO_SET_IN_ENV_LOAD: "FOO" unset: - ENVAR_TO_UNSET_IN_ENV_LOAD: + - ENVAR_TO_UNSET_IN_ENV_LOAD prepend_path: PATH_LIST: "path/to/prepend" append_path: @@ -1032,16 +946,12 @@ configured to set, unset, prepend, or append using ``env_vars`` configuration in remove_path: PATH_LIST: "path/to/remove" ------------------ Environment Views ----------------- -Spack Environments can have an associated filesystem view, which is a directory -with a more traditional structure ``/bin``, ``/lib``, ``/include`` -in which all files of the installed packages are linked. +Spack Environments can have an associated filesystem view, which is a directory with a more traditional structure ``/bin``, ``/lib``, ``/include`` in which all files of the installed packages are linked. -By default a view is created for each environment, thanks to the ``view: true`` -option in the ``spack.yaml`` manifest file: +By default a view is created for each environment, thanks to the ``view: true`` option in the ``spack.yaml`` manifest file: .. code-block:: yaml @@ -1050,17 +960,13 @@ option in the ``spack.yaml`` manifest file: view: true The view is created in a hidden directory ``.spack-env/view`` relative to the environment. 
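+
+For example, from inside the environment directory you can list the contents of the view directly (a sketch; the exact entries depend on the packages installed):
+
+.. code-block:: console
+
+   $ ls .spack-env/view
+   bin  include  lib  share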
-If you've used ``spack env activate``, you may have already interacted with this view. Spack -prepends its ``/bin`` dir to ``PATH`` when the environment is activated, so that -you can directly run executables from all installed packages in the environment. +If you've used ``spack env activate``, you may have already interacted with this view. +Spack prepends its ``/bin`` dir to ``PATH`` when the environment is activated, so that you can directly run executables from all installed packages in the environment. -Views are highly customizable: you can control where they are put, modify their structure, -include and exclude specs, change how files are linked, and you can even generate multiple -views for a single environment. +Views are highly customizable: you can control where they are put, modify their structure, include and exclude specs, change how files are linked, and you can even generate multiple views for a single environment. .. _configuring_environment_views: -^^^^^^^^^^^^^^^^^^^^^^^^^^ Minimal view configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1072,8 +978,7 @@ The minimal configuration # ... view: true -lets Spack generate a single view with default settings under the -``.spack-env/view`` directory of the environment. +lets Spack generate a single view with default settings under the ``.spack-env/view`` directory of the environment. Another short way to configure a view is to specify just where to put it: @@ -1085,13 +990,13 @@ Another short way to configure a view is to specify just where to put it: Views can also be disabled by setting ``view: false``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _cmd-spack-env-view: + Advanced view configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^ One or more **view descriptors** can be defined under ``view``, keyed by a name. -The example from the previous section with ``view: /path/to/view`` is equivalent -to defining a view descriptor named ``default`` with a ``root`` attribute: +The example from the previous section with ``view: /path/to/view`` is equivalent to defining a view descriptor named ``default`` with a ``root`` attribute: .. code-block:: yaml @@ -1101,24 +1006,13 @@ to defining a view descriptor named ``default`` with a ``root`` attribute: default: # name of the view root: /path/to/view # view descriptor attribute -The ``default`` view descriptor name is special: when you ``spack env activate`` your -environment, this view will be used to update (among other things) your ``PATH`` -variable. - -View descriptors must contain the root of the view, and optionally projections, -``select`` and ``exclude`` lists and link information via ``link`` and -``link_type``. - -As a more advanced example, in the following manifest -file snippet we define a view named ``mpis``, rooted at -``/path/to/view`` in which all projections use the package name, -version, and compiler name to determine the path for a given -package. This view selects all packages that depend on MPI, and -excludes those built with the GCC compiler at version 18.5. -The root specs with their (transitive) link and run type dependencies -will be put in the view due to the ``link: all`` option, -and the files in the view will be symlinks to the Spack install -directories. +The ``default`` view descriptor name is special: when you ``spack env activate`` your environment, this view will be used to update (among other things) your ``PATH`` variable. 
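+
+For instance, after activating an environment you should see the view's ``bin`` directory at the front of ``PATH`` (the paths below are illustrative):
+
+.. code-block:: console
+
+   $ spack env activate myenv
+   $ echo $PATH
+   /path/to/environments/myenv/.spack-env/view/bin:/usr/bin:...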
+ +View descriptors must contain the root of the view, and optionally projections, ``select`` and ``exclude`` lists and link information via ``link`` and ``link_type``. + +As a more advanced example, in the following manifest file snippet we define a view named ``mpis``, rooted at ``/path/to/view`` in which all projections use the package name, version, and compiler name to determine the path for a given package. +This view selects all packages that depend on MPI, and excludes those built with the GCC compiler at version 8.5. +The root specs with their (transitive) link and run type dependencies will be put in the view due to the ``link: all`` option, and the files in the view will be symlinks to the Spack install directories. .. code-block:: yaml @@ -1128,57 +1022,44 @@ directories. mpis: root: /path/to/view select: [^mpi] - exclude: ['%gcc@18.5'] + exclude: ["%gcc@8.5"] projections: - all: '{name}/{version}-{compiler.name}' + all: "{name}/{version}-{compiler.name}" link: all link_type: symlink -The default for the ``select`` and -``exclude`` values is to select everything and exclude nothing. The -default projection is the default view projection (``{}``). The ``link`` -attribute allows the following values: +The default for the ``select`` and ``exclude`` values is to select everything and exclude nothing. +The default projection is the default view projection (``{}``). +The ``link`` attribute allows the following values: -#. ``link: all`` include root specs with their transitive run and link type - dependencies (default); +#. ``link: all`` include root specs with their transitive run and link type dependencies (default); #. ``link: run`` include root specs with their transitive run type dependencies; #. ``link: roots`` include root specs without their dependencies. -The ``link_type`` defaults to ``symlink`` but can also take the value -of ``hardlink`` or ``copy``. +The ``link_type`` defaults to ``symlink`` but can also take the value of ``hardlink`` or ``copy``. .. tip:: - The option ``link: run`` can be used to create small environment views for - Python packages. Python will be able to import packages *inside* of the view even - when the environment is not activated, and linked libraries will be located - *outside* of the view thanks to rpaths. + The option ``link: run`` can be used to create small environment views for Python packages. + Python will be able to import packages *inside* of the view even when the environment is not activated, and linked libraries will be located *outside* of the view thanks to rpaths. -From the command line, the ``spack env create`` command takes an -argument ``--with-view [PATH]`` that sets the path for a single, default -view. If no path is specified, the default path is used (``view: -true``). The argument ``--without-view`` can be used to create an -environment without any view configured. +From the command line, the ``spack env create`` command takes an argument ``--with-view [PATH]`` that sets the path for a single, default view. +If no path is specified, the default path is used (``view: true``). +The argument ``--without-view`` can be used to create an environment without any view configured. The ``spack env view`` command can be used to manage views of an environment. -The subcommand ``spack env view enable`` will add a -view named ``default`` to an environment. It takes an optional -argument to specify the path for the new default view. 
The subcommand -``spack env view disable`` will remove the view named ``default`` from -an environment if one exists. The subcommand ``spack env view -regenerate`` will regenerate the views for the environment. This will -apply any updates in the environment configuration that have not yet -been applied. +The subcommand ``spack env view enable`` will add a view named ``default`` to an environment. +It takes an optional argument to specify the path for the new default view. +The subcommand ``spack env view disable`` will remove the view named ``default`` from an environment if one exists. +The subcommand ``spack env view regenerate`` will regenerate the views for the environment. +This will apply any updates in the environment configuration that have not yet been applied. .. _view_projections: -"""""""""""""""" View Projections """""""""""""""" -The default projection into a view is to link every package into the -root of the view. The projections attribute is a mapping of partial specs to -spec format strings, defined by the :meth:`~spack.spec.Spec.format` -function, as shown in the example below: +The default projection into a view is to link every package into the root of the view. +The projections attribute is a mapping of partial specs to spec format strings, defined by the :meth:`~spack.spec.Spec.format` function, as shown in the example below: .. code-block:: yaml @@ -1187,60 +1068,37 @@ function, as shown in the example below: ^mpi: "{name}-{version}/{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}" all: "{name}-{version}/{compiler.name}-{compiler.version}" -Projections also permit environment and Spack configuration variable -expansions as shown below: +Projections also permit environment and Spack configuration variable expansions as shown below: .. code-block:: yaml projections: all: "{name}-{version}/{compiler.name}-{compiler.version}/$date/$SYSTEM_ENV_VARIABLE" -where ``$date`` is the Spack configuration variable that will expand with the ``YYYY-MM-DD`` -format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell. - -The entries in the projections configuration file must all be either -specs or the keyword ``all``. For each spec, the projection used will -be the first non-``all`` entry that the spec satisfies, or ``all`` if -there is an entry for ``all`` and no other entry is satisfied by the -spec. Where the keyword ``all`` appears in the file does not -matter. - -Given the example above, the spec ``zlib@1.2.8`` -will be linked into ``/my/view/zlib-1.2.8/``, the spec -``hdf5@1.8.10+mpi %gcc@4.9.3 ^mvapich2@2.2`` will be linked into -``/my/view/hdf5-1.8.10/mvapich2-2.2-gcc-4.9.3``, and the spec -``hdf5@1.8.10~mpi %gcc@4.9.3`` will be linked into -``/my/view/hdf5-1.8.10/gcc-4.9.3``. - -If the keyword ``all`` does not appear in the projections -configuration file, any spec that does not satisfy any entry in the -file will be linked into the root of the view as in a single-prefix -view. Any entries that appear below the keyword ``all`` in the -projections configuration file will not be used, as all specs will use -the projection under ``all`` before reaching those entries. +where ``$date`` is the Spack configuration variable that will expand with the ``YYYY-MM-DD`` format and ``$SYSTEM_ENV_VARIABLE`` is an environment variable defined in the shell. + +The entries in the projections configuration file must all be either specs or the keyword ``all``. 
+For each spec, the projection used will be the first non-``all`` entry that the spec satisfies, or ``all`` if there is an entry for ``all`` and no other entry is satisfied by the spec. +Where the keyword ``all`` appears in the file does not matter. + +Given the example above, the spec ``zlib@1.2.8`` will be linked into ``/my/view/zlib-1.2.8/``, the spec ``hdf5@1.8.10+mpi %gcc@4.9.3 ^mvapich2@2.2`` will be linked into ``/my/view/hdf5-1.8.10/mvapich2-2.2-gcc-4.9.3``, and the spec ``hdf5@1.8.10~mpi %gcc@4.9.3`` will be linked into ``/my/view/hdf5-1.8.10/gcc-4.9.3``. + +If the keyword ``all`` does not appear in the projections configuration file, any spec that does not satisfy any entry in the file will be linked into the root of the view as in a single-prefix view. +Any entries that appear below the keyword ``all`` in the projections configuration file will not be used, as all specs will use the projection under ``all`` before reaching those entries. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Activating environment views ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``spack env activate <env>`` has two effects: +The ``spack env activate <env>`` command has two effects: -1. It activates the environment so that further Spack commands such - as ``spack install`` will run in the context of the environment. -2. It activates the view so that environment variables such as - ``PATH`` are updated to include the view. +1. It activates the environment so that further Spack commands such as ``spack install`` will run in the context of the environment. +2. It activates the view so that environment variables such as ``PATH`` are updated to include the view. -Without further arguments, the ``default`` view of the environment is -activated. If a view with a different name has to be activated, -``spack env activate --with-view <name> <env>`` can be -used instead. You can also activate the environment without modifying -further environment variables using ``--without-view``. +Without further arguments, the ``default`` view of the environment is activated. +If a view with a different name has to be activated, ``spack env activate --with-view <name> <env>`` can be used instead. +You can also activate the environment without modifying further environment variables using ``--without-view``. -The environment variables affected by the ``spack env activate`` -command and the paths that are used to update them are determined by -the :ref:`prefix inspections ` defined in -your modules configuration; the defaults are summarized in the following -table. +The environment variables affected by the ``spack env activate`` command and the paths that are used to update them are determined by the :ref:`prefix inspections ` defined in your modules configuration; the defaults are summarized in the following table. =================== ========= Variable Paths @@ -1252,43 +1110,40 @@ PKG_CONFIG_PATH lib/pkgconfig, lib64/pkgconfig, share/pkgconfig CMAKE_PREFIX_PATH . =================== ========= -Each of these paths are appended to the view root, and added to the -relevant variable if the path exists. For this reason, it is not -recommended to use non-default projections with the default view of an -environment. +Each of these paths is appended to the view root, and added to the relevant variable if the path exists. +For this reason, it is not recommended to use non-default projections with the default view of an environment. -The ``spack env deactivate`` command will remove the active view of -the Spack environment from the user's environment variables. 
+The ``spack env deactivate`` command will remove the active view of the Spack environment from the user's environment variables. -.. _env-generate-depfile: +.. _cmd-spack-env-depfile: ------------------------------------------- Generating Depfiles from Environments ------------------------------------------ -Spack can generate ``Makefile``\s to make it easier to build multiple -packages in an environment in parallel. Generated ``Makefile``\s expose -targets that can be included in existing ``Makefile``\s, to allow -other targets to depend on the environment installation. +Spack can generate ``Makefile``\s to make it easier to build multiple packages in an environment in parallel. + +.. note:: + + Since Spack v1.1, there is a new experimental installer that supports package-level parallelism out of the box with POSIX jobserver support. + You can enable it with ``spack config add config:installer:new``. + This new installer may provide a simpler alternative to the ``spack env depfile`` workflow described in this section for users primarily interested in speeding up environment installations. + +Generated ``Makefile``\s expose targets that can be included in existing ``Makefile``\s, to allow other targets to depend on the environment installation. A typical workflow is as follows: -.. code-block:: console +.. code-block:: spec - spack env create -d . - spack -e . add perl - spack -e . concretize - spack -e . env depfile -o Makefile - make -j64 + $ spack env create -d . + $ spack -e . add perl + $ spack -e . concretize + $ spack -e . env depfile -o Makefile + $ make -j64 -This generates a ``Makefile`` from a concretized environment in the -current working directory, and ``make -j64`` installs the environment, -exploiting parallelism across packages as much as possible. Spack -respects the Make jobserver and forwards it to the build environment -of packages, meaning that a single ``-j`` flag is enough to control the -load, even when packages are built in parallel. +This generates a ``Makefile`` from a concretized environment in the current working directory, and ``make -j64`` installs the environment, exploiting parallelism across packages as much as possible. +Spack respects the Make jobserver and forwards it to the build environment of packages, meaning that a single ``-j`` flag is enough to control the load, even when packages are built in parallel. By default the following phony convenience targets are available: @@ -1297,24 +1152,18 @@ By default the following phony convenience targets are available: .. tip:: - GNU Make version 4.3 and above have great support for output synchronization - through the ``-O`` and ``--output-sync`` flags, which ensure that output is - printed orderly per package install. To get synchronized output with colors, - use ``make -j SPACK_COLOR=always --output-sync=recurse``. + GNU Make version 4.3 and above have great support for output synchronization through the ``-O`` and ``--output-sync`` flags, which ensure that output is printed orderly per package install. + To get synchronized output with colors, use ``make -j SPACK_COLOR=always --output-sync=recurse``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Specifying dependencies on generated ``make`` targets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -An interesting question is how to include generated ``Makefile``\s in your own -``Makefile``\s. This comes up when you want to install an environment that provides -executables required in a command for a make target of your own. 
+An interesting question is how to include generated ``Makefile``\s in your own ``Makefile``\s. +This comes up when you want to install an environment that provides executables required in a command for a make target of your own. -The example below shows how to accomplish this: the ``env`` target specifies -the generated ``spack/env`` target as a prerequisite, meaning that the environment -gets installed and is available for use in the ``env`` target. +The example below shows how to accomplish this: the ``env`` target specifies the generated ``spack/env`` target as a prerequisite, meaning that the environment gets installed and is available for use in the ``env`` target. -.. code:: Makefile +.. code-block:: Makefile SPACK ?= spack @@ -1338,35 +1187,26 @@ gets installed and is available for use in the ``env`` target. include env.mk endif -This works as follows: when ``make`` is invoked, it first "remakes" the missing -include ``env.mk`` as there is a target for it. This triggers concretization of -the environment and makes Spack output ``env.mk``. At that point the -generated target ``spack/env`` becomes available through ``include env.mk``. +This works as follows: when ``make`` is invoked, it first "remakes" the missing include ``env.mk`` as there is a target for it. +This triggers concretization of the environment and makes Spack output ``env.mk``. +At that point the generated target ``spack/env`` becomes available through ``include env.mk``. -As it is typically undesirable to remake ``env.mk`` as part of ``make clean``, -the include is conditional. +As it is typically undesirable to remake ``env.mk`` as part of ``make clean``, the include is conditional. .. note:: - When including generated ``Makefile``\s, it is important to use - the ``--make-prefix`` flag and use the non-phony target - ``<prefix>/env`` as prerequisite, instead of the phony target - ``<prefix>/all``. + When including generated ``Makefile``\s, it is important to use the ``--make-prefix`` flag and use the non-phony target ``<prefix>/env`` as prerequisite, instead of the phony target ``<prefix>/all``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Building a subset of the environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The generated ``Makefile``\s contain install targets for each spec, identified -by ``<name>-<version>-<hash>``. This allows you to install only a subset of the -packages in the environment. When packages are unique in the environment, it's -enough to know the name and let tab-completion fill out the version and hash. +The generated ``Makefile``\s contain install targets for each spec, identified by ``<name>-<version>-<hash>``. +This allows you to install only a subset of the packages in the environment. +When packages are unique in the environment, it's enough to know the name and let tab-completion fill out the version and hash. -The following phony targets are available: ``install/<spec>`` to install the -spec with its dependencies, and ``install-deps/<spec>`` to *only* install -its dependencies. This can be useful when certain flags should only apply to -dependencies. Below we show a use case where a spec is installed with verbose -output (``spack install --verbose``) while its dependencies are installed silently: +The following phony targets are available: ``install/<spec>`` to install the spec with its dependencies, and ``install-deps/<spec>`` to *only* install its dependencies. +This can be useful when certain flags should only apply to dependencies. +Below we show a use case where a spec is installed with verbose output (``spack install --verbose``) while its dependencies are installed silently: .. 
code-block:: console @@ -1378,32 +1218,27 @@ output (``spack install --verbose``) while its dependencies are installed silent # Install the root spec with verbose output. $ make -j16 install/python-3.11.0-<hash> SPACK_INSTALL_FLAGS=--verbose -^^^^^^^^^^^^^^^^^^^^^^^^^ Adding post-install hooks ^^^^^^^^^^^^^^^^^^^^^^^^^ -Another advanced use-case of generated ``Makefile``\s is running a post-install -command for each package. These "hooks" could be anything from printing a -post-install message, running tests, or pushing just-built binaries to a buildcache. +Another advanced use case of generated ``Makefile``\s is running a post-install command for each package. +These "hooks" could be anything from printing a post-install message to running tests or pushing just-built binaries to a buildcache. -This can be accomplished through the generated ``[<prefix>/]SPACK_PACKAGE_IDS`` -variable. Assuming we have an active and concrete environment, we generate the -associated ``Makefile`` with a prefix ``example``: +This can be accomplished through the generated ``[<prefix>/]SPACK_PACKAGE_IDS`` variable. +Assuming we have an active and concrete environment, we generate the associated ``Makefile`` with a prefix ``example``: .. code-block:: console $ spack env depfile -o env.mk --make-prefix example -And we now include it in a different ``Makefile``, in which we create a target -``example/push/%`` with ``%`` referring to a package identifier. This target -depends on the particular package installation. In this target we automatically -have the target-specific ``HASH`` and ``SPEC`` variables at our disposal. They -are respectively the spec hash (excluding leading ``/``), and a human-readable spec. -Finally, we have an entry point target ``push`` that will update the buildcache -index once every package is pushed. Note how this target uses the generated -``example/SPACK_PACKAGE_IDS`` variable to define its prerequisites. +And we now include it in a different ``Makefile``, in which we create a target ``example/push/%`` with ``%`` referring to a package identifier. +This target depends on the particular package installation. +In this target we automatically have the target-specific ``HASH`` and ``SPEC`` variables at our disposal. +They are respectively the spec hash (excluding leading ``/``), and a human-readable spec. +Finally, we have an entry point target ``push`` that will update the buildcache index once every package is pushed. +Note how this target uses the generated ``example/SPACK_PACKAGE_IDS`` variable to define its prerequisites. -.. code:: Makefile +.. code-block:: Makefile SPACK ?= spack BUILDCACHE_DIR = $(CURDIR)/tarballs diff --git a/lib/spack/docs/environments_basics.rst b/lib/spack/docs/environments_basics.rst new file mode 100644 index 00000000000000..9548c3bbe3303b --- /dev/null +++ b/lib/spack/docs/environments_basics.rst @@ -0,0 +1,193 @@ +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. + + SPDX-License-Identifier: (Apache-2.0 OR MIT) + +.. meta:: + :description lang=en: + Learn how to use Spack environments to manage reproducible software stacks on a local machine. + +Spack Environments +================== + +Spack is a powerful package manager designed for the complex software needs of supercomputers. +These same robust features for managing versions and dependencies also make it an excellent tool for local development on a laptop or workstation. 
+ +If you are used to tools like Conda, Homebrew or pip for managing local command-line tools and development projects, you will find Spack environments to be a powerful and flexible alternative. +Spack environments allow you to create self-contained, reproducible software collections, a concept similar to Conda environments and Python's virtual environments. + +Unlike other package managers, Spack environments do not contain copies of the software themselves. +Instead, they reference installations in the Spack store, which is a central location where Spack keeps all installed packages. +This means that multiple environments can share the same package installations, saving disk space and reducing duplication. + +In this section, we will walk through creating a simple environment to manage a personal software stack. + +Creating and Activating an Environment +-------------------------------------- + +First, let's create and activate a new environment. +This places you "inside" the environment, so all subsequent Spack commands apply to it by default. + +.. code-block:: console + + $ spack env create myenv + ==> Created environment myenv in /path/to/spack/var/spack/environments/myenv + $ spack env activate myenv + +Here, *myenv* is the name of our new environment. + +You can verify you are in the environment using: + +.. code-block:: console + + $ spack env status + ==> In environment myenv + +Adding Specs to the Environment +------------------------------- + +Now that our environment is active, we can add the packages we want to install. +Let's say we want a newer version of curl and a few Python libraries. + +.. code-block:: spec + + $ spack add curl@8 python py-numpy py-scipy py-matplotlib + +You can add packages one at a time or all at once. +Notice that we didn't need to specify the environment name, as Spack knows we are working inside ``myenv``. +These packages are now added to the environment's manifest file, ``spack.yaml``. + +You can view the manifest at any time by running: + +.. code-block:: console + + $ spack config edit + +This will open your ``spack.yaml``, which should look like this: + +.. code-block:: yaml + :caption: Example ``spack.yaml`` for our environment + + # This is a Spack Environment file. + # + # It describes a set of packages to be installed, along with + # configuration settings. + spack: + # add package specs to the `specs` list + specs: + - curl@8 + - python + - py-numpy + - py-scipy + - py-matplotlib + view: true + concretizer: + unify: true + +The ``view: true`` setting tells Spack to create a single directory where all executables, libraries, etc., are symlinked together, similar to a traditional Unix prefix. +By default, this view is located inside the environment directory. + +Installing the Software +----------------------- + +With our specs defined, the next step is to have Spack solve the dependency graph. +This is called "concretization." + +.. code-block:: console + + $ spack concretize + ==> Concretized ... + ... + +Spack will find a consistent set of versions and dependencies for the packages you requested. +Once this is done, you can install everything with a single command: + +.. code-block:: console + + $ spack install + +Spack will now download, build, and install all the necessary packages. +After the installation is complete, the environment's view is automatically updated. +Because the environment is active, your ``PATH`` and other variables are already configured. + +You can verify the installation: + +.. 
code-block:: console + + $ which python3 + /path/to/spack/var/spack/environments/myenv/.spack-env/view/bin/python3 + +When you are finished working in the environment, you can deactivate it: + +.. code-block:: console + + $ spack env deactivate + +Keeping Up With Updates +----------------------- + +Over time, you may want to update the packages in your environment to their latest versions. +Spack makes this easy. + +First, update Spack's package repository to make the latest package versions available: + +.. code-block:: console + + $ spack repo update + +Then, activate the environment, re-concretize and reinstall. + +.. code-block:: console + + $ spack env activate myenv + $ spack concretize --fresh-roots --force + $ spack install + +The ``--fresh-roots`` flag tells the concretizer to prefer the latest available package versions you've added explicitly to the environment, while allowing existing dependencies to remain unchanged if possible. +Alternatively, you can use the ``--fresh`` flag to prefer the latest versions of all packages including dependencies, but that might lead to longer install times and more changes. +The ``--force`` flag allows it to overwrite the previously solved dependencies. +The ``install`` command is smart and will only build packages that are not already installed for the new configuration. + +Cleaning Up Old Packages +------------------------ + +After an update, you may have old, unused packages taking up space. +You can safely remove any package that is no longer part of an environment's dependency tree. + +.. code-block:: console + + $ spack gc --except-any-environment + +This runs Spack's garbage collector, which will find and uninstall any package versions that are no longer referenced by *any* of your environments. + +Removing the Environment +------------------------ + +If you no longer need an environment, you can completely remove it. + +First, ensure the environment is not active: + +.. code-block:: console + + $ spack env deactivate + +Then, remove the environment. + +.. code-block:: console + + $ spack env rm myenv + +This removes the environment's directory and its view, but the packages that were installed for it remain in the Spack store. +To actually remove the installations from the Spack store and free up disk space, you can run the garbage collector again. + +.. code-block:: console + + $ spack gc --except-any-environment + +This command will safely uninstall any packages that are no longer referenced by any of your remaining environments. + +Next steps +---------- + +Spack has many other features for managing software environments. +See :doc:`environments` for more advanced usage. diff --git a/lib/spack/docs/extensions.rst b/lib/spack/docs/extensions.rst index e9d51a3510fd1d..b3e11903896005 100644 --- a/lib/spack/docs/extensions.rst +++ b/lib/spack/docs/extensions.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,40 +7,34 @@ :description lang=en: Discover how to extend Spack's core functionality by creating custom commands and plugins. -================= Custom Extensions ================= -*Spack extensions* allow you to extend Spack capabilities by deploying your -own custom commands or logic in an arbitrary location on your filesystem. 
-This might be extremely useful e.g., to develop and maintain a command whose purpose is -too specific to be considered for reintegration into the mainline or to -evolve a command through its early stages before starting a discussion to merge -it upstream. +*Spack extensions* allow you to add custom subcommands to the ``spack`` command. +This is extremely useful when developing and maintaining a command whose purpose is too specific to be included in the Spack codebase. +It's also useful for evolving a command through its early stages before starting a discussion to merge it upstream. -From Spack's point of view an extension is any path in your filesystem that -respects the following naming and layout for files: +From Spack's point of view, an extension is any path in your filesystem that respects the following naming and layout for files: .. code-block:: console spack-scripting/ # The top level directory must match the format 'spack-{extension_name}' ├── pytest.ini # Optional file if the extension ships its own tests ├── scripting # Folder that may contain modules that are needed for the extension commands - │   ├── cmd # Folder containing extension commands - │   │   └── filter.py # A new command that will be available - │   └── functions.py # Module with internal details - └── tests # Tests for this extension + │ ├── cmd # Folder containing extension commands + │ │ └── filter.py # A new command that will be available + │ └── functions.py # Module with internal details + ├── tests # Tests for this extension │ ├── conftest.py │ └── test_filter.py └── templates # Templates that may be needed by the extension -In the example above, the extension is named *scripting*. It adds an additional command -(``spack filter``) and unit tests to verify its behavior. +In the example above, the extension is named *scripting*. +It adds an additional command (``spack filter``) and unit tests to verify its behavior. -The extension can import any core Spack module in its implementation. When loaded by -the ``spack`` command, the extension itself is imported as a Python package in the -``spack.extensions`` namespace. In the example above, since the extension is named -"scripting", the corresponding Python module is ``spack.extensions.scripting``. +The extension can import any core Spack module in its implementation. +When loaded by the ``spack`` command, the extension itself is imported as a Python package in the ``spack.extensions`` namespace. +In the example above, since the extension is named "scripting", the corresponding Python module is ``spack.extensions.scripting``. The code for this example extension can be obtained by cloning the corresponding git repository: @@ -47,12 +42,11 @@ The code for this example extension can be obtained by cloning the corresponding $ git -C /tmp clone https://github.com/spack/spack-scripting.git ---------------------------------- Configure Spack to Use Extensions --------------------------------- -To make your current Spack instance aware of extensions you should add their root -paths to ``config.yaml``. In the case of our example this means ensuring that: +To make your current Spack instance aware of extensions you should add their root paths to ``config.yaml``. +In the case of our example, this means ensuring that: .. code-block:: yaml @@ -60,8 +54,8 @@ paths to ``config.yaml``. In the case of our example this means ensuring that: extensions: - /tmp/spack-scripting -is part of your configuration file. 
Once this is set up, any command that the extension provides -will be available from the command line: +is part of your configuration file. +Once this is set up, any command that the extension provides will be available from the command line: .. code-block:: console @@ -113,22 +107,23 @@ The corresponding unit tests can be run giving the appropriate options to ``spac (5 durations < 0.005s hidden. Use -vv to show these durations.) =========================================== 5 passed in 5.06s ============================================ ---------------------------------------- Registering Extensions via Entry Points --------------------------------------- .. note:: Python version >= 3.8 is required to register extensions via entry points. -Spack can be made aware of extensions that are installed as part of a Python package. To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. Consider the Python package ``my_package`` that includes a Spack extension: +Spack can be made aware of extensions that are installed as part of a Python package. +To do so, register a function that returns the extension path, or paths, to the ``"spack.extensions"`` entry point. +Consider the Python package ``my_package`` that includes a Spack extension: .. code-block:: console my-package/ ├── src - │   ├── my_package - │   │   └── __init__.py - │   └── spack-scripting/ # the spack extensions + │ ├── my_package + │ │ └── __init__.py + │ └── spack-scripting/ # the spack extensions └── pyproject.toml adding the following to ``my_package``'s ``pyproject.toml`` will make the ``spack-scripting`` extension visible to Spack when ``my_package`` is installed: @@ -144,6 +139,7 @@ The function ``my_package.get_extension_path`` in ``my_package/__init__.py`` mig import importlib.resources + def get_extension_path(): dirname = importlib.resources.files("my_package").joinpath("spack-scripting") if dirname.exists(): diff --git a/lib/spack/docs/features.rst b/lib/spack/docs/features.rst index c5ccb072c7b0a9..8324357a48d14d 100644 --- a/lib/spack/docs/features.rst +++ b/lib/spack/docs/features.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,100 +7,96 @@ :description lang=en: An overview of the key features that distinguish Spack from other package managers, including simple installation, custom configurations, and non-destructive installs. -================ Feature Overview ================ -This is a high-level overview of features that make Spack different -from other `package managers -`_ and `port -systems `_. +This is a high-level overview of features that make Spack different from other `package managers `_ and `port systems `_. ---------------------------- Simple package installation --------------------------- -Installing the default version of a package is simple. This will install -the latest version of the ``mpileaks`` package and all of its dependencies: +Installing the default version of a package is simple. +This will install the latest version of the ``mpileaks`` package and all of its dependencies: -.. code-block:: console +.. code-block:: spec $ spack install mpileaks --------------------------------- Custom versions & configurations -------------------------------- -Spack allows installation to be customized. 
Users can specify the -version, compile-time options, and cross-compile platform, all on the command line. +Spack allows installation to be customized. +Users can specify the version, compile-time options, and target architecture, all on the command line. -.. code-block:: console +.. code-block:: spec # Install a particular version by appending @ - $ spack install hdf5@1.14.6 + $ spack install hdf5@1.14 # Add special compile-time options by name - $ spack install hdf5@1.14.6 api=v110 + $ spack install hdf5@1.14 api=v110 # Add special boolean compile-time options with + - $ spack install hdf5@1.14.6 +hl + $ spack install hdf5@1.14 +hl # Add compiler flags using the conventional names - $ spack install hdf5@1.14.6 cflags="-O3 -floop-block" + $ spack install hdf5@1.14 cflags="-O3 -floop-block" - # Cross-compile for a different micro-architecture with target= - $ spack install hdf5@1.14.6 target=icelake + # Target a specific micro-architecture + $ spack install hdf5@1.14 target=icelake -Users can specify as many or as few options as they care about. Spack -will fill in the unspecified values with sensible defaults. +Users can specify as many or as few options as they care about. +Spack will fill in the unspecified values with sensible defaults. ----------------------- Customize dependencies ---------------------- Spack allows *dependencies* of a particular installation to be customized extensively. -Users can specify both *direct* dependencies of a node, using the ``%`` sigil, or *transitive* -dependencies, using the ``^`` sigil: +Users can specify both *direct* dependencies of a package, using the ``%`` sigil, and *transitive* dependencies, using the ``^`` sigil: -.. code-block:: console +.. code-block:: spec - # Install hdf5 using gcc@15.1.0 as a compiler (direct dependency of hdf5) - $ spack install hdf5@1.14.6 %gcc@15.1.0 + # Install hdf5 using gcc@15 as a compiler (direct dependency of hdf5) + $ spack install hdf5@1.14 %gcc@15 # Install hdf5 using hwloc with CUDA enabled (transitive dependency) - $ spack install hdf5@1.14.6 ^hwloc+cuda + $ spack install hdf5@1.14 ^hwloc+cuda -The expression on the command line can be as simple, or as complicated, as the user needs: +The expression on the command line can be as simple or as complicated as the user needs: -.. code-block:: console +.. code-block:: spec # Install hdf5 compiled with gcc@15, linked to mpich compiled with gcc@14 - $ spack install hdf5@1.14.6 %gcc@15 ^mpich %gcc@14 + $ spack install hdf5@1.14 %gcc@15 ^mpich %gcc@14 ------------------------- Non-destructive installs ------------------------ -Spack installs every unique package/dependency configuration into its -own prefix, so new installs will not break existing ones. +Spack installs every unique package/dependency configuration into its own prefix, so new installs will not break existing ones. -------------------------------- Packages can peacefully coexist ------------------------------- -Spack avoids library misconfiguration by using ``RPATH`` to link -dependencies. When a user links a library or runs a program, it is -tied to the dependencies it was built with, so there is no need to -manipulate ``LD_LIBRARY_PATH`` at runtime. +Spack avoids library misconfiguration by using ``RPATH`` to link dependencies. +When a user links a library or runs a program, it is tied to the dependencies it was built with, so there is no need to manipulate ``LD_LIBRARY_PATH`` at runtime. 
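+
+For example, on an ELF-based system you can inspect the search paths embedded in a Spack-installed binary with ``readelf`` (a sketch; the package and paths are illustrative):
+
+.. code-block:: console
+
+   $ readelf -d $(spack location -i hdf5)/bin/h5dump | grep -i runpath
+    0x000000000000001d (RUNPATH)            Library runpath: [/path/to/spack/opt/spack/...]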
-------------------------- -Creating packages is easy ------------------------- -To create a new package, all Spack needs is a URL for the source -archive. The ``spack create`` command will create a boilerplate -package file, and the package authors can fill in specific build steps -in pure Python. +Unprivileged user installs +-------------------------- + +Spack does not require administrator privileges to install packages. +You can install software in any directory you choose, making it easy to manage packages in your home directory or shared project locations without needing sudo access. + +From source and binary +---------------------- + +Spack's core strength is creating highly customized, optimized software builds from source code. +While it's primarily a from-source package manager, it also supports fast binary installations through build caches. + +Contributing is easy +-------------------- + +To contribute a new package, all Spack needs is a URL for the source archive. +The ``spack create`` command will create a boilerplate package file, and the package authors can fill in specific build steps in pure Python. For example, this command: @@ -136,12 +133,23 @@ creates a simple Python file: args = [] return args -It doesn't take much Python coding to get from there to a working -package: +It doesn't take much Python coding to get from there to a working package: .. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/libelf/package.py :lines: 5- -Spack also provides wrapper functions around common commands like -``configure``, ``make``, and ``cmake`` to make writing packages -simple. + +Understanding Spack's scope +--------------------------- + +Spack is a package manager designed for performance and customization of software. +To clarify its role and prevent common misconceptions, it's helpful to understand what falls outside of its current scope: + +1. Spack is a user-space tool, not an operating system. + It runs on top of your existing OS (like Linux, macOS, or Windows) and complements the system's native package manager (like ``yum`` or ``apt``), but does not replace it. + Spack relies on the host system for essentials like the C runtime libraries. + Building a software stack with a custom ``libc`` is a planned future capability but is not yet implemented. + +2. Spack performs native builds, not cross-compilation. + It builds software for the same processor architecture it is running on. + Support for cross-compilation (e.g., building for an ARM processor on an x86 machine) is a planned future capability but is not yet implemented. diff --git a/lib/spack/docs/frequently_asked_questions.rst b/lib/spack/docs/frequently_asked_questions.rst index 866dfa83a04b9e..ada520fc06c630 100644 --- a/lib/spack/docs/frequently_asked_questions.rst +++ b/lib/spack/docs/frequently_asked_questions.rst @@ -1,47 +1,40 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) .. meta:: :description lang=en: - Find answers to common questions about Spack, covering topics like version and variant selection, package preferences, and concretizer behavior. + Answers to common Spack questions, including version and variant selection, package preferences, compiler configuration, and concretizer behavior, with practical YAML and command-line examples. 
-========================== Frequently Asked Questions ========================== This page contains answers to frequently asked questions about Spack. -If you have questions that are not answered here, feel free to ask on -`Slack <https://slack.spack.io>`_ or `GitHub Discussions -<https://github.com/spack/spack/discussions>`_. If you've learned the -answer to a question that you think should be here, please consider -contributing to this page. +If you have questions that are not answered here, feel free to ask on `Slack <https://slack.spack.io>`_ or `GitHub Discussions <https://github.com/spack/spack/discussions>`_. +If you've learned the answer to a question that you think should be here, please consider contributing to this page. .. _faq-concretizer-precedence: ------------------------------------------------------ Why does Spack pick particular versions and variants? ----------------------------------------------------- This question comes up in a variety of forms: - 1. Why does Spack seem to ignore my package preferences from ``packages.yaml`` configuration? - 2. Why does Spack toggle a variant instead of using the default from the ``package.py`` file? +1. Why does Spack seem to ignore my package preferences from the ``packages.yaml`` configuration? +2. Why does Spack toggle a variant instead of using the default from the ``package.py`` file? -The short answer is that Spack always picks an optimal configuration -based on a complex set of criteria\ [#f1]_. These criteria are more nuanced -than always choosing the latest versions or default variants. +The short answer is that Spack always picks an optimal configuration based on a complex set of criteria\ [#f1]_. +These criteria are more nuanced than always choosing the latest versions or default variants. .. note:: As a rule of thumb: requirements + constraints > strong preferences > reuse > preferences > defaults. -The following set of criteria (from lowest to highest precedence) explain -common cases where concretization output may seem surprising at first. +The following set of criteria (from lowest to highest precedence) explains common cases where concretization output may seem surprising at first. -1. :ref:`Package preferences ` configured in ``packages.yaml`` - override variant defaults from ``package.py`` files, and influence the optimal - ordering of versions. Preferences are specified as follows: +1. :ref:`Package preferences ` configured in ``packages.yaml`` override variant defaults from ``package.py`` files, and influence the optimal ordering of versions. + Preferences are specified as follows: .. code-block:: yaml @@ -50,20 +43,17 @@ common cases where concretization output may seem surprising at first. version: [1.0, 1.1] variants: ~mpi -2. :ref:`Reuse concretization ` configured in ``concretizer.yaml`` - overrides preferences, since it's typically faster to reuse an existing spec than to - build a preferred one from sources. When build caches are enabled, specs may be reused - from a remote location too. Reuse concretization is configured as follows: +2. :ref:`Reuse concretization ` configured in ``concretizer.yaml`` overrides preferences, since it's typically faster to reuse an existing spec than to build a preferred one from sources. + When build caches are enabled, specs may be reused from a remote location too. + Reuse concretization is configured as follows: .. code-block:: yaml concretizer: reuse: dependencies # other options are 'true' and 'false' -3. :ref:`Strong preferences ` configured in ``packages.yaml`` - are higher priority than reuse, and can be used to strongly prefer a specific version - or variant, without erroring out if it's not possible. 
Strong preferences are specified - as follows: +3. :ref:`Strong preferences ` configured in ``packages.yaml`` are higher priority than reuse, and can be used to strongly prefer a specific version or variant, without erroring out if it's not possible. + Strong preferences are specified as follows: .. code-block:: yaml @@ -72,9 +62,8 @@ common cases where concretization output may seem surprising at first. prefer: - "@1.1: ~mpi" -4. :ref:`Package requirements ` configured in ``packages.yaml``, - and constraints from the command line as well as ``package.py`` files override all - of the above. Requirements are specified as follows: +4. :ref:`Package requirements ` configured in ``packages.yaml``, and constraints from the command line as well as ``package.py`` files override all of the above. + Requirements are specified as follows: .. code-block:: yaml @@ -82,13 +71,86 @@ common cases where concretization output may seem surprising at first. foo: require: - "@1.2: +mpi" - conflicts: + conflict: - "@1.4" -Requirements and constraints restrict the set of possible solutions, while reuse -behavior and preferences influence what an optimal solution looks like. +Requirements and constraints restrict the set of possible solutions, while reuse behavior and preferences influence what an optimal solution looks like. + +How do I use a specific compiler? +--------------------------------- + +When you have multiple compilers available in :ref:`spack-compiler-list` and want to build your packages with a specific one, you have the following options: + +1. Specify your compiler preferences globally for all packages in configuration files. +2. Specify them at the level of individual specs, like ``pkg %gcc@15`` or ``pkg %c,cxx=gcc@15``. + +We'll explore both options in more detail. + +Specific compiler for all packages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to use a specific compiler for all packages, it's best to use :ref:`strong preferences in packages.yaml config `. +The following example prefers GCC 15 for all languages ``c``, ``cxx``, and ``fortran``: + +.. code-block:: yaml + :caption: Recommended: *prefer* a specific compiler + :name: code-example-prefer-compiler + + packages: + c: + prefer: + - gcc@15 + cxx: + prefer: + - gcc@15 + fortran: + prefer: + - gcc@15 + +You can also replace ``prefer:`` with ``require:`` if you want Spack to produce an error when the preferred compiler cannot be used (see the sketch at the end of this section). +See also :ref:`the previous FAQ entry `. + +In Spack, the languages ``c``, ``cxx``, and ``fortran`` are :ref:`virtual packages `, on which packages depend if they need a compiler for that language. +Compiler packages provide these language virtuals. +When you specify these strong preferences, Spack determines whether the package depends on any of the language virtuals, and if so, it applies the associated compiler spec when possible. + +What is **not recommended** is to define ``%gcc`` as a required dependency of all packages: + +.. code-block:: yaml + :caption: Incorrect: requiring a dependency on a compiler for all packages + :name: code-example-typical-mistake-require-compiler + + packages: + all: + require: + - "%gcc@15" + +This is *incorrect*, because some packages do not need a compiler at all (e.g. pure Python packages).
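+
+For completeness, here is a minimal sketch of the stricter ``require:`` variant mentioned above (it simply reuses the ``gcc@15`` compiler spec from the recommended example):
+
+.. code-block:: yaml
+   :caption: Stricter: *require* a specific compiler
+
+   packages:
+     c:
+       require:
+       - gcc@15
+     cxx:
+       require:
+       - gcc@15
+     fortran:
+       require:
+       - gcc@15
+
+With ``require:``, any spec that needs one of these languages but cannot be built with GCC 15 will fail to concretize, instead of silently falling back to another compiler.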
+ +Specific compiler for individual specs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If different parts of your software stack need to be built with different compilers, it's best to specify compilers as dependencies of the relevant specs (whether on the command line or in Spack environments). + +.. code-block:: spec + :caption: Example of specifying different compilers for different specs + :name: console-example-different-compilers + + $ spack install foo %gcc@15 ^bar %intel-oneapi-compilers + +What this means is that ``foo`` will depend on GCC 15, while ``bar`` will depend on ``intel-oneapi-compilers``. + +You can also be more specific about what compiler to use for a particular language: + +.. code-block:: spec + :caption: Example of specifying different compilers for different languages + :name: console-example-different-languages + + $ spack install foo %c,cxx=gcc@15 %fortran=intel-oneapi-compilers + +These input specs can be simplified using :doc:`toolchains_yaml`. +See also :ref:`pitfalls-without-toolchains` for common mistakes to avoid. .. rubric:: Footnotes -.. [#f1] The exact list of criteria can be retrieved with the ``spack solve`` command +.. [#f1] The exact list of criteria can be retrieved with the :ref:`spack-solve` command. diff --git a/lib/spack/docs/getting_help.rst b/lib/spack/docs/getting_help.rst index ded21719fdbcdb..ebd8fd3496d960 100644 --- a/lib/spack/docs/getting_help.rst +++ b/lib/spack/docs/getting_help.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,25 +7,20 @@ :description lang=en: Find out how to get help with Spack, including using the spack help command. -============ Getting Help ============ .. _cmd-spack-help: --------------- ``spack help`` -------------- -If you don't find what you need here, the ``help`` subcommand will -print out a list of *all* of Spack's options and subcommands: +If you don't find what you need here, the ``help`` subcommand will print out a list of *all* of Spack's options and subcommands: .. command-output:: spack help -Adding an argument, e.g., ``spack help <subcommand>``, will print out -usage information for a particular subcommand: +Adding an argument, e.g., ``spack help <subcommand>``, will print out usage information for a particular subcommand: .. command-output:: spack help install -Alternatively, you can use ``spack --help`` in place of ``spack help``, or -``spack <subcommand> --help`` to get help on a particular subcommand. +Alternatively, you can use ``spack --help`` in place of ``spack help``, or ``spack <subcommand> --help`` to get help on a particular subcommand. diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst index 52fe4a23323c2b..cef44679a75434 100644 --- a/lib/spack/docs/getting_started.rst +++ b/lib/spack/docs/getting_started.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,68 +9,63 @@ .. _getting_started: -=============== Getting Started =============== -Getting Spack is easy. You can clone it from the `GitHub repository -<https://github.com/spack/spack>`_ using this command: +Getting Spack is easy. +You can clone it from the `GitHub repository <https://github.com/spack/spack>`_ using this command: .. code-block:: console $ git clone --depth=2 https://github.com/spack/spack.git -This will create a directory called ``spack``.
Once you have cloned Spack, we recommend sourcing the appropriate script for your shell: - -.. tab-set:: +This will create a directory called ``spack``. +Once you have cloned Spack, we recommend sourcing the appropriate script for your shell. - .. tab-item:: bash/zsh/sh +For *bash*, *zsh*, and *sh* users: - .. code-block:: console +.. code-block:: console - $ . spack/share/spack/setup-env.sh + $ . spack/share/spack/setup-env.sh - .. tab-item:: tcsh/csh +For *csh* and *tcsh* users: - .. code-block:: console +.. code-block:: console - $ source spack/share/spack/setup-env.csh + $ source spack/share/spack/setup-env.csh - .. tab-item:: fish +For *fish* users: - .. code-block:: console +.. code-block:: console - $ . spack/share/spack/setup-env.fish + $ . spack/share/spack/setup-env.fish Now you're ready to use Spack! ------------------------------ List packages you can install ----------------------------- -Once Spack is ready you can list all the packages it knows about with the following command: +Once Spack is ready, you can list all the packages it knows about with the following command: -.. code-block:: console +.. code-block:: spec $ spack list If you want to get more information on a specific package, for instance ``hdf5``, you can use: -.. code-block:: console +.. code-block:: spec $ spack info hdf5 This command shows information about ``hdf5``, including a brief description, the versions of the package Spack knows about, and all the options you can activate when installing. -As you can see it's quite simple to gather basic information on packages, before you install them! +As you can see, it's quite simple to gather basic information on packages before you install them! .. admonition:: Slowdown on the very first command :class: warning - :collapsible: - The very first command run with Spack will take a while to finish, as Spack has to build a few caches to speed up subsequent command execution. + The first command you run with Spack may take a while, as Spack builds caches to speed up future commands. ------------------------------ Installing your first package ----------------------------- @@ -80,7 +76,7 @@ To search your machine for available compilers, you can run: $ spack compiler find -The command shows users if any compiler was found, and where its configuration is stored. +The command shows users whether any compilers were found and where their configuration is stored. If the search was successful, you can now list known compilers, and get an output similar to the following: .. code-block:: console @@ -90,14 +86,14 @@ If the search was successful, you can now list known compilers, and get an outpu -- gcc ubuntu20.04-x86_64 --------------------------------------- [e] gcc@9.4.0 [e] gcc@8.4.0 [e] gcc@10.5.0 -If no compilers were found, you need either to: +If no compilers were found, you need to either: * Install further prerequisites, see :ref:`verify-spack-prerequisites`, and repeat the search above. * Register a buildcache that provides a compiler already available as a binary -Once a compiler is available you can proceed installing your first package: +Once a compiler is available, you can proceed to install your first package: -.. code-block:: console +.. code-block:: spec $ spack install tcl @@ -154,9 +150,9 @@ The output of this command should look similar to the following: Stage: 0.46s. Autoreconf: 0.00s. Configure: 9.25s. Build: 1m 8.71s. Install: 3.32s. Post-install: 0.68s.
Total: 1m 22.61s [+] /home/spack/.local/spack/opt/linux-icelake/tcl-8.6.12-6vo5hxeqw5plzd6gvzm74wlfz5stnzcv -Congratulations! You just installed your first package with Spack! +Congratulations! +You just installed your first package with Spack! ------------------------------------ Use the software you just installed ----------------------------------- @@ -172,45 +168,41 @@ This works, but using such a long absolute path is not the most convenient way t The simplest way to have ``tclsh`` available on the command line is: -.. code-block:: console +.. code-block:: spec $ spack load tcl -The environment of the current shell has now been modified, and you can run: +The environment of the current shell has now been modified, and you can run .. code-block:: console - $ tcsh + $ tclsh -directly. To undo these modifications, you can: +directly. +To undo these modifications, you can: -.. code-block:: console +.. code-block:: spec $ spack unload tcl .. admonition:: Environments and views :class: tip - :ref:`Spack Environments ` are a better way to install and load a set of packages that are frequently used together. + :doc:`Spack Environments ` are a better way to install and load a set of packages that are frequently used together. The discussion of this topic goes beyond this ``Getting Started`` guide, and we refer to :ref:`environments` for more information. ----------- Next steps ---------- This section helped you get Spack installed and running quickly. There are further resources in the documentation that cover both basic and advanced topics in more detail: -.. tab-set:: - - .. tab-item:: Basic Usage - - 1. :ref:`basic-usage` - 2. :ref:`compiler-config` - 3. :ref:`spack-environments-basic-usage` - - .. tab-item:: Advanced Topics +Basic Usage + 1. :ref:`basic-usage` + 2. :ref:`compiler-config` + 3. :doc:`environments_basics` - 1. :ref:`toolchains` - 2. :ref:`audit-packages-and-configuration` - 3. :ref:`verify-installations` +Advanced Topics + 1. :ref:`toolchains` + 2. :ref:`cmd-spack-audit` + 3. :ref:`cmd-spack-verify` diff --git a/lib/spack/docs/google5fda5f94b4ffb8de.html b/lib/spack/docs/google5fda5f94b4ffb8de.html new file mode 100644 index 00000000000000..8a53765ad8912f --- /dev/null +++ b/lib/spack/docs/google5fda5f94b4ffb8de.html @@ -0,0 +1 @@ +google-site-verification: google5fda5f94b4ffb8de.html \ No newline at end of file diff --git a/lib/spack/docs/gpu_configuration.rst b/lib/spack/docs/gpu_configuration.rst index 5ab48c80081517..cec238e71268a0 100644 --- a/lib/spack/docs/gpu_configuration.rst +++ b/lib/spack/docs/gpu_configuration.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,22 +7,18 @@ :description lang=en: A guide to configuring Spack to use external GPU support, including ROCm and CUDA installations, as well as the OpenGL API. -========================== Using External GPU Support ========================== -Many packages come with a ``+cuda`` or ``+rocm`` variant. With no added -configuration, Spack will download and install the needed components. -It may be preferable to use existing system support: the following sections -help with using a system installation of GPU libraries. +Many packages come with a ``+cuda`` or ``+rocm`` variant. +With no added configuration, Spack will download and install the needed components. 
+It may be preferable to use existing system support: the following sections help with using a system installation of GPU libraries. ------------------------------------ Using an External ROCm Installation ----------------------------------- -Spack breaks down ROCm into many separate component packages. The following -is an example ``packages.yaml`` that organizes a consistent set of ROCm -components for use by dependent packages: +Spack breaks down ROCm into many separate component packages. +The following is an example ``packages.yaml`` that organizes a consistent set of ROCm components for use by dependent packages: .. code-block:: yaml @@ -38,11 +35,6 @@ components for use by dependent packages: externals: - spec: hsa-rocr-dev@5.3.0 prefix: /opt/rocm-5.3.0/ - llvm-amdgpu: - buildable: false - externals: - - spec: llvm-amdgpu@5.3.0 - prefix: /opt/rocm-5.3.0/llvm/ comgr: buildable: false externals: @@ -78,22 +70,17 @@ This is in combination with the following compiler definition: externals: - spec: llvm-amdgpu@=5.3.0 prefix: /opt/rocm-5.3.0 - compilers: - c: /opt/rocm-5.3.0/bin/amdclang - cxx: /opt/rocm-5.3.0/bin/amdclang++ - fortran: null + extra_attributes: + compilers: + c: /opt/rocm-5.3.0/bin/amdclang + cxx: /opt/rocm-5.3.0/bin/amdclang++ This includes the following considerations: -- Each of the listed externals specifies ``buildable: false`` to force Spack - to use only the externals we defined. -- ``spack external find`` can automatically locate some of the ``hip``/``rocm`` - packages, but not all of them, and furthermore not in a manner that - guarantees a complementary set if multiple ROCm installations are available. -- The ``prefix`` is the same for several components, but note that others - require listing one of the subdirectories as a prefix. +- Each of the listed externals specifies ``buildable: false`` to force Spack to use only the externals we defined. +- ``spack external find`` can automatically locate some of the ``hip``/``rocm`` packages, but not all of them, and furthermore not in a manner that guarantees a complementary set if multiple ROCm installations are available. +- The ``prefix`` is the same for several components, but note that others require listing one of the subdirectories as a prefix. ------------------------------------ Using an External CUDA Installation ----------------------------------- @@ -115,25 +102,25 @@ where ``/opt/cuda/cuda-11.0.2/lib/`` contains ``libcudart.so``. ------------------------------------ Using an External OpenGL API ------------------------------------ +---------------------------- Depending on whether we have a graphics card or not, we may choose to use OSMesa or GLX to implement the OpenGL API. If a graphics card is unavailable, OSMesa is recommended and can typically be built with Spack. -However, if we prefer to utilize the system GLX tailored to our graphics card, we need to declare it as an external. Here's how to do it: +However, if we prefer to utilize the system GLX tailored to our graphics card, we need to declare it as an external. +Here's how to do it: .. code-block:: yaml - packages: - libglx: - require: [opengl] - opengl: - buildable: false - externals: - - prefix: /usr/ - spec: opengl@4.6 + packages: + libglx: + require: [opengl] + opengl: + buildable: false + externals: + - prefix: /usr/ + spec: opengl@4.6 Note that the prefix has to be the root of both the libraries and the headers (e.g., ``/usr``), not the path to the ``lib`` directory. 
To know which spec for OpenGL is available, use ``cd /usr/include/GL && grep -Ri gl_version``. diff --git a/lib/spack/docs/images/ghcr_spack.png b/lib/spack/docs/images/ghcr_spack.png deleted file mode 100644 index 03287da866d016..00000000000000 Binary files a/lib/spack/docs/images/ghcr_spack.png and /dev/null differ diff --git a/lib/spack/docs/images/spec_anatomy.svg b/lib/spack/docs/images/spec_anatomy.svg index 060258aae075b9..76bc238d9fcbd1 100644 --- a/lib/spack/docs/images/spec_anatomy.svg +++ b/lib/spack/docs/images/spec_anatomy.svg @@ -1,365 +1,32 @@ - - - - - - image/svg+xml - - - - - - - - - - Spack Spec Anatomy - - mpileaks@1.2:1.4 +debug ~qt target=x86_64_v3 %gcc@15.1.0 ^libelf@1.1 %gcc@14.2.0 - - - - - Package Name - - - - - Version - - - - - Variants - - - - - Architecture - - - - - Direct Dependency - - - - - Transitive Dependency - - - Ver - - - - - Direct Dependency - - - Spec Components: - - - - mpileaks - - Package name - - @1.2:1.4 - - Version range (1.2 to 1.4 inclusive) - - +debug ~qt - - Variants (enable debug, disable qt) - - target=x86_64_v3 - - Target architecture (x86-64 machine with AVX2 support) - - %gcc@14.2.0 - - Direct dependency (typically, on a compiler) - - ^libelf - - Transitive dependency - + + +mpileaks@1.2:1.4 +debug ~qt target=x86_64_v3 %gcc@15 ^libelf@1.1 %clang@20 + + +Package name + + +Version + + +Variants + + +Architecture + + +Direct dependency + + +Transitive dependency diff --git a/lib/spack/docs/include_yaml.rst b/lib/spack/docs/include_yaml.rst index 6cc35146568edd..3761c33f7d4ae8 100644 --- a/lib/spack/docs/include_yaml.rst +++ b/lib/spack/docs/include_yaml.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,18 +9,15 @@ .. _include-yaml: -=============================== Include Settings (include.yaml) =============================== -Spack allows you to include configuration files through ``include.yaml``. -Using the ``include:`` heading results in pulling in external configuration -information to be used by any Spack command. +Spack allows you to include configuration files through ``include.yaml``, or in the ``include:`` section in an environment. + +Local files +~~~~~~~~~~~ -Included configuration files are required *unless* they are explicitly optional -or the entry's condition evaluates to ``false``. Optional includes are specified -with the ``optional`` clause and conditional ones with the ``when`` clause. For -example, +You can include a single configuration file or an entire configuration *scope* like this: .. code-block:: yaml @@ -30,40 +28,244 @@ example, - path: /path/to/os-specific/config-dir when: os == "ventura" -shows all three. The first entry, ``/path/to/a/required/config.yaml``, -indicates that the included ``config.yaml`` file is required (so must exist). -Use of ``optional: true`` for ``/path/to/$os/$target/config`` means -the path is only included if it exists. The condition ``os == "ventura"`` -in the ``when`` clause for ``/path/to/os-specific/config-dir`` means the -path is only included when the operating system (``os``) is ``ventura``. +Included paths may be absolute, relative (to the configuration file), or they can be specified as URLs. + +* ``optional``: Spack will raise an error when an included configuration file does not exist, *unless* it is explicitly made ``optional: true``, like the second path above. 
+* ``when``: Configuration scopes can also be included *conditionally* with ``when``. + ``when:`` conditions are evaluated as described for :ref:`Spec List References `. + -The same conditions and variables in `Spec List References -`_ -can be used for conditional activation in the ``when`` clauses. +Remote file URLs +~~~~~~~~~~~~~~~~ -Included files can be specified by path or by their parent directory. -Paths may be absolute, relative (to the configuration file including the path), -or specified as URLs. Only the ``file``, ``ftp``, ``http``, and ``https`` protocols (or -schemes) are supported. Spack-specific, environment and user path variables -can be used. (See :ref:`config-file-variables` for more information.) +Only the ``file``, ``ftp``, ``http``, and ``https`` protocols (or schemes) are supported for remote file URLs. +Spack-specific, environment, and user path variables can be used. +(See :ref:`config-file-variables` for more information.) -A ``sha256`` is required for remote file URLs and must be specified as follows: +A ``sha256`` is required and must be specified as follows: .. code-block:: yaml include: - - path: https://github.com/path/to/raw/config/compilers.yaml + - path: https://github.com/path/to/raw/config/config.yaml sha256: 26e871804a92cd07bb3d611b31b4156ae93d35b6a6d6e0ef3a67871fcb1d258b -Additionally, remote file URLs must link to the **raw** form of the file's -contents (e.g., `GitHub -`_ -or `GitLab -`_). +The ``config.yaml`` file would be cached locally to a special include location and its contents included in Spack's configuration. .. warning:: - Recursive includes are not currently processed in a breadth-first manner, - so the value of a configuration option that is altered by multiple included - files may not be what you expect. This will be addressed in a future - update. + Remote file URLs must link to the **raw** form of the file's contents (e.g., `GitHub `_ or `GitLab `_). + +``git`` repository files +~~~~~~~~~~~~~~~~~~~~~~~~ + +You can also include configuration files from a ``git`` repository. +The ``branch``, ``commit``, or ``tag`` to be checked out is required. +A list of relative paths in which to find the configuration files is also required. +Inclusion of the repository (and its paths) can be optional or conditional. + +For example, suppose we only want to include the ``config.yaml`` and ``packages.yaml`` files from the `spack/spack-configs <https://github.com/spack/spack-configs>`_ repository's ``USC/config`` directory when using the ``centos7`` operating system. +We would then configure the ``include.yaml`` file as follows: + +.. code-block:: yaml + + include: + - git: https://github.com/spack/spack-configs + branch: main + when: os == "centos7" + paths: + - USC/config/config.yaml + - USC/config/packages.yaml + +If the condition is satisfied, then the ``main`` branch of the repository will be cloned and the settings for the two files integrated into Spack's configuration. + +.. versionadded:: 1.1 + ``git:``, ``branch:``, ``commit:``, and ``tag:`` attributes. + +Precedence +~~~~~~~~~~ + +Using ``include:`` adds the included files as a configuration scope *below* the including file. +This is so that you can override settings from files you include. +If you want one file to take precedence over another, you can put the include with higher precedence earlier in the list: + +.. 
code-block:: yaml + + include: + - /path/to/higher/precedence/scope/ + - /path/to/middle/precedence/scope/ + - git: https://github.com/org/git-repo-scope + commit: 95c59784bd02ea248bf905d79d063df38e087b19 + +``prefer_modify`` +^^^^^^^^^^^^^^^^^ + +When you use commands like ``spack compiler find``, ``spack external find``, ``spack config edit``, or ``spack config add``, they modify the topmost writable scope in the current configuration. +Scopes can tell Spack to prefer to edit their included scopes instead, using ``prefer_modify``: + +.. code-block:: yaml + + include: + - name: "preferred" + path: /path/to/scope/we/want-to-write + prefer_modify: true + +Now, if the including scope is the highest precedence scope and would otherwise be selected automatically by one of these commands, they will instead prefer to edit ``preferred``. +The including scope can still be modified by using the ``--scope`` argument (e.g., ``spack compiler find --scope NAME``). + +.. warning:: + + Recursive includes are not currently processed in a breadth-first manner, so the value of a configuration option that is altered by multiple included files may not be what you expect. + This will be addressed in a future update. + +.. versionadded:: 1.1 The ``prefer_modify:`` attribute. + +Overriding local paths +~~~~~~~~~~~~~~~~~~~~~~ + +Optionally, you can enable a local path to be overridden by an environment variable using ``path_override_env_var:``: + +.. code-block:: yaml + + include: + - path_override_env_var: SPECIAL_CONFIG_PATH + path: /path/to/special/config.yaml + +Here, if ``SPECIAL_CONFIG_PATH`` is set, its value will be used as the path. +If not, Spack will instead use the ``path:`` specified in configuration. + +.. note:: + + ``path_override_env_var:`` is currently only supported for ``path:`` includes, not ``git:`` includes. + +.. versionadded:: 1.1 + The ``path_override_env_var:`` attribute. + +Named configuration scopes +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, the included scope names are constructed by appending ``:`` and the included scope's basename to the parent scope name. +For example, Spack's own ``defaults`` scope includes a ``base`` scope and a platform-specific scope:: + + > spack config scopes -p + Scope Path + command_line + spack /home/username/spack/etc/spack/ + user /home/username/.spack/ + site /home/username/spack/etc/spack/site/ + defaults /home/username/spack/etc/spack/defaults/ + defaults:darwin /home/username/spack/etc/spack/defaults/darwin/ + defaults:base /home/username/spack/etc/spack/defaults/base/ + _builtin + +You can see ``defaults`` and the included ``defaults:base`` and ``defaults:darwin`` scopes here. + +If you want to define your own name for an included scope, you can supply an optional ``name:`` argument when you include it: + +.. code-block:: yaml + + spack: + include: + - path: foo + name: myscope + +You can see the ``myscope`` name when you activate this environment:: + + > spack -e ./env config scopes -p + Scope Path + command_line + env:/home/username/env /home/username/env/spack.yaml/ + myscope /home/username/env/foo/ + spack /home/username/spack/etc/spack/ + user /home/username/.spack/ + site /home/username/spack/etc/spack/site/ + defaults /home/username/spack/etc/spack/defaults/ + defaults:darwin /home/username/spack/etc/spack/defaults/darwin/ + defaults:base /home/username/spack/etc/spack/defaults/base/ + _builtin + +You can now use the argument ``myscope`` to refer to this, for example with ``spack config --scope myscope add ...``.
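+
+For example, a short session with the ``myscope`` include above might look like this (a sketch; the ``build_jobs`` setting and its value are purely illustrative):
+
+.. code-block:: console
+
+   $ spack -e ./env config --scope myscope add config:build_jobs:8
+   $ spack -e ./env config --scope myscope get config
+   config:
+     build_jobs: 8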
+ +Built-in configuration scopes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The default ``user``, ``system``, and ``site`` scopes are defined using ``include:`` in ``$spack/etc/spack/include.yaml``: + +.. literalinclude:: _spack_root/etc/spack/include.yaml + :language: yaml + +You can see that all three of these scopes are given meaningful names, and all three are ``optional``, i.e., they'll be ignored if their directories do not exist. +The ``user`` and ``system`` scopes can also be disabled by setting ``SPACK_DISABLE_LOCAL_CONFIG``. +Finally, the ``user`` scope can be overridden with a path in ``SPACK_USER_CONFIG_PATH`` if it is set. + +Overriding scopes by name +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Configuration scopes have unique names. +This means that you can use the ``name:`` attribute to *replace* a built-in scope. +If you supply an environment like this: + +.. code-block:: yaml + + spack: + include: + - path: foo + name: user + +the newly included ``user`` scope will *completely* override the built-in ``user`` scope:: + + > spack -e ~/env config scopes -p + Scope Path + command_line + env:/home/username/env /home/username/env/spack.yaml/ + user /home/username/env/foo/ + spack /home/username/spack/etc/spack/ + site /home/username/spack/etc/spack/site/ + defaults /home/username/spack/etc/spack/defaults/ + defaults:darwin /home/username/spack/etc/spack/defaults/darwin/ + defaults:base /home/username/spack/etc/spack/defaults/base/ + _builtin + +.. warning:: + + Using ``name:`` to override the ``defaults`` scope can have *very* unexpected consequences and is not advised. + +.. versionadded:: 1.1 + The ``name:`` attribute. + +Overriding built-in scopes with ``include::`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In some cases, you may want to override *all* of the built-in configuration scopes. +The ``user`` and ``system`` scopes depend on the user and the machine on which Spack is running, and they can end up bringing in unexpected configuration settings in surprising ways. + +If you want to eliminate them completely from an environment, you can write: + +.. code-block:: yaml + + spack: + include:: [] + +This overrides all scopes except the ``defaults`` that Spack needs in order to function. +You can see that ``spack``, ``user``, and ``site`` are overridden:: + + > spack -e ~/env config scopes -vp + Scope Type Status Path + command_line internal active + env:/home/username/env env,path active /home/username/env/spack.yaml/ + spack path override /home/username/spack/etc/spack/ + user include,path override /home/username/.spack/ + site include,path override /home/username/spack/etc/spack/site/ + defaults path active /home/username/spack/etc/spack/defaults/ + defaults:darwin include,path active /home/username/spack/etc/spack/defaults/darwin/ + defaults:base include,path active /home/username/spack/etc/spack/defaults/base/ + _builtin internal active + +And if you run ``spack config blame``, the settings from these scopes will no longer show up. +The ``defaults`` scope is not overridden, as it is needed by Spack to function. +This allows you to create completely isolated environments that do not bring in external settings. + +.. versionadded:: 1.1 + ``include::`` with two colons for overriding. diff --git a/lib/spack/docs/index.rst b/lib/spack/docs/index.rst index de03dc2122e892..2aaa4c6d0aaba4 100644 --- a/lib/spack/docs/index.rst +++ b/lib/spack/docs/index.rst @@ -1,8 +1,9 @@ -.. Copyright Spack Project Developers. 
See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) -.. Spack documentation master file, created by + Spack documentation master file, created by sphinx-quickstart on Mon Dec 9 15:32:41 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. @@ -11,30 +12,20 @@ :description lang=en: Documentation of Spack, a flexible package manager for high-performance computing, designed to support multiple versions and configurations of software on a wide variety of platforms. -=================== Spack =================== -Spack is a package management tool designed to support multiple -versions and configurations of software on a wide variety of platforms -and environments. It was designed for large supercomputing centers, -where many users and application teams share common installations of -software on clusters with exotic architectures, using libraries that -do not have a standard ABI. Spack is non-destructive: installing a -new version does not break existing installations, so many -configurations can coexist on the same system. - -Most importantly, Spack is *simple*. It offers a simple *spec* syntax -so that users can specify versions and configuration options -concisely. Spack is also simple for package authors: package files -are written in pure Python, and specs allow package authors to -maintain a single file for many different builds of the same package. +Spack is a package management tool designed to support multiple versions and configurations of software on a wide variety of platforms and environments. +It was designed for large supercomputing centers, where many users and application teams share common installations of software on clusters with exotic architectures, using libraries that do not have a standard ABI. +Spack is non-destructive: installing a new version does not break existing installations, so many configurations can coexist on the same system. + +Most importantly, Spack is *simple*. +It offers a simple *spec* syntax so that users can specify versions and configuration options concisely. +Spack is also simple for package authors: package files are written in pure Python, and specs allow package authors to maintain a single file for many different builds of the same package. See the :doc:`features` for examples and highlights. -Get Spack from the `GitHub repository -`_ and install your first -package: +Get Spack from the `GitHub repository `_ and install your first package: .. code-block:: console @@ -45,8 +36,7 @@ package: .. note:: ``--depth=2`` prunes the git history to reduce the size of the Spack installation. -If you're new to Spack and want to start using it, see :doc:`getting_started`, -or refer to the full manual below. +If you're new to Spack and want to start using it, see :doc:`getting_started`, or refer to the full manual below. .. toctree:: @@ -65,16 +55,10 @@ or refer to the full manual below. package_fundamentals configuring_compilers - replace_conda_homebrew + environments_basics frequently_asked_questions getting_help -.. toctree:: - :maxdepth: 2 - :caption: Advanced Topics - - advanced_topics - .. toctree:: :maxdepth: 2 :caption: Links @@ -85,23 +69,29 @@ or refer to the full manual below. .. 
toctree:: :maxdepth: 2 - :caption: Reference + :caption: Configuration configuration config_yaml - include_yaml packages_yaml + toolchains_yaml build_settings - environments - env_vars_yaml - containers + repositories mirrors + chain module_file_support - repositories + include_yaml + env_vars_yaml + +.. toctree:: + :maxdepth: 2 + :caption: Reference + + environments + containers binary_caches bootstrapping command_index - chain extensions pipelines signing @@ -116,8 +106,16 @@ or refer to the full manual below. packaging_guide_testing packaging_guide_advanced build_systems + roles_and_responsibilities contribution_guide developer_guide + package_review_guide + +.. toctree:: + :maxdepth: 2 + :caption: Advanced Topics + + advanced_topics .. toctree:: :maxdepth: 2 @@ -127,9 +125,8 @@ or refer to the full manual below. Spack Builtin Repo Spack API Docs -================== Indices and tables -================== +------------------ * :ref:`genindex` * :ref:`modindex` diff --git a/lib/spack/docs/installing_prerequisites.rst b/lib/spack/docs/installing_prerequisites.rst index 99568b65bec5a8..7dd6d1da5b5ff3 100644 --- a/lib/spack/docs/installing_prerequisites.rst +++ b/lib/spack/docs/installing_prerequisites.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,68 +9,66 @@ .. _verify-spack-prerequisites: -=================== Spack Prerequisites =================== Spack relies on a few basic utilities to be present on the system where it runs, depending on the operating system. To install them, follow the instructions below. -.. tab-set:: - - .. tab-item:: Linux - - .. tab-set:: +Linux +----- - .. tab-item:: Debian/Ubuntu +For **Debian** and **Ubuntu** users: - .. code-block:: console +.. code-block:: console - apt update - apt install file bzip2 ca-certificates g++ gcc gfortran git gzip lsb-release patch python3 tar unzip xz-utils zstd + $ apt update + $ apt install file bzip2 ca-certificates g++ gcc gfortran git gzip lsb-release patch python3 tar unzip xz-utils zstd - .. tab-item:: RHEL/AlmaLinux/Rocky Linux +For **RHEL**, **AlmaLinux**, and **Rocky Linux** users: - .. code-block:: console +.. code-block:: console - dnf install epel-release - dnf install file bzip2 ca-certificates git gzip patch python3 tar unzip xz zstd gcc gcc-c++ gcc-gfortran + $ dnf install epel-release + $ dnf install file bzip2 ca-certificates git gzip patch python3 tar unzip xz zstd gcc gcc-c++ gcc-gfortran - .. tab-item:: macOS +macOS +----- - On macOS, the Command Line Tools package is required, and a full Xcode suite may be necessary for some packages such as Qt and apple-gl. - To install Xcode you can use the following command: +On macOS, the Command Line Tools package is required, and the full Xcode suite may be necessary for some packages, such as Qt and apple-gl. +To install Xcode, you can use the following command: - .. code-block:: console +.. code-block:: console - $ xcode-select --install + $ xcode-select --install - For most packages, the Xcode command-line tools are sufficient. - However, some packages like ``qt`` require the full Xcode suite. - You can check to see which you have installed by running: +For most packages, the Xcode command-line tools are sufficient. +However, some packages like ``qt`` require the full Xcode suite. +You can check to see which you have installed by running: - .. code-block:: console +.. 
code-block:: console - $ xcode-select -p + $ xcode-select -p - If the output is: +If the output is: - .. code-block:: none +.. code-block:: none - /Applications/Xcode.app/Contents/Developer + /Applications/Xcode.app/Contents/Developer - you already have the full Xcode suite installed. If the output is: +you already have the full Xcode suite installed. +If the output is: - .. code-block:: none +.. code-block:: none - /Library/Developer/CommandLineTools + /Library/Developer/CommandLineTools - you only have the command-line tools installed. - The full Xcode suite can be installed through the App Store. - Make sure you launch the Xcode application and accept the license agreement before using Spack. - It may ask you to install additional components. - Alternatively, the license can be accepted through the command line: +you only have the command-line tools installed. +The full Xcode suite can be installed through the App Store. +Make sure to launch the Xcode application and accept the license agreement before using Spack. +It may ask you to install additional components. +Alternatively, the Xcode license can be accepted through the command line: - .. code-block:: console +.. code-block:: console - $ sudo xcodebuild -license accept + $ sudo xcodebuild -license accept diff --git a/lib/spack/docs/mirrors.rst b/lib/spack/docs/mirrors.rst index b93e90f0c8a2e2..436c3aff56eb08 100644 --- a/lib/spack/docs/mirrors.rst +++ b/lib/spack/docs/mirrors.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,16 +9,13 @@ .. _mirrors: -====================== Mirrors (mirrors.yaml) ====================== Some sites may not have access to the internet for fetching packages. -These sites will need a local repository of tarballs from which they -can get their files. Spack has support for this with *mirrors*. A -mirror is a URL that points to a directory, either on the local -filesystem or on some server, containing tarballs for all of Spack's -packages. +These sites will need a local repository of tarballs from which they can get their files. +Spack has support for this with *mirrors*. +A mirror is a URL that points to a directory, either on the local filesystem or on some server, containing tarballs for all of Spack's packages. Here's an example of a mirror's directory structure: @@ -43,52 +41,41 @@ Here's an example of a mirror's directory structure: mvapich2/ mvapich2-1.9.tgz -The structure is very simple. There is a top-level directory. The -second level directories are named after packages, and the third level -contains tarballs for each package, named after each package. +The structure is very simple. +There is a top-level directory. +The second-level directories are named after packages, and the third level contains tarballs for each package, named after the package and version. .. note:: - Archives are **not** named exactly the way they were in the package's fetch - URL. They have the form ``<name>-<version>.<extension>``, where - ``<name>`` is Spack's name for the package, ``<version>`` is the - version of the tarball, and ``<extension>`` is whatever format the - package's fetch URL contains. + Archives are **not** named exactly the way they were in the package's fetch URL. + They have the form ``<name>-<version>.<extension>``, where ``<name>`` is Spack's name for the package, ``<version>`` is the version of the tarball, and ``<extension>`` is whatever format the package's fetch URL contains.
- In order to make mirror creation reasonably fast, we copy the - tarball in its original format to the mirror directory, but we do - not standardize on a particular compression algorithm, because this - would potentially require expanding and recompressing each archive. + In order to make mirror creation reasonably fast, we copy the tarball in its original format to the mirror directory, but we do not standardize on a particular compression algorithm, because this would potentially require expanding and recompressing each archive. .. _cmd-spack-mirror: ----------------- ``spack mirror`` ---------------- -Mirrors are managed with the ``spack mirror`` command. The help for -``spack mirror`` looks like this: +Mirrors are managed with the ``spack mirror`` command. +The help for ``spack mirror`` looks like this: .. command-output:: spack help mirror -The ``create`` command actually builds a mirror by fetching all of its -packages from the internet and checksumming them. +The ``create`` command actually builds a mirror by fetching all of its packages from the internet and checksumming them. -The other three commands are for managing mirror configuration. They -control the URL(s) from which Spack downloads its packages. +The other three commands are for managing mirror configuration. +They control the URL(s) from which Spack downloads its packages. .. _cmd-spack-mirror-create: ------------------------ ``spack mirror create`` ----------------------- -You can create a mirror using the ``spack mirror create`` command, assuming -you're on a machine where you can access the internet. +You can create a mirror using the ``spack mirror create`` command, assuming you're on a machine where you can access the internet. -The command will iterate through all of Spack's packages and download -the safe ones into a directory structure like the one above. Here is -what it looks like: +The command will iterate through all of Spack's packages and download the safe ones into a directory structure like the one above. +Here is what it looks like: .. code-block:: console @@ -121,32 +108,25 @@ what it looks like: 5 added 0 failed to fetch. -Once this is done, you can tar up the ``spack-mirror-2014-06-24`` directory and -copy it over to the machine you want it hosted on. +Once this is done, you can tar up the ``spack-mirror-2014-06-24`` directory and copy it over to the machine you want it hosted on. -^^^^^^^^^^^^^^^^^^^ Custom package sets ^^^^^^^^^^^^^^^^^^^ -Normally, ``spack mirror create`` downloads all the archives it has -checksums for. If you want to only create a mirror for a subset of -packages, you can do that by supplying a list of package specs on the -command line after ``spack mirror create``. For example, this -command: +Normally, ``spack mirror create`` downloads all the archives it has checksums for. +If you want to create a mirror for only a subset of packages, you can do that by supplying a list of package specs on the command line after ``spack mirror create``. +For example, this command: .. code-block:: console $ spack mirror create libelf@0.8.12: boost@1.44: -Will create a mirror for libelf versions greater than or equal to -0.8.12 and boost versions greater than or equal to 1.44. +will create a mirror for libelf versions greater than or equal to 0.8.12 and boost versions greater than or equal to 1.44.
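+
+If you also want to control where this subset mirror is written, ``spack mirror create`` accepts a destination directory via ``-d`` (a sketch; the path is illustrative):
+
+.. code-block:: console
+
+   $ spack mirror create -d /tmp/mirror-subset libelf@0.8.12: boost@1.44:
+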
-^^^^^^^^^^^^ Mirror files ^^^^^^^^^^^^ -If you have a *very* large number of packages you want to mirror, you -can supply a file with specs in it, one per line: +If you have a *very* large number of packages you want to mirror, you can supply a file with specs in it, one per line: .. code-block:: console @@ -159,15 +139,18 @@ can supply a file with specs in it, one per line: $ spack mirror create --file specs.txt ... -This is useful if there is a specific suite of software managed by -your site. +This is useful if there is a specific suite of software managed by your site. -^^^^^^^^^^^^^^^^^^ Mirror environment ^^^^^^^^^^^^^^^^^^ -To create a mirror of all packages required by a concrete environment, activate the environment and call ``spack mirror create -a``. -This is especially useful to create a mirror of an environment concretized on another machine. +To create a mirror of all packages required by a concrete environment, activate the environment and run ``spack mirror create -a``. +This is especially useful to create a mirror of an environment that was concretized on another machine. + +Optionally specify ``-j <jobs>`` to control the number of workers used to create a full mirror. +If not specified, the optimal number of workers is determined dynamically. +For a full mirror, the number of workers used is the minimum of 16, the number of available CPU cores, and the number of packages to mirror. +For individual packages, 1 worker is used. .. code-block:: console @@ -185,20 +168,20 @@ This is especially useful to create a mirror of an environment concretized on an .. _cmd-spack-mirror-add: --------------------- ``spack mirror add`` -------------------- -Once you have a mirror, you need to let Spack know about it. This is -relatively simple. First, figure out the URL for the mirror. If it's -a directory, you can use a file URL like this one: +Once you have a mirror, you need to let Spack know about it. +This is relatively simple. +First, figure out the URL for the mirror. +If it's a directory, you can use a file URL like this one: .. code-block:: none file://$HOME/spack-mirror-2014-06-24 -That points to the directory on the local filesystem. If it were on a -web server, you could use a URL like this one: +That points to the directory on the local filesystem. +If it were on a web server, you could use a URL like this one: https://example.com/some/web-hosted/directory/spack-mirror-2014-06-24 @@ -213,7 +196,6 @@ Each mirror has a name so that you can refer to it again later. .. _cmd-spack-mirror-list: ---------------------- ``spack mirror list`` --------------------- @@ -226,7 +208,6 @@ To see all the mirrors Spack knows about, run ``spack mirror list``: .. _cmd-spack-mirror-remove: ------------------------ ``spack mirror remove`` ----------------------- @@ -238,7 +219,6 @@ To remove a mirror by name, run: $ spack mirror list ==> No mirrors configured. ------------------ Mirror precedence ----------------- @@ -250,25 +230,17 @@ Adding a mirror really adds a line in ``~/.spack/mirrors.yaml``: local_filesystem: file:///home/username/spack-mirror-2014-06-24 remote_server: https://example.com/some/web-hosted/directory/spack-mirror-2014-06-24 -If you want to change the order in which mirrors are searched for -packages, you can edit this file and reorder the sections. Spack will -search the topmost mirror first and the bottom-most mirror last. +If you want to change the order in which mirrors are searched for packages, you can edit this file and reorder the sections. 
+Spack will search the topmost mirror first and the bottom-most mirror last. .. _caching: -------------------- Local Default Cache ------------------- -Spack caches resources that are downloaded as part of installations. The cache is -a valid Spack mirror: it uses the same directory structure and naming scheme -as other Spack mirrors (so it can be copied anywhere and referenced with a URL -like other mirrors). The mirror is maintained locally (within the Spack -installation directory) at :file:`var/spack/cache/`. It is always enabled (and -is always searched first when attempting to retrieve files for an installation) -but can be cleared with ``spack clean --misc-cache``; the cache directory can also -be deleted manually without issue. - -Caching includes retrieved tarball archives and source control repositories, but -only resources with an associated digest or commit ID (e.g. a revision number -for SVN) will be cached. +Spack caches resources that are downloaded as part of installations. +The cache is a valid Spack mirror: it uses the same directory structure and naming scheme as other Spack mirrors (so it can be copied anywhere and referenced with a URL like other mirrors). +The mirror is maintained locally (within the Spack installation directory) at :file:`var/spack/cache/`. +It is always enabled (and is always searched first when attempting to retrieve files for an installation) but can be cleared with ``spack clean --misc-cache``; the cache directory can also be deleted manually without issue. + +Caching includes retrieved tarball archives and source control repositories, but only resources with an associated digest or commit ID (e.g. a revision number for SVN) will be cached. diff --git a/lib/spack/docs/module_file_support.rst b/lib/spack/docs/module_file_support.rst index 9fe4fcbb308c1b..86036db649b1c3 100644 --- a/lib/spack/docs/module_file_support.rst +++ b/lib/spack/docs/module_file_support.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,28 +9,20 @@ .. _modules: -====================== Modules (modules.yaml) ====================== -The use of module systems to manage user environments in a controlled way -is a common practice at HPC centers that is sometimes embraced also by -individual programmers on their development machines. To support this -common practice Spack integrates with `Environment Modules -`_ and `Lmod -`_ by providing post-install hooks -that generate module files and commands to manipulate them. +The use of module systems to manage user environments in a controlled way is a common practice at HPC centers that is sometimes also embraced by individual programmers on their development machines. +To support this common practice, Spack integrates with `Environment Modules `_ and `Lmod `_ by providing post-install hooks that generate module files and commands to manipulate them. -Modules are one of several ways you can use Spack packages. For other -options that may fit your use case better, you should also look at -:ref:`spack load ` and :ref:`environments `. +Modules are one of several ways you can use Spack packages. +For other options that may fit your use case better, you should also look at :ref:`spack load ` and :ref:`environments `. ------------ Quick start ----------- -In the current version of Spack, module files are not generated by default.
To get started, you -can generate module files for all currently installed packages by running either +In the current version of Spack, module files are not generated by default. +To get started, you can generate module files for all currently installed packages by running either .. code-block:: console @@ -41,8 +34,7 @@ or $ spack module lmod refresh -Spack can also generate module files for all future installations automatically through the -following configuration: +Spack can also generate module files for all future installations automatically through the following configuration: .. code-block:: console @@ -54,8 +46,7 @@ or $ spack config add modules:default:enable:[lmod] -Assuming you have a module system installed, you should now be able to use the ``module`` command -to interact with them: +Assuming you have a module system installed, you should now be able to use the ``module`` command to interact with them: .. code-block:: console @@ -84,51 +75,41 @@ For example, you could type the following command to load the ``cmake`` module: $ module load cmake/3.7.2-gcc-6.3.0-fowuuby -Neither of these is particularly pretty, easy to remember, or easy to -type. Luckily, Spack offers many facilities for customizing the module -scheme used at your site. +Neither of these is particularly pretty, easy to remember, or easy to type. +Luckily, Spack offers many facilities for customizing the module scheme used at your site. -------------------------- Module file customization ------------------------- -The table below summarizes the essential information associated with -the different file formats that can be generated by Spack: +The table below summarizes the essential information associated with the different file formats that can be generated by Spack: - +-----------+--------------+------------------------------+----------------------------------------------+----------------------+ - | | Hierarchical | **Default root directory** | **Default template file** | **Compatible tools** | - +===========+==============+==============================+==============================================+======================+ - | ``tcl`` | No | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. Modules/Lmod | - +-----------+--------------+------------------------------+----------------------------------------------+----------------------+ - | ``lmod`` | Yes | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod | - +-----------+--------------+------------------------------+----------------------------------------------+----------------------+ ++-----------+--------------+------------------------------+----------------------------------------------+----------------------+ +| | Hierarchical | **Default root directory** | **Default template file** | **Compatible tools** | ++===========+==============+==============================+==============================================+======================+ +| ``tcl`` | No | share/spack/modules | share/spack/templates/modules/modulefile.tcl | Env. 
Modules/Lmod | ++-----------+--------------+------------------------------+----------------------------------------------+----------------------+ +| ``lmod`` | Yes | share/spack/lmod | share/spack/templates/modules/modulefile.lua | Lmod | ++-----------+--------------+------------------------------+----------------------------------------------+----------------------+ -Spack ships with sensible defaults for the generation of module files, but -you can customize many aspects of it to accommodate package or site specific needs. +Spack ships with sensible defaults for the generation of module files, but you can customize many aspects of it to accommodate package- or site-specific needs. In general you can override or extend the default behavior by: - 1. overriding certain callback APIs in the Python packages - 2. writing specific rules in the ``modules.yaml`` configuration file - 3. writing your own templates to override or extend the defaults +1. overriding certain callback APIs in the Python packages +2. writing specific rules in the ``modules.yaml`` configuration file +3. writing your own templates to override or extend the defaults -The former method lets you express changes in the run-time environment -that are needed to use the installed software properly, e.g. injecting variables -from language interpreters into their extensions. The latter two instead permit to -fine tune the filesystem layout, content and creation of module files to meet -site specific conventions. +The former method lets you express changes in the run-time environment that are needed to use the installed software properly, e.g. injecting variables from language interpreters into their extensions. +The latter two instead let you fine-tune the filesystem layout, content, and creation of module files to meet site-specific conventions. .. _overide-api-calls-in-package-py: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setting environment variables dynamically in ``package.py`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are two methods that you can implement in any ``package.py`` to dynamically affect the -content of the module files generated by Spack. The most important one is -``setup_run_environment``, which can be used to set environment variables in the module file that -depend on the spec: +There are two methods that you can implement in any ``package.py`` to dynamically affect the content of the module files generated by Spack. +The most important one is ``setup_run_environment``, which can be used to set environment variables in the module file that depend on the spec: .. code-block:: python @@ -136,52 +117,44 @@ depend on the spec: if self.spec.satisfies("+foo"): env.set("FOO", "bar") -The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``, -which allows a dependency to set variables in the module file of its dependents. This is typically -used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the -search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, ``PERL5LIB`` resp.), so it can locate -the packages at runtime. +The second, less commonly used, is ``setup_dependent_run_environment(self, env, dependent_spec)``, which allows a dependency to set variables in the module file of its dependents.
+This is typically used in packages like ``python``, ``r``, or ``perl`` to prepend the dependent's prefix to the search path of the interpreter (``PYTHONPATH``, ``R_LIBS``, and ``PERL5LIB``, respectively), so it can locate the packages at runtime. For example, a simplified version of the ``python`` package could look like this: .. code-block:: python - def setup_dependent_run_environment(self, env: EnvironmentModifications, dependent_spec: Spec) -> None: + def setup_dependent_run_environment( + self, env: EnvironmentModifications, dependent_spec: Spec + ) -> None: if dependent_spec.package.extends(self.spec): env.prepend_path("PYTHONPATH", dependent_spec.prefix.lib.python) -and would make any package that ``extends("python")`` have its library directory added to the -``PYTHONPATH`` environment variable in the module file. It's much more convenient to set this -variable here, than to repeat it in every Python extension's ``setup_run_environment`` method. +and would make any package that ``extends("python")`` have its library directory added to the ``PYTHONPATH`` environment variable in the module file. +It's much more convenient to set this variable here than to repeat it in every Python extension's ``setup_run_environment`` method. .. _modules-yaml: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``modules.yaml`` config file and module sets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The configuration files that control module generation behavior are named ``modules.yaml``. The -default configuration looks like this: +The configuration files that control module generation behavior are named ``modules.yaml``. +The default configuration looks like this: -.. literalinclude:: _spack_root/etc/spack/defaults/modules.yaml +.. literalinclude:: _spack_root/etc/spack/defaults/base/modules.yaml :language: yaml -You can define one or more **module sets**, each of which can be configured separately with regard -to install location, naming scheme, inclusion and exclusion, autoloading, et cetera. +You can define one or more **module sets**, each of which can be configured separately with regard to install location, naming scheme, inclusion and exclusion, autoloading, et cetera. -The default module set is aptly named ``default``. All -:ref:`Spack commands that operate on modules ` apply to the ``default`` -module set, unless another module set is specified explicitly (with the ``--name`` flag). +The default module set is aptly named ``default``. +All :ref:`Spack commands that operate on modules ` apply to the ``default`` module set, unless another module set is specified explicitly (with the ``--name`` flag). -^^^^^^^^^^^^^^^^^^^^^^^^^ Changing the modules root ^^^^^^^^^^^^^^^^^^^^^^^^^ -As shown in the table above, the default module root for ``lmod`` is -``$spack/share/spack/lmod`` and the default root for ``tcl`` is -``$spack/share/spack/modules``. This can be overridden for any module -set by changing the ``roots`` key of the configuration. +As shown in the table above, the default module root for ``lmod`` is ``$spack/share/spack/lmod`` and the default root for ``tcl`` is ``$spack/share/spack/modules``. +This can be overridden for any module set by changing the ``roots`` key of the configuration. .. code-block:: yaml @@ -194,16 +167,12 @@ set by changing the ``roots`` key of the configuration. lmod: /path/to/install/custom/lmod/modules # ... -This configuration will create two module sets. 
The default module set -will install its ``tcl`` modules to ``/path/to/install/tcl/modules`` -(and still install its lmod modules, if any, to the default -location). The set ``my_custom_lmod_modules`` will install its lmod -modules to ``/path/to/install/custom/lmod/modules`` (and still install -its tcl modules, if any, to the default location). +This configuration will create two module sets. +The default module set will install its ``tcl`` modules to ``/path/to/install/tcl/modules`` (and still install its lmod modules, if any, to the default location). +The set ``my_custom_lmod_modules`` will install its lmod modules to ``/path/to/install/custom/lmod/modules`` (and still install its tcl modules, if any, to the default location). -By default, an architecture-specific directory is added to the root -directory. A module set may override that behavior by setting the -``arch_folder`` config value to ``False``. +By default, an architecture-specific directory is added to the root directory. +A module set may override that behavior by setting the ``arch_folder`` config value to ``False``. .. code-block:: yaml @@ -213,12 +182,9 @@ directory. A module set may override that behavior by setting the tcl: /path/to/install/tcl/modules arch_folder: false -Obviously, having multiple module sets install modules to the default -location could be confusing to users of your modules. In the next -section, we will discuss enabling and disabling module types (module -file generators) for each module set. +Obviously, having multiple module sets install modules to the default location could be confusing to users of your modules. +In the next section, we will discuss enabling and disabling module types (module file generators) for each module set. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Automatically generating module files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -233,12 +199,10 @@ This is done by adding the desired module systems to the ``enable`` list. - tcl - lmod -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configuring ``tcl`` and ``lmod`` modules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -You can configure the behavior of either module system separately, under a key corresponding to -the generator being customized: +You can configure the behavior of either module system separately, under a key corresponding to the generator being customized: .. code-block:: yaml @@ -249,85 +213,72 @@ the generator being customized: lmod: # contains lmod specific customizations -In general, the configuration options that you can use in ``modules.yaml`` will -either change the layout of the module files on the filesystem, or they will affect -their content. For the latter point it is possible to use anonymous specs -to fine tune the set of packages on which the modifications should be applied. +In general, the configuration options that you can use in ``modules.yaml`` will either change the layout of the module files on the filesystem, or they will affect their content. +For the latter, it is possible to use anonymous specs to fine-tune the set of packages to which the modifications should be applied. .. _autoloading-dependencies: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Autoloading and hiding dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A module file should set the variables that are needed for an application to work. But since an -application often has many dependencies, where should all the environment variables for those be -set? 
In Spack the rule is that each package sets the runtime variables that are needed by the -package itself, and no more. This way, dependencies can be loaded standalone too, and duplication -of environment variables is avoided. +A module file should set the variables that are needed for an application to work. +But since an application often has many dependencies, where should all the environment variables for those be set? +In Spack, the rule is that each package sets the runtime variables that are needed by the package itself, and no more. +This way, dependencies can be loaded standalone too, and duplication of environment variables is avoided. -That means however that if you want to use an application, you need to load the modules for all its -dependencies. Of course this is not something you would want users to do manually. +That means, however, that if you want to use an application, you need to load the modules for all its dependencies. +Of course, this is not something you would want users to do manually. -Since Spack knows the dependency graph of every package, it can easily generate module files that -automatically load the modules for its dependencies recursively. It is enabled by default for both -Lmod and Environment Modules under the ``autoload: direct`` config option. The former system has -builtin support through the ``depends_on`` function, the latter simply uses a ``module load`` -statement. Both module systems (at least in newer versions) do reference counting, so that if a -module is loaded by two different modules, it will only be unloaded after the others are. +Since Spack knows the dependency graph of every package, it can easily generate module files that automatically load the modules for its dependencies recursively. +This behavior is enabled by default for both Lmod and Environment Modules under the ``autoload: direct`` config option. +The former has built-in support through the ``depends_on`` function, while the latter simply uses a ``module load`` statement. +Both module systems (at least in newer versions) do reference counting, so that if a module is loaded by two different modules, it will only be unloaded after the others are. The ``autoload`` key accepts the values: - * ``none``: no autoloading - * ``run``: autoload direct *run* type dependencies - * ``direct``: autoload direct *link and run* type dependencies - * ``all``: autoload all dependencies +* ``none``: no autoloading +* ``run``: autoload direct *run* type dependencies +* ``direct``: autoload direct *link and run* type dependencies +* ``all``: autoload all dependencies In case of ``run`` and ``direct``, a ``module load`` triggers a recursive load. -The ``direct`` option is most correct: there are cases where pure link dependencies need to set -variables for themselves, or need to have variables of their own dependencies set. +The ``direct`` option is most correct: there are cases where pure link dependencies need to set variables for themselves, or need to have variables of their own dependencies set. In practice however, ``run`` is often sufficient, and may make ``module load`` snappier. The ``all`` option is discouraged and seldom used.
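+For instance, a minimal sketch of where the ``autoload`` key lives in the configuration (this snippet is illustrative and mirrors the shape of the fuller note below; the choice of ``run`` is an assumption, not from the original text) would be:
+
+.. code-block:: yaml
+
+   modules:
+     default:
+       tcl:
+         all:
+           autoload: run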
A common complaint about autoloading is the large number of modules that are visible to the user. -Spack has a solution for this as well: ``hide_implicits: true``. This ensures that only those -packages you've explicitly installed are exposed by ``module avail``, but still allows for -autoloading of hidden dependencies. Lmod should support hiding implicits in general, while -Environment Modules requires version 4.7 or higher. +Spack has a solution for this as well: ``hide_implicits: true``. +This ensures that only those packages you've explicitly installed are exposed by ``module avail``, but still allows for autoloading of hidden dependencies. +Lmod should support hiding implicits in general, while Environment Modules requires version 4.7 or higher. .. note:: - If supported by your module system, we highly encourage the following configuration that enables - autoloading and hiding of implicits. It ensures all runtime variables are set correctly, - including those for dependencies, without overwhelming the user with a large number of available - modules. Further, it makes it easier to get readable module names without collisions, see the - section below on :ref:`modules-projections`. - - .. code-block:: yaml - - modules: - default: - tcl: - hide_implicits: true - all: - autoload: direct # or `run` - lmod: - hide_implicits: true - all: - autoload: direct # or `run` + If supported by your module system, we highly encourage the following configuration that enables autoloading and hiding of implicits. + It ensures all runtime variables are set correctly, including those for dependencies, without overwhelming the user with a large number of available modules. + Further, it makes it easier to get readable module names without collisions; see the section below on :ref:`modules-projections`. + + .. code-block:: yaml + + modules: + default: + tcl: + hide_implicits: true + all: + autoload: direct # or `run` + lmod: + hide_implicits: true + all: + autoload: direct # or `run` .. _anonymous_specs: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setting environment variables for selected packages in config ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In the configuration file you can filter particular specs, and make further changes to the -environment variables that go into their module files. This is very powerful when you want to avoid -:ref:`modifying the package itself `, or when you want to set -certain variables on multiple selected packages at once. +In the configuration file, you can filter particular specs and make further changes to the environment variables that go into their module files. +This is very powerful when you want to avoid :ref:`modifying the package itself `, or when you want to set certain variables on multiple selected packages at once. For instance, in the snippet below: @@ -340,99 +291,84 @@ For instance, in the snippet below: all: environment: set: - BAR: 'bar' + BAR: "bar" # This anonymous spec selects any package that # depends on mpi. The double colon at the # end clears the set of rules that matched so far. ^mpi:: environment: prepend_path: - PATH: '{^mpi.prefix}/bin' + PATH: "{^mpi.prefix}/bin" set: - BAR: 'baz' + BAR: "baz" # Selects any zlib package zlib: environment: prepend_path: - LD_LIBRARY_PATH: 'foo' + LD_LIBRARY_PATH: "foo" # Selects zlib compiled with gcc@4.8 zlib%gcc@4.8: environment: unset: - FOOBAR -you are instructing Spack to set the environment variable ``BAR=bar`` for every module, -unless the associated spec satisfies the abstract dependency ``^mpi`` in which case -``BAR=baz``, and the directory containing the respective MPI executables is prepended -to the ``PATH`` variable. 
-In addition in any spec that satisfies ``zlib`` the value ``foo`` will be -prepended to ``LD_LIBRARY_PATH`` and in any spec that satisfies ``zlib%gcc@4.8`` -the variable ``FOOBAR`` will be unset. +you are instructing Spack to set the environment variable ``BAR=bar`` for every module, unless the associated spec satisfies the abstract dependency ``^mpi``, in which case ``BAR=baz``, and the directory containing the respective MPI executables is prepended to the ``PATH`` variable. +In addition, in any spec that satisfies ``zlib``, the value ``foo`` will be prepended to ``LD_LIBRARY_PATH``, and in any spec that satisfies ``zlib%gcc@4.8``, the variable ``FOOBAR`` will be unset. -.. note:: - Order does matter - The modifications associated with the ``all`` keyword are always evaluated - first, no matter where they appear in the configuration file. All the other changes to - environment variables for matching specs are evaluated from top to bottom. +.. admonition:: Note: order does matter + :class: note + + The modifications associated with the ``all`` keyword are always evaluated first, no matter where they appear in the configuration file. + All the other changes to environment variables for matching specs are evaluated from top to bottom. .. warning:: - As general advice, it's often better to set as few unnecessary variables as possible. For - example, the following seemingly innocent and potentially useful configuration + As general advice, it's often better to set as few unnecessary variables as possible. + For example, the following seemingly innocent and potentially useful configuration - .. code-block:: yaml + .. code-block:: yaml - all: - environment: - set: - "{name}_ROOT": "{prefix}" + all: + environment: + set: + "{name}_ROOT": "{prefix}" - sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break - the ``gcc`` compiler: it uses this variable as its default search path for certain object - files and libraries, and by merely setting it, everything fails to link. + sets ``BINUTILS_ROOT`` to its prefix in modules for ``binutils``, which happens to break the ``gcc`` compiler: it uses this variable as its default search path for certain object files and libraries, and by merely setting it, everything fails to link. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Exclude or include specific module files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -You can use anonymous specs also to prevent module files from being written or -to force them to be written. Consider the case where you want to hide from users -all the boilerplate software that you had to build in order to bootstrap a new -compiler. Suppose for instance that ``gcc@4.4.7`` is the compiler provided by -your system. If you write a configuration file like: +You can also use anonymous specs to prevent module files from being written, or to force them to be written. +Consider the case where you want to hide from users all the boilerplate software that you had to build in order to bootstrap a new compiler. +Suppose, for instance, that ``gcc@4.4.7`` is the compiler provided by your system. +If you write a configuration file like: .. 
code-block:: yaml modules: default: tcl: - include: ['gcc', 'llvm'] # include will have precedence over exclude - exclude: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler + include: ["gcc", "llvm"] # include will have precedence over exclude + exclude: ["%gcc@4.4.7"] # Assuming gcc@4.4.7 is the system compiler -you will prevent the generation of module files for any package that -is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc`` -or any ``llvm`` installation. +you will prevent the generation of module files for any package that is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc`` or ``llvm`` installation. -It is safe to combine ``exclude`` and ``autoload`` -:ref:`mentioned above `. When ``exclude`` prevents a module file to be -generated for a dependency, the ``autoload`` feature will simply not generate a statement to load -it. +It is safe to combine ``exclude`` and ``autoload`` :ref:`mentioned above `. +When ``exclude`` prevents a module file from being generated for a dependency, the ``autoload`` feature will simply not generate a statement to load it.
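+To make the interplay concrete, here is a hedged sketch (not from the original text) of a configuration that combines both keys, excluding modules for the system compiler while still autoloading link and run dependencies:
+
+.. code-block:: yaml
+
+   modules:
+     default:
+       tcl:
+         exclude: ["%gcc@4.4.7"]
+         all:
+           autoload: direct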
.. _modules-projections: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Customize the naming of modules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The names of environment modules generated by Spack are not always easy to -fully comprehend due to the long hash in the name. There are three module -configuration options to help with that. The first is a global setting to -adjust the hash length. It can be set anywhere from 0 to 32 and has a default -length of 7. This is the representation of the hash in the module file name and -does not affect the size of the package hash. Be aware that the smaller the -hash length the more likely naming conflicts will occur. The following snippet -shows how to set hash length in the module file names: +The names of environment modules generated by Spack are not always easy to fully comprehend due to the long hash in the name. +There are three module configuration options to help with that. +The first is a global setting to adjust the hash length. +It can be set anywhere from 0 to 32 and has a default length of 7. +This is the representation of the hash in the module file name and does not affect the size of the package hash. +Be aware that the smaller the hash length, the more likely naming conflicts will occur. +The following snippet shows how to set hash length in the module file names: .. code-block:: yaml @@ -443,14 +379,12 @@ shows how to set hash length in the module file names: .. tip:: - Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number - modules exposed to the user. The hidden modules always contain the hash in their name, and are - not influenced by the ``hash_length`` setting. Hidden implicits thus make it easier to use a - short hash length or no hash at all, without risking name conflicts. + Using ``hide_implicits: true`` (see :ref:`autoloading-dependencies`) vastly reduces the number of modules exposed to the user. + The hidden modules always contain the hash in their name, and are not influenced by the ``hash_length`` setting. + Hidden implicits thus make it easier to use a short hash length or no hash at all, without risking name conflicts. -To help make module names more readable, and to help alleviate name conflicts -with a short hash, one can use the ``suffixes`` option in the modules -configuration file. This option will add strings to modules that match a spec. +To help make module names more readable, and to help alleviate name conflicts with a short hash, one can use the ``suffixes`` option in the modules configuration file. +This option will add strings to modules that match a spec. For instance, the following config options, .. code-block:: yaml @@ -460,40 +394,33 @@ For instance, the following config options, tcl: all: suffixes: - ^python@3: 'python{^python.version.up_to_2}' - ^openblas: 'openblas' + ^python@3: "python{^python.version.up_to_2}" + ^openblas: "openblas" -will add a ``python3.12`` to module names of packages compiled with Python 3.12, and similarly for -all specs depending on ``python@3``. This is useful to know which version of Python a set of Python -extensions is associated with. Likewise, the ``openblas`` string is attached to any program that -has openblas in the spec, most likely via the ``+blas`` variant specification. +will add a ``python3.12`` suffix to module names of packages compiled with Python 3.12, and similarly for all specs depending on ``python@3``. +This is useful for knowing which version of Python a set of Python extensions is associated with. +Likewise, the ``openblas`` string is attached to any program that has openblas in the spec, most likely via the ``+blas`` variant specification. -The most heavyweight solution to module naming is to change the entire -naming convention for module files. This uses the projections format -covered in :ref:`view_projections`. +The most heavyweight solution to module naming is to change the entire naming convention for module files. +This uses the projections format covered in :ref:`view_projections`. .. code-block:: yaml - modules: - default: - tcl: - projections: - all: '{name}/{version}-{compiler.name}-{compiler.version}-module' - ^mpi: '{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module' + modules: + default: + tcl: + projections: + all: "{name}/{version}-{compiler.name}-{compiler.version}-module" + ^mpi: "{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module" -will create module files that are nested in directories by package -name, contain the version and compiler name and version, and have the -word ``module`` before the hash for all specs that do not depend on -mpi, and will have the same information plus the MPI implementation -name and version for all packages that depend on mpi. +will create module files that are nested in directories by package name, contain the version and compiler name and version, and have the word ``module`` before the hash for all specs that do not depend on mpi, and will have the same information plus the MPI implementation name and version for all packages that depend on mpi. -When specifying module names by projection for Lmod modules, we -recommend NOT including names of dependencies (e.g., MPI, compilers) -that are already in the Lmod hierarchy. +When specifying module names by projection for Lmod modules, we recommend NOT including names of dependencies (e.g., MPI, compilers) that are already in the Lmod hierarchy. .. note:: + Tcl and Lua modules also allow for explicit conflicts between module files. .. code-block:: yaml @@ -501,140 +428,104 @@ that are already in the Lmod hierarchy. 
modules: default: enable: - - tcl + - tcl tcl: projections: - all: '{name}/{version}-{compiler.name}-{compiler.version}' + all: "{name}/{version}-{compiler.name}-{compiler.version}" all: conflict: - - '{name}' - - 'intel/14.0.1' + - "{name}" + - "intel/14.0.1" - will create module files that will conflict with ``intel/14.0.1`` and with the - base directory of the same module, effectively preventing the possibility to - load two or more versions of the same software at the same time. The tokens - that are available for use in this directive are the same understood by the - :meth:`~spack.spec.Spec.format` method. + will create module files that will conflict with ``intel/14.0.1`` and with the base directory of the same module, effectively preventing two or more versions of the same software from being loaded at the same time. + The tokens that are available for use in this directive are those understood by the :meth:`~spack.spec.Spec.format` method. - For Lmod and Environment Modules versions prior to 4.2, it is important to - express the conflict on both module files conflicting with each other. + For Lmod and Environment Modules versions prior to 4.2, it is important to express the conflict on both of the conflicting module files. -.. note:: - Lmod hierarchical module files - When ``lmod`` is activated Spack will generate a set of hierarchical lua module - files that are understood by Lmod. The hierarchy will always contain the - two layers ``Core`` / ``Compiler`` but can be further extended to - any of the virtual dependencies present in Spack. A case that could be useful in - practice is for instance: +.. admonition:: Note: Lmod hierarchical module files + :class: note - .. code-block:: yaml + When ``lmod`` is activated, Spack will generate a set of hierarchical lua module files that are understood by Lmod. + The hierarchy always contains the ``Core`` and ``Compiler`` layers, but can be extended to include any virtual packages present in Spack. + A case that could be useful in practice is for instance: - modules: - default: - enable: - - lmod - lmod: - core_compilers: - - 'gcc@4.8' - core_specs: - - 'python' - hierarchy: - - 'mpi' - - 'lapack' - - that will generate a hierarchy in which the ``lapack`` and ``mpi`` layer can be switched - independently. This allows a site to build the same libraries or applications against different - implementations of ``mpi`` and ``lapack``, and let Lmod switch safely from one to the - other. - - All packages built with a compiler in ``core_compilers`` and all - packages that satisfy a spec in ``core_specs`` will be put in the - ``Core`` hierarchy of the lua modules. + .. code-block:: yaml -.. warning:: - Consistency of Core packages - The user is responsible for maintaining consistency among core packages, as ``core_specs`` - bypasses the hierarchy that allows Lmod to safely switch between coherent software stacks. + modules: + default: + enable: + - lmod + lmod: + core_compilers: + - "gcc@4.8" + core_specs: + - "python" + hierarchy: + - "mpi" + - "lapack" + + that will generate a hierarchy in which the ``lapack`` and ``mpi`` layers can be switched independently. + This allows a site to build the same libraries or applications against different implementations of ``mpi`` and ``lapack``, and let Lmod switch safely from one to the other. + + All packages built with a compiler in ``core_compilers`` and all packages that satisfy a spec in ``core_specs`` will be put in the ``Core`` hierarchy of the lua modules. +.. 
admonition:: Warning: consistency of core packages + :class: warning + + The user is responsible for maintaining consistency among core packages, as ``core_specs`` bypasses the hierarchy that allows Lmod to safely switch between coherent software stacks. + +.. admonition:: Warning: deep hierarchies + :class: warning -.. warning:: - Deep hierarchies and ``lmod spider`` For hierarchies that are deeper than three layers, ``lmod spider`` may have some issues. See `this discussion on the Lmod project `_. .. _customize-env-modifications: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Customize environment modifications ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -You can control which prefixes in a Spack package are added to -environment variables with the ``prefix_inspections`` section; this -section maps relative prefixes to the list of environment variables -which should be updated with those prefixes. +You can control which prefixes in a Spack package are added to environment variables with the ``prefix_inspections`` section; this section maps relative prefixes to the list of environment variables that should be updated with those prefixes. -The ``prefix_inspections`` configuration is different from other -settings in that a ``prefix_inspections`` configuration at the -``modules`` level of the configuration file applies to all module -sets. This allows users to make general overrides to the default -inspections and customize them per-module-set. +The ``prefix_inspections`` configuration is different from other settings in that a ``prefix_inspections`` configuration at the ``modules`` level of the configuration file applies to all module sets. +This allows users to make general overrides to the default inspections and customize them per-module-set. .. code-block:: yaml - modules: - prefix_inspections: - ./bin: - - PATH - ./man: - - MANPATH - ./: - - CMAKE_PREFIX_PATH -Prefix inspections are only applied if the relative path inside the -installation prefix exists. In this case, for a Spack package ``foo`` -installed to ``/spack/prefix/foo``, if ``foo`` installs executables to -``bin`` but no manpages in ``man``, the generated module file for -``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and -``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not -update ``MANPATH``. -The default list of environment variables in this config section -includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH`` -and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH`` -on macOS. On Linux however, the corresponding ``LD_LIBRARY_PATH`` -variable is *not* set, because it affects the behavior of -system executables too. + modules: + prefix_inspections: + ./bin: + - PATH + ./man: + - MANPATH + ./: + - CMAKE_PREFIX_PATH +Prefix inspections are only applied if the relative path inside the installation prefix exists. +In this case, for a Spack package ``foo`` installed to ``/spack/prefix/foo``, if ``foo`` installs executables to ``bin`` but no manpages in ``man``, the generated module file for ``foo`` would update ``PATH`` to contain ``/spack/prefix/foo/bin`` and ``CMAKE_PREFIX_PATH`` to contain ``/spack/prefix/foo``, but would not update ``MANPATH``.
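+To illustrate, here is a hypothetical sketch (not verbatim Spack output) of the kind of entries the generated tcl module file for ``foo`` would contain in that case:
+
+.. code-block:: tcl
+
+   prepend-path PATH              /spack/prefix/foo/bin
+   prepend-path CMAKE_PREFIX_PATH /spack/prefix/foo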
+The default list of environment variables in this config section includes ``PATH``, ``MANPATH``, ``ACLOCAL_PATH``, ``PKG_CONFIG_PATH`` and ``CMAKE_PREFIX_PATH``, as well as ``DYLD_FALLBACK_LIBRARY_PATH`` on macOS. +On Linux, however, the corresponding ``LD_LIBRARY_PATH`` variable is *not* set, because it affects the behavior of system executables too. .. note:: - In general, the ``LD_LIBRARY_PATH`` variable is not required - when using packages built with Spack, thanks to the use of RPATH. - Some packages may still need the variable, which is best handled - on a per-package basis instead of globally, as explained in - :ref:`overide-api-calls-in-package-py`. + In general, the ``LD_LIBRARY_PATH`` variable is not required when using packages built with Spack, thanks to the use of RPATH. + Some packages may still need the variable, which is best handled on a per-package basis instead of globally, as explained in :ref:`overide-api-calls-in-package-py`. -There is a special case for prefix inspections relative to environment -views. If all of the following conditions hold for a module set -configuration: +There is a special case for prefix inspections relative to environment views. +If all of the following conditions hold for a module set configuration: -#. The configuration is for an :ref:`environment ` and - will never be applied outside the environment, +#. The configuration is for an :ref:`environment ` and will never be applied outside the environment, #. The environment in question is configured to use a view, -#. The :ref:`environment view is configured - ` with a projection that ensures - every package is linked to a unique directory, +#. The :ref:`environment view is configured ` with a projection that ensures every package is linked to a unique directory, -then the module set may be configured to create modules relative to -the environment view. This is specified by the ``use_view`` -configuration option in the module set. If ``True``, the module set is -constructed relative to the default view of the -environment. Otherwise, the value must be the name of the environment -view relative to which to construct modules, or ``False-ish`` to -disable the feature explicitly (the default is ``False``). +then the module set may be configured to create modules relative to the environment view. +This is specified by the ``use_view`` configuration option in the module set. +If ``True``, the module set is constructed relative to the default view of the environment. +Otherwise, the value must be the name of the environment view relative to which to construct modules, or ``False-ish`` to disable the feature explicitly (the default is ``False``). -If the ``use_view`` value is set in the config, then the prefix -inspections for the package are done relative to the package's path in -the view. +If the ``use_view`` value is set in the config, then the prefix inspections for the package are done relative to the package's path in the view. .. code-block:: yaml @@ -644,34 +535,23 @@ the view. use_view: my_view prefix_inspections: ./bin: - - PATH + - PATH view: my_view: + root: /path/to/my/view projections: - root: /path/to/my/view - all: '{name}-{hash}' - The ``spack`` key is relevant to :ref:`environment ` -configuration, and the view key is discussed in detail in the section -on :ref:`Configuring environment views -`. With this configuration the -generated module for package ``foo`` would set ``PATH`` to include -``/path/to/my/view/foo-/bin`` instead of -``/spack/prefix/foo/bin``. 
-The ``use_view`` option is useful when deploying a large software -stack to users who are likely to inspect the modules to find full -paths to software, when it is desirable to present the users with a -simpler set of paths than those generated by the Spack install tree. + all: "{name}-{hash}" + +The ``spack`` key is relevant to :ref:`environment ` configuration, and the view key is discussed in detail in the section on :ref:`Configuring environment views `. +With this configuration the generated module for package ``foo`` would set ``PATH`` to include ``/path/to/my/view/foo-<hash>/bin`` instead of ``/spack/prefix/foo/bin``. + +The ``use_view`` option is useful when deploying a large software stack to users who are likely to inspect the modules to find full paths to software, and when it is desirable to present users with a simpler set of paths than those generated by the Spack install tree. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Filter out environment modifications ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Modifications to certain environment variables in module files are there by -default, for instance because they are generated by prefix inspections. -If you want to prevent modifications to some environment variables, you can -do so by using the ``exclude_env_vars``: +Modifications to certain environment variables in module files are present by default, for instance because they are generated by prefix inspections. +If you want to prevent modifications to some environment variables, you can do so by using the ``exclude_env_vars`` option: .. code-block:: yaml @@ -681,98 +561,79 @@ do so by using the ``exclude_env_vars``: all: filter: # Exclude changes to any of these variables - exclude_env_vars: ['CPATH', 'LIBRARY_PATH'] + exclude_env_vars: ["CPATH", "LIBRARY_PATH"] -The configuration above will generate module files that will not contain -modifications to either ``CPATH`` or ``LIBRARY_PATH``. +The configuration above will generate module files that will not contain modifications to either ``CPATH`` or ``LIBRARY_PATH``. -^^^^^^^^^^^^^^^^^^^^^^ Select default modules ^^^^^^^^^^^^^^^^^^^^^^ -By default, when multiple modules of the same name share a directory, -the highest version number will be the default module. This behavior -of the ``module`` command can be overridden with a symlink named -``default`` to the desired default module. If you wish to configure -default modules with Spack, add a ``defaults`` key to your modules -configuration: +By default, when multiple modules of the same name share a directory, the highest version number will be the default module. +This behavior of the ``module`` command can be overridden with a symlink named ``default`` to the desired default module. +If you wish to configure default modules with Spack, add a ``defaults`` key to your modules configuration: .. code-block:: yaml - modules: - my-module-set: - tcl: - defaults: - - gcc@10.2.1 - - hdf5@1.2.10+mpi+hl%gcc + modules: + my-module-set: + tcl: + defaults: + - gcc@10.2.1 + - hdf5@1.2.10+mpi+hl%gcc -These defaults may be arbitrarily specific. For any package that -satisfies a default, Spack will generate the module file in the -appropriate path, and will generate a default symlink to the module -file as well. +These defaults may be arbitrarily specific. +For any package that satisfies a default, Spack will generate the module file in the appropriate path, and will generate a default symlink to the module file as well. .. 
warning:: - If Spack is configured to generate multiple default packages in the - same directory, the last modulefile to be generated will be the - default module. + + If Spack is configured to generate multiple default packages in the same directory, the last modulefile to be generated will be the default module. .. _maintaining-module-files: ------------------------ Maintaining Module Files ------------------------ -Each type of module file has a command with the same name associated -with it. The actions these commands permit are usually associated -with the maintenance of a production environment. Here's, for instance, -a sample of the features of the ``spack module tcl`` command: +Each type of module file has a command with the same name associated with it. +The actions these commands permit are usually associated with the maintenance of a production environment. +Here's, for instance, a sample of the features of the ``spack module tcl`` command: .. command-output:: spack module tcl --help .. _cmd-spack-module-refresh: -^^^^^^^^^^^^^^^^^^^^^^^^^^ Refresh the set of modules ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The subcommand that regenerates module files to update their content or -their layout is ``refresh``: +The subcommand that regenerates module files to update their content or their layout is ``refresh``: .. command-output:: spack module tcl refresh --help -A set of packages can be selected using anonymous specs for the optional -``constraint`` positional argument. Optionally the entire tree can be deleted -before regeneration if the change in layout is radical. +A set of packages can be selected using anonymous specs for the optional ``constraint`` positional argument. +Optionally, the entire tree can be deleted before regeneration if the change in layout is radical. .. _cmd-spack-module-rm: -^^^^^^^^^^^^^^^^^^^ Delete module files ^^^^^^^^^^^^^^^^^^^ -If instead what you need is just to delete a few module files, then the right -subcommand is ``rm``: +If instead you just need to delete a few module files, the right subcommand is ``rm``: .. command-output:: spack module tcl rm --help .. note:: - We care about your module files! - Every modification done on modules - that are already existing will ask for a confirmation by default. If - the command is used in a script it is possible though to pass the - ``-y`` argument, that will skip this safety measure. + + We care about your module files! + Every modification to already existing modules will ask for confirmation by default. + If the command is used in a script, you can pass the ``-y`` argument to skip this safety measure. .. _modules-in-shell-scripts: ------------------------------------- Using Spack modules in shell scripts ------------------------------------ -The easiest To enable additional Spack commands for loading and unloading -module files, and to add the correct path to ``MODULEPATH``, you need to -source the appropriate setup file. Assuming Spack is installed in -``$SPACK_ROOT``, run the appropriate command for your shell: +To enable additional Spack commands for loading and unloading module files, and to add the correct path to ``MODULEPATH``, you need to source the appropriate setup file. +Assuming Spack is installed in ``$SPACK_ROOT``, run the appropriate command for your shell: .. code-block:: console @@ -785,111 +646,97 @@ source the appropriate setup file. Assuming Spack is installed in # For fish $ . 
$SPACK_ROOT/share/spack/setup-env.fish -If you want to have Spack's shell support available on the command line -at any login you can put this source line in one of the files that are -sourced at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). Be -aware that the shell startup time may increase slightly as a result. +If you want to have Spack's shell support available on the command line at any login, you can put this source line in one of the files that are sourced at startup (like ``.profile``, ``.bashrc`` or ``.cshrc``). +Be aware that the shell startup time may increase slightly as a result. .. _cmd-spack-module-loads: -^^^^^^^^^^^^^^^^^^^^^^^^^^ ``spack module tcl loads`` ^^^^^^^^^^^^^^^^^^^^^^^^^^ -In some cases, it is desirable to use a Spack-generated module, rather -than relying on Spack's built-in user-environment modification -capabilities. To translate a spec into a module name, use ``spack -module tcl loads`` or ``spack module lmod loads`` depending on the -module system desired. +In some cases, it is desirable to use a Spack-generated module, rather than relying on Spack's built-in user-environment modification capabilities. +To translate a spec into a module name, use ``spack module tcl loads`` or ``spack module lmod loads`` depending on the module system desired. -To load not just a module, but also all the modules it depends on, use -the ``--dependencies`` option. This is not required for most modules -because Spack builds binaries with RPATH support. However, not all -packages use RPATH to find their dependencies: this can be true in -particular for Python extensions, which are currently *not* built with -RPATH. +To load not just a module, but also all the modules it depends on, use the ``--dependencies`` option. +This is not required for most modules because Spack builds binaries with RPATH support. +However, not all packages use RPATH to find their dependencies: this can be true in particular for Python extensions, which are currently *not* built with RPATH. Scripts to load modules recursively may be made with the command: .. code-block:: console - $ spack module tcl loads --dependencies <spec> + $ spack module tcl loads --dependencies <spec> An equivalent alternative using `process substitution `_ is: .. code-block:: console - $ source <( spack module tcl loads --dependencies <spec> ) + $ source <( spack module tcl loads --dependencies <spec> ) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Module Commands for Shell Scripts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Although Spack is flexible, the ``module`` command is much faster than ``spack load``. -This could become an issue when emitting a series of ``spack load`` -commands inside a shell script. By adding the ``--dependencies`` flag, -``spack module tcl loads`` may also be used to generate code that can be -cut-and-pasted into a shell script. For example: +This can become an issue when emitting a series of ``spack load`` commands inside a shell script. +By adding the ``--dependencies`` flag, ``spack module tcl loads`` may also be used to generate code that can be cut-and-pasted into a shell script. +For example: .. 
code-block:: console - $ spack module tcl loads --dependencies py-numpy git - # bzip2@1.0.6%gcc@4.9.3=linux-x86_64 - module load bzip2/1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx - # ncurses@6.0%gcc@4.9.3=linux-x86_64 - module load ncurses/6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv - # zlib@1.2.8%gcc@4.9.3=linux-x86_64 - module load zlib/1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z - # sqlite@3.8.5%gcc@4.9.3=linux-x86_64 - module load sqlite/3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr - # readline@6.3%gcc@4.9.3=linux-x86_64 - module load readline/6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3 - # python@3.5.1%gcc@4.9.3=linux-x86_64 - module load python/3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi - # py-setuptools@20.5%gcc@4.9.3=linux-x86_64 - module load py-setuptools/20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2 - # py-nose@1.3.7%gcc@4.9.3=linux-x86_64 - module load py-nose/1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli - # openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64 - module load openblas/0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y - # py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64 - module load py-numpy/1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r - # curl@7.47.1%gcc@4.9.3=linux-x86_64 - module load curl/7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi - # autoconf@2.69%gcc@4.9.3=linux-x86_64 - module load autoconf/2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4 - # cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64 - module load cmake/3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t - # expat@2.1.0%gcc@4.9.3=linux-x86_64 - module load expat/2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd - # git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64 - module load git/2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd + $ spack module tcl loads --dependencies py-numpy git + # bzip2@1.0.6%gcc@4.9.3=linux-x86_64 + module load bzip2/1.0.6-gcc-4.9.3-ktnrhkrmbbtlvnagfatrarzjojmkvzsx + # ncurses@6.0%gcc@4.9.3=linux-x86_64 + module load ncurses/6.0-gcc-4.9.3-kaazyneh3bjkfnalunchyqtygoe2mncv + # zlib@1.2.8%gcc@4.9.3=linux-x86_64 + module load zlib/1.2.8-gcc-4.9.3-v3ufwaahjnviyvgjcelo36nywx2ufj7z + # sqlite@3.8.5%gcc@4.9.3=linux-x86_64 + module load sqlite/3.8.5-gcc-4.9.3-a3eediswgd5f3rmto7g3szoew5nhehbr + # readline@6.3%gcc@4.9.3=linux-x86_64 + module load readline/6.3-gcc-4.9.3-se6r3lsycrwxyhreg4lqirp6xixxejh3 + # python@3.5.1%gcc@4.9.3=linux-x86_64 + module load python/3.5.1-gcc-4.9.3-5q5rsrtjld4u6jiicuvtnx52m7tfhegi + # py-setuptools@20.5%gcc@4.9.3=linux-x86_64 + module load py-setuptools/20.5-gcc-4.9.3-4qr2suj6p6glepnedmwhl4f62x64wxw2 + # py-nose@1.3.7%gcc@4.9.3=linux-x86_64 + module load py-nose/1.3.7-gcc-4.9.3-pwhtjw2dvdvfzjwuuztkzr7b4l6zepli + # openblas@0.2.17%gcc@4.9.3+shared=linux-x86_64 + module load openblas/0.2.17-gcc-4.9.3-pw6rmlom7apfsnjtzfttyayzc7nx5e7y + # py-numpy@1.11.0%gcc@4.9.3+blas+lapack=linux-x86_64 + module load py-numpy/1.11.0-gcc-4.9.3-mulodttw5pcyjufva4htsktwty4qd52r + # curl@7.47.1%gcc@4.9.3=linux-x86_64 + module load curl/7.47.1-gcc-4.9.3-ohz3fwsepm3b462p5lnaquv7op7naqbi + # autoconf@2.69%gcc@4.9.3=linux-x86_64 + module load autoconf/2.69-gcc-4.9.3-bkibjqhgqm5e3o423ogfv2y3o6h2uoq4 + # cmake@3.5.0%gcc@4.9.3~doc+ncurses+openssl~qt=linux-x86_64 + module load cmake/3.5.0-gcc-4.9.3-x7xnsklmgwla3ubfgzppamtbqk5rwn7t + # expat@2.1.0%gcc@4.9.3=linux-x86_64 + module load expat/2.1.0-gcc-4.9.3-6pkz2ucnk2e62imwakejjvbv6egncppd + # git@2.8.0-rc2%gcc@4.9.3+curl+expat=linux-x86_64 + module load 
git/2.8.0-rc2-gcc-4.9.3-3bib4hqtnv5xjjoq5ugt3inblt4xrgkd The script may be further edited by removing unnecessary modules. -^^^^^^^^^^^^^^^ Module Prefixes ^^^^^^^^^^^^^^^ -On some systems, modules are automatically prefixed with a certain -string; ``spack module tcl loads`` needs to know about that prefix when it -issues ``module load`` commands. Add the ``--prefix`` option to your -``spack module tcl loads`` commands if this is necessary. +On some systems, modules are automatically prefixed with a certain string; ``spack module tcl loads`` needs to know about that prefix when it issues ``module load`` commands. +Add the ``--prefix`` option to your ``spack module tcl loads`` commands if this is necessary. For example, consider the following on one system: .. code-block:: console - $ module avail - linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y + $ module avail + linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y - $ spack module tcl loads antlr # WRONG! - # antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64 - module load antlr/2.7.7-gcc-5.3.0-bdpl46y + $ spack module tcl loads antlr # WRONG! + # antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64 + module load antlr/2.7.7-gcc-5.3.0-bdpl46y - $ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr - # antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64 - module load linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y + $ spack module tcl loads --prefix linux-SuSE11-x86_64/ antlr + # antlr@2.7.7%gcc@5.3.0~csharp+cxx~java~python arch=linux-SuSE11-x86_64 + module load linux-SuSE11-x86_64/antlr/2.7.7-gcc-5.3.0-bdpl46y diff --git a/lib/spack/docs/package_api.rst b/lib/spack/docs/package_api.rst index cd7f4d27cea45c..53fd0b10d6c1bb 100644 --- a/lib/spack/docs/package_api.rst +++ b/lib/spack/docs/package_api.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -6,13 +7,13 @@ :description lang=en: An overview of the Spack Package API, a stable interface for package authors to interact with the Spack framework. -Spack Package API v2.2 -====================== +Spack Package API +================= This document describes the Spack Package API (:mod:`spack.package`), the stable interface for Spack package authors. It is assumed you have already read the :doc:`Spack Packaging Guide `. -The Spack Package API is the *only* module from the Spack codebase considered public API. +The Spack Package API is the *only* module in the Spack codebase considered public API. It re-exports essential functions and classes from various Spack modules, allowing package authors to import them directly from :mod:`spack.package` without needing to know Spack's internal structure. Spack Package API Versioning diff --git a/lib/spack/docs/package_fundamentals.rst b/lib/spack/docs/package_fundamentals.rst index b10a5bc491ff8f..8a6e86db9451b0 100644 --- a/lib/spack/docs/package_fundamentals.rst +++ b/lib/spack/docs/package_fundamentals.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,7 +9,6 @@ .. _basic-usage: -==================== Package Fundamentals ==================== @@ -24,7 +24,6 @@ In this section you'll learn: .. 
_basic-list-and-info-packages: --------------------------- Listing Available Packages -------------------------- @@ -33,26 +32,24 @@ You can search for available packages on the `packages.spack.io ` and :ref:`virtual dependencies -` are described in more detail later. +:ref:`Dependencies ` and :ref:`virtual dependencies ` are described in more detail later. .. _cmd-spack-versions: -^^^^^^^^^^^^^^^^^^ ``spack versions`` ^^^^^^^^^^^^^^^^^^ @@ -94,21 +88,18 @@ To see *more* available versions of a package, run ``spack versions``. For example: .. command-output:: spack versions libelf - :language: console + :language: spec -There are two sections in the output. *Safe versions* are versions -for which Spack has a checksum on file. It can verify that these -versions are downloaded correctly. +There are two sections in the output. +*Safe versions* are versions for which Spack has a checksum on file. +It can verify that these versions are downloaded correctly. -In many cases, Spack can also show you what versions are available out -on the web -- these are *remote versions*. Spack gets this information -by scraping it directly from package web pages. Depending on the -package and how its releases are organized, Spack may or may not be -able to find remote versions. +In many cases, Spack can also show you what versions are available out on the web -- these are *remote versions*. +Spack gets this information by scraping it directly from package web pages. +Depending on the package and how its releases are organized, Spack may or may not be able to find remote versions. .. _cmd-spack-providers: -^^^^^^^^^^^^^^^^^^^ ``spack providers`` ^^^^^^^^^^^^^^^^^^^ @@ -116,39 +107,35 @@ You can see what packages provide a particular virtual package using ``spack pro If you wanted to see what packages provide ``mpi``, you would just run: .. command-output:: spack providers mpi + :language: spec And if you *only* wanted to see packages that provide MPI-2, you would add a version specifier to the spec: .. command-output:: spack providers mpi@2 + :language: spec Notice that the package versions that provide insufficient MPI versions are now filtered out. ---------------------------- Installing and Uninstalling --------------------------- .. _cmd-spack-install: -^^^^^^^^^^^^^^^^^ ``spack install`` ^^^^^^^^^^^^^^^^^ ``spack install`` will install any package shown by ``spack list``. -For example, to install the latest version of the ``mpileaks`` -package, you might type this: +For example, to install the latest version of the ``mpileaks`` package, you might type this: -.. code-block:: console +.. code-block:: spec $ spack install mpileaks -If ``mpileaks`` depends on other packages, Spack will install the -dependencies first. It then fetches the ``mpileaks`` tarball, expands -it, verifies that it was downloaded without errors, builds it, and -installs it in its own directory under ``$SPACK_ROOT/opt``. You'll see -a number of messages from Spack, a lot of build output, and a message -that the package is installed. +If ``mpileaks`` depends on other packages, Spack will install the dependencies first. +It then fetches the ``mpileaks`` tarball, expands it, verifies that it was downloaded without errors, builds it, and installs it in its own directory under ``$SPACK_ROOT/opt``. +You'll see a number of messages from Spack, a lot of build output, and a message that the package is installed. -.. code-block:: console +.. code-block:: spec $ spack install mpileaks ... dependency build output ... 
@@ -160,64 +147,49 @@ that the package is installed. ==> mpileaks: Executing phase: 'install' [+] ~/spack/opt/linux-rhel7-broadwell/gcc-8.1.0/mpileaks-1.0-ph7pbnhl334wuhogmugriohcwempqry2 -The last line, with the ``[+]``, indicates where the package is -installed. +The last line, with the ``[+]``, indicates where the package is installed. -Add the Spack debug option (one or more times) -- ``spack -d install -mpileaks`` -- to get additional (and even more verbose) output. +Add the Spack debug option (one or more times) -- ``spack -d install mpileaks`` -- to get additional (and even more verbose) output. -^^^^^^^^^^^^^^^^^^^^^^^^^^^ Building a specific version ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Spack can also build *specific versions* of a package. To do this, -just add ``@`` after the package name, followed by a version: +Spack can also build *specific versions* of a package. +To do this, just add ``@`` after the package name, followed by a version: -.. code-block:: console +.. code-block:: spec $ spack install mpich@3.0.4 -Any number of versions of the same package can be installed at once -without interfering with each other. This is useful for multi-user -sites, as installing a version that one user needs will not disrupt -existing installations for other users. +Any number of versions of the same package can be installed at once without interfering with each other. +This is useful for multi-user sites, as installing a version that one user needs will not disrupt existing installations for other users. -In addition to different versions, Spack can customize the compiler, -compile-time options (variants), compiler flags, and platform (for -cross-compiles) of an installation. Spack is unique in that it can -also configure the *dependencies* a package is built with. For example, -two configurations of the same version of a package, one built with boost -1.39.0, and the other version built with version 1.43.0, can coexist. +In addition to different versions, Spack can customize the compiler, compile-time options (variants), compiler flags, and target architecture of an installation. +Spack is unique in that it can also configure the *dependencies* a package is built with. +For example, two configurations of the same version of a package, one built with boost 1.39.0 and the other with boost 1.43.0, can coexist. This can all be done on the command line using the *spec* syntax. -Spack calls the descriptor used to refer to a particular package -configuration a **spec**. In the commands above, ``mpileaks`` and -``mpileaks@3.0.4`` are both valid *specs*. We'll talk more about how -you can use them to customize an installation in :ref:`sec-specs`. +Spack calls the descriptor used to refer to a particular package configuration a **spec**. +In the commands above, ``mpileaks`` and ``mpileaks@3.0.4`` are both valid *specs*. +We'll talk more about how you can use them to customize an installation in :ref:`sec-specs`.
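+As a quick, hedged illustration of the syntax (this example is not from the original text, and it assumes ``mpileaks`` has a ``debug`` variant): ``%`` selects the compiler, ``+`` enables a variant, and ``^`` constrains a dependency:
+
+.. code-block:: spec
+
+   $ spack install mpileaks %gcc@8.1.0 +debug ^mpich@3.0.4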
+This minimizes unwanted rebuilds of common dependencies, in particular if you update Spack frequently. -In case you want the latest versions and configurations to be installed instead, -you can add the ``--fresh`` option: +In case you want the latest versions and configurations to be installed instead, you can add the ``--fresh`` option: -.. code-block:: console +.. code-block:: spec $ spack install --fresh mpich Reusing installations in this mode is "accidental" and happens only if there's a match between existing installations and what Spack would have installed anyway. -You can use the ``spack spec -I mpich`` command to see what -will be reused and what will be built before you install. +You can use the ``spack spec -I mpich`` command to see what will be reused and what will be built before you install. -You can configure Spack to use the ``--fresh`` behavior by default in -``concretizer.yaml``: +You can configure Spack to use the ``--fresh`` behavior by default in ``concretizer.yaml``: .. code-block:: yaml @@ -226,65 +198,54 @@ You can configure Spack to use the ``--fresh`` behavior by default in .. _cmd-spack-uninstall: -^^^^^^^^^^^^^^^^^^^ ``spack uninstall`` ^^^^^^^^^^^^^^^^^^^ -To uninstall a package, run ``spack uninstall ``. This will ask -the user for confirmation before completely removing the directory -in which the package was installed. +To uninstall a package, run ``spack uninstall ``. +This will ask the user for confirmation before completely removing the directory in which the package was installed. -.. code-block:: console +.. code-block:: spec $ spack uninstall mpich -If there are still installed packages that depend on the package to be -uninstalled, Spack will refuse to uninstall it. +If there are still installed packages that depend on the package to be uninstalled, Spack will refuse to uninstall it. -To uninstall a package and every package that depends on it, you may give the -``--dependents`` option. +To uninstall a package and every package that depends on it, you may give the ``--dependents`` option. -.. code-block:: console +.. code-block:: spec $ spack uninstall --dependents mpich -will display a list of all the packages that depend on ``mpich`` and, upon -confirmation, will uninstall them in the correct order. +will display a list of all the packages that depend on ``mpich`` and, upon confirmation, will uninstall them in the correct order. A command like -.. code-block:: console +.. code-block:: spec $ spack uninstall mpich may be ambiguous if multiple ``mpich`` configurations are installed. -For example, if both ``mpich@3.0.2`` and ``mpich@3.1`` are installed, -``mpich`` could refer to either one. Because it cannot determine which -one to uninstall, Spack will ask you either to provide a version number -to remove the ambiguity or use the ``--all`` option to uninstall all -matching packages. +For example, if both ``mpich@3.0.2`` and ``mpich@3.1`` are installed, ``mpich`` could refer to either one. +Because it cannot determine which one to uninstall, Spack will ask you either to provide a version number to remove the ambiguity or use the ``--all`` option to uninstall all matching packages. You may force uninstall a package with the ``--force`` option -.. code-block:: console +.. code-block:: spec $ spack uninstall --force mpich -but you risk breaking other installed packages. In general, it is safer to -remove dependent packages *before* removing their dependencies or to use the -``--dependents`` option. +but you risk breaking other installed packages. 
+In general, it is safer to remove dependent packages *before* removing their dependencies or to use the ``--dependents`` option. -.. _nondownloadable: +.. _cmd-spack-gc: -^^^^^^^^^^^^^^^^^^ Garbage collection ^^^^^^^^^^^^^^^^^^ -When Spack builds software from sources, it often installs tools that are needed -only to build or test other software. These are not necessary at runtime. -To support cases where removing these tools can be a benefit, Spack provides -the ``spack gc`` ("garbage collector") command, which will uninstall all unneeded packages: +When Spack builds software from sources, it often installs tools that are needed only to build or test other software. +These are not necessary at runtime. +To support cases where removing these tools can be a benefit, Spack provides the ``spack gc`` ("garbage collector") command, which will uninstall all unneeded packages: .. code-block:: console @@ -316,20 +277,18 @@ In the example above, ``spack gc`` scans the package database. It keeps only the packages that were explicitly installed by a user, along with their required ``link`` and ``run`` dependencies (including transitive dependencies). All other packages, such as build-only dependencies or orphaned packages, are identified as "garbage" and removed. -You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages -or :ref:`dependency-types` for a more thorough treatment of dependency types. +You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly installed packages or :ref:`dependency-types` for a more thorough treatment of dependency types. + +.. _cmd-spack-mark: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Marking packages explicit or implicit ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -By default, Spack will mark packages a user installs as explicitly installed, -while all of its dependencies will be marked as implicitly installed. Packages -can be marked manually as explicitly or implicitly installed by using -``spack mark``. This can be used in combination with ``spack gc`` to clean up -packages that are no longer required. +By default, Spack will mark packages a user installs as explicitly installed, while all of their dependencies will be marked as implicitly installed. +Packages can be marked manually as explicitly or implicitly installed by using ``spack mark``. +This can be used in combination with ``spack gc`` to clean up packages that are no longer required. -.. code-block:: console +.. code-block:: spec $ spack install m4 ==> 29005: Installing libsigsegv @@ -370,23 +329,18 @@ packages that are no longer required. ==> Do you want to proceed? [y/N] -In the example above, we ended up with two versions of ``m4`` because they depend -on different versions of ``libsigsegv``. ``spack gc`` will not remove any of -the packages because both versions of ``m4`` have been installed explicitly -and both versions of ``libsigsegv`` are required by the ``m4`` packages. +In the example above, we ended up with two versions of ``m4`` because they depend on different versions of ``libsigsegv``. +``spack gc`` will not remove any of the packages because both versions of ``m4`` have been installed explicitly and both versions of ``libsigsegv`` are required by the ``m4`` packages. -``spack mark`` can also be used to implement upgrade workflows. 
+The following example demonstrates how ``spack mark`` and ``spack gc`` can be used to only keep the current version of a package installed. -When updating Spack via ``git pull``, new versions for either ``libsigsegv`` -or ``m4`` might be introduced. This will cause Spack to install duplicates. -Because we only want to keep one version, we mark everything as implicitly -installed before updating Spack. If there is no new version for either of the -packages, ``spack install`` will simply mark them as explicitly installed, and -``spack gc`` will not remove them. +When updating Spack via ``git pull``, new versions for either ``libsigsegv`` or ``m4`` might be introduced. +This will cause Spack to install duplicates. +Because we only want to keep one version, we mark everything as implicitly installed before updating Spack. +If there is no new version for either of the packages, ``spack install`` will simply mark them as explicitly installed, and ``spack gc`` will not remove them. -.. code-block:: console +.. code-block:: spec $ spack install m4 ==> 62843: Installing libsigsegv @@ -408,31 +362,26 @@ packages, ``spack install`` will simply mark them as explicitly installed, and $ spack gc ==> There are no unused specs. Spack's store is clean. -When using this workflow for installations that contain more packages, care -must be taken to either only mark selected packages or issue ``spack install`` -for all packages that should be kept. +When using this workflow for installations that contain more packages, care must be taken to either only mark selected packages or issue ``spack install`` for all packages that should be kept. -You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly -or implicitly installed packages. +You can check :ref:`cmd-spack-find-metadata` to see how to query for explicitly or implicitly installed packages. + +.. _nondownloadable: -^^^^^^^^^^^^^^^^^^^^^^^^^ Non-Downloadable Tarballs ^^^^^^^^^^^^^^^^^^^^^^^^^ -The tarballs for some packages cannot be automatically downloaded by -Spack. This could be for a number of reasons: +The tarballs for some packages cannot be automatically downloaded by Spack. +This could be for a number of reasons: -#. The author requires users to manually accept a license agreement - before downloading (e.g., ``jdk`` and ``galahad``). +#. The author requires users to manually accept a license agreement before downloading (e.g., ``jdk`` and ``galahad``). -#. The software is proprietary and cannot be downloaded on the open - Internet. +#. The software is proprietary and cannot be downloaded on the open Internet. -To install these packages, one must create a mirror and manually add -the tarballs in question to it (see :ref:`mirrors`): +To install these packages, one must create a mirror and manually add the tarballs in question to it (see :ref:`mirrors`): -#. Create a directory for the mirror. You can create this directory - anywhere you like, it does not have to be inside ``~/.spack``: +#. Create a directory for the mirror. + You can create this directory anywhere you like; it does not have to be inside ``~/.spack``: .. code-block:: console @@ -445,8 +394,9 @@ the tarballs in question to it (see :ref:`mirrors`): mirrors: manual: file://~/.spack/manual_mirror -#. Put your tarballs in it. Tarballs should be named - ``/-.tar.gz``. For example: +#. Put your tarballs in it. + Tarballs should be named ``/-.tar.gz``. + For example: .. 
code-block:: console @@ -461,27 +411,23 @@ the tarballs in question to it (see :ref:`mirrors`): $ spack install galahad -------------------------- Seeing Installed Packages ------------------------- -We know that ``spack list`` shows you the names of available packages, -but how do you figure out which are already installed? +We know that ``spack list`` shows you the names of available packages, but how do you figure out which are already installed? .. _cmd-spack-find: -^^^^^^^^^^^^^^ ``spack find`` ^^^^^^^^^^^^^^ -``spack find`` shows the *specs* of installed packages. A spec is -like a name, but it has a version, compiler, architecture, and build -options associated with it. In Spack, you can have many installations -of the same package with different specs. +``spack find`` shows the *specs* of installed packages. +A spec is like a name, but it has a version, compiler, architecture, and build options associated with it. +In Spack, you can have many installations of the same package with different specs. Running ``spack find`` with no arguments lists installed packages: -.. code-block:: console +.. code-block:: spec $ spack find ==> 74 installed packages. @@ -514,32 +460,22 @@ Running ``spack find`` with no arguments lists installed packages: -- linux-debian7-x86_64 / gcc@4.9.2 -------------------------------- libelf@0.8.10 mpich@3.0.4 -Packages are divided into groups according to their architecture and -compiler. Within each group, Spack tries to keep the view simple and -only shows the version of installed packages. +Packages are divided into groups according to their architecture and compiler. +Within each group, Spack tries to keep the view simple and only shows the version of installed packages. .. _cmd-spack-find-metadata: -"""""""""""""""""""""""""""""""" Viewing more metadata """""""""""""""""""""""""""""""" -``spack find`` can filter the package list based on the package name, -spec, or a number of properties of their installation status. For -example, missing dependencies of a spec can be shown with -``--missing``, deprecated packages can be included with -``--deprecated``, packages that were explicitly installed with -``spack install `` can be singled out with ``--explicit``, and -those that have been pulled in only as dependencies with -``--implicit``. - -In some cases, there may be different configurations of the *same* -version of a package installed. For example, there are two -installations of ``libdwarf@20130729`` above. We can look at them -in more detail using ``spack find --deps`` and by asking only to show -``libdwarf`` packages: +``spack find`` can filter the package list based on the package name, spec, or a number of properties of their installation status. +For example, missing dependencies of a spec can be shown with ``--missing``, deprecated packages can be included with ``--deprecated``, packages that were explicitly installed with ``spack install `` can be singled out with ``--explicit``, and those that have been pulled in only as dependencies with ``--implicit``. -.. code-block:: console +In some cases, there may be different configurations of the *same* version of a package installed. +For example, there are two installations of ``libdwarf@20130729`` above. +We can look at them in more detail using ``spack find --deps`` and by asking only to show ``libdwarf`` packages: + +.. code-block:: spec $ spack find --deps libdwarf ==> 2 installed packages. 
@@ -549,27 +485,24 @@ in more detail using ``spack find --deps`` and by asking only to show libdwarf@20130729-b52fac98 ^libelf@0.8.13 -Now we see that the two instances of ``libdwarf`` depend on -*different* versions of ``libelf``: 0.8.12 and 0.8.13. This view can -become complicated for packages with many dependencies. If you just -want to know whether two packages' dependencies differ, you can use -``spack find --long``: +Now we see that the two instances of ``libdwarf`` depend on *different* versions of ``libelf``: 0.8.12 and 0.8.13. +This view can become complicated for packages with many dependencies. +If you just want to know whether two packages' dependencies differ, you can use ``spack find --long``: -.. code-block:: console +.. code-block:: spec $ spack find --long libdwarf ==> 2 installed packages. -- linux-debian7-x86_64 / gcc@4.4.7 -------------------------------- libdwarf@20130729-d9b90962 libdwarf@20130729-b52fac98 -Now the ``libdwarf`` installs have hashes after their names. These are -hashes over all of the dependencies of each package. If the hashes -are the same, then the packages have the same dependency configuration. +Now the ``libdwarf`` installs have hashes after their names. +These are hashes over all of the dependencies of each package. +If the hashes are the same, then the packages have the same dependency configuration. -If you want to know the path where each package is installed, you can -use ``spack find --paths``: +If you want to know the path where each package is installed, you can use ``spack find --paths``: -.. code-block:: console +.. code-block:: spec $ spack find --paths ==> 74 installed packages. @@ -583,10 +516,9 @@ use ``spack find --paths``: callpath@1.0.2 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/callpath@1.0.2-5dce4318 ... -You can restrict your search to a particular package by supplying its -name: +You can restrict your search to a particular package by supplying its name: -.. code-block:: console +.. code-block:: spec $ spack find --paths libelf -- linux-debian7-x86_64 / gcc@4.4.7 -------------------------------- @@ -594,45 +526,39 @@ name: libelf@0.8.12 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/libelf@0.8.12 libelf@0.8.13 ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/libelf@0.8.13 -"""""""""""""""""""""""""""""""" Spec queries """""""""""""""""""""""""""""""" -``spack find`` actually does a lot more than this. You can use -*specs* to query for specific configurations and builds of each -package. If you want to find only libelf versions greater than version -0.8.12, you could say: +``spack find`` actually does a lot more than this. +You can use *specs* to query for specific configurations and builds of each package. +If you want to find only libelf versions greater than version 0.8.12, you could say: -.. code-block:: console +.. code-block:: spec $ spack find libelf@0.8.12: -- linux-debian7-x86_64 / gcc@4.4.7 -------------------------------- libelf@0.8.12 libelf@0.8.13 -Finding just the versions of libdwarf built with a particular version -of libelf would look like this: +Finding just the versions of libdwarf built with a particular version of libelf would look like this: -.. code-block:: console +.. code-block:: spec $ spack find --long libdwarf ^libelf@0.8.12 ==> 1 installed packages. -- linux-debian7-x86_64 / gcc@4.4.7 -------------------------------- libdwarf@20130729-d9b90962 -We can also search for packages that have a certain attribute. 
For example, -``spack find libdwarf +debug`` will show only installations of libdwarf -with the 'debug' compile-time option enabled. +We can also search for packages that have a certain attribute. +For example, ``spack find libdwarf +debug`` will show only installations of libdwarf with the 'debug' compile-time option enabled. The full spec syntax is discussed in detail in :ref:`sec-specs`. -"""""""""""""""""""""""""""""""" Machine-readable output """""""""""""""""""""""""""""""" -If you only want to see very specific things about installed packages, -Spack has some options for you. ``spack find --format`` can be used to -output only specific fields: +If you only want to see very specific things about installed packages, Spack has some options for you. +``spack find --format`` can be used to output only specific fields: .. code-block:: console @@ -656,16 +582,13 @@ or: 7cf6onn ... -This uses the same syntax as described in the documentation for -:meth:`~spack.spec.Spec.format` -- you can use any of the options there. -This is useful for passing metadata about packages to other command-line -tools. +This uses the same syntax as described in the documentation for :meth:`~spack.spec.Spec.format` -- you can use any of the options there. +This is useful for passing metadata about packages to other command-line tools. -Alternatively, if you want something even more machine readable, you can -output each spec as JSON records using ``spack find --json``. This will -output metadata on specs and all dependencies as JSON: +Alternatively, if you want something even more machine readable, you can output each spec as JSON records using ``spack find --json``. +This will output metadata on specs and all dependencies as JSON: -.. code-block:: console +.. code-block:: spec $ spack find --json sqlite@3.28.0 [ @@ -706,8 +629,7 @@ output metadata on specs and all dependencies as JSON: ... ] -You can use this with tools like `jq `_ to quickly create JSON records -structured the way you want: +You can use this with tools like `jq `_ to quickly create JSON records structured the way you want: .. code-block:: console @@ -728,23 +650,22 @@ structured the way you want: "hash": "zvaa4lhlhilypw5quj3akyd3apbq5gap" } +.. _cmd-spack-diff: -^^^^^^^^^^^^^^ ``spack diff`` ^^^^^^^^^^^^^^ -It's often the case that you have two versions of a spec that you need to -disambiguate. Let's say that we've installed two variants of zlib, one with -and one without the optimize variant: +It's often the case that you have two versions of a spec that you need to disambiguate. +Let's say that we've installed two variants of zlib, one with and one without the optimize variant: -.. code-block:: console +.. code-block:: spec $ spack install zlib $ spack install zlib -optimize When we do ``spack find``, we see the two versions. -.. code-block:: console +.. code-block:: spec $ spack find zlib ==> 2 installed packages @@ -755,7 +676,7 @@ When we do ``spack find``, we see the two versions. Let's say we want to uninstall ``zlib``. We run the command and quickly encounter a problem because two versions are installed. -.. code-block:: console +.. code-block:: spec $ spack uninstall zlib ==> Error: zlib matches multiple packages: @@ -768,17 +689,16 @@ We run the command and quickly encounter a problem because two versions are inst b) specify the spec by its hash (e.g. `spack uninstall /hash`), or c) use `spack uninstall --all` to uninstall ALL matching specs. -Oh no! 
We can see from the above that we have two different versions of zlib installed, -and the only difference between the two is the hash. This is a good use case for -``spack diff``, which can easily show us the "diff" or set difference -between properties for two packages. Let's try it out. -Because the only difference we see in the ``spack find`` view is the hash, let's use -``spack diff`` to look for more detail. We will provide the two hashes: +Oh no! +We can see from the above that we have two different versions of zlib installed, and the only difference between the two is the hash. +This is a good use case for ``spack diff``, which can easily show us the "diff" or set difference between properties for two packages. +Let's try it out. +Because the only difference we see in the ``spack find`` view is the hash, let's use ``spack diff`` to look for more detail. +We will provide the two hashes: -.. code-block:: console +.. code-block:: diff $ spack diff /efzjziy /sl7m27m - ==> Warning: This interface is subject to change. --- zlib@1.2.11efzjziyc3dmb5h5u5azsthgbgog5mj7g +++ zlib@1.2.11sl7m27mzkbejtkrajigj3a3m37ygv4u2 @@ -787,55 +707,50 @@ Because the only difference we see in the ``spack find`` view is the hash, let's + zlib optimize True -The output is colored and written in the style of a git diff. This means that you -can copy and paste it into a GitHub markdown as a code block with language "diff" -and it will render nicely! Here is an example: +The output is colored and written in the style of a git diff. +This means that you can copy and paste it into a GitHub markdown as a code block with language "diff" and it will render nicely! +Here is an example: -.. code-block:: md +.. code-block:: diff - ```diff --- zlib@1.2.11/efzjziyc3dmb5h5u5azsthgbgog5mj7g +++ zlib@1.2.11/sl7m27mzkbejtkrajigj3a3m37ygv4u2 @@ variant_value @@ - zlib optimize False + zlib optimize True - ``` -Awesome! Now let's read the diff. It tells us that our first zlib was built with ``~optimize`` -(``False``) and the second was built with ``+optimize`` (``True``). You can't see it in the docs -here, but the output above is also colored based on the content being an addition (+) or -subtraction (-). +Awesome! +Now let's read the diff. +It tells us that our first zlib was built with ``~optimize`` (``False``) and the second was built with ``+optimize`` (``True``). +You can't see it in the docs here, but the output above is also colored based on the content being an addition (+) or subtraction (-). -This is a small example, but you will be able to see differences for any attributes on the -installation spec. Running ``spack diff A B`` means we'll see which spec attributes are on -``B`` but not on ``A`` (green) and which are on ``A`` but not on ``B`` (red). Here is another -example with an additional difference type, ``version``: +This is a small example, but you will be able to see differences for any attributes on the installation spec. +Running ``spack diff A B`` means we'll see which spec attributes are on ``B`` but not on ``A`` (green) and which are on ``A`` but not on ``B`` (red). +Here is another example with an additional difference type, ``version``: -.. code-block:: console +.. code-block:: diff - $ spack diff python@2.7.8 python@3.8.11 - ==> Warning: This interface is subject to change. 
+ $ spack diff python@2.7.8 python@3.8.11 - --- python@2.7.8/tsxdi6gl4lihp25qrm4d6nys3nypufbf - +++ python@3.8.11/yjtseru4nbpllbaxb46q7wfkyxbuvzxx - @@ variant_value @@ - - python patches a8c52415a8b03c0e5f28b5d52ae498f7a7e602007db2b9554df28cd5685839b8 - + python patches 0d98e93189bc278fbc37a50ed7f183bd8aaf249a8e1670a465f0db6bb4f8cf87 - @@ version @@ - - openssl 1.0.2u - + openssl 1.1.1k - - python 2.7.8 - + python 3.8.11 + --- python@2.7.8/tsxdi6gl4lihp25qrm4d6nys3nypufbf + +++ python@3.8.11/yjtseru4nbpllbaxb46q7wfkyxbuvzxx + @@ variant_value @@ + - python patches a8c52415a8b03c0e5f28b5d52ae498f7a7e602007db2b9554df28cd5685839b8 + + python patches 0d98e93189bc278fbc37a50ed7f183bd8aaf249a8e1670a465f0db6bb4f8cf87 + @@ version @@ + - openssl 1.0.2u + + openssl 1.1.1k + - python 2.7.8 + + python 3.8.11 Let's say that we were only interested in one kind of attribute above, ``version``. -We can ask the command to only output this attribute. To do this, you'd add -the ``--attribute`` for attribute parameter, which defaults to all. Here is how you -would filter to show just versions: +We can ask the command to only output this attribute. +To do this, you'd add the ``--attribute`` parameter, which defaults to showing all attributes. +Here is how you would filter to show just versions: -.. code-block:: console +.. code-block:: diff $ spack diff --attribute version python@2.7.8 python@3.8.11 - ==> Warning: This interface is subject to change. --- python@2.7.8/tsxdi6gl4lihp25qrm4d6nys3nypufbf +++ python@3.8.11/yjtseru4nbpllbaxb46q7wfkyxbuvzxx @@ -845,21 +760,20 @@ would filter to show just versions: - python 2.7.8 + python 3.8.11 -And you can add as many attributes as you'd like with multiple `--attribute` arguments -(for lots of attributes, you can use ``-a`` for short). Finally, if you want to view the -data as JSON (and possibly pipe into an output file), just add ``--json``: +And you can add as many attributes as you'd like with multiple ``--attribute`` arguments (for lots of attributes, you can use ``-a`` for short). +Finally, if you want to view the data as JSON (and possibly pipe into an output file), just add ``--json``: -.. code-block:: console +.. code-block:: spec $ spack diff --json python@2.7.8 python@3.8.11 -This data will be much longer because along with the differences for ``A`` vs. ``B`` and -``B`` vs. ``A``, the JSON output also shows the intersection. +This data will be much longer because along with the differences for ``A`` vs. ``B`` and ``B`` vs. ``A``, the JSON output also shows the intersection. ------------------------- Using Installed Packages ------------------------ @@ -874,17 +788,16 @@ Spack has three different ways to solve this problem, which fit different use ca .. _cmd-spack-load: +.. _cmd-spack-unload: -^^^^^^^^^^^^^^^^^^^^^^^ ``spack load / unload`` ^^^^^^^^^^^^^^^^^^^^^^^ If you sourced the appropriate shell script, as shown in :ref:`getting_started`, you can use the ``spack load`` command to quickly add a package to your ``PATH``. -For example, this will add the ``mpich`` package built with ``gcc`` to -your path: +For example, this will add the ``mpich`` package built with ``gcc`` to your path: -.. code-block:: console +.. code-block:: spec $ spack install mpich %gcc@4.4.7 @@ -894,26 +807,20 @@ your path: $ which mpicc ~/spack/opt/linux-debian7-x86_64/gcc@4.4.7/mpich@3.0.4/bin/mpicc -These commands will add appropriate directories to your ``PATH`` -and ``MANPATH`` according to the -:ref:`prefix inspections ` defined in your -modules configuration. 
-When you no longer want to use a package, you can type unload or -unuse similarly: +These commands will add appropriate directories to your ``PATH`` and ``MANPATH`` according to the :ref:`prefix inspections ` defined in your modules configuration. +When you no longer want to use a package, you can type unload or unuse similarly: -.. code-block:: console +.. code-block:: spec $ spack unload mpich %gcc@4.4.7 -""""""""""""""" Ambiguous specs """"""""""""""" -If a spec used with load/unload is ambiguous (i.e., more than one -installed package matches it), then Spack will warn you: +If a spec used with load/unload is ambiguous (i.e., more than one installed package matches it), then Spack will warn you: -.. code-block:: console +.. code-block:: spec $ spack load libelf ==> Error: libelf matches multiple packages. @@ -922,26 +829,23 @@ installed package matches it), then Spack will warn you: cd2u6jt libelf@0.8.13%intel@15.0.0 arch=linux-debian7-x86_64 Use a more specific spec -You can either type the ``spack load`` command again with a fully -qualified argument, or you can add just enough extra constraints to -identify one package. For example, above, the key differentiator is -that one ``libelf`` is built with the Intel compiler, while the other -used ``gcc``. You could therefore just type: +You can either type the ``spack load`` command again with a fully qualified argument, or you can add just enough extra constraints to identify one package. +For example, above, the key differentiator is that one ``libelf`` is built with the Intel compiler, while the other used ``gcc``. +You could therefore just type: -.. code-block:: console +.. code-block:: spec $ spack load libelf %intel -To identify just the one built with the Intel compiler. If you want to be -*very* specific, you can load it by its hash. For example, to load the -first ``libelf`` above, you would run: +To identify just the one built with the Intel compiler. +If you want to be *very* specific, you can load it by its hash. +For example, to load the first ``libelf`` above, you would run: -.. code-block:: console +.. code-block:: spec $ spack load /qmm4kso -To see which packages that you have loaded into your environment, you would -use ``spack find --loaded``. +To see which packages that you have loaded into your environment, you would use ``spack find --loaded``. .. code-block:: console @@ -953,49 +857,39 @@ use ``spack find --loaded``. -- linux-debian7 / intel@15.0.0 --------------------------------- libelf@0.8.13 -You can also use ``spack load --list`` to get the same output, but it -does not have the full set of query options that ``spack find`` offers. +You can also use ``spack load --list`` to get the same output, but it does not have the full set of query options that ``spack find`` offers. We'll learn more about Spack's spec syntax in :ref:`a later section `. .. _extensions: -^^^^^^^^^^^^^^^^^^ Spack environments ^^^^^^^^^^^^^^^^^^ -Spack can install a large number of Python packages. Their names are -typically prefixed with ``py-``. Installing and using them is no -different from any other package: +Spack can install a large number of Python packages. +Their names are typically prefixed with ``py-``. +Installing and using them is no different from any other package: -.. code-block:: console +.. 
code-block:: spec $ spack install py-numpy $ spack load py-numpy $ python3 >>> import numpy -The ``spack load`` command sets the ``PATH`` variable so that the correct Python -executable is used and makes sure that ``numpy`` and its dependencies can be -located in the ``PYTHONPATH``. +The ``spack load`` command sets the ``PATH`` variable so that the correct Python executable is used and makes sure that ``numpy`` and its dependencies can be located in the ``PYTHONPATH``. -Spack is different from other Python package managers in that it installs -every package into its *own* prefix. This is in contrast to ``pip``, which -installs all packages into the same prefix, whether in a virtual environment -or not. +Spack is different from other Python package managers in that it installs every package into its *own* prefix. +This is in contrast to ``pip``, which installs all packages into the same prefix, whether in a virtual environment or not. -For many users, **virtual environments** are more convenient than repeated -``spack load`` commands, particularly when working with multiple Python -packages. Fortunately, Spack supports environments itself, which together -with a view are no different from Python virtual environments. +For many users, **virtual environments** are more convenient than repeated ``spack load`` commands, particularly when working with multiple Python packages. +Fortunately, Spack supports environments itself, which together with a view are no different from Python virtual environments. -The recommended way of working with Python extensions such as ``py-numpy`` -is through :ref:`Environments `. The following example creates -a Spack environment with ``numpy`` in the current working directory. It also -puts a filesystem view in ``./view``, which is a more traditional combined -prefix for all packages in the environment. +The recommended way of working with Python extensions such as ``py-numpy`` is through :ref:`Environments `. +The following example creates a Spack environment with ``numpy`` in the current working directory. +It also puts a filesystem view in ``./view``, which is a more traditional combined prefix for all packages in the environment. -.. code-block:: console +.. code-block:: spec $ spack env create --with-view view --dir . $ spack -e . add py-numpy @@ -1010,9 +904,8 @@ Now you can activate the environment and start using the packages: $ python3 >>> import numpy -The environment view is also a virtual environment, which is useful if you are -sharing the environment with others who are unfamiliar with Spack. They can -either use the Python executable directly: +The environment view is also a virtual environment, which is useful if you are sharing the environment with others who are unfamiliar with Spack. +They can either use the Python executable directly: .. code-block:: console @@ -1027,10 +920,7 @@ or use the activation script: $ python3 >>> import numpy -In general, there should not be much difference between ``spack env activate`` -and using the virtual environment. The main advantage of ``spack env activate`` -is that it knows about more packages than just Python packages, and it may set -additional runtime variables that are not covered by the virtual environment -activation script. +In general, there should not be much difference between ``spack env activate`` and using the virtual environment. 
+The main advantage of ``spack env activate`` is that it knows about more packages than just Python packages, and it may set additional runtime variables that are not covered by the virtual environment activation script. See :ref:`environments` for a more in-depth description of Spack environments and customizations to views. diff --git a/lib/spack/docs/package_review_guide.rst b/lib/spack/docs/package_review_guide.rst new file mode 100644 index 00000000000000..a62066b3a5882f --- /dev/null +++ b/lib/spack/docs/package_review_guide.rst @@ -0,0 +1,388 @@ +.. Copyright Spack Project Developers. See COPYRIGHT file for details. + + SPDX-License-Identifier: (Apache-2.0 OR MIT) + +.. meta:: + :description lang=en: + This is a guide for people who review package pull requests and includes criteria for them to be merged into the develop branch. + +.. _package-review-guide: + +Package Review Guide +==================== + +Package reviews are performed with the goals of minimizing build errors and making packages as **uniform and stable** as possible. + +This section establishes guidelines to help assess Spack community `package repository `_ pull requests (PRs). +It describes the considerations and actions to be taken when reviewing new and updated `Spack packages `_. +In some cases, there are possible solutions to common issues. + +How to use this guide +--------------------- + +Whether you are a :ref:`Package Reviewer `, :ref:`Maintainer `, or :ref:`Committer `, this guide highlights relevant aspects to consider when reviewing package pull requests. +If you are a :ref:`Package Contributor ` (or simply ``Contributor``), you may also find the information and solutions useful in your work. +While we provide information on what to look for, the changes themselves should drive the actual review process. + +.. note:: + + :ref:`Confirmation of successful package builds ` of **all** affected versions can reduce the amount of effort needed to review a PR. + However, packaging conventions and the combinatorial nature of versions and directives mean each change should still be checked. + +Reviewing a new package +~~~~~~~~~~~~~~~~~~~~~~~ + +If the pull request includes a new package, then focus on answering the following questions: + +* Should the :ref:`package ` be added to the repository? +* Does the package :ref:`structure ` conform to conventions? +* Are the directives and their options correct? +* Do all :ref:`automated checks ` pass? + If not, are there easy-to-resolve CI and/or test issues that can be addressed or does the submitter need to investigate the failures? +* Is there :ref:`confirmation ` that every version builds successfully on at least one platform? + +Refer to the relevant sections below for more guidance. + +Reviewing changes to an existing package +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the pull request includes changes to an existing package, then focus on answering the following questions: + +* Are there any changes to the package :ref:`structure ` and, if so, do they conform to conventions? +* If there are new or updated directives, then are they and their options correct? +* If there are changes to the :ref:`url or its equivalent `, are the older versions still correct? +* If there are changes to the :ref:`git or equivalent URL `, do older branches exist in the new location? +* Do all :ref:`automated checks ` pass? + If not, are there easy-to-resolve CI and/or test issues that can be addressed or does the submitter need to investigate the failures? 
+* Is there :ref:`confirmation ` that every new version builds successfully on at least one platform? + +Refer to the relevant sections below for more guidance. + +.. _suitable_package: + +Package suitability +------------------- + +It is rare that a package would be considered inappropriate for inclusion in the public `Spack package `_ repository. +One exception is making packages for standard Perl modules. + +**Action.** +Should you find the software is not appropriate, ask that the package be removed from the PR if it is one of multiple affected files, or suggest the PR be closed. +In either case, explain the reason for the request. + +CORE Perl modules +~~~~~~~~~~~~~~~~~ + +In general, modules that are part of the standard installation for all listed Perl versions (i.e., ``CORE``) should **not be implemented or contributed**. +Details on the exceptions and process for checking Perl modules can be found in the :ref:`Perl build system ` documentation. + +.. _structure_reviews: + +Package structure +----------------- + +The `convention `_ for structuring Spack packages has metadata (key properties) listed first, followed by directives, then methods: + +* :ref:`url_equivalent_reviews`; +* :ref:`vcs_url_reviews`; +* :ref:`maintainers_reviews`; +* :ref:`license_reviews`; +* :ref:`version_reviews`; +* :ref:`variant_reviews`; +* :ref:`depends_on_reviews`; +* :ref:`packaging_conflicts` and :ref:`packaging_requires` directives; then +* methods. + +`Groupings `_ using ``with`` context managers can affect the order of dependency, conflict, and requires directives to some degree. +However, they do cut down on visual clutter and make packages more readable. + +**Action.** +If you see clear deviations from the convention, request that they be addressed. +When in doubt, ask others with merge privileges for advice. + +.. _url_equivalent_reviews: + +``url``, ``url_for_version``, or URL equivalent +----------------------------------------------- + +Changes to URLs may invalidate existing versions, which should be checked when there is a URL-related modification. +All packages have a URL, though for some :ref:`build-systems` it is derived automatically and not visible in the package. + +Reasons :ref:`versions ` may become invalid include: + +* the new URL does not support Spack version extrapolation; +* the addition of or changes to ``url_for_version`` involve checks of the ``spec``'s version instead of the ``version`` argument or the (usually older) versions are not covered; +* extrapolation of the derived URL no longer matches that of older versions; and +* the older versions are no longer available. + +**Action.** +Checking existing version directives with checksums can usually be done manually with the modified package using `spack checksum `_. + +**Solutions.** +The options that can be suggested for investigation depend on the source of the problem. + +In simpler cases involving ``url`` or ``url_for_version``, invalid versions can sometimes be corrected by ensuring all versions are covered by ``url_for_version``. +Alternatively, especially for older versions, the version-specific URL can be added as an argument to the ``version`` directive. + +Sometimes the derived URLs of versions on the hosting system can vary. +This commonly happens with Python packages. +For example, the case of one or more letters in the package name may change at some point (e.g., `py-sphinx `_). +Also, dashes may be replaced with underscores (e.g., `py-scikit-build `_). 
+In some cases, both changes can occur for the same package. +As these examples illustrate, it is sometimes possible to add a ``url_for_version`` method to override the default derived URL to ensure the correct one is returned. + +If older versions are no longer available and there is a chance someone has the package in a build cache, the usual approach is to first suggest :ref:`deprecating ` them in the package. + +.. _vcs_url_reviews: + +``git``, ``hg``, ``svn``, or ``cvs`` +------------------------------------ + +If the :ref:`repository-specific URL ` for fetching branches or the version control system (VCS) equivalent changes, there is a risk that the listed versions are no longer accessible. + +**Action.** +You may need to check the new source repository to confirm the presence of all of the listed versions. + +.. _maintainers_reviews: + +``maintainers`` directive +------------------------- + +**Action.** +If the new package does not have a :ref:`maintainers ` directive, ask the Contributor to add one. + +.. note:: + + This request is optional for existing packages. + + Be prepared for them to refuse. + +.. _license_reviews: + +``license`` directive +--------------------- + +**Action.** +If the new package does not have a :ref:`license ` directive, ask the Contributor to investigate and add it. + +.. note:: + + This request is optional for existing packages. + + Be prepared for them to refuse. + +.. _version_reviews: + +``version`` directives +---------------------- + +In general, Spack packages are expected to be built from source code. +There are a few exceptions (e.g., :ref:`BundlePackage `). +Typically every package will have at least one :ref:`version ` directive. + +The goals of reviewing version directives are to confirm that versions are listed in the proper order **and** that the arguments for new and updated versions are correct. + +.. note:: + + Additions and removals of version directives should generally trigger a review of :ref:`dependencies `. + +``version`` directive order +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By :ref:`convention `, version directives should be listed in descending order, from newest to oldest. +If branch versions are included, then they should be listed first. + +**Action.** +When versions are being added, check the ordering of the directives. +Request that the directives be re-ordered if any of the directives do not conform to the convention. + +.. note:: + + Edge cases, such as manually downloaded software, may be difficult to confirm. + +Checksums, commits, tags, and branches +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Checksums, commits, and tags** + Normally these version arguments are automatically validated by GitHub Actions using `spack ci verify-versions `_. + + **Action.** + Review the PR's ``verify-checksums`` precheck to confirm. + If necessary, checksums can usually be manually confirmed using `spack checksum `_. + + .. warning:: + + From a security and reproducibility standpoint, it is important that Spack be able to verify downloaded source. + This is accomplished using a hash (e.g., checksum or commit). + See :ref:`checksum verification ` for more information. + + Exceptions are allowed in rare cases, such as software supplied from reputable vendors. + When in doubt, ask others with merge privileges for advice. + +**Tags** + If a ``tag`` is provided without a ``commit``, the downloaded software will not be trusted. + + **Action.** + Suggest that the ``commit`` argument be included in the ``version`` directive. 
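For instance, a minimal sketch of the suggested fix in a package recipe might look like the following (the version, tag, and commit shown here are hypothetical, not taken from a real package):

.. code-block:: python

   # Hypothetical version directive: pairing the tag with its full commit
   # hash lets Spack verify exactly what it downloads, since a tag alone
   # is mutable and therefore not trusted.
   version("2.1.0", tag="v2.1.0", commit="0123456789abcdef0123456789abcdef01234567")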
+ +**Branches** + Confirming new branch versions involves checking that the branches exist in the repository *and* that the version and branch names are consistent. + Let's take each in turn. + + **Action.** + Confirming branch existence often involves checking the source repository, though it is not necessary if there is confirmation that the branch was built successfully from the package. + + In general, the version and branch names should match. + When they do not, it is sometimes the result of people not being aware of how Spack handles :ref:`version-comparison`. + + **Action.** + If there is a name mismatch, especially for the most common branch names (e.g., ``develop``, ``main``, and ``master``), ask why and suggest the arguments be changed such that they match the actual branch name. + +**Manual downloads** + + **Action.** + Since these can be difficult to confirm, it is acceptable to rely on the package's Maintainers, if any. + +Deprecating versions +~~~~~~~~~~~~~~~~~~~~ + +If someone is deprecating versions, it is good to find out why. +Sometimes there are concerns, such as security or lack of availability. + +**Action.** +Suggest the Contributor review the :ref:`deprecation guidelines ` before finalizing the changes if they haven't already explained why they made the choice in the PR description or comments. + +.. _variant_reviews: + +``variant`` directives +---------------------- + +:ref:`Variants ` represent build options, so any changes involving these directives should be reflected elsewhere in the package. + +Adding or modifying variants +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Action.** +Confirm that new or modified variants are actually used in the package. +The most common uses are additions and changes to: + +* :ref:`dependencies `; +* configure options; and/or +* build arguments. + +Removing or disabling variants +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the variant is still relevant to listed version directives, it may be preferable to adjust or add `conditions `_. + +**Action.** +Consider asking why the variant (or build option) is being removed and suggest making it conditional when it is still relevant. + +.. warning:: + + If the default value of a variant is changed in the PR, then there is a risk that other packages relying on that value will no longer build as others expect. + This may be worth noting in the review. + +.. _depends_on_reviews: + +``depends_on`` directives +------------------------- + +:ref:`Dependencies ` represent software that must be installed before the package builds or is able to work correctly. + +Updating dependencies +~~~~~~~~~~~~~~~~~~~~~ + +It is important that dependencies reflect the requirements of listed versions. +They only need to be checked in a review when versions are being added or removed, or the dependencies are being changed. + +**Action.** +Dependencies affected by such changes should be confirmed, when possible, and *at least* when the Contributor is not a Maintainer of the package. + +**Solutions.** +In some cases, the needed change may be as simple as ensuring the version range and/or variant options in the dependency are accurate. +In others, one or more of the dependencies needed by new versions are missing and need to be added. +Or there may be dependencies that are no longer relevant when versions requiring them are removed, meaning the dependencies should be removed as well. + +For example, it is not uncommon for Python package dependencies to be out of date when new versions are added. 
+In this case, check Python package dependencies by following the build system `guidelines `_. + +.. tip:: + + In general, refer to the relevant dependencies section, if any, for the package’s :ref:`build-systems` for guidance. + +Updating language and compiler dependencies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When :ref:`language and compiler dependencies ` were introduced, their ``depends_on`` directives were derived from the source for existing packages. +These dependencies are flagged with ``# generated`` comments when they have not been confirmed. +Unfortunately, the generated dependencies are not always complete or necessarily required. + +**Action.** +If these dependencies are being updated, ask that the ``# generated`` comments be removed if the Contributor can confirm they are relevant. +Definitely make sure Contributors do **not** include ``# generated`` on the dependencies they are adding to the package. + +.. _automated_checks_reviews: + +Failed automated checks +----------------------- + +All PRs are expected to pass **at least the required** automated checks. + +Style failures +~~~~~~~~~~~~~~ + +The PR may fail one or more style checks. + +**Action.** +If the failure is due to issues raised by the ``black`` style checker *and* the PR is otherwise ready to be merged, you can add ``@spackbot fix style`` in a comment to see if Spack will fix the errors. +Otherwise, inform the Contributor that the style failures need to be addressed. + +CI stack failures +~~~~~~~~~~~~~~~~~ + +Existing packages **may** be included in GitLab CI pipelines through inclusion in one or more `stacks `_. + +**Action.** +It is worth checking **at least a sampling** of the failed job logs, if present, to determine the possible cause and take or suggest an action accordingly. + +**CI Runner Failures** + Sometimes CI runners time out or the pods become unavailable. + + **Action.** + If that is the case, the resolution may be as simple as restarting the pipeline by adding a ``@spackbot run pipeline`` comment. + Otherwise, the Contributor will need to investigate and resolve the problem. + +**Stand-alone Test Failures** + Sometimes :ref:`stand-alone tests ` could be causing the build job to time out. + If the tests take too long, the issue could be that the package is running too many and/or long-running tests. + Or the tests may be trying to use resources (e.g., a batch scheduler) that are not available on runners. + + **Action.** + If the tests for a package are hanging, at a minimum create a `new issue `_ to flag the package if there is not one already. + + **(Temporary) Solution.** + Look at the package implementation to see if the tests are using a batch scheduler or there appear to be too many or long-running tests. + If that is the case, then a pull request should be created in the ``spack/spack-packages`` repository that adds the package to the ``broken-tests-packages`` list in the `ci configuration `_. + Once the fix PR is merged, then the affected PR can be rebased to pick up the change. + +.. _build_success_reviews: + +Successful builds +----------------- + +Is there evidence that the package builds successfully on at least one platform? +For a new package, we would ideally have confirmation for every version; for changes to an existing package, we would want confirmation of only the affected versions. 
+ +Acceptable forms of confirmation are **one or more of**: + +* the Contributor or another reviewer explicitly confirms a successful build of **each new version on at least one platform**; +* the software is built successfully by Spack CI in **at least one of the CI stacks**; and +* **at least one Maintainer** explicitly confirms they are able to successfully build the software. + +Individuals are expected to update the PR description or add a comment to explicitly confirm the builds. +You may need to check the CI stacks and/or outputs to confirm that there is a stack that builds the new version. + +.. note:: + + When builds are confirmed by individuals, we would prefer the output of ``spack debug report`` be included in either the PR description or a comment. diff --git a/lib/spack/docs/packages_yaml.rst b/lib/spack/docs/packages_yaml.rst index f188eb51592623..867d18b3793143 100644 --- a/lib/spack/docs/packages_yaml.rst +++ b/lib/spack/docs/packages_yaml.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,16 +9,12 @@ .. _packages-config: -================================ Package Settings (packages.yaml) ================================ -Spack allows you to customize how your software is built through the -``packages.yaml`` file. Using it, you can make Spack prefer particular -implementations of virtual dependencies (e.g., MPI or BLAS/LAPACK), -or you can make it prefer to build with particular compilers. You can -also tell Spack to use *external* software installations already -present on your system. +Spack allows you to customize how your software is built through the ``packages.yaml`` file. +Using it, you can make Spack prefer particular implementations of virtual dependencies (e.g., MPI or BLAS/LAPACK), or you can make it prefer to build with particular compilers. +You can also tell Spack to use *external* software installations already present on your system. At a high level, the ``packages.yaml`` file is structured like this: @@ -32,25 +29,20 @@ At a high level, the ``packages.yaml`` file is structured like this: all: # settings that apply to all packages. -So you can either set build preferences specifically for *one* package, -or you can specify that certain settings should apply to *all* packages. +You can either set build preferences specifically for *one* package, or you can specify that certain settings should apply to *all* packages. The types of settings you can customize are described in detail below. -Spack's build defaults are in the default -``etc/spack/defaults/packages.yaml`` file. You can override them in -``~/.spack/packages.yaml`` or ``etc/spack/packages.yaml``. For more -details on how this works, see :ref:`configuration-scopes`. +Spack's build defaults are in the default ``etc/spack/defaults/packages.yaml`` file. +You can override them in ``~/.spack/packages.yaml`` or ``etc/spack/packages.yaml``. +For more details on how this works, see :ref:`configuration-scopes`. .. _sec-external-packages: ------------------ -External Packages +External packages ----------------- -Spack can be configured to use externally-installed -packages rather than building its own packages. This may be desirable -if machines ship with system packages, such as a customized MPI -that should be used instead of Spack building its own MPI. 
+Spack can be configured to use externally-installed packages rather than building its own packages. +This may be desirable if machines ship with system packages, such as a customized MPI, which should be used instead of Spack building its own MPI. External packages are configured through the ``packages.yaml`` file. Here's an example of an external configuration: @@ -60,23 +52,17 @@ Here's an example of an external configuration: packages: openmpi: externals: - - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64" + - spec: "openmpi@1.4.3~debug" prefix: /opt/openmpi-1.4.3 - - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug" + - spec: "openmpi@1.4.3+debug" prefix: /opt/openmpi-1.4.3-debug - - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64" - prefix: /opt/openmpi-1.6.5-intel -This example lists three installations of OpenMPI, one built with GCC, -one built with GCC and debug information, and another built with Intel. -If Spack is asked to build a package that uses one of these MPIs as a -dependency, it will use the pre-installed OpenMPI in -the given directory. Note that the specified path is the top-level -install prefix, not the ``bin`` subdirectory. +This example lists two installations of OpenMPI, one with debug information, and one without. +If Spack is asked to build a package that uses one of these MPIs as a dependency, it will use the pre-installed OpenMPI in the given directory. +Note that the specified path is the top-level install prefix, not the ``bin`` subdirectory. -``packages.yaml`` can also be used to specify modules to load instead -of the installation prefixes. The following example says that module -``CMake/3.7.2`` provides cmake version 3.7.2. +``packages.yaml`` can also be used to specify modules to load instead of the installation prefixes. +The following example says that module ``CMake/3.7.2`` provides cmake version 3.7.2. .. code-block:: yaml @@ -86,294 +72,353 @@ of the installation prefixes. The following example says that module modules: - CMake/3.7.2 -Each ``packages.yaml`` begins with a ``packages:`` attribute, followed -by a list of package names. To specify externals, add an ``externals:`` -attribute under the package name, which lists externals. -Each external should specify a ``spec:`` string that should be as -well-defined as reasonably possible. If a -package lacks a spec component, such as missing a compiler or -package version, then Spack will guess the missing component based -on its most-favored packages, and it may guess incorrectly. - -Each package version and compiler listed in an external should -have entries in Spack's packages and compiler configuration, even -though the package and compiler may not ever be built. +Each ``packages.yaml`` begins with a ``packages:`` attribute, followed by a list of package names. +To specify externals, add an ``externals:`` attribute under the package name, which lists externals. +Each external should specify a ``spec:`` string that should be as well-defined as reasonably possible. +If a package lacks a spec component, such as missing a compiler or package version, then Spack will guess the missing component based on its most-favored packages, and it may guess incorrectly. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Extra attributes for external packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Sometimes external packages require additional attributes to be used -effectively. 
This information can be defined on a per-package basis -and stored in the ``extra_attributes`` section of the external package -configuration. In addition to per-package information, this section -can be used to define environment modifications to be performed -whenever the package is used. For example, if an external package is -built without ``rpath`` support, it may require ``LD_LIBRARY_PATH`` -settings to find its dependencies. This could be configured as -follows: - -.. code-block:: yaml - - packages: - mpich: - externals: - - spec: mpich@3.3 %clang@12.0.0 +hwloc - prefix: /path/to/mpich - extra_attributes: - environment: - prepend_path: - LD_LIBRARY_PATH: /path/to/hwloc/lib64 -See :ref:`configuration_environment_variables` for more information on -how to configure environment modifications in Spack config files. +.. _cmd-spack-external-find: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Extra attributes for external compilers -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Automatically find external packages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -External package configuration allows several extra attributes for -configuring compilers. The ``compilers`` extra attribute field is -required to clarify which paths within the compiler prefix are used -for which languages: +You can run the :ref:`spack external find ` command to search for system-provided packages and add them to ``packages.yaml``. +After running this command your ``packages.yaml`` may include new entries: .. code-block:: yaml packages: - gcc: + cmake: externals: - - spec: gcc@10.5.0 languages='c,c++,fortran' + - spec: cmake@3.17.2 prefix: /usr - extra_attributes: - compilers: - c: /usr/bin/gcc-10 - cxx: /usr/bin/g++-10 - fortran: /usr/bin/gfortran-10 -Other fields accepted by compilers under ``extra_attributes`` are ``flags``, ``environment``, ``extra_rpaths``, and ``implicit_rpaths``. - -.. code-block:: yaml - - packages: - gcc: - externals: - - spec: gcc@10.5.0 languages='c,c++,fortran' - prefix: /usr - extra_attributes: - compilers: - c: /usr/bin/gcc-10 - cxx: /usr/bin/g++-10 - fortran: /usr/bin/gfortran-10 - flags: - cflags: -O3 - fflags: -g -O2 - environment: - set: - GCC_ROOT: /usr - prepend_path: - PATH: /usr/unusual_path_for_ld/bin - implicit_rpaths: - - /usr/lib/gcc - extra_rpaths: - - /usr/lib/unusual_gcc_path - -The ``flags`` attribute specifies compiler flags to apply to every -spec that depends on this compiler. The accepted flag types are -``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and -``ldlibs``. In the example above, every spec compiled with this -compiler will pass the flags ``-g -O2`` to ``/usr/bin/gfortran-10`` -and will pass the flag ``-O3`` to ``/usr/bin/gcc-10``. - -The ``environment`` attribute specifies user environment modifications -to apply before every time the compiler is invoked. The available -operations are ``set``, ``unset``, ``prepend_path``, ``append_path``, -and ``remove_path``. In the example above, Spack will set -``GCC_ROOT=/usr`` and set ``PATH=/usr/unusual_path_for_ld/bin:$PATH`` -before handing control to the build system that will use this -compiler. - -The ``extra_rpaths`` and ``implicit_rpaths`` fields specify additional -paths to pass as rpaths to the linker when using this compiler. The -``implicit_rpaths`` field is filled in automatically by Spack when -detecting compilers, and the ``extra_rpaths`` field is available for -users to configure necessary rpaths that have not been detected by -Spack. 
In addition, paths from ``extra_rpaths`` are added as library
-search paths for the linker. In the example above, both
-``/usr/lib/gcc`` and ``/usr/lib/unusual_gcc_path`` would be added as
-rpaths to the linker, and ``-L/usr/lib/unusual_gcc_path`` would be
-added as well.
+Generally this is useful for detecting a small set of commonly-used packages; for now it is mostly limited to finding build-only dependencies.
+Specific limitations include:
+
+* Packages are not discoverable by default: For a package to be discoverable with ``spack external find``, it needs to add special logic.
+  See :ref:`here <make-package-findable>` for more details.
+* The logic does not search through module files; it can only detect packages with executables defined in ``PATH``. You can help Spack locate externals which use module files by loading any associated modules for packages that you want Spack to know about before running ``spack external find``.
+* Spack does not overwrite existing entries in the package configuration: If there is an external defined for a spec at any configuration scope, then Spack will not add a new external entry (``spack config blame packages`` can help locate all external entries).
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Prevent packages from being built from sources
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Adding an external spec in ``packages.yaml`` allows Spack to use an external location,
-but it does not prevent Spack from building packages from sources. In the above example,
-Spack might choose for many valid reasons to start building and linking with the
-latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
+Adding an external spec in ``packages.yaml`` allows Spack to use an external location, but it does not prevent Spack from building packages from sources.
+In the above example, Spack might choose for many valid reasons to start building and linking with the latest version of OpenMPI rather than continue using the pre-installed OpenMPI versions.
 
-To prevent this, the ``packages.yaml`` configuration also allows packages
-to be flagged as non-buildable. The previous example could be modified to
-be:
+To prevent this, the ``packages.yaml`` configuration also allows packages to be flagged as non-buildable.
+The previous example could be modified to be:
 
 .. code-block:: yaml
 
    packages:
     openmpi:
       externals:
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+      - spec: "openmpi@1.4.3~debug"
        prefix: /opt/openmpi-1.4.3
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+      - spec: "openmpi@1.4.3+debug"
        prefix: /opt/openmpi-1.4.3-debug
-      - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
-        prefix: /opt/openmpi-1.6.5-intel
-      buildable: False
+      buildable: false
 
-The addition of the ``buildable`` flag tells Spack that it should never build
-its own version of OpenMPI from sources, and it will instead always rely on a pre-built
-OpenMPI.
+The addition of the ``buildable`` flag tells Spack that it should never build its own version of OpenMPI from sources, and it will instead always rely on a pre-built OpenMPI.
 
 .. note::
 
    If ``concretizer:reuse`` is on (see :ref:`concretizer-options` for more information on that flag)
    pre-built specs are taken from: the local store, an upstream store, a registered buildcache and externals in ``packages.yaml``.
    If ``concretizer:reuse`` is off, only external specs in ``packages.yaml`` are included in the list of pre-built specs. 
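 
+A minimal sketch combining the two mechanisms, reusing the ``CMake/3.7.2`` module from the earlier example, marks a module-based external as non-buildable:
+
+.. code-block:: yaml
+
+   packages:
+     cmake:
+       buildable: false
+       externals:
+       - spec: cmake@3.7.2
+         modules:
+         - CMake/3.7.2
+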
-If an external module is specified as not buildable, then Spack will load the
-external module into the build environment which can be used for linking.
+If an external module is specified as not buildable, then Spack will load the external module into the build environment, where it can be used for linking.
 
-The ``buildable`` does not need to be paired with external packages.
-It could also be used alone to forbid packages that may be
-buggy or otherwise undesirable.
+The ``buildable`` attribute does not need to be paired with external packages.
+It could also be used alone to forbid packages that may be buggy or otherwise undesirable.
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Non-buildable virtual packages
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Virtual packages in Spack can also be specified as not buildable, and
-external implementations can be provided. In the example above,
-OpenMPI is configured as not buildable, but Spack will often prefer
-other MPI implementations over the externally available OpenMPI. Spack
-can be configured with every MPI provider not buildable individually,
-but more conveniently:
+Virtual packages in Spack can also be specified as not buildable, and external implementations can be provided.
+In the example above, OpenMPI is configured as not buildable, but Spack will often prefer other MPI implementations over the externally available OpenMPI.
+Every MPI provider could be marked as not buildable individually, but it is more convenient to configure the virtual package once:
 
 .. code-block:: yaml
 
    packages:
     mpi:
-      buildable: False
+      buildable: false
     openmpi:
       externals:
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+      - spec: "openmpi@1.4.3~debug"
        prefix: /opt/openmpi-1.4.3
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+      - spec: "openmpi@1.4.3+debug"
        prefix: /opt/openmpi-1.4.3-debug
-      - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
-        prefix: /opt/openmpi-1.6.5-intel
 
-Spack can then use any of the listed external implementations of MPI
-to satisfy a dependency, and will choose depending on the compiler and
-architecture.
+Spack can then use any of the listed external implementations of MPI to satisfy a dependency, and will choose among them depending on the compiler and architecture.
 
-In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers
-(available via stores or buildcaches) are not desirable, Spack can be configured to require
-specs matching only the available externals:
+In cases where the concretizer is configured to reuse specs, and other ``mpi`` providers (available via stores or buildcaches) are not desirable, Spack can be configured to require specs matching only the available externals:
 
 .. 
code-block:: yaml
 
    packages:
      mpi:
-      buildable: False
+      buildable: false
       require:
-      - one_of: [
-          "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64",
-          "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug",
-          "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
-        ]
+      - one_of:
+        - "openmpi@1.4.3~debug"
+        - "openmpi@1.4.3+debug"
     openmpi:
       externals:
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64"
+      - spec: "openmpi@1.4.3~debug"
        prefix: /opt/openmpi-1.4.3
-      - spec: "openmpi@1.4.3%gcc@4.4.7 arch=linux-debian7-x86_64+debug"
+      - spec: "openmpi@1.4.3+debug"
        prefix: /opt/openmpi-1.4.3-debug
-      - spec: "openmpi@1.6.5%intel@10.1 arch=linux-debian7-x86_64"
-        prefix: /opt/openmpi-1.6.5-intel
 
-This configuration prevents any spec using MPI and originating from stores or buildcaches to be reused,
-unless it matches the requirements under ``packages:mpi:require``. For more information on requirements see
-:ref:`package-requirements`.
+This configuration prevents any spec using MPI and originating from stores or buildcaches from being reused, unless it matches the requirements under ``packages:mpi:require``.
+For more information on requirements see :ref:`package-requirements`.
 
-.. _cmd-spack-external-find:
+Specifying dependencies among external packages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Automatically Find External Packages
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+External packages frequently have dependencies on other software components.
+Explicitly modeling these relationships provides Spack with a more complete representation of the software stack.
+This ensures that:
 
-You can run the :ref:`spack external find ` command
-to search for system-provided packages and add them to ``packages.yaml``.
-After running this command your ``packages.yaml`` may include new entries:
+- Runtime environments include all necessary components.
+- Build-time dependencies are accurately represented.
+
+This comprehensive view, in turn, enables Spack to more reliably build software that depends on these externals.
+
+Spack provides two methods for configuring dependency relationships among externals, each offering different trade-offs between conciseness and explicit control:
+
+- An "inline" spec syntax.
+- A structured YAML configuration that is more verbose but also more explicit.
+
+The following sections will detail both approaches.
+
+Dependencies using inline spec syntax
+"""""""""""""""""""""""""""""""""""""
+
+Spack allows you to define external package dependencies using the standard spec syntax directly within your package configuration.
+This approach is concise and leverages the familiar spec syntax that you already use elsewhere in Spack.
+
+When configuring an external package with dependencies using the spec syntax, you can include dependency specifications directly in the main ``spec:`` field:
 
 .. code-block:: yaml
 
+   # Specification for the following DAG:
+   #
+   #  o  mpileaks@2.3
+   #  |\
+   #  | o  callpath@1.0
+   #  |/
+   #  o  mpich@3.0.4
    packages:
-     cmake:
+     mpileaks:
       externals:
-      - spec: cmake@3.17.2
+      - spec: "mpileaks@2.3~debug+opt %mpich@3 %callpath"
+        prefix: /user/path
+     callpath:
+       externals:
+       - spec: "callpath@1.0 %mpi=mpich"
+         prefix: /user/path
+     mpich:
+       externals:
+       - spec: "mpich@3.0.4"
+         prefix: /user/path
+
+In this example ``mpileaks`` depends on both ``mpich`` and ``callpath``.
+Spack will parse the ``mpileaks`` spec string and create the appropriate dependency relationships automatically. 
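+
+The following sketch (paths and second version hypothetical) shows a configuration where this automatic matching would fail, because two ``mpich`` externals can satisfy the ``%mpich@3`` dependency of ``mpileaks``:
+
+.. code-block:: yaml
+
+   packages:
+     mpich:
+       externals:
+       - spec: "mpich@3.0.4"
+         prefix: /user/path
+       - spec: "mpich@3.1.2"
+         prefix: /other/path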
+ +Users *need* to ensure that each dependency maps exactly to a single other external package. +In case multiple externals can satisfy the same dependency, or in case no external can satisfy a dependency, Spack will error and point to the configuration line causing the issue. + +Whenever no information is given about the dependency type, Spack will infer it from the current package recipe. +For instance, the dependencies in the configuration above are inferred to be of ``build,link`` type from the recipe of ``mpileaks`` and ``callpath``: + +.. code-block:: console + + $ spack -m spec --types -l --cover edges mpileaks + [e] oelprl6 [ ] mpileaks@2.3~debug+opt+shared+static build_system=generic platform=linux os=ubuntu20.04 target=icelake + [e] jdhzy2t [bl ] ^callpath@1.0 build_system=generic platform=linux os=ubuntu20.04 target=icelake + [e] pgem3yp [bl ] ^mpich@3.0.4~debug build_system=generic platform=linux os=ubuntu20.04 target=icelake + [e] pgem3yp [bl ] ^mpich@3.0.4~debug build_system=generic platform=linux os=ubuntu20.04 target=icelake + +When inferring the dependency types, Spack will also infer virtuals if they are not already specified. + +This method's conciseness comes with a strict requirement: each dependency must resolve to a single, unambiguous external package. +This makes the approach suitable for simple or temporary configurations. +In larger, more dynamic environments, however, it can become a maintenance challenge, as adding new external packages over time may require frequent updates to existing specs to preserve their uniqueness. + +Dependencies using YAML configuration +""""""""""""""""""""""""""""""""""""" + +While the spec syntax offers a concise way to specify dependencies, Spack's YAML-based explicit dependency configuration provides more control and clarity, especially for complex dependency relationships. +This approach uses the ``dependencies:`` field to precisely define each dependency relationship. +The example in the previous section, written using the YAML configuration, becomes: + +.. code-block:: yaml + + # Specification for the following DAG: + # + # o mpileaks@2.3 + # |\ + # | o callpath@1.0 + # |/ + # o mpich@3.0.4 + packages: + mpileaks: + externals: + - spec: "mpileaks@2.3~debug+opt" + prefix: /user/path + dependencies: + - id: callpath_id + deptypes: link + - spec: mpich + deptypes: + - "build" + - "link" + virtuals: "mpi" + callpath: + externals: + - spec: "callpath@1.0" + prefix: /user/path + id: callpath_id + dependencies: + - spec: mpich + deptypes: + - "build" + - "link" + virtuals: "mpi" + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path + +Each dependency can be specified either by: + +- A ``spec:`` that matches an available external package, like in the previous case, or by +- An ``id`` that explicitly references another external package. + +Using the ``id`` provides an unambiguous reference to a specific external package, which is essential for differentiating between externals that have similar specs but differ, for example, only by their installation prefix. + +The dependency types can be specified in the optional ``deptypes`` field, while virtuals can be specified in the optional ``virtuals`` field. +As before, when the dependency types are not specified, Spack will infer them from the package recipe. + +.. _extra-attributes-for-externals: + +Extra attributes for external packages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes external packages require additional attributes to be used effectively. 
+This information can be defined on a per-package basis and stored in the ``extra_attributes`` section of the external package configuration.
+In addition to per-package information, this section can be used to define environment modifications to be performed whenever the package is used.
+For example, if an external package is built without ``rpath`` support, it may require ``LD_LIBRARY_PATH`` settings to find its dependencies.
+This could be configured as follows:
+
+.. code-block:: yaml
+
+   packages:
+     mpich:
+       externals:
+       - spec: mpich@3.3 +hwloc
+         prefix: /path/to/mpich
+         extra_attributes:
+           environment:
+             prepend_path:
+               LD_LIBRARY_PATH: /path/to/hwloc/lib64
+
+See :ref:`configuration_environment_variables` for more information on how to configure environment modifications in Spack config files.
+
+.. _configuring-system-compilers-as-external-packages:
+
+Configuring system compilers as external packages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In Spack, compilers are treated as packages like any other.
+This means that you can also configure system compilers as external packages and use them in Spack.
+
+Spack automatically detects system compilers and configures them in ``packages.yaml`` for you.
+You can also run :ref:`spack-compiler-find` to find and configure new system compilers.
+
+When configuring compilers as external packages, you need to set a few :ref:`extra attributes <extra-attributes-for-externals>` for them to work properly.
+The ``compilers`` extra attribute field is required to clarify which paths within the compiler prefix are used for which languages:
+
+.. code-block:: yaml
+
+   packages:
+     gcc:
+       externals:
+       - spec: gcc@10.5.0 languages='c,c++,fortran'
+         prefix: /usr
+         extra_attributes:
+           compilers:
+             c: /usr/bin/gcc-10
+             cxx: /usr/bin/g++-10
+             fortran: /usr/bin/gfortran-10
+
+Other fields accepted by compilers under ``extra_attributes`` are ``flags``, ``environment``, ``extra_rpaths``, and ``implicit_rpaths``.
+
+.. code-block:: yaml
+
+   packages:
+     gcc:
+       externals:
+       - spec: gcc@10.5.0 languages='c,c++,fortran'
+         prefix: /usr
+         extra_attributes:
+           compilers:
+             c: /usr/bin/gcc-10
+             cxx: /usr/bin/g++-10
+             fortran: /usr/bin/gfortran-10
+           flags:
+             cflags: -O3
+             fflags: -g -O2
+           environment:
+             set:
+               GCC_ROOT: /usr
+             prepend_path:
+               PATH: /usr/unusual_path_for_ld/bin
+           implicit_rpaths:
+           - /usr/lib/gcc
+           extra_rpaths:
+           - /usr/lib/unusual_gcc_path
 
-Generally this is useful for detecting a small set of commonly-used packages;
-for now this is generally limited to finding build-only dependencies.
-Specific limitations include:
+The ``flags`` attribute specifies compiler flags to apply to every spec that depends on this compiler.
+The accepted flag types are ``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and ``ldlibs``.
+In the example above, every spec compiled with this compiler will pass the flags ``-g -O2`` to ``/usr/bin/gfortran-10`` and will pass the flag ``-O3`` to ``/usr/bin/gcc-10``.
+
+The ``environment`` attribute specifies user environment modifications to apply before each invocation of the compiler.
+The available operations are ``set``, ``unset``, ``prepend_path``, ``append_path``, and ``remove_path``.
+In the example above, Spack will set ``GCC_ROOT=/usr`` and set ``PATH=/usr/unusual_path_for_ld/bin:$PATH`` before handing control to the build system that will use this compiler.
 
-* Packages are not discoverable by default: For a package to be
-  discoverable with ``spack external find``, it needs to add special
-  logic. 
See :ref:`here <make-package-findable>` for more details.
-* The logic does not search through module files, it can only detect
-  packages with executables defined in ``PATH``; you can help Spack locate
-  externals which use module files by loading any associated modules for
-  packages that you want Spack to know about before running
-  ``spack external find``.
-* Spack does not overwrite existing entries in the package configuration:
-  If there is an external defined for a spec at any configuration scope,
-  then Spack will not add a new external entry (``spack config blame packages``
-  can help locate all external entries).
+The ``extra_rpaths`` and ``implicit_rpaths`` fields specify additional paths to pass as rpaths to the linker when using this compiler.
+The ``implicit_rpaths`` field is filled in automatically by Spack when detecting compilers, and the ``extra_rpaths`` field is available for users to configure necessary rpaths that have not been detected by Spack.
+In addition, paths from ``extra_rpaths`` are added as library search paths for the linker.
+In the example above, both ``/usr/lib/gcc`` and ``/usr/lib/unusual_gcc_path`` would be added as rpaths to the linker, and ``-L/usr/lib/unusual_gcc_path`` would be added as well.
 
 .. _package-requirements:
 
---------------------
 Package Requirements
 --------------------
 
-Spack can be configured to always use certain compilers, package
-versions, and variants during concretization through package
-requirements.
+Spack can be configured to always use certain compilers, package versions, and variants during concretization through package requirements.
 
-Package requirements are useful when you find yourself repeatedly
-specifying the same constraints on the command line, and wish that
-Spack respects these constraints whether you mention them explicitly
-or not. Another use case is specifying constraints that should apply
-to all root specs in an environment, without having to repeat the
-constraint everywhere.
+Package requirements are useful when you find yourself repeatedly specifying the same constraints on the command line, and want Spack to respect these constraints whether you mention them explicitly or not.
+Another use case is specifying constraints that should apply to all root specs in an environment, without having to repeat the constraint everywhere.
 
-Apart from that, requirements config is more flexible than constraints
-on the command line, because it can specify constraints on packages
-*when they occur* as a dependency. In contrast, on the command line it
-is not possible to specify constraints on dependencies while also keeping
-those dependencies optional.
+Apart from that, the requirements config is more flexible than constraints on the command line, because it can specify constraints on packages *when they occur* as a dependency.
+In contrast, on the command line it is not possible to specify constraints on dependencies while also keeping those dependencies optional.
 
 .. seealso::
 
    FAQ: :ref:`Why does Spack pick particular versions and variants? `
 
-^^^^^^^^^^^^^^^^^^^
 Requirements syntax
 ^^^^^^^^^^^^^^^^^^^
 
-The package requirements configuration is specified in ``packages.yaml``,
-keyed by package name and expressed using the Spec syntax. In the simplest
-case you can specify attributes that you always want the package to have
-by providing a single spec string to ``require``:
+The package requirements configuration is specified in ``packages.yaml``, keyed by package name and expressed using the Spec syntax. 
+In the simplest case, you can specify attributes that you always want the package to have by providing a single spec string to ``require``:
 
 .. code-block:: yaml
 
@@ -381,9 +426,8 @@ by providing a single spec string to ``require``:
    libfabric:
      require: "@1.13.2"
 
-In the above example, ``libfabric`` will always build with version 1.13.2. If you
-need to compose multiple configuration scopes ``require`` accepts a list of
-strings:
+In the above example, ``libfabric`` will always build with version 1.13.2.
+If you need to compose multiple configuration scopes, ``require`` accepts a list of strings:
 
 .. code-block:: yaml
 
@@ -393,12 +437,10 @@ strings:
      - "@1.13.2"
      - "%gcc"
 
-In this case ``libfabric`` will always build with version 1.13.2 **and** using GCC
-as a compiler.
+In this case ``libfabric`` will always build with version 1.13.2 **and** using GCC as a compiler.
 
-For more complex use cases, require accepts also a list of objects. These objects
-must have either a ``any_of`` or a ``one_of`` field, containing a list of spec strings,
-and they can optionally have a ``when`` and a ``message`` attribute:
+For more complex use cases, ``require`` also accepts a list of objects.
+These objects must have either an ``any_of`` or a ``one_of`` field, containing a list of spec strings, and they can optionally have a ``when`` and a ``message`` attribute:
 
 .. code-block:: yaml
 
@@ -408,16 +450,13 @@ and they can optionally have a ``when`` and a ``message`` attribute:
      - any_of: ["@4.1.5", "%c,cxx,fortran=gcc"]
        message: "in this example only 4.1.5 can build with other compilers"
 
-``any_of`` is a list of specs. One of those specs must be satisfied
-and it is also allowed for the concretized spec to match more than one.
-In the above example, that means you could build ``openmpi@4.1.5%gcc``,
-``openmpi@4.1.5%clang`` or ``openmpi@3.9%gcc``, but
-not ``openmpi@3.9%clang``.
+``any_of`` is a list of specs.
+One of those specs must be satisfied and it is also allowed for the concretized spec to match more than one.
+In the above example, that means you could build ``openmpi@4.1.5%gcc``, ``openmpi@4.1.5%clang`` or ``openmpi@3.9%gcc``, but not ``openmpi@3.9%clang``.
 
-If a custom message is provided, and the requirement is not satisfiable,
-Spack will print the custom error message:
+If a custom message is provided, and the requirement is not satisfiable, Spack will print the custom error message:
 
-.. code-block:: console
+.. code-block:: spec
 
    $ spack spec openmpi@3.9%clang
    ==> Error: in this example only 4.1.5 can build with other compilers
 
@@ -434,8 +473,7 @@ We could express a similar requirement using the ``when`` attribute:
        message: "in this example only 4.1.5 can build with other compilers"
 
 In the example above, if the version turns out to be 4.1.4 or less, we require the compiler to be GCC.
-For readability, Spack also allows a ``spec`` key accepting a string when there is only a single
-constraint:
+For readability, Spack also allows a ``spec`` key accepting a string when there is only a single constraint:
 
 .. code-block:: yaml
 
@@ -448,8 +486,8 @@ constraint:
 
 This code snippet and the one before it are semantically equivalent.
 
-Finally, instead of ``any_of`` you can use ``one_of`` which also takes a list of specs. The final
-concretized spec must match one and only one of them:
+Finally, instead of ``any_of`` you can use ``one_of``, which also takes a list of specs.
+The final concretized spec must match one and only one of them:
 
 .. 
code-block:: yaml
 
@@ -462,44 +500,33 @@ In the example above, that means you could build ``mpich+cuda`` or ``mpich+rocm`
 
 .. note::
 
-   For ``any_of`` and ``one_of``, the order of specs indicates a
-   preference: items that appear earlier in the list are preferred
-   (note that these preferences can be ignored in favor of others).
+   For ``any_of`` and ``one_of``, the order of specs indicates a preference: items that appear earlier in the list are preferred (note that these preferences can be ignored in favor of others).
 
 .. note::
 
-   When using a conditional requirement, Spack is allowed to actively avoid the triggering
-   condition (the ``when=...`` spec) if that leads to a concrete spec with better scores in
-   the optimization criteria. To check the current optimization criteria and their
-   priorities you can run ``spack solve zlib``.
+   When using a conditional requirement, Spack is allowed to actively avoid the triggering condition (the ``when=...`` spec) if that leads to a concrete spec with better scores in the optimization criteria.
+   To check the current optimization criteria and their priorities you can run ``spack solve zlib``.
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Setting default requirements
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-You can also set default requirements for all packages under ``all``
-like this:
+You can also set default requirements for all packages under ``all`` like this:
 
 .. code-block:: yaml
 
    packages:
     all:
-       require: '%[when=%c]c=clang %[when=%cxx]cxx=clang'
+       require: "%[when=%c]c=clang %[when=%cxx]cxx=clang"
 
 which means every spec will be required to use ``clang`` as the compiler for C and C++ code.
 
 .. warning::
 
-   The simpler config ``require: %clang`` will fail to build any
-   package that does not include compiled code, because those packages
-   cannot depend on ``clang`` (alias for ``llvm+clang``). In most
-   contexts, default requirements must use either conditional
-   dependencies or a :ref:`toolchain ` that combines conditional
-   dependencies.
+   The simpler config ``require: %clang`` will fail to build any package that does not include compiled code, because those packages cannot depend on ``clang`` (alias for ``llvm+clang``).
+   In most contexts, default requirements must use either conditional dependencies or a :ref:`toolchain ` that combines conditional dependencies.
 
-Requirements on variants for all packages are possible too, but note that they
-are only enforced for those packages that define these variants, otherwise they
-are disregarded. For example:
+Requirements on variants for all packages are possible too, but note that they are only enforced for those packages that define these variants; otherwise, they are disregarded.
+For example:
 
 .. code-block:: yaml
 
@@ -509,8 +536,7 @@ are disregarded. For example:
      - "+shared"
      - "+cuda"
 
-will just enforce ``+shared`` on ``zlib``, which has a boolean ``shared`` variant but
-no ``cuda`` variant.
+will just enforce ``+shared`` on ``zlib``, which has a boolean ``shared`` variant but no ``cuda`` variant.
 
 Constraints in a single spec literal are always considered as a whole, so in a case like:
 
@@ -520,31 +546,28 @@ Constraints in a single spec literal are always considered as a whole, so in a c
    all:
      require: "+shared +cuda"
 
-the default requirement will be enforced only if a package has both a ``cuda`` and
-a ``shared`` variant, and will never be partially enforced.
+the default requirement will be enforced only if a package has both a ``cuda`` and a ``shared`` variant, and will never be partially enforced. 
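 
+If independent, per-variant enforcement is desired instead, the constraints can be split into separate spec literals, as in the earlier example; a minimal sketch:
+
+.. code-block:: yaml
+
+   packages:
+     all:
+       require:
+       - "+shared"
+       - "+cuda"
+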
-Finally, ``all`` represents a *default set of requirements* -
-if there are specific package requirements, then the default requirements
-under ``all`` are disregarded. For example, with a configuration like this:
+Finally, ``all`` represents a *default set of requirements* - if there are specific package requirements, then the default requirements under ``all`` are disregarded.
+For example, with a configuration like this:
 
 .. code-block:: yaml
 
    packages:
     all:
       require:
-      - 'build_type=Debug'
-      - '%[when=%c]c=clang %[when=%cxx]cxx=clang'
+      - "build_type=Debug"
+      - "%[when=%c]c=clang %[when=%cxx]cxx=clang"
     cmake:
       require:
-      - 'build_type=Debug'
-      - '%c,cxx=gcc'
+      - "build_type=Debug"
+      - "%c,cxx=gcc"
 
-Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake``
-dependencies) to use ``clang``. If enforcing ``build_type=Debug`` is needed also
-on ``cmake``, it must be repeated in the specific ``cmake`` requirements.
+Spack requires ``cmake`` to use ``gcc`` and all other nodes (including ``cmake`` dependencies) to use ``clang``.
+If enforcing ``build_type=Debug`` is also needed on ``cmake``, it must be repeated in the specific ``cmake`` requirements.
 
+.. _setting-requirements-on-virtual-specs:
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Setting requirements on virtual specs
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -555,95 +578,84 @@ This can be useful for fixing which virtual provider you want to use:
 
    packages:
     mpi:
-      require: 'mvapich2 %c,cxx,fortran=gcc'
+      require: "mvapich2 %c,cxx,fortran=gcc"
 
-With the configuration above the only allowed ``mpi`` provider is
-``mvapich2`` built with ``gcc``/``g++``/``gfortran``.
+With the configuration above the only allowed ``mpi`` provider is ``mvapich2`` built with ``gcc``/``g++``/``gfortran``.
 
-Requirements on the virtual spec and on the specific provider are both applied, if
-present. For instance with a configuration like:
+Requirements on the virtual spec and on the specific provider are both applied, if present.
+For instance with a configuration like:
 
 .. code-block:: yaml
 
    packages:
     mpi:
-      require: 'mvapich2 %c,cxx,fortran=gcc'
+      require: "mvapich2 %c,cxx,fortran=gcc"
    mvapich2:
-      require: '~cuda'
+      require: "~cuda"
 
 you will use ``mvapich2~cuda %c,cxx,fortran=gcc`` as an ``mpi`` provider.
 
 .. _package-strong-preferences:
 
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Conflicts and strong preferences
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-If the semantic of requirements is too strong, you can also express "strong preferences" and "conflicts"
-from configuration files:
+If the semantics of requirements are too strong, you can also express "strong preferences" and "conflicts" from configuration files:
 
 .. code-block:: yaml
 
    packages:
     all:
       prefer:
-      - '%c,cxx=clang'
+      - "%c,cxx=clang"
       conflict:
-      - '+shared'
+      - "+shared"
 
 The ``prefer`` and ``conflict`` sections can be used whenever a ``require`` section is allowed.
-The argument is always a list of constraints, and each constraint can be either a simple string,
-or a more complex object:
+The argument is always a list of constraints, and each constraint can be either a simple string, or a more complex object:
 
 .. code-block:: yaml
 
    packages:
     all:
       conflict:
-      - spec: '%c,cxx=clang'
-        when: 'target=x86_64_v3'
-        message: 'reason why clang cannot be used'
+      - spec: "%c,cxx=clang"
+        when: "target=x86_64_v3"
+        message: "reason why clang cannot be used"
 
 The ``spec`` attribute is mandatory, while both ``when`` and ``message`` are optional.
 
 .. 
note::
 
    Requirements allow for expressing both "strong preferences" and "conflicts".
-   The syntax for doing so, though, may not be immediately clear. For
-   instance, if we want to prevent any package from using ``%clang``, we can set:
+   The syntax for doing so, though, may not be immediately clear.
+   For instance, if we want to prevent any package from using ``%clang``, we can set:
 
    .. code-block:: yaml
 
      packages:
       all:
         require:
-        - one_of: ['%clang', '@:']
+        - one_of: ["%clang", "@:"]
 
-   Since only one of the requirements must hold, and ``@:`` is always true, the rule above is
-   equivalent to a conflict. For "strong preferences" the same construction works, with the ``any_of``
-   policy instead of the ``one_of`` policy.
+   Since only one of the requirements must hold, and ``@:`` is always true, the rule above is equivalent to a conflict.
+   For "strong preferences" the same construction works, with the ``any_of`` policy instead of the ``one_of`` policy.
 
 .. _package-preferences:
 
--------------------
 Package Preferences
 -------------------
 
-In some cases package requirements can be too strong, and package
-preferences are the better option. Package preferences do not impose
-constraints on packages for particular versions or variants values,
-they rather only set defaults. The concretizer is free to change
-them if it must, due to other constraints, and also prefers reusing
-installed packages over building new ones that are a better match for
-preferences.
+In some cases package requirements can be too strong, and package preferences are the better option.
+Package preferences do not impose constraints on packages for particular version or variant values; rather, they only set defaults.
+The concretizer is free to change them if it must, due to other constraints, and also prefers reusing installed packages over building new ones that are a better match for preferences.
 
 .. seealso::
 
    FAQ: :ref:`Why does Spack pick particular versions and variants? `
 
-The ``target`` and ``providers`` preferences
-can only be set globally under the ``all`` section of ``packages.yaml``:
+The ``target`` and ``providers`` preferences can only be set globally under the ``all`` section of ``packages.yaml``:
 
 .. code-block:: yaml
 
@@ -653,16 +665,12 @@ can only be set globally under the ``all`` section of ``packages.yaml``:
      providers:
        mpi: [mvapich2, mpich, openmpi]
 
-These preferences override Spack's default and effectively reorder priorities
-when looking for the best compiler, target or virtual package provider. Each
-preference takes an ordered list of spec constraints, with earlier entries in
-the list being preferred over later entries.
+These preferences override Spack's default and effectively reorder priorities when looking for the best compiler, target or virtual package provider.
+Each preference takes an ordered list of spec constraints, with earlier entries in the list being preferred over later entries.
 
-In the example above all packages prefer to target the ``x86_64_v3``
-microarchitecture and to use ``mvapich2`` if they depend on ``mpi``.
+In the example above all packages prefer to target the ``x86_64_v3`` microarchitecture and to use ``mvapich2`` if they depend on ``mpi``.
 
-The ``variants`` and ``version`` preferences can be set under
-package specific sections of the ``packages.yaml`` file:
+The ``variants`` and ``version`` preferences can be set under package-specific sections of the ``packages.yaml`` file:
 
 .. 
code-block:: yaml @@ -672,68 +680,52 @@ package specific sections of the ``packages.yaml`` file: gperftools: version: [2.2, 2.4, 2.3] -In this case, the preference for ``opencv`` is to build with debug options, while -``gperftools`` prefers version 2.2 over 2.4. +In this case, the preference for ``opencv`` is to build with debug options, while ``gperftools`` prefers version 2.2 over 2.4. Any preference can be overwritten on the command line if explicitly requested. -Preferences cannot overcome explicit constraints, as they only set a preferred -ordering among homogeneous attribute values. Going back to the example, if -``gperftools@2.3:`` was requested, then Spack will install version 2.4 -since the most preferred version 2.2 is prohibited by the version constraint. +Preferences cannot overcome explicit constraints, as they only set a preferred ordering among homogeneous attribute values. +Going back to the example, if ``gperftools@2.3:`` was requested, then Spack will install version 2.4 since the most preferred version 2.2 is prohibited by the version constraint. .. _package_permissions: -------------------- Package Permissions ------------------- -Spack can be configured to assign permissions to the files installed -by a package. +Spack can be configured to assign permissions to the files installed by a package. -In the ``packages.yaml`` file under ``permissions``, the attributes -``read``, ``write``, and ``group`` control the package -permissions. These attributes can be set per-package, or for all -packages under ``all``. If permissions are set under ``all`` and for a -specific package, the package-specific settings take precedence. +In the ``packages.yaml`` file under ``permissions``, the attributes ``read``, ``write``, and ``group`` control the package permissions. +These attributes can be set per-package, or for all packages under ``all``. +If permissions are set under ``all`` and for a specific package, the package-specific settings take precedence. -The ``read`` and ``write`` attributes take one of ``user``, ``group``, -and ``world``. +The ``read`` and ``write`` attributes take one of ``user``, ``group``, and ``world``. .. code-block:: yaml - packages: - all: - permissions: - write: group - group: spack - my_app: - permissions: - read: group - group: my_team - -The permissions settings describe the broadest level of access to -installations of the specified packages. The execute permissions of -the file are set to the same level as read permissions for those files -that are executable. The default setting for ``read`` is ``world``, -and for ``write`` is ``user``. In the example above, installations of -``my_app`` will be installed with user and group permissions but no -world permissions, and owned by the group ``my_team``. All other -packages will be installed with user and group write privileges, and -world read privileges. Those packages will be owned by the group -``spack``. - -The ``group`` attribute assigns a Unix-style group to a package. All -files installed by the package will be owned by the assigned group, -and the sticky group bit will be set on the install prefix and all -directories inside the install prefix. This will ensure that even -manually placed files within the install prefix are owned by the -assigned group. If no group is assigned, Spack will allow the OS -default behavior to go as expected. 
+ packages: + all: + permissions: + write: group + group: spack + my_app: + permissions: + read: group + group: my_team + +The permissions settings describe the broadest level of access to installations of the specified packages. +The execute permissions of the file are set to the same level as read permissions for those files that are executable. +The default setting for ``read`` is ``world``, and for ``write`` is ``user``. +In the example above, installations of ``my_app`` will be installed with user and group permissions but no world permissions, and owned by the group ``my_team``. +All other packages will be installed with user and group write privileges, and world read privileges. +Those packages will be owned by the group ``spack``. + +The ``group`` attribute assigns a Unix-style group to a package. +All files installed by the package will be owned by the assigned group, and the sticky group bit will be set on the install prefix and all directories inside the install prefix. +This will ensure that even manually placed files within the install prefix are owned by the assigned group. +If no group is assigned, Spack will allow the OS default behavior to go as expected. .. _assigning-package-attributes: ----------------------------- Assigning Package Attributes ---------------------------- @@ -741,17 +733,15 @@ You can assign class-level attributes in the configuration: .. code-block:: yaml - packages: - mpileaks: - package_attributes: - # Override existing attributes - url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz - # ... or add new ones - x: 1 + packages: + mpileaks: + package_attributes: + # Override existing attributes + url: http://www.somewhereelse.com/mpileaks-1.0.tar.gz + # ... or add new ones + x: 1 -Attributes set this way will be accessible to any method executed -in the package.py file (e.g. the ``install()`` method). Values for these -attributes may be any value parseable by yaml. +Attributes set this way will be accessible to any method executed in the package.py file (e.g. the ``install()`` method). +Values for these attributes may be any value parseable by yaml. -These can only be applied to specific packages, not "all" or -virtual packages. +These can only be applied to specific packages, not "all" or virtual packages. diff --git a/lib/spack/docs/packaging_guide_advanced.rst b/lib/spack/docs/packaging_guide_advanced.rst index bac1717003c9f4..26a2bc322c6222 100644 --- a/lib/spack/docs/packaging_guide_advanced.rst +++ b/lib/spack/docs/packaging_guide_advanced.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -16,7 +17,6 @@ - :doc:`3. Testing ` - **4. Advanced** -================================ Packaging Guide: advanced topics ================================ @@ -24,7 +24,6 @@ This section of the packaging guide covers a few advanced topics. .. 
_multiple_build_systems: ----------------------- Multiple build systems ---------------------- @@ -47,15 +46,16 @@ Here is a simple example of a package that supports both CMake and Autotools: from spack.package import * from spack_repo.builtin.build_systems import cmake, autotools + class Example(cmake.CMakePackage, autotools.AutotoolsPackage): variant("my_feature", default=True) build_system("cmake", "autotools", default="cmake") + class CMakeBuilder(cmake.CMakeBuilder): def cmake_args(self): - return [ - self.define_from_variant("MY_FEATURE", "my_feature") - ] + return [self.define_from_variant("MY_FEATURE", "my_feature")] + class AutotoolsBuilder(autotools.AutotoolsBuilder): def configure_args(self): @@ -64,7 +64,7 @@ Here is a simple example of a package that supports both CMake and Autotools: When defining a package like this, Spack automatically makes the ``build_system`` **variant** available, which can be used to pick the desired build system at install time. For example -.. code-block:: console +.. code-block:: spec $ spack install example +feature build_system=cmake @@ -72,7 +72,7 @@ makes Spack pick the ``CMakeBuilder`` class and runs ``cmake -DMY_FEATURE:BOOL=O Similarly -.. code-block:: console +.. code-block:: spec $ spack install example +feature build_system=autotools @@ -92,6 +92,7 @@ The directives such as ``depends_on``, ``variant``, ``patch`` go into the packag from spack.package import * from spack_repo.builtin.build_systems import autotools + class Example(autotools.AutotoolsPackage): def install(self, spec: Spec, prefix: str) -> None: # ...existing code... @@ -104,9 +105,11 @@ The directives such as ``depends_on``, ``variant``, ``patch`` go into the packag from spack.package import * from spack_repo.builtin.build_systems import autotools, cmake + class Example(autotools.AutotoolsPackage, cmake.CMakePackage): build_system("autotools", "cmake", default="cmake") + class AutotoolsBuilder(autotools.AutotoolsBuilder): def install(self, pkg: Example, spec: Spec, prefix: str) -> None: # ...existing code... @@ -123,6 +126,7 @@ An effective way to handle this is to use a ``with when("build_system=...")`` bl from spack.package import * from spack_repo.builtin.build_systems import cmake, autotools + class Example(cmake.CMakePackage, autotools.AutotoolsPackage): build_system("cmake", "autotools", default="cmake") @@ -151,6 +155,7 @@ In such cases we have to use the ``build_system`` directive to indicate when whi from spack.package import * from spack_repo.builtin.build_systems import cmake, autotools + class Example(cmake.CMakePackage, autotools.AutotoolsPackage): build_system( @@ -159,7 +164,7 @@ In such cases we have to use the ``build_system`` directive to indicate when whi default="cmake", ) -In the example the directive imposes a change from ``Autotools`` to ``CMake`` going from ``v0.63`` to ``v0.64``. +In the example, the directive imposes a change from ``Autotools`` to ``CMake`` going from ``v0.63`` to ``v0.64``. We have seen how users can run ``spack install example build_system=cmake`` to pick the desired build system. The same can be done in ``depends_on`` statements, which has certain use cases. @@ -174,22 +179,18 @@ In that case, you can *force* the choice of the build system of the dependency: .. 
_make-package-findable:
 
-----------------------------------------------------------
 Making a package discoverable with ``spack external find``
 ----------------------------------------------------------
 
-The simplest way to make a package discoverable with
-:ref:`spack external find <cmd-spack-external-find>` is to:
+The simplest way to make a package discoverable with :ref:`spack external find <cmd-spack-external-find>` is to:
 
 1. Define the executables associated with the package.
 2. Implement a method to determine the versions of these executables.
 
-^^^^^^^^^^^^^^^^^
 Minimal detection
 ^^^^^^^^^^^^^^^^^
 
-The first step is fairly simple, as it requires only to
-specify a package-level ``executables`` attribute:
+The first step is fairly simple, as it only requires specifying a package-level ``executables`` attribute:
 
 .. code-block:: python
 
@@ -198,14 +199,11 @@ specify a package-level ``executables`` attribute:
       # would match for example "foo", "foobar", and "bazfoo".
       executables = ["foo"]
 
-This attribute must be a list of strings. Each string is a regular
-expression (e.g. "gcc" would match "gcc", "gcc-8.3", "my-weird-gcc", etc.) to
-determine a set of system executables that might be part of this package. Note
-that to match only executables named "gcc" the regular expression ``"^gcc$"``
-must be used.
+This attribute must be a list of strings.
+Each string is a regular expression (e.g. "gcc" would match "gcc", "gcc-8.3", "my-weird-gcc", etc.) to determine a set of system executables that might be part of this package.
+Note that to match only executables named "gcc" the regular expression ``"^gcc$"`` must be used.
 
-Finally, to determine the version of each executable the ``determine_version``
-method must be implemented:
+Finally, to determine the version of each executable the ``determine_version`` method must be implemented:
 
 .. code-block:: python
 
@@ -218,31 +216,22 @@ method must be implemented:
          exe (str): absolute path to the executable being examined
       """
 
-This method receives as input the path to a single executable and must return
-as output its version as a string; if the user cannot determine the version
-or determines that the executable is not an instance of the package, they can
-return None and the executable will be discarded as a candidate.
-Implementing the two steps above is mandatory, and gives the package the
-basic ability to detect if a spec is present on the system at a given version.
+This method receives as input the path to a single executable and must return as output its version as a string.
+If the version cannot be determined, or if the executable turns out to be a false positive, the value ``None`` must be returned, which ensures that the executable is discarded as a candidate.
+Implementing the two steps above is mandatory, and gives the package the basic ability to detect if a spec is present on the system at a given version.
 
 .. note::
 
-   Any executable for which the ``determine_version`` method returns ``None``
-   will be discarded and won't appear in later stages of the workflow described below.
+   Any executable for which the ``determine_version`` method returns ``None`` will be discarded and won't appear in later stages of the workflow described below.
 
-^^^^^^^^^^^^^^^^^^^^^^^^
 Additional functionality
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Besides the two mandatory steps described above, there are also optional
-methods that can be implemented to either increase the amount of details
-being detected or improve the robustness of the detection logic in a package. 
+Besides the two mandatory steps described above, there are also optional methods that can be implemented to either increase the amount of details being detected or improve the robustness of the detection logic in a package. -"""""""""""""""""""""""""""""" Variants and custom attributes """""""""""""""""""""""""""""" -The ``determine_variants`` method can be optionally implemented in a package -to detect additional details of the spec: +The ``determine_variants`` method can be optionally implemented in a package to detect additional details of the spec: .. code-block:: python @@ -259,18 +248,14 @@ to detect additional details of the spec: executables, as detected by ``determine_version`` """ -This method takes as input a list of executables that live in the same prefix and -share the same version string, and returns either: +This method takes as input a list of executables that live in the same prefix and share the same version string, and returns either: 1. A variant string 2. A tuple of a variant string and a dictionary of extra attributes -3. A list of items matching either 1 or 2 (if multiple specs are detected - from the set of executables) +3. A list of items matching either 1 or 2 (if multiple specs are detected from the set of executables) -If extra attributes are returned, they will be recorded in ``packages.yaml`` -and be available for later reuse. As an example, the ``gcc`` package will record -by default the different compilers found and an entry in ``packages.yaml`` -would look like: +If extra attributes are returned, they will be recorded in ``packages.yaml`` and be available for later reuse. +As an example, the ``gcc`` package will record by default the different compilers found and an entry in ``packages.yaml`` would look like: .. code-block:: yaml @@ -285,24 +270,18 @@ would look like: c++: /usr/bin/x86_64-linux-gnu-g++-9 fortran: /usr/bin/x86_64-linux-gnu-gfortran-9 -This allows us, for instance, to keep track of executables that would be named -differently if built by Spack (e.g. ``x86_64-linux-gnu-gcc-9`` -instead of just ``gcc``). +This allows us, for instance, to keep track of executables that would be named differently if built by Spack (e.g. ``x86_64-linux-gnu-gcc-9`` instead of just ``gcc``). .. TODO: we need to gather some more experience on overriding "prefix" and other special keywords in extra attributes, but as soon as we are confident that this is the way to go we should document the process. See https://github.com/spack/spack/pull/16526#issuecomment-653783204 -""""""""""""""""""""""""""" Filter matching executables """"""""""""""""""""""""""" -Sometimes defining the appropriate regex for the ``executables`` -attribute might prove to be difficult, especially if one has to -deal with corner cases or exclude "red herrings". To help keep -the regular expressions as simple as possible, each package can -optionally implement a ``filter_detected_exes`` method: +Sometimes defining the appropriate regex for the ``executables`` attribute might prove to be difficult, especially if one has to deal with corner cases or exclude "red herrings". +To help keep the regular expressions as simple as possible, each package can optionally implement a ``filter_detected_exes`` method: .. 
code-block:: python @@ -310,38 +289,29 @@ optionally implement a ``filter_detected_exes`` method: def filter_detected_exes(cls, prefix, exes_in_prefix): """Return a filtered list of the executables in prefix""" -which takes as input a prefix and a list of matching executables and -returns a filtered list of said executables. +which takes as input a prefix and a list of matching executables and returns a filtered list of said executables. -Using this method has the advantage of allowing custom logic for -filtering, and does not restrict the user to regular expressions -only. Consider the case of detecting the GNU C++ compiler. If we -try to search for executables that match ``g++``, that would have -the unwanted side effect of selecting also ``clang++`` - which is -a C++ compiler provided by another package - if present on the system. -Trying to select executables that contain ``g++`` but not ``clang`` -would be quite complicated to do using regex only. Employing the -``filter_detected_exes`` method it becomes: +Using this method has the advantage of allowing custom logic for filtering, and does not restrict the user to regular expressions only. +Consider the case of detecting the GNU C++ compiler. +If we try to search for executables that match ``g++``, that would have the unwanted side effect of selecting also ``clang++`` - which is a C++ compiler provided by another package - if present on the system. +Trying to select executables that contain ``g++`` but not ``clang`` would be quite complicated to do using only regular expressions. +Employing the ``filter_detected_exes`` method it becomes: .. code-block:: python class Gcc(Package): - executables = ["g++"] + executables = ["g++"] - @classmethod - def filter_detected_exes(cls, prefix, exes_in_prefix): - return [x for x in exes_in_prefix if "clang" not in x] + @classmethod + def filter_detected_exes(cls, prefix, exes_in_prefix): + return [x for x in exes_in_prefix if "clang" not in x] -Another possibility that this method opens is to apply certain -filtering logic when specific conditions are met (e.g. take some -decisions on an OS and not on another). +Another possibility that this method opens is to apply certain filtering logic when specific conditions are met (e.g. take some decisions on an OS and not on another). -^^^^^^^^^^^^^^^^^^ Validate detection ^^^^^^^^^^^^^^^^^^ -To increase detection robustness, packagers may also implement a method -to validate the detected Spec objects: +To increase detection robustness, packagers may also implement a method to validate the detected Spec objects: .. code-block:: python @@ -349,23 +319,18 @@ to validate the detected Spec objects: def validate_detected_spec(cls, spec, extra_attributes): """Validate a detected spec. Raise an exception if validation fails.""" -This method receives a detected spec along with its extra attributes and can be -used to check that certain conditions are met by the spec. Packagers can either -use assertions or raise an ``InvalidSpecDetected`` exception when the check fails. -If the conditions are not honored the spec will be discarded and any message -associated with the assertion or the exception will be logged as the reason for -discarding it. +This method receives a detected spec along with its extra attributes and can be used to check that certain conditions are met by the spec. +Packagers can either use assertions or raise an ``InvalidSpecDetected`` exception when the check fails. 
+If the conditions are not honored the spec will be discarded and any message associated with the assertion or the exception will be logged as the reason for discarding it. -As an example, a package that wants to check that the ``compilers`` attribute is -in the extra attributes can implement this method like this: +As an example, a package that wants to check that the ``compilers`` attribute is in the extra attributes can implement this method like this: .. code-block:: python @classmethod def validate_detected_spec(cls, spec, extra_attributes): """Check that "compilers" is in the extra attributes.""" - msg = ("the extra attribute 'compilers' must be set for " - "the detected spec '{0}'".format(spec)) + msg = "the extra attribute 'compilers' must be set for the detected spec '{0}'".format(spec) assert "compilers" in extra_attributes, msg or like this: @@ -376,21 +341,17 @@ or like this: def validate_detected_spec(cls, spec, extra_attributes): """Check that "compilers" is in the extra attributes.""" if "compilers" not in extra_attributes: - msg = ("the extra attribute 'compilers' must be set for " - "the detected spec '{0}'".format(spec)) + msg = "the extra attribute 'compilers' must be set for the detected spec '{0}'".format( + spec + ) raise InvalidSpecDetected(msg) .. _determine_spec_details: -^^^^^^^^^^^^^^^^^^^^^^^^^ Custom detection workflow ^^^^^^^^^^^^^^^^^^^^^^^^^ -In the rare case when the mechanisms described so far don't fit the -detection of a package, the implementation of all the methods above -can be disregarded and instead a custom ``determine_spec_details`` -method can be implemented directly in the package class (note that -the definition of the ``executables`` attribute is still required): +In the rare case when the mechanisms described so far don't fit the detection of a package, the implementation of all the methods above can be disregarded and instead a custom ``determine_spec_details`` method can be implemented directly in the package class (note that the definition of the ``executables`` attribute is still required): .. code-block:: python @@ -402,16 +363,13 @@ the definition of the ``executables`` attribute is still required): # return None or [] if none of the exes represent an instance of # the package. Return one or more Specs for each instance of the # package which is thought to be installed in the provided prefix + ... -This method takes as input a set of discovered executables (which match -those specified by the user) as well as a common prefix shared by all -of those executables. The function must return one or more :py:class:`spack.package.Spec` associated -with the executables (it can also return ``None`` to indicate that no -provided executables are associated with the package). +This method takes as input a set of discovered executables (which match those specified by the user) as well as a common prefix shared by all of those executables. +The function must return one or more :py:class:`spack.package.Spec` associated with the executables (it can also return ``None`` to indicate that no provided executables are associated with the package). -As an example, consider a made-up package called ``foo-package`` which -builds an executable called ``foo``. ``FooPackage`` would appear as -follows: +As an example, consider a made-up package called ``foo-package`` which builds an executable called ``foo``. +``FooPackage`` would appear as follows: .. 
code-block:: python @@ -427,8 +385,7 @@ follows: @classmethod def determine_spec_details(cls, prefix, exes_in_prefix): - candidates = list(x for x in exes_in_prefix - if os.path.basename(x) == "foo") + candidates = [x for x in exes_in_prefix if os.path.basename(x) == "foo"] if not candidates: return # This implementation is lazy and only checks the first candidate @@ -436,23 +393,15 @@ follows: exe = Executable(exe_path) output = exe("--version", output=str, error=str) version_str = ... # parse output for version string - return Spec.from_detection( - "foo-package@{0}".format(version_str) - ) + return Spec.from_detection("foo-package@{0}".format(version_str)) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Add detection tests to packages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To ensure that software is detected correctly for multiple configurations -and on different systems users can write a ``detection_test.yaml`` file and -put it in the package directory alongside the ``package.py`` file. -This YAML file contains enough information for Spack to mock an environment -and try to check if the detection logic yields the results that are expected. +To ensure that software is detected correctly for multiple configurations and on different systems users can write a ``detection_test.yaml`` file and put it in the package directory alongside the ``package.py`` file. +This YAML file contains enough information for Spack to mock an environment and try to check if the detection logic yields the results that are expected. -As a general rule, attributes at the top-level of ``detection_test.yaml`` -represent search mechanisms and they each map to a list of tests that should confirm -the validity of the package's detection logic. +As a general rule, attributes at the top-level of ``detection_test.yaml`` represent search mechanisms and they each map to a list of tests that should confirm the validity of the package's detection logic. The detection tests can be run with the following command: @@ -462,12 +411,10 @@ The detection tests can be run with the following command: Errors that have been detected are reported to screen. -"""""""""""""""""""""""""" Tests for PATH inspections """""""""""""""""""""""""" -Detection tests insisting on ``PATH`` inspections are listed under -the ``paths`` attribute: +Detection tests insisting on ``PATH`` inspections are listed under the ``paths`` attribute: .. code-block:: yaml @@ -483,15 +430,11 @@ the ``paths`` attribute: echo "InstalledDir: /usr/bin" platforms: ["linux", "darwin"] results: - - spec: 'llvm@3.9.1 +clang~lld~lldb' + - spec: "llvm@3.9.1 +clang~lld~lldb" -If the ``platforms`` attribute is present, tests are run only if the current host -matches one of the listed platforms. -Each test is performed by first creating a temporary directory structure as -specified in the corresponding ``layout`` and by then running -package detection and checking that the outcome matches the expected -``results``. The exact details on how to specify both the ``layout`` and the -``results`` are reported in the table below: +If the ``platforms`` attribute is present, tests are run only if the current host matches one of the listed platforms. +Each test is performed by first creating a temporary directory structure as specified in the corresponding ``layout`` and by then running package detection and checking that the outcome matches the expected ``results``. +The exact details on how to specify both the ``layout`` and the ``results`` are reported in the table below: .. 
list-table:: Test based on PATH inspections :header-rows: 1 @@ -525,26 +468,22 @@ package detection and checking that the outcome matches the expected - Nested dictionary with string as keys, and regular expressions as leaf values - No -""""""""""""""""""""""""""""""" Reuse tests from other packages """"""""""""""""""""""""""""""" -When using a custom repository, it is possible to customize a package that already exists in ``builtin`` -and reuse its external tests. To do so, just write a ``detection_test.yaml`` alongside the customized -``package.py`` with an ``includes`` attribute. For instance the ``detection_test.yaml`` for -``myrepo.llvm`` might look like: +When using a custom repository, it is possible to customize a package that already exists in ``builtin`` and reuse its external tests. +To do so, just write a ``detection_test.yaml`` alongside the customized ``package.py`` with an ``includes`` attribute. +For instance the ``detection_test.yaml`` for ``myrepo.llvm`` might look like: .. code-block:: yaml includes: - "builtin.llvm" -This YAML file instructs Spack to run the detection tests defined in ``builtin.llvm`` in addition to -those locally defined in the file. +This YAML file instructs Spack to run the detection tests defined in ``builtin.llvm`` in addition to those locally defined in the file. .. _abi_compatibility: ----------------------------- Specifying ABI Compatibility ---------------------------- @@ -572,12 +511,14 @@ The ``match_variants`` keyword can cover all single-value variants. .. code-block:: python - can_splice("foo@1.1", when="@1.2", match_variants=["bar"]) # any value for bar as long as they're the same - can_splice("foo@1.2", when="@1.3", match_variants="*") # any variant values if all single-value variants match + # any value for bar as long as they're the same + can_splice("foo@1.1", when="@1.2", match_variants=["bar"]) + + # any variant values if all single-value variants match + can_splice("foo@1.2", when="@1.3", match_variants="*") The concretizer will use ABI compatibility to determine automatic splices when :ref:`automatic splicing` is enabled. ------------------ Customizing Views ----------------- @@ -585,10 +526,9 @@ Customizing Views This is advanced functionality documented for completeness, and rarely needs customization. -Spack environments manage a view of their packages, which is a single directory -that merges all installed packages through symlinks, so users can easily access them. -The methods of ``PackageViewMixin`` can be overridden to customize how packages are added -to views. +Spack environments manage a view of their packages, which is a single directory that merges all installed packages through symlinks, so users can easily access them. +The methods of ``PackageViewMixin`` can be overridden to customize how packages are added to views. Sometimes it's impossible to get an application to work just through symlinking its executables, and patching is necessary. -For example, Python scripts in a ``bin`` directory may have a shebang that points to the Python interpreter in Python's install prefix, but it's more convenient to have the shebang point to the Python interpreter in the view, since that interpreter is aware of the Python packages in the view (the view is a virtual environment). -As a consequence, Python extension packages (those inheriting from ``PythonPackage``) override ``add_files_to_view`` in order to rewrite shebang lines. 
+For example, Python scripts in a ``bin`` directory may have a shebang that points to the Python interpreter in Python's install prefix and not to the Python interpreter in the view. +However, it's more convenient to have the shebang point to the Python interpreter in the view, since that interpreter can locate other Python packages in the view without ``PYTHONPATH`` being set. +Therefore, Python extension packages (those inheriting from ``PythonPackage``) override ``add_files_to_view`` in order to rewrite shebang lines. diff --git a/lib/spack/docs/packaging_guide_build.rst b/lib/spack/docs/packaging_guide_build.rst index 1883ca53970cda..b36a3322206c8b 100644 --- a/lib/spack/docs/packaging_guide_build.rst +++ b/lib/spack/docs/packaging_guide_build.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -16,7 +17,6 @@ - :doc:`3. Testing ` - :doc:`4. Advanced ` -====================================== Packaging Guide: customizing the build ====================================== @@ -25,7 +25,6 @@ In the second part, we will cover the installation procedure, build systems, and .. _installation_procedure: --------------------------------------- Overview of the installation procedure -------------------------------------- @@ -71,7 +70,6 @@ In general, the name and order in which the phases will be executed can be obtai An extensive list of available build systems and phases is provided in :ref:`installation_process`. ------------------------------ Controlling the build process ----------------------------- @@ -148,7 +146,7 @@ In any of the functions above, you can .. code-block:: python if self.spec.satisfies("+variant_name"): - ... + ... to check if a variant is enabled, or @@ -165,7 +163,6 @@ In any of the functions above, you can .. _installation_process: ------------------------ What are build systems? ----------------------- @@ -224,6 +221,7 @@ To use a particular build system, you need to import it in your ``package.py`` f from spack_repo.builtin.build_systems.cmake import CMakePackage + class MyPkg(CMakePackage): pass @@ -232,7 +230,6 @@ For a complete list of build systems and their specific helper functions and pro .. _spec-objects: ---------------------------------------- Configuring the build with spec objects --------------------------------------- @@ -246,7 +243,6 @@ Spack is unique in that it allows you to write a *single* ``package.py`` for all The central object in Spack that encodes the package's configuration is the **concrete spec**, which is available as ``self.spec`` in the package class. This is the object you need to query to make decisions about how to configure the build. -^^^^^^^^^^^^^^^^^^^^^^ Querying ``self.spec`` ^^^^^^^^^^^^^^^^^^^^^^ @@ -259,6 +255,7 @@ If you want to pass a flag to the configure script only if the package is built variant("foo", default=False, description="Enable foo feature") + def configure_args(self): args = [] if self.spec.satisfies("+foo"): @@ -273,6 +270,7 @@ For multi-valued variants, you can use the ``key=value`` syntax to test whether variant("threads", default="none", values=("pthreads", "openmp", "none"), multi=False, ...) 
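+    # multi=False: exactly one of the listed values is selected at concretization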
+ def configure_args(self): args = [] if self.spec.satisfies("threads=pthreads"): @@ -289,14 +287,15 @@ Even if *multiple* values are selected, you can still use ``key=value`` to test variant("languages", default="c,c++", values=("c", "c++", "fortran"), multi=True, ...) + def configure_args(self): args = [] if self.spec.satisfies("languages=c"): - args.append("--enable-c") + args.append("--enable-c") if self.spec.satisfies("languages=c++"): - args.append("--enable-c++") + args.append("--enable-c++") if self.spec.satisfies("languages=fortran"): - args.append("--enable-fortran") + args.append("--enable-fortran") return args Notice that many build systems provide helper functions to make the above code more concise. @@ -322,10 +321,9 @@ An example of using this is shown below: description="C++ standard", ) + def configure_args(self): - return [ - f"--with-cxxstd={self.spec.variants['cxxstd'].value}" - ] + return [f"--with-cxxstd={self.spec.variants['cxxstd'].value}"] **Versions**. Similarly, versions are often used to dynamically change the build configuration: @@ -400,7 +398,6 @@ To see what targets are available in your Spack installation, you can use the fo .. command-output:: spack arch --known-targets -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Referring to a dependency's prefix, libraries, and headers ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -474,10 +471,12 @@ In those cases, the build system could use some help, for which we give a few ex .. code-block:: python lapack_blas = spec["lapack"].libs + spec["blas"].libs - args.extend([ - f"-DMATH_LIBRARY_NAMES={';'.join(lapack_blas.names)}", - f"-DMATH_LIBRARY_DIRS={';'.join(lapack_blas.directories)}" - ]) + args.extend( + [ + f"-DMATH_LIBRARY_NAMES={';'.join(lapack_blas.names)}", + f"-DMATH_LIBRARY_DIRS={';'.join(lapack_blas.directories)}", + ] + ) 3. Search and link flags @@ -489,7 +488,6 @@ In those cases, the build system could use some help, for which we give a few ex .. _before_after_build_phases: ------------------------------ Before and after build phases ----------------------------- @@ -521,7 +519,6 @@ The function body should contain the actual instructions you want to run before .. _overriding-phases: ------------------------- Overriding a build phase ------------------------ @@ -536,6 +533,7 @@ In that case, you can use the generic ``Package`` class, which defines only a si from spack.package import * from spack_repo.builtin.build_systems.generic import Package + class MyPkg(Package): # Override the install phase @@ -547,8 +545,7 @@ The arguments are: ``self`` This is the package object, which extends ``CMakePackage``. - For API docs on Package objects, see - :py:class:`Package `. + For API docs on Package objects, see :py:class:`Package `. ``spec`` This is the concrete spec object created by Spack from an abstract spec supplied by the user. @@ -561,9 +558,11 @@ The arguments are: The function body should contain the actual build instructions, which typically involves: -1. Invoking the build system's commands such as ``make``, ``ninja``, ``python``, et cetera. See :ref:`running_build_executables` for how to do this. +1. Invoking the build system's commands such as ``make``, ``ninja``, ``python``, et cetera. + See :ref:`running_build_executables` for how to do this. 2. Copying files to the ``prefix`` directory, which is where Spack expects the package to be installed. - This can be done using Spack's built-in functions like ``install_tree()`` or ``install()``. 
See the :ref:`Spack's Python Package API ` for all convenience functions that can be used in the package class. + This can be done using Spack's built-in functions like ``install_tree()`` or ``install()``. + See the :ref:`Spack's Python Package API ` for all convenience functions that can be used in the package class. The arguments ``spec`` and ``prefix`` are passed only for convenience, as they always correspond to ``self.spec`` and ``self.spec.prefix`` respectively, as we have already seen in :ref:`the previous section `. @@ -573,7 +572,6 @@ The arguments ``spec`` and ``prefix`` are passed only for convenience, as they a .. _running_build_executables: -------------------------- Running build executables ------------------------- @@ -586,6 +584,7 @@ Spack makes some of these executables available as global functions, making it e from spack.package import * from spack_repo.builtin.build_systems.generic import Package + class MyPkg(Package): depends_on("make", type="build") @@ -632,7 +631,6 @@ All executables in Spack are instances of :class:`~spack.package.Executable`, se .. _attribute_parallel: -------------------------- Package-level parallelism ------------------------- @@ -648,10 +646,11 @@ If a package does not build properly in parallel, you can simply define ``parall For example: .. code-block:: python - :emphasize-lines: 3 + :emphasize-lines: 4 class ExamplePackage(MakefilePackage): """Example package that does not build in parallel.""" + parallel = False This ensures that any ``make`` or ``ninja`` invocation will *not* set the ``-j `` option, and the build will run sequentially. @@ -685,14 +684,13 @@ This global variable is an integer that specifies the number of jobs to run in p .. _python-package-api: --------------------------- Spack's Python Package API -------------------------- Whenever you implement :ref:`overriding phases ` or :ref:`before and after build phases `, you typically need to modify files, work with paths and run executables. Spack provides a number of convenience functions and classes of its own to make your life even easier, complementing the Python standard library. -All of the functionality in this section is made available by importing the ``spack.package`` module. +All of the functionality in this section is made available by importing the :mod:`spack.package` module. .. code-block:: python @@ -702,18 +700,15 @@ This is already part of the boilerplate for packages created with ``spack create .. _file-filtering: -^^^^^^^^^^^^^^^^^^^^^^^^ File filtering functions ^^^^^^^^^^^^^^^^^^^^^^^^ :py:func:`filter_file(regex, repl, *filenames, **kwargs) ` - Works like ``sed`` but with Python regular expression syntax. Takes - a regular expression, a replacement, and a set of files. ``repl`` - can be a raw string or a callable function. If it is a raw string, - it can contain ``\1``, ``\2``, etc. to refer to capture groups in - the regular expression. If it is a callable, it is passed the - Python ``MatchObject`` and should return a suitable replacement - string for the particular match. + Works like ``sed`` but with Python regular expression syntax. + Takes a regular expression, a replacement, and a set of files. + ``repl`` can be a raw string or a callable function. + If it is a raw string, it can contain ``\1``, ``\2``, etc. to refer to capture groups in the regular expression. + If it is a callable, it is passed the Python ``MatchObject`` and should return a suitable replacement string for the particular match. 
Examples: @@ -721,38 +716,29 @@ File filtering functions .. code-block:: python - filter_file(r"^\s*CC\s*=.*", "CC = " + spack_cc, "Makefile") + filter_file(r"^\s*CC\s*=.*", "CC = " + spack_cc, "Makefile") filter_file(r"^\s*CXX\s*=.*", "CXX = " + spack_cxx, "Makefile") filter_file(r"^\s*F77\s*=.*", "F77 = " + spack_f77, "Makefile") - filter_file(r"^\s*FC\s*=.*", "FC = " + spack_fc, "Makefile") + filter_file(r"^\s*FC\s*=.*", "FC = " + spack_fc, "Makefile") #. Replacing ``#!/usr/bin/perl`` with ``#!/usr/bin/env perl`` in ``bib2xhtml``: .. code-block:: python - filter_file(r"#!/usr/bin/perl", - "#!/usr/bin/env perl", prefix.bin.bib2xhtml) + filter_file(r"#!/usr/bin/perl", "#!/usr/bin/env perl", prefix.bin.bib2xhtml) - #. Switching the compilers used by ``mpich``'s MPI wrapper scripts from - ``cc``, etc. to the compilers used by the Spack build: + #. Switching the compilers used by ``mpich``'s MPI wrapper scripts from ``cc``, etc. to the compilers used by the Spack build: .. code-block:: python - filter_file("CC='cc'", "CC='%s'" % self.compiler.cc, - prefix.bin.mpicc) - - filter_file("CXX='c++'", "CXX='%s'" % self.compiler.cxx, - prefix.bin.mpicxx) + filter_file("CC='cc'", "CC='%s'" % self.compiler.cc, prefix.bin.mpicc) + filter_file("CXX='c++'", "CXX='%s'" % self.compiler.cxx, prefix.bin.mpicxx) :py:func:`change_sed_delimiter(old_delim, new_delim, *filenames) ` - Some packages, like TAU, have a build system that can't install - into directories with, e.g. "@" in the name, because they use - hard-coded ``sed`` commands in their build. + Some packages, like TAU, have a build system that can't install into directories with, e.g. "@" in the name, because they use hard-coded ``sed`` commands in their build. - ``change_sed_delimiter`` finds all ``sed`` search/replace commands - and changes the delimiter. e.g., if the file contains commands - that look like ``s///``, you can use this to change them to - ``s@@@``. + ``change_sed_delimiter`` finds all ``sed`` search/replace commands and changes the delimiter. + E.g., if the file contains commands that look like ``s///``, you can use this to change them to ``s@@@``. Example of changing ``s///`` to ``s@@@`` in TAU: @@ -762,7 +748,6 @@ File filtering functions change_sed_delimiter("@", ";", "utils/FixMakefile") change_sed_delimiter("@", ";", "utils/FixMakefile.sed.default") -^^^^^^^^^^^^^^ File functions ^^^^^^^^^^^^^^ @@ -770,65 +755,60 @@ File functions Get the n\ :sup:`th` ancestor of the directory ``dir``. :py:func:`can_access(path) ` - True if we can read and write to the file at ``path``. Same as - native Python ``os.access(file_name, os.R_OK|os.W_OK)``. + True if we can read and write to the file at ``path``. + Same as native Python ``os.access(file_name, os.R_OK|os.W_OK)``. :py:func:`install(src, dest) ` - Install a file to a particular location. For example, install a - header into the ``include`` directory under the install ``prefix``: + Install a file to a particular location. + For example, install a header into the ``include`` directory under the install ``prefix``: .. code-block:: python install("my-header.h", prefix.include) :py:func:`join_path(*paths) ` - An alias for ``os.path.join``. This joins paths using the OS path separator. + An alias for ``os.path.join``. + This joins paths using the OS path separator. :py:func:`mkdirp(*paths) ` - Create each of the directories in ``paths``, creating any parent - directories if they do not exist. + Create each of the directories in ``paths``, creating any parent directories if they do not exist. 
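+
+   For instance, a package might prepare documentation directories before installing files into them (the exact paths here are illustrative):
+
+   .. code-block:: python
+
+      mkdirp(prefix.share.doc, prefix.share.man.man1)
+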
:py:func:`working_dir(dirname, kwargs) ` - This is a Python `Context Manager - `_ that makes it - easier to work with subdirectories in builds. You use this with the - Python ``with`` statement to change into a working directory, and - when the with block is done, you change back to the original - directory. Think of it as a safe ``pushd`` / ``popd`` combination, - where ``popd`` is guaranteed to be called at the end, even if - exceptions are thrown. + This is a Python `Context Manager `_ that makes it easier to work with subdirectories in builds. + You use this with the Python ``with`` statement to change into a working directory, and when the with block is done, you change back to the original directory. + Think of it as a safe ``pushd`` / ``popd`` combination, where ``popd`` is guaranteed to be called at the end, even if exceptions are thrown. Example usage: - #. The ``libdwarf`` build first runs ``configure`` and ``make`` in a - subdirectory called ``libdwarf``. It then implements the - installation code itself. This is natural with ``working_dir``: + #. The ``libdwarf`` build first runs ``configure`` and ``make`` in a subdirectory called ``libdwarf``. + It then implements the installation code itself. + This is natural with ``working_dir``: .. code-block:: python with working_dir("libdwarf"): configure("--prefix=" + prefix, "--enable-shared") make() - install("libdwarf.a", prefix.lib) + install("libdwarf.a", prefix.lib) - #. Many CMake builds require that you build "out of source", that - is, in a subdirectory. You can handle creating and ``cd``'ing to - the subdirectory like the LLVM package does: + #. Many CMake builds require that you build "out of source", that is, in a subdirectory. + You can handle creating and ``cd``'ing to the subdirectory like the LLVM package does: .. code-block:: python with working_dir("spack-build", create=True): - cmake("..", - "-DLLVM_REQUIRES_RTTI=1", - "-DPYTHON_EXECUTABLE=/usr/bin/python", - "-DPYTHON_INCLUDE_DIR=/usr/include/python2.6", - "-DPYTHON_LIBRARY=/usr/lib64/libpython2.6.so", - *std_cmake_args) + cmake( + "..", + "-DLLVM_REQUIRES_RTTI=1", + "-DPYTHON_EXECUTABLE=/usr/bin/python", + "-DPYTHON_INCLUDE_DIR=/usr/include/python2.6", + "-DPYTHON_LIBRARY=/usr/lib64/libpython2.6.so", + *std_cmake_args + ) make() make("install") - The ``create=True`` keyword argument causes the command to create - the directory if it does not exist. + The ``create=True`` keyword argument causes the command to create the directory if it does not exist. :py:func:`touch(path) ` Create an empty file at ``path``. @@ -836,12 +816,10 @@ File functions .. _multimethods: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Multimethods and the ``@when`` decorator ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``@when`` annotation lets packages declare multiple versions of a method that will be called -depending on the package's spec. +The ``@when`` annotation lets packages declare multiple versions of a method that will be called depending on the package's spec. This can be useful to handle cases where configure options are entirely different depending on the version of the package, or when the package is built for different platforms. .. code-block:: python @@ -896,7 +874,6 @@ If no ``@when`` spec matches, the default method (the one without the ``@when`` .. _prefix-objects: -^^^^^^^^^^^^^^ Prefix objects ^^^^^^^^^^^^^^ @@ -925,7 +902,6 @@ If your file or directory contains dashes or dots, use ``join`` instead: .. 
_environment-variables: ---------------------- The build environment --------------------- @@ -972,7 +948,6 @@ This requires a section of its own, because there are multiple ways to deal with .. _setup-environment: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Package specific environment variables ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1022,14 +997,12 @@ This means that the former should only be used if the environment variables depe .. _setting-package-module-variables: --------------------------------- Setting package module variables -------------------------------- -Apart from modifying environment variables of the dependent package, you can also define Python -variables to be used by the dependent. This is done by implementing -:meth:`setup_dependent_package `. An -example of this can be found in the ``Python`` package: +Apart from modifying environment variables of the dependent package, you can also define Python variables to be used by the dependent. +This is done by implementing :meth:`setup_dependent_package `. +An example of this can be found in the ``Python`` package: .. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/python/package.py :pyobject: Python.setup_dependent_package @@ -1045,13 +1018,11 @@ This allows Python packages to directly use these variables: .. note:: - We recommend using ``setup_dependent_package`` sparingly, as it is not always clear where - global variables are coming from when editing a ``package.py`` file. + We recommend using ``setup_dependent_package`` sparingly, as it is not always clear where global variables are coming from when editing a ``package.py`` file. .. _compiler_flags: --------------- Compiler flags -------------- @@ -1067,7 +1038,8 @@ The main challenge for packagers is to ensure that these flags are combined and .. warning:: - A common pitfall when dealing with compiler flags in ``MakefilePackage`` and ``AutotoolsPackage`` is that the user and package author specified flags override the build system defaults. This can inadvertently lead to unoptimized builds. + A common pitfall when dealing with compiler flags in ``MakefilePackage`` and ``AutotoolsPackage`` is that the user and package author specified flags override the build system defaults. + This can inadvertently lead to unoptimized builds. For example, suppose a user requests ``spack install pkg cflags=-Wno-unused`` and the build system defaults to ``CFLAGS=-O2 -g``. If the package takes the user request literally and sets ``CFLAGS=-Wextra`` as an environment variable, then the user-specified flags may *override* the build system defaults, and the build would not be optimized: the ``-O2`` flag would be lost. Whether environment variables like ``CFLAGS`` lead to this problem depends on the build system, and may differ from package to package. @@ -1152,6 +1124,7 @@ To ensure that flags are always set as *environment variables*, you can use: from spack.package import * # for env_flags + class MyPackage(MakefilePackage): flag_handler = env_flags # Use environment variables for all flags @@ -1161,13 +1134,13 @@ To ensure that flags are always *passed to the build system*, you can use: from spack.package import * # for build_system_flags + class MyPackage(MakefilePackage): flag_handler = build_system_flags # Pass flags to the build system .. _compiler-wrappers: ---------------------------- Compiler wrappers and flags --------------------------- @@ -1183,7 +1156,6 @@ The ``compiler-wrapper`` package has several responsibilities: 2. 
Flags needed to locate headers and libraries (during the build as well as at runtime) 3. Target specific flags, like ``-march=x86-64-v3``, translated from the spec's ``target=`` variant. -^^^^^^^^^^^^^^^^^^^^^^ Automatic search flags ^^^^^^^^^^^^^^^^^^^^^^ @@ -1206,6 +1178,7 @@ For example, consider a ``libdwarf`` package that just depends on ``libelf`` and from spack.package import * from spack_repo.builtin.build_systems.autotools import AutotoolsPackage + class Libdwarf(AutotoolsPackage): url = "..." version("1.0", sha256="...") @@ -1219,7 +1192,6 @@ Because the compiler wrapper is set up to automatically include the ``-I`_ on Linux and macOS to make executables directly runnable after installation. @@ -1237,7 +1209,6 @@ If you use the ``CMakePackage``, Spack automatically sets the ``CMAKE_INSTALL_RP For packages that do not fit ``CMakePackage`` but still run ``cmake`` as part of the build, it is recommended to look at :meth:`spack_repo.builtin.build_systems.cmake.CMakeBuilder.std_args` on how to set the install RPATHs correctly. ---------------------- MPI support in Spack --------------------- @@ -1258,19 +1229,14 @@ MPI support in Spack currently sets `self.spec.mpicc` in `setup_dependent_package` to the C compiler of the dependent, which again is wrong because there are many dependents. -It is common for high-performance computing software/packages to use the -Message Passing Interface ( ``MPI``). As a result of concretization, a -given package can be built using different implementations of MPI such as -``OpenMPI``, ``MPICH`` or ``IntelMPI``. That is, when your package -declares that it ``depends_on("mpi")``, it can be built with any of these -``mpi`` implementations. In some scenarios, to configure a package, one -has to provide it with appropriate MPI compiler wrappers such as -``mpicc``, ``mpic++``. However, different implementations of ``MPI`` may -have different names for those wrappers. +It is common for high-performance computing software/packages to use the Message Passing Interface ( ``MPI``). +As a result of concretization, a given package can be built using different implementations of MPI such as ``OpenMPI``, ``MPICH`` or ``IntelMPI``. +That is, when your package declares that it ``depends_on("mpi")``, it can be built with any of these ``mpi`` implementations. +In some scenarios, to configure a package, one has to provide it with appropriate MPI compiler wrappers such as ``mpicc``, ``mpic++``. +However, different implementations of ``MPI`` may have different names for those wrappers. -Spack provides an idiomatic way to use MPI compilers in your package. To -use MPI wrappers to compile your whole build, do this in your -``install()`` method: +Spack provides an idiomatic way to use MPI compilers in your package. +To use MPI wrappers to compile your whole build, do this in your ``install()`` method: .. code-block:: python @@ -1279,56 +1245,39 @@ use MPI wrappers to compile your whole build, do this in your env["F77"] = spec["mpi"].mpif77 env["FC"] = spec["mpi"].mpifc -That's all. A longer explanation of why this works is below. +That's all. +A longer explanation of why this works is below. -We don't try to force any particular build method on packagers. The -decision to use MPI wrappers depends on the way the package is written, -on common practice, and on "what works". Loosely, there are three types -of MPI builds: +We don't try to force any particular build method on packagers. 
+The decision to use MPI wrappers depends on the way the package is written, on common practice, and on "what works". +Loosely, there are three types of MPI builds: - 1. Some build systems work well without the wrappers and can treat MPI - as an external library, where the person doing the build has to - supply includes/libs/etc. This is fairly uncommon. +1. Some build systems work well without the wrappers and can treat MPI as an external library, where the person doing the build has to supply includes/libs/etc. + This is fairly uncommon. - 2. Others really want the wrappers and assume you're using an MPI - "compiler" -- i.e., they have no mechanism to add MPI - includes/libraries/etc. +2. Others really want the wrappers and assume you're using an MPI "compiler" -- i.e., they have no mechanism to add MPI includes/libraries/etc. - 3. CMake's ``FindMPI`` needs the compiler wrappers, but it uses them to - extract ``-I`` / ``-L`` / ``-D`` arguments, then treats MPI like a - regular library. +3. CMake's ``FindMPI`` needs the compiler wrappers, but it uses them to extract ``-I`` / ``-L`` / ``-D`` arguments, then treats MPI like a regular library. -Note that some CMake builds fall into case 2 because they either don't -know about or don't like CMake's ``FindMPI`` support -- they just assume -an MPI compiler. Also, some autotools builds fall into case 3 (e.g., `here -is an autotools version of CMake's FindMPI -`_). +Note that some CMake builds fall into case 2 because they either don't know about or don't like CMake's ``FindMPI`` support -- they just assume an MPI compiler. +Also, some autotools builds fall into case 3 (e.g., `here is an autotools version of CMake's FindMPI `_). Given all of this, we leave the use of the wrappers up to the packager. Spack will support all three ways of building MPI packages. -^^^^^^^^^^^^^^^^^^^^^ Packaging Conventions ^^^^^^^^^^^^^^^^^^^^^ -As mentioned above, in the ``install()`` method, ``CC``, ``CXX``, -``F77``, and ``FC`` point to Spack's wrappers around the chosen compiler. -Spack's wrappers are not the MPI compiler wrappers, though they do -automatically add ``-I``, ``-L``, and ``-Wl,-rpath`` args for -dependencies in a similar way. The MPI wrappers are a bit different in -that they also add ``-l`` arguments for the MPI libraries, and some add -special ``-D`` arguments to trigger build options in MPI programs. +As mentioned above, in the ``install()`` method, ``CC``, ``CXX``, ``F77``, and ``FC`` point to Spack's wrappers around the chosen compiler. +Spack's wrappers are not the MPI compiler wrappers, though they do automatically add ``-I``, ``-L``, and ``-Wl,-rpath`` args for dependencies in a similar way. +The MPI wrappers are a bit different in that they also add ``-l`` arguments for the MPI libraries, and some add special ``-D`` arguments to trigger build options in MPI programs. -For case 1 above, you generally don't need to do more than patch your -Makefile or add configure args as you normally would. +For case 1 above, you generally don't need to do more than patch your Makefile or add configure args as you normally would. -For case 3, you don't need to do much of anything, as Spack puts the MPI -compiler wrappers in the PATH, and the build will find them and -interrogate them. +For case 3, you don't need to do much of anything, as Spack puts the MPI compiler wrappers in the PATH, and the build will find them and interrogate them. 
-For case 2, things are a bit more complicated, as you'll need to tell the -build to use the MPI compiler wrappers instead of Spack's compiler -wrappers. All it takes is some lines like this: +For case 2, things are a bit more complicated, as you'll need to tell the build to use the MPI compiler wrappers instead of Spack's compiler wrappers. +All it takes is some lines like this: .. code-block:: python @@ -1337,84 +1286,54 @@ wrappers. All it takes is some lines like this: env["F77"] = spec["mpi"].mpif77 env["FC"] = spec["mpi"].mpifc -Or, if you pass CC, CXX, etc. directly to your build with, e.g., -`--with-cc=`, you'll want to substitute `spec["mpi"].mpicc` in -there instead, e.g.: +Or, if you pass CC, CXX, etc. directly to your build with, e.g., ``--with-cc=``, you'll want to substitute ``spec["mpi"].mpicc`` in there instead, e.g.: .. code-block:: python - configure("--prefix=%s" % prefix, - "--with-cc=%s" % spec["mpi"].mpicc) + configure("--prefix=%s" % prefix, "--with-cc=%s" % spec["mpi"].mpicc) -Now, you may think that doing this will lose the includes, library paths, -and RPATHs that Spack's compiler wrappers get you, but we've actually set -things up so that the MPI compiler wrappers use Spack's compiler wrappers -when run from within Spack. So using the MPI wrappers should really be as -simple as the code above. +Now, you may think that doing this will lose the includes, library paths, and RPATHs that Spack's compiler wrappers get you, but we've actually set things up so that the MPI compiler wrappers use Spack's compiler wrappers when run from within Spack. +So using the MPI wrappers should really be as simple as the code above. -^^^^^^^^^^^^^^^^^^^^^ ``spec["mpi"]`` ^^^^^^^^^^^^^^^^^^^^^ Ok, so how does all this work? -If your package has a virtual dependency like ``mpi``, then referring to -``spec["mpi"]`` within ``install()`` will get you the concrete ``mpi`` -implementation in your dependency DAG. That is a spec object just like -the one passed to install, only the MPI implementations all set some -additional properties on it to help you out. E.g., in openmpi, you'll -find this: +If your package has a virtual dependency like ``mpi``, then referring to ``spec["mpi"]`` within ``install()`` will get you the concrete ``mpi`` implementation in your dependency DAG. +That is a spec object just like the one passed to install, only the MPI implementations all set some additional properties on it to help you out. +E.g., in openmpi, you'll find this: .. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/openmpi/package.py :pyobject: Openmpi.setup_dependent_package -That code allows the ``openmpi`` package to associate an ``mpicc`` property -with the ``openmpi`` node in the DAG, so that dependents can access it. -``mvapich2`` and ``mpich`` do similar things. So, no matter what MPI -you're using, spec["mpi"].mpicc gets you the location of the MPI -compilers. This allows us to have a fairly simple polymorphic interface -for information about virtual dependencies like MPI. +That code allows the ``openmpi`` package to associate an ``mpicc`` property with the ``openmpi`` spec in the DAG, so that dependents can access it. +``mvapich2`` and ``mpich`` do similar things. +So, no matter what MPI you're using, ``spec["mpi"].mpicc`` gets you the location of the MPI compilers. +This allows us to have a fairly simple polymorphic interface for information about virtual dependencies like MPI. 
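+
+For illustration, a dependent package could forward these attributes to its build system; a minimal sketch (the ``MPICC``/``MPIFC`` configure variables are hypothetical and depend on the package's build system):
+
+.. code-block:: python
+
+   def configure_args(self):
+       # spec["mpi"] resolves to whichever MPI implementation was concretized
+       mpi = self.spec["mpi"]
+       return ["MPICC={0}".format(mpi.mpicc), "MPIFC={0}".format(mpi.mpifc)]
+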
-^^^^^^^^^^^^^^^^^^^^^ Wrapping wrappers ^^^^^^^^^^^^^^^^^^^^^ -Spack likes to use its own compiler wrappers to make it easy to add -``RPATHs`` to builds, and to try hard to ensure that your builds use the -right dependencies. This doesn't play nicely by default with MPI, so we -have to do a couple of tricks. - - 1. If we build MPI with Spack's wrappers, mpicc and friends will be - installed with hard-coded paths to Spack's wrappers, and using them - from outside of Spack will fail because they only work within Spack. - To fix this, we patch mpicc and friends to use the regular - compilers. Look at the filter_compilers method in mpich, openmpi, - or mvapich2 for details. - - 2. We still want to use the Spack compiler wrappers when Spack is - calling mpicc. Luckily, wrappers in all mainstream MPI - implementations provide environment variables that allow us to - dynamically set the compiler to be used by mpicc, mpicxx, etc. - Spack's build environment - sets ``MPICC``, ``MPICXX``, etc. for mpich derivatives and - ``OMPI_CC``, ``OMPI_CXX``, etc. for OpenMPI. This makes the MPI - compiler wrappers use the Spack compiler wrappers so that your - dependencies still get proper RPATHs even if you use the MPI - wrappers. +Spack likes to use its own compiler wrappers to make it easy to add ``RPATHs`` to builds, and to try hard to ensure that your builds use the right dependencies. +This doesn't play nicely by default with MPI, so we have to do a couple of tricks. + +1. If we build MPI with Spack's wrappers, mpicc and friends will be installed with hard-coded paths to Spack's wrappers, and using them from outside of Spack will fail because they only work within Spack. + To fix this, we patch mpicc and friends to use the regular compilers. + Look at the filter_compilers method in mpich, openmpi, or mvapich2 for details. + +2. We still want to use the Spack compiler wrappers when Spack is calling mpicc. + Luckily, wrappers in all mainstream MPI implementations provide environment variables that allow us to dynamically set the compiler to be used by mpicc, mpicxx, etc. + Spack's build environment sets ``MPICC``, ``MPICXX``, etc. for mpich derivatives and ``OMPI_CC``, ``OMPI_CXX``, etc. for OpenMPI. + This makes the MPI compiler wrappers use the Spack compiler wrappers so that your dependencies still get proper RPATHs even if you use the MPI wrappers. -^^^^^^^^^^^^^^^^^^^^^ MPI on Cray machines ^^^^^^^^^^^^^^^^^^^^^ -The Cray programming environment notably uses ITS OWN compiler wrappers, -which function like MPI wrappers. On Cray systems, the ``CC``, ``cc``, -and ``ftn`` wrappers ARE the MPI compiler wrappers, and it's assumed that -you'll use them for all of your builds. So on Cray we don't bother with -``mpicc``, ``mpicxx``, etc., Spack MPI implementations set -``spec["mpi"].mpicc`` to point to Spack's wrappers, which wrap the Cray -wrappers, which wrap the regular compilers and include MPI flags. That -may seem complicated, but for packagers, that means the same code for -using MPI wrappers will work, even on a Cray: +The Cray programming environment notably uses its own compiler wrappers, which function like MPI wrappers. +On Cray systems, the ``CC``, ``cc``, and ``ftn`` wrappers ARE the MPI compiler wrappers, and it's assumed that you'll use them for all of your builds. +So on Cray we don't bother with ``mpicc``, ``mpicxx``, etc., Spack MPI implementations set ``spec["mpi"].mpicc`` to point to Spack's wrappers, which wrap the Cray wrappers, which wrap the regular compilers and include MPI flags. 
+That may seem complicated, but for packagers, that means the same code for using MPI wrappers will work, even on a Cray: .. code-block:: python @@ -1424,7 +1343,6 @@ This is because on Cray, ``spec["mpi"].mpicc`` is just ``spack_cc``. .. _packaging-workflow: -------------------------------- Packaging workflow and commands ------------------------------- @@ -1449,7 +1367,6 @@ The location of the build directory is printed in the build output, but you can $ pwd /tmp/spack-stage/spack-stage-mypackage-1-2-3-abcdef -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Inspecting the build environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1464,7 +1381,6 @@ The command is a convenient way to start a subshell with the build environment variables set up. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Keeping the stage directory on success ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1486,13 +1402,12 @@ Once done, you could remove all sources and build directories with: $ spack clean --stage -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Keeping the install prefix on failure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Conversely, if a build fails but *has* installed some files, you may want to keep the install prefix to diagnose the issue. -By default, ``spack install`` will delete the install directory if anything fails during build. +By default, ``spack install`` deletes the install directory if anything fails during build. The ``--keep-prefix`` option allows you to keep the install prefix regardless of the build outcome. @@ -1500,7 +1415,8 @@ The ``--keep-prefix`` option allows you to keep the install prefix regardless of $ spack install --keep-prefix -^^^^^^^^^^^^^^^^^^^^^ +.. _cmd-spack-graph: + Understanding the DAG ^^^^^^^^^^^^^^^^^^^^^ diff --git a/lib/spack/docs/packaging_guide_creation.rst b/lib/spack/docs/packaging_guide_creation.rst index b9c63273de5ca9..c4669e89e11560 100644 --- a/lib/spack/docs/packaging_guide_creation.rst +++ b/lib/spack/docs/packaging_guide_creation.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -16,7 +17,6 @@ - :doc:`3. Testing ` - :doc:`4. Advanced ` -=================================== Packaging Guide: defining a package =================================== @@ -34,14 +34,13 @@ Essentially, a package translates a spec into build logic. It also allows the packager to write spec-specific tests of the installed software. Packages in Spack are written in pure Python, so you can do anything in Spack that you can do in Python. -Python was chosen as the implementation language for two reasons. +Python was chosen as the implementation language for two reasons. First, Python is ubiquitous in the scientific software community. Second, it has many powerful features to help make package writing easy. .. _setting-up-for-package-development: ----------------------------------- Setting up for package development ---------------------------------- @@ -53,21 +52,21 @@ Once you have a fork, clone it: .. code-block:: console - git clone --depth=100 git@github.com:YOUR-USERNAME/spack-packages.git ~/spack-packages - cd ~/spack-packages - git remote add --track develop upstream git@github.com:spack/spack-packages.git + $ git clone --depth=100 git@github.com:YOUR-USERNAME/spack-packages.git ~/spack-packages + $ cd ~/spack-packages + $ git remote add --track develop upstream git@github.com:spack/spack-packages.git Then configure Spack to use your local repository: .. 
code-block:: console - spack repo set --destination ~/spack-packages builtin + $ spack repo set --destination ~/spack-packages builtin Before starting work, it's useful to create a new branch in your local repository. .. code-block:: console - git checkout -b add-my-package + $ git checkout -b add-my-package Lastly, verify that Spack is picking up the right repository by checking the location of a known package, like ``zlib``: @@ -78,7 +77,6 @@ Lastly, verify that Spack is picking up the right repository by checking the loc With this setup, you can conveniently access the package files, and contribute changes back to Spack. ----------------------- Structure of a package ---------------------- @@ -97,23 +95,31 @@ The typical structure of a package is as follows: # import Package API from spack.package import * + class Example(CMakePackage): """Example package""" # package description - # metadata and directives + # Metadata and Directives homepage = "https://example.com" url = "https://example.com/example/v2.4.0.tar.gz" maintainers("github_user1", "github_user2") + license("UNKNOWN", checked_by="github_user1") + + # version directives listed in order with the latest first version("2.4.0", sha256="845ccd79ed915fa2dedf3b2abde3fffe7f9f5673cc51be88e47e6432bd1408be") version("2.3.0", sha256="cd3274e0abcbc2dfb678d87595e9d3ab1c6954d7921d57a88a23cf4981af46c9") + # variant directives expose build options variant("feature", default=False, description="Enable a specific feature") + variant("codec", default=False, description="Build the CODEC executables") + # dependency directives declare required software + depends_on("cxx", type="build") depends_on("libfoo", when="+feature") - # build instructions + # Build Instructions def cmake_args(self): return [ self.define_from_variant("BUILD_CODEC", "codec"), @@ -123,7 +129,7 @@ The typical structure of a package is as follows: The package class is named after the package, and can roughly be divided into two parts: -* **metadata and directives**: attributes and directives that describe the package, such as its homepage, versions, maintainers, dependencies, and variants. +* **metadata and directives**: attributes and directives that describe the package, such as its homepage, maintainers, license, variants, and dependencies. This is the declarative part of the package. * **build instructions**: methods that define how to build and install the package, such as `cmake_args()`. This is the imperative part of the package. @@ -131,7 +137,6 @@ The package class is named after the package, and can roughly be divided into tw In this part of the packaging guide we will cover the **metadata and directives** in detail. In the :doc:`second part `, we will cover the **build instructions**, including how to write custom build logic for different build systems. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Package Names and the Package Directory ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -157,7 +162,6 @@ Usually the package name coincides with the directory name on the filesystem: th This ensures that every package directory is a valid Python module name. -^^^^^^^^^^^^^^^^^^^ Package class names ^^^^^^^^^^^^^^^^^^^ @@ -175,7 +179,8 @@ Here are some examples: In general, you won't have to remember this naming convention because :ref:`cmd-spack-create` and :ref:`cmd-spack-edit` handle the details for you. ------------------------------ +.. 
_creating-and-editing-packages: + Creating and editing packages ----------------------------- @@ -186,7 +191,6 @@ It can also help you edit existing packages, so you don't have to navigate to th .. _controlling-the-editor: -^^^^^^^^^^^^^^^^^^^^^^ Controlling the editor ^^^^^^^^^^^^^^^^^^^^^^ @@ -209,7 +213,6 @@ If Spack finds none of these variables set, it will look for ``vim``, ``vi``, `` .. _cmd-spack-create: -^^^^^^^^^^^^^^^^^^^^^ Creating new packages ^^^^^^^^^^^^^^^^^^^^^ @@ -286,6 +289,11 @@ Spack automatically creates a directory in the appropriate repository, generates # notify when the package is updated. # maintainers("github_user1", "github_user2") + # FIXME: Add the SPDX identifier of the project's license below. + # See https://spdx.org/licenses/ for a list. Upon manually verifying + # the license, set checked_by to your Github username. + license("UNKNOWN", checked_by="github_user1") + version("6.2.1", sha256="eae9326beb4158c386e39a356818031bd28f3124cf915f8c5b1dc4c7a36b4d7c") # FIXME: Add dependencies if required. @@ -309,12 +317,13 @@ The default installation procedure for a package subclassing the ``AutotoolsPack make check make install -For most Autotools packages, this is sufficient. If you need to add additional arguments to the ``./configure`` call, add them via the ``configure_args`` function. +For most Autotools packages, this is sufficient. +If you need to add additional arguments to the ``./configure`` call, add them via the ``configure_args`` function. In the generated package, the download ``url`` attribute is already set. All the things you still need to change are marked with ``FIXME`` labels. -You can delete the commented instructions between the license and the first import statement after reading them. -The rest of the tasks you need to do are as follows: +You can delete the commented instructions between the Spack license and the first import statement after reading them. +The remaining tasks to complete are as follows: #. Add a description. @@ -327,24 +336,31 @@ The rest of the tasks you need to do are as follows: #. Add a comma-separated list of maintainers. - Add a list of GitHub accounts of people who want to be notified any time the package is modified. See :ref:`package_maintainers`. + Add a list of GitHub accounts of people who want to be notified any time the package is modified. + See :ref:`maintainers`. + +#. Change the ``license`` to the correct license. + + The ``license`` is displayed when users run ``spack info`` so that they can learn more about your package. + See :ref:`package_license`. #. Add ``depends_on()`` calls for the package's dependencies. - ``depends_on`` tells Spack that other packages need to be built and installed before this one. See :ref:`dependencies`. + ``depends_on`` tells Spack that other packages need to be built and installed before this one. + See :ref:`dependencies`. #. Get the installation working. Your new package may require specific flags during ``configure``. These can be added via ``configure_args``. + If no arguments are needed at this time, change the implementation to ``return []``. Specifics will differ depending on the package and its build system. :ref:`installation_process` is covered in detail later. -"""""""""""""""""""""""""""""""" Further package creation options """""""""""""""""""""""""""""""" -If you do not have a URL to a tarball, you can still use ``spack create`` to generate the boilerplate for a package. 
+If you do not have a tarball URL, you can still use ``spack create`` to generate the boilerplate for a package. .. code-block:: console @@ -362,7 +378,6 @@ A complete list of available build system templates can be found by running ``sp .. _cmd-spack-edit: -^^^^^^^^^^^^^^^^^^^^^^^^^ Editing existing packages ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -398,7 +413,6 @@ without specifying a package name, which will open the directory containing all Finally, the commands ``spack location --repo`` and ``spack cd --repo`` help you navigate to the root of the package repository. ------------------------- Source code and versions ------------------------ @@ -407,7 +421,6 @@ Typically every package version has a corresponding source code archive, which S .. _versions-and-fetching: -^^^^^^^^^^^^^^^^^ Versions and URLs ^^^^^^^^^^^^^^^^^ @@ -429,33 +442,40 @@ The most straightforward way to add new versions to your package is to add a lin .. note:: - :ref:`Bundle packages ` do not have source code so - there is nothing to fetch. Consequently, their version directives - consist solely of the version name (e.g., ``version("202309")``). + :ref:`Bundle packages ` do not have source code so there is nothing to fetch. + Consequently, their version directives consist solely of the version name (e.g., ``version("202309")``). Notice how you only have to specify the URL once, in the ``url`` field. Spack is smart enough to extrapolate the URL for each version based on the version number and download version ``8.2.0`` of the ``Foo`` package above from ``http://example.com/foo-8.2.0.tar.gz``. -If the URL is particularly complicated or changes based on the release, you can override the default URL generation algorithm by defining your own ``url_for_version()`` function. +If the URL is particularly complicated or changes based on the release, you can override the default URL generation algorithm by defining your own :py:meth:`~spack.package.PackageBase.url_for_version` function. For example, the download URL for OpenMPI contains the ``major.minor`` version in one spot and the ``major.minor.patch`` version in another: .. code-block:: text https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.1.tar.bz2 -In order to handle this, you can define a ``url_for_version()`` function -like so: +In order to handle this, you can define a ``url_for_version()`` function like so: .. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/openmpi/package.py :pyobject: Openmpi.url_for_version -With the use of this ``url_for_version()``, Spack knows to download OpenMPI ``2.1.1`` -from http://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.1.tar.bz2 -but download OpenMPI ``1.10.7`` from http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.7.tar.bz2. +With the use of this ``url_for_version()``, Spack knows to download OpenMPI ``2.1.1`` from + +.. code-block:: text + + http://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.1.tar.bz2 + +but download OpenMPI ``1.10.7`` from + +.. code-block:: text + + http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.7.tar.bz2 You'll notice that OpenMPI's ``url_for_version()`` function makes use of a special ``Version`` function called ``up_to()``. -When you call ``version.up_to(2)`` on a version like ``1.10.0``, it returns ``1.10``. ``version.up_to(1)`` would return ``1``. +When you call ``version.up_to(2)`` on a version like ``1.10.0``, it returns ``1.10``. +``version.up_to(1)`` would return ``1``. 
This can be very useful for packages that place all ``X.Y.*`` versions in a single directory and then places all ``X.Y.Z`` versions in a sub-directory.

There are a few ``Version`` properties you should be aware of.
@@ -475,13 +495,14 @@ version.joined 123

.. note::

-   Python properties don't need parentheses. ``version.dashed`` is correct.
+   Python properties don't need parentheses.
+   ``version.dashed`` is correct.
   ``version.dashed()`` is incorrect.

In addition, these version properties can be combined with ``up_to()``.
For example:

-.. code-block:: python
+.. code-block:: pycon

   >>> version = Version("1.2.3")
   >>> version.up_to(2).dashed
@@ -520,7 +541,6 @@ With this method, you only need to specify the ``url`` when the URL changes.

.. _checksum-verification:

-^^^^^^^^^^^^^^^^^^^^^
Checksum verification
^^^^^^^^^^^^^^^^^^^^^

@@ -546,13 +566,11 @@ Therefore, Spack requires that all URL downloads have a checksum, and refuses to

.. _cmd-spack-checksum:

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Automatically adding new versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The ``spack checksum`` command can be used to automate the process of adding new versions to a package, assuming the package's download URLs follow a consistent pattern.

-""""""""""""""""""
``spack checksum``
""""""""""""""""""

@@ -597,10 +615,42 @@ So, if the sources are at ``http://example.com/downloads/foo-1.0.tar.gz``, Spack

If you need to search another path for download links, you can supply some extra attributes that control how your package finds new versions.
See the documentation on :ref:`attribute_list_url` and :ref:`attribute_list_depth`.

+.. _git_version_provenance:
+
+Git version provenance
+^^^^^^^^^^^^^^^^^^^^^^
+
+Checksummed assets are preferred, but there are a few notable exceptions, such as git branches and tags (e.g., ``pkg@develop``).
+These versions do not naturally have source provenance because they refer to a range of commits (branches) or can be changed outside the Spack packaging infrastructure (tags).
+Without source provenance, we cannot have full provenance.
+
+Spack has a reserved variant to allow users to complete provenance for these cases: ``pkg@develop commit=``.
+The ``commit`` variant must be supplied using the full 40-character commit SHA.
+Using a partial commit SHA or assigning the ``commit`` variant to a version that is not using a branch or tag reference will lead to an error during concretization.
+
+Spack will attempt to establish git version provenance by looking up commit SHAs for branch- and tag-based versions during concretization.
+There are three sources that it uses.
+In order, they are:
+
+1. The local cached downloads (already cached source code for the version needing provenance)
+2. Source mirrors (compressed archives of the source code)
+3. The git URL provided in the package definition
+
+If Spack is unable to determine what the commit should be during concretization, a warning will be issued.
+Users may also specify which commit SHA they want with the spec, since it is simply a variant.
+In this case, or in the case of develop specs (see :ref:`cmd-spack-develop`), Spack will skip attempts to assign the commit SHA automatically.
+
+.. note::
+
+   Users wanting to track the latest commits from the internet should utilize ``spack clean --downloads`` prior to concretization to clean out cached downloads that would otherwise short-circuit internet queries.
+   Disabling source mirrors or ensuring they don't contain branch- or tag-based versions will also be necessary.
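+
+   As a sketch of that workflow (the 40-character SHA below is a made-up placeholder, not a real commit), a user could run:
+
+   .. code-block:: spec
+
+      $ spack clean --downloads
+      $ spack install pkg@develop commit=0123456789abcdef0123456789abcdef01234567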
+ + Above all else, the most robust way to ensure binaries have their desired commits is to provide the SHAs via user-specs or config i.e. ``commit=``. + + .. _attribute_list_url: -"""""""""""" ``list_url`` """""""""""" @@ -612,12 +662,11 @@ For example, the following package has a ``list_url`` attribute that points to a class Example(Package): homepage = "http://www.example.com" - url = "http://www.example.com/libexample-1.2.3.tar.gz" + url = "http://www.example.com/libexample-1.2.3.tar.gz" list_url = "http://www.example.com/downloads/all-versions.html" .. _attribute_list_depth: -"""""""""""""" ``list_depth`` """""""""""""" @@ -635,17 +684,18 @@ So, we need to add a ``list_url`` *and* a ``list_depth`` attribute: :linenos: class Mpich(Package): - homepage = "http://www.mpich.org" - url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz" - list_url = "http://www.mpich.org/static/downloads/" + homepage = "http://www.mpich.org" + url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz" + list_url = "http://www.mpich.org/static/downloads/" list_depth = 1 -By default, Spack only looks at the top-level page available at ``list_url``. +By default, Spack only looks at the top-level page available at ``list_url``. ``list_depth = 1`` tells it to follow up to 1 level of links from the top-level page. Note that here, this implies 1 level of subdirectories, as the ``mpich`` website is structured much like a filesystem. But ``list_depth`` really refers to link depth when spidering the page. -^^^^^^^^^^^^^^^^^^^^^^^ +.. _mirrors-of-the-main-url: + Mirrors of the main URL ^^^^^^^^^^^^^^^^^^^^^^^ @@ -655,10 +705,7 @@ Spack supports listing mirrors of the main URL in a package by defining the ``ur class Foo(Package): - urls = [ - "http://example.com/foo-1.0.tar.gz", - "http://mirror.com/foo-1.0.tar.gz" - ] + urls = ["http://example.com/foo-1.0.tar.gz", "http://mirror.com/foo-1.0.tar.gz"] instead of just a single ``url``. This attribute is a list of possible URLs that will be tried in order when fetching packages. @@ -673,7 +720,6 @@ For that, Spack goes a step further and defines a mixin class that takes care of .. _preferred_versions: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Preferring versions over others ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -692,7 +738,6 @@ See the section on :ref:`version ordering ` for more details .. _deprecate: -^^^^^^^^^^^^^^^^^^^^^^^^ Deprecating old versions ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -722,7 +767,7 @@ This has two effects. First, ``spack info`` will no longer advertise that version. Second, commands like ``spack install`` that fetch the package will require user approval: -.. code-block:: console +.. code-block:: spec $ spack install openssl@1.0.1e ==> Warning: openssl@1.0.1e is deprecated and may be removed in a future Spack release. @@ -732,7 +777,8 @@ Second, commands like ``spack install`` that fetch the package will require user If you use ``spack install --deprecated``, this check can be skipped. This also applies to package recipes that are renamed or removed. -You should first deprecate all versions before removing a package. If you need to rename it, you can deprecate the old package and create a new package at the same time. +You should first deprecate all versions before removing a package. +If you need to rename it, you can deprecate the old package and create a new package at the same time. Version deprecations should always last at least one release cycle of the builtin package repository before the version is completely removed. 
No version should be removed without such a deprecation process. @@ -743,7 +789,6 @@ However, you may be asked to help maintain this version of the package if the cu .. _version-comparison: -^^^^^^^^^^^^^^^^ Version ordering ^^^^^^^^^^^^^^^^ @@ -753,8 +798,7 @@ What latest means is determined by the version comparison rules defined in Spack Spack imposes a generic total ordering on the set of versions, independently from the package they are associated with. Most Spack versions are numeric, a tuple of integers; for example, ``0.1``, ``6.96``, or ``1.2.3.1``. -In this very basic case, version comparison is lexicographical on the numeric components: -``1.2 < 1.2.1 < 1.2.2 < 1.10``. +In this very basic case, version comparison is lexicographical on the numeric components: ``1.2 < 1.2.1 < 1.2.2 < 1.10``. Other separators for components are also possible, for example ``2025-03-01 < 2025-06``. @@ -781,8 +825,7 @@ A version string is split into a list of components based on delimiters such as The components are split into the **release** and a possible **pre-release** (if the last component is numeric and the second to last is a string ``alpha``, ``beta`` or ``rc``). The release components are ordered lexicographically, with comparison between different types of components as follows: -#. The following special strings are considered larger than any other numeric or non-numeric version component, and satisfy the following - order between themselves: ``develop > main > master > head > trunk > stable``. +#. The following special strings are considered larger than any other numeric or non-numeric version component, and satisfy the following order between themselves: ``develop > main > master > head > trunk > stable``. #. Numbers are ordered numerically, are less than special strings, and larger than other non-numeric components. @@ -801,18 +844,19 @@ The logic behind this sort order is two-fold: .. _vcs-fetch: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Fetching from code repositories ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For some packages, source code is provided in a Version Control System (VCS) repository rather than in a tarball. Spack can fetch packages from VCS repositories. -Currently, Spack supports fetching with `Git `_, `Mercurial (hg) `_, `Subversion (svn) `_, and `CVS (cvs) `_. +Currently, Spack supports fetching with :ref:`Git `, :ref:`Mercurial (hg) `, :ref:`Subversion (svn) `, and :ref:`CVS (cvs) `. In all cases, the destination is the standard stage source path. -To fetch a package from a source repository, Spack needs to know which VCS to use and where to download from. Much like with ``url``, package authors can specify a class-level ``git``, ``hg``, ``svn``, or ``cvs`` attribute containing the correct download location. +To fetch a package from a source repository, Spack needs to know which VCS to use and where to download from. +Much like with ``url``, package authors can specify a class-level ``git``, ``hg``, ``svn``, or ``cvs`` attribute containing the correct download location. -Many packages developed with Git have both a Git repository as well as release tarballs available for download. Packages can define both a class-level tarball URL and VCS. +Many packages developed with Git have both a Git repository as well as release tarballs available for download. +Packages can define both a class-level tarball URL and VCS. For example: .. 
code-block:: python @@ -820,30 +864,25 @@ For example: class Trilinos(CMakePackage): homepage = "https://trilinos.org/" - url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-12-12-1.tar.gz" - git = "https://github.com/trilinos/Trilinos.git" + url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-12-12-1.tar.gz" + git = "https://github.com/trilinos/Trilinos.git" version("develop", branch="develop") - version("master", branch="master") + version("master", branch="master") version("12.12.1", sha256="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7") version("12.10.1", sha256="0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f") - version("12.8.1", sha256="a3a5e715f0cc574a73c3f9bebb6bc24f32ffd5b67b387244c2c909da779a1478") + version("12.8.1", sha256="a3a5e715f0cc574a73c3f9bebb6bc24f32ffd5b67b387244c2c909da779a1478") -If a package contains both a ``url`` and ``git`` class-level attribute, -Spack decides which to use based on the arguments to the ``version()`` -directive. Versions containing a specific branch, tag, commit or revision are -assumed to be for VCS download methods, while versions containing a -checksum are assumed to be for URL download methods. +If a package contains both a ``url`` and ``git`` class-level attribute, Spack decides which to use based on the arguments to the ``version()`` directive. +Versions containing a specific branch, tag, commit or revision are assumed to be for VCS download methods, while versions containing a checksum are assumed to be for URL download methods. -Like ``url``, if a specific version downloads from a different repository -than the default repo, it can be overridden with a version-specific argument. +Like ``url``, if a specific version downloads from a different repository than the default repo, it can be overridden with a version-specific argument. .. note:: - In order to reduce ambiguity, each package can only have a single VCS - top-level attribute in addition to ``url``. In the rare case that a - package uses multiple VCS, a fetch strategy can be specified for each - version. For example, the ``rockstar`` package contains: + In order to reduce ambiguity, each package can only have a single VCS top-level attribute in addition to ``url``. + In the rare case that a package uses multiple VCS, a fetch strategy can be specified for each version. + For example, the ``rockstar`` package contains: .. code-block:: python @@ -857,7 +896,6 @@ than the default repo, it can be overridden with a version-specific argument. .. _git-fetch: -""""""" Git """"""" @@ -868,14 +906,10 @@ Git fetching supports the following parameters to the ``version`` directive: * ``tag``: Name of a :ref:`tag ` to fetch. * ``commit``: SHA hash (or prefix) of a :ref:`commit ` to fetch. * ``submodules``: Also fetch :ref:`submodules ` recursively when checking out this repository. -* ``submodules_delete``: A list of submodules to forcibly delete from the repository - after fetching. Useful if a version in the repository has submodules that - have disappeared/are no longer accessible. -* ``get_full_repo``: Ensure the full git history is checked out with all remote - branch information. Normally (``get_full_repo=False``, the default), the git - option ``--depth 1`` will be used if the version of git and the specified - transport protocol support it, and ``--single-branch`` will be used if the - version of git supports it. +* ``submodules_delete``: A list of submodules to forcibly delete from the repository after fetching. 
+ Useful if a version in the repository has submodules that have disappeared/are no longer accessible. +* ``get_full_repo``: Ensure the full git history is checked out with all remote branch information. + Normally (``get_full_repo=False``, the default), the git option ``--depth 1`` will be used if the version of git and the specified transport protocol support it, and ``--single-branch`` will be used if the version of git supports it. * ``git_sparse_paths``: Only clone the provided :ref:`relative paths `. The destination directory for the clone is the standard stage source path. @@ -892,7 +926,7 @@ The destination directory for the clone is the standard stage source path. **Trusted Downloads.** It is critical from a security and reproducibility standpoint that Spack be able to verify the downloaded source. - Providing the full ``commit`` SHA hash allows for Spack to preserve binary provenance for all binaries since git commits are guaranteed to be unique points in the git history. + Providing the full ``commit`` SHA hash allows for Spack to preserve provenance for all binaries since git commits are guaranteed to be unique points in the git history. Whereas, the mutable nature of branches and tags cannot provide such a guarantee. A git download *is trusted* only if the full commit SHA is specified. @@ -926,7 +960,7 @@ Default branch .. _git-branches: Branches - To fetch a particular branch, use the ``branch`` parameter, preferrably with the same name as the version. + To fetch a particular branch, use the ``branch`` parameter, preferably with the same name as the version. For example, .. code-block:: python @@ -973,7 +1007,7 @@ Commits .. code-block:: python version("2014-10-08", commit="1e6ef73d93a28240f954513bc4c2ed46178fa32b") - version("1.0.4", tag="v1.0.4", commit="420136f6f1f26050d95138e27cf8bc905bc5e7f52") + version("1.0.4", tag="v1.0.4", commit="420136f6f1f26050d95138e27cf8bc905bc5e7f52") It may be useful to provide a saner version for commits like this, e.g., you might use the date as the version, as done in the first example above. Or, if you know the commit at which a release was cut, you can use the release version. @@ -1013,8 +1047,8 @@ Submodules return submodules - class MyPackage(Package): - version("1.1.0", commit="907d5f40d653a73955387067799913397807adf3", submodules=submodules) + class MyPackage(Package): + version("1.1.0", commit="907d5f40d653a73955387067799913397807adf3", submodules=submodules) For more information about git submodules see the man page of git: ``man git-submodule``. @@ -1052,24 +1086,26 @@ Sparse-Checkout results in the files from the top level directory of the repository and the contents of the ``doe`` and ``rae`` relative paths within the repository to be cloned. - Alternatively, you can provide the paths to the version directive argument using a callable function whose return value is a list for paths. + Alternatively, you can provide the paths to the version directive argument using a callable function whose return value is a list for paths. For example: .. 
code-block:: python

-      def sparse_path_function(package)
-          paths = ["doe", "rae", "me/file.cpp"]
-          if package.spec.version > Version("1.2.0"):
-              paths.extend(["fae"])
-          return paths
+      def sparse_path_function(package):
+          paths = ["doe", "rae", "me/file.cpp"]
+          if package.spec.version > Version("1.2.0"):
+              paths.extend(["fae"])
+          return paths

-      class MyPackage(package):
-          version("1.1.5", git_sparse_paths=sparse_path_function)
-          version("1.2.0", git_sparse_paths=sparse_path_function)
-          version("1.2.5", git_sparse_paths=sparse_path_function)
-          version("1.1.5", git_sparse_paths=sparse_path_function)
-   results in the cloning of the files from the top level directory of the repository, the contents of the ``doe`` and ``rae`` relative paths, *and* the ``me/file.cpp`` file. If the package version is greater than ``1.2.0`` then the contents of the ``fae`` relative path will also be cloned.
+      class MyPackage(Package):
+          version("1.1.5", git_sparse_paths=sparse_path_function)
+          version("1.2.0", git_sparse_paths=sparse_path_function)
+          version("1.2.5", git_sparse_paths=sparse_path_function)
+
+   results in the cloning of the files from the top level directory of the repository, the contents of the ``doe`` and ``rae`` relative paths, *and* the ``me/file.cpp`` file.
+   If the package version is greater than ``1.2.0``, then the contents of the ``fae`` relative path will also be cloned.

.. note::
@@ -1079,16 +1115,13 @@ Sparse-Checkout

.. _github-fetch:

-""""""
GitHub
""""""

-If a project is hosted on GitHub, *any* valid Git branch, tag, or hash
-may be downloaded as a tarball. This is accomplished simply by
-constructing an appropriate URL. Spack can checksum any package
-downloaded this way, thereby producing a trusted download. For
-example, the following downloads a particular hash, and then applies a
-checksum.
+If a project is hosted on GitHub, *any* valid Git branch, tag, or hash may be downloaded as a tarball.
+This is accomplished simply by constructing an appropriate URL.
+Spack can checksum any package downloaded this way, thereby producing a trusted download.
+For example, the following downloads a particular hash, and then applies a checksum.

.. code-block:: python
@@ -1103,12 +1136,10 @@ Alternatively, you could provide the GitHub ``url`` for one version as a propert

.. _hg-fetch:

-"""""""""
Mercurial
"""""""""

-Fetching with Mercurial works much like `Git `_, but you
-use the ``hg`` parameter.
+Fetching with Mercurial works much like :ref:`Git `, but you use the ``hg`` parameter.
The destination directory is still the standard stage source path.

.. _hg-default-branch:
@@ -1151,7 +1182,6 @@ Revisions

.. _svn-fetch:

-""""""""""
Subversion
""""""""""

@@ -1177,16 +1207,13 @@ Fetching the head

.. _svn-revisions:

Fetching a revision
-  To fetch a particular revision, add a ``revision`` argument to the
-  version directive:
+  To fetch a particular revision, add a ``revision`` argument to the version directive:

  .. code-block:: python

     version("develop", revision=128)

-  Unfortunately, Subversion has no commit hashing scheme like Git and
-  Mercurial do, so there is no way to guarantee that the download you
-  get is the same as the download used when the package was created.
+  Unfortunately, Subversion has no commit hashing scheme like Git and Mercurial do, so there is no way to guarantee that the download you get is the same as the download used when the package was created.
  Use at your own risk.

.. 
warning:: @@ -1194,20 +1221,17 @@ Fetching a revision This download method is **untrusted**, and is **not recommended**. -Subversion branches are handled as part of the directory structure, so -you can check out a branch or tag by changing the URL. If you want to -package multiple branches, simply add a ``svn`` argument to each -version directive. +Subversion branches are handled as part of the directory structure, so you can check out a branch or tag by changing the URL. +If you want to package multiple branches, simply add a ``svn`` argument to each version directive. .. _cvs-fetch: -""""""" CVS """"""" -CVS (Concurrent Versions System) is an old centralized version control -system. It is a predecessor of Subversion. +CVS (Concurrent Versions System) is an old centralized version control system. +It is a predecessor of Subversion. To fetch with CVS, use the ``cvs``, branch, and ``date`` parameters. The destination directory will be the standard stage source path. @@ -1225,13 +1249,10 @@ Fetching the head version("1.1.2.4") - CVS repository locations are described using an older syntax that - is different from today's ubiquitous URL syntax. ``:pserver:`` - denotes the transport method. CVS servers can host multiple - repositories (called "modules") at the same location, and one needs - to specify both the server location and the module name to access. - Spack combines both into one string using the ``%module=modulename`` - suffix shown above. + CVS repository locations are described using an older syntax that is different from today's ubiquitous URL syntax. + ``:pserver:`` denotes the transport method. + CVS servers can host multiple repositories (called "modules") at the same location, and one needs to specify both the server location and the module name to access. + Spack combines both into one string using the ``%module=modulename`` suffix shown above. .. warning:: @@ -1241,17 +1262,15 @@ Fetching the head .. _cvs-date: Fetching a date - Versions in CVS are commonly specified by date. To fetch a - particular branch or date, add a ``branch`` and/or ``date`` argument - to the version directive: + Versions in CVS are commonly specified by date. + To fetch a particular branch or date, add a ``branch`` and/or ``date`` argument to the version directive: .. code-block:: python version("2021.4.22", branch="branchname", date="2021-04-22") - Unfortunately, CVS does not identify repository-wide commits via a - revision or hash like Subversion, Git, or Mercurial do. This makes - it impossible to specify an exact commit to check out. + Unfortunately, CVS does not identify repository-wide commits via a revision or hash like Subversion, Git, or Mercurial do. + This makes it impossible to specify an exact commit to check out. .. warning:: @@ -1260,15 +1279,11 @@ Fetching a date CVS has more features, but since CVS is rarely used these days, Spack does not support all of them. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sources that are not archives ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Spack normally expands archives (e.g., ``*.tar.gz`` and ``*.zip``) automatically -into a standard stage source directory (``self.stage.source_path``) after -downloading them. If you want to skip this step (e.g., for self-extracting -executables and other custom archive types), you can add ``expand=False`` to a -``version`` directive. +Spack normally expands archives (e.g., ``*.tar.gz`` and ``*.zip``) automatically into a standard stage source directory (``self.stage.source_path``) after downloading them. 
+If you want to skip this step (e.g., for self-extracting executables and other custom archive types), you can add ``expand=False`` to a ``version`` directive. .. code-block:: python @@ -1279,14 +1294,11 @@ executables and other custom archive types), you can add ``expand=False`` to a expand=False, ) -When ``expand`` is set to ``False``, Spack sets the current working -directory to the directory containing the downloaded archive before it -calls your ``install`` method. Within ``install``, the path to the -downloaded archive is available as ``self.stage.archive_file``. +When ``expand`` is set to ``False``, Spack sets the current working directory to the directory containing the downloaded archive before it calls your ``install`` method. +Within ``install``, the path to the downloaded archive is available as ``self.stage.archive_file``. -Here is an example snippet for packages distributed as self-extracting -archives. The example sets permissions on the downloaded file to make -it executable, then runs it with some arguments. +Here is an example snippet for packages distributed as self-extracting archives. +The example sets permissions on the downloaded file to make it executable, then runs it with some arguments. .. code-block:: python @@ -1296,26 +1308,24 @@ it executable, then runs it with some arguments. installer("--prefix=%s" % prefix, "arg1", "arg2", "etc.") -^^^^^^^^^^^^^^^ Extra Resources ^^^^^^^^^^^^^^^ Some packages (most notably compilers) provide optional features if additional resources are expanded within their source tree before building. In Spack it is possible to describe such a need with the ``resource`` directive: - .. code-block:: python +.. code-block:: python - resource( - name="cargo", - git="https://github.com/rust-lang/cargo.git", - tag="0.10.0", - destination="cargo", - ) + resource( + name="cargo", + git="https://github.com/rust-lang/cargo.git", + tag="0.10.0", + destination="cargo", + ) The arguments are similar to those of the ``versions`` directive. The keyword ``destination`` is relative to the source root of the package and should point to where the resource is to be expanded. -^^^^^^^^^^^^^^^^ Download caching ^^^^^^^^^^^^^^^^ @@ -1326,7 +1336,6 @@ Example situations would be a "snapshot"-like Version Control System (VCS) tag, .. _version_constraints: ------------------------------- Specifying version constraints ------------------------------ @@ -1343,7 +1352,8 @@ For example, the following: conflicts("^foo@1.2.3:", when="@:4.5") -illustrates, in order, three of four forms of version range constraints: implicit, lower bound and upper bound. The fourth form provides lower *and* upper bounds on the version. +illustrates, in order, three of four forms of version range constraints: implicit, lower bound and upper bound. +The fourth form provides lower *and* upper bounds on the version. In this example, the implicit range is used to indicate that the package :ref:`depends on ` *any* ``python`` *with* ``3`` *as the major version number* (e.g., ``3.13.5``). The other two range constraints are shown in the :ref:`conflict ` with the dependency package ``foo``. @@ -1365,7 +1375,6 @@ For example, if the package defines the version ``1.2.3``, we know from :ref:`ve .. 
_variants: --------- Variants -------- @@ -1373,17 +1382,16 @@ Many software packages can be configured to enable optional features, which ofte To be flexible enough and support a wide variety of use cases, Spack allows you to expose to the end-user the ability to choose which features should be activated in a package at the time it is installed. The mechanism to be employed is the :py:func:`~spack.package.variant` directive. -^^^^^^^^^^^^^^^^ Boolean variants ^^^^^^^^^^^^^^^^ In their simplest form, variants are boolean options specified at the package level: - .. code-block:: python +.. code-block:: python - class Hdf5(AutotoolsPackage): - ... - variant("shared", default=True, description="Builds a shared version of the library") + class Hdf5(AutotoolsPackage): + ... + variant("shared", default=True, description="Builds a shared version of the library") with a default value and a description of their meaning in the package. @@ -1397,16 +1405,15 @@ We will see this in action in the next part of the packaging guide, where we tal Other than influencing the build process, variants are often used to specify optional :ref:`dependencies of a package `. For example, a package may depend on another package only if a certain variant is enabled: - .. code-block:: python +.. code-block:: python - class Hdf5(AutotoolsPackage): - ... - variant("szip", default=False, description="Enable szip support") - depends_on("szip", when="+szip") + class Hdf5(AutotoolsPackage): + ... + variant("szip", default=False, description="Enable szip support") + depends_on("szip", when="+szip") In this case, ``szip`` is modeled as an optional dependency of ``hdf5``, and users can run ``spack install hdf5 +szip`` to enable it. -^^^^^^^^^^^^^^^^^^^^^^ Single-valued variants ^^^^^^^^^^^^^^^^^^^^^^ @@ -1414,40 +1421,39 @@ Other than boolean variants, Spack supports single- and multi-valued variants th To define a *single-valued* variant, simply pass a tuple of possible values to the ``variant`` directive, together with ``multi=False``: - .. code-block:: python +.. code-block:: python - class Blis(Package): - ... - variant( - "threads", - default="none", - values=("pthreads", "openmp", "none"), - multi=False, - description="Multithreading support", - ) + class Blis(Package): + ... + variant( + "threads", + default="none", + values=("pthreads", "openmp", "none"), + multi=False, + description="Multithreading support", + ) This allows users to ``spack install blis threads=openmp``. In the example above the argument ``multi=False`` indicates that only a **single value** can be selected at a time. This constraint is enforced by the solver, and an error is emitted if a user specifies two or more values at the same time: - .. code-block:: console +.. code-block:: spec - $ spack spec blis threads=openmp,pthreads - Input spec - -------------------------------- - blis threads=openmp,pthreads + $ spack spec blis threads=openmp,pthreads + Input spec + -------------------------------- + blis threads=openmp,pthreads - Concretized - -------------------------------- - ==> Error: multiple values are not allowed for variant "threads" + Concretized + -------------------------------- + ==> Error: multiple values are not allowed for variant "threads" .. hint:: In the example above, the value ``threads=none`` is a variant value like any other, and means that *no value is selected*. In Spack, all variants have to have a value, so ``none`` was chosen as a *convention* to indicate that no value is selected. 
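+
+For illustration only (this sketch is not part of the actual ``blis`` recipe, and the ``--enable-threading`` configure flag is an assumption made for the example), a package with an Autotools-based build could consume the selected value like this:
+
+.. code-block:: python
+
+   class Blis(AutotoolsPackage):
+       ...
+
+       def configure_args(self):
+           # Read the single value chosen for the multi=False variant.
+           threads = self.spec.variants["threads"].value
+           if threads == "none":
+               # "none" is the conventional "no value selected" value.
+               return []
+           return [f"--enable-threading={threads}"]
+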
-^^^^^^^^^^^^^^^^^^^^^ Multi-valued variants ^^^^^^^^^^^^^^^^^^^^^ @@ -1455,88 +1461,83 @@ Like single-valued variants, multi-valued variants take one or more *string* val To define a *multi-valued* variant, simply pass ``multi=True`` instead: - .. code-block:: python +.. code-block:: python - class Gcc(AutotoolsPackage): - ... - variant( - "languages", - default="c,c++,fortran", - values=("ada", "brig", "c", "c++", "fortran", "objc"), - multi=True, - description="Compilers and runtime libraries to build", - ) + class Gcc(AutotoolsPackage): + ... + variant( + "languages", + default="c,c++,fortran", + values=("ada", "brig", "c", "c++", "fortran", "objc"), + multi=True, + description="Compilers and runtime libraries to build", + ) -This allows users to run ``spack install languages=c,c++`` where the values are separated by commas. +This allows users to run ``spack install languages=c,c++``, where the values are separated by commas. -"""""""""""""""""""""""""""""""""""""""""""" Advanced validation of multi-valued variants """""""""""""""""""""""""""""""""""""""""""" -As noted above, the value ``none`` is a value like any other, which raises the question: -what if a variant allows multiple values to be selected, *or* none at all? +As noted above, the value ``none`` is a value like any other, which raises the question: what if a variant allows multiple values to be selected, *or* none at all? Naively, one might think that this can be achieved by simply creating a multi-valued variant that includes the value ``none``: - .. code-block:: python +.. code-block:: python - class Adios(AutotoolsPackage): - ... - variant( - "staging", - values=("dataspaces", "flexpath", "none"), - multi=True, - description="Enable dataspaces and/or flexpath staging transports", - ) + class Adios(AutotoolsPackage): + ... + variant( + "staging", + values=("dataspaces", "flexpath", "none"), + multi=True, + description="Enable dataspaces and/or flexpath staging transports", + ) -but this does not prevent users from selecting the non-sensical option ``staging=dataspaces,none``. +but this does not prevent users from selecting the nonsensical option ``staging=dataspaces,none``. In these cases, more advanced validation logic is required to prevent ``none`` from being selected along with any other value. Spack provides two validator functions to help with this, which can be passed to the ``values=`` argument of the ``variant`` directive. The first validator function is :py:func:`~spack.package.any_combination_of`, which can be used as follows: - .. code-block:: python +.. code-block:: python - class Adios(AutotoolsPackage): - ... - variant( - "staging", - values=any_combination_of("flexpath", "dataspaces"), - description="Enable dataspaces and/or flexpath staging transports", - ) + class Adios(AutotoolsPackage): + ... + variant( + "staging", + values=any_combination_of("flexpath", "dataspaces"), + description="Enable dataspaces and/or flexpath staging transports", + ) This solves the issue by allowing the user to select either any combination of the values ``flexpath`` and ``dataspaces``, or ``none``. In other words, users can specify ``staging=none`` to select nothing, or any of ``staging=dataspaces``, ``staging=flexpath``, and ``staging=dataspaces,flexpath``. The second validator function :py:func:`~spack.package.disjoint_sets` generalizes this idea further: - .. code-block:: python +.. code-block:: python - class Mvapich2(AutotoolsPackage): - ... 
- variant( - "process_managers", - description="List of the process managers to activate", - values=disjoint_sets(("auto",), ("slurm",), ("hydra", "gforker", "remshell")) - .prohibit_empty_set() - .with_error("'slurm' or 'auto' cannot be activated along with other process managers") - .with_default("auto") - .with_non_feature_values("auto"), - ) + class Mvapich2(AutotoolsPackage): + ... + variant( + "process_managers", + description="List of the process managers to activate", + values=disjoint_sets(("auto",), ("slurm",), ("hydra", "gforker", "remshell")) + .prohibit_empty_set() + .with_error("'slurm' or 'auto' cannot be activated along with other process managers") + .with_default("auto") + .with_non_feature_values("auto"), + ) In this case, examples of valid options are ``process_managers=auto``, ``process_managers=slurm``, and ``process_managers=hydra,remshell``, whereas ``process_managers=slurm,hydra`` is invalid, as it picks values from two different sets. Both validator functions return a :py:class:`~spack.variant.DisjointSetsOfValues` object, which defines chaining methods to further customize the behavior of the variant. -^^^^^^^^^^^^^^^^^^^^^^^^^^^ Conditional Possible Values ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are cases where a variant may take multiple values, and the list of allowed values -expands over time. Consider, for instance, the C++ standard with which we might compile -Boost, which can take one of multiple possible values with the latest standards -only available for more recent versions. +There are cases where a variant may take multiple values, and the list of allowed values expands over time. +Consider, for instance, the C++ standard with which we might compile Boost, which can take one of multiple possible values with the latest standards only available for more recent versions. To model a similar situation we can use *conditional possible values* in the variant declaration: @@ -1559,12 +1560,9 @@ To model a similar situation we can use *conditional possible values* in the var ) -The snippet above allows ``98``, ``11`` and ``14`` as unconditional possible values for the -``cxxstd`` variant, while ``17`` requires a version greater or equal to ``1.63.0`` -and both ``2a`` and ``2b`` require a version greater or equal to ``1.73.0``. +The snippet above allows ``98``, ``11`` and ``14`` as unconditional possible values for the ``cxxstd`` variant, while ``17`` requires a version greater than or equal to ``1.63.0`` and both ``2a`` and ``2b`` require a version greater than or equal to ``1.73.0``. -^^^^^^^^^^^^^^^^^^^^ Conditional Variants ^^^^^^^^^^^^^^^^^^^^ @@ -1589,7 +1587,6 @@ For example, a user might run ``spack install foo ~bar``, expecting it to allow However, the constraint ``~bar`` tells Spack that the ``bar`` variant *must exist* and be disabled. This forces Spack to select version 2.0 or higher, where the variant is defined. -^^^^^^^^^^^^^^^ Sticky Variants ^^^^^^^^^^^^^^^ @@ -1599,35 +1596,22 @@ The variant directive can be marked as ``sticky`` by setting the corresponding a variant("bar", default=False, sticky=True) -A ``sticky`` variant differs from a regular one in that it is always set -to either: +A ``sticky`` variant differs from a regular one in that it is always set to either: #. An explicit value appearing in a spec literal or #. Its default value -The concretizer thus is not free to pick an alternate value to work -around conflicts, but will error out instead. 
-Setting this property on a variant is useful in cases where the -variant allows some dangerous or controversial options (e.g., using unsupported versions -of a compiler for a library) and the packager wants to ensure that -allowing these options is done on purpose by the user, rather than -automatically by the solver. +The concretizer thus is not free to pick an alternate value to work around conflicts, but will error out instead. +Setting this property on a variant is useful in cases where the variant allows some dangerous or controversial options (e.g., using unsupported versions of a compiler for a library) and the packager wants to ensure that allowing these options is done on purpose by the user, rather than automatically by the solver. -^^^^^^^^^^^^^^^^^^^ Overriding Variants ^^^^^^^^^^^^^^^^^^^ -Packages may override variants for several reasons, most often to -change the default from a variant defined in a parent class or to -change the conditions under which a variant is present on the spec. +Packages may override variants for several reasons, most often to change the default from a variant defined in a parent class or to change the conditions under which a variant is present on the spec. -When a variant is defined multiple times, whether in the same package -file or in a subclass and a superclass, the last definition is used -for all attributes **except** for the ``when`` clauses. The ``when`` -clauses are accumulated through all invocations, and the variant is -present on the spec if any of the accumulated conditions are -satisfied. +When a variant is defined multiple times, whether in the same package file or in a subclass and a superclass, the last definition is used for all attributes **except** for the ``when`` clauses. +The ``when`` clauses are accumulated through all invocations, and the variant is present on the spec if any of the accumulated conditions are satisfied. For example, consider the following package: @@ -1639,29 +1623,22 @@ For example, consider the following package: variant("bar", default=True, when="platform=darwin", description="help2") ... -This package ``foo`` has a variant ``bar`` when the spec satisfies -either ``@1.0`` or ``platform=darwin``, but not for other platforms at -other versions. The default for this variant, when it is present, is -always ``True``, regardless of which condition of the variant is -satisfied. This allows packages to override variants in packages or -build system classes from which they inherit, by modifying the variant -values without modifying the ``when`` clause. It also allows a package -to implement ``or`` semantics for a variant ``when`` clause by -duplicating the variant definition. +This package ``foo`` has a variant ``bar`` when the spec satisfies either ``@1.0`` or ``platform=darwin``, but not for other platforms at other versions. +The default for this variant, when it is present, is always ``True``, regardless of which condition of the variant is satisfied. +This allows packages to override variants in packages or build system classes from which they inherit, by modifying the variant values without modifying the ``when`` clause. +It also allows a package to implement ``or`` semantics for a variant ``when`` clause by duplicating the variant definition. .. _dependencies: ------------- Dependencies ------------ -We've covered how to build a simple package, but what if one package -relies on another package to build? How do you express that in a -package file? 
And how do you refer to the other package in the build -script for your own package? +We've covered how to build a simple package, but what if one package relies on another package to build? +How do you express that in a package file? +And how do you refer to the other package in the build script for your own package? -Spack makes this relatively easy. Let's take a look at the -``libdwarf`` package to see how it's done: +Spack makes this relatively easy. +Let's take a look at the ``libdwarf`` package to see how it's done: .. code-block:: python :emphasize-lines: 9 @@ -1669,7 +1646,7 @@ Spack makes this relatively easy. Let's take a look at the class Libdwarf(Package): homepage = "http://www.prevanders.net/dwarf.html" - url = "http://www.prevanders.net/libdwarf-20130729.tar.gz" + url = "http://www.prevanders.net/libdwarf-20130729.tar.gz" list_url = homepage version("20130729", sha256="092fcfbbcfca3b5be7ae1b5e58538e92c35ab273ae13664fed0d67484c8e78a6") @@ -1677,62 +1654,48 @@ Spack makes this relatively easy. Let's take a look at the depends_on("libelf") - def install(self, spec, prefix): - ... + def install(self, spec, prefix): ... -^^^^^^^^^^^^^^^^ ``depends_on()`` ^^^^^^^^^^^^^^^^ -The highlighted ``depends_on("libelf")`` call tells Spack that it -needs to build and install the ``libelf`` package before it builds -``libdwarf``. This means that in your ``install()`` method, you are -guaranteed that ``libelf`` has been built and installed successfully, -so you can rely on it for your libdwarf build. +The highlighted ``depends_on("libelf")`` call tells Spack that it needs to build and install the ``libelf`` package before it builds ``libdwarf``. +This means that in your ``install()`` method, you are guaranteed that ``libelf`` has been built and installed successfully, so you can rely on it for your libdwarf build. .. _dependency_specs: -^^^^^^^^^^^^^^^^ Dependency specs ^^^^^^^^^^^^^^^^ -``depends_on`` doesn't just take the name of another package. It can -take a full spec as well. This means that you can restrict the versions or -other configuration options of ``libelf`` that ``libdwarf`` will build -with. For example, suppose that in the ``libdwarf`` package you write: +``depends_on`` doesn't just take the name of another package. +It can take a full spec as well. +This means that you can restrict the versions or other configuration options of ``libelf`` that ``libdwarf`` will build with. +For example, suppose that in the ``libdwarf`` package you write: .. code-block:: python depends_on("libelf@0.8") -Now ``libdwarf`` will require ``libelf`` in the range ``0.8``, which -includes patch versions ``0.8.1``, ``0.8.2``, etc. Apart from version -restrictions, you can also specify variants if this package requires -optional features of the dependency. +Now ``libdwarf`` will require ``libelf`` in the range ``0.8``, which includes patch versions ``0.8.1``, ``0.8.2``, etc. +Apart from version restrictions, you can also specify variants if this package requires optional features of the dependency. .. code-block:: python depends_on("libelf@0.8 +parser +pic") -Both users *and* package authors use the same spec syntax to refer -to different package configurations. Users use the spec syntax on the -command line to find installed packages or to install packages with -particular constraints, and package authors can use specs to describe -relationships between packages. +Both users *and* package authors use the same spec syntax to refer to different package configurations. 
+Users use the spec syntax on the command line to find installed packages or to install packages with particular constraints, and package authors can use specs to describe relationships between packages. .. _version_compatibility: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Specifying backward and forward compatibility ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Packages are often compatible with a range of versions of their -dependencies. This is typically referred to as backward and forward -compatibility. Spack allows you to specify this in the ``depends_on`` -directive using version ranges. +Packages are often compatible with a range of versions of their dependencies. +This is typically referred to as backward and forward compatibility. +Spack allows you to specify this in the ``depends_on`` directive using version ranges. -**Backward compatibility** means that the package requires at least a -certain version of its dependency: +**Backward compatibility** means that the package requires at least a certain version of its dependency: .. code-block:: python @@ -1740,9 +1703,8 @@ certain version of its dependency: In this case, the package requires Python 3.10 or newer. -Commonly, packages drop support for older versions of a dependency as -they release new versions. In Spack you can conveniently add every -backward compatibility rule as a separate line: +Commonly, packages drop support for older versions of a dependency as they release new versions. +In Spack you can conveniently add every backward compatibility rule as a separate line: .. code-block:: python @@ -1751,18 +1713,13 @@ backward compatibility rule as a separate line: depends_on("python@3.9:", when="@1.2:") depends_on("python@3.10:", when="@1.4:") -This means that in general we need Python 3.8 or newer; from version -1.2 onwards we need Python 3.9 or newer; from version 1.4 onwards we -need Python 3.10 or newer. Notice that it's fine to have overlapping -ranges in the ``when`` clauses. +This means that in general we need Python 3.8 or newer; from version 1.2 onwards we need Python 3.9 or newer; from version 1.4 onwards we need Python 3.10 or newer. +Notice that it's fine to have overlapping ranges in the ``when`` clauses. -**Forward compatibility** means that the package requires at most a -certain version of its dependency. Forward compatibility rules are -necessary when there are breaking changes in the dependency that the -package cannot handle. In Spack we often add forward compatibility -bounds only at the time a new, breaking version of a dependency is -released. As with backward compatibility, it is typical to see a list -of forward compatibility bounds in a package file as separate lines: +**Forward compatibility** means that the package requires at most a certain version of its dependency. +Forward compatibility rules are necessary when there are breaking changes in the dependency that the package cannot handle. +In Spack we often add forward compatibility bounds only at the time a new, breaking version of a dependency is released. +As with backward compatibility, it is typical to see a list of forward compatibility bounds in a package file as separate lines: .. code-block:: python @@ -1770,53 +1727,42 @@ of forward compatibility bounds in a package file as separate lines: depends_on("python@:3.12", when="@:1.10") depends_on("python@:3.13", when="@:1.12") -Notice how the ``:`` now appears before the version number both in the -dependency and in the ``when`` clause. 
This tells Spack that in general -we need Python 3.13 or older up to version ``1.12.x``, and up to version -``1.10.x`` we need Python 3.12 or older. Said differently, forward compatibility -with Python 3.13 was added in version 1.11, while version 1.13 added forward -compatibility with Python 3.14. +Notice how the ``:`` now appears before the version number both in the dependency and in the ``when`` clause. +This tells Spack that in general we need Python 3.13 or older up to version ``1.12.x``, and up to version ``1.10.x`` we need Python 3.12 or older. +Said differently, forward compatibility with Python 3.13 was added in version 1.11, while version 1.13 added forward compatibility with Python 3.14. -Notice that a version range ``@:3.12`` includes *any* patch version -number ``3.12.x``, which is often useful when specifying forward compatibility -bounds. +Notice that a version range ``@:3.12`` includes *any* patch version number ``3.12.x``, which is often useful when specifying forward compatibility bounds. -So far we have seen open-ended version ranges, which is by far the most -common use case. It is also possible to specify both a lower and an upper bound -on the version of a dependency, like this: +So far we have seen open-ended version ranges, which is by far the most common use case. +It is also possible to specify both a lower and an upper bound on the version of a dependency, like this: .. code-block:: python depends_on("python@3.10:3.12") -There is short syntax to specify that a package is compatible with say any -``3.x`` version: +There is short syntax to specify that a package is compatible with say any ``3.x`` version: .. code-block:: python depends_on("python@3") -The above is equivalent to ``depends_on("python@3:3")``, which means at least -Python version 3 and at most any version ``3.x.y``. +The above is equivalent to ``depends_on("python@3:3")``, which means at least Python version 3 and at most any ``3.x.y`` version. -In very rare cases, you may need to specify an exact version, for example -if you need to distinguish between ``3.2`` and ``3.2.1``: +In very rare cases, you may need to specify an exact version, for example if you need to distinguish between ``3.2`` and ``3.2.1``: .. code-block:: python depends_on("pkg@=3.2") -But in general, you should try to use version ranges as much as possible, -so that custom suffixes are included too. The above example can be -rewritten in terms of ranges as follows: +But in general, you should try to use version ranges as much as possible, so that custom suffixes are included too. +The above example can be rewritten in terms of ranges as follows: .. code-block:: python depends_on("pkg@3.2:3.2.0") -A spec can contain a version list of ranges and individual versions -separated by commas. For example, if you need Boost 1.59.0 or newer, -but there are known issues with 1.64.0, 1.65.0, and 1.66.0, you can say: +A spec can contain a version list of ranges and individual versions separated by commas. +For example, if you need Boost 1.59.0 or newer, but there are known issues with 1.64.0, 1.65.0, and 1.66.0, you can say: .. code-block:: python @@ -1825,12 +1771,11 @@ but there are known issues with 1.64.0, 1.65.0, and 1.66.0, you can say: .. _dependency-types: -^^^^^^^^^^^^^^^^ Dependency types ^^^^^^^^^^^^^^^^ -Not all dependencies are created equal, and Spack allows you to specify -exactly what kind of a dependency you need. 
For example: +Not all dependencies are created equal, and Spack allows you to specify exactly what kind of a dependency you need. +For example: .. code-block:: python @@ -1841,52 +1786,34 @@ exactly what kind of a dependency you need. For example: The following dependency types are available: -* **"build"**: the dependency will be added to the ``PATH`` and - ``PYTHONPATH`` at build-time. -* **"link"**: the dependency will be added to Spack's compiler - wrappers, automatically injecting the appropriate linker flags, - including ``-I``, ``-L``, and RPATH/RUNPATH handling. -* **"run"**: the dependency will be added to the ``PATH`` and - ``PYTHONPATH`` at run-time. This is true for both ``spack load`` - and the module files Spack writes. -* **"test"**: the dependency will be added to the ``PATH`` and - ``PYTHONPATH`` at build-time. The only difference between - "build" and "test" is that test dependencies are only built - if the user requests unit tests with ``spack install --test``. - -One of the advantages of the ``build`` dependency type is that although the -dependency needs to be installed in order for the package to be built, it -can be uninstalled without concern afterwards. ``link`` and ``run`` disallow -this because uninstalling the dependency would break the package. - -``build``, ``link``, and ``run`` dependencies all affect the hash of Spack -packages (along with ``sha256`` sums of patches and archives used to build the -package, and a `canonical hash `_ of -the ``package.py`` recipes). ``test`` dependencies do not affect the package -hash, as they are only used to construct a test environment *after* building and -installing a given package installation. Older versions of Spack did not include -build dependencies in the hash, but this has been -`fixed `_ as of |Spack v0.18|_. +* **build**: the dependency will be added to the ``PATH`` and ``PYTHONPATH`` at build-time. +* **link**: the dependency will be added to Spack's compiler wrappers, automatically injecting the appropriate linker flags, including ``-I``, ``-L``, and RPATH/RUNPATH handling. +* **run**: the dependency will be added to the ``PATH`` and ``PYTHONPATH`` at run-time. + This is true for both ``spack load`` and the module files Spack writes. +* **test**: the dependency will be added to the ``PATH`` and ``PYTHONPATH`` at build-time. + The only difference between "build" and "test" is that test dependencies are only built if the user requests unit tests with ``spack install --test``. + +One of the advantages of the ``build`` dependency type is that although the dependency needs to be installed in order for the package to be built, it can be uninstalled without concern afterwards. +``link`` and ``run`` disallow this because uninstalling the dependency would break the package. + +``build``, ``link``, and ``run`` dependencies all affect the hash of Spack packages (along with ``sha256`` sums of patches and archives used to build the package, and a `canonical hash `_ of the ``package.py`` recipes). +``test`` dependencies do not affect the package hash, as they are only used to construct a test environment *after* building and installing a given package installation. +Older versions of Spack did not include build dependencies in the hash, but this has been `fixed `_ as of |Spack v0.18|_. .. |Spack v0.18| replace:: Spack ``v0.18`` .. _Spack v0.18: https://github.com/spack/spack/releases/tag/v0.18.0 -If the dependency type is not specified, Spack uses a default of -``("build", "link")``. 
This is the common case for compiler languages.
-Non-compiled packages like Python modules commonly use
-``("build", "run")``. This means that the compiler wrappers don't need to
-inject the dependency's ``prefix/lib`` directory, but the package needs to
-be in ``PATH`` and ``PYTHONPATH`` during the build process and later when
-a user wants to run the package.
+If the dependency type is not specified, Spack uses a default of ``("build", "link")``.
+This is the common case for compiled languages.
+Non-compiled packages like Python modules commonly use ``("build", "run")``.
+This means that the compiler wrappers don't need to inject the dependency's ``prefix/lib`` directory, but the package needs to be in ``PATH`` and ``PYTHONPATH`` during the build process and later when a user wants to run the package.

-^^^^^^^^^^^^^^^^^^^^^^^^
Conditional dependencies
^^^^^^^^^^^^^^^^^^^^^^^^

-You may have a package that only requires a dependency under certain
-conditions. For example, you may have a package with optional MPI support.
-You would then provide a variant to reflect that the feature is optional
-and specify the MPI dependency only applies when MPI support is enabled.
+You may have a package that only requires a dependency under certain conditions.
+For example, you may have a package with optional MPI support.
+You would then provide a variant to reflect that the feature is optional and specify that the MPI dependency only applies when MPI support is enabled.
In that case, you could say something like:

.. code-block:: python
@@ -1896,11 +1823,10 @@ In that case, you could say something like:

    depends_on("mpi", when="+mpi")

-Suppose that, starting from version 3, the above package also has optional `Trilinos`
-support. Furthermore, you want to ensure that when `Trilinos` support is enabled,
-the package can be built both with and without MPI. Further
-suppose you require a version of `Trilinos` no older than 12.6. In that case,
-the `trilinos` variant and dependency directives would be:
+Suppose that, starting from version 3, the above package also has optional ``Trilinos`` support.
+Furthermore, you want to ensure that when ``Trilinos`` support is enabled, the package can be built both with and without MPI.
+Further suppose you require a version of ``Trilinos`` no older than 12.6.
+In that case, the ``trilinos`` variant and dependency directives would be:

.. code-block:: python
@@ -1910,8 +1836,7 @@ the `trilinos` variant and dependency directives would be:

    depends_on("trilinos@12.6: +mpi", when="@3: +trilinos +mpi")

-Alternatively, you could use the `when` context manager to equivalently specify
-the `trilinos` variant dependencies as follows:
+Alternatively, you could use the ``when`` context manager to equivalently specify the ``trilinos`` variant dependencies as follows:

.. code-block:: python
@@ -1920,31 +1845,24 @@ the `trilinos` variant dependencies as follows:

        depends_on("trilinos +mpi", when="+mpi")

-The argument to ``when`` in either case can include any Spec constraints that
-are supported on the command line using the same :ref:`syntax `.
+The argument to ``when`` in either case can include any Spec constraints that are supported on the command line using the same :ref:`syntax `.

.. note::

-   If a dependency isn't typically used, you can save time by making it
-   conditional since Spack will not build the dependency unless it is
-   required for the Spec. 
+ If a dependency isn't typically used, you can save time by making it conditional since Spack will not build the dependency unless it is required for the Spec. .. _dependency_dependency_patching: -^^^^^^^^^^^^^^^^^^^ Dependency patching ^^^^^^^^^^^^^^^^^^^ -Some packages maintain special patches on their dependencies, either to -add new features or to fix bugs. This typically makes a package harder -to maintain, and we encourage developers to upstream (contribute back) -their changes rather than maintaining patches. However, in some cases -it's not possible to upstream. Maybe the dependency's developers don't -accept changes, or maybe they just haven't had time to integrate them. +Some packages maintain special patches on their dependencies, either to add new features or to fix bugs. +This typically makes a package harder to maintain, and we encourage developers to upstream (contribute back) their changes rather than maintaining patches. +However, in some cases it's not possible to upstream. +Maybe the dependency's developers don't accept changes, or maybe they just haven't had time to integrate them. -For times like these, Spack's ``depends_on`` directive can optionally -take a patch or list of patches: +For times like these, Spack's ``depends_on`` directive can optionally take a patch or list of patches: .. code-block:: python @@ -1953,15 +1871,11 @@ take a patch or list of patches: depends_on("binutils", patches="special-binutils-feature.patch") ... -Here, the ``special-tool`` package requires a special feature in -``binutils``, so it provides an extra ``patches=`` keyword -argument. This is similar to the `patch directive `_, with -one small difference. Here, ``special-tool`` is responsible for the -patch, so it should live in ``special-tool``'s directory in the package -repository, not the ``binutils`` directory. +Here, the ``special-tool`` package requires a special feature in ``binutils``, so it provides an extra ``patches=`` keyword argument. +This is similar to the `patch directive `_, with one small difference. +Here, ``special-tool`` is responsible for the patch, so it should live in ``special-tool``'s directory in the package repository, not the ``binutils`` directory. -If you need something more sophisticated than this, you can simply nest a -``patch()`` directive inside of ``depends_on``: +If you need something more sophisticated, you can nest a ``patch()`` directive inside ``depends_on``: .. code-block:: python @@ -1969,21 +1883,18 @@ If you need something more sophisticated than this, you can simply nest a ... depends_on( "binutils", - patches=patch("special-binutils-feature.patch", - level=3, - when="@:1.3"), # condition on binutils - when="@2.0:") # condition on special-tool + patches=patch( + "special-binutils-feature.patch", level=3, when="@:1.3" # condition on binutils + ), + when="@2.0:", # condition on special-tool + ) ... -Note that there are two optional ``when`` conditions here -- one on the -``patch`` directive and the other on ``depends_on``. The condition in -the ``patch`` directive applies to ``binutils`` (the package being -patched), while the condition in ``depends_on`` applies to -``special-tool``. See `patch directive `_ for details on all -the arguments the ``patch`` directive can take. +Note that there are two optional ``when`` conditions here -- one on the ``patch`` directive and the other on ``depends_on``. 
+The condition in the ``patch`` directive applies to ``binutils`` (the package being patched), while the condition in ``depends_on`` applies to ``special-tool``. +See `patch directive `_ for details on all the arguments the ``patch`` directive can take. -Finally, if you need *multiple* patches on a dependency, you can provide -a list for ``patches``, e.g.: +Finally, if you need *multiple* patches on a dependency, you can provide a list for ``patches``, e.g.: .. code-block:: python @@ -1994,31 +1905,29 @@ a list for ``patches``, e.g.: patches=[ "binutils-bugfix1.patch", "binutils-bugfix2.patch", - patch("https://example.com/special-binutils-feature.patch", - sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866", - when="@:1.3")], - when="@2.0:") + patch( + "https://example.com/special-binutils-feature.patch", + sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866", + when="@:1.3", + ), + ], + when="@2.0:", + ) ... -As with ``patch`` directives, patches are applied in the order they -appear in the package file (or in this case, in the list). +As with ``patch`` directives, patches are applied in the order they appear in the package file (or in this case, in the list). .. note:: - You may wonder whether dependency patching will interfere with other - packages that depend on ``binutils``. It won't. + You may wonder whether dependency patching will interfere with other packages that depend on ``binutils``. + It won't. - As described in :ref:`patching`, Patching a package adds the ``sha256`` of - the patch to the package's spec, which means it will have a - *different* unique hash than other versions without the patch. The - patched version coexists with unpatched versions, and Spack's support - for :ref:`handling_rpaths` guarantees that each installation finds the - right version. If two packages depend on ``binutils`` patched *the - same* way, they can both use a single installation of ``binutils``. + As described in :ref:`patching`, patching a package adds the ``sha256`` of the patch to the package's spec, which means it will have a *different* unique hash than other versions without the patch. + The patched version coexists with unpatched versions, and Spack's support for :ref:`handling_rpaths` guarantees that each installation finds the right version. + If two packages depend on ``binutils`` patched *the same* way, they can both use a single installation of ``binutils``. .. _virtual-dependencies: --------------------- Virtual dependencies -------------------- @@ -2030,7 +1939,6 @@ MPI has several different implementations (e.g., `MPICH `_ Many package managers handle interfaces like this by requiring many variations of the package recipe for each implementation of MPI, e.g., ``foo``, ``foo-mvapich``, ``foo-mpich``. In Spack every package is defined in a single ``package.py`` file, and avoids the combinatorial explosion through *virtual dependencies*. -^^^^^^^^^^^^ ``provides`` ^^^^^^^^^^^^ @@ -2064,7 +1972,6 @@ If we look inside the package file of an MPI implementation, say MPICH, we'll se The ``provides("mpi")`` call tells Spack that the ``mpich`` package can be used to satisfy the dependency of any package that ``depends_on("mpi")``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Providing multiple virtuals simultaneously ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -2079,19 +1986,18 @@ To express this constraint in a package, the two virtual dependencies must be li ..
code-block:: python - provides('blas', 'lapack') + provides("blas", "lapack") This makes it impossible to select ``openblas`` as a provider for one of the two virtual dependencies and not for the other. If you try to, Spack will report an error: -.. code-block:: console +.. code-block:: spec $ spack spec netlib-scalapack ^[virtuals=lapack] openblas ^[virtuals=blas] atlas ==> Error: concretization failed for the following reasons: 1. Package 'openblas' needs to provide both 'lapack' and 'blas' together, but provides only 'lapack' -^^^^^^^^^^^^^^^^^^^^ Versioned Interfaces ^^^^^^^^^^^^^^^^^^^^ @@ -2099,7 +2005,7 @@ Just as you can pass a spec to ``depends_on``, so can you pass a spec to ``provi This allows Spack to support the notion of *versioned interfaces*. The MPI standard has gone through many revisions, each with new functions added, and each revision of the standard has a version number. Some packages may require a recent implementation that supports MPI-3 functions, but some MPI versions may only provide up to MPI-2. -Others may need MPI 2.1 or higher. +Others may need MPI 2.1 or higher. You can indicate this by adding a version constraint to the spec passed to ``provides``: .. code-block:: python @@ -2111,7 +2017,6 @@ This says that ``mpich2`` provides MPI support *up to* version 2, but if a packa Currently, names and versions are the only spec components supported for virtual packages. -^^^^^^^^^^^^^^^^^ ``provides when`` ^^^^^^^^^^^^^^^^^ @@ -2141,7 +2046,7 @@ For example, suppose the package ``foo`` declares this: Suppose a user invokes ``spack install`` like this: -.. code-block:: console +.. code-block:: spec $ spack install foo ^mpich@1.0 @@ -2149,7 +2054,6 @@ Spack will fail with a constraint violation, because the version of MPICH reques .. _language-dependencies: ----------------------------------- Language and compiler dependencies ---------------------------------- @@ -2172,7 +2076,6 @@ This means that language dependencies translate to one or more compiler packages .. _packaging_conflicts: ---------- Conflicts --------- @@ -2189,8 +2092,8 @@ Adding the following to a package: conflicts( "%intel-oneapi-compilers@:2024", - when="@:1.2", - msg="known bug when using Intel oneAPI compilers through v2024", + when="@:1.2", + msg="known bug when using Intel oneAPI compilers through v2024", ) expresses that the current package *cannot be built* with Intel oneAPI compilers *up through any version* ``2024`` *when trying to install the package with a version up to* ``1.2``. @@ -2223,7 +2126,6 @@ means the package cannot be built on a Mac running Ventura, Monterey, or Big Sur .. _packaging_requires: --------- Requires -------- @@ -2260,7 +2162,8 @@ Or the package must be built with a GCC or Clang that supports C++ 20, which you .. code-block:: python requires( - "%gcc@10:", "%clang@16:", + "%gcc@10:", + "%clang@16:", policy="one_of", msg="builds only with a GCC or Clang that support C++ 20", ) @@ -2274,22 +2177,28 @@ Or the package must be built with a GCC or Clang that supports C++ 20, which you .. _patching: -------- Patches ------- -Depending on the host architecture, package version, known bugs, or -other issues, you may need to patch your software to get it to build -correctly. Like many other package systems, Spack allows you to store -patches alongside your package files and apply them to source code -after it's downloaded. +Depending on the host architecture, package version, known bugs, or other issues, you may need to patch your software to get it to build correctly. 
+Like many other package systems, Spack allows you to store patches alongside your package files and apply them to source code after it's downloaded. -^^^^^^^^^ ``patch`` ^^^^^^^^^ -You can specify patches in your package file with the ``patch()`` -directive. ``patch`` looks like this: +You can specify patches in your package file with the ``patch()`` directive. +The first argument can be either the filename or URL of the patch file to be applied to your source. + +.. note:: + + Use of a URL is preferred over maintaining patch files in the package repository. + This helps reduce the size of the package repository, which can become an issue for those with limited space (or allocations). + +Filename patch +"""""""""""""" + +You can supply the name of the patch file. +For example, a simple conditional ``patch`` based on a file for the ``mvapich2`` package looks like: .. code-block:: python @@ -2297,11 +2206,10 @@ directive. ``patch`` looks like this: ... patch("ad_lustre_rwcontig_open_source.patch", when="@1.9:") -The first argument can be either a URL or a filename. It specifies a -patch file that should be applied to your source. If the patch you -supply is a filename, then the patch needs to live within the Spack -source tree. For example, the patch above lives in a directory -structure like this: +This patch will only be applied when attempting to install the package at version ``1.9`` or newer. + +When a filename is provided, the patch needs to live within the Spack source tree. +The above patch file lives alongside the package file in the package repository, in the following location: .. code-block:: none @@ -2310,67 +2218,93 @@ structure like this: package.py ad_lustre_rwcontig_open_source.patch -If you supply a URL instead of a filename, you need to supply a -``sha256`` checksum, like this: +URL patch file +"""""""""""""" + +If you supply a URL instead of a filename, you have two options: a plain patch file URL or a commit patch file URL. +In either case, you must supply a checksum. +Spack requires the ``sha256`` hash so that different patches applied to the same package will have unique identifiers. +Patches will be fetched from their URLs, checked, and applied to your source code. + +.. note:: + + To ensure consistency, a ``sha256`` checksum must be provided for the patch. + + You can use the GNU utils ``sha256sum`` or the macOS ``shasum -a 256`` commands to generate a checksum for a patch file. + +Here is an example of specifying the unconditional use of a patch file URL: .. code-block:: python - patch("http://www.nwchem-sw.org/images/Tddft_mxvec20.patch", - sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866") + patch( + "http://www.nwchem-sw.org/images/Tddft_mxvec20.patch", + sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866", + ) -Spack includes the hashes of patches in its versioning information, so -that the same package with different patches applied will have different -hash identifiers. To ensure that the hashing scheme is consistent, you -must use a ``sha256`` checksum for the patch. Patches will be fetched -from their URLs, checked, and applied to your source code. You can use -the GNU utils ``sha256sum`` or the macOS ``shasum -a 256`` commands to -generate a checksum for a patch file. +Sometimes you can specify the patch file associated with a repository commit. +For example, GitHub allows you to reference the commit in the name of the patch file through a URL in the form ``https://github.com/<user>/<repo>/commit/<commit-hash>.patch``.
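+Whichever form of URL you use, you can generate the required checksum locally before writing the directive; a minimal sketch using ``curl`` and ``sha256sum`` (the file name and URL here are placeholders, not from a real package):
+
+.. code-block:: console
+
+   $ curl -fsSL "https://example.com/special-feature.patch" -o special-feature.patch
+   $ sha256sum special-feature.patch
+   <sha256>  special-feature.patch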
-Spack can also handle compressed patches. If you use these, Spack needs -a little more help. Specifically, it needs *two* checksums: the -``sha256`` of the patch and ``archive_sha256`` for the compressed -archive. ``archive_sha256`` helps Spack ensure that the downloaded -file is not corrupted or malicious, before running it through a tool like -``tar`` or ``zip``. The ``sha256`` of the patch is still required so -that it can be included in specs. Providing it in the package file -ensures that Spack won't have to download and decompress patches it won't -end up using at install time. Both the archive and patch checksum are -checked when patch archives are downloaded. +Below is an example of specifying a conditional commit patch: .. code-block:: python - patch("http://www.nwchem-sw.org/images/Tddft_mxvec20.patch.gz", - sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866", - archive_sha256="4e8092a161ec6c3a1b5253176fcf33ce7ba23ee2ff27c75dbced589dabacd06e") + patch( + "https://github.com/ornladios/ADIOS/commit/17aee8aeed64612cd8cfa0b949147091a5525bbe.patch?full_index=1", + sha256="aea47e56013b57c2d5d36e23e0ae6010541c3333a84003784437768c2e350b05", + when="@1.12.0: +mpi", + ) + +In this case, the patch is only applied when installing version ``1.12.0`` or higher of the package with its ``mpi`` variant enabled. + +.. note:: + + Be sure to append ``?full_index=1`` to the GitHub URL to ensure the patch file consistently contains the complete, stable hash information for reproducible patching. + + Use the resulting URL to download the patch file contents, then run them through the appropriate utility to compute the corresponding ``sha256`` value. + +Compressed patches +"""""""""""""""""" + +Spack can also handle compressed patches. +If you use these, Spack needs a little more help. +Specifically, it needs *two* checksums: the ``sha256`` of the patch and ``archive_sha256`` for the compressed archive. +``archive_sha256`` helps Spack ensure that the downloaded file is not corrupted or malicious, before running it through a tool like ``tar`` or ``zip``. +The ``sha256`` of the patch is still required so that it can be included in specs. +Providing it in the package file ensures that Spack won't have to download and decompress patches it won't end up using at install time. +Both the archive and patch checksum are checked when patch archives are downloaded. + +.. code-block:: python + + patch( + "http://www.nwchem-sw.org/images/Tddft_mxvec20.patch.gz", + sha256="252c0af58be3d90e5dc5e0d16658434c9efa5d20a5df6c10bf72c2d77f780866", + archive_sha256="4e8092a161ec6c3a1b5253176fcf33ce7ba23ee2ff27c75dbced589dabacd06e", + ) ``patch`` keyword arguments are described below. -"""""""""""""""""""""""""""""" ``sha256``, ``archive_sha256`` """""""""""""""""""""""""""""" -Hashes of downloaded patch and compressed archive, respectively. Only -needed for patches fetched from URLs. +Hashes of downloaded patch and compressed archive, respectively. +Only needed for patches fetched from URLs. -"""""""" ``when`` """""""" -If supplied, this is a spec that tells Spack when to apply -the patch. If the installed package spec matches this spec, the -patch will be applied. In our example above, the patch is applied -when mvapich is at version ``1.9`` or higher. +If supplied, this is a spec that tells Spack when to apply the patch. +If the installed package spec matches this spec, the patch will be applied. +In our example above, the patch is applied when ``mvapich2`` is at version ``1.9`` or higher.
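+As with other directives, the spec passed to ``when`` is not limited to versions; it can combine any constraints supported on the command line.
+A hedged sketch (the patch file name and variant are illustrative):
+
+.. code-block:: python
+
+   # Only apply the fix when building version 2.0 or newer with MPI enabled
+   patch("fix-mpi-build.patch", when="@2.0: +mpi")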
-""""""""" ``level`` """"""""" -This tells Spack how to run the ``patch`` command. By default, -the level is 1 and Spack runs ``patch -p 1``. If level is 2, -Spack will run ``patch -p 2``, and so on. +This tells Spack how to run the ``patch`` command. +By default, the level is 1 and Spack runs ``patch -p 1``. +If level is 2, Spack will run ``patch -p 2``, and so on. -A lot of people are confused by the level, so here's a primer. If you -look in your patch file, you may see something like this: +A lot of people are confused by the level, so here's a primer. +If you look in your patch file, you may see something like this: .. code-block:: diff :linenos: @@ -2387,32 +2321,23 @@ look in your patch file, you may see something like this: #include #include "ad_lustre.h" -Lines 1-2 show paths with synthetic ``a/`` and ``b/`` prefixes. These -are placeholders for the two ``mvapich2`` source directories that -``diff`` compared when it created the patch file. This is git's -default behavior when creating patch files, but other programs may -behave differently. +Lines 1-2 show paths with synthetic ``a/`` and ``b/`` prefixes. +These are placeholders for the two ``mvapich2`` source directories that ``diff`` compared when it created the patch file. +This is git's default behavior when creating patch files, but other programs may behave differently. -``-p1`` strips off the first level of the prefix in both paths, -allowing the patch to be applied from the root of an expanded mvapich2 -archive. If you set level to ``2``, it would strip off ``src``, and -so on. +``-p1`` strips off the first level of the prefix in both paths, allowing the patch to be applied from the root of an expanded mvapich2 archive. +If you set level to ``2``, it would strip off ``src``, and so on. -It's generally easier to just structure your patch file so that it -applies cleanly with ``-p1``, but if you're using a patch you didn't -create yourself, ``level`` can be handy. +It's generally easier to just structure your patch file so that it applies cleanly with ``-p1``, but if you're using a patch you didn't create yourself, ``level`` can be handy. -""""""""""""""" ``working_dir`` """"""""""""""" -This tells Spack where to run the ``patch`` command. By default, -the working directory is the source path of the stage (``.``). -However, sometimes patches are made with respect to a subdirectory -and this is where the working directory comes in handy. Internally, -the working directory is given to ``patch`` via the ``-d`` option. -Let's take the example patch from above and assume for some reason, -it can only be downloaded in the following form: +This tells Spack where to run the ``patch`` command. +By default, the working directory is the source path of the stage (``.``). +However, sometimes patches are made with respect to a subdirectory and this is where the working directory comes in handy. +Internally, the working directory is given to ``patch`` via the ``-d`` option. +Let's take the example patch from above and assume for some reason, it can only be downloaded in the following form: .. code-block:: diff :linenos: @@ -2429,84 +2354,72 @@ it can only be downloaded in the following form: #include #include "ad_lustre.h" -Hence, the patch needs to be applied in the ``src/mpi`` subdirectory, and the -``working_dir="src/mpi"`` option would exactly do that. +Hence, the patch needs to be applied in the ``src/mpi`` subdirectory, and the ``working_dir="src/mpi"`` option would exactly do that. 
-^^^^^^^^^^^^^^^^^^^^^ Patch functions ^^^^^^^^^^^^^^^^^^^^^ -In addition to supplying patch files, you can write a custom function -to patch a package's source. For example, the ``py-pyside`` package -contains some custom code for tweaking the way the PySide build -handles ``RPATH``: +In addition to supplying patch files, you can write a custom function to patch a package's source. +For example, the ``py-pyside2`` package contains some custom code for tweaking the way the PySide build handles include files: .. _pyside-patch: -.. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/py_pyside/package.py - :pyobject: PyPyside.patch +.. literalinclude:: .spack/spack-packages/repos/spack_repo/builtin/packages/py_pyside2/package.py + :pyobject: PyPyside2.patch :linenos: -A ``patch`` function, if present, will be run after patch files are -applied and before ``install()`` is run. +A ``patch`` function, if present, will be run after patch files are applied and before ``install()`` is run. -You could put this logic in ``install()``, but putting it in a patch -function gives you some benefits. First, Spack ensures that the -``patch()`` function is run once per code checkout. That means that -if you run install, hit ctrl-C, and run install again, the code in the -patch function is only run once. +You could put this logic in ``install()``, but putting it in a patch function gives you some benefits. +First, Spack ensures that the ``patch()`` function is run once per code checkout. +That means that if you run install, hit ctrl-C, and run install again, the code in the patch function is only run once. .. _patch_dependency_patching: -^^^^^^^^^^^^^^^^^^^ Dependency patching ^^^^^^^^^^^^^^^^^^^ -So far we've covered how the ``patch`` directive can be used by a package -to patch *its own* source code. Packages can *also* specify patches to be -applied to their dependencies, if they require special modifications. As -with all packages in Spack, a patched dependency library can coexist with -other versions of that library. See the `section on depends_on -`_ for more details. +So far we've covered how the ``patch`` directive can be used by a package to patch *its own* source code. +Packages can *also* specify patches to be applied to their dependencies, if they require special modifications. +As with all packages in Spack, a patched dependency library can coexist with other versions of that library. +See the `section on depends_on `_ for more details. .. _patch_inspecting_patches: -^^^^^^^^^^^^^^^^^^^ Inspecting patches ^^^^^^^^^^^^^^^^^^^ -If you want to better understand the patches that Spack applies to your -packages, you can do that using ``spack spec``, ``spack find``, and other -query commands. Let's look at ``m4``. If you run ``spack spec m4``, you -can see the patches that would be applied to ``m4``:: +If you want to better understand the patches that Spack applies to your packages, you can do that using ``spack spec``, ``spack find``, and other query commands. +Let's look at ``m4``. +If you run ``spack spec m4``, you can see the patches that would be applied to ``m4``: - $ spack spec m4 - Input spec - -------------------------------- - m4 +.. 
code-block:: spec - Concretized - -------------------------------- - m4@1.4.18%apple-clang@9.0.0 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv arch=darwin-highsierra-x86_64 - ^libsigsegv@2.11%apple-clang@9.0.0 arch=darwin-highsierra-x86_64 + $ spack spec m4 + Input spec + -------------------------------- + m4 + + Concretized + -------------------------------- + m4@1.4.18%apple-clang@9.0.0 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv arch=darwin-highsierra-x86_64 + ^libsigsegv@2.11%apple-clang@9.0.0 arch=darwin-highsierra-x86_64 -You can also see patches that have been applied to installed packages -with ``spack find -v``:: +You can also see patches that have been applied to installed packages with ``spack find -v``: - $ spack find -v m4 - ==> 1 installed package - -- darwin-highsierra-x86_64 / apple-clang@9.0.0 ----------------- - m4@1.4.18 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv +.. code-block:: spec + + $ spack find -v m4 + ==> 1 installed package + -- darwin-highsierra-x86_64 / apple-clang@9.0.0 ----------------- + m4@1.4.18 patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00,c0a408fbffb7255fcc75e26bd8edab116fc81d216bfd18b473668b7739a4158e,fc9b61654a3ba1a8d6cd78ce087e7c96366c290bc8d2c299f09828d793b853c8 +sigsegv .. _cmd-spack-resource: -In both cases above, you can see that the patches' sha256 hashes are -stored on the spec as a variant. As mentioned above, this means that you -can have multiple, differently-patched versions of a package installed at -once. +In both cases above, you can see that the patches' sha256 hashes are stored on the spec as a variant. +As mentioned above, this means that you can have multiple, differently-patched versions of a package installed at once. -You can look up a patch by its sha256 hash (or a short version of it) -using the ``spack resource show`` command +You can look up a patch by its sha256 hash (or a short version of it) using the ``spack resource show`` command .. code-block:: console @@ -2515,15 +2428,13 @@ using the ``spack resource show`` command path: .../spack_repo/builtin/packages/m4/gnulib-pgi.patch applies to: builtin.m4 -``spack resource show`` looks up downloadable resources from package -files by hash and prints out information about them. Above, we see that -the ``3877ab54`` patch applies to the ``m4`` package. The output also -tells us where to find the patch. +``spack resource show`` looks up downloadable resources from package files by hash and prints out information about them. +Above, we see that the ``3877ab54`` patch applies to the ``m4`` package. +The output also tells us where to find the patch. -Things get more interesting if you want to know about dependency -patches. For example, when ``dealii`` is built with ``boost@1.68.0``, it -has to patch boost to work correctly. If you didn't know this, you might -wonder where the extra boost patches are coming from: +Things get more interesting if you want to know about dependency patches. 
+For example, when ``dealii`` is built with ``boost@1.68.0``, it has to patch boost to work correctly. +If you didn't know this, you might wonder where the extra boost patches are coming from: .. code-block:: console @@ -2536,22 +2447,17 @@ wonder where the extra boost patches are coming from: applies to: builtin.boost patched by: builtin.dealii -Here you can see that the patch is applied to ``boost`` by ``dealii``, -and that it lives in ``dealii``'s directory in Spack's ``builtin`` -package repository. +Here you can see that the patch is applied to ``boost`` by ``dealii``, and that it lives in ``dealii``'s directory in Spack's ``builtin`` package repository. .. _packaging_extensions: ----------- Extensions ---------- -Spack's support for package extensions is documented extensively in -:ref:`extensions`. This section documents how to make your own -extendable packages and extensions. +Spack's support for package extensions is documented extensively in :ref:`extensions`. +This section documents how to make your own extendable packages and extensions. -To support extensions, a package needs to set its ``extendable`` -property to ``True``, e.g.: +To support extensions, a package needs to set its ``extendable`` property to ``True``, e.g.: .. code-block:: python @@ -2560,9 +2466,7 @@ property to ``True``, e.g.: extendable = True ... -To make a package into an extension, simply add an -``extends`` call in the package definition, and pass it the name of an -extendable package: +To make a package into an extension, simply add an ``extends`` call in the package definition, and pass it the name of an extendable package: .. code-block:: python @@ -2571,18 +2475,14 @@ extendable package: extends("python") ... -This accomplishes a few things. Firstly, the Python package can set special -variables such as ``PYTHONPATH`` for all extensions when the run or build -environment is set up. Secondly, filesystem views can ensure that extensions -are put in the same prefix as their extendee. This ensures that Python in -a view can always locate its Python packages, even without environment -variables set. +This accomplishes a few things. +Firstly, the Python package can set special variables such as ``PYTHONPATH`` for all extensions when the run or build environment is set up. +Secondly, filesystem views can ensure that extensions are put in the same prefix as their extendee. +This ensures that Python in a view can always locate its Python packages, even without environment variables set. -A package can only extend one other package at a time. To support packages -that may extend one of a list of other packages, Spack supports multiple -``extends`` directives as long as at most one of them is selected as -a dependency during concretization. For example, a lua package could extend -either lua or luajit, but not both: +A package can only extend one other package at a time. +To support packages that may extend one of a list of other packages, Spack supports multiple ``extends`` directives as long as at most one of them is selected as a dependency during concretization. +For example, a lua package could extend either lua or luajit, but not both: .. code-block:: python @@ -2593,28 +2493,22 @@ either lua or luajit: extends("lua-luajit", when="~use_lua") ... -Now, a user can install, and activate, the ``lua-lpeg`` package for either -lua or luajit. +Now, a user can install and activate the ``lua-lpeg`` package for either lua or luajit.
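+With the conditional ``extends`` directives above, the chosen extendee simply follows the ``use_lua`` variant, for example:
+
+.. code-block:: spec
+
+   $ spack install lua-lpeg +use_lua   # extends lua
+   $ spack install lua-lpeg ~use_lua   # extends lua-luajit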
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Adding additional constraints ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Some packages produce a Python extension, but are only compatible with -Python 3, or with Python 2. In those cases, a ``depends_on()`` -declaration should be made in addition to the ``extends()`` -declaration: +Some packages produce a Python extension, but require a minimum version of Python to work correctly. +In those cases, a ``depends_on()`` declaration should be made in addition to the ``extends()`` declaration: .. code-block:: python class Icebin(Package): extends("python", when="+python") - depends_on("python@3:", when="+python") + depends_on("python@3.12:", when="+python") -Many packages produce Python extensions for *some* variants, but not -others: they should extend ``python`` only if the appropriate -variant(s) are selected. This may be accomplished with conditional -``extends()`` declarations: +Many packages produce Python extensions for *some* variants, but not others: they should extend ``python`` only if the appropriate variant(s) are selected. +This may be accomplished with conditional ``extends()`` declarations: .. code-block:: python @@ -2623,7 +2517,6 @@ variant(s) are selected. This may be accomplished with conditional extends("python", when="+python") ... --------------------------- Mixins for common metadata -------------------------- @@ -2670,26 +2563,29 @@ These mixins should be used as additional base classes for your package, in addi In the example above ``Cp2k`` inherits the variants and conflicts defined by ``CudaPackage``. -.. _package_maintainers: +.. _maintainers: ------------ Maintainers ----------- -Each package in Spack may have one or more maintainers, i.e. one or more GitHub accounts of people who want to be notified any time the package is modified. - -When a pull request is submitted that updates the package, these people will be requested to review the PR. This is useful for developers who maintain a Spack package for their own software, as well as users who rely on a piece of software and want to ensure that the package doesn't break. -It also gives users a list of people to contact for help when someone reports a build error with the package. +Each package in Spack may list one or more maintainers: GitHub accounts of people who want to be notified whenever the package is modified. +The list also provides contacts for people needing help with build errors. -To add maintainers to a package, simply declare them with the ``maintainers`` directive: +Adding maintainers is easy. +After familiarizing yourself with the responsibilities of the :ref:`Package Maintainers ` role, you simply need to declare their GitHub accounts in the ``maintainers`` directive: .. code-block:: python - maintainers("user1", "user2") + maintainers("github_user1", "github_user2") -The list of maintainers is additive, and includes all the accounts eventually declared in base classes. +.. warning:: + + Please do not add accounts without the consent of the owner. + +The final list of maintainers includes accounts declared in the package's base classes. + +.. _package_license: -------------------- License Information ------------------- @@ -2717,7 +2613,8 @@ Note that specifying a license without a ``when=`` clause makes it apply to all For example, a project might have switched licenses at some point or have certain build configurations that include files that are licensed differently. Spack itself used to be under the ``LGPL-2.1`` license, until it was relicensed in version ``0.12`` in 2018.
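+As a minimal sketch, the simplest form is a single unconditional declaration (the SPDX identifier here is illustrative):
+
+.. code-block:: python
+
+   # Applies to every version and variant of the package
+   license("Apache-2.0 OR MIT")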
-You can specify when a ``license()`` directive applies using a ``when=`` clause, just like other directives. For example, to specify that a specific license identifier should only apply to versions up to ``0.11``, but another license should apply for later versions, you could write: +You can specify when a ``license()`` directive applies using a ``when=`` clause, just like other directives. +For example, to specify that a specific license identifier should only apply to versions up to ``0.11``, but another license should apply for later versions, you could write: .. code-block:: python @@ -2732,8 +2629,7 @@ The operators you probably care most about are: * ``OR``: user chooses one license to adhere to; and * ``AND``: user has to adhere to all the licenses. -You may also care about `license exceptions `_ that use the ``WITH`` operator, -e.g. ``Apache-2.0 WITH LLVM-exception``. +You may also care about `license exceptions `_ that use the ``WITH`` operator, e.g. ``Apache-2.0 WITH LLVM-exception``. Many of the licenses that are currently in the spack repositories have been automatically determined. While this is great for bulk adding license information and is most likely correct, there are sometimes edge cases that require manual intervention. @@ -2747,14 +2643,12 @@ When you have validated a package license, either when doing so explicitly or as .. _license: --------------------- Proprietary software -------------------- In order to install proprietary software, Spack needs to know a few more details about a package. The following class attributes should be defined. -^^^^^^^^^^^^^^^^^^^^ ``license_required`` ^^^^^^^^^^^^^^^^^^^^ @@ -2763,7 +2657,6 @@ If set to ``True``, this software requires a license. If set to ``False``, all of the following attributes will be ignored. Defaults to ``False``. -^^^^^^^^^^^^^^^^^^^ ``license_comment`` ^^^^^^^^^^^^^^^^^^^ @@ -2771,7 +2664,6 @@ String. Contains the symbol used by the license manager to denote a comment. Defaults to ``#``. -^^^^^^^^^^^^^^^^^ ``license_files`` ^^^^^^^^^^^^^^^^^ @@ -2781,7 +2673,6 @@ All file paths must be relative to the installation directory. More complex packages like Intel may require multiple licenses for individual components. Defaults to the empty list. -^^^^^^^^^^^^^^^^ ``license_vars`` ^^^^^^^^^^^^^^^^ @@ -2789,7 +2680,6 @@ List of strings. Environment variables that can be set to tell the software where to look for a license if it is not in the usual location. Defaults to the empty list. -^^^^^^^^^^^^^^^ ``license_url`` ^^^^^^^^^^^^^^^ @@ -2827,7 +2717,6 @@ If you install a different version or variant of the package, Spack will automat If the software you are trying to package doesn't rely on license files, Spack will print a warning message, letting the user know that they need to set an environment variable or pointing them to installation documentation. -------------------- Grouping directives ------------------- @@ -2836,7 +2725,6 @@ Very often, these directives share a common argument, which you becomes repetiti .. _group_when_spec: -^^^^^^^^^^^^^^^^^^^^^^^^ Grouping with ``when()`` ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -2883,7 +2771,6 @@ Constraints from nested context managers are also combined together, but they ar .. _default_args: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Grouping with ``default_args()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -2932,16 +2819,11 @@ The above is short for: .. 
_custom-attributes: ------------------------------------------------- ``home``, ``command``, ``headers``, and ``libs`` ------------------------------------------------ -Often a package will need to provide attributes for dependents to query -various details about what it provides. While any number of custom defined -attributes can be implemented by a package, the four specific attributes -described below are always available on every package with default -implementations and the ability to customize with alternate implementations -in the case of virtual packages provided: +Often a package will need to provide attributes for dependents to query various details about what it provides. +While a package can implement any number of custom-defined attributes, the four attributes described below are always available on every package: each has a default implementation and can be overridden, including with alternate implementations for the virtual packages provided: =========== =========================================== ===================== Attribute Purpose Default =========== =========================================== ===================== | rest of ``.home`` =========== =========================================== ===================== -Each of these can be customized by implementing the relevant attribute -as a ``@property`` in the package's class: +Each of these can be customized by implementing the relevant attribute as a ``@property`` in the package's class: .. code-block:: python :linenos: class Foo(Package): ... + @property def libs(self): # The library provided by Foo is libMyFoo.so return find_libraries("libMyFoo", root=self.home, recursive=True) -A package may also provide a custom implementation of each attribute -for the virtual packages it provides by implementing the -``_`` property in the package's class. +A package may also provide custom implementations of each attribute for the virtual packages it provides, by implementing the ``_`` property in its package class. The implementation used is the first one found from: #. Specialized virtual: ``Package._`` @@ -2987,13 +2867,11 @@ The implementation used is the first one found from: The use of customized attributes is demonstrated in the next example. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Example: Customized attributes for virtual packages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Consider a package ``foo`` that can optionally provide two virtual -packages ``bar`` and ``baz``. When both are enabled, the installation tree -appears as follows: +Consider a package ``foo`` that can optionally provide two virtual packages ``bar`` and ``baz``. +When both are enabled, the installation tree appears as follows: .. code-block:: console @@ -3004,14 +2882,10 @@ appears as follows: baz/include/baz/baz.h baz/lib/libFooBaz.so -The install tree shows that ``foo`` is providing the header ``include/foo.h`` -and library ``lib64/libFoo.so`` in its install prefix. The virtual -package ``bar`` is providing ``include/bar/bar.h`` and library -``lib64/libFooBar.so``, also in ``foo``'s install prefix. The ``baz`` -package, however, is provided in the ``baz`` subdirectory of ``foo``'s -prefix with the ``include/baz/baz.h`` header and ``lib/libFooBaz.so`` -library. Such a package could implement the optional attributes as -follows: +The install tree shows that ``foo`` provides the header ``include/foo.h`` and library ``lib64/libFoo.so`` in its install prefix.
+The virtual package ``bar`` provides the header ``include/bar/bar.h`` and library ``lib64/libFooBar.so``, also in ``foo``'s install prefix. +The ``baz`` package, however, is provided in the ``baz`` subdirectory of ``foo``'s prefix with the ``include/baz/baz.h`` header and ``lib/libFooBaz.so`` library. +Such a package could implement the optional attributes as follows: .. code-block:: python :linenos: @@ -3023,7 +2897,7 @@ follows: ... provides("bar", when="+bar") provides("baz", when="+baz") - .... + ... # Just the foo headers @property @@ -3071,15 +2945,14 @@ Now consider another package, ``foo-app``, depending on all three: depends_on("bar") depends_on("baz") -The resulting spec objects for its dependencies shows the result of -the above attribute implementations: +The resulting spec objects for its dependencies show the result of the above attribute implementations: -.. code-block:: python +.. code-block:: pycon # The core headers and libraries of the foo package >>> spec["foo"] - foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell + foo@1.0/ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6 >>> spec["foo"].prefix "/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6" @@ -3107,13 +2980,13 @@ the above attribute implementations: >>> spec["foo"].libs.directories ["/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64"] -.. code-block:: python +.. code-block:: pycon # The virtual bar package in the same prefix as foo # bar resolves to the foo package >>> spec["bar"] - foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell + foo@1.0/ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6 >>> spec["bar"].prefix "/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6" @@ -3142,13 +3015,13 @@ the above attribute implementations: >>> spec["bar"].libs.directories ["/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/lib64"] -.. code-block:: python +.. code-block:: pycon # The virtual baz package in a subdirectory of foo's prefix # baz resolves to the foo package >>> spec["baz"] - foo@1.0%gcc@11.3.1+bar+baz arch=linux-fedora35-haswell + foo@1.0/ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6 >>> spec["baz"].prefix "/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6" @@ -3180,40 +3053,36 @@ the above attribute implementations: "/opt/spack/linux-fedora35-haswell/gcc-11.3.1/foo-1.0-ca3rczp5omy7dfzoqw4p7oc2yh3u7lt6/baz/lib" ] ------------------------------ Style guidelines for packages ----------------------------- The following guidelines are provided, in the interests of making Spack packages work in a consistent manner: -^^^^^^^^^^^^^ Variant Names ^^^^^^^^^^^^^ Spack packages with variants similar to already-existing Spack packages should use the same name for their variants. Standard variant names are: - ======= ======== ======================== - Name Default Description - ======= ======== ======================== - shared True Build shared libraries - mpi True Use MPI - python False Build Python extension - ======= ======== ======================== +======= ======== ======================== +Name Default Description +======= ======== ======================== +shared True Build shared libraries +mpi True Use MPI +python False Build Python extension +======= ======== ======================== If specified in this table, the corresponding default is recommended. -The semantics of the `shared` variant are important.
-When a package is built `~shared`, the package guarantees that no shared libraries are built. -When a package is built `+shared`, the package guarantees that shared libraries are built, but it makes no guarantee about whether static libraries are built. +The semantics of the ``shared`` variant are important. +When a package is built ``~shared``, the package guarantees that no shared libraries are built. +When a package is built ``+shared``, the package guarantees that shared libraries are built, but it makes no guarantee about whether static libraries are built. -^^^^^^^^^^^^^^^^^^^ Version definitions ^^^^^^^^^^^^^^^^^^^ Spack packages should list supported versions with the newest first. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using ``home`` vs ``prefix`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/lib/spack/docs/packaging_guide_testing.rst b/lib/spack/docs/packaging_guide_testing.rst index d761da357f80bf..0d8f6516da2c33 100644 --- a/lib/spack/docs/packaging_guide_testing.rst +++ b/lib/spack/docs/packaging_guide_testing.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -16,7 +17,6 @@ - **3. Testing** - :doc:`4. Advanced ` -====================================== Packaging Guide: testing installations ====================================== @@ -25,37 +25,29 @@ In this part of the packaging guide we will cover how to ensure your package bui .. _checking_an_installation: ------------------------- Checking an installation ------------------------ -A package that *appears* to install successfully does not mean -it is actually installed correctly or will continue to work indefinitely. -There are a number of possible points of failure so Spack provides -features for checking the software along the way. +A package that *appears* to install successfully is not necessarily installed correctly or guaranteed to keep working indefinitely. +There are a number of possible points of failure, so Spack provides features for checking the software along the way. -Failures can occur during and after the installation process. The -build may start but the software may not end up fully installed. The -installed software may not work at all or as expected. The software -may work after being installed but, due to changes on the system, -may stop working days, weeks, or months after being installed. +Failures can occur during and after the installation process. +The build may start, but the software may not end up fully installed. +The installed software may not work at all, or may not work as expected. +The software may work after being installed, but due to changes on the system, may stop working days, weeks, or months after being installed. -This section describes Spack's support for checks that can be performed -during and after its installation. The former checks are referred to as -``build-time tests`` and the latter as ``stand-alone (or smoke) tests``. +This section describes Spack's support for checks that can be performed during and after its installation. +The former checks are referred to as ``build-time tests`` and the latter as ``stand-alone (or smoke) tests``. .. _build_time-tests: -^^^^^^^^^^^^^^^^ Build-time tests ^^^^^^^^^^^^^^^^ -Spack infers the status of a build based on the contents of the install -prefix. Success is assumed if anything (e.g., a file or directory) is -written after ``install()`` completes.
Otherwise, the build is assumed -to have failed. However, the presence of install prefix contents -is not a sufficient indicator of success so Spack supports the addition -of tests that can be performed during `spack install` processing. +Spack infers the status of a build based on the contents of the install prefix. +Success is assumed if anything (e.g., a file or directory) is written after ``install()`` completes. +Otherwise, the build is assumed to have failed. +However, the presence of install prefix contents is not a sufficient indicator of success, so Spack supports the addition of tests that can be performed during ``spack install`` processing. Consider a simple autotools build using the following commands: @@ -65,25 +57,20 @@ Consider a simple autotools build using the following commands: $ make $ make install -Standard Autotools and CMake do not write anything to the prefix from -the ``configure`` and ``make`` commands. Files are only written from -the ``make install`` after the build completes. +Standard Autotools and CMake do not write anything to the prefix from the ``configure`` and ``make`` commands. +Files are only written by ``make install`` after the build completes. .. note:: - If you want to learn more about ``Autotools`` and ``CMake`` packages - in Spack, refer to :ref:`AutotoolsPackage ` and - :ref:`CMakePackage `, respectively. + If you want to learn more about ``Autotools`` and ``CMake`` packages in Spack, refer to :ref:`AutotoolsPackage ` and :ref:`CMakePackage `, respectively. What can you do to check that the build is progressing satisfactorily? -If there are specific files and/or directories expected of a successful -installation, you can add basic, fast ``sanity checks``. You can also add -checks to be performed after one or more installation phases. +If there are specific files and/or directories expected of a successful installation, you can add basic, fast ``sanity checks``. +You can also add checks to be performed after one or more installation phases. .. note:: - Build-time tests are performed when the ``--test`` option is passed - to ``spack install``. + Build-time tests are performed when the ``--test`` option is passed to ``spack install``. .. warning:: @@ -92,25 +79,18 @@ checks to be performed after one or more installation phases. .. _sanity-checks: -"""""""""""""""""""" Adding sanity checks """""""""""""""""""" -Unfortunately, many builds of scientific software modify the installation -prefix **before** ``make install``. Builds like this can falsely report -success when an error occurs before the installation is complete. Simple -sanity checks can be used to identify files and/or directories that are -required of a successful installation. Spack checks for the presence of -the files and directories after ``install()`` runs. +Unfortunately, many builds of scientific software modify the installation prefix **before** ``make install``. +Builds like this can falsely report success when an error occurs before the installation is complete. +Simple sanity checks can be used to identify files and/or directories that are required of a successful installation. +Spack checks for the presence of the files and directories after ``install()`` runs. -If any of the listed files or directories are missing, then the build will -fail and the install prefix will be removed. If they all exist, then Spack -considers the build successful from a sanity check perspective and keeps -the prefix in place.
+If any of the listed files or directories are missing, then the build will fail and the install prefix will be removed. +If they all exist, then Spack considers the build successful from a sanity check perspective and keeps the prefix in place. -For example, the sanity checks for the ``reframe`` package below specify -that eight paths must exist within the installation prefix after the -``install`` method completes. +For example, the sanity checks for the ``reframe`` package below specify that eight paths must exist within the installation prefix after the ``install`` method completes. .. code-block:: python @@ -119,15 +99,21 @@ that eight paths must exist within the installation prefix after the # sanity check sanity_check_is_file = [join_path("bin", "reframe")] - sanity_check_is_dir = ["bin", "config", "docs", "reframe", "tutorials", - "unittests", "cscs-checks"] - -When you run ``spack install`` with tests enabled, Spack will ensure that -a successfully installed package has the required files and/or directories. + sanity_check_is_dir = [ + "bin", + "config", + "docs", + "reframe", + "tutorials", + "unittests", + "cscs-checks", + ] + +When you run ``spack install`` with tests enabled, Spack will ensure that a successfully installed package has the required files and/or directories. For example, running: -.. code-block:: console +.. code-block:: spec $ spack install --test=root reframe @@ -145,34 +131,23 @@ and the following **directories**: * ``self.prefix.unittests`` * ``self.prefix.cscs-checks`` -If **any** of these paths are missing, then Spack considers the installation -to have failed. +If **any** of these paths are missing, then Spack considers the installation to have failed. .. note:: - You **MUST** use ``sanity_check_is_file`` to specify required - files and ``sanity_check_is_dir`` for required directories. + You **MUST** use ``sanity_check_is_file`` to specify required files and ``sanity_check_is_dir`` for required directories. .. _install_phase-tests: -""""""""""""""""""""""""""""""" Adding installation phase tests """"""""""""""""""""""""""""""" -Sometimes packages appear to build "correctly" only to have runtime -behavior issues discovered at a later stage, such as after a full -software stack relying on them has been built. Checks can be performed -at different phases of the package installation to possibly avoid -these types of problems. Some checks are built-in to different build -systems, while others will need to be added to the package. - -Built-in installation phase tests are provided by packages inheriting -from select :ref:`build systems `, where naming conventions -are used to identify typical test identifiers for those systems. In -general, you won't need to add anything to your package to take advantage -of these tests if your software's build system complies with the convention; -otherwise, you'll want or need to override the post-phase method to perform -other checks. +Sometimes packages appear to build "correctly" only to have runtime behavior issues discovered at a later stage, such as after a full software stack relying on them has been built. +Checks can be performed at different phases of the package installation to possibly avoid these types of problems. +Some checks are built into different build systems, while others will need to be added to the package. + +Built-in installation phase tests are provided by packages inheriting from select :ref:`build systems `, where naming conventions are used to identify typical test identifiers for those systems.
+In general, you won't need to add anything to your package to take advantage of these tests if your software's build system complies with the convention; otherwise, you'll want or need to override the post-phase method to perform other checks. .. list-table:: Built-in installation phase tests :header-rows: 1 @@ -214,42 +189,32 @@ other checks. - ``build_test`` (must be overridden) - ``install_test`` (must be overridden) -For example, the ``Libelf`` package inherits from ``AutotoolsPackage`` -and its ``Makefile`` has a standard ``check`` target. So Spack will -automatically run ``make check`` after the ``build`` phase when it -is installed using the ``--test`` option, such as: +For example, the ``Libelf`` package inherits from ``AutotoolsPackage`` and its ``Makefile`` has a standard ``check`` target. +So Spack will automatically run ``make check`` after the ``build`` phase when it is installed using the ``--test`` option, such as: -.. code-block:: console +.. code-block:: spec $ spack install --test=root libelf -In addition to overriding any built-in build system installation -phase tests, you can write your own install phase tests. You will -need to use two decorators for each phase test method: +In addition to overriding any built-in build system installation phase tests, you can write your own install phase tests. +You will need to use two decorators for each phase test method: * ``run_after`` * ``on_package_attributes`` -The first decorator tells Spack when in the installation process to -run your test method installation process; namely *after* the provided -installation phase. The second decorator tells Spack to only run the -checks when the ``--test`` option is provided on the command line. +The first decorator tells Spack when in the installation process to run your test method; namely, *after* the provided installation phase. +The second decorator tells Spack to only run the checks when the ``--test`` option is provided on the command line. .. note:: - Be sure to place the directives above your test method in the order - ``run_after`` *then* ``on_package_attributes``. + Be sure to place the directives above your test method in the order ``run_after`` *then* ``on_package_attributes``. .. note:: - You also want to be sure the package supports the phase you use - in the ``run_after`` directive. For example, ``PackageBase`` only - supports the ``install`` phase while the ``AutotoolsPackage`` and - ``MakefilePackage`` support both ``install`` and ``build`` phases. + You also want to be sure the package supports the phase you use in the ``run_after`` directive. + For example, ``PackageBase`` only supports the ``install`` phase while the ``AutotoolsPackage`` and ``MakefilePackage`` support both ``install`` and ``build`` phases. -Assuming both ``build`` and ``install`` phases are available to you, -you could add additional checks to be performed after each of those -phases based on the skeleton provided below. +Assuming both ``build`` and ``install`` phases are available, you can add additional checks to be performed after each of those phases based on the skeleton provided below. .. code-block:: python @@ -259,23 +224,21 @@ phases based on the skeleton provided below.
@run_after("build") @on_package_attributes(run_tests=True) def check_build(self): - # Add your custom post-build phase tests - pass + # Add your custom post-build phase tests + pass @run_after("install") @on_package_attributes(run_tests=True) def check_install(self): - # Add your custom post-install phase tests - pass + # Add your custom post-install phase tests + pass .. note:: - You could also schedule work to be done **before** a given phase - using the ``run_before`` decorator. + You could also schedule work to be done **before** a given phase using the ``run_before`` decorator. -By way of a concrete example, the ``reframe`` package mentioned -previously has a simple installation phase check that runs the -installed executable. The check is implemented as follows: +By way of a concrete example, the ``reframe`` package mentioned previously has a simple installation phase check that runs the installed executable. +The check is implemented as follows: .. code-block:: python @@ -286,32 +249,28 @@ installed executable. The check is implemented as follows: @run_after("install") @on_package_attributes(run_tests=True) def check_list(self): - with working_dir(self.stage.source_path): - reframe = Executable(self.prefix.bin.reframe) - reframe("-l") + with working_dir(self.stage.source_path): + reframe = Executable(self.prefix.bin.reframe) + reframe("-l") -"""""""""""""""""""""""""""""""" Checking build-time test results """""""""""""""""""""""""""""""" -Checking the results of these tests after running ``spack install --test`` -can be done by viewing the spec's ``install-time-test-log.txt`` file whose -location will depend on whether the spec installed successfully. +Checking the results of these tests after running ``spack install --test`` can be done by viewing the spec's ``install-time-test-log.txt`` file whose location will depend on whether the spec installed successfully. -A successful installation results in the build and stage logs being copied -to the ``.spack`` subdirectory of the spec's prefix. For example, +A successful installation results in the build and stage logs being copied to the ``.spack`` subdirectory of the spec's prefix. +For example, -.. code-block:: console +.. code-block:: spec $ spack install --test=root zlib@1.2.13 ... [+] /home/user/spack/opt/spack/linux-rhel8-broadwell/gcc-10.3.1/zlib-1.2.13-tehu6cbsujufa2tb6pu3xvc6echjstv6 $ cat /home/user/spack/opt/spack/linux-rhel8-broadwell/gcc-10.3.1/zlib-1.2.13-tehu6cbsujufa2tb6pu3xvc6echjstv6/.spack/install-time-test-log.txt -If the installation fails due to build-time test failures, then both logs will -be left in the build stage directory as illustrated below: +If the installation fails due to build-time test failures, then both logs will be left in the build stage directory as illustrated below: -.. code-block:: console +.. code-block:: spec $ spack install --test=root zlib@1.2.13 ... @@ -323,58 +282,45 @@ be left in the build stage directory as illustrated below: .. _cmd-spack-test: -^^^^^^^^^^^^^^^^^ Stand-alone tests ^^^^^^^^^^^^^^^^^ -While build-time tests are integrated with the installation process, stand-alone -tests are expected to run days, weeks, even months after the software is -installed. The goal is to provide a mechanism for gaining confidence that -packages work as installed **and** *continue* to work as the underlying -software evolves. Packages can add and inherit stand-alone tests. The -``spack test`` command is used for stand-alone testing. 
+While build-time tests are integrated with the installation process, stand-alone tests are expected to run days, weeks, even months after the software is installed. +The goal is to provide a mechanism for gaining confidence that packages work as installed **and** *continue* to work as the underlying software evolves. +Packages can add and inherit stand-alone tests. +The ``spack test`` command is used for stand-alone testing. .. admonition:: Stand-alone test methods should complete within a few minutes. - Execution speed is important since these tests are intended to quickly - assess whether installed specs work on the system. Spack cannot spare - resources for more extensive testing of packages included in CI stacks. + Execution speed is important since these tests are intended to quickly assess whether installed specs work on the system. + Spack cannot spare resources for more extensive testing of packages included in CI stacks. - Consequently, stand-alone tests should run relatively quickly -- as in - on the order of at most a few minutes -- while testing at least key aspects - of the installed software. Save more extensive testing for other tools. + Consequently, stand-alone tests should run relatively quickly -- as in on the order of at most a few minutes -- while testing at least key aspects of the installed software. + Save more extensive testing for other tools. Tests are defined in the package using methods with names beginning ``test_``. -This allows Spack to support multiple independent checks, or parts. Files -needed for testing, such as source, data, and expected outputs, may be saved -from the build and/or stored with the package in the repository. Regardless -of origin, these files are automatically copied to the spec's test stage -directory prior to execution of the test method(s). Spack also provides helper -functions to facilitate common processing. +This allows Spack to support multiple independent checks, or parts. +Files needed for testing, such as source, data, and expected outputs, may be saved from the build and/or stored with the package in the repository. +Regardless of origin, these files are automatically copied to the spec's test stage directory prior to execution of the test method(s). +Spack also provides helper functions to facilitate common processing. .. tip:: **The status of stand-alone tests can be used to guide follow-up testing efforts.** - Passing stand-alone tests justifies performing more thorough testing, such - as running extensive unit or regression tests or tests that run at scale, - when available. These tests are outside of the scope of Spack packaging. + Passing stand-alone tests justifies performing more thorough testing, such as running extensive unit or regression tests or tests that run at scale, when available. + These tests are outside of the scope of Spack packaging. - Failing stand-alone tests indicate problems with the installation and, - therefore, no reason to proceed with more resource-intensive tests until - the failures have been investigated. + Failing stand-alone tests indicate problems with the installation and, therefore, no reason to proceed with more resource-intensive tests until the failures have been investigated. .. _configure-test-stage: -"""""""""""""""""""""""""""""""""""" Configuring the test stage directory """""""""""""""""""""""""""""""""""" -Stand-alone tests utilize a test stage directory to build, run, and track -tests in the same way Spack uses a build stage directory to install software. 
-The default test stage root directory, ``$HOME/.spack/test``, is defined in -:ref:`config.yaml `. This location is customizable by adding or -changing the ``test_stage`` path such that: +Stand-alone tests utilize a test stage directory to build, run, and track tests in the same way Spack uses a build stage directory to install software. +The default test stage root directory, ``$HOME/.spack/test``, is defined in :ref:`config.yaml `. +This location is customizable by adding or changing the ``test_stage`` path such that: .. code-block:: yaml @@ -385,70 +331,51 @@ Packages can use the ``self.test_suite.stage`` property to access the path. .. admonition:: Each spec being tested has its own test stage directory. - The ``config:test_stage`` option is the path to the root of a - **test suite**'s stage directories. + The ``config:test_stage`` option is the path to the root of a **test suite**'s stage directories. - Other package properties that provide paths to spec-specific subdirectories - and files are described in :ref:`accessing-files`. + Other package properties that provide paths to spec-specific subdirectories and files are described in :ref:`accessing-files`. .. _adding-standalone-tests: -"""""""""""""""""""""""" Adding stand-alone tests """""""""""""""""""""""" -Test recipes are defined in the package using methods with names beginning -``test_``. This allows for the implementation of multiple independent tests. -Each method has access to the information Spack tracks on the package, such -as options, compilers, and dependencies, supporting the customization of tests -to the build. Standard Python ``assert`` statements and other error reporting -mechanisms can be used. These exceptions are automatically caught and reported -as test failures. +Test recipes are defined in the package using methods with names beginning ``test_``. +This allows for the implementation of multiple independent tests. +Each method has access to the information Spack tracks on the package, such as options, compilers, and dependencies, supporting the customization of tests to the build. +Standard Python ``assert`` statements and other error reporting mechanisms can be used. +These exceptions are automatically caught and reported as test failures. -Each test method is an *implicit test part* named by the method. Its purpose -is the method's docstring. Providing a meaningful purpose for the test gives -context that can aid debugging. Spack outputs both the name and purpose at the -start of test execution so it's also important that the docstring/purpose be -brief. +Each test method is an *implicit test part* named by the method. +Its purpose is the method's docstring. +Providing a meaningful purpose for the test gives context that can aid debugging. +Spack outputs both the name and purpose at the start of test execution so it's also important that the docstring/purpose be brief. .. tip:: We recommend naming test methods so it is clear *what* is being tested. - For example, if a test method is building and/or running an executable - called ``example``, then call the method ``test_example``. This, together - with a similarly meaningful test purpose, will aid test comprehension, - debugging, and maintainability. + For example, if a test method is building and/or running an executable called ``example``, then call the method ``test_example``. + This, together with a similarly meaningful test purpose, will aid test comprehension, debugging, and maintainability. 
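+
+For illustration, a minimal method following these conventions might look like the sketch below.
+This is a hypothetical example: it assumes the installed package provides a ``bin/example`` executable that accepts a ``--version`` argument.
+
+.. code-block:: python
+
+   def test_example(self):
+       """ensure the installed example executable runs"""
+       # Hypothetical: assumes the package installs bin/example
+       example = which(self.prefix.bin.example)
+       out = example("--version", output=str.split, error=str.split)
+       assert "example" in out
+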
-Stand-alone tests run in an environment that provides access to information -on the installed software, such as build options, dependencies, and compilers. -Build options and dependencies are accessed using the same spec checks used -by build recipes. Examples of checking :ref:`variant settings ` and -:ref:`spec constraints ` can be found at the provided links. +Stand-alone tests run in an environment that provides access to information on the installed software, such as build options, dependencies, and compilers. +Build options and dependencies are accessed using the same spec checks used by build recipes. +Examples of checking :ref:`variant settings ` and :ref:`spec constraints ` can be found at the provided links. .. admonition:: Spack automatically sets up the test stage directory and environment. - Spack automatically creates the test stage directory and copies - relevant files *prior to* running tests. It can also ensure build - dependencies are available **if** necessary. + Spack automatically creates the test stage directory and copies relevant files *prior to* running tests. + It can also ensure build dependencies are available **if** necessary. The path to the test stage is configurable (see :ref:`configure-test-stage`). - Files that Spack knows to copy are those saved from the build (see - :ref:`cache_extra_test_sources`) and those added to the package repository - (see :ref:`cache_custom_files`). + Files that Spack knows to copy are those saved from the build (see :ref:`cache_extra_test_sources`) and those added to the package repository (see :ref:`cache_custom_files`). - Spack will use the value of the ``test_requires_compiler`` property to - determine whether it needs to also set up build dependencies (see - :ref:`test-build-tests`). + Spack will use the value of the ``test_requires_compiler`` property to determine whether it needs to also set up build dependencies (see :ref:`test-build-tests`). -The ``MyPackage`` package below provides two basic test examples: -``test_example`` and ``test_example2``. The first runs the installed -``example`` and ensures its output contains an expected string. The second -runs ``example2`` without checking output so is only concerned with confirming -the executable runs successfully. If the installed spec is not expected to have -``example2``, then the check at the top of the method will raise a special -``SkipTest`` exception, which is captured to facilitate reporting skipped test -parts to tools like CDash. +The ``MyPackage`` package below provides two basic test examples: ``test_example`` and ``test_example2``. +The first runs the installed ``example`` and ensures its output contains an expected string. +The second runs ``example2`` without checking output so is only concerned with confirming the executable runs successfully. +If the installed spec is not expected to have ``example2``, then the check at the top of the method will raise a special ``SkipTest`` exception, which is captured to facilitate reporting skipped test parts to tools like CDash. .. code-block:: python @@ -475,8 +402,7 @@ parts to tools like CDash. example2 = which(self.prefix.bin.example2) example2() -Output showing the identification of each test part after running the tests -is illustrated below. +Output showing the identification of each test part after running the tests is illustrated below. .. code-block:: console @@ -495,57 +421,47 @@ is illustrated below. .. admonition:: Do NOT implement tests that must run in the installation prefix. 
- Use of the package spec's installation prefix for building and running - tests is **strongly discouraged**. Doing so causes permission errors for - shared spack instances *and* facilities that install the software in - read-only file systems or directories. + Use of the package spec's installation prefix for building and running tests is **strongly discouraged**. + Doing so causes permission errors for shared spack instances *and* facilities that install the software in read-only file systems or directories. - Instead, start these test methods by explicitly copying the needed files - from the installation prefix to the test stage directory. Note the test - stage directory is the current directory when the test is executed with - the ``spack test run`` command. + Instead, start these test methods by explicitly copying the needed files from the installation prefix to the test stage directory. + Note the test stage directory is the current directory when the test is executed with the ``spack test run`` command. .. admonition:: Test methods for library packages should build test executables. - Stand-alone tests for library packages *should* build test executables - that utilize the *installed* library. Doing so ensures the tests follow - a similar build process that users of the library would follow. + Stand-alone tests for library packages *should* build test executables that utilize the *installed* library. + Doing so ensures the tests follow a similar build process that users of the library would follow. For more information on how to do this, see :ref:`test-build-tests`. .. tip:: - If you want to see more examples from packages with stand-alone tests, run - ``spack pkg grep "def\stest" | sed "s/\/package.py.*//g" | sort -u`` - from the command line to get a list of the packages. + If you want to see more examples from packages with stand-alone tests, run ``spack pkg grep "def\stest" | sed "s/\/package.py.*//g" | sort -u`` from the command line to get a list of the packages. .. _adding-standalone-test-parts: -""""""""""""""""""""""""""""" Adding stand-alone test parts """"""""""""""""""""""""""""" -Sometimes dependencies between steps of a test lend themselves to being -broken into parts. Tracking the pass/fail status of each part may aid -debugging. Spack provides a ``test_part`` context manager for use within -test methods. +Sometimes dependencies between steps of a test lend themselves to being broken into parts. +Tracking the pass/fail status of each part can aid debugging. +Spack provides a ``test_part`` context manager for use within test methods. -Each test part is independently run, tracked, and reported. Test parts are -executed in the order they appear. If one fails, subsequent test parts are -still performed even if they would also fail. This allows tools like CDash -to track and report the status of test parts across runs. The pass/fail status -of the enclosing test is derived from the statuses of the embedded test parts. +Each test part is independently run, tracked, and reported. +Test parts are executed in the order they appear. +If one fails, subsequent test parts are still performed even if they would also fail. +This allows tools like CDash to track and report the status of test parts across runs. +The pass/fail status of the enclosing test is derived from the statuses of the embedded test parts. .. admonition:: Test method and test part names **must** be unique. 
- Test results reporting requires that test methods and embedded test parts - within a package have unique names. + Test results reporting requires that test methods and embedded test parts within a package have unique names. The signature for ``test_part`` is: .. code-block:: python - def test_part(pkg, test_name, purpose, work_dir=".", verbose=False): + def test_part(pkg, test_name, purpose, work_dir=".", verbose=False): ... where each argument has the following meaning: @@ -555,25 +471,20 @@ where each argument has the following meaning: * ``purpose`` is a brief description used as a heading for the test part. - Output from the test is written to a test log file allowing the test name - and purpose to be searched for test part confirmation and debugging. + Output from the test is written to a test log file, allowing the test name and purpose to be searched for test part confirmation and debugging. * ``work_dir`` is the path to the directory in which the test will run. - The default of ``None``, or ``"."``, corresponds to the spec's test - stage (i.e., ``self.test_suite.test_dir_for_spec(self.spec)``). + The default of ``None``, or ``"."``, corresponds to the spec's test stage (i.e., ``self.test_suite.test_dir_for_spec(self.spec)``). .. admonition:: Start test part names with the name of the enclosing test. - We **highly recommend** starting the names of test parts with the name - of the enclosing test. Doing so helps with the comprehension, readability - and debugging of test results. + We **highly recommend** starting the names of test parts with the name of the enclosing test. + Doing so helps with the comprehension, readability, and debugging of test results. -Suppose ``MyPackage`` installs multiple executables that need to run in a -specific order since the outputs from one are inputs of others. Further suppose -we want to add an integration test that runs the executables in order. We can -accomplish this goal by implementing a stand-alone test method consisting of -test parts for each executable as follows: +Suppose ``MyPackage`` installs multiple executables that need to run in a specific order since the outputs from one are inputs of others. +Further suppose we want to add an integration test that runs the executables in order. +We can accomplish this goal by implementing a stand-alone test method consisting of test parts for each executable as follows: .. code-block:: python @@ -584,21 +495,20 @@ test parts for each executable as follows: """run setup, perform, and report""" with test_part(self, "test_series_setup", purpose="setup operation"): - exe = which(self.prefix.bin.setup)) - exe() + exe = which(self.prefix.bin.setup) + exe() with test_part(self, "test_series_run", purpose="perform operation"): - exe = which(self.prefix.bin.run)) - exe() + exe = which(self.prefix.bin.run) + exe() with test_part(self, "test_series_report", purpose="generate report"): - exe = which(self.prefix.bin.report)) - exe() + exe = which(self.prefix.bin.report) + exe() -The result is ``test_series`` runs the following executable in order: ``setup``, -``run``, and ``report``. In this case no options are passed to any of the -executables and no outputs from running them are checked. Consequently, the -implementation could be simplified with a for-loop as follows: +The result is that ``test_series`` runs the following executables in order: ``setup``, ``run``, and ``report``. +In this case, no options are passed to any of the executables, and no outputs from running them are checked.
+Consequently, the implementation could be simplified with a for-loop as follows: .. code-block:: python @@ -611,18 +521,15 @@ implementation could be simplified with a for-loop as follows: for exe, reason in [ ("setup", "setup operation"), ("run", "perform operation"), - ("report", "generate report") + ("report", "generate report"), ]: with test_part(self, f"test_series_{exe}", purpose=reason): exe = which(self.prefix.bin.join(exe)) exe() -In both cases, since we're using a context manager, each test part in -``test_series`` will execute regardless of the status of the other test -parts. +In both cases, since we're using a context manager, each test part in ``test_series`` will execute regardless of the status of the other test parts. -Now let's look at the output from running the stand-alone tests where -the second test part, ``test_series_run``, fails. +Now let's look at the output from running the stand-alone tests where the second test part, ``test_series_run``, fails. .. code-block:: console @@ -645,55 +552,38 @@ the second test part, ``test_series_run``, fails. FAILED: MyPackage::test_series ... -Since test parts depended on the success of previous parts, we see that the -failure of one results in the failure of subsequent checks and the overall -result of the test method, ``test_series``, is failure. +Since test parts depended on the success of previous parts, we see that the failure of one results in the failure of subsequent checks and the overall result of the test method, ``test_series``, is failure. .. tip:: - If you want to see more examples from packages using ``test_part``, run - ``spack pkg grep "test_part(" | sed "s/\/package.py.*//g" | sort -u`` - from the command line to get a list of the packages. + If you want to see more examples from packages using ``test_part``, run ``spack pkg grep "test_part(" | sed "s/\/package.py.*//g" | sort -u`` from the command line to get a list of the packages. .. _test-build-tests: -""""""""""""""""""""""""""""""""""""" Building and running test executables """"""""""""""""""""""""""""""""""""" .. admonition:: Reuse build-time sources and (small) input data sets when possible. - We **highly recommend** reusing build-time test sources and pared down - input files for testing installed software. These files are easier - to keep synchronized with software capabilities when they reside - within the software's repository. More information on saving files from - the installation process can be found at :ref:`cache_extra_test_sources`. + We **highly recommend** reusing build-time test sources and pared down input files for testing installed software. + These files are easier to keep synchronized with software capabilities when they reside within the software's repository. + More information on saving files from the installation process can be found at :ref:`cache_extra_test_sources`. - If that is not possible, you can add test-related files to the package - repository (see :ref:`cache_custom_files`). It will be important to - remember to maintain them so they work across listed or supported versions - of the package. + If that is not possible, you can add test-related files to the package repository (see :ref:`cache_custom_files`). + It will be important to remember to maintain them so they work across listed or supported versions of the package. -Packages that build libraries are good examples of cases where you'll want -to build test executables from the installed software before running them. 
-Doing so requires you to let Spack know it needs to load the package's -compiler configuration. This is accomplished by setting the package's -``test_requires_compiler`` property to ``True``. +Packages that build libraries are good examples of cases where you'll want to build test executables from the installed software before running them. +Doing so requires you to let Spack know it needs to load the package's compiler configuration. +This is accomplished by setting the package's ``test_requires_compiler`` property to ``True``. .. admonition:: ``test_requires_compiler = True`` is required to build test executables. - Setting the property to ``True`` ensures access to the compiler through - canonical environment variables (e.g., ``CC``, ``CXX``, ``FC``, ``F77``). - It also gives access to build dependencies like ``cmake`` through their - ``spec objects`` (e.g., ``self.spec["cmake"].prefix.bin.cmake`` for the - path or ``self.spec["cmake"].command`` for the ``Executable`` instance). + Setting the property to ``True`` ensures access to the compiler through canonical environment variables (e.g., ``CC``, ``CXX``, ``FC``, ``F77``). + It also gives access to build dependencies like ``cmake`` through their ``spec objects`` (e.g., ``self.spec["cmake"].prefix.bin.cmake`` for the path or ``self.spec["cmake"].command`` for the ``Executable`` instance). - Be sure to add the property at the top of the package class under other - properties like the ``homepage``. + Be sure to add the property at the top of the package class under other properties like the ``homepage``. -The example below, which ignores how ``cxx-example.cpp`` is acquired, -illustrates the basic process of compiling a test executable using the -installed library before running it. +The example below, which ignores how ``cxx-example.cpp`` is acquired, illustrates the basic process of compiling a test executable using the installed library before running it. .. code-block:: python @@ -708,75 +598,52 @@ installed library before running it. exe = "cxx-example" ... cxx = which(os.environ["CXX"]) - cxx( - f"-L{self.prefix.lib}", - f"-I{self.prefix.include}", - f"{exe}.cpp", - "-o", exe - ) + cxx(f"-L{self.prefix.lib}", f"-I{self.prefix.include}", f"{exe}.cpp", "-o", exe) cxx_example = which(exe) cxx_example() -Typically the files used to build and/or run test executables are either -cached from the installation (see :ref:`cache_extra_test_sources`) or added -to the package repository (see :ref:`cache_custom_files`). There is nothing -preventing the use of both. +Typically the files used to build and/or run test executables are either cached from the installation (see :ref:`cache_extra_test_sources`) or added to the package repository (see :ref:`cache_custom_files`). +There is nothing preventing the use of both. .. _cache_extra_test_sources: -"""""""""""""""""""""""""""""""""""" Saving build- and install-time files """""""""""""""""""""""""""""""""""" -You can use the ``cache_extra_test_sources`` helper routine to copy -directories and/or files from the source build stage directory to the -package's installation directory. Spack will automatically copy these -files for you when it sets up the test stage directory and before it -begins running the tests. +You can use the ``cache_extra_test_sources`` helper routine to copy directories and/or files from the source build stage directory to the package's installation directory. 
+Spack will automatically copy these files for you when it sets up the test stage directory and before it begins running the tests. The signature for ``cache_extra_test_sources`` is: .. code-block:: python - def cache_extra_test_sources(pkg, srcs): + def cache_extra_test_sources(pkg, srcs): ... where each argument has the following meaning: * ``pkg`` is an instance of the package for the spec under test. -* ``srcs`` is a string *or* a list of strings corresponding to the - paths of subdirectories and/or files needed for stand-alone testing. +* ``srcs`` is a string *or* a list of strings corresponding to the paths of subdirectories and/or files needed for stand-alone testing. .. warning:: - Paths provided in the ``srcs`` argument **must be relative** to the - staged source directory. They will be copied to the equivalent relative - location under the test stage directory prior to test execution. + Paths provided in the ``srcs`` argument **must be relative** to the staged source directory. + They will be copied to the equivalent relative location under the test stage directory prior to test execution. -Contents of subdirectories and files are copied to a special test cache -subdirectory of the installation prefix. They are automatically copied to -the appropriate relative paths under the test stage directory prior to -executing stand-alone tests. +Contents of subdirectories and files are copied to a special test cache subdirectory of the installation prefix. +They are automatically copied to the appropriate relative paths under the test stage directory prior to executing stand-alone tests. .. tip:: *Perform test-related conversions once when copying files.* - If one or more of the copied files needs to be modified to reference - the installed software, it is recommended that those changes be made - to the cached files **once** in the post-``install`` copy method - **after** the call to ``cache_extra_test_sources``. This will reduce - the amount of unnecessary work in the test method **and** avoid problems - running stand-alone tests in shared instances and facility deployments. + If one or more of the copied files needs to be modified to reference the installed software, it is recommended that those changes be made to the cached files **once** in the post-``install`` copy method **after** the call to ``cache_extra_test_sources``. + This will reduce the amount of unnecessary work in the test method **and** avoid problems running stand-alone tests in shared instances and facility deployments. - The ``filter_file`` function can be quite useful for such changes - (see :ref:`file-filtering`). + The ``filter_file`` function can be quite useful for such changes (see :ref:`file-filtering`). Below is a basic example of a test that relies on files from the installation. -This package method reuses the contents of the ``examples`` subdirectory, -which is assumed to have all of the files necessary to allow ``make`` to -compile and link ``foo.c`` and ``bar.c`` against the package's installed -library. +This package method reuses the contents of the ``examples`` subdirectory, which is assumed to have all of the files necessary to allow ``make`` to compile and link ``foo.c`` and ``bar.c`` against the package's installed library. .. code-block:: python @@ -795,67 +662,47 @@ library. 
make() for program in ["foo", "bar"]: - with test_part( - self, - f"test_example_{program}", - purpose=f"ensure {program} runs" - ): + with test_part(self, f"test_example_{program}", purpose=f"ensure {program} runs"): exe = Executable(program) exe() -In this case, ``copy_test_files`` copies the associated files from the -build stage to the package's test cache directory under the installation -prefix. Running ``spack test run`` for the package results in Spack copying -the directory and its contents to the test stage directory. The -``working_dir`` context manager ensures the commands within it are executed -from the ``examples_dir``. The test builds the software using ``make`` before -running each executable, ``foo`` and ``bar``, as independent test parts. +In this case, ``copy_test_files`` copies the associated files from the build stage to the package's test cache directory under the installation prefix. +Running ``spack test run`` for the package results in Spack copying the directory and its contents to the test stage directory. +The ``working_dir`` context manager ensures the commands within it are executed from the ``examples_dir``. +The test builds the software using ``make`` before running each executable, ``foo`` and ``bar``, as independent test parts. .. note:: The method name ``copy_test_files`` here is for illustration purposes. You are free to use a name that is better suited to your package. - The key to copying files for stand-alone testing at build time is use - of the ``run_after`` directive, which ensures the associated files are - copied **after** the provided build stage (``install``) when the installation - prefix **and** files are available. + The key to copying files for stand-alone testing at build time is use of the ``run_after`` directive, which ensures the associated files are copied **after** the provided build stage (``install``) when the installation prefix **and** files are available. - The test method uses the path contained in the package's - ``self.test_suite.current_test_cache_dir`` property for the root directory - of the copied files. In this case, that's the ``examples`` subdirectory. + The test method uses the path contained in the package's ``self.test_suite.current_test_cache_dir`` property for the root directory of the copied files. + In this case, that's the ``examples`` subdirectory. .. tip:: - If you want to see more examples from packages that cache build files, run - ``spack pkg grep cache_extra_test_sources | sed "s/\/package.py.*//g" | sort -u`` - from the command line to get a list of the packages. + If you want to see more examples from packages that cache build files, run ``spack pkg grep cache_extra_test_sources | sed "s/\/package.py.*//g" | sort -u`` from the command line to get a list of the packages. .. _cache_custom_files: -""""""""""""""""""" Adding custom files """"""""""""""""""" -Sometimes it is helpful or necessary to include custom files for building and/or -checking the results of tests as part of the package. Examples of the types -of files that might be useful are: +Sometimes it is helpful or necessary to include custom files for building and/or checking the results of tests as part of the package. 
+Examples of the types of files that might be useful are: - test source files - test input files - test build scripts - expected test outputs -While obtaining such files from the software repository is preferred (see -:ref:`cache_extra_test_sources`), there are circumstances where doing so is not -feasible such as when the software is not being actively maintained. When test -files cannot be obtained from the repository or there is a need to supplement -files that can, Spack supports the inclusion of additional files under the -``test`` subdirectory of the package in the Spack repository. +While obtaining such files from the software repository is preferred (see :ref:`cache_extra_test_sources`), there are circumstances where doing so is not feasible, such as when the software is not being actively maintained. +When test files cannot be obtained from the repository or there is a need to supplement files that can, Spack supports the inclusion of additional files under the ``test`` subdirectory of the package in the Spack repository. -The following example assumes a ``custom-example.c`` is saved in ``MyLibrary`` -package's ``test`` subdirectory. It also assumes the program simply needs to -be compiled and linked against the installed ``MyLibrary`` software. +The following example assumes a ``custom-example.c`` is saved in the ``MyLibrary`` package's ``test`` subdirectory. +It also assumes the program simply needs to be compiled and linked against the installed ``MyLibrary`` software. .. code-block:: python @@ -872,53 +719,38 @@ be compiled and linked against the installed ``MyLibrary`` software. with working_dir(src_dir): cc = which(os.environ["CC"]) - cc( - f"-L{self.prefix.lib}", - f"-I{self.prefix.include}", - f"{exe}.cpp", - "-o", exe - ) + cc(f"-L{self.prefix.lib}", f"-I{self.prefix.include}", f"{exe}.c", "-o", exe) custom_example = Executable(exe) custom_example() -In this case, ``spack test run`` for the package results in Spack copying -the contents of the ``test`` subdirectory to the test stage directory path -in ``self.test_suite.current_test_data_dir`` before calling -``test_custom_example``. Use of the ``working_dir`` context manager -ensures the commands to build and run the program are performed from -within the appropriate subdirectory of the test stage. +In this case, ``spack test run`` for the package results in Spack copying the contents of the ``test`` subdirectory to the test stage directory path in ``self.test_suite.current_test_data_dir`` before calling ``test_custom_example``. +Use of the ``working_dir`` context manager ensures the commands to build and run the program are performed from within the appropriate subdirectory of the test stage. .. _expected_test_output_from_file: -""""""""""""""""""""""""""""""""""" Reading expected output from a file """"""""""""""""""""""""""""""""""" -The helper function ``get_escaped_text_output`` is available for packages -to retrieve properly formatted text from a file potentially containing -special characters. +The helper function ``get_escaped_text_output`` is available for packages to retrieve properly formatted text from a file potentially containing special characters. The signature for ``get_escaped_text_output`` is: .. code-block:: python - def get_escaped_text_output(filename): + def get_escaped_text_output(filename): ... where ``filename`` is the path to the file containing the expected output.
-The path provided to ``filename`` for one of the copied custom files -(:ref:`custom file `) is in the path rooted at -``self.test_suite.current_test_data_dir``. +The path provided to ``filename`` for one of the copied custom files (:ref:`custom file `) is in the path rooted at ``self.test_suite.current_test_data_dir``. -The example below shows how to reference both the custom database -(``packages.db``) and expected output (``dump.out``) files Spack copies -to the test stage: +The example below shows how to reference both the custom database (``packages.db``) and expected output (``dump.out``) files Spack copies to the test stage: .. code-block:: python import re + class Sqlite(AutotoolsPackage): ... @@ -926,18 +758,14 @@ to the test stage: """check example table dump""" test_data_dir = self.test_suite.current_test_data_dir db_filename = test_data_dir.join("packages.db") - .. + ... expected = get_escaped_text_output(test_data_dir.join("dump.out")) sqlite3 = which(self.prefix.bin.sqlite3) - out = sqlite3( - db_filename, ".dump", output=str.split, error=str.split - ) + out = sqlite3(db_filename, ".dump", output=str.split, error=str.split) for exp in expected: assert re.search(exp, out), f"Expected '{exp}' in output" -If the files were instead cached from installing the software, the paths to the -two files would be found under the ``self.test_suite.current_test_cache_dir`` -directory as shown below: +If the files were instead cached from installing the software, the paths to the two files would be found under the ``self.test_suite.current_test_cache_dir`` directory as shown below: .. code-block:: python @@ -945,45 +773,37 @@ directory as shown below: """check example table dump""" test_cache_dir = self.test_suite.current_test_cache_dir db_filename = test_cache_dir.join("packages.db") - .. + ... expected = get_escaped_text_output(test_cache_dir.join("dump.out")) ... -Alternatively, if both files had been installed by the software into the -``share/tests`` subdirectory of the installation prefix, the paths to the -two files would be referenced as follows: +Alternatively, if both files had been installed by the software into the ``share/tests`` subdirectory of the installation prefix, the paths to the two files would be referenced as follows: .. code-block:: python def test_example(self): """check example table dump""" db_filename = self.prefix.share.tests.join("packages.db") - .. - expected = get_escaped_text_output( - self.prefix.share.tests.join("dump.out") - ) + ... + expected = get_escaped_text_output(self.prefix.share.tests.join("dump.out")) ... .. _check_outputs: -"""""""""""""""""""""""""""""""""""" Comparing expected to actual outputs """""""""""""""""""""""""""""""""""" -The ``check_outputs`` helper routine is available for packages to ensure -multiple expected outputs from running an executable are contained within -the actual outputs. +The ``check_outputs`` helper routine is available for packages to ensure multiple expected outputs from running an executable are contained within the actual outputs. The signature for ``check_outputs`` is: .. code-block:: python - def check_outputs(expected, actual): + def check_outputs(expected, actual): ... where each argument has the expected type and meaning: -* ``expected`` is a string or list of strings containing the expected (raw) - output. +* ``expected`` is a string or list of strings containing the expected (raw) output. * ``actual`` is a string containing the actual output from executing the command. 
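+
+For illustration, a hypothetical use of the helper might look like the sketch below, assuming the installed package provides a ``bin/example`` executable with a ``--help`` option; the expected strings are placeholders.
+
+.. code-block:: python
+
+   def test_example_help(self):
+       """check the example help output"""
+       # Hypothetical: assumes the package installs bin/example
+       example = which(self.prefix.bin.example)
+       actual = example("--help", output=str.split, error=str.split)
+       check_outputs(["Usage:", "Options:"], actual)
+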
@@ -1000,26 +820,19 @@ Invoking the method is the equivalent of: .. tip:: - If you want to see more examples from packages that use this helper, run - ``spack pkg grep check_outputs | sed "s/\/package.py.*//g" | sort -u`` - from the command line to get a list of the packages. + If you want to see more examples from packages that use this helper, run ``spack pkg grep check_outputs | sed "s/\/package.py.*//g" | sort -u`` from the command line to get a list of the packages. .. _accessing-files: -""""""""""""""""""""""""""""""""""""""""" Finding package- and test-related files """"""""""""""""""""""""""""""""""""""""" -You may need to access files from one or more locations when writing -stand-alone tests. This can happen if the software's repository does not -include test source files or includes them but has no way to build the -executables using the installed headers and libraries. In these cases -you may need to reference the files relative to one or more root directories. -The table below lists relevant path properties and provides additional -examples of their use. See :ref:`expected_test_output_from_file` for -examples of accessing files saved from the software repository, package -repository, and installation. +You may need to access files from one or more locations when writing stand-alone tests. +This can happen if the software's repository does not include test source files or includes them but has no way to build the executables using the installed headers and libraries. +In these cases you may need to reference the files relative to one or more root directories. +The table below lists relevant path properties and provides additional examples of their use. +See :ref:`expected_test_output_from_file` for examples of accessing files saved from the software repository, package repository, and installation. .. list-table:: Directory-to-property mapping :header-rows: 1 @@ -1048,17 +861,12 @@ repository, and installation. .. _inheriting-tests: -"""""""""""""""""""""""""""" Inheriting stand-alone tests """""""""""""""""""""""""""" -Stand-alone tests defined in parent (e.g., :ref:`build-systems`) and -virtual (e.g., :ref:`virtual-dependencies`) packages are executed by -packages that inherit from or provide interface implementations for those -packages, respectively. +Stand-alone tests defined in parent (e.g., :ref:`build-systems`) and virtual (e.g., :ref:`virtual-dependencies`) packages are executed by packages that inherit from or provide interface implementations for those packages, respectively. -The table below summarizes the stand-alone tests that will be executed along -with those implemented in the package itself. +The table below summarizes the stand-alone tests that will be executed along with those implemented in the package itself. .. list-table:: Inherited/provided stand-alone tests :header-rows: 1 @@ -1082,24 +890,16 @@ with those implemented in the package itself. * - :ref:`SipPackage ` - Imports modules listed in the ``self.import_modules`` property with defaults derived from the tarball -These tests are very basic so it is important that package developers and -maintainers provide additional stand-alone tests customized to the package. +These tests are very basic so it is important that package developers and maintainers provide additional stand-alone tests customized to the package. .. warning:: - Any package that implements a test method with the same name as an - inherited method will override the inherited method. 
If that is not the - goal and you are not explicitly calling and adding functionality to - the inherited method for the test, then make sure that all test methods - and embedded test parts have unique test names. + Any package that implements a test method with the same name as an inherited method will override the inherited method. + If that is not the goal and you are not explicitly calling and adding functionality to the inherited method for the test, then make sure that all test methods and embedded test parts have unique test names. -One example of a package that adds its own stand-alone tests to those -"inherited" by the virtual package it provides an implementation for is -the `OpenMPI package -`_. +One example of a package that adds its own stand-alone tests to those "inherited" by the virtual package it provides an implementation for is the `OpenMPI package `_. -Below are snippets from running and viewing the stand-alone test results -for ``openmpi``: +Below are snippets from running and viewing the stand-alone test results for ``openmpi``: .. code-block:: console @@ -1139,89 +939,68 @@ for ``openmpi``: .. _cmd-spack-test-list: -""""""""""""""""""" ``spack test list`` """"""""""""""""""" -Packages available for install testing can be found using the -``spack test list`` command. The command outputs all installed -packages that have defined stand-alone test methods. +Packages available for install testing can be found using the ``spack test list`` command. +The command outputs all installed packages that have defined stand-alone test methods. -Alternatively you can use the ``--all`` option to get a list of -all packages that have stand-alone test methods even if the packages -are not installed. +Alternatively you can use the ``--all`` option to get a list of all packages that have stand-alone test methods even if the packages are not installed. -For more information, refer to `spack test list -`_. +For more information, refer to :ref:`spack-test-list`. .. _cmd-spack-test-run: -"""""""""""""""""" ``spack test run`` """""""""""""""""" -Install tests can be run for one or more installed packages using -the ``spack test run`` command. A ``test suite`` is created for all -of the provided specs. The command accepts the same arguments provided -to ``spack install`` (see :ref:`sec-specs`). If no specs are provided -the command tests all specs in the active environment or all specs -installed in the Spack instance if no environment is active. +Install tests can be run for one or more installed packages using the ``spack test run`` command. +A ``test suite`` is created for all of the provided specs. +The command accepts the same arguments provided to ``spack install`` (see :ref:`sec-specs`). +If no specs are provided the command tests all specs in the active environment or all specs installed in the Spack instance if no environment is active. -Test suites can be named using the ``--alias`` option. Unaliased -test suites use the content hash of their specs as their name. +Test suites can be named using the ``--alias`` option. +Unaliased test suites use the content hash of their specs as their name. Some of the more commonly used debugging options are: - ``--fail-fast`` stops testing each package after the first failure - ``--fail-first`` stops testing packages after the first failure -Test output is written to a text log file by default, though ``junit`` -and ``cdash`` are outputs available through the ``--log-format`` option. 
+Test output is written to a text log file by default, though ``junit`` and ``cdash`` are outputs available through the ``--log-format`` option. -For more information, refer to `spack test run -`_. +For more information, refer to :ref:`spack-test-run`. .. _cmd-spack-test-results: -"""""""""""""""""""""" ``spack test results`` """""""""""""""""""""" -The ``spack test results`` command shows results for all completed -test suites by default. The alias or content hash can be provided to -limit reporting to the corresponding test suite. +The ``spack test results`` command shows results for all completed test suites by default. +The alias or content hash can be provided to limit reporting to the corresponding test suite. -The ``--logs`` option includes the output generated by the associated -test(s) to facilitate debugging. +The ``--logs`` option includes the output generated by the associated test(s) to facilitate debugging. -The ``--failed`` option limits results shown to that of the failed -tests, if any, of matching packages. +The ``--failed`` option limits results shown to that of the failed tests, if any, of matching packages. -For more information, refer to `spack test results -`_. +For more information, refer to :ref:`spack-test-results`. .. _cmd-spack-test-find: -""""""""""""""""""" ``spack test find`` """"""""""""""""""" -The ``spack test find`` command lists the aliases or content hashes -of all test suites whose results are available. +The ``spack test find`` command lists the aliases or content hashes of all test suites whose results are available. -For more information, refer to `spack test find -`_. +For more information, refer to :ref:`spack-test-find`. .. _cmd-spack-test-remove: -""""""""""""""""""""" ``spack test remove`` """"""""""""""""""""" -The ``spack test remove`` command removes test suites to declutter -the test stage directory. You are prompted to confirm the removal -of each test suite **unless** you use the ``--yes-to-all`` option. +The ``spack test remove`` command removes test suites to declutter the test stage directory. +You are prompted to confirm the removal of each test suite **unless** you use the ``--yes-to-all`` option. -For more information, refer to `spack test remove -`_. +For more information, refer to :ref:`spack-test-remove`. diff --git a/lib/spack/docs/pipelines.rst b/lib/spack/docs/pipelines.rst index 0afb7fdf00d7b8..befee35c0f4f28 100644 --- a/lib/spack/docs/pipelines.rst +++ b/lib/spack/docs/pipelines.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,69 +9,48 @@ .. _pipelines: -============ CI Pipelines ============ -Spack provides commands that support generating and running automated build pipelines in CI instances. At the highest -level, it works like this: provide a spack environment describing the set of packages you care about, and include a -description of how those packages should be mapped to Gitlab runners. Spack can then generate a ``.gitlab-ci.yml`` -file containing job descriptions for all your packages that can be run by a properly configured CI instance. When -run, the generated pipeline will build and deploy binaries, and it can optionally report to a CDash instance -regarding the health of the builds as they evolve over time. +Spack provides commands that support generating and running automated build pipelines in CI instances. 
+At the highest level, it works like this: provide a Spack environment describing the set of packages you care about, and include a description of how those packages should be mapped to GitLab runners. +Spack can then generate a ``.gitlab-ci.yml`` file containing job descriptions for all your packages that can be run by a properly configured CI instance. +When run, the generated pipeline will build and deploy binaries, and it can optionally report to a CDash instance regarding the health of the builds as they evolve over time. ------------------------------- Getting started with pipelines ------------------------------ -To get started with automated build pipelines, a Gitlab instance with version ``>= 12.9`` -(more about Gitlab CI `here `_) -with at least one `runner `_ configured is required. This -can be done quickly by setting up a local Gitlab instance. +To get started with automated build pipelines, a GitLab instance with version ``>= 12.9`` (more about GitLab CI `here `_) with at least one `runner `_ configured is required. +This can be done quickly by setting up a local GitLab instance. -It is possible to set up pipelines on gitlab.com, but the builds there are limited to -60 minutes and generic hardware. It is also possible to -`hook up `_ -Gitlab to Google Kubernetes Engine (`GKE `_) -or Amazon Elastic Kubernetes Service (`EKS `_), though those -topics are outside the scope of this document. +It is possible to set up pipelines on gitlab.com, but the builds there are limited to 60 minutes and generic hardware. +It is also possible to `hook up `_ GitLab to Google Kubernetes Engine (`GKE `_) or Amazon Elastic Kubernetes Service (`EKS `_), though those topics are outside the scope of this document. After setting up a Gitlab instance for running CI, the basic steps for setting up a build pipeline are as follows: #. Create a repository in the Gitlab instance with CI and a runner enabled. #. Add a ``spack.yaml`` at the root containing your pipeline environment -#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate - the pipeline dynamically, and one to run the generated jobs). -#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above - to the gitlab repository - -See the :ref:`functional_example` section for a minimal working example. See also -the :ref:`custom_Workflow` section for a link to an example of a custom workflow -based on spack pipelines. - -Spack's pipelines are now making use of the -`trigger `_ syntax to run -dynamically generated -`child pipelines `_. -Note that the use of dynamic child pipelines requires running Gitlab version -``>= 12.9``. +#. Add a ``.gitlab-ci.yml`` at the root containing two jobs (one to generate the pipeline dynamically, and one to run the generated jobs). +#. Push a commit containing the ``spack.yaml`` and ``.gitlab-ci.yml`` mentioned above to the GitLab repository. + +See the :ref:`functional_example` section for a minimal working example. +See also the :ref:`custom_Workflow` section for a link to an example of a custom workflow based on Spack pipelines. + +Spack's pipelines now make use of the `trigger `_ syntax to run dynamically generated `child pipelines `_. +Note that the use of dynamic child pipelines requires running GitLab version ``>= 12.9``. .. _functional_example: ------------------- Functional Example ------------------
+The simplest fully functional standalone example of a working pipeline can be examined live at this example `project `_ on gitlab.com. -Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the -pipeline: +Here's the ``.gitlab-ci.yml`` file from that example that builds and runs the pipeline: .. code-block:: yaml - stages: [ "generate", "build" ] + stages: ["generate", "build"] variables: SPACK_REPOSITORY: "https://github.com/spack/spack.git" @@ -80,44 +60,36 @@ pipeline: generate-pipeline: tags: - - saas-linux-small-amd64 + - saas-linux-small-amd64 stage: generate image: name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01 script: - - git clone ${SPACK_REPOSITORY} - - cd spack && git checkout ${SPACK_REF} && cd ../ - - . "./spack/share/spack/setup-env.sh" - - spack --version - - spack env activate --without-view . - - spack -d -v --color=always - ci generate - --check-index-only - --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" - --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml" + - git clone ${SPACK_REPOSITORY} + - cd spack && git checkout ${SPACK_REF} && cd ../ + - . "./spack/share/spack/setup-env.sh" + - spack --version + - spack env activate --without-view . + - spack -d -v --color=always ci generate --check-index-only --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/cloud-ci-pipeline.yml" artifacts: paths: - - "${CI_PROJECT_DIR}/jobs_scratch_dir" + - "${CI_PROJECT_DIR}/jobs_scratch_dir" build-pipeline: stage: build trigger: include: - - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml - job: generate-pipeline + - artifact: jobs_scratch_dir/cloud-ci-pipeline.yml + job: generate-pipeline strategy: depend needs: - - artifacts: True - job: generate-pipeline + - artifacts: true + job: generate-pipeline -The key thing to note above is that there are two jobs: The first job to run, -``generate-pipeline``, runs the ``spack ci generate`` command to generate a -dynamic child pipeline and write it to a yaml file, which is then picked up -by the second job, ``build-jobs``, and used to trigger the downstream pipeline. +The key thing to note above is that there are two jobs: the first job to run, ``generate-pipeline``, runs the ``spack ci generate`` command to generate a dynamic child pipeline and write it to a yaml file, which is then picked up by the second job, ``build-pipeline``, and used to trigger the downstream pipeline. -And here's the spack environment built by the pipeline represented as a -``spack.yaml`` file: +And here's the Spack environment built by the pipeline represented as a ``spack.yaml`` file: .. code-block:: yaml @@ -132,7 +104,7 @@ And here's the spack environment built by the pipeline represented as a - zlib - bzip2 ~debug - compiler: - - '%gcc' + - "%gcc" specs: - matrix: @@ -145,7 +117,7 @@ And here's the spack environment built by the pipeline represented as a pipeline-gen: - any-job: tags: - - saas-linux-small-amd64 + - saas-linux-small-amd64 image: name: ghcr.io/spack/ubuntu20.04-runner-x86_64:2023-01-01 before_script: @@ -158,37 +130,24 @@ And here's the spack environment built by the pipeline represented as a .. note:: - The use of ``reuse: false`` in spack environments used for pipelines is - almost always what you want, as without it your pipelines will not rebuild - packages even if package hashes have changed. This is due to the concretizer - strongly preferring known hashes when ``reuse: true``.
- -The ``ci`` section in the above environment file contains the bare minimum -configuration required for ``spack ci generate`` to create a working pipeline. -The ``target: gitlab`` tells spack that the desired pipeline output is for -gitlab. However, this isn't strictly required, as currently, gitlab is the -only possible output format for pipelines. The ``pipeline-gen`` section -contains the key information needed to specify attributes for the generated -jobs. Notice that it contains a list which has only a single element in -this case. In real pipelines, it will almost certainly have more elements, -and in those cases, order is important: spack starts at the bottom of the -list and works upwards when applying attributes. - -But in this simple case, we use only the special key ``any-job`` to -indicate that spack should apply the specified attributes (``tags``, ``image``, -and ``before_script``) to any job it generates. This includes jobs for -building/pushing all packages, a ``rebuild-index`` job at the end of the -pipeline, as well as any ``noop`` jobs that might be needed by gitlab when -no rebuilds are required. - -Something to note is that in this simple case, we rely on spack to -generate a reasonable script for the package build jobs (it just creates -a script that invokes ``spack ci rebuild``). - -Another thing to note is the use of the ``SPACK_USER_CONFIG_DIR`` environment -variable in any generated jobs. The purpose of this is to make spack -aware of one final file in the example, the one that contains the mirror -configuration. This file, ``mirrors.yaml`` looks like this: + The use of ``reuse: false`` in Spack environments used for pipelines is almost always what you want, as without it your pipelines will not rebuild packages even if package hashes have changed. + This is due to the concretizer strongly preferring known hashes when ``reuse: true``. + +The ``ci`` section in the above environment file contains the bare minimum configuration required for ``spack ci generate`` to create a working pipeline. +The ``target: gitlab`` tells Spack that the desired pipeline output is for GitLab. +However, this isn't strictly required, as currently, GitLab is the only possible output format for pipelines. +The ``pipeline-gen`` section contains the key information needed to specify attributes for the generated jobs. +Notice that it contains a list which has only a single element in this case. +In real pipelines, it will almost certainly have more elements, and in those cases, order is important: Spack starts at the bottom of the list and works upwards when applying attributes. + +But in this simple case, we use only the special key ``any-job`` to indicate that Spack should apply the specified attributes (``tags``, ``image``, and ``before_script``) to any job it generates. +This includes jobs for building/pushing all packages, a ``rebuild-index`` job at the end of the pipeline, as well as any ``noop`` jobs that might be needed by GitLab when no rebuilds are required. + +Something to note is that in this simple case, we rely on Spack to generate a reasonable script for the package build jobs (it just creates a script that invokes ``spack ci rebuild``). + +Another thing to note is the use of the ``SPACK_USER_CONFIG_DIR`` environment variable in any generated jobs. +The purpose of this is to make Spack aware of one final file in the example, the one that contains the mirror configuration. +This file, ``mirrors.yaml`` looks like this: .. code-block:: yaml @@ -201,198 +160,132 @@ configuration. 
This file, ``mirrors.yaml`` looks like this: secret_variable: CI_REGISTRY_PASSWORD -Note the name of the mirror is ``buildcache-destination``, which is required -as of Spack 0.23 (see below for more information). The mirror url simply -points to the container registry associated with the project, while -``id_variable`` and ``secret_variable`` refer to environment variables -containing the access credentials for the mirror. +Note the name of the mirror is ``buildcache-destination``, which is required as of Spack 0.23 (see below for more information). +The mirror url simply points to the container registry associated with the project, while ``id_variable`` and ``secret_variable`` refer to environment variables containing the access credentials for the mirror. -When spack builds packages for this example project, they will be pushed to -the project container registry, where they will be available for subsequent -jobs to install as dependencies or for other pipelines to use to build runnable -container images. +When Spack builds packages for this example project, they will be pushed to the project container registry, where they will be available for subsequent jobs to install as dependencies or for other pipelines to use to build runnable container images. ------------------------------------ Spack commands supporting pipelines ----------------------------------- -Spack provides a ``ci`` command with a few sub-commands supporting spack -CI pipelines. These commands are covered in more detail in this section. +Spack provides a ``ci`` command with a few sub-commands supporting Spack CI pipelines. +These commands are covered in more detail in this section. .. _cmd-spack-ci: -^^^^^^^^^^^^ ``spack ci`` ^^^^^^^^^^^^ -Super-command for functionality related to generating pipelines and executing -pipeline jobs. +Super-command for functionality related to generating pipelines and executing pipeline jobs. .. _cmd-spack-ci-generate: -^^^^^^^^^^^^^^^^^^^^^ ``spack ci generate`` ^^^^^^^^^^^^^^^^^^^^^ -Throughout this documentation, references to the "mirror" mean the target -mirror which is checked for the presence of up-to-date specs, and where -any scheduled jobs should push built binary packages. In the past, this -defaulted to the mirror at index 0 in the mirror configs, and could be -overridden using the ``--buildcache-destination`` argument. Starting with -Spack 0.23, ``spack ci generate`` will require you to identify this mirror -by the name "buildcache-destination". While you can configure any number -of mirrors as sources for your pipelines, you will need to identify the -destination mirror by name. - -Concretizes the specs in the active environment, stages them (as described in -:ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk. -During concretization of the environment, ``spack ci generate`` also writes a -``spack.lock`` file which is then provided to generated child jobs and made -available in all generated job artifacts to aid in reproducing failed builds -in a local environment. This means there are two artifacts that need to be -exported in your pipeline generation job (defined in your ``.gitlab-ci.yml``). -The first is the output yaml file of ``spack ci generate``, and the other is -the directory containing the concrete environment files. 
In the -:ref:`functional_example` section, we only mentioned one path in the -``artifacts`` ``paths`` list because we used ``--artifacts-root`` as the -top level directory containing both the generated pipeline yaml and the -concrete environment. - -Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are -generated for specs that are already up to date on the mirror. If enabling -DAG pruning using ``--prune-dag``, more information may be required in your -``spack.yaml`` file, see the :ref:`noop_jobs` section below regarding -``noop-job``. - -The optional ``--check-index-only`` argument can be used to speed up pipeline -generation by telling spack to consider only remote buildcache indices when -checking the remote mirror to determine if each spec in the DAG is up to date -or not. The default behavior is for spack to fetch the index and check it, -but if the spec is not found in the index, it also performs a direct check for -the spec on the mirror. If the remote buildcache index is out of date, which -can easily happen if it is not updated frequently, this behavior ensures that -spack has a way to know for certain about the status of any concrete spec on -the remote mirror, but can slow down pipeline generation significantly. - -The optional ``--output-file`` argument should be an absolute path (including -file name) to the generated pipeline, and if not given, the default is -``./.gitlab-ci.yml``. - -While optional, the ``--artifacts-root`` argument is used to determine where -the concretized environment directory should be located. This directory will -be created by ``spack ci generate`` and will contain the ``spack.yaml`` and -generated ``spack.lock`` which are then passed to all child jobs as an -artifact. This directory will also be the root directory for all artifacts -generated by jobs in the pipeline. +Throughout this documentation, references to the "mirror" mean the target mirror which is checked for the presence of up-to-date specs, and where any scheduled jobs should push built binary packages. +In the past, this defaulted to the mirror at index 0 in the mirror configs, and could be overridden using the ``--buildcache-destination`` argument. +Starting with Spack 0.23, ``spack ci generate`` will require you to identify this mirror by the name "buildcache-destination". +While you can configure any number of mirrors as sources for your pipelines, you will need to identify the destination mirror by name. + +Concretizes the specs in the active environment, stages them (as described in :ref:`staging_algorithm`), and writes the resulting ``.gitlab-ci.yml`` to disk. +During concretization of the environment, ``spack ci generate`` also writes a ``spack.lock`` file which is then provided to generated child jobs and made available in all generated job artifacts to aid in reproducing failed builds in a local environment. +This means there are two artifacts that need to be exported in your pipeline generation job (defined in your ``.gitlab-ci.yml``). +The first is the output yaml file of ``spack ci generate``, and the other is the directory containing the concrete environment files. +In the :ref:`functional_example` section, we only mentioned one path in the ``artifacts`` ``paths`` list because we used ``--artifacts-root`` as the top level directory containing both the generated pipeline yaml and the concrete environment. + +Using ``--prune-dag`` or ``--no-prune-dag`` configures whether or not jobs are generated for specs that are already up to date on the mirror. 
+If enabling DAG pruning using ``--prune-dag``, more information may be required in your ``spack.yaml`` file; see the :ref:`noop_jobs` section below regarding ``noop-job``.
+
+The optional ``--check-index-only`` argument can be used to speed up pipeline generation by telling Spack to consider only remote buildcache indices when checking the remote mirror to determine if each spec in the DAG is up to date or not.
+The default behavior is for Spack to fetch the index and check it, but if the spec is not found in the index, it also performs a direct check for the spec on the mirror.
+If the remote buildcache index is out of date, which can easily happen if it is not updated frequently, this behavior ensures that Spack has a way to know for certain about the status of any concrete spec on the remote mirror, but can slow down pipeline generation significantly.
+
+The optional ``--output-file`` argument should be an absolute path (including file name) to the generated pipeline, and if not given, the default is ``./.gitlab-ci.yml``.
+
+While optional, the ``--artifacts-root`` argument is used to determine where the concretized environment directory should be located.
+This directory will be created by ``spack ci generate`` and will contain the ``spack.yaml`` and generated ``spack.lock``, which are then passed to all child jobs as an artifact.
+This directory will also be the root directory for all artifacts generated by jobs in the pipeline.

.. _cmd-spack-ci-rebuild:

-^^^^^^^^^^^^^^^^^^^^
 ``spack ci rebuild``
 ^^^^^^^^^^^^^^^^^^^^

-The purpose of ``spack ci rebuild`` is to take an assigned
-spec and ensure a binary of a successful build exists on the target mirror.
-If the binary does not already exist, it is built from source and pushed
-to the mirror. The associated stand-alone tests are optionally run against
-the new build. Additionally, files for reproducing the build outside the
-CI environment are created to facilitate debugging.
-
-If a binary for the spec does not exist on the target mirror, an install
-shell script, ``install.sh``, is created and saved in the current working
-directory. The script is run in a job to install the spec from source. The
-resulting binary package is pushed to the mirror. If ``cdash`` is configured
-for the environment, the build results will be uploaded to the site.
-
-Environment variables and values in the ``ci::pipeline-gen`` section of the
-``spack.yaml`` environment file provide inputs to this process. The
-two main sources of environment variables are variables written into
-``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime.
-Several key CI pipeline variables are described in
-:ref:`ci_environment_variables`.
-
-If the ``--tests`` option is provided, stand-alone tests are performed but
-only if the build was successful *and* the package does not appear in the
-list of ``broken-tests-packages``. A shell script, ``test.sh``, is created
-and run to perform the tests. On completion, test logs are exported as job
-artifacts for review and to facilitate debugging. If ``cdash`` is configured,
-test results are also uploaded to the site.
-
-A snippet from an example ``spack.yaml`` file illustrating use of this
-option *and* specification of a package with broken tests is given below.
-The inclusion of a spec for building ``gptune`` is not shown here. Note
-that ``--tests`` is passed to ``spack ci rebuild`` as part of the
-``build-job`` script.
+The purpose of ``spack ci rebuild`` is to take an assigned spec and ensure a binary of a successful build exists on the target mirror. +If the binary does not already exist, it is built from source and pushed to the mirror. +The associated stand-alone tests are optionally run against the new build. +Additionally, files for reproducing the build outside the CI environment are created to facilitate debugging. + +If a binary for the spec does not exist on the target mirror, an install shell script, ``install.sh``, is created and saved in the current working directory. +The script is run in a job to install the spec from source. +The resulting binary package is pushed to the mirror. +If ``cdash`` is configured for the environment, the build results will be uploaded to the site. + +Environment variables and values in the ``ci::pipeline-gen`` section of the ``spack.yaml`` environment file provide inputs to this process. +The two main sources of environment variables are variables written into ``.gitlab-ci.yml`` by ``spack ci generate`` and the GitLab CI runtime. +Several key CI pipeline variables are described in :ref:`ci_environment_variables`. + +If the ``--tests`` option is provided, stand-alone tests are performed but only if the build was successful *and* the package does not appear in the list of ``broken-tests-packages``. +A shell script, ``test.sh``, is created and run to perform the tests. +On completion, test logs are exported as job artifacts for review and to facilitate debugging. +If ``cdash`` is configured, test results are also uploaded to the site. + +A snippet from an example ``spack.yaml`` file illustrating use of this option *and* specification of a package with broken tests is given below. +The inclusion of a spec for building ``gptune`` is not shown here. +Note that ``--tests`` is passed to ``spack ci rebuild`` as part of the ``build-job`` script. .. code-block:: yaml ci: pipeline-gen: - - build-job + - build-job: script: - - . "./share/spack/setup-env.sh" - - spack --version - - cd ${SPACK_CONCRETE_ENV_DIR} - - spack env activate --without-view . - - spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture.platform}-{architecture.target}/{name}-{version}-{hash}'" - - mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data - - if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi - - if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi - - spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2) - - broken-tests-packages: - - gptune - -In this case, even if ``gptune`` is successfully built from source, the -pipeline will *not* run its stand-alone tests since the package is listed -under ``broken-tests-packages``. - -Spack's cloud pipelines provide actual, up-to-date examples of the CI/CD -configuration and environment files used by Spack. You can find them -under Spack's `stacks -`_ repository directory. + - . "./share/spack/setup-env.sh" + - spack --version + - cd ${SPACK_CONCRETE_ENV_DIR} + - spack env activate --without-view . 
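+        # The projection below pads install paths with extra characters; the
+        # longer paths leave room for relocation when these binaries are later
+        # installed from the buildcache under a different prefix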
+        - spack config add "config:install_tree:projections:${SPACK_JOB_SPEC_PKG_NAME}:'morepadding/{architecture.platform}-{architecture.target}/{name}-{version}-{hash}'"
+        - mkdir -p ${SPACK_ARTIFACTS_ROOT}/user_data
+        - if [[ -r /mnt/key/intermediate_ci_signing_key.gpg ]]; then spack gpg trust /mnt/key/intermediate_ci_signing_key.gpg; fi
+        - if [[ -r /mnt/key/spack_public_key.gpg ]]; then spack gpg trust /mnt/key/spack_public_key.gpg; fi
+        - spack -d ci rebuild --tests > >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_out.txt) 2> >(tee ${SPACK_ARTIFACTS_ROOT}/user_data/pipeline_err.txt >&2)
+
+     broken-tests-packages:
+     - gptune
+
+In this case, even if ``gptune`` is successfully built from source, the pipeline will *not* run its stand-alone tests since the package is listed under ``broken-tests-packages``.
+
+Spack's cloud pipelines provide actual, up-to-date examples of the CI/CD configuration and environment files used by Spack.
+You can find them under Spack's `stacks `_ repository directory.

.. _cmd-spack-ci-rebuild-index:

-^^^^^^^^^^^^^^^^^^^^^^^^^^
 ``spack ci rebuild-index``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^

-This is a convenience command to rebuild the buildcache index associated with
-the mirror in the active, gitlab-enabled environment (specifying the mirror
-URL or name is not required).
+This is a convenience command to rebuild the buildcache index associated with the mirror in the active, GitLab-enabled environment (specifying the mirror URL or name is not required).

.. _cmd-spack-ci-reproduce-build:

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 ``spack ci reproduce-build``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Given the URL to a gitlab pipeline rebuild job, downloads and unzips the
-artifacts into a local directory (which can be specified with the optional
-``--working-dir`` argument), then finds the target job in the generated
-pipeline to extract details about how it was run. Assuming the job used a
-docker image, the command prints a ``docker run`` command line and some basic
-instructions on how to reproduce the build locally.
+Given the URL to a GitLab pipeline rebuild job, downloads and unzips the artifacts into a local directory (which can be specified with the optional ``--working-dir`` argument), then finds the target job in the generated pipeline to extract details about how it was run.
+Assuming the job used a Docker image, the command prints a ``docker run`` command line and some basic instructions on how to reproduce the build locally.

-Note that jobs failing in the pipeline will print messages giving the
-arguments you can pass to ``spack ci reproduce-build`` in order to reproduce
-a particular build locally.
+Note that jobs failing in the pipeline will print messages giving the arguments you can pass to ``spack ci reproduce-build`` in order to reproduce a particular build locally.

-------------------------------------
 Job Types
 ------------------------------------

-^^^^^^^^^^^^^^^
 Rebuild (build)
 ^^^^^^^^^^^^^^^

-Rebuild jobs, denoted as ``build-job``'s in the ``pipeline-gen`` list, are jobs
-associated with concrete specs that have been marked for rebuild. By default, a simple
-script for doing rebuild is generated but may be modified as needed.
+Rebuild jobs, denoted as ``build-job`` entries in the ``pipeline-gen`` list, are jobs associated with concrete specs that have been marked for rebuild.
+By default, a simple script for doing the rebuild is generated but may be modified as needed.

-The default script does three main steps: change directories to the pipelines concrete
-environment, activate the concrete environment, and run the ``spack ci rebuild`` command:
+The default script does three main steps: change directories to the pipeline's concrete environment, activate the concrete environment, and run the ``spack ci rebuild`` command:

 .. code-block:: bash

@@ -402,219 +295,152 @@ environment, activate the concrete environment, and run the ``spack ci rebuild``

 .. _rebuild_index:

-^^^^^^^^^^^^^^^^^^^^^^
 Update Index (reindex)
 ^^^^^^^^^^^^^^^^^^^^^^

-By default, while a pipeline job may rebuild a package, create a buildcache
-entry, and push it to the mirror, it does not automatically re-generate the
-mirror's buildcache index afterward. Because the index is not needed by the
-default rebuild jobs in the pipeline, not updating the index at the end of
-each job avoids possible race conditions between simultaneous jobs, and it
-avoids the computational expense of regenerating the index. This potentially
-saves minutes per job, depending on the number of binary packages in the
-mirror. As a result, the default is that the mirror's buildcache index may
-not correctly reflect the mirror's contents at the end of a pipeline.
-
-To make sure the buildcache index is up to date at the end of your pipeline,
-spack generates a job to update the buildcache index of the target mirror
-at the end of each pipeline by default. You can disable this behavior by
-adding ``rebuild-index: False`` inside the ``ci`` section of your
-spack environment.
-
-Reindex jobs do not allow modifying the ``script`` attribute since it is automatically
-generated using the target mirror listed in the ``mirrors::mirror`` configuration.
+By default, while a pipeline job may rebuild a package, create a buildcache entry, and push it to the mirror, it does not automatically re-generate the mirror's buildcache index afterward.
+Because the index is not needed by the default rebuild jobs in the pipeline, not updating the index at the end of each job avoids possible race conditions between simultaneous jobs, and it avoids the computational expense of regenerating the index.
+This potentially saves minutes per job, depending on the number of binary packages in the mirror.
+As a result, the default is that the mirror's buildcache index may not correctly reflect the mirror's contents at the end of a pipeline.
+
+To make sure the buildcache index is up to date at the end of your pipeline, Spack generates a job to update the buildcache index of the target mirror at the end of each pipeline by default.
+You can disable this behavior by adding ``rebuild-index: false`` inside the ``ci`` section of your Spack environment.
+
+Reindex jobs do not allow modifying the ``script`` attribute since it is automatically generated using the target mirror listed in the ``mirrors::mirror`` configuration.
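+
+For example, a minimal sketch of disabling the reindex job in the environment file, using the ``rebuild-index`` key described above:
+
+.. code-block:: yaml
+
+   spack:
+     ci:
+       # Skip the automatic buildcache index update at the end of the pipeline
+       rebuild-index: false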

-^^^^^^^^^^^^^^^^^
 Signing (signing)
 ^^^^^^^^^^^^^^^^^

-This job is run after all of the rebuild jobs are completed and is intended to be used
-to sign the package binaries built by a protected CI run. Signing jobs are generated
-only if a signing job ``script`` is specified and the spack CI job type is protected.
-Note, if an ``any-job`` section contains a script, this will not implicitly create a
-``signing`` job; a signing job may only exist if it is explicitly specified in the
-configuration with a ``script`` attribute. Specifying a signing job without a script
-does not create a signing job, and the job configuration attributes will be ignored.
+This job is run after all of the rebuild jobs are completed and is intended to be used to sign the package binaries built by a protected CI run.
+Signing jobs are generated only if a signing job ``script`` is specified and the Spack CI job type is protected.
+Note that if an ``any-job`` section contains a script, this will not implicitly create a ``signing`` job; a signing job may only exist if it is explicitly specified in the configuration with a ``script`` attribute.
+Specifying a signing job without a script does not create a signing job, and the job configuration attributes will be ignored.

 Signing jobs are always assigned the runner tags ``aws``, ``protected``, and ``notary``.

.. _noop_jobs:

-^^^^^^^^^^^^
 No Op (noop)
 ^^^^^^^^^^^^

-If no specs in an environment need to be rebuilt during a given pipeline run
-(meaning all are already up to date on the mirror), a single successful job
-(a NO-OP) is still generated to avoid an empty pipeline (which GitLab
-considers to be an error). The ``noop-job*`` sections
-can be added to your ``spack.yaml`` where you can provide ``tags`` and
-``image`` or ``variables`` for the generated NO-OP job. This section also
-supports providing ``before_script``, ``script``, and ``after_script``, in
-case you want to take some custom actions in the case of an empty pipeline.
+If no specs in an environment need to be rebuilt during a given pipeline run (meaning all are already up to date on the mirror), a single successful job (a NO-OP) is still generated to avoid an empty pipeline (which GitLab considers to be an error).
+The ``noop-job*`` sections can be added to your ``spack.yaml``, where you can provide ``tags`` and ``image`` or ``variables`` for the generated NO-OP job.
+This section also supports providing ``before_script``, ``script``, and ``after_script``, in case you want to take some custom actions in the case of an empty pipeline.

 Following is an example of this section added to a ``spack.yaml``:

 .. code-block:: yaml

    spack:
-     ci:
-       pipeline-gen:
-       - noop-job:
-           tags: ['custom', 'tag']
-           image:
-             name: 'some.image.registry/custom-image:latest'
-             entrypoint: ['/bin/bash']
-           script::
-             - echo "Custom message in a custom script"
+     ci:
+       pipeline-gen:
+       - noop-job:
+           tags: ["custom", "tag"]
+           image:
+             name: "some.image.registry/custom-image:latest"
+             entrypoint: ["/bin/bash"]
+           script::
+           - echo "Custom message in a custom script"

-The example above illustrates how you can provide the attributes used to run
-the NO-OP job in the case of an empty pipeline. The only field for the NO-OP
-job that might be generated for you is ``script``, but that will only happen
-if you do not provide one yourself. Notice in this example the ``script``
-uses the ``::`` notation to prescribe override behavior. Without this, the
-``echo`` command would have been prepended to the automatically generated script
-rather than replacing it.
+The example above illustrates how you can provide the attributes used to run the NO-OP job in the case of an empty pipeline.
+The only field for the NO-OP job that might be generated for you is ``script``, but that will only happen if you do not provide one yourself.
+Notice that in this example the ``script`` uses the ``::`` notation to prescribe override behavior.
+Without this, the ``echo`` command would have been prepended to the automatically generated script rather than replacing it.
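+
+As a contrasting sketch (attribute values invented for illustration), the same job with a single-colon ``script`` key would have its command merged with the generated script instead of overriding it:
+
+.. code-block:: yaml
+
+   spack:
+     ci:
+       pipeline-gen:
+       - noop-job:
+           # Single colon: this command is prepended to, not substituted for,
+           # any script Spack generates
+           script:
+           - echo "Runs in addition to the automatically generated commands"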

-------------------------------------
 ci.yaml
 ------------------------------------

-Here's an example of a spack configuration file describing a build pipeline:
+Here's an example of a Spack configuration file describing a build pipeline:

 .. code-block:: yaml

-   ci:
-     target: gitlab
-
-     rebuild_index: True
-
-     broken-specs-url: https://broken.specs.url
-
-     broken-tests-packages:
-     - gptune
-
-     pipeline-gen:
-     - submapping:
-       - match:
+   spack:
+     ci:
+       target: gitlab
+       rebuild_index: true
+       broken-specs-url: https://broken.specs.url
+       broken-tests-packages:
+       - gptune
+       pipeline-gen:
+       - submapping:
+         - match:
           - os=ubuntu24.04
-        build-job:
-          tags:
+           build-job:
+             tags:
            - spack-kube
-          image: spack/ubuntu-noble
-      - match:
+             image: spack/ubuntu-noble
+         - match:
           - os=almalinux9
-        build-job:
-          tags:
+           build-job:
+             tags:
            - spack-kube
-          image: spack/almalinux9
+             image: spack/almalinux9

-    cdash:
-      build-group: Release Testing
-      url: https://cdash.spack.io
-      project: Spack
-      site: Spack AWS Gitlab Instance
+       cdash:
+         build-group: Release Testing
+         url: https://cdash.spack.io
+         project: Spack
+         site: Spack AWS Gitlab Instance

-The ``ci`` config section is used to configure how the pipeline workload should be
-generated, mainly how the jobs for building specs should be assigned to the
-configured runners on your instance. The main section for configuring pipelines
-is ``pipeline-gen``, which is a list of job attribute sections that are merged,
-using the same rules as Spack configs (:ref:`config-scope-precedence`), from the bottom up.
-The order sections are applied is to be consistent with how spack orders scope precedence when merging lists.
-There are two main section types: ``-job`` sections and ``submapping``
-sections.
+The ``ci`` config section is used to configure how the pipeline workload should be generated, mainly how the jobs for building specs should be assigned to the configured runners on your instance.
+The main section for configuring pipelines is ``pipeline-gen``, which is a list of job attribute sections that are merged, using the same rules as Spack configs (:ref:`config-scope-precedence`), from the bottom up.
+Sections are applied in an order consistent with how Spack orders scope precedence when merging lists.
+There are two main section types: ``-job`` sections and ``submapping`` sections.

-^^^^^^^^^^^^^^^^^^^^^^
 Job Attribute Sections
 ^^^^^^^^^^^^^^^^^^^^^^

-Each type of job may have attributes added or removed via sections in the ``pipeline-gen``
-list. Job type specific attributes may be specified using the keys ``-job`` to
-add attributes to all jobs of type ```` or ``-job-remove`` to remove attributes
-of type ````. Each section may only contain one type of job attribute specification, i.e.,
-``build-job`` and ``noop-job`` may not coexist but ``build-job`` and ``build-job-remove`` may.
+Each type of job may have attributes added or removed via sections in the ``pipeline-gen`` list.
+Job-type-specific attributes may be specified using the keys ``-job`` to add attributes to all jobs of type ```` or ``-job-remove`` to remove attributes of type ````.
+Each section may only contain one type of job attribute specification, i.e., ``build-job`` and ``noop-job`` may not coexist but ``build-job`` and ``build-job-remove`` may.

 .. note::

    The ``*-remove`` specifications are applied before the additive attribute specification.
-   For example, in the case where both ``build-job`` and ``build-job-remove`` are listed in
-   the same ``pipeline-gen`` section, the value will still exist in the merged build-job after
-   applying the section.
-
-All of the attributes specified are forwarded to the generated CI jobs, however special
-treatment is applied to the attributes ``tags``, ``image``, ``variables``, ``script``,
-``before_script``, and ``after_script`` as they are components recognized explicitly by the
-Spack CI generator. For the ``tags`` attribute, Spack will remove reserved tags
-(:ref:`reserved_tags`) from all jobs specified in the config. In some cases, such as for
-``signing`` jobs, reserved tags will be added back based on the type of CI that is being run.
-
-Once a runner has been chosen to build a release spec, the ``build-job*``
-sections provide information determining details of the job in the context of
-the runner. At least one of the ``build-job*`` sections must contain a ``tags`` key, which
-is a list containing at least one tag used to select the runner from among the
-runners known to the gitlab instance. For Docker executor type runners, the
-``image`` key is used to specify the Docker image used to build the release spec
-(and could also appear as a dictionary with a ``name`` specifying the image name,
-as well as an ``entrypoint`` to override whatever the default for that image is).
-For other types of runners the ``variables`` key will be useful to pass any
-information on to the runner that it needs to do its work (e.g. scheduler
-parameters, etc.). Any ``variables`` provided here will be added, verbatim, to
-each job.
-
-The ``build-job`` section also allows users to supply custom ``script``,
-``before_script``, and ``after_script`` sections to be applied to every job
-scheduled on that runner. This allows users to do any custom preparation or
-cleanup tasks that fit their particular workflow, as well as completely
-customize the rebuilding of a spec if they so choose. Spack will not generate
-a ``before_script`` or ``after_script`` for jobs, but if you do not provide
-a custom ``script``, spack will generate one for you that assumes the concrete
-environment directory is located within your ``--artifacts-root`` (or if not
-provided, within your ``$CI_PROJECT_DIR``), activates that environment for
-you, and invokes ``spack ci rebuild``.
-
-Sections that specify scripts (``script``, ``before_script``, ``after_script``) are all
-read as lists of commands or lists of lists of commands. It is recommended to write scripts
-as lists of lists if scripts will be composed via merging. The default behavior of merging
-lists will remove duplicate commands and potentially apply unwanted reordering, whereas
-merging lists of lists will preserve the local ordering and never removes duplicate
-commands. When writing commands to the CI target script, all lists are expanded and
-flattened into a single list.
+   For example, in the case where both ``build-job`` and ``build-job-remove`` are listed in the same ``pipeline-gen`` section, the value will still exist in the merged build-job after applying the section.
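+
+As a hypothetical sketch (tag names invented for illustration), a single ``pipeline-gen`` entry can therefore swap one attribute value for another on all build jobs:
+
+.. code-block:: yaml
+
+   spack:
+     ci:
+       pipeline-gen:
+       - build-job-remove:
+           # Applied first: drop this tag from all build jobs
+           tags:
+           - spack-kube
+         build-job:
+           # Applied second: add the replacement tag
+           tags:
+           - high-memory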
+
+All of the attributes specified are forwarded to the generated CI jobs; however, special treatment is applied to the attributes ``tags``, ``image``, ``variables``, ``script``, ``before_script``, and ``after_script``, as they are components recognized explicitly by the Spack CI generator.
+For the ``tags`` attribute, Spack will remove reserved tags (:ref:`reserved_tags`) from all jobs specified in the config.
+In some cases, such as for ``signing`` jobs, reserved tags will be added back based on the type of CI that is being run.
+
+Once a runner has been chosen to build a release spec, the ``build-job*`` sections provide information determining details of the job in the context of the runner.
+At least one of the ``build-job*`` sections must contain a ``tags`` key, which is a list containing at least one tag used to select the runner from among the runners known to the GitLab instance.
+For Docker executor type runners, the ``image`` key is used to specify the Docker image used to build the release spec (and could also appear as a dictionary with a ``name`` specifying the image name, as well as an ``entrypoint`` to override whatever the default for that image is).
+For other types of runners, the ``variables`` key will be useful to pass any information on to the runner that it needs to do its work (e.g., scheduler parameters).
+Any ``variables`` provided here will be added, verbatim, to each job.
+
+The ``build-job`` section also allows users to supply custom ``script``, ``before_script``, and ``after_script`` sections to be applied to every job scheduled on that runner.
+This allows users to do any custom preparation or cleanup tasks that fit their particular workflow, as well as completely customize the rebuilding of a spec if they so choose.
+Spack will not generate a ``before_script`` or ``after_script`` for jobs, but if you do not provide a custom ``script``, Spack will generate one for you that assumes the concrete environment directory is located within your ``--artifacts-root`` (or if not provided, within your ``$CI_PROJECT_DIR``), activates that environment for you, and invokes ``spack ci rebuild``.
+
+Sections that specify scripts (``script``, ``before_script``, ``after_script``) are all read as lists of commands or lists of lists of commands.
+It is recommended to write scripts as lists of lists if scripts will be composed via merging.
+The default behavior of merging lists will remove duplicate commands and potentially apply unwanted reordering, whereas merging lists of lists will preserve the local ordering and never remove duplicate commands.
+When writing commands to the CI target script, all lists are expanded and flattened into a single list.
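+
+A sketch of the recommended list-of-lists form (commands borrowed from the examples earlier in this document); each inner list survives merging as a unit, and the whole structure is flattened when the job script is written:
+
+.. code-block:: yaml
+
+   spack:
+     ci:
+       pipeline-gen:
+       - build-job:
+           script:
+           # Each inner list is preserved intact (order and duplicates kept)
+           # when merged with other sections, then flattened at generation time
+           - - . "./share/spack/setup-env.sh"
+             - spack --version
+           - - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR}
+             - spack -d ci rebuild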

-^^^^^^^^^^^^^^^^^^^
 Submapping Sections
 ^^^^^^^^^^^^^^^^^^^

-A special case of attribute specification is the ``submapping`` section which may be used
-to apply job attributes to build jobs based on the package spec associated with the rebuild
-job. Submapping is specified as a list of spec ``match`` lists associated with
-``build-job``/``build-job-remove`` sections. There are two options for ``match_behavior``:
-either ``first`` or ``merge`` may be specified.
+A special case of attribute specification is the ``submapping`` section, which may be used to apply job attributes to build jobs based on the package spec associated with the rebuild job.
+Submapping is specified as a list of spec ``match`` lists associated with ``build-job``/``build-job-remove`` sections.
+There are two options for ``match_behavior``: either ``first`` or ``merge`` may be specified.
+In either case, the ``submapping`` list is processed from the bottom up, and then each ``match`` list is searched for a string that satisfies the check ``spec.satisfies({match_item})`` for each concrete spec.

-In the case of ``match_behavior: first``, the first ``match`` section in the list of
-``submappings`` that contains a string that satisfies the spec will apply its
-``build-job*`` attributes to the rebuild job associated with that spec. This is the
-default behavior and will be the method if no ``match_behavior`` is specified.
+In the case of ``match_behavior: first``, the first ``match`` section in the list of ``submappings`` that contains a string that satisfies the spec will apply its ``build-job*`` attributes to the rebuild job associated with that spec.
+This is the default behavior and will be the method if no ``match_behavior`` is specified.

-In the case of ``merge`` match, all of the ``match`` sections in the list of
-``submappings`` that contain a string that satisfies the spec will have the associated
-``build-job*`` attributes applied to the rebuild job associated with that spec. Again,
-the attributes will be merged starting from the bottom match going up to the top match.
+In the case of ``match_behavior: merge``, all of the ``match`` sections in the list of ``submappings`` that contain a string that satisfies the spec will have the associated ``build-job*`` attributes applied to the rebuild job associated with that spec.
+Again, the attributes will be merged starting from the bottom match going up to the top match.

 In the case that no match is found in a submapping section, no additional attributes will be applied.

-^^^^^^^^^^^^^^^^^^^^^^^^
 Dynamic Mapping Sections
 ^^^^^^^^^^^^^^^^^^^^^^^^

-For large scale CI where cost optimization is required, dynamic mapping allows for the use of real-time
-mapping schemes served by a web service. This type of mapping does not support the ``-remove`` type
-behavior, but it does follow the rest of the merge rules for configurations.
+For large-scale CI where cost optimization is required, dynamic mapping allows for the use of real-time mapping schemes served by a web service.
+This type of mapping does not support the ``-remove`` type behavior, but it does follow the rest of the merge rules for configurations.

-The dynamic mapping service needs to implement a single REST API interface for getting
-requests ``GET [:PORT][/PATH]?spec=``.
+The dynamic mapping service needs to implement a single REST API interface for getting requests ``GET [:PORT][/PATH]?spec=``.

 example request.

@@ -623,8 +449,7 @@ example request.

    https://my-dyn-mapping.spack.io/allocation?spec=zlib-ng@2.1.6 +compat+opt+shared+pic+new_strategies arch=linux-ubuntu20.04-x86_64_v3%gcc@12.0.0

-With an example response that updates kubernetes request variables, overrides the max retries for gitlab,
-and prepends a note about the modifications made by the my-dyn-mapping.spack.io service.
+An example response might update Kubernetes request variables, override the max retries for GitLab, and prepend a note about the modifications made by the my-dyn-mapping.spack.io service:

 .. code-block:: text

@@ -646,14 +471,14 @@ and prepends a note about the modifications made by the my-dyn-mapping.spack.io

 The ci.yaml configuration section takes the URL endpoint as well as a number of options to configure how responses are handled.

-It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore``
-respectively.
It is also possible to configure required attributes under ``required`` section. +It is possible to specify a list of allowed and ignored configuration attributes under ``allow`` and ``ignore`` respectively. +It is also possible to configure required attributes under ``required`` section. Options to configure the client timeout and SSL verification using the ``timeout`` and ``verify_ssl`` options. By default, the ``timeout`` is set to the option in ``config:timeout`` and ``verify_ssl`` is set to the option in ``config:verify_ssl``. -Passing header parameters to the request can be achieved through the ``header`` section. The values of the variables passed to the -header may be environment variables that are expanded at runtime, such as a private token configured on the runner. +Passing header parameters to the request can be achieved through the ``header`` section. +The values of the variables passed to the header may be environment variables that are expanded at runtime, such as a private token configured on the runner. Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocation``. @@ -661,211 +486,96 @@ Here is an example configuration pointing to ``my-dyn-mapping.spack.io/allocatio .. code-block:: yaml ci: - - dynamic-mapping: - endpoint: my-dyn-mapping.spack.io/allocation - timeout: 10 - verify_ssl: True - header: - PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN} - MY_CONFIG: "fuzz_allocation:false" - allow: - - variables - ignore: - - script - require: [] - - -^^^^^^^^^^^^^ -Bootstrapping -^^^^^^^^^^^^^ - - -The ``bootstrap`` section allows you to specify lists of specs from -your ``definitions`` that should be staged ahead of the environment's ``specs``. At the moment, -the only viable use-case for bootstrapping is to install compilers. - -Here's an example of what bootstrapping some compilers might look like: - -.. code-block:: yaml + pipeline-gen: + - dynamic-mapping: + endpoint: my-dyn-mapping.spack.io/allocation + timeout: 10 + verify_ssl: true + header: + PRIVATE_TOKEN: ${MY_PRIVATE_TOKEN} + MY_CONFIG: "fuzz_allocation:false" + allow: + - variables + ignore: + - script + require: [] - spack: - definitions: - - compiler-pkgs: - - 'llvm+clang@6.0.1 os=centos7' - - 'gcc@6.5.0 os=centos7' - - 'llvm+clang@6.0.1 os=ubuntu18.04' - - 'gcc@6.5.0 os=ubuntu18.04' - - pkgs: - - readline@7.0 - - compilers: - - '%gcc@5.5.0' - - '%gcc@6.5.0' - - '%gcc@7.3.0' - - '%clang@6.0.0' - - '%clang@6.0.1' - - oses: - - os=ubuntu18.04 - - os=centos7 - specs: - - matrix: - - [$pkgs] - - [$compilers] - - [$oses] - exclude: - - '%gcc@7.3.0 os=centos7' - - '%gcc@5.5.0 os=ubuntu18.04' - ci: - bootstrap: - - name: compiler-pkgs - compiler-agnostic: true - pipeline-gen: - # similar to the example higher up in this description - ... - -The example above adds a list to the ``definitions`` called ``compiler-pkgs`` -(you can add any number of these), which lists compiler packages that should -be staged ahead of the full matrix of release specs (in this example, only -readline). Then within the ``ci`` section, note the addition of a -``bootstrap`` section, which can contain a list of items, each referring to -a list in the ``definitions`` section. These items can either -be a dictionary or a string. If you supply a dictionary, it must have a name -key whose value must match one of the lists in definitions and it can have a -``compiler-agnostic`` key whose value is a boolean. If you supply a string, -then it needs to match one of the lists provided in ``definitions``. 
You can -think of the bootstrap list as an ordered list of pipeline "phases" that will -be staged before your actual release specs. While this introduces another -layer of bottleneck in the pipeline (all jobs in all stages of one phase must -complete before any jobs in the next phase can begin), it also means you are -guaranteed your bootstrapped compilers will be available when you need them. - -The ``compiler-agnostic`` key can be provided with each item in the -bootstrap list. It tells the ``spack ci generate`` command that any jobs staged -from that particular list should have the compiler removed from the spec, so -that any compiler available on the runner where the job is run can be used to -build the package. - -When including a bootstrapping phase as in the example above, the result is that -the bootstrapped compiler packages will be pushed to the binary mirror (and the -local artifacts mirror) before the actual release specs are built. - -Since bootstrapping compilers is optional, those items can be left out of the -environment/stack file, and in that case no bootstrapping will be done (only the -specs will be staged for building) and the runners will be expected to already -have all needed compilers installed and configured for spack to use. -^^^^^^^^^^^^^^^^ Broken Specs URL ^^^^^^^^^^^^^^^^ -The optional ``broken-specs-url`` key tells Spack to check against a list of -specs that are known to be currently broken in ``develop``. If any such specs -are found, the ``spack ci generate`` command will fail with an error message -informing the user what broken specs were encountered. This allows the pipeline -to fail early and avoid wasting compute resources attempting to build packages -that will not succeed. +The optional ``broken-specs-url`` key tells Spack to check against a list of specs that are known to be currently broken in ``develop``. +If any such specs are found, the ``spack ci generate`` command will fail with an error message informing the user what broken specs were encountered. +This allows the pipeline to fail early and avoid wasting compute resources attempting to build packages that will not succeed. -^^^^^^ CDash ^^^^^^ -The optional ``cdash`` section provides information that will be used by the -``spack ci generate`` command (invoked by ``spack ci start``) for reporting -to CDash. All the jobs generated from this environment will belong to a -"build group" within CDash that can be tracked over time. As the release -progresses, this build group may have jobs added or removed. The URL, project, -and site are used to specify the CDash instance to which build results should -be reported. +The optional ``cdash`` section provides information that will be used by the ``spack ci generate`` command (invoked by ``spack ci start``) for reporting to CDash. +All the jobs generated from this environment will belong to a "build group" within CDash that can be tracked over time. +As the release progresses, this build group may have jobs added or removed. +The URL, project, and site are used to specify the CDash instance to which build results should be reported. -Take a look at the -`schema `_ -for the ci section of the spack environment file, to see precisely what -syntax is allowed there. +Take a look at the `schema `_ for the ci section of the Spack environment file, to see precisely what syntax is allowed there. .. 
_reserved_tags: -^^^^^^^^^^^^^ Reserved Tags ^^^^^^^^^^^^^ -Spack has a subset of tags (``public``, ``protected``, and ``notary``) that it reserves -for classifying runners that may require special permissions or access. The tags -``public`` and ``protected`` are used to distinguish between runners that use public -permissions and runners with protected permissions. The ``notary`` tag is a special tag -that is used to indicate runners that have access to the highly protected information -used for signing binaries using the ``signing`` job. +Spack has a subset of tags (``public``, ``protected``, and ``notary``) that it reserves for classifying runners that may require special permissions or access. +The tags ``public`` and ``protected`` are used to distinguish between runners that use public permissions and runners with protected permissions. +The ``notary`` tag is a special tag that is used to indicate runners that have access to the highly protected information used for signing binaries using the ``signing`` job. .. _staging_algorithm: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Summary of ``.gitlab-ci.yml`` generation algorithm ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -All specs yielded by the matrix (or all the specs in the environment) have their -dependencies computed, and the entire resulting set of specs are staged together -before being run through the ``ci/pipeline-gen`` entries, where each staged -spec is assigned a runner. "Staging" is the name given to the process of -figuring out in what order the specs should be built, taking into consideration -Gitlab CI rules about jobs/stages. In the staging process, the goal is to maximize -the number of jobs in any stage of the pipeline, while ensuring that the jobs in -any stage only depend on jobs in previous stages (since those jobs are guaranteed -to have completed already). As a runner is determined for a job, the information -in the merged ``any-job*`` and ``build-job*`` sections is used to populate various parts of the job -description that will be used by the target CI pipelines. Once all the jobs have been assigned -a runner, the ``.gitlab-ci.yml`` is written to disk. - -The short example provided above would result in the ``readline``, ``ncurses``, -and ``pkgconf`` packages getting staged and built on the runner chosen by the -``spack-k8s`` tag. In this example, spack assumes the runner is a Docker executor -type runner, and thus certain jobs will be run in the ``centos7`` container -and others in the ``ubuntu-18.04`` container. The resulting ``.gitlab-ci.yml`` -will contain 6 jobs in three stages. Once the jobs have been generated, the -presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the -``spack ci generate`` command would result in all of the jobs being put in a -build group on CDash called "Release Testing" (that group will be created if -it didn't already exist). +All specs yielded by the matrix (or all the specs in the environment) have their dependencies computed, and the entire resulting set of specs are staged together before being run through the ``ci/pipeline-gen`` entries, where each staged spec is assigned a runner. +"Staging" is the name given to the process of figuring out in what order the specs should be built, taking into consideration Gitlab CI rules about jobs/stages. 
+In the staging process, the goal is to maximize the number of jobs in any stage of the pipeline, while ensuring that the jobs in any stage only depend on jobs in previous stages (since those jobs are guaranteed to have completed already).
+As a runner is determined for a job, the information in the merged ``any-job*`` and ``build-job*`` sections is used to populate various parts of the job description that will be used by the target CI pipelines.
+Once all the jobs have been assigned a runner, the ``.gitlab-ci.yml`` is written to disk.
+
+The short example provided above would result in the ``readline``, ``ncurses``, and ``pkgconf`` packages getting staged and built on the runner chosen by the ``spack-k8s`` tag.
+In this example, Spack assumes the runner is a Docker executor type runner, and thus certain jobs will be run in the ``centos7`` container and others in the ``ubuntu-18.04`` container.
+The resulting ``.gitlab-ci.yml`` will contain six jobs in three stages.
+Once the jobs have been generated, the presence of a ``SPACK_CDASH_AUTH_TOKEN`` environment variable during the ``spack ci generate`` command would result in all of the jobs being put in a build group on CDash called "Release Testing" (that group will be created if it didn't already exist).

.. _ci_artifacts:

-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 CI Artifacts Directory Layout
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------

-When running the CI build using the command ``spack ci rebuild`` a number of directories are created for
-storing data generated during the CI job. The default root directory for artifacts is ``job_scratch_root``.
-This can be overridden by passing the argument ``--artifacts-root`` to the ``spack ci generate`` command
-or by setting the ``SPACK_ARTIFACTS_ROOT`` environment variable in the build job scripts.
+When running the CI build using the command ``spack ci rebuild``, a number of directories are created for storing data generated during the CI job.
+The default root directory for artifacts is ``job_scratch_root``.
+This can be overridden by passing the argument ``--artifacts-root`` to the ``spack ci generate`` command or by setting the ``SPACK_ARTIFACTS_ROOT`` environment variable in the build job scripts.

-The top level directories under the artifact root are ``concrete_environment``, ``logs``, ``reproduction``,
-``tests``, and ``user_data``. Spack does not restrict what is written to any of these directories nor does
-it require user specified files be written to any specific directory.
+The top-level directories under the artifact root are ``concrete_environment``, ``logs``, ``reproduction``, ``tests``, and ``user_data``.
+Spack does not restrict what is written to any of these directories nor does it require user-specified files be written to any specific directory.
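+
+Put together, a sketch of the resulting layout (using the default root directory name):
+
+.. code-block:: text
+
+   job_scratch_root/
+   ├── concrete_environment/   # spack.yaml and spack.lock for the CI environment
+   ├── logs/                   # build logs and archived build files
+   ├── reproduction/           # inputs for spack ci reproduce-build
+   ├── tests/                  # stand-alone test output
+   └── user_data/              # anything else the build job wants to preserve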

-------------------------
 ``concrete_environment``
-------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^

-The directory ``concrete_environment`` is used to communicate the ``spack ci generate`` processed ``spack.yaml`` and
-the concrete ``spack.lock`` for the CI environment.
+The directory ``concrete_environment`` is used to communicate the ``spack ci generate`` processed ``spack.yaml`` and the concrete ``spack.lock`` for the CI environment.

---------
 ``logs``
---------
+^^^^^^^^

-The directory ``logs`` contains the spack build log, ``spack-build-out.txt``, and the spack build environment
-modification file, ``spack-build-mod-env.txt``. Additionally, all files specified by the packages ``Builder``
-property ``archive_files`` are also copied here (i.e., ``CMakeCache.txt`` in ``CMakeBuilder``).
+The directory ``logs`` contains the Spack build log, ``spack-build-out.txt``, and the Spack build environment modification file, ``spack-build-mod-env.txt``.
+Additionally, all files specified by the package's ``Builder`` property ``archive_files`` are also copied here (e.g., ``CMakeCache.txt`` in ``CMakeBuilder``).

-----------------
 ``reproduction``
-----------------
+^^^^^^^^^^^^^^^^

-The directory ``reproduction`` is used to store the files needed by the ``spack reproduce-build`` command.
-This includes ``repro.json``, copies of all of the files in ``concrete_environment``, the concrete spec
-JSON file for the current spec being built, and all of the files written in the artifacts root directory.
+The directory ``reproduction`` is used to store the files needed by the ``spack ci reproduce-build`` command.
+This includes ``repro.json``, copies of all of the files in ``concrete_environment``, the concrete spec JSON file for the current spec being built, and all of the files written in the artifacts root directory.

 The ``repro.json`` file is not versioned and is only designed to work with the version that Spack CI was run with.

 An example of what a ``repro.json`` may look like is here.

-.. code:: json
+.. code-block:: json

    {
      "job_name": "adios2@2.9.2 /feaevuj %gcc@11.4.0 arch=linux-ubuntu20.04-x86_64_v3 E4S ROCm External",
@@ -873,61 +583,47 @@ An example of what a ``repro.json`` may look like is here.
      "ci_project_dir": "/builds/spack/spack"
    }

----------
 ``tests``
----------
+^^^^^^^^^

-The directory ``tests`` is used to store output from running ``spack test ``. This may or may not have
-data in it depending on the package that was built and the availability of tests.
+The directory ``tests`` is used to store output from running ``spack test ``.
+This may or may not have data in it depending on the package that was built and the availability of tests.

--------------
 ``user_data``
--------------
+^^^^^^^^^^^^^

 The directory ``user_data`` is used to store everything else that shouldn't be copied to the ``reproduction`` directory.
 Users may use this to store additional logs or metrics or other types of files generated by the build job.

--------------------------------------
-Using a custom spack in your pipeline
+Using a custom Spack in your pipeline
 -------------------------------------

-If your runners will not have a version of spack ready to invoke, or if for some
-other reason you want to use a custom version of spack to run your pipelines,
-this section provides an example of how you could take advantage of
-user-provided pipeline scripts to accomplish this fairly simply. First, consider
-specifying the source and version of spack you want to use with variables, either
-written directly into your ``.gitlab-ci.yml``, or provided by CI variables defined
-in the gitlab UI or from some upstream pipeline. Let's say you choose the variable
-names ``SPACK_REPO`` and ``SPACK_REF`` to refer to the particular fork of spack
-and branch you want for running your pipeline. You can then refer to those in a
-custom shell script invoked both from your pipeline generation job and your rebuild
-jobs.
Here's the ``generate-pipeline`` job from the top of this document, -updated to clone and source a custom spack: +If your runners will not have a version of Spack ready to invoke, or if for some other reason you want to use a custom version of Spack to run your pipelines, this section provides an example of how you could take advantage of user-provided pipeline scripts to accomplish this fairly simply. +First, consider specifying the source and version of Spack you want to use with variables, either written directly into your ``.gitlab-ci.yml``, or provided by CI variables defined in the GitLab UI or from some upstream pipeline. +Let's say you choose the variable names ``SPACK_REPO`` and ``SPACK_REF`` to refer to the particular fork of Spack and branch you want for running your pipeline. +You can then refer to those in a custom shell script invoked both from your pipeline generation job and your rebuild jobs. +Here's the ``generate-pipeline`` job from the top of this document, updated to clone and source a custom Spack: .. code-block:: yaml generate-pipeline: tags: - - - before_script: + - + before_script: - git clone ${SPACK_REPO} - pushd spack && git checkout ${SPACK_REF} && popd - . "./spack/share/spack/setup-env.sh" - script: + script: - spack env activate --without-view . - - spack ci generate --check-index-only - --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" - --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml" - after_script: + - spack ci generate --check-index-only --artifacts-root "${CI_PROJECT_DIR}/jobs_scratch_dir" --output-file "${CI_PROJECT_DIR}/jobs_scratch_dir/pipeline.yml" + after_script: - rm -rf ./spack - artifacts: - paths: + artifacts: + paths: - "${CI_PROJECT_DIR}/jobs_scratch_dir" -That takes care of getting the desired version of spack when your pipeline is -generated by ``spack ci generate``. You also want your generated rebuild jobs -(all of them) to clone that version of spack, so next you would update your -``spack.yaml`` from above as follows: +That takes care of getting the desired version of Spack when your pipeline is generated by ``spack ci generate``. +You also want your generated rebuild jobs (all of them) to clone that version of Spack, so next you would update your ``spack.yaml`` from above as follows: .. code-block:: yaml @@ -937,103 +633,75 @@ generated by ``spack ci generate``. You also want your generated rebuild jobs pipeline-gen: - build-job: tags: - - spack-kube + - spack-kube image: spack/ubuntu-noble before_script: - - git clone ${SPACK_REPO} - - pushd spack && git checkout ${SPACK_REF} && popd - - . "./spack/share/spack/setup-env.sh" + - git clone ${SPACK_REPO} + - pushd spack && git checkout ${SPACK_REF} && popd + - . "./spack/share/spack/setup-env.sh" script: - - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR} - - spack -d ci rebuild + - spack env activate --without-view ${SPACK_CONCRETE_ENV_DIR} + - spack -d ci rebuild after_script: - - rm -rf ./spack - -Now all of the generated rebuild jobs will use the same shell script to clone -spack before running their actual workload. - -Now imagine you have long pipelines with many specs to be built, and you -are pointing to a spack repository and branch that has a tendency to change -frequently, such as the main repo and its ``develop`` branch. If each child -job checks out the ``develop`` branch, that could result in some jobs running -with one SHA of spack, while later jobs run with another. 
To help avoid this -issue, the pipeline generation process saves global variables called -``SPACK_VERSION`` and ``SPACK_CHECKOUT_VERSION`` that capture the version -of spack used to generate the pipeline. While the ``SPACK_VERSION`` variable -simply contains the human-readable value produced by ``spack -V`` at pipeline -generation time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a -``git checkout`` command to make sure all child jobs checkout the same version -of spack used to generate the pipeline. To take advantage of this, you could -simply replace ``git checkout ${SPACK_REF}`` in the example ``spack.yaml`` -above with ``git checkout ${SPACK_CHECKOUT_VERSION}``. -On the other hand, if you're pointing to a spack repository and branch under your -control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``, -and you can instead just clone using the variables you define (``SPACK_REPO`` -and ``SPACK_REF`` in the example above). + - rm -rf ./spack + +Now all of the generated rebuild jobs will use the same shell script to clone Spack before running their actual workload. + +Now imagine you have long pipelines with many specs to be built, and you are pointing to a Spack repository and branch that has a tendency to change frequently, such as the main repo and its ``develop`` branch. +If each child job checks out the ``develop`` branch, that could result in some jobs running with one SHA of Spack, while later jobs run with another. +To help avoid this issue, the pipeline generation process saves global variables called ``SPACK_VERSION`` and ``SPACK_CHECKOUT_VERSION`` that capture the version of Spack used to generate the pipeline. +While the ``SPACK_VERSION`` variable simply contains the human-readable value produced by ``spack -V`` at pipeline generation time, the ``SPACK_CHECKOUT_VERSION`` variable can be used in a ``git checkout`` command to make sure all child jobs check out the same version of Spack used to generate the pipeline. +To take advantage of this, you could simply replace ``git checkout ${SPACK_REF}`` in the example ``spack.yaml`` above with ``git checkout ${SPACK_CHECKOUT_VERSION}``. + +On the other hand, if you're pointing to a Spack repository and branch under your control, there may be no benefit in using the captured ``SPACK_CHECKOUT_VERSION``, and you can instead just clone using the variables you define (``SPACK_REPO`` and ``SPACK_REF`` in the example above). .. _custom_workflow: ---------------- Custom Workflow --------------- -There are many ways to take advantage of spack CI pipelines to achieve custom -workflows for building packages or other resources. One example of a custom -pipelines workflow is the spack tutorial container -`repo `_. This project uses -GitHub (for source control), GitLab (for automated spack CI pipelines), and -DockerHub automated builds to build Docker images (complete with fully populated -binary mirror) used by instructors and participants of a spack tutorial. +There are many ways to take advantage of Spack CI pipelines to achieve custom workflows for building packages or other resources. +One example of a custom pipeline workflow is the Spack tutorial container `repo `_. +This project uses GitHub (for source control), GitLab (for automated Spack CI pipelines), and DockerHub automated builds to build Docker images (complete with fully populated binary mirror) used by instructors and participants of a Spack tutorial.
-Take a look at the repo to see how it is accomplished using spack CI pipelines, -and see the following markdown files at the root of the repository for -descriptions and documentation describing the workflow: ``DESCRIPTION.md``, -``DOCKERHUB_SETUP.md``, ``GITLAB_SETUP.md``, and ``UPDATING.md``. +Take a look at the repo to see how it is accomplished using Spack CI pipelines, and see the following markdown files at the root of the repository for documentation describing the workflow: ``DESCRIPTION.md``, ``DOCKERHUB_SETUP.md``, ``GITLAB_SETUP.md``, and ``UPDATING.md``. .. _ci_environment_variables: --------------------------------------------------- Environment variables affecting pipeline operation -------------------------------------------------- -Certain secrets and some other information should be provided to the pipeline -infrastructure via environment variables, usually for reasons of security, but -in some cases to support other pipeline use cases such as PR testing. The -environment variables used by the pipeline infrastructure are described here. +Certain secrets and some other information should be provided to the pipeline infrastructure via environment variables, usually for reasons of security, but in some cases to support other pipeline use cases such as PR testing. +The environment variables used by the pipeline infrastructure are described here. -^^^^^^^^^^^^^^^^^ -AWS_ACCESS_KEY_ID -^^^^^^^^^^^^^^^^^ +``AWS_ACCESS_KEY_ID`` +^^^^^^^^^^^^^^^^^^^^^ -Optional. Only needed when binary mirror is an S3 bucket. +Optional. +Only needed when the binary mirror is an S3 bucket. -^^^^^^^^^^^^^^^^^^^^^ -AWS_SECRET_ACCESS_KEY -^^^^^^^^^^^^^^^^^^^^^ +``AWS_SECRET_ACCESS_KEY`` +^^^^^^^^^^^^^^^^^^^^^^^^^ -Optional. Only needed when binary mirror is an S3 bucket. +Optional. +Only needed when the binary mirror is an S3 bucket. -^^^^^^^^^^^^^^^ -S3_ENDPOINT_URL -^^^^^^^^^^^^^^^ +``S3_ENDPOINT_URL`` +^^^^^^^^^^^^^^^^^^^ -Optional. Only needed when binary mirror is an S3 bucket that is *not* on AWS. +Optional. +Only needed when the binary mirror is an S3 bucket that is *not* on AWS. -^^^^^^^^^^^^^^^^^ -CDASH_AUTH_TOKEN -^^^^^^^^^^^^^^^^^ +``CDASH_AUTH_TOKEN`` +^^^^^^^^^^^^^^^^^^^^ -Optional. Only needed to report build groups to CDash. +Optional. +Only needed to report build groups to CDash. -^^^^^^^^^^^^^^^^^ -SPACK_SIGNING_KEY -^^^^^^^^^^^^^^^^^ +``SPACK_SIGNING_KEY`` +^^^^^^^^^^^^^^^^^^^^^ -Optional. Only needed if you want ``spack ci rebuild`` to trust the key you -store in this variable, in which case, it will subsequently be used to sign and -verify binary packages (when installing or creating buildcaches). You could -also have already trusted a key spack knows about, or if no key is present anywhere, -spack will install specs using ``--no-check-signature`` and create buildcaches -using ``-u`` (for unsigned binaries). +Optional. +Only needed if you want ``spack ci rebuild`` to trust the key you store in this variable, in which case it will subsequently be used to sign and verify binary packages (when installing or creating buildcaches). +You could also have already trusted a key Spack knows about, or if no key is present anywhere, Spack will install specs using ``--no-check-signature`` and create buildcaches using ``-u`` (for unsigned binaries). diff --git a/lib/spack/docs/replace_conda_homebrew.rst b/lib/spack/docs/replace_conda_homebrew.rst deleted file mode 100644 index 31301d07fdb183..00000000000000 --- a/lib/spack/docs/replace_conda_homebrew.rst +++ /dev/null @@ -1,189 +0,0 @@ -..
Copyright Spack Project Developers. See COPYRIGHT file for details. - - SPDX-License-Identifier: (Apache-2.0 OR MIT) - -.. meta:: - :description lang=en: - Learn how to use Spack environments to manage single-user installations, similar to Homebrew and Conda. - -.. _spack-environments-basic-usage: - -================== -Spack Environments -================== - -Spack is an incredibly powerful package manager, designed for supercomputers where users have diverse installation needs. -But Spack can also be used to handle simple single-user installations on your laptop. -Most macOS users are already familiar with package managers like Homebrew and Conda, where all installed packages are symlinked to a single central location like ``/usr/local``. -In this section, we will show you how to emulate the behavior of Homebrew/Conda using :ref:`Spack environments `! - --------------------------- -Creating a New Environment --------------------------- - -First, let's create a new environment. -We'll assume that Spack is already set up correctly, and that you've already sourced the setup script for your shell. -To create, and activate, a new environment, simply run: - -.. code-block:: console - - $ spack env create myenv - -Here, *myenv* can be anything you want to name your environment. -Next, we can add a list of packages we would like to install into our environment. -Let's say we want a newer version of Bash than the one that comes with macOS, and we want a few Python libraries. -We can run: - -.. code-block:: console - - $ spack -e myenv add bash@5 python py-numpy py-scipy py-matplotlib - -Each package can be listed on a separate line, or combined into a single line like we did above. -Notice that we're explicitly asking for Bash 5 here. -You can use any spec you would normally use on the command line with other Spack commands. -If you run the following command: - -.. code-block:: console - - $ spack -e myenv config edit - -you'll see how your ``spack.yaml`` looks like: - -.. code-block:: yaml - - # This is a Spack Environment file. - # - # It describes a set of packages to be installed, along with - # configuration settings. - spack: - # add package specs to the `specs` list - specs: - - bash@5 - - python - - py-numpy - - py-scipy - - py-matplotlib - view: true - concretizer: - unify: true - -------------------------- -Configuring View Location -------------------------- - -Spack symlinks all installations to ``${SPACK_ROOT}/var/spack/environments/myenv/.spack-env/view``, which is the default when ``view: true``. -You can actually change this to any directory you want by editing the ``spack.yaml`` manifest file, or by using the following command: - -.. code-block:: console - - $ spack -e myenv env view enable - -In order to access files in these locations, you need to update ``PATH`` and other environment variables to point to them. -Activating the Spack environment does this automatically, once the software is installed: - -.. code-block:: console - - $ spack env activate -p myenv - -For now, let's deactivate the environment, and proceed with installing the software: - -.. code-block:: console - - $ spack env deactivate - - ------------------------ -Installing the Software ------------------------ - -Once the manifest file is properly defined, you may want to update the ``builtin`` package repository using this command: - -.. code-block:: console - - $ spack repo update - -Then you can proceed concretizing the environment: - -.. 
code-block:: console - - $ spack -e myenv concretize - -This will tell you which packages, if any, are already installed, and alert you to any conflicting specs. - -To actually install these packages and symlink them to your ``view:`` directory, simply run: - -.. code-block:: console - - $ spack -e myenv install - $ spack env activate myenv - -Now, when you type ``which python3``, it should find the one you just installed. - -.. admonition:: Add the new shell to the list of valid login shells - :class: tip - :collapsible: - - In order to change the default shell to our newer Bash installation, we first need to add it to this list of acceptable shells. - Run: - - .. code-block:: console - - $ sudo vim /etc/shells - - and add the absolute path to your bash executable. Then run: - - .. code-block:: console - - $ chsh -s /path/to/bash - - Now, when you log out and log back in, ``echo $SHELL`` should point to the newer version of Bash. - - ------------------------ -Keeping Up With Updates ------------------------ - -Let's say you upgraded to a new version of macOS, or a new version of Python was released, and you want to rebuild your entire software stack. -To do this, simply run the following commands: - -.. code-block:: console - - $ spack env activate myenv - $ spack concretize --fresh --force - $ spack install - -The ``--fresh`` flag tells Spack to use the latest version of every package, where possible, instead of trying to reuse installed packages as much as possible. - -The ``--force`` flag in addition tells Spack to overwrite its previous concretization decisions, allowing you to choose a new version of Python. -If any of the new packages like Bash are already installed, ``spack install`` won't re-install them, it will keep the symlinks in place. - ------------------------- -Cleaning Up Old Packages ------------------------- - -If we want to clean up old, out-of-date packages from our environment after an upgrade, here's how to upgrade our entire software stack and tidy up the old versions: - -.. code-block:: console - - $ spack env activate myenv - $ spack concretize --fresh --force - $ spack install - $ spack gc --except-any-environment - -The final step, ``spack gc --except-any-environment``, runs Spack's garbage collector and removes any packages that are no longer needed by any managed Spack environment -- which will clean up those old versions that got replaced during the upgrade. - ------------------------- -Removing the Environment ------------------------- - -If you need to remove ``myenv`` completely, the procedure is simple. -Just run: - -.. code-block:: console - - $ spack env activate myenv - $ spack uninstall --all - $ spack env deactivate myenv - $ spack env rm myenv - -This will uninstall all packages in your environment, remove the symlinks, and finally remove the environment. diff --git a/lib/spack/docs/repositories.rst b/lib/spack/docs/repositories.rst index fe762867ed47b2..5eea211d077c25 100644 --- a/lib/spack/docs/repositories.rst +++ b/lib/spack/docs/repositories.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,11 +9,11 @@ .. _repositories: -================================= Package Repositories (repos.yaml) ================================= -Spack comes with thousands of built-in package recipes. As of Spack v1.0, these are hosted in a separate Git repository at `spack/spack-packages `_. 
+Spack comes with thousands of built-in package recipes. +As of Spack v1.0, these are hosted in a separate Git repository at `spack/spack-packages `_. A **package repository** is a directory that Spack searches when it needs to find a package by name. You may need to maintain packages for restricted, proprietary, or experimental software separately from the built-in repository. @@ -20,7 +21,6 @@ Spack allows you to configure local and remote repositories using either the ``r This document describes how to set up and manage these package repositories. ---------------------------------------------- Structure of an Individual Package Repository --------------------------------------------- @@ -47,9 +47,9 @@ An individual Spack package repository is a directory structured as follows: .. code-block:: yaml - repo: - namespace: myrepo - api: v2.0 + repo: + namespace: myrepo + api: v2.0 It defines primarily: @@ -87,11 +87,11 @@ Package names can only contain lowercase characters ``a-z``, digits ``0-9`` and The mapping between package names and directory names is one-to-one. Use ``spack list`` to see how Spack resolves the package names from the directory names. --------------------------------------------- Configuring Repositories with ``repos.yaml`` -------------------------------------------- -Spack uses ``repos.yaml`` files found in its :ref:`configuration scopes ` (e.g., ``~/.spack/``, ``etc/spack/``) to discover and prioritize package repositories. Note that this ``repos.yaml`` (plural) configuration file is distinct from the ``repo.yaml`` (singular) file within each individual package repository. +Spack uses ``repos.yaml`` files found in its :ref:`configuration scopes ` (e.g., ``~/.spack/``, ``etc/spack/``) to discover and prioritize package repositories. +Note that this ``repos.yaml`` (plural) configuration file is distinct from the ``repo.yaml`` (singular) file within each individual package repository. Spack supports two main types of repository configurations: @@ -101,10 +101,11 @@ Local Repositories (Path-based) You can point Spack to a repository on your local filesystem: .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-local-repo - # Example: ~/.spack/repos.yaml - repos: - my_local_packages: /path/to/my_repository_root + repos: + my_local_packages: /path/to/my_repository_root Here, ``/path/to/my_repository_root`` should be the directory containing that repository's ``repo.yaml`` and ``packages/`` subdirectory. @@ -115,43 +116,57 @@ Spack can clone and use repositories directly from Git URLs: .. code-block:: yaml - repos: - my_remote_repo: https://github.com/myorg/spack-custom-pkgs.git + repos: + my_remote_repo: https://github.com/myorg/spack-custom-pkgs.git + +Automatic Cloning +""""""""""""""""" -**Automatic Cloning.** -When Spack first encounters a Git-based repository configuration, it automatically clones it. By default, these repositories are cloned into a subdirectory within ``~/.spack/package_repos/``, named with a hash of the repository URL. +When Spack first encounters a Git-based repository configuration, it automatically clones it. +By default, these repositories are cloned into a subdirectory within ``~/.spack/package_repos/``, named with a hash of the repository URL. -To change directories to the package repository, you can use ``spack cd --repo [name]``. To find where a repository is cloned, you can use ``spack location --repo [name]`` or ``spack repo list``. 
The ``name`` argument is optional; if omitted, Spack will use the first package repository in configuration order. +To change directories to the package repository, you can use ``spack cd --repo [name]``. +To find where a repository is cloned, you can use ``spack location --repo [name]`` or ``spack repo list``. +The ``name`` argument is optional; if omitted, Spack will use the first package repository in configuration order. -**Customizing Clone Location.** -The default clone location (``~/.spack/package_repos/``) might not be convenient for package maintainers who want to make changes to packages. You can specify a custom local directory for Spack to clone a Git repository into, or to use if the repository is already cloned there. This is done using the ``destination`` key in ``repos.yaml`` or via the ``spack repo set --destination`` command (see :ref:`cmd-spack-repo-set-destination`). +Customizing Clone Location +"""""""""""""""""""""""""" + +The default clone location (``~/.spack/package_repos/``) might not be convenient for package maintainers who want to make changes to packages. +You can specify a custom local directory for Spack to clone a Git repository into, or to use if the repository is already cloned there. +This is done using the ``destination`` key in ``repos.yaml`` or via the ``spack repo set --destination`` command (see :ref:`cmd-spack-repo-set-destination`). For example, to use ``~/custom_packages_clone`` for ``my_remote_repo``: .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-location - # ~/.spack/repos.yaml - repos: - my_remote_repo: - git: https://github.com/myorg/spack-custom-pkgs.git - destination: ~/custom_packages_clone + repos: + my_remote_repo: + git: https://github.com/myorg/spack-custom-pkgs.git + destination: ~/custom_packages_clone -If the ``git`` URL is defined in a lower-precedence configuration (like Spack's defaults for ``builtin``), you only need to specify the ``destination`` in your user-level ``repos.yaml``. Spack can make the configuration changes for you using ``spack repo set --destination ~/spack-packages builtin``, or you can directly edit your ``repos.yaml`` file: +If the ``git`` URL is defined in a lower-precedence configuration (like Spack's defaults for ``builtin``), you only need to specify the ``destination`` in your user-level ``repos.yaml``. +Spack can make the configuration changes for you using ``spack repo set --destination ~/spack-packages builtin``, or you can directly edit your ``repos.yaml`` file: .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-builtin - # ~/.spack/repos.yaml - repos: - builtin: - destination: ~/spack-packages + repos: + builtin: + destination: ~/spack-packages -**Updating and pinning.** +Updating and pinning +"""""""""""""""""""" Repos can be pinned to a git branch, tag, or commit. .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-branch - # ~/.spack/repos.yaml repos: builtin: branch: releases/v2025.07 @@ -162,8 +177,12 @@ The ``spack repo update`` command will update the repo on disk to match the curr If the repo is pinned to a commit or tag, it will ensure the repo on disk reflects that commit or tag. If the repo is pinned to a branch or unpinned, ``spack repo update`` will pull the most recent state of the branch (the default branch if unpinned). -**Git repositories need a package repo index.** -A single Git repository can contain one or more Spack package repositories. 
To enable Spack to discover these, the root of the Git repository should contain a ``spack-repo-index.yaml`` file. This file lists the relative paths to package repository roots within the git repo. +Git repositories need a package repo index +"""""""""""""""""""""""""""""""""""""""""" + +A single Git repository can contain one or more Spack package repositories. +To enable Spack to discover these, the root of the Git repository should contain a ``spack-repo-index.yaml`` file. +This file lists the relative paths to package repository roots within the git repo. For example, assume a Git repository at ``https://example.com/my_org/my_pkgs.git`` has the following structure @@ -191,28 +210,34 @@ For example, assume a Git repository at ``https://example.com/my_org/my_pkgs.git The ``spack-repo-index.yaml`` in the root of ``https://example.com/my_org/my_pkgs.git`` should look like this: .. code-block:: yaml + :caption: ``my_pkgs.git/spack-repo-index.yaml`` + :name: code-example-repo-index - # my_pkgs.git/spack-repo-index.yaml - repo_index: - paths: - - spack_pkgs/spack_repo/my_org/comp_sci_packages - - spack_pkgs/spack_repo/my_org/physics_packages + repo_index: + paths: + - spack_pkgs/spack_repo/my_org/comp_sci_packages + - spack_pkgs/spack_repo/my_org/physics_packages If ``my_pkgs.git`` is configured in ``repos.yaml`` as follows: .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-git-repo - # ~/.spack/repos.yaml - repos: - example_mono_repo: https://example.com/my_org/my_pkgs.git + repos: + example_mono_repo: https://example.com/my_org/my_pkgs.git -Spack will clone ``my_pkgs.git`` and look for ``spack-repo-index.yaml``. It will then register two separate repositories based on the paths found (e.g., ``/spack_pkgs/spack_repo/my_org/comp_sci_packages`` and ``/spack_pkgs/spack_repo/my_org/physics_packages``), each with its own namespace defined in its respective ``repo.yaml`` file. Thus, one ``repos.yaml`` entry for a Git mono-repo can lead to *multiple repositories* being available to Spack. +Spack will clone ``my_pkgs.git`` and look for ``spack-repo-index.yaml``. +It will then register two separate repositories based on the paths found (e.g., ``/spack_pkgs/spack_repo/my_org/comp_sci_packages`` and ``/spack_pkgs/spack_repo/my_org/physics_packages``), each with its own namespace defined in its respective ``repo.yaml`` file. +Thus, one ``repos.yaml`` entry for a Git mono-repo can lead to *multiple repositories* being available to Spack. -If you want only one of the package repositories from a Git mono-repo, you can override the paths in your user-level ``repos.yaml``. For example, if you only want the computer science packages: +If you want only one of the package repositories from a Git mono-repo, you can override the paths in your user-level ``repos.yaml``. +For example, if you only want the computer science packages: .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` + :name: code-example-specific-repo - # ~/.spack/repos.yaml repos: example_mono_repo: git: https://example.com/my_org/my_pkgs.git @@ -225,17 +250,17 @@ The ``builtin`` Repository ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Spack's extensive collection of built-in packages resides at `spack/spack-packages `_. -By default, Spack is configured to use this as a Git-based repository. The default configuration in ``$spack/etc/spack/defaults/repos.yaml`` looks something like this: +By default, Spack is configured to use this as a Git-based repository. 
+The default configuration in ``$spack/etc/spack/defaults/repos.yaml`` looks something like this: .. code-block:: yaml - repos: - builtin: - git: https://github.com/spack/spack-packages.git + repos: + builtin: + git: https://github.com/spack/spack-packages.git .. _namespaces: ----------- Namespaces ---------- @@ -243,20 +268,25 @@ Every repository in Spack has an associated **namespace** defined in the ``names For example, the built-in repository (from ``spack/spack-packages``) has its namespace defined as ``builtin``: .. code-block:: yaml + :caption: ``repo.yaml`` of ``spack/spack-packages`` + :name: code-example-repo-yaml - # In spack/spack-packages repository's repo.yaml - repo: - namespace: builtin - api: v2.0 # Or newer + repo: + namespace: builtin + api: v2.0 # Or newer -Spack records the repository namespace of each installed package. For example, if you install the ``mpich`` package from the ``builtin`` repo, Spack records its fully qualified name as ``builtin.mpich``. This accomplishes two things: +Spack records the repository namespace of each installed package. +For example, if you install the ``mpich`` package from the ``builtin`` repo, Spack records its fully qualified name as ``builtin.mpich``. +This accomplishes two things: 1. You can have packages with the same name from different namespaces installed simultaneously. 2. You can easily determine which repository a package came from after it is installed (more :ref:`below `). .. note:: - The ``namespace`` defined in the package repository's ``repo.yaml`` is the **authoritative source** for the namespace. It is *not* derived from the local configuration in ``repos.yaml``. This means that the namespace is determined by the repository maintainer, not by the user or local configuration. + The ``namespace`` defined in the package repository's ``repo.yaml`` is the **authoritative source** for the namespace. + It is *not* derived from the local configuration in ``repos.yaml``. + This means that the namespace is determined by the repository maintainer, not by the user or local configuration. Nested Namespaces for Organizations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -284,30 +314,32 @@ On the file system, this requires a directory structure like this: Uniqueness ^^^^^^^^^^ -Spack cannot ensure global uniqueness of all namespaces, but it will prevent you from registering two repositories with the same namespace *at the same time* in your current configuration. If you try to add a repository that has the same namespace as an already registered one, Spack will print a warning and may ignore the new addition or apply specific override logic depending on the configuration. +Spack cannot ensure global uniqueness of all namespaces, but it will prevent you from registering two repositories with the same namespace *at the same time* in your current configuration. +If you try to add a repository that has the same namespace as an already registered one, Spack will print a warning and may ignore the new addition or apply specific override logic depending on the configuration. .. _namespace-example: Namespace Example ^^^^^^^^^^^^^^^^^ -Suppose LLNL maintains its own version of ``mpich`` (in a repository with namespace ``llnl.comp``), separate from Spack's built-in ``mpich`` package (namespace ``builtin``). If you've installed both, ``spack find`` alone might be ambiguous: +Suppose LLNL maintains its own version of ``mpich`` (in a repository with namespace ``llnl.comp``), separate from Spack's built-in ``mpich`` package (namespace ``builtin``). 
+If you've installed both, ``spack find`` alone might be ambiguous: .. code-block:: console - $ spack find - ==> 2 installed packages. - -- linux-rhel6-x86_64 / gcc@4.4.7 ------------- - mpich@3.2 mpich@3.2 + $ spack find + ==> 2 installed packages. + -- linux-rhel6-x86_64 / gcc@4.4.7 ------------- + mpich@3.2 mpich@3.2 Using ``spack find -N`` displays packages with their namespaces: .. code-block:: console - $ spack find -N - ==> 2 installed packages. - -- linux-rhel6-x86_64 / gcc@4.4.7 ------------- - builtin.mpich@3.2 llnl.comp.mpich@3.2 + $ spack find -N + ==> 2 installed packages. + -- linux-rhel6-x86_64 / gcc@4.4.7 ------------- + builtin.mpich@3.2 llnl.comp.mpich@3.2 Now you can distinguish them. Packages differing only by namespace will have different hashes: @@ -321,11 +353,10 @@ Packages differing only by namespace will have different hashes: All Spack commands that take a package :ref:`spec ` also accept a fully qualified spec with a namespace, allowing you to be specific: -.. code-block:: console +.. code-block:: spec - spack uninstall llnl.comp.mpich + $ spack uninstall llnl.comp.mpich -------------------------------------- Search Order and Overriding Packages ------------------------------------- @@ -337,20 +368,22 @@ This search order allows you to override built-in packages. If you have your own ``mpich`` in a repository ``my_custom_repo``, and ``my_custom_repo`` is listed before ``builtin`` in your ``repos.yaml``, Spack will use your version of ``mpich`` by default. Suppose your effective (merged) ``repos.yaml`` implies the following order: -1. ``proto`` (local repo at ``~/my_spack_repos/spack_repo/proto_repo``) -2. ``llnl`` (local repo at ``/usr/local/repos/spack_repo/llnl_repo``) -3. ``builtin`` (Spack's default packages from `spack/spack-packages`) + +1. ``proto`` (local repo at ``~/my_spack_repos/spack_repo/proto_repo``) +2. ``llnl`` (local repo at ``/usr/local/repos/spack_repo/llnl_repo``) +3. 
``builtin`` (Spack's default packages from ``spack/spack-packages``) And the packages are: - +--------------+------------------------------------------------+-----------------------------+ - | Namespace | Source | Packages | - +==============+================================================+=============================+ - | ``proto`` | ``~/my_spack_repos/spack_repo/proto_repo`` | ``mpich`` | - +--------------+------------------------------------------------+-----------------------------+ - | ``llnl`` | ``/usr/local/repos/spack_repo/llnl_repo`` | ``hdf5`` | - +--------------+------------------------------------------------+-----------------------------+ - | ``builtin`` | `spack/spack-packages` (Git) | ``mpich``, ``hdf5``, others | - +--------------+------------------------------------------------+-----------------------------+ + ++--------------+------------------------------------------------+-----------------------------+ +| Namespace | Source | Packages | ++==============+================================================+=============================+ +| ``proto`` | ``~/my_spack_repos/spack_repo/proto_repo`` | ``mpich`` | ++--------------+------------------------------------------------+-----------------------------+ +| ``llnl`` | ``/usr/local/repos/spack_repo/llnl_repo`` | ``hdf5`` | ++--------------+------------------------------------------------+-----------------------------+ +| ``builtin`` | `spack/spack-packages` (Git) | ``mpich``, ``hdf5``, others | ++--------------+------------------------------------------------+-----------------------------+ If ``hdf5`` depends on ``mpich``: @@ -365,7 +398,7 @@ You can force a particular repository's package using a fully qualified name: To see which repositories will be used for a build *before* installing, use ``spack spec -N``: -.. code-block:: console +.. code-block:: spec $ spack spec -N hdf5 llnl.hdf5@1.10.0 @@ -380,7 +413,6 @@ To see which repositories will be used for a build *before* installing, use ``sp .. _cmd-spack-repo: --------------------------- The ``spack repo`` Command -------------------------- @@ -395,11 +427,11 @@ This command shows all repositories Spack currently knows about, including their .. code-block:: console - $ spack repo list - [+] my_local v2.0 /path/to/spack_repo/my_local_packages - [+] comp_sci_packages v2.0 ~/.spack/package_repos//spack_pkgs/spack_repo/comp_sci_packages - [+] physics_packages v2.0 ~/.spack/package_repos//spack_pkgs/spack_repo/physics_packages # From the same git repo - [+] builtin v2.0 ~/.spack/package_repos//repos/spack_repo/builtin + $ spack repo list + [+] my_local v2.0 /path/to/spack_repo/my_local_packages + [+] comp_sci_packages v2.0 ~/.spack/package_repos//spack_pkgs/spack_repo/comp_sci_packages + [+] physics_packages v2.0 ~/.spack/package_repos//spack_pkgs/spack_repo/physics_packages # From the same git repo + [+] builtin v2.0 ~/.spack/package_repos//repos/spack_repo/builtin Spack shows a green ``[+]`` next to each repository that is available for use. It shows a red ``[-]`` to indicate that package repositories cannot be used due to an error (e.g., unsupported API version, missing ``repo.yaml``, etc.). @@ -427,10 +459,10 @@ To create the directory structure for a new, empty local repository: .. code-block:: console - $ spack repo create ~/my_spack_projects myorg.projectx - ==> Created repo with namespace 'myorg.projectx'. 
- ==> To register it with spack, run this command: - spack repo add ~/my_spack_projects/spack_repo/myorg/projectx + $ spack repo create ~/my_spack_projects myorg.projectx + ==> Created repo with namespace 'myorg.projectx'. + ==> To register it with spack, run this command: + spack repo add ~/my_spack_projects/spack_repo/myorg/projectx This command creates the following structure: @@ -453,16 +485,14 @@ The ```` can be simple (e.g., ``myrepo``) or nested (e.g., ``myorg.pr To register package repositories from local paths or a remote Git repositories with Spack: -* **For a local path:** - Provide the path to the repository's root directory (the one containing ``repo.yaml`` and ``packages/``). +* **For a local path:** Provide the path to the repository's root directory (the one containing ``repo.yaml`` and ``packages/``). .. code-block:: console $ spack repo add ~/my_spack_projects/spack_repo/myorg/projectx ==> Added repo to config with name 'myorg.projectx'. -* **For a Git repository:** - Provide the Git URL. +* **For a Git repository:** Provide the Git URL. .. code-block:: console @@ -487,15 +517,15 @@ By configuration name (e.g., ``projectx`` from the add example): .. code-block:: console - $ spack repo remove projectx - ==> Removed repository 'projectx'. + $ spack repo remove projectx + ==> Removed repository 'projectx'. By path (for a local repo): .. code-block:: console - $ spack repo remove ~/my_spack_projects/spack_repo/myorg/projectx - ==> Removed repository '/home/user/my_spack_projects/spack_repo/myorg/projectx'. + $ spack repo remove ~/my_spack_projects/spack_repo/myorg/projectx + ==> Removed repository '/home/user/my_spack_projects/spack_repo/myorg/projectx'. This command removes the corresponding entry from your ``repos.yaml`` configuration. It does *not* delete the local repository files or any cloned Git repositories. @@ -510,24 +540,24 @@ The ```` is the key used in your ``repos.yaml`` file for that Git r .. code-block:: console - $ spack repo set --destination /my/custom/path/for/spack-packages builtin - ==> Updated repo 'builtin' + $ spack repo set --destination /my/custom/path/for/spack-packages builtin + ==> Updated repo 'builtin' This updates your user-level ``repos.yaml``, adding or modifying the ``destination:`` key for the specified repository configuration name. .. code-block:: yaml + :caption: ``~/.spack/repos.yaml`` after ``spack repo set`` + :name: code-example-specific-destination - # ~/.spack/repos.yaml after the command - repos: - builtin: - destination: /my/custom/path/for/spack-packages - # The 'git:' URL is typically inherited from Spack's default configuration for 'builtin' + repos: + builtin: + destination: /my/custom/path/for/spack-packages + # The 'git:' URL is typically inherited from Spack's default configuration for 'builtin' Spack will then use ``/my/custom/path/for/spack-packages`` for the ``builtin`` repository. If the directory doesn't exist, Spack will clone into it. If it exists and is a valid Git repository, Spack will use it. 
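+
+For example, if you also want the relocated clone pinned to a release branch, both settings can sit under the same entry. This is a minimal sketch, assuming ``destination`` may be combined with a ``branch`` pin in one entry; the ``git:`` URL is inherited from Spack's default configuration for ``builtin``, and the path and branch shown are illustrative:
+
+.. code-block:: yaml
+   :caption: ``~/.spack/repos.yaml`` combining ``destination`` with a pinned branch
+   :name: code-example-destination-and-branch
+
+   repos:
+     builtin:
+       # Clone location for package maintainers who edit packages directly
+       destination: /my/custom/path/for/spack-packages
+       # Pin to a release branch rather than the default branch
+       branch: releases/v2025.07
+
+After editing the configuration this way, ``spack repo update`` will bring the clone at the custom destination in line with the pinned branch, as described above.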
--------------------------------- Repository Namespaces and Python -------------------------------- @@ -545,6 +575,7 @@ This allows you to easily extend or subclass package classes from other reposito # Import the original Mpich class from the 'builtin' repository from spack_repo.builtin.packages.mpich.package import Mpich as BuiltinMpich + class MyCustomMpich(BuiltinMpich): # Override versions, variants, or methods from BuiltinMpich version("3.5-custom", sha256="...") @@ -556,7 +587,7 @@ This allows you to easily extend or subclass package classes from other reposito if "+custom_feature" in spec: # Do custom things pass - super().install(spec, prefix) # Call parent install method + super().install(spec, prefix)  # Call parent install method Spack manages Python's ``sys.path`` at runtime to make these imports discoverable across all registered repositories. This capability is powerful for creating derivative packages or slightly modifying existing ones without copying entire package files. diff --git a/lib/spack/docs/requirements.txt b/lib/spack/docs/requirements.txt index 45ebb4681e574b..4422cce37178ee 100644 --- a/lib/spack/docs/requirements.txt +++ b/lib/spack/docs/requirements.txt @@ -1,14 +1,9 @@ -sphinx==8.2.3 +sphinx==9.1.0 sphinxcontrib-programoutput==0.18 +sphinxcontrib-svg2pdfconverter==2.0.0 sphinx-copybutton==0.5.2 -sphinx_design==0.6.1 -sphinx-rtd-theme==3.0.2 -python-levenshtein==0.27.1 -docutils==0.21.2 +sphinx-last-updated-by-git==0.3.8 +sphinx-sitemap==2.9.0 +furo==2025.12.19 +docutils==0.22.4 pygments==2.19.2 -urllib3==2.5.0 -pytest==8.4.1 -isort==6.0.1 -black==25.1.0 -flake8==7.3.0 -mypy==1.17.0 diff --git a/lib/spack/docs/roles_and_responsibilities.rst b/lib/spack/docs/roles_and_responsibilities.rst new file mode 100644 index 00000000000000..cb2e17f51ba1e2 --- /dev/null +++ b/lib/spack/docs/roles_and_responsibilities.rst @@ -0,0 +1,95 @@ +.. Copyright Spack Project Developers. See COPYRIGHT file for details. + + SPDX-License-Identifier: (Apache-2.0 OR MIT) + +.. meta:: + :description lang=en: + A guide to distinguish the roles and responsibilities associated with managing the Spack Packages repository. + +.. _packaging-roles: + +Packaging Roles and Responsibilities +==================================== + +There are four roles related to `Spack Package `_ repository Pull Requests (PRs): + +#. :ref:`package-contributors`, +#. :ref:`package-reviewers`, +#. :ref:`package-maintainers`, and +#. :ref:`committers`. + +One person can assume multiple roles (e.g., a Package Contributor may also be a Maintainer; a Package Reviewer may also be a Committer). +This section defines and describes the responsibilities of each role. + +.. _package-contributors: + +Package Contributors +-------------------- + +Contributors submit changes to packages through `Spack Package `_ repository Pull Requests (PRs). + +As a Contributor, you are **expected** to test your changes on **at least one platform** outside of Spack’s Continuous Integration (CI) checks. + +.. note:: + + We also ask that you include the output from ``spack debug report`` from the platform you used to facilitate PR reviews. + +.. _package-reviewers: + +Package Reviewers +----------------- + +Anyone can review a PR, so we encourage Spack’s community members to review and comment on those involving software in which they have expertise and/or interest.
+ +As a Package Reviewer, you are **expected** to assess changes in PRs to the best of your ability and knowledge with special consideration given to the information contained in the :ref:`package-review-guide`. + +.. _package-maintainers: + +Maintainers (Package Owners) +---------------------------- + +Maintainers are individuals (technically GitHub accounts) who appear in a package’s :ref:`maintainers` directive. +These are people who have agreed to be notified of and given the opportunity to review changes to packages. +They are, from a Spack package perspective, `Code Owners `_ of the package, whether or not they “own” or work on the software that the package builds. + +As a Maintainer, you are **expected**, when available, to: + +* review PRs in a timely manner (reported in :ref:`committers`) to confirm that the changes made to the package are reasonable; +* confirm that packages successfully build on at least one platform; and +* attempt to confirm that any updated or included tests pass. + +See :ref:`build_success_reviews` for acceptable forms of build success confirmation. + +.. note:: + + If at least one maintainer approves a PR -- and there are no objections from others -- then the PR can be merged by any of the :ref:`committers`. + +.. _committers: + +Committers +---------- + +Committers are vetted individuals who are allowed to merge PRs into the ``develop`` branch. + +As a Committer, you are **expected** to: + +* ensure **at least one review** is performed prior to merging (GitHub rules enforce this); +* encourage **at least one** :ref:`Package Maintainer ` (if any) to comment and/or review the PR; +* allow Package Maintainers (if any) **up to one week** to comment or provide a review; +* determine if the criteria defined in :ref:`package-review-guide` are met; and +* **merge the reviewed PR** at their discretion. + +.. note:: + + If there are no :ref:`package-maintainers` or the Maintainers have not commented or reviewed the PR within the allotted time, you will need to conduct the review. + +.. tip:: + + The following criteria must be met in order to become a Committer: + + * cannot be an anonymous account; + * must come from a known and trustworthy organization; + * demonstrated record of contribution to Spack; + * have an account on the Spack Slack workspace; + * be approved by the Onboarding subcommittee; and + * (proposed) be known to at least three members of the core development team. diff --git a/lib/spack/docs/signing.rst b/lib/spack/docs/signing.rst index a05cb1f181a184..de34abeae57cb6 100644 --- a/lib/spack/docs/signing.rst +++ b/lib/spack/docs/signing.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -9,122 +10,72 @@ .. _signing: -===================== Spack Package Signing ===================== -The goal of package signing in Spack is to provide data integrity -assurances around official packages produced by the automated Spack CI -pipelines. These assurances directly address the security of Spack's -software supply chain by explaining why a security-conscious user can -be reasonably justified in the belief that packages installed via Spack -have an uninterrupted auditable trail back to change management -decisions judged to be appropriate by the Spack maintainers.
This is -achieved through cryptographic signing of packages built by Spack CI -pipelines based on code that has been transparently reviewed and -approved on GitHub. This document describes the signing process for -interested users. +The goal of package signing in Spack is to provide data integrity assurances around official packages produced by the automated Spack CI pipelines. +These assurances directly address the security of Spack's software supply chain by explaining why a security-conscious user can be reasonably justified in the belief that packages installed via Spack have an uninterrupted auditable trail back to change management decisions judged to be appropriate by the Spack maintainers. +This is achieved through cryptographic signing of packages built by Spack CI pipelines based on code that has been transparently reviewed and approved on GitHub. +This document describes the signing process for interested users. .. _risks: ------------------------------- Risks, Impact and Threat Model ------------------------------ -This document addresses the approach taken to safeguard Spack's -reputation with regard to the integrity of the package data produced by -Spack's CI pipelines. It does not address issues of data confidentiality -(Spack is intended to be largely open source) or availability (efforts -are described elsewhere). With that said, the main reputational risk can -be broadly categorized as a loss of faith in the data integrity due to a -breach of the private key used to sign packages. Remediation of a -private key breach would require republishing the public key with a -revocation certificate, generating a new signing key, an assessment and -potential rebuild/resigning of all packages since the key was breached, -and finally direct intervention by every spack user to update their copy -of Spack's public keys used for local verification. - -The primary threat model used in mitigating the risks of these stated -impacts is one of individual error not malicious intent or insider -threat. The primary objective is to avoid the above impacts by making a -private key breach nearly impossible due to oversight or configuration -error. Obvious and straightforward measures are taken to mitigate issues -of malicious interference in data integrity and insider threats but -these attack vectors are not systematically addressed. It should be hard -to exfiltrate the private key intentionally, and almost impossible to -leak the key by accident. +This document addresses the approach taken to safeguard Spack's reputation with regard to the integrity of the package data produced by Spack's CI pipelines. +It does not address issues of data confidentiality (Spack is intended to be largely open source) or availability (efforts are described elsewhere). +With that said, the main reputational risk can be broadly categorized as a loss of faith in the data integrity due to a breach of the private key used to sign packages. +Remediation of a private key breach would require republishing the public key with a revocation certificate, generating a new signing key, an assessment and potential rebuild/resigning of all packages since the key was breached, and finally direct intervention by every Spack user to update their copy of Spack's public keys used for local verification. + +The primary threat model used in mitigating the risks of these stated impacts is one of individual error, not malicious intent or insider threat.
+The primary objective is to avoid the above impacts by making a private key breach nearly impossible due to oversight or configuration error. +Obvious and straightforward measures are taken to mitigate issues of malicious interference in data integrity and insider threats, but these attack vectors are not systematically addressed. +It should be hard to exfiltrate the private key intentionally, and almost impossible to leak the key by accident. .. _overview: ------------------ Pipeline Overview ----------------- -Spack pipelines build software through progressive stages where packages -in later stages nominally depend on packages built in earlier stages. -For both technical and design reasons these dependencies are not -implemented through the default GitLab artifacts mechanism; instead -built packages are uploaded to AWS S3 mirrors (buckets) where they are -retrieved by subsequent stages in the pipeline. Two broad categories of -pipelines exist: Pull Request (PR) pipelines and Develop/Release -pipelines. - -- PR pipelines are launched in response to pull requests made by - trusted and untrusted users. Packages built on these pipelines upload - code to quarantined AWS S3 locations which cache the built packages - for the purposes of review and iteration on the changes proposed in - the pull request. Packages built on PR pipelines can come from - untrusted users so signing of these pipelines is not implemented. - Jobs in these pipelines are executed via normal GitLab runners both - within the AWS GitLab infrastructure and at affiliated institutions. -- Develop and Release pipelines **sign** the packages they produce and carry - strong integrity assurances that trace back to auditable change management - decisions. These pipelines only run after members from a trusted group of - reviewers verify that the proposed changes in a pull request are appropriate. - Once the PR is merged, or a release is cut, a pipeline is run on protected - GitLab runners which provide access to the required signing keys within the - job. Intermediary keys are used to sign packages in each stage of the - pipeline as they are built and a final job officially signs each package - external to any specific package's build environment. An intermediate key - exists in the AWS infrastructure and for each affiliated institution that - maintains protected runners. The runners that execute these pipelines - exclusively accept jobs from protected branches meaning the intermediate keys - are never exposed to unreviewed code and the official keys are never exposed - to any specific build environment. +Spack pipelines build software through progressive stages where packages in later stages nominally depend on packages built in earlier stages. +For both technical and design reasons, these dependencies are not implemented through the default GitLab artifacts mechanism; instead, built packages are uploaded to AWS S3 mirrors (buckets) where they are retrieved by subsequent stages in the pipeline. +Two broad categories of pipelines exist: Pull Request (PR) pipelines and Develop/Release pipelines. + +- PR pipelines are launched in response to pull requests made by trusted and untrusted users. + Packages built on these pipelines upload code to quarantined AWS S3 locations, which cache the built packages for the purposes of review and iteration on the changes proposed in the pull request. + Packages built on PR pipelines can come from untrusted users, so signing of these pipelines is not implemented.
+ Jobs in these pipelines are executed via normal GitLab runners both within the AWS GitLab infrastructure and at affiliated institutions. +- Develop and Release pipelines **sign** the packages they produce and carry strong integrity assurances that trace back to auditable change management decisions. + These pipelines only run after members from a trusted group of reviewers verify that the proposed changes in a pull request are appropriate. + Once the PR is merged, or a release is cut, a pipeline is run on protected GitLab runners, which provide access to the required signing keys within the job. + Intermediary keys are used to sign packages in each stage of the pipeline as they are built, and a final job officially signs each package external to any specific package's build environment. + An intermediate key exists in the AWS infrastructure and for each affiliated institution that maintains protected runners. + The runners that execute these pipelines exclusively accept jobs from protected branches, meaning the intermediate keys are never exposed to unreviewed code and the official keys are never exposed to any specific build environment. .. _key_architecture: ----------------- Key Architecture ---------------- -Spack's CI process uses public-key infrastructure (PKI) based on GNU Privacy -Guard (gpg) keypairs to sign public releases of spack package metadata, also -called specs. Two classes of GPG keys are involved in the process to reduce the -impact of an individual private key compromise, these key classes are the -*Intermediate CI Key* and *Reputational Key*. Each of these keys has signing -sub-keys that are used exclusively for signing packages. This can be confusing -so for the purpose of this explanation we will refer to Root and Signing keys. -Each key has a private and a public component as well as one or more identities -and zero or more signatures. +Spack's CI process uses public-key infrastructure (PKI) based on GNU Privacy Guard (gpg) keypairs to sign public releases of Spack package metadata, also called specs. +Two classes of GPG keys are involved in the process to reduce the impact of an individual private key compromise; these key classes are the *Intermediate CI Key* and *Reputational Key*. +Each of these keys has signing sub-keys that are used exclusively for signing packages. +This can be confusing, so for the purpose of this explanation we will refer to Root and Signing keys. +Each key has a private and a public component as well as one or more identities and zero or more signatures. -------------------- Intermediate CI Key ------------------- -The Intermediate key class is used to sign and verify packages between stages -within a develop or release pipeline. An intermediate key exists for the AWS -infrastructure as well as each affiliated institution that maintains protected -runners. These intermediate keys are made available to the GitLab execution -environment building the package so that the package's dependencies may be -verified by the Signing Intermediate CI Public Key and the final package may be -signed by the Signing Intermediate CI Private Key. +The Intermediate key class is used to sign and verify packages between stages within a develop or release pipeline. +An intermediate key exists for the AWS infrastructure as well as each affiliated institution that maintains protected runners.
+These intermediate keys are made available to the GitLab execution environment building the package so that the package's dependencies may be verified by the Signing Intermediate CI Public Key and the final package may be signed by the Signing Intermediate CI Private Key. +---------------------------------------------------------------------------------------------------------+ | **Intermediate CI Key (GPG)** | +==================================================+======================================================+ -| Root Intermediate CI Private Key (RSA 4096)# | Root Intermediate CI Public Key (RSA 4096) | +| Root Intermediate CI Private Key (RSA 4096) | Root Intermediate CI Public Key (RSA 4096) | +--------------------------------------------------+------------------------------------------------------+ | Signing Intermediate CI Private Key (RSA 4096) | Signing Intermediate CI Public Key (RSA 4096) | +--------------------------------------------------+------------------------------------------------------+ @@ -134,29 +85,21 @@ signed by the Signing Intermediate CI Private Key. +---------------------------------------------------------------------------------------------------------+ -The *Root intermediate CI Private Key*\ is stripped out of the GPG key and -stored offline completely separate from Spack's infrastructure. This allows the -core development team to append revocation certificates to the GPG key and -issue new sub-keys for use in the pipeline. It is our expectation that this -will happen on a semi-regular basis. A corollary of this is that *this key -should not be used to verify package integrity outside the internal CI process.* +The *Root Intermediate CI Private Key* is stripped out of the GPG key and stored offline, completely separate from Spack's infrastructure. +This allows the core development team to append revocation certificates to the GPG key and issue new sub-keys for use in the pipeline. +It is our expectation that this will happen on a semi-regular basis. +A corollary of this is that *this key should not be used to verify package integrity outside the internal CI process.*
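+
+One way to achieve this kind of separation with stock GnuPG is to export only the signing sub-key's secret material, leaving the primary (root) secret key to be taken offline.
+The commands below are a sketch rather than the exact procedure used; the key ID is hypothetical:
+
+.. code-block:: sh
+
+   # export only the signing sub-key's secret material ("!" selects exactly that sub-key)
+   gpg --armor --export-secret-subkeys DEADBEEF! > intermediate-ci-signing-key.asc
+
+   # export the full key, including the root private key, for offline storage
+   gpg --armor --export-secret-keys DEADBEEF > intermediate-ci-root-key.asc
+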
----------------- Reputational Key ---------------- -The Reputational Key is the public facing key used to sign complete groups of -development and release packages. Only one key pair exists in this class of -keys. In contrast to the Intermediate CI Key the Reputational Key *should* be -used to verify package integrity. At the end of develop and release pipelines a -final pipeline job pulls down all signed package metadata built by the pipeline, -verifies they were signed with an Intermediate CI Key, then strips the -Intermediate CI Key signature from the package and re-signs them with the -Signing Reputational Private Key. The officially signed packages are then -uploaded back to the AWS S3 mirror. Please note that separating use of the -reputational key into this final job is done to prevent leakage of the key in a -spack package. Because the Signing Reputational Private Key is never exposed to -a build job it cannot accidentally end up in any built package. +The Reputational Key is the public-facing key used to sign complete groups of development and release packages. +Only one key pair exists in this class of keys. +In contrast to the Intermediate CI Key, the Reputational Key *should* be used to verify package integrity. +At the end of develop and release pipelines a final pipeline job pulls down all signed package metadata built by the pipeline, verifies it was signed with an Intermediate CI Key, then strips the Intermediate CI Key signature and re-signs the metadata with the Signing Reputational Private Key. +The officially signed packages are then uploaded back to the AWS S3 mirror. +Please note that separating use of the Reputational Key into this final job is done to prevent leakage of the key in a Spack package. +Because the Signing Reputational Private Key is never exposed to a build job, it cannot accidentally end up in any built package. +---------------------------------------------------------------------------------------------------------+ @@ -171,33 +114,23 @@ a build job it cannot accidentally end up in any built package. | Signatures: Signed by core development team [#f1]_ | +---------------------------------------------------------------------------------------------------------+ -The Root Reputational Private Key is stripped out of the GPG key and stored -offline completely separate from Spack's infrastructure. This allows the core -development team to append revocation certificates to the GPG key in the -unlikely event that the Signing Reputation Private Key is compromised. In -general it is the expectation that rotating this key will happen infrequently if -at all. This should allow relatively transparent verification for the end-user -community without needing deep familiarity with GnuPG or Public Key -Infrastructure. +The Root Reputational Private Key is stripped out of the GPG key and stored offline, completely separate from Spack's infrastructure. +This allows the core development team to append revocation certificates to the GPG key in the unlikely event that the Signing Reputational Private Key is compromised. +In general, the expectation is that this key will be rotated infrequently, if at all. +This should allow relatively transparent verification for the end-user community without needing deep familiarity with GnuPG or Public Key Infrastructure. .. _build_cache_signing: -------------------- Build Cache Signing ------------------- -For an in-depth description of the layout of a binary mirror, see -the :ref:`documentation` covering binary caches. The -key takeaway from that discussion that applies here is that the entry point -to a binary package is its manifest. The manifest refers unambiguously to the -spec metadata and compressed archive, which are stored as content-addressed -blobs. +For an in-depth description of the layout of a binary mirror, see the :ref:`documentation` covering binary caches. +The key takeaway from that discussion that applies here is that the entry point to a binary package is its manifest. +The manifest refers unambiguously to the spec metadata and compressed archive, which are stored as content-addressed blobs. -The manifest files can either be signed or unsigned, but are always given -a name ending with ``.spec.manifest.json`` regardless. The difference between -signed and unsigned manifests is simply that the signed version is wrapped in -a gpg cleartext signature, as illustrated below: +The manifest files can either be signed or unsigned, but are always given a name ending with ``.spec.manifest.json`` regardless. +The difference between signed and unsigned manifests is simply that the signed version is wrapped in a gpg cleartext signature, as illustrated below: ..
code-block:: text @@ -238,161 +171,96 @@ a gpg cleartext signature, as illustrated below: =RrFX -----END PGP SIGNATURE----- -If a user has trusted the public key associated with the private key -used to sign the above manifest file, the signature can be verified with -gpg, as follows: +If a user has trusted the public key associated with the private key used to sign the above manifest file, the signature can be verified with gpg, as follows: .. code-block:: console $ gpg --verify gcc-runtime-12.3.0-s2nqujezsce4x6uhtvxscu7jhewqzztx.spec.manifest.json -When attempting to install a binary package that has been signed, spack will -attempt to verify the signature with one of the trusted keys in its keyring, -and will fail if unable to do so. While not recommended, it is possible to -force installation of a signed package without verification by providing the -``--no-check-signature`` argument to ``spack install ...``. +When attempting to install a binary package that has been signed, Spack will attempt to verify the signature with one of the trusted keys in its keyring, and will fail if unable to do so. +While not recommended, it is possible to force installation of a signed package without verification by providing the ``--no-check-signature`` argument to ``spack install ...``.
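+
+For example, to bypass signature verification for a one-off installation (the package name here is purely illustrative):
+
+.. code-block:: console
+
+   $ spack install --no-check-signature gcc-runtime
+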
.. _internal_implementation: ------------------------ Internal Implementation ----------------------- -The technical implementation of the pipeline signing process includes components -defined in Amazon Web Services, the Kubernetes cluster, at affiliated -institutions, and the GitLab/GitLab Runner deployment. We present the technical -implementation in two interdependent sections. The first addresses how secrets -are managed through the lifecycle of a develop or release pipeline. The second -section describes how Gitlab Runner and pipelines are configured and managed to -support secure automated signing. +The technical implementation of the pipeline signing process includes components defined in Amazon Web Services, the Kubernetes cluster, at affiliated institutions, and the GitLab/GitLab Runner deployment. +We present the technical implementation in two interdependent sections. +The first addresses how secrets are managed through the lifecycle of a develop or release pipeline. +The second section describes how GitLab Runner and pipelines are configured and managed to support secure automated signing. Secrets Management ^^^^^^^^^^^^^^^^^^ -As stated above the Root Private Keys (intermediate and reputational) -are stripped from the GPG keys and stored outside Spack's -infrastructure. +As stated above, the Root Private Keys (intermediate and reputational) are stripped from the GPG keys and stored outside Spack's infrastructure. -.. warning:: - **TODO** - - Explanation here about where and how access is handled for these keys. - - Both Root private keys are protected with strong passwords - - Who has access to these and how? +.. .. admonition:: TODO +.. :class: warning -**Intermediate CI Key** ----------------------- +.. - Explanation here about where and how access is handled for these keys. +.. - Both Root private keys are protected with strong passwords +.. - Who has access to these and how? + +Intermediate CI Key +^^^^^^^^^^^^^^^^^^^ + +Multiple intermediate CI signing keys exist: one Intermediate CI Key for jobs run in AWS, and one key for each affiliated institution (e.g., University of Oregon). +Here we describe how the Intermediate CI Key is managed in AWS: -The Intermediate CI Key (including the Signing Intermediate CI Private Key) is -exported as an ASCII armored file and stored in a Kubernetes secret called -``spack-intermediate-ci-signing-key``. For convenience sake, this same secret -contains an ASCII-armored export of just the *public* components of the -Reputational Key. This secret also contains the *public* components of each of -the affiliated institutions' Intermediate CI Key. These are potentially needed -to verify dependent packages which may have been found in the public mirror or -built by a protected job running on an affiliated institution's infrastructure -in an earlier stage of the pipeline. - -Procedurally the ``spack-intermediate-ci-signing-key`` secret is used in -the following way: - -1. A ``large-arm-prot`` or ``large-x86-prot`` protected runner picks up - a job tagged ``protected`` from a protected GitLab branch. (See - `Protected Runners and Reserved Tags <#_8bawjmgykv0b>`__). -2. Based on its configuration, the runner creates a job Pod in the - pipeline namespace and mounts the spack-intermediate-ci-signing-key - Kubernetes secret into the build container -3. The Intermediate CI Key, affiliated institutions' public key and the - Reputational Public Key are imported into a keyring by the ``spack gpg ...`` - sub-command. This is initiated by the job's build script which is created by - the generate job at the beginning of the pipeline. -4. Assuming the package has dependencies those spec manifests are verified using - the keyring. +The Intermediate CI Key (including the Signing Intermediate CI Private Key) is exported as an ASCII-armored file and stored in a Kubernetes secret called ``spack-intermediate-ci-signing-key``. +For convenience's sake, this same secret contains an ASCII-armored export of just the *public* components of the Reputational Key. +This secret also contains the *public* components of each of the affiliated institutions' Intermediate CI Key. +These are potentially needed to verify dependent packages which may have been found in the public mirror or built by a protected job running on an affiliated institution's infrastructure in an earlier stage of the pipeline. + +Procedurally, the ``spack-intermediate-ci-signing-key`` secret is used in the following way: + +1. A ``large-arm-prot`` or ``large-x86-prot`` protected runner picks up a job tagged ``protected`` from a protected GitLab branch. + (See :ref:`protected_runners`). +2. Based on its configuration, the runner creates a job Pod in the pipeline namespace and mounts the ``spack-intermediate-ci-signing-key`` Kubernetes secret into the build container. +3. The Intermediate CI Key, the affiliated institutions' public keys, and the Reputational Public Key are imported into a keyring by the ``spack gpg ...`` sub-command. + This is initiated by the job's build script, which is created by the generate job at the beginning of the pipeline. +4. Assuming the package has dependencies, those spec manifests are verified using the keyring. 5. The package is built and the spec manifest is generated
- -**Reputational Key** --------------------- - -Because of the increased impact to end users in the case of a private -key breach, the Reputational Key is managed separately from the -Intermediate CI Keys and has additional controls. First, the Reputational -Key was generated outside of Spack's infrastructure and has been signed -by the core development team. The Reputational Key (along with the -Signing Reputational Private Key) was then ASCII armor exported to a -file. Unlike the Intermediate CI Key this exported file is not stored as -a base64 encoded secret in Kubernetes. Instead\ *the key file -itself*\ is encrypted and stored in Kubernetes as the -``spack-signing-key-encrypted`` secret in the pipeline namespace. - -The encryption of the exported Reputational Key (including the Signing -Reputational Private Key) is handled by `AWS Key Management Store (KMS) data -keys -`__. -The private key material is decrypted and imported at the time of signing into a -memory mounted temporary directory holding the keychain. The signing job uses -the `AWS Encryption SDK -`__ -(i.e. ``aws-encryption-cli``) to decrypt the Reputational Key. Permission to -decrypt the key is granted to the job Pod through a Kubernetes service account -specifically used for this, and only this, function. Finally, for convenience -sake, this same secret contains an ASCII-armored export of the *public* -components of the Intermediate CI Keys and the Reputational Key. This allows the -signing script to verify that packages were built by the pipeline (both on AWS -or at affiliated institutions), or signed previously as a part of a different -pipeline. This is done *before* importing decrypting and importing the -Signing Reputational Private Key material and officially signing the packages. - -Procedurally the ``spack-signing-key-encrypted`` secret is used in the -following way: - -1. The ``spack-package-signing-gitlab-runner`` protected runner picks - up a job tagged ``notary`` from a protected GitLab branch (See - `Protected Runners and Reserved Tags <#_8bawjmgykv0b>`__). -2. Based on its configuration, the runner creates a job pod in the - pipeline namespace. The job is run in a stripped down purpose-built - image ``ghcr.io/spack/notary:latest`` Docker image. The runner is - configured to only allow running jobs with this image. -3. The runner also mounts the ``spack-signing-key-encrypted`` secret to - a path on disk. Note that this becomes several files on disk, the - public components of the Intermediate CI Keys, the public components - of the Reputational CI, and an AWS KMS encrypted file containing the - Signing Reputational Private Key. -4. In addition to the secret, the runner creates a tmpfs memory mounted - directory where the GnuPG keyring will be created to verify, and - then resign the package specs. -5. The job script syncs all spec manifest files from the build cache to - a working directory in the job's execution environment. -6. The job script then runs the ``sign.sh`` script built into the - Notary Docker image. -7. The ``sign.sh`` script imports the public components of the - Reputational and Intermediate CI Keys and uses them to verify good - signatures on the spec.manifest.json files. If any signed manifest - does not verify, the job immediately fails. -8. Assuming all manifests are verified, the ``sign.sh`` script then unpacks - the manifest json data from the signed file in preparation for being - re-signed with the Reputational Key. -9. 
The private components of the Reputational Key are decrypted to - standard out using ``aws-encryption-cli`` directly into a ``gpg - --import ...`` statement which imports the key into the - keyring mounted in-memory. -10. The private key is then used to sign each of the manifests and the - keyring is removed from disk. -11. The re-signed manifests are resynced to the AWS S3 Mirror and the - public signing of the packages for the develop or release pipeline - that created them is complete. - -Non service-account access to the private components of the Reputational -Key that are managed through access to the symmetric secret in KMS used -to encrypt the data key (which in turn is used to encrypt the GnuPG key -- See:\ `Encryption SDK -Documentation `__). -A small trusted subset of the core development team are the only -individuals with access to this symmetric key. +6. The spec manifest is signed by the keyring and uploaded to the mirror's build cache. + +Reputational Key +^^^^^^^^^^^^^^^^ + +Because of the increased impact on end users in the case of a private key breach, the Reputational Key is managed separately from the Intermediate CI Keys and has additional controls. +First, the Reputational Key was generated outside of Spack's infrastructure and has been signed by the core development team. +The Reputational Key (along with the Signing Reputational Private Key) was then exported to an ASCII-armored file. +Unlike the Intermediate CI Key, this exported file is not stored as a base64-encoded secret in Kubernetes. +Instead, *the key file itself* is encrypted and stored in Kubernetes as the ``spack-signing-key-encrypted`` secret in the pipeline namespace. + +The encryption of the exported Reputational Key (including the Signing Reputational Private Key) is handled by `AWS Key Management Store (KMS) data keys `__. +The private key material is decrypted and imported at the time of signing into a memory-mounted temporary directory holding the keychain. +The signing job uses the `AWS Encryption SDK `__ (i.e., ``aws-encryption-cli``) to decrypt the Reputational Key. +Permission to decrypt the key is granted to the job Pod through a Kubernetes service account specifically used for this, and only this, function. +Finally, for convenience's sake, this same secret contains an ASCII-armored export of the *public* components of the Intermediate CI Keys and the Reputational Key. +This allows the signing script to verify that packages were built by the pipeline (whether on AWS or at affiliated institutions), or signed previously as a part of a different pipeline. +This is done *before* decrypting and importing the Signing Reputational Private Key material and officially signing the packages. + +Procedurally, the ``spack-signing-key-encrypted`` secret is used in the following way: + +1. The ``spack-package-signing-gitlab-runner`` protected runner picks up a job tagged ``notary`` from a protected GitLab branch (See :ref:`protected_runners`). +2. Based on its configuration, the runner creates a job pod in the pipeline namespace. + The job is run in a stripped-down, purpose-built Docker image, ``ghcr.io/spack/notary:latest``. + The runner is configured to only allow running jobs with this image. +3. The runner also mounts the ``spack-signing-key-encrypted`` secret to a path on disk. + Note that this becomes several files on disk: the public components of the Intermediate CI Keys, the public components of the Reputational Key, and an AWS KMS-encrypted file containing the Signing Reputational Private Key.
+4. In addition to the secret, the runner creates a tmpfs memory-mounted directory where the GnuPG keyring will be created to verify, and then re-sign, the package specs. +5. The job script syncs all spec manifest files from the build cache to a working directory in the job's execution environment. +6. The job script then runs the ``sign.sh`` script built into the Notary Docker image. +7. The ``sign.sh`` script imports the public components of the Reputational and Intermediate CI Keys and uses them to verify good signatures on the ``spec.manifest.json`` files. + If any signed manifest does not verify, the job immediately fails. +8. Assuming all manifests are verified, the ``sign.sh`` script then unpacks the manifest JSON data from the signed file in preparation for being re-signed with the Reputational Key. +9. The private components of the Reputational Key are decrypted to standard out using ``aws-encryption-cli`` directly into a ``gpg --import ...`` statement which imports the key into the keyring mounted in-memory. +10. The private key is then used to sign each of the manifests and the keyring is removed from disk. +11. The re-signed manifests are resynced to the AWS S3 Mirror and the public signing of the packages for the develop or release pipeline that created them is complete.
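+
+The heart of this procedure (steps 7 through 10) can be sketched in a few lines of shell.
+This is a simplified illustration rather than the actual ``sign.sh``; the file names, keyring path, and KMS key ARN are hypothetical, and the exact CLI flags may differ:
+
+.. code-block:: sh
+
+   # step 7: verify every signed manifest against the imported public keys
+   for m in specs/*.spec.manifest.json; do
+       gpg --homedir "$TMPFS_GNUPGHOME" --verify "$m" || exit 1
+   done
+
+   # step 9: decrypt the Reputational Key straight into the in-memory keyring,
+   # so the private key material never touches persistent storage
+   aws-encryption-cli --decrypt -S -w "key=$KMS_KEY_ARN" \
+       -i reputational-key.gpg.encrypted -o - | gpg --homedir "$TMPFS_GNUPGHOME" --import
+
+   # step 10: re-sign each manifest (unpacked in step 8) with the Reputational Key
+   for m in specs/*.spec.manifest.json; do
+       gpg --homedir "$TMPFS_GNUPGHOME" --clearsign --output "$m.signed" "$m"
+   done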
+ +Non-service-account access to the private components of the Reputational Key is managed through access to the symmetric secret in KMS used to encrypt the data key (which in turn is used to encrypt the GnuPG key; see the `Encryption SDK Documentation `__). +A small trusted subset of the core development team are the only individuals with access to this symmetric key. .. _protected_runners: @@ -400,67 +268,48 @@ Protected Runners and Reserved Tags ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Spack has a large number of Gitlab Runners operating in its build farm. -These include runners deployed in the AWS Kubernetes cluster as well as -runners deployed at affiliated institutions. The majority of runners are -shared runners that operate across projects in gitlab.spack.io. These -runners pick up jobs primarily from the spack/spack project and execute -them in PR pipelines. - -A small number of runners operating on AWS and at affiliated institutions are -registered as specific *protected* runners on the spack/spack project. In -addition to protected runners there are protected branches on the spack/spack -project. These are the ``develop`` branch, any release branch (i.e. managed with -the ``releases/v*`` wildcard) and any tag branch (managed with the ``v*`` -wildcard) Finally, Spack's pipeline generation code reserves certain tags to make -sure jobs are routed to the correct runners; these tags are ``public``, -``protected``, and ``notary``. Understanding how all this works together to -protect secrets and provide integrity assurances can be a little confusing so -lets break these down: - -- **Protected Branches**- Protected branches in Spack prevent anyone - other than Maintainers in GitLab from pushing code. In the case of - Spack, the only Maintainer level entity pushing code to protected - branches is Spack bot. Protecting branches also marks them in such a - way that Protected Runners will only run jobs from those branches -- **Protected Runners**- Protected Runners only run jobs from protected - branches. Because protected runners have access to secrets, it's critical - that they not run jobs from untrusted code (i.e. PR branches). If they did, it - would be possible for a PR branch to tag a job in such a way that a protected - runner executed that job and mounted secrets into a code execution - environment that had not been reviewed by Spack maintainers. Note however - that in the absence of tagging used to route jobs, public runners *could* run - jobs from protected branches. No secrets would be at risk of being breached - because non-protected runners do not have access to those secrets; lack of - secrets would, however, cause the jobs to fail. -- **Reserved Tags**- To mitigate the issue of public runners picking up - protected jobs Spack uses a small set of "reserved" job tags (Note that these - are *job* tags not git tags). These tags are "public", "private", and - "notary." The majority of jobs executed in Spack's GitLab instance are - executed via a ``generate`` job. The generate job code systematically ensures - that no user defined configuration sets these tags. Instead, the ``generate`` - job sets these tags based on rules related to the branch where this pipeline - originated. If the job is a part of a pipeline on a PR branch it sets the - ``public`` tag. If the job is part of a pipeline on a protected branch it - sets the ``protected`` tag. Finally if the job is the package signing job and - it is running on a pipeline that is part of a protected branch then it sets - the ``notary`` tag. - -Protected Runners are configured to only run jobs from protected branches. Only -jobs running in pipelines on protected branches are tagged with ``protected`` or -``notary`` tags. This tightly couples jobs on protected branches to protected -runners that provide access to the secrets required to sign the built packages. +These include runners deployed in the AWS Kubernetes cluster as well as runners deployed at affiliated institutions. +The majority of runners are shared runners that operate across projects in gitlab.spack.io. +These runners pick up jobs primarily from the spack/spack project and execute them in PR pipelines. + +A small number of runners operating on AWS and at affiliated institutions are registered as specific *protected* runners on the spack/spack project. +In addition to protected runners, there are protected branches on the spack/spack project. +These are the ``develop`` branch, any release branch (i.e., managed with the ``releases/v*`` wildcard), and any tag branch (managed with the ``v*`` wildcard). +Finally, Spack's pipeline generation code reserves certain tags to make sure jobs are routed to the correct runners; these tags are ``public``, ``protected``, and ``notary``. +Understanding how all this works together to protect secrets and provide integrity assurances can be a little confusing, so let's break these down: + +Protected Branches + Protected branches in Spack prevent anyone other than Maintainers in GitLab from pushing code. + In the case of Spack, the only Maintainer-level entity pushing code to protected branches is Spack bot. + Protecting branches also marks them in such a way that Protected Runners will only run jobs from those branches. + +Protected Runners + Protected Runners only run jobs from protected branches. + Because protected runners have access to secrets, it's critical that they not run jobs from untrusted code (i.e., PR branches). + If they did, it would be possible for a PR branch to tag a job in such a way that a protected runner executed that job and mounted secrets into a code execution environment that had not been reviewed by Spack maintainers.
+ Note, however, that in the absence of tagging used to route jobs, public runners *could* run jobs from protected branches. + No secrets would be at risk of being breached because non-protected runners do not have access to those secrets; lack of secrets would, however, cause the jobs to fail. + +Reserved Tags + To mitigate the issue of public runners picking up protected jobs, Spack uses a small set of "reserved" job tags (note that these are *job* tags, not git tags). + These tags are ``public``, ``protected``, and ``notary``. + The majority of jobs executed in Spack's GitLab instance are executed via a ``generate`` job. + The generate job code systematically ensures that no user-defined configuration sets these tags. + Instead, the ``generate`` job sets these tags based on rules related to the branch where this pipeline originated. + If the job is part of a pipeline on a PR branch, it sets the ``public`` tag. + If the job is part of a pipeline on a protected branch, it sets the ``protected`` tag. + Finally, if the job is the package signing job and it is running on a pipeline that is part of a protected branch, it sets the ``notary`` tag. + +Protected Runners are configured to only run jobs from protected branches. +Only jobs running in pipelines on protected branches are tagged with ``protected`` or ``notary`` tags. +This tightly couples jobs on protected branches to protected runners that provide access to the secrets required to sign the built packages. The secrets can **only** be accessed via: 1. Runners under direct control of the core development team. 2. Runners under direct control of trusted maintainers at affiliated institutions. -3. By code running the automated pipeline that has been reviewed by the - Spack maintainers and judged to be appropriate. +3. By code running the automated pipeline that has been reviewed by the Spack maintainers and judged to be appropriate. -Other attempts (either through malicious intent or incompetence) can at -worst grab jobs intended for protected runners which will cause those -jobs to fail alerting both Spack maintainers and the core development -team. +Other attempts (either through malicious intent or incompetence) can at worst grab jobs intended for protected runners, which will cause those jobs to fail, alerting both Spack maintainers and the core development team. .. [#f1] - The Reputational Key has also cross signed core development team - keys. +  The Reputational Key has also cross-signed core development team keys. diff --git a/lib/spack/docs/spec_syntax.rst b/lib/spack/docs/spec_syntax.rst index 3b26648bc6a6e2..7cdd9af21b068f 100644 --- a/lib/spack/docs/spec_syntax.rst +++ b/lib/spack/docs/spec_syntax.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,39 +9,40 @@ .. _sec-specs: -=========== Spec Syntax =========== Spack has a specific syntax to describe package constraints. -Each constraint is individually referred to as a *spec*. Spack uses specs to: +Each constraint is individually referred to as a *spec*. +Spack uses specs to: 1. Refer to a particular build configuration of a package, or 2. Express requirements, or preferences, on packages via configuration files, or 3.
Query installed packages, or buildcaches -Specs are more than a package name and a version; you can use them to -specify the compiler, compiler version, architecture, compile options, -and dependency options for a build. In this section, we'll go over -the full syntax of specs. +Specs are more than a package name and a version; you can use them to specify the compiler, compiler version, architecture, compile options, and dependency options for a build. +In this section, we'll go over the full syntax of specs. Here is an example of using a complex spec to install a very specific configuration of ``mpileaks``: -.. code-block:: console +.. code-block:: spec - $ spack install mpileaks@1.2:1.4 +debug ~qt target=x86_64_v3 %gcc@15.1.0 ^libelf@1.1 %gcc@14.2.0 + $ spack install mpileaks@1.2:1.4 +debug ~qt target=x86_64_v3 %gcc@15 ^libelf@1.1 %clang@20 -The figure below helps getting a sense of the various parts that compose this spec: +The figure below helps you get a sense of the various parts that compose this spec: .. figure:: images/spec_anatomy.svg + :alt: Spack spec with annotations + :width: 740 + :height: 180 -If used to install a package, this will install: +When installing this, you will get: - * The ``mpileaks`` library at some version between ``1.2`` and ``1.4`` (inclusive), - * with ``debug`` options enabled, and without ``qt`` support, - * for an ``x86_64_v3`` architecture, - * built using ``gcc`` at version ``15.1.0``, - * depending on ``libelf`` at version ``1.1``, built with ``gcc`` at version ``14.2.0``. +* The ``mpileaks`` package at some version between ``1.2`` and ``1.4`` (inclusive), +* with ``debug`` options enabled, and without ``qt`` support, +* optimized for an ``x86_64_v3`` architecture, +* built using ``gcc`` at version ``15``, +* depending on ``libelf`` at version ``1.1``, built with ``clang`` at version ``20``. Most specs will not be as complicated as this one, but this is a good example of what is possible with specs. There are a few general rules that we can already infer from this first example: @@ -54,7 +56,6 @@ The flexibility the spec syntax offers in specifying the details of a build make .. _software-model: --------------- Software Model -------------- @@ -101,13 +102,13 @@ In general, such a configuration would likely behave unexpectedly at runtime, an The purpose of specs is to abstract this full DAG away from Spack users. A user who does not care about the DAG at all, can refer to ``mpileaks`` by simply writing: -.. code-block:: +.. code-block:: spec mpileaks The spec becomes only slightly more complicated, if that user knows that ``mpileaks`` indirectly uses ``dyninst`` and wants a particular version of ``dyninst``: -.. code-block:: +.. code-block:: spec mpileaks ^dyninst@8.1 @@ -117,57 +118,67 @@ You can put all the same modifiers on dependency specs that you would put on the That is, you can specify their versions, variants, and architectures just like any other spec. Specifiers are associated with the nearest package name to their left. -The order of transitive package dependencies doesn't matter when writing a spec. -For example, these two specs represent exactly the same configuration: - -.. code-block:: none - - mpileaks ^callpath@1.0 ^libelf@0.8.3 - mpileaks ^libelf@0.8.3 ^callpath@1.0 +.. _sec-virtual-dependencies: -Direct dependencies specified with ``%`` associate with the most recent transitive node, or with the root of the DAG. -So in the spec: +Virtual dependencies +^^^^^^^^^^^^^^^^^^^^ -.. 
code-block:: +The dependency graph for ``mpileaks`` we saw above wasn't *quite* accurate. +``mpileaks`` uses MPI, which is an interface that has many different implementations. +Above, we showed ``mpileaks`` and ``callpath`` depending on ``mpich``, which is one *particular* implementation of MPI. +However, we could build either with another implementation, such as ``openmpi`` or ``mvapich``. - root %dep1 ^transitive %dep2 %dep3 +Spack represents interfaces like this using *virtual dependencies*. +The real dependency DAG for ``mpileaks`` looks like this: -``dep1`` is a direct dependency of ``root``, while both ``dep2`` and ``dep3`` are direct dependencies of ``transitive``. +.. graphviz:: -.. admonition:: Windows Spec Syntax Caveats - :class: note - :collapsible: + digraph { + node[ + fontname=Monaco, + penwidth=2, + fontsize=124, + margin=.4, + shape=box, + fillcolor=lightblue, + style="rounded,filled" + ] - Windows has a few idiosyncrasies when it comes to the Spack spec syntax and the use of certain shells. - Spack's spec dependency syntax uses the carat (``^``) character; however, this is an escape string in CMD, - so it must be escaped with an additional carat (i.e., ``^^``). - CMD also will attempt to interpret strings with ``=`` characters in them. Any spec including this symbol - must double-quote the string. + mpi [color=red] + mpileaks -> mpi + mpileaks -> callpath -> mpi + callpath -> dyninst + dyninst -> libdwarf -> libelf + dyninst -> libelf + } - Note: All of these issues are unique to CMD; they can be avoided by using PowerShell. +Notice that ``mpich`` has now been replaced with ``mpi``. +There is no *real* MPI package, but some packages *provide* the MPI interface, and these packages can be substituted in for ``mpi`` when ``mpileaks`` is built. - For more context on these caveats, see the related issues: `carat `_ and `equals `_. +Spack is unique in that its virtual packages can be versioned, just like regular packages. +A particular version of a package may provide a particular version of a virtual package. +A package can *depend on* a particular version of a virtual package. +For instance, if an application needs MPI-2 functions, it can depend on ``mpi@2:`` to indicate that it needs some implementation that provides MPI-2 functions. Below are more details about the specifiers that you can add to specs. .. _version-specifier: ------------------ Version specifier ----------------- A version specifier -.. code-block:: +.. code-block:: spec - pkg@ + pkg@specifier comes after a package name and starts with ``@``. It can be something abstract that matches multiple known versions or a specific version. The version specifier usually represents *a range of versions*: -.. code-block:: +.. code-block:: spec # All versions between v1.0 and v1.5. # This includes any v1.5.x version @@ -182,7 +193,7 @@ The version specifier usually represents *a range of versions*: but can also be *a specific version*: -.. code-block:: text +.. code-block:: spec # Exactly version v3.2, will NOT match v3.2.1 etc. @=3.2 @@ -199,58 +210,58 @@ In general, it is preferable to use the range syntax ``@3.2``, because ranges al A version specifier can also be a list of ranges and specific versions, separated by commas. For example: -.. code-block:: +.. code-block:: spec @1.0:1.5,=1.7.1 matches any version in the range ``1.0:1.5`` and the specific version ``1.7.1``. 
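+
+Such a list can be given anywhere a version constraint is accepted; for example (the package name is purely illustrative):
+
+.. code-block:: spec
+
+   $ spack install libelf@0.8.10:0.8.12,=0.8.13
+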
-^^^^^^^^^^^^ Git versions ^^^^^^^^^^^^ -For packages with a ``git`` attribute, ``git`` references -may be specified instead of a numerical version (i.e., branches, tags, -and commits). Spack will stage and build based off the ``git`` -reference provided. Acceptable syntaxes for this are: +.. note:: + Users who want to pin branch- or tag-based versions to a specific commit should assign the ``commit`` variant (``commit=<40 char sha>``). + Spack reserves this variant specifically to track provenance of git-based versions. + Spack will attempt to compute this value for you automatically during concretization and raise a warning if it is unable to assign the commit. + Further details can be found in :ref:`git_version_provenance`. + + +For packages with a ``git`` attribute, ``git`` references may be specified instead of a numerical version (i.e., branches, tags, and commits). +Spack will stage and build based on the ``git`` reference provided. +Acceptable syntaxes for this are: -.. code-block:: sh +.. code-block:: spec - # commit hashes - foo@abcdef1234abcdef1234abcdef1234abcdef1234 # 40 character hashes are automatically treated as git commits + # commit hashes + foo@abcdef1234abcdef1234abcdef1234abcdef1234 # 40 character hashes are automatically treated as git commits foo@git.abcdef1234abcdef1234abcdef1234abcdef1234 - # branches and tags - foo@git.develop # use the develop branch - foo@git.0.19 # use the 0.19 tag + # branches and tags + foo@git.develop # use the develop branch + foo@git.0.19 # use the 0.19 tag -Spack always needs to associate a Spack version with the git reference, -which is used for version comparison. This Spack version is heuristically -taken from the closest valid git tag among the ancestors of the git ref. +Spack always needs to associate a Spack version with the git reference, which is used for version comparison. +This Spack version is heuristically taken from the closest valid git tag among the ancestors of the git ref. -Once a Spack version is associated with a git ref, it is always printed with -the git ref. For example, if the commit ``@git.abcdefg`` is tagged -``0.19``, then the spec will be shown as ``@git.abcdefg=0.19``. +Once a Spack version is associated with a git ref, it is always printed with the git ref. +For example, if the commit ``@git.abcdefg`` is tagged ``0.19``, then the spec will be shown as ``@git.abcdefg=0.19``. -If the git ref is not exactly a tag, then the distance to the nearest tag -is also part of the resolved version. ``@git.abcdefg=0.19.git.8`` means -that the commit is 8 commits away from the ``0.19`` tag. +If the git ref is not exactly a tag, then the distance to the nearest tag is also part of the resolved version. +``@git.abcdefg=0.19.git.8`` means that the commit is 8 commits away from the ``0.19`` tag. -In cases where Spack cannot resolve a sensible version from a git ref, -users can specify the Spack version to use for the git ref. This is done -by appending ``=`` and the Spack version to the git ref. For example: +In cases where Spack cannot resolve a sensible version from a git ref, users can specify the Spack version to use for the git ref. +This is done by appending ``=`` and the Spack version to the git ref. +For example: -.. code-block:: sh +..
code-block:: spec foo@git.my_ref=3.2 # use the my_ref tag or branch, but treat it as version 3.2 for version comparisons foo@git.abcdef1234abcdef1234abcdef1234abcdef1234=develop # use the given commit, but treat it as develop for version comparisons -Details about how versions are compared and how Spack determines if -one version is less than another are discussed in the developer guide. +Details about how versions are compared and how Spack determines if one version is less than another are discussed in the developer guide. .. _basic-variants: --------- Variants -------- @@ -262,48 +273,46 @@ The variants available for a particular package are defined by the package autho There are different types of variants. -^^^^^^^^^^^^^^^^ Boolean Variants ^^^^^^^^^^^^^^^^ Typically used to enable or disable a feature at compile time. For example, a package might have a ``debug`` variant that can be explicitly enabled with: -.. code-block:: +.. code-block:: spec +debug and disabled with -.. code-block:: +.. code-block:: spec ~debug -^^^^^^^^^^^^^^^^^^^^^^ Single-valued Variants ^^^^^^^^^^^^^^^^^^^^^^ Often used to set defaults. For example, a package might have a ``compression`` variant that determines the default compression algorithm, which users could set to: -.. code-block:: +.. code-block:: spec compression=gzip or -.. code-block:: +.. code-block:: spec compression=zstd -^^^^^^^^^^^^^^^^^^^^^ Multi-valued Variants ^^^^^^^^^^^^^^^^^^^^^ A package might have a ``fabrics`` variant that determines which network fabrics to support. -Users could activate multiple values at the same time. For instance: +Users could activate multiple values at the same time. +For instance: -.. code-block:: +.. code-block:: spec fabrics=verbs,ofi @@ -313,44 +322,20 @@ The values are separated by commas. The meaning of ``fabrics=verbs,ofi`` is to enable *at least* the specified fabrics, but other fabrics may be enabled as well. If the intent is to enable *only* the specified fabrics, then the: -.. code-block:: +.. code-block:: spec fabrics:=verbs,ofi syntax should be used with the ``:=`` operator. -.. admonition:: Alternative ways to deactivate Boolean Variants - :class: note - :collapsible: - - In certain shells, the ``~`` character expands to the home directory. - To avoid these issues, avoid whitespace between the package name and the variant: - - .. code-block:: sh - - mpileaks ~debug # shell may try to substitute this! - mpileaks~debug # use this instead - - Alternatively, you can use the ``-`` character to disable a variant, but be aware that this requires a space between the package name and the variant: - - .. code-block:: sh - mpileaks-debug # wrong: refers to a package named "mpileaks-debug" - mpileaks -debug # right: refers to a package named mpileaks with debug disabled - - As a last resort, ``debug=False`` can also be used to disable a boolean variant. - - - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Variant propagation to dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Spack allows variants to propagate their value to the package's -dependencies by using ``++``, ``--``, and ``~~`` for boolean variants. +Spack allows variants to propagate their value to the package's dependencies by using ``++``, ``--``, and ``~~`` for boolean variants. For example, for a ``debug`` variant: -.. code-block:: sh +.. 
code-block:: spec mpileaks ++debug # enabled debug will be propagated to dependencies mpileaks +debug # only mpileaks will have debug enabled @@ -358,108 +343,52 @@ For example, for a ``debug`` variant: To propagate the value of non-boolean variants Spack uses ``name==value``. For example, for the ``stackstart`` variant: -.. code-block:: sh +.. code-block:: spec mpileaks stackstart==4 # variant will be propagated to dependencies mpileaks stackstart=4 # only mpileaks will have this variant value -Spack also allows variants to be propagated from a package that does -not have that variant. - -^^^^^^^^^^^^^^^^^ -Binary Provenance -^^^^^^^^^^^^^^^^^ - -Spack versions are paired to attributes that determine the source code Spack -will use to build. Checksummed assets are preferred but there are a few -notable exceptions such as git branches and tags i.e ``pkg@develop``. -These versions do not naturally have source provenance because they refer to a range -of commits (branches) or can be changed outside the spack packaging infrastructure -(tags). Without source provenance we cannot have binary provenance. - -Spack has a reserved variant to allow users to complete source and binary provenance -for these cases: ``pkg@develop commit=``. The ``commit`` variant must be supplied -using the full 40 character commit SHA. Using a partial commit SHA or assigning -the ``commit`` variant to a version that is not using a branch or tag reference will -lead to an error during concretization. - -Spack will attempt to establish binary provenance by looking up commit SHA's for branch -and tag based versions during concretization. There are 3 sources that it uses. In order, they -are - -1. Staged source code (already cached source code for the version needing provenance) -2. Source mirrors (compressed archives of the source code) -3. The git url provided in the package definition - -If Spack is unable to determine what the commit should be -during concretization a warning will be issued. Users may also specify which commit SHA they -want with the spec since it is simply a variant. In this case, or in the case of develop specs -(see :ref:`develop-specs`), Spack will skip attempts to assign the commit SHA automatically. +Spack also allows variants to be propagated from a package that does not have that variant. -.. note:: - - Users wanting to track the latest commits from the internet should utilize ``spack clean --stage`` - prior to concretization to clean out old stages that will short-circuit internet queries. - Disabling source mirrors or ensuring they don't contain branch/tag based versions will also - be necessary. - - Above all else, the most robust way to ensure binaries have their desired commits is to provide - the SHAs via user-specs or config i.e. ``commit=``. - - --------------- Compiler Flags -------------- -Compiler flags are specified using the same syntax as non-boolean variants, -but fulfill a different purpose. While the function of a variant is set by -the package, compiler flags are used by the compiler wrappers to inject -flags into the compile line of the build. Additionally, compiler flags can -be inherited by dependencies by using ``==``. -``spack install libdwarf cppflags=="-g"`` will install both libdwarf and -libelf with the ``-g`` flag injected into their compile line. +Compiler flags are specified using the same syntax as non-boolean variants, but fulfill a different purpose. 
+While the function of a variant is set by the package, compiler flags are used by the compiler wrappers to inject flags into the compile line of the build. +Additionally, compiler flags can be inherited by dependencies by using ``==``. +``spack install libdwarf cppflags=="-g"`` will install both libdwarf and libelf with the ``-g`` flag injected into their compile line. -Notice that the value of the compiler flags must be quoted if it -contains any spaces. Any of ``cppflags=-O3``, ``cppflags="-O3"``, -``cppflags='-O3'``, and ``cppflags="-O3 -fPIC"`` are acceptable, but -``cppflags=-O3 -fPIC`` is not. Additionally, if the value of the -compiler flags is not the last thing on the line, it must be followed -by a space. The command ``spack install libelf cppflags="-O3"%intel`` -will be interpreted as an attempt to set ``cppflags="-O3%intel"``. +Notice that the value of the compiler flags must be quoted if it contains any spaces. +Any of ``cppflags=-O3``, ``cppflags="-O3"``, ``cppflags='-O3'``, and ``cppflags="-O3 -fPIC"`` are acceptable, but ``cppflags=-O3 -fPIC`` is not. +Additionally, if the value of the compiler flags is not the last thing on the line, it must be followed by a space. +The command ``spack install libelf cppflags="-O3"%intel`` will be interpreted as an attempt to set ``cppflags="-O3%intel"``. -The six compiler flags are injected in the same order as implicit make commands -in GNU Autotools. If all flags are set, the order is -``$cppflags $cflags|$cxxflags $ldflags $ldlibs`` for C and C++, and -``$fflags $cppflags $ldflags $ldlibs`` for Fortran. +The six compiler flags are injected in the same order as implicit make commands in GNU Autotools. +If all flags are set, the order is ``$cppflags $cflags|$cxxflags $ldflags $ldlibs`` for C and C++, and ``$fflags $cppflags $ldflags $ldlibs`` for Fortran. .. _architecture_specifiers: ------------------------ Architecture specifiers ----------------------- Each node in the dependency graph of a spec has an architecture attribute. This attribute is a triplet of platform, operating system, and processor. -You can specify the elements either separately by using -the reserved keywords ``platform``, ``os``, and ``target``: +You can specify the elements either separately by using the reserved keywords ``platform``, ``os``, and ``target``: -.. code-block:: console +.. code-block:: spec $ spack install libelf platform=linux $ spack install libelf os=ubuntu18.04 $ spack install libelf target=broadwell -Normally, users don't have to bother specifying the architecture if they -are installing software for their current host, as in that case the -values will be detected automatically. If you need fine-grained control -over which packages use which targets (or over *all* packages' default -target), see :ref:`package-preferences`. +Normally, users don't have to bother specifying the architecture if they are installing software for their current host, as in that case the values will be detected automatically. +If you need fine-grained control over which packages use which targets (or over *all* packages' default target), see :ref:`package-preferences`. .. _support-for-microarchitectures: +.. 
_cmd-spack-arch: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Support for specific microarchitectures ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -471,9 +400,9 @@ A complete list of the microarchitectures known to Spack can be obtained in the When a spec is installed, Spack matches the compiler being used with the microarchitecture being targeted to inject appropriate optimization flags at compile time. Giving a command such as the following: -.. code-block:: console +.. code-block:: spec - $ spack install zlib%gcc@14.2.0 target=icelake + $ spack install zlib target=icelake %gcc@14 will produce compilation lines similar to: @@ -485,18 +414,16 @@ will produce compilation lines similar to: where the flags ``-march=icelake-client -mtune=icelake-client`` are injected by Spack based on the requested target and compiler. -If Spack knows that the requested compiler can't optimize for the current target -or can't build binaries for that target at all, it will exit with a meaningful error message: +If Spack determines that the requested compiler cannot optimize for the requested target or cannot build binaries for that target at all, it will exit with a meaningful error message: -.. code-block:: console +.. code-block:: spec - $ spack install zlib%gcc@5.5.0 target=icelake + $ spack install zlib target=icelake %gcc@5 ==> Error: cannot produce optimized binary for micro-architecture "icelake" with gcc@5.5.0 [supported compiler versions are 8:] -Conversely, if an old compiler is selected for a newer microarchitecture, Spack will optimize for the best match it can find instead -of failing: +Conversely, if an older compiler is selected for a newer microarchitecture, Spack will optimize for the best match instead of failing: -.. code-block:: console +.. code-block:: spec $ spack arch linux-ubuntu18.04-broadwell @@ -519,129 +446,160 @@ of failing: -------------------------------- zlib@1.2.11%gcc@9.0.1+optimize+pic+shared arch=linux-ubuntu18.04-broadwell -In the snippet above, for instance, the microarchitecture was demoted to ``haswell`` when -compiling with ``gcc@4.8`` because support to optimize for ``broadwell`` starts from ``gcc@4.9:``. +In the snippet above, for instance, the microarchitecture was demoted to ``haswell`` when compiling with ``gcc@4.8`` because support to optimize for ``broadwell`` starts from ``gcc@4.9:``. -Finally, if Spack has no information to match compiler and target, it will -proceed with the installation but avoid injecting any microarchitecture-specific -flags. +Finally, if Spack has no information to match the compiler and target, it will proceed with the installation but avoid injecting any microarchitecture-specific flags. -.. _sec-virtual-dependencies: --------------------- -Virtual dependencies --------------------- +.. _sec-dependencies: -The dependency graph for ``mpileaks`` we saw above wasn't *quite* accurate. -``mpileaks`` uses MPI, which is an interface that has many different implementations. -Above, we showed ``mpileaks`` and ``callpath`` depending on ``mpich``, which is one *particular* implementation of MPI. -However, we could build either with another implementation, such as ``openmpi`` or ``mvapich``. +Dependencies +------------ -Spack represents interfaces like this using *virtual dependencies*. -The real dependency DAG for ``mpileaks`` looks like this: +Each node in a DAG can specify dependencies using either the ``%`` or the ``^`` sigil: -.. 
graphviz:: + +* The ``%`` sigil identifies direct dependencies, which means there must be an edge connecting the dependency to the node it refers to. +* The ``^`` sigil identifies transitive dependencies, which means the dependency just needs to be in the sub-DAG of the node it refers to. - digraph { - node[ - fontname=Monaco, - penwidth=2, - fontsize=124, - margin=.4, - shape=box, - fillcolor=lightblue, - style="rounded,filled" - ] +The order of transitive dependencies does not matter when writing a spec. +For example, these two specs represent exactly the same configuration: - mpi [color=red] - mpileaks -> mpi - mpileaks -> callpath -> mpi - callpath -> dyninst - dyninst -> libdwarf -> libelf - dyninst -> libelf - } +.. code-block:: spec -Notice that ``mpich`` has now been replaced with ``mpi``. -There is no *real* MPI package, but some packages *provide* the MPI interface, and these packages can be substituted in for ``mpi`` when ``mpileaks`` is built. + mpileaks ^callpath@1.0 ^libelf@0.8.3 + mpileaks ^libelf@0.8.3 ^callpath@1.0 -Spack is unique in that its virtual packages can be versioned, just like regular packages. -A particular version of a package may provide a particular version of a virtual package. -A package can *depend on* a particular version of a virtual package. -For instance, if an application needs MPI-2 functions, it can depend on ``mpi@2:`` to indicate that it needs some implementation that provides MPI-2 functions. +Direct dependencies specified with ``%`` apply either to the most recent transitive dependency (``^``), or, if none, to the root package in the spec. +So in the spec: + +.. code-block:: spec + + root %dep1 ^transitive %dep2 %dep3 + +``dep1`` is a direct dependency of ``root``, while both ``dep2`` and ``dep3`` are direct dependencies of ``transitive``. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Constraining virtual packages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When installing a package that depends on a virtual package, you can opt to specify the particular provider you want to use, or you can let Spack pick. +When installing a package that depends on a virtual package (see :ref:`sec-virtual-dependencies`), you can opt to specify the particular provider you want to use, or you can let Spack pick. For example, if you just type this: -.. code-block:: console +.. code-block:: spec $ spack install mpileaks -Then Spack will pick a provider for you according to site policies. +Then Spack will pick an ``mpi`` provider for you according to site policies. If you really want a particular version, say ``mpich``, then you could run this instead: -.. code-block:: console +.. code-block:: spec $ spack install mpileaks ^mpich This forces Spack to use some version of ``mpich`` for its implementation. As always, you can be even more specific and require a particular ``mpich`` version: -.. code-block:: console +.. code-block:: spec $ spack install mpileaks ^mpich@3 -The ``mpileaks`` package in particular only needs MPI-1 commands, so -any MPI implementation will do. If another package depends on -``mpi@2`` and you try to give it an insufficient MPI implementation -(e.g., one that provides only ``mpi@:1``), then Spack will raise an -error. Likewise, if you try to plug in some package that doesn't -provide MPI, Spack will raise an error. +The ``mpileaks`` package in particular only needs MPI-1 commands, so any MPI implementation will do.
+If another package depends on ``mpi@2`` and you try to give it an insufficient MPI implementation (e.g., one that provides only ``mpi@:1``), then Spack will raise an error. +Likewise, if you try to plug in some package that doesn't provide MPI, Spack will raise an error. .. _explicit-binding-virtuals: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Explicit binding of virtual dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are packages that provide more than just one virtual dependency. When interacting with them, users -might want to utilize just a subset of what they could provide and use other providers for virtuals they -need. +There are packages that provide more than just one virtual dependency. +When interacting with them, users might want to utilize just a subset of what they could provide and use other providers for virtuals they need. -It is possible to be more explicit and tell Spack which dependency should provide which virtual, using a -special syntax: +It is possible to be more explicit and tell Spack which dependency should provide which virtual, using a special syntax: -.. code-block:: console +.. code-block:: spec $ spack spec strumpack ^mpi=intel-parallel-studio+mkl ^lapack=openblas Concretizing the spec above produces the following DAG: .. figure:: images/strumpack_virtuals.svg + :width: 3044 + :height: 1683 + +where ``intel-parallel-studio`` *could* provide ``mpi``, ``lapack``, and ``blas`` but is used only for ``mpi``. +The ``lapack`` and ``blas`` dependencies are satisfied by ``openblas``. + +Dependency edge attributes +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some specs require additional information about the relationship between a package and its dependency. +This information lives on the edge between the two, and can be specified by following the dependency sigil with square brackets ``[]``. +Edge attributes are always specified as key-value pairs: + +.. code-block:: spec + + root ^[key=value] dep + +In the following sections we'll discuss the edge attributes that are currently allowed in the spec syntax. -where ``intel-parallel-studio`` *could* provide ``mpi``, ``lapack``, and ``blas`` but is used only for the former. The ``lapack`` -and ``blas`` dependencies are satisfied by ``openblas``. +Virtuals +"""""""" + +Packages can provide, or depend on, multiple virtual packages. +Users can select which virtuals to use from which dependency by specifying the ``virtuals`` edge attribute: + +.. code-block:: spec + + $ spack install mpich %[virtuals=c,cxx] clang %[virtuals=fortran] gcc + +The command above tells Spack to use ``clang`` to provide the ``c`` and ``cxx`` virtuals, and ``gcc`` to provide the ``fortran`` virtual. + +The special syntax we have seen in :ref:`explicit-binding-virtuals` is a more compact way to specify the ``virtuals`` edge attribute. +For instance, an equivalent formulation of the command above is: + +.. code-block:: spec + + $ spack install mpich %c,cxx=clang %fortran=gcc + +Conditional dependencies +"""""""""""""""""""""""" + +Conditional dependencies allow dependency constraints to be applied only under certain conditions. +We can express conditional constraints by specifying the ``when`` edge attribute: + +.. code-block:: spec + + $ spack install hdf5 ^[when=+mpi] mpich@3.1 + +This tells Spack that ``hdf5`` should depend on ``mpich@3.1`` if it is configured with MPI support. + +Dependency propagation +^^^^^^^^^^^^^^^^^^^^^^ + +The dependency specifications on a node can be propagated using a double percent ``%%`` sigil. 
+This is particularly useful when specifying compilers. +For instance, the following command: + +.. code-block:: spec + + $ spack install hdf5+cxx+fortran %%c,cxx=clang %%fortran=gfortran + +tells Spack to install ``hdf5`` using Clang as the C and C++ compiler, and GCC as the Fortran compiler. +It also tells Spack to propagate the same choices, as :ref:`strong preferences `, to the runtime sub-DAG of ``hdf5``. +Build tools are unaffected and can still prefer to use a different compiler. -^^^^^^^^^^^^^^^^^^^^^^^^ Specifying Specs by Hash -^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------ -Complicated specs can become cumbersome to enter on the command line, -especially when many of the qualifications are necessary to distinguish -between similar installs. To avoid this, when referencing an existing spec, -Spack allows you to reference specs by their hash. We previously -discussed the spec hash that Spack computes. In place of a spec in any -command, substitute ``/<hash>`` where ``<hash>`` is any amount from -the beginning of a spec hash. +Complicated specs can become cumbersome to enter on the command line, especially when many of the qualifications are necessary to distinguish between similar installs. +To avoid this, when referencing an existing spec, Spack allows you to reference specs by their hash. +We previously discussed the spec hash that Spack computes. +In place of a spec in any command, substitute ``/<hash>`` where ``<hash>`` is any amount from the beginning of a spec hash. -For example, let's say that you accidentally installed two different -``mvapich2`` installations. If you want to uninstall one of them but don't -know what the difference is, you can run: +For example, let's say that you accidentally installed two different ``mvapich2`` installations. +If you want to uninstall one of them but don't know what the difference is, you can run: -.. code-block:: console +.. code-block:: spec $ spack find --long mvapich2 ==> 2 installed packages. @@ -652,69 +610,61 @@ know what the difference is, you can run: You can then uninstall the latter installation using: -.. code-block:: console +.. code-block:: spec $ spack uninstall /er3die3 -Or, if you want to build with a specific installation as a dependency, -you can use: +Or, if you want to build with a specific installation as a dependency, you can use: -.. code-block:: console +.. code-block:: spec $ spack install trilinos ^/er3die3 -If the given spec hash is sufficiently long as to be unique, Spack will -replace the reference with the spec to which it refers. Otherwise, it will -prompt for a more qualified hash. +If the given spec hash is sufficiently long to be unique, Spack will replace the reference with the spec to which it refers. +Otherwise, it will prompt for a more qualified hash. Note that this will not work to reinstall a dependency uninstalled by ``spack uninstall --force``. --------------------------- -Dependency edge attributes --------------------------- +Specs on the command line +------------------------- -Some specs require additional information about the relationship between a package and its dependency. -This information lives on the edge between the two, and can be specified by following the dependency sigil with square-brackets. -Edge attributes are always specified as key-value pairs: +The characters used in the spec syntax were chosen to work well with most shells. +However, there are cases where the shell may interpret the spec before Spack gets a chance to parse it, leading to unexpected results. 
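+If all else fails, quoting the offending part of the spec usually helps, since the shell then passes it to Spack verbatim; a minimal sketch: + +.. code-block:: spec + + $ spack install "mpileaks ~debug" 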
+Here we document two such cases, and how to avoid them. Unix shells ^^^^^^^^^^^ - root ^[=] dep +On Unix-like systems, the shell may expand ``~foo`` to the home directory of a user named ``foo``, so Spack won't see it as a :ref:`disabled boolean variant ` ``foo``. +To work around this without quoting, you can avoid whitespace between the package name and boolean variants: -In the following sections we'll discuss the edge attributes that are currently allowed in the spec syntax. +.. code-block:: spec -^^^^^^^^^^^^^^^^^ -Virtuals on edges -^^^^^^^^^^^^^^^^^ + mpileaks ~debug # shell may expand this to `mpileaks /home/debug` + mpileaks~debug # use this instead + +Alternatively, you can use a hyphen ``-`` character to disable a variant, but be aware that this *requires* a space between the package name and the variant: -Packages can provide, or depend on, multiple virtual packages. -Users can select which virtuals to use from which dependency by specifying the ``virtuals`` edge attribute: +.. code-block:: spec -.. code-block:: none + mpileaks-debug # wrong: refers to a package named "mpileaks-debug" + mpileaks -debug # right: refers to a package named mpileaks with debug disabled - spack install mpich %[virtuals=c,cxx] clang %[virtuals=fortran] gcc +As a last resort, ``debug=False`` can also be used to disable a boolean variant. -The command above tells Spack to use ``clang`` to provide the ``c`` and ``cxx`` virtuals, and ``gcc`` to provide the ``fortran`` virtual. - -The special syntax we have seen in :ref:`explicit-binding-virtuals` is a more compact way to specify the ``virtuals`` edge attribute. - -For instance, an equivalent formulation of the command above is: - -.. code-block:: none +Windows CMD ^^^^^^^^^^^ - spack install mpich %c,cxx=clang %fortran=gcc +In Windows CMD, the caret ``^`` is an escape character and must itself be escaped. +Similarly, the equals ``=`` character has special meaning in CMD. +To use the caret and equals characters in a spec, you can quote and escape them like this: -^^^^^^^^^^^^^^^^^^^^^^^^ -Conditional dependencies -^^^^^^^^^^^^^^^^^^^^^^^^ - -Conditional dependencies allow dependency constraints to be applied only under certain conditions. -We can express conditional constraint by specifying the ``when`` edge attribute: - -.. code-block:: none +.. code-block:: console - spack install hdf5 ^[when=+mpi] mpich@3.1 + C:\> spack install mpileaks "^^libelf" "foo=bar" -This tells Spack that hdf5 should depend on ``mpich@3.1`` if it is configured with MPI support. +These issues are not present in PowerShell. +See GitHub issues `#42833 <https://github.com/spack/spack/issues/42833>`_ and `#43348 <https://github.com/spack/spack/issues/43348>`_ for more details. diff --git a/lib/spack/docs/toolchains_yaml.rst b/lib/spack/docs/toolchains_yaml.rst new file mode 100644 index 00000000000000..4d322cf69082bc --- /dev/null +++ b/lib/spack/docs/toolchains_yaml.rst @@ -0,0 +1,174 @@ +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. + + SPDX-License-Identifier: (Apache-2.0 OR MIT) + +.. meta:: + :description lang=en: + Define named compiler sets (toolchains) in Spack to easily and consistently apply compiler choices for C, C++, and Fortran across different packages. + +.. _toolchains: + +Toolchains (toolchains.yaml) +============================= + +Toolchains let you group a set of compiler constraints under a single, user-defined name. +This allows you to reference a complex set of compiler choices for C, C++, and Fortran with a simple spec like ``%my_toolchain``. 
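+For instance, instead of spelling out every language choice on the command line, a sketch (the toolchain name is hypothetical): + +.. code-block:: spec + + $ spack install my-package %c,cxx=llvm %fortran=gcc # without a toolchain + $ spack install my-package %my_toolchain # with a toolchain 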
+They are defined under the ``toolchains`` section of the configuration. + +.. seealso:: + + The sections :ref:`language-dependencies` and :ref:`explicit-binding-virtuals` provide more background on how Spack handles languages and compilers. + +Basic usage +----------- + +As an example, the following configuration file defines a toolchain named ``llvm_gfortran``: + +.. code-block:: yaml + :caption: ``~/.spack/toolchains.yaml`` + + toolchains: + llvm_gfortran: + - spec: cflags=-O3 + - spec: "%c=llvm" + when: "%c" + - spec: "%cxx=llvm" + when: "%cxx" + - spec: "%fortran=gcc" + when: "%fortran" + +The ``when`` clause in each entry determines if that line's ``spec`` is applied. +In this example, it means that ``llvm`` is used as a compiler for the C and C++ languages, and ``gcc`` for Fortran, *whenever the package uses those languages*. +The spec ``cflags=-O3`` is *always* applied, because there is no ``when`` clause for that spec. + +The toolchain can be referenced using: + +.. code-block:: spec + + $ spack install my-package %llvm_gfortran + +Toolchains are useful for three reasons: + +1. **They reduce verbosity.** + Instead of multiple constraints ``%c,cxx=clang %fortran=gcc``, you can simply write ``%llvm_gfortran``. +2. **They apply conditionally.** + You can use ``my-package %llvm_gfortran`` even if ``my-package`` is not written in Fortran. +3. **They apply locally.** + Toolchains are used at the level of a single spec. + + +.. _pitfalls-without-toolchains: + +Pitfalls without toolchains +--------------------------- + +The conditional nature of toolchains is important, because it helps you avoid two common pitfalls when specifying compilers. + +1. When you specify ``my-package %gcc``, your spec is **underconstrained**: Spack has to make ``my-package`` depend on ``gcc``, but the constraint does not rule out mixed compilers, such as ``gcc`` for C and ``llvm`` for C++. + +2. When you specify ``my-package %c,cxx,fortran=gcc`` to be more explicit, your spec might be **overconstrained**. + You not only require ``gcc`` for all languages, but *also* that ``my-package`` uses *all* these languages. + This will cause a concretization error if ``my-package`` is written in C and C++, but not Fortran. + +Combining toolchains +-------------------- + +Different toolchains can be used independently or even in the same spec. +Consider the following configuration: + +.. code-block:: yaml + :caption: ``~/.spack/toolchains.yaml`` + + toolchains: + llvm_gfortran: + - spec: cflags=-O3 + - spec: "%c=llvm" + when: "%c" + - spec: "%cxx=llvm" + when: "%cxx" + - spec: "%fortran=gcc" + when: "%fortran" + gcc_all: + - spec: "%c=gcc" + when: "%c" + - spec: "%cxx=gcc" + when: "%cxx" + - spec: "%fortran=gcc" + when: "%fortran" + + +Now, you can use these toolchains in a single spec: + +.. code-block:: spec + + $ spack install hdf5+fortran%llvm_gfortran ^mpich %gcc_all + +This will result in: + +* An ``hdf5`` compiled with ``llvm`` for the C/C++ components, but with its Fortran components compiled with ``gfortran``, +* Built against an MPICH installation compiled entirely with ``gcc`` for C, C++, and Fortran. + +Toolchains for other dependencies +--------------------------------- + +While toolchains are typically used to define compiler presets, they can be used for other dependencies as well. + +A common use case is to define a toolchain that also picks a specific MPI implementation. 
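+Once such a toolchain is defined (named, say, ``clang_openmpi`` as in the example below), a single token pins both the compilers and the MPI provider; a usage sketch (``hdf5+mpi`` is just an illustrative spec): + +.. code-block:: spec + + $ spack install hdf5+mpi %clang_openmpi 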
+In the following example, we define a toolchain that uses ``openmpi@5`` as an MPI provider, and ``llvm@19`` as the compiler for C and C++: + +.. code-block:: yaml + :caption: ``~/.spack/toolchains.yaml`` + + toolchains: + clang_openmpi: + - spec: "%c=llvm@19" + when: "%c" + - spec: "%cxx=llvm@19" + when: "%cxx" + - spec: "%mpi=openmpi@5" + when: "%mpi" + +The general pattern in a toolchains configuration is to use a ``when`` condition that specifies a direct dependency on a *virtual* package, and a ``spec`` that :ref:`requires a specific provider for that virtual `. + +Notice that it's possible to achieve a similar configuration with :doc:`packages.yaml `: + +.. code-block:: yaml + :caption: ``~/.spack/packages.yaml`` + + packages: + c: + require: [llvm@19] + cxx: + require: [llvm@19] + mpi: + require: [openmpi@5] + +The difference is that the toolchain can be applied **locally** in a spec, while the ``packages.yaml`` configuration is always global. +This makes toolchains particularly useful in Spack environments. + +Toolchains in Spack environments +-------------------------------- + +Toolchains can be used to simplify the construction of a list of specs for Spack environments using :ref:`spec matrices `, when the list includes packages with different language requirements: + +.. code-block:: yaml + :caption: ``spack.yaml`` + + spack: + specs: + - matrix: + - [kokkos, hdf5~cxx+fortran, py-scipy] + - ["%llvm_gfortran"] + +Note that in this case, we can use a single matrix, and the user doesn't need to know exactly which package requires which language. +Without toolchains, it would be difficult to enforce compilers directly, because: + +* ``kokkos`` depends on C and C++, but not Fortran +* ``hdf5~cxx+fortran`` depends on C and Fortran, but not C++ +* ``py-scipy`` depends on C, C++, and Fortran + +.. note:: + + Toolchains are currently limited to using only direct dependencies (``%``) in their definition. + Transitive dependencies are not allowed. diff --git a/lib/spack/docs/windows.rst b/lib/spack/docs/windows.rst index 5731aee9917906..178cafcd4cefab 100644 --- a/lib/spack/docs/windows.rst +++ b/lib/spack/docs/windows.rst @@ -1,4 +1,5 @@ -.. Copyright Spack Project Developers. See COPYRIGHT file for details. +.. + Copyright Spack Project Developers. See COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) @@ -8,171 +9,141 @@ .. _windows_support: -================ Spack On Windows ================ -Windows support for Spack is currently under development. While this work is still in an early stage, -it is currently possible to set up Spack and perform a few operations on Windows. This section will guide -you through the steps needed to install Spack and start running it on a fresh Windows machine. +Windows support for Spack is currently under development. +While this work is still in an early stage, it is currently possible to set up Spack and perform a few operations on Windows. +This section will guide you through the steps needed to install Spack and start running it on a fresh Windows machine. ------------------------------ Step 1: Install prerequisites ----------------------------- -To use Spack on Windows, you will need the following packages: +To use Spack on Windows, you will need the following packages. Required: + * Microsoft Visual Studio * Python * Git * 7z Optional: + * Intel Fortran (needed for some packages) .. note:: - Currently MSVC is the only compiler tested for C/C++ projects. Intel OneAPI provides Fortran support. 
+ Currently MSVC is the only compiler tested for C/C++ projects. + Intel OneAPI provides Fortran support. -^^^^^^^^^^^^^^^^^^^^^^^ Microsoft Visual Studio ^^^^^^^^^^^^^^^^^^^^^^^ Microsoft Visual Studio provides the only Windows C/C++ compiler that is currently supported by Spack. -Spack additionally requires that the Windows SDK (including WGL) to be installed as part of your -Visual Studio installation as it is required to build many packages from source. +Spack additionally requires the Windows SDK (including WGL) to be installed as part of your Visual Studio installation, as it is required to build many packages from source. We require several specific components to be included in the Visual Studio installation. -One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools," -depending on installation type (Professional, Build Tools, etc.) The other required component is -"C++ CMake tools for Windows," which can be selected from among the optional packages. +One is the C/C++ toolset, which can be selected as "Desktop development with C++" or "C++ build tools," depending on installation type (Professional, Build Tools, etc.). +The other required component is "C++ CMake tools for Windows," which can be selected from among the optional packages. This provides CMake and Ninja for use during Spack configuration. -If you already have Visual Studio installed, you can make sure these components are installed by -rerunning the installer. Next to your installation, select "Modify" and look at the -"Installation details" pane on the right. +If you already have Visual Studio installed, you can make sure these components are installed by rerunning the installer. +Next to your installation, select "Modify" and look at the "Installation details" pane on the right. -^^^^^^^^^^^^^ Intel Fortran ^^^^^^^^^^^^^ For Fortran-based packages on Windows, we strongly recommend Intel's oneAPI Fortran compilers. -The suite is free to download from Intel's website, located at -https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/fortran-compiler.html. -The executable of choice for Spack will be Intel's Beta Compiler, ifx, which supports the classic -compiler's (ifort's) frontend and runtime libraries by using LLVM. +The suite is free to download from Intel's website, located at https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/fortran-compiler.html. +The executable of choice for Spack will be Intel's Beta Compiler, ifx, which supports the classic compiler's (ifort's) frontend and runtime libraries by using LLVM. -^^^^^^ Python ^^^^^^ As Spack is a Python-based package, an installation of Python will be needed to run it. -Python 3 can be downloaded and installed from the Windows Store, and will be automatically added -to your ``PATH`` in this case. +Python 3 can be downloaded and installed from the Windows Store, and will be automatically added to your ``PATH`` in this case. .. note:: Spack currently supports Python versions later than 3.2 inclusive. -^^^ Git ^^^ A bash console and GUI can be downloaded from https://git-scm.com/downloads. -If you are unfamiliar with Git, there are a myriad of resources online to help -guide you through checking out repositories and switching development branches. +If you are unfamiliar with Git, there are a myriad of resources online to help guide you through checking out repositories and switching development branches. 
-When given the option of adjusting your ``PATH``, choose the ``Git from the -command line and also from 3rd-party software`` option. This will automatically -update your ``PATH`` variable to include the ``git`` command. +When given the option of adjusting your ``PATH``, choose the ``Git from the command line and also from 3rd-party software`` option. +This will automatically update your ``PATH`` variable to include the ``git`` command. -Spack support on Windows is currently dependent on installing the Git for Windows project -as the project providing Git support on Windows. This is additionally the recommended method -for installing Git on Windows, a link to which can be found above. Spack requires the -utilities vendored by this project. +Spack support on Windows currently depends on the Git for Windows project to provide Git. +This is additionally the recommended method for installing Git on Windows, a link to which can be found above. +Spack requires the utilities vendored by this project. -^^^^ 7zip ^^^^ -A tool for extracting ``.xz`` files is required for extracting source tarballs. The latest 7-Zip -can be located at https://sourceforge.net/projects/sevenzip/. +A tool for extracting ``.xz`` files is required for extracting source tarballs. +The latest 7-Zip can be located at https://sourceforge.net/projects/sevenzip/. -------------------------------- Step 2: Install and setup Spack ------------------------------- -We are now ready to get the Spack environment set up on our machine. We -begin by using Git to clone the Spack repo, hosted at https://github.com/spack/spack.git -into a desired directory, for our purposes today, called ``spack_install``. +We are now ready to get the Spack environment set up on our machine. +We begin by using Git to clone the Spack repo, hosted at https://github.com/spack/spack.git into a desired directory, for our purposes today, called ``spack_install``. -In order to install Spack with Windows support, run the following one-liner -in a Windows CMD prompt. +In order to install Spack with Windows support, run the following one-liner in a Windows CMD prompt. .. code-block:: console - git clone https://github.com/spack/spack.git + $ git clone https://github.com/spack/spack.git .. note:: - If you chose to install Spack into a directory on Windows that is set up to require Administrative - Privileges, Spack will require elevated privileges to run. - Administrative Privileges can be denoted either by default, such as - ``C:\Program Files``, or administrator-applied administrative restrictions - on a directory that Spack installs files to such as ``C:\Users`` + If you chose to install Spack into a directory on Windows that is set up to require Administrative Privileges, Spack will require elevated privileges to run. + Administrative privileges may be required by default, as with ``C:\Program Files``, or because an administrator has applied restrictions to a directory that Spack installs files to, such as ``C:\Users``. -------------------------------- Step 3: Run and configure Spack ------------------------------- On Windows, Spack supports both primary native shells, Powershell and the traditional command prompt. -To use Spack, pick your favorite shell, and run ``bin\spack_cmd.bat`` or ``share/spack/setup-env.ps1`` -(you may need to Run as Administrator) from the top-level Spack -directory. This will provide a Spack-enabled shell. 
If you receive a warning message that Python is not in your ``PATH`` -(which may happen if you installed Python from the website and not the Windows Store), add the location -of the Python executable to your ``PATH`` now. You can permanently add Python to your ``PATH`` variable -by using the ``Edit the system environment variables`` utility in Windows Control Panel. +To use Spack, pick your favorite shell, and run ``bin\spack_cmd.bat`` or ``share/spack/setup-env.ps1`` (you may need to Run as Administrator) from the top-level Spack directory. +This will provide a Spack-enabled shell. +If you receive a warning message that Python is not in your ``PATH`` (which may happen if you installed Python from the website and not the Windows Store), add the location of the Python executable to your ``PATH`` now. +You can permanently add Python to your ``PATH`` variable by using the ``Edit the system environment variables`` utility in Windows Control Panel. To configure Spack, first run the following command inside the Spack console: .. code-block:: console - spack compiler find + $ spack compiler find -This creates a ``.staging`` directory in our Spack prefix, along with a ``windows`` subdirectory -containing a ``packages.yaml`` file. On a fresh Windows installation with the above packages -installed, this command should only detect Microsoft Visual Studio and the Intel Fortran -compiler will be integrated within the first version of MSVC present in the ``packages.yaml`` -output. +This creates a ``.staging`` directory in our Spack prefix, along with a ``windows`` subdirectory containing a ``packages.yaml`` file. +On a fresh Windows installation with the above packages installed, this command should detect only Microsoft Visual Studio; the Intel Fortran compiler will be integrated within the first version of MSVC present in the ``packages.yaml`` output. Spack provides a default ``config.yaml`` file for Windows that it will use unless overridden. -This file is located at ``etc\spack\defaults\windows\config.yaml``. You can read more on how to -do this and write your own configuration files in the :ref:`Configuration Files` section of our -documentation. If you do this, pay particular attention to the ``build_stage`` block of the file -as this specifies the directory that will temporarily hold the source code for the packages to -be installed. This path name must be sufficiently short for compliance with CMD, otherwise you -will see build errors during installation (particularly with CMake) tied to long path names. - -To allow Spack's use of external tools and dependencies already on your system, the -external pieces of software must be described in the ``packages.yaml`` file. +This file is located at ``etc\spack\defaults\windows\config.yaml``. +You can read more on how to do this and write your own configuration files in the :ref:`Configuration Files` section of our documentation. +If you do this, pay particular attention to the ``build_stage`` block of the file as this specifies the directory that will temporarily hold the source code for the packages to be installed. +This path name must be sufficiently short for compliance with CMD, otherwise you will see build errors during installation (particularly with CMake) tied to long path names. + +To allow Spack's use of external tools and dependencies already on your system, the external pieces of software must be described in the ``packages.yaml`` file. 
There are two methods to populate this file: -The first and easiest choice is to use Spack to find installations on your system. In -the Spack terminal, run the following commands: +The first and easiest choice is to use Spack to find installations on your system. +In the Spack terminal, run the following commands: .. code-block:: console - spack external find cmake - spack external find ninja + $ spack external find cmake + $ spack external find ninja -The ``spack external find `` will find executables on your system -with the same name given. The command will store the items found in -``packages.yaml`` in the ``.staging\`` directory. +The ``spack external find <name>`` command will find executables on your system with the same name given. +The command will store the items found in ``packages.yaml`` in the ``.staging\`` directory. -Assuming that the command found CMake and Ninja executables in the previous -step, continue to Step 4. If no executables were found, we may need to manually direct Spack towards the CMake -and Ninja installations we set up with Visual Studio. Therefore, your ``packages.yaml`` file will look something -like this, possibly with slight variations in the paths to CMake and Ninja: +Assuming that the command found CMake and Ninja executables in the previous step, continue to Step 4. +If no executables were found, we may need to manually direct Spack towards the CMake and Ninja installations we set up with Visual Studio. +Therefore, your ``packages.yaml`` file will look something like this, possibly with slight variations in the paths to CMake and Ninja: .. code-block:: yaml @@ -181,48 +152,40 @@ like this, possibly with slight variations in the paths to CMake and Ninja: cmake: externals: - spec: cmake@3.19 prefix: 'c:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake' - buildable: False + buildable: false ninja: externals: - spec: ninja@1.8.2 prefix: 'c:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja' - buildable: False + buildable: false -You can also use a separate installation of CMake if you have one and prefer -to use it. If you don't have a path to Ninja analogous to the above, then you can -obtain it by running the Visual Studio Installer and following the instructions -at the start of this section. Also note that YAML files use spaces for indentation -and not tabs, so ensure that this is the case when editing one directly. +You can also use a separate installation of CMake if you have one and prefer to use it. +If you don't have a path to Ninja analogous to the above, then you can obtain it by running the Visual Studio Installer and following the instructions at the start of this section. +Also note that YAML files use spaces for indentation and not tabs, so ensure that this is the case when editing one directly. -.. note:: Cygwin The use of Cygwin is not officially supported by Spack and is not tested. - However, Spack will not prevent this, so if choosing to use Spack - with Cygwin, know that no functionality is guaranteed. +.. note:: + The use of Cygwin is not officially supported by Spack and is not tested. + However, Spack will not prevent this, so if choosing to use Spack with Cygwin, know that no functionality is guaranteed. ------------------ Step 4: Use Spack ----------------- -Once the configuration is complete, it is time to give the installation a test. Install a basic package through the -Spack console via: +Once the configuration is complete, it is time to give the installation a test. 
+Install a basic package through the Spack console via: -.. code-block:: console +.. code-block:: spec - spack install cpuinfo + $ spack install cpuinfo If in the previous step, you did not have CMake or Ninja installed, running the command above should install both packages. -.. note:: Spec Syntax Caveats - Windows has a few idiosyncrasies when it comes to the Spack spec syntax and the use of certain shells - See the Spack spec syntax doc for more information +.. note:: + Windows has a few idiosyncrasies when it comes to the Spack spec syntax and the use of certain shells. See the Spack spec syntax doc for more information. --------------- For developers -------------- -The intent is to provide a Windows installer that will automatically set up -Python, Git, and Spack, instead of requiring the user to do so manually. -Instructions for creating the installer are at -https://github.com/spack/spack/blob/develop/lib/spack/spack/cmd/installer/README.md +The intent is to provide a Windows installer that will automatically set up Python, Git, and Spack, instead of requiring the user to do so manually. +Instructions for creating the installer are at https://github.com/spack/spack/blob/develop/lib/spack/spack/cmd/installer/README.md diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index 2bbda0912607c2..0c25558b512a8b 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -10,7 +10,7 @@ import spack.util.git #: PEP440 canonical ... string -__version__ = "1.0.1" +__version__ = "1.2.0.dev0" spack_version = __version__ #: The current Package API version implemented by this version of Spack. The Package API defines @@ -18,7 +18,7 @@ #: version is incremented when the package API is extended in a backwards-compatible way. The major #: version is incremented upon breaking changes. This version is changed independently from the #: Spack version. -package_api_version = (2, 2) +package_api_version = (2, 4) #: The minimum Package API version that this version of Spack is compatible with. This should #: always be a tuple of the form ``(major, 0)``, since compatibility with vX.Y implies @@ -70,7 +70,7 @@ def get_spack_commit() -> Optional[str]: def get_version() -> str: """Get a descriptive version of this instance of Spack. - Outputs '<version> (<commit>)'. + Outputs ``"<version> (<commit>)"``. The commit sha is only added when available. """ diff --git a/lib/spack/spack/archspec.py b/lib/spack/spack/archspec.py index bf645729f76c48..6f1217eacf3239 100644 --- a/lib/spack/spack/archspec.py +++ b/lib/spack/spack/archspec.py @@ -57,3 +57,7 @@ def microarchitecture_flags_from_target( return target.optimization_flags(compiler.package.archspec_name(), version_number) except ValueError: return "" + + +#: The host target family, like x86_64 or aarch64 +HOST_TARGET_FAMILY = spack.vendor.archspec.cpu.host().family diff --git a/lib/spack/spack/audit.py b/lib/spack/spack/audit.py index 9b4655980848cc..0efad6291d8653 100644 --- a/lib/spack/spack/audit.py +++ b/lib/spack/spack/audit.py @@ -11,8 +11,8 @@ .. 
code-block:: python audit_cfgcmp = AuditClass( - tag='CFG-COMPILER', - description='Sanity checks on compilers.yaml', + tag="CFG-COMPILER", + description="Sanity checks on compilers.yaml", kwargs=() ) @@ -606,7 +606,7 @@ def _ensure_packages_are_unparseable(pkgs, error_cls): errors = [] for pkg_name in pkgs: try: - source = ph.canonical_source(pkg_name, filter_multimethods=False) + source = ph.canonical_source(spack.spec.Spec(pkg_name), filter_multimethods=False) except Exception as e: error_msg = "Package '{}' failed to unparse".format(pkg_name) details = ["{}".format(str(e))] @@ -667,7 +667,7 @@ def _ensure_docstring_and_no_fixme(pkgs, error_cls): pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) if not pkg_cls.__doc__: - error_msg = "Package '{}' miss a docstring" + error_msg = "Package '{}' is missing a docstring" errors.append(error_cls(error_msg.format(pkg_name), [])) return errors @@ -914,11 +914,14 @@ def _linting_package_file(pkgs, error_cls): for pkg_name in pkgs: pkg_cls = spack.repo.PATH.get_pkg_class(pkg_name) + homepage = pkg_cls.homepage + if not homepage: + continue + # Does the homepage have http, and if so, does https work? - if pkg_cls.homepage.startswith("http://"): - https = re.sub("http", "https", pkg_cls.homepage, 1) + if homepage.startswith("http://"): try: - response = urlopen(https) + response = urlopen(f"https://{homepage[7:]}") except Exception as e: msg = 'Error with attempting https for "{0}": ' errors.append(error_cls(msg.format(pkg_cls.name), [str(e)])) @@ -1224,7 +1227,7 @@ def _named_specs_in_when_arguments(pkgs, error_cls): def _refers_to_pkg(when): when_spec = spack.spec.Spec(when) - return when_spec.name is None or when_spec.name == pkg_name + return not when_spec.name or when_spec.name == pkg_name def _error_items(when_dict): for when, elts in when_dict.items(): diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py index 4dc584ebb2dfbc..e36a1574dd035f 100644 --- a/lib/spack/spack/binary_distribution.py +++ b/lib/spack/spack/binary_distribution.py @@ -2,7 +2,6 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import codecs import collections import concurrent.futures import contextlib @@ -25,6 +24,7 @@ import urllib.parse import urllib.request import warnings +from collections import defaultdict from contextlib import closing from typing import IO, Callable, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union @@ -85,15 +85,13 @@ from .enums import InstallRecordStatus from .url_buildcache import ( CURRENT_BUILD_CACHE_LAYOUT_VERSION, - SUPPORTED_LAYOUT_VERSIONS, BlobRecord, BuildcacheComponent, BuildcacheEntryError, BuildcacheManifest, InvalidMetadataFile, ListMirrorSpecsError, - MirrorForSpec, - MirrorURLAndVersion, + MirrorMetadata, URLBuildcacheEntry, get_entries_from_cache, get_url_buildcache_class, @@ -159,10 +157,6 @@ class BinaryCacheIndex: At the moment, everything in this class is initialized as lazily as possible, so that it avoids slowing anything in spack down until absolutely necessary. - - TODO: What's the cost if, e.g., we realize in the middle of a spack - install that the cache is out of date, and we fetch directly? Does it - mean we should have paid the price to update the cache earlier? """ def __init__(self, cache_root: Optional[str] = None): @@ -184,12 +178,12 @@ def __init__(self, cache_root: Optional[str] = None): # mapping from mirror urls to the time.time() of the last index fetch and a bool indicating # whether the fetch succeeded or not. 
- self._last_fetch_times: Dict[MirrorURLAndVersion, float] = {} + self._last_fetch_times: Dict[MirrorMetadata, Tuple[float, bool]] = {} - # _mirrors_for_spec is a dictionary mapping DAG hashes to lists of - # entries indicating mirrors where that concrete spec can be found. - # Each entry is a MirrorURLAndVersion. - self._mirrors_for_spec: Dict[str, List[MirrorForSpec]] = {} + #: Dictionary mapping DAG hashes of specs to Spec objects + self._known_specs: Dict[str, spack.spec.Spec] = {} + #: Dictionary mapping DAG hashes of specs to a list of mirrors where they can be found + self._mirrors_for_spec: Dict[str, Set[MirrorMetadata]] = defaultdict(set) def _init_local_index_cache(self): if not self._index_file_cache_initialized: @@ -205,17 +199,6 @@ def _init_local_index_cache(self): self._index_file_cache_initialized = True - def clear(self): - """For testing purposes we need to be able to empty the cache and - clear associated data structures.""" - if self._index_file_cache: - self._index_file_cache.destroy() - self._index_file_cache = file_cache.FileCache(self._index_cache_root) - self._local_index_cache = {} - self._specs_already_associated = set() - self._last_fetch_times = {} - self._mirrors_for_spec = {} - def _write_local_index_cache(self): self._init_local_index_cache() cache_key = self._index_contents_key @@ -231,22 +214,20 @@ def regenerate_spec_cache(self, clear_existing=False): if clear_existing: self._specs_already_associated = set() - self._mirrors_for_spec = {} + self._mirrors_for_spec = defaultdict(set) + self._known_specs = {} - for url_and_version in self._local_index_cache: - cache_entry = self._local_index_cache[url_and_version] + for mirror_metadata in self._local_index_cache: + cache_entry = self._local_index_cache[mirror_metadata] cached_index_path = cache_entry["index_path"] cached_index_hash = cache_entry["index_hash"] if cached_index_hash not in self._specs_already_associated: self._associate_built_specs_with_mirror( - cached_index_path, MirrorURLAndVersion.from_string(url_and_version) + cached_index_path, MirrorMetadata.from_string(mirror_metadata) ) self._specs_already_associated.add(cached_index_hash) - def _associate_built_specs_with_mirror(self, cache_key, url_and_version: MirrorURLAndVersion): - mirror_url = url_and_version.url - layout_version = url_and_version.version - + def _associate_built_specs_with_mirror(self, cache_key, mirror_metadata: MirrorMetadata): with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: db = BuildCacheDatabase(tmpdir) @@ -257,8 +238,8 @@ def _associate_built_specs_with_mirror(self, cache_key, url_and_version: MirrorU db._read_from_file(pathlib.Path(cache_path)) except spack.database.InvalidDatabaseVersionError as e: tty.warn( - "you need a newer Spack version to read the buildcache index " - f"for the following v{layout_version} mirror: '{mirror_url}'. " + "you need a newer Spack version to read the buildcache index for the " + f"following v{mirror_metadata.version} mirror: '{mirror_metadata.url}'. 
" f"{e.database_version_message}" ) return @@ -266,109 +247,57 @@ def _associate_built_specs_with_mirror(self, cache_key, url_and_version: MirrorU spec_list = [ s for s in db.query_local(installed=InstallRecordStatus.ANY) - if s.external or db.query_local_by_spec_hash(s.dag_hash()).in_buildcache + # todo, make it easer to get install records associated with specs + if s.external or db._data[s.dag_hash()].in_buildcache ] - for indexed_spec in spec_list: - dag_hash = indexed_spec.dag_hash() - - if dag_hash not in self._mirrors_for_spec: - self._mirrors_for_spec[dag_hash] = [] - - for entry in self._mirrors_for_spec[dag_hash]: - # A binary mirror can only have one spec per DAG hash, so - # if we already have an entry under this DAG hash for this - # mirror url/layout version, we're done. - if ( - entry.url_and_version.url == mirror_url - and entry.url_and_version.version == layout_version - ): - break - else: - self._mirrors_for_spec[dag_hash].append( - MirrorForSpec(url_and_version, indexed_spec) - ) - - def get_all_built_specs(self): - spec_list = [] - for dag_hash in self._mirrors_for_spec: - # in the absence of further information, all concrete specs - # with the same DAG hash are equivalent, so we can just - # return the first one in the list. - if len(self._mirrors_for_spec[dag_hash]) > 0: - spec_list.append(self._mirrors_for_spec[dag_hash][0].spec) + for spec in spec_list: + dag_hash = spec.dag_hash() + mirrors = self._mirrors_for_spec[dag_hash] - return spec_list + mirrors.add(mirror_metadata.strip_view()) + if dag_hash not in self._known_specs: + self._known_specs[dag_hash] = spec - def find_built_spec(self, spec, mirrors_to_check=None): - """Look in our cache for the built spec corresponding to ``spec``. + def get_all_built_specs(self) -> List[spack.spec.Spec]: + """Returns a list of all concrete specs known to be available in a binary cache.""" + return list(self._known_specs.values()) - If the spec can be found among the configured binary mirrors, a - list is returned that contains the concrete spec and the mirror url - of each mirror where it can be found. Otherwise, ``None`` is - returned. + def find_built_spec(self, spec: spack.spec.Spec) -> List[MirrorMetadata]: + """Returns a list of MirrorMetadata objects indicating which mirrors have the given + concrete spec. - This method does not trigger reading anything from remote mirrors, but - rather just checks if the concrete spec is found within the cache. + This method does not trigger reading anything from remote mirrors, but rather just checks + if the concrete spec is found within the cache. The cache can be updated by calling ``update()`` on the cache. Args: - spec (spack.spec.Spec): Concrete spec to find - mirrors_to_check: Optional mapping containing mirrors to check. If - None, just assumes all configured mirrors. - - Returns: - An list of objects containing the found specs and mirror url where - each can be found, e.g.: - - .. code-block:: python - - [ - { - "spec": , - "mirror_url": - } - ] + spec: Concrete spec to find """ - return self.find_by_hash(spec.dag_hash(), mirrors_to_check=mirrors_to_check) + return self.find_by_hash(spec.dag_hash()) - def find_by_hash(self, find_hash, mirrors_to_check=None): + def find_by_hash(self, dag_hash: str) -> List[MirrorMetadata]: """Same as find_built_spec but uses the hash of a spec. Args: - find_hash (str): hash of the spec to search - mirrors_to_check: Optional mapping containing mirrors to check. If - None, just assumes all configured mirrors. 
- """ - if find_hash not in self._mirrors_for_spec: - return [] - results = self._mirrors_for_spec[find_hash] - if not mirrors_to_check: - return results - mirror_urls = mirrors_to_check.values() - return [r for r in results if r.url_and_version.url in mirror_urls] - - def update_spec(self, spec: spack.spec.Spec, found_list: List[MirrorForSpec]): - """ - Take list of {'mirror_url': m, 'spec': s} objects and update the local - built_spec_cache + dag_hash: hash of the spec to search """ + return list(self._mirrors_for_spec.get(dag_hash, [])) + + def update_spec(self, spec: spack.spec.Spec, found_list: List[MirrorMetadata]) -> None: + """Update the cache with a new list of mirrors for a given spec.""" spec_dag_hash = spec.dag_hash() if spec_dag_hash not in self._mirrors_for_spec: - self._mirrors_for_spec[spec_dag_hash] = found_list + self._mirrors_for_spec[spec_dag_hash] = set(found_list) + self._known_specs[spec_dag_hash] = spec else: current_list = self._mirrors_for_spec[spec_dag_hash] for new_entry in found_list: - for cur_entry in current_list: - if new_entry.url_and_version == cur_entry.url_and_version: - cur_entry.spec = new_entry.spec - break - else: - current_list.append(MirrorForSpec(new_entry.url_and_version, new_entry.spec)) + current_list.add(new_entry.strip_view()) - def update(self, with_cooldown=False): + def update(self, with_cooldown: bool = False) -> None: """Make sure local cache of buildcache index files is up to date. If the same mirrors are configured as the last time this was called and none of the remote buildcache indices have changed, calling this @@ -379,9 +308,9 @@ def update(self, with_cooldown=False): on disk under ``_index_cache_root``).""" self._init_local_index_cache() configured_mirrors = [ - MirrorURLAndVersion(m.fetch_url, layout_version) - for layout_version in SUPPORTED_LAYOUT_VERSIONS + MirrorMetadata(m.fetch_url, layout_version, m.fetch_view) for m in spack.mirrors.mirror.MirrorCollection(binary=True).values() + for layout_version in m.supported_layout_versions ] items_to_remove = [] spec_cache_clear_needed = False @@ -410,13 +339,13 @@ def update(self, with_cooldown=False): # Otherwise the concrete spec cache should not need to be updated at # all. - fetch_errors = [] + fetch_errors: List[Exception] = [] all_methods_failed = True ttl = spack.config.get("config:binary_index_ttl", 600) now = time.time() for local_index_cache_key in self._local_index_cache: - urlAndVersion = MirrorURLAndVersion.from_string(local_index_cache_key) + urlAndVersion = MirrorMetadata.from_string(local_index_cache_key) cached_mirror_url = urlAndVersion.url cache_entry = self._local_index_cache[local_index_cache_key] cached_index_path = cache_entry["index_path"] @@ -435,6 +364,7 @@ def update(self, with_cooldown=False): all_methods_failed = False else: # May need to fetch the index and update the local caches + needs_regen = False try: needs_regen = self._fetch_and_cache_index( urlAndVersion, cache_entry=cache_entry @@ -442,9 +372,15 @@ def update(self, with_cooldown=False): self._last_fetch_times[urlAndVersion] = (now, True) all_methods_failed = False except FetchIndexError as e: - needs_regen = False fetch_errors.append(e) self._last_fetch_times[urlAndVersion] = (now, False) + except BuildcacheIndexNotExists as e: + fetch_errors.append(e) + self._last_fetch_times[urlAndVersion] = (now, False) + # Binary caches are not required to have an index, don't raise + # if it doesn't exist. + all_methods_failed = False + # The need to regenerate implies a need to clear as well. 
spec_cache_clear_needed |= needs_regen spec_cache_regenerate_needed |= needs_regen @@ -476,14 +412,21 @@ def update(self, with_cooldown=False): continue # Need to fetch the index and update the local caches + needs_regen = False try: needs_regen = self._fetch_and_cache_index(urlAndVersion) self._last_fetch_times[urlAndVersion] = (now, True) all_methods_failed = False except FetchIndexError as e: fetch_errors.append(e) - needs_regen = False self._last_fetch_times[urlAndVersion] = (now, False) + except BuildcacheIndexNotExists as e: + fetch_errors.append(e) + self._last_fetch_times[urlAndVersion] = (now, False) + # Binary caches are not required to have an index, don't raise + # if it doesn't exist. + all_methods_failed = False + # Generally speaking, a new mirror wouldn't imply the need to # clear the spec cache, so leave it as is. if needs_regen: @@ -501,14 +444,14 @@ def update(self, with_cooldown=False): if spec_cache_regenerate_needed: self.regenerate_spec_cache(clear_existing=spec_cache_clear_needed) - def _fetch_and_cache_index(self, url_and_version: MirrorURLAndVersion, cache_entry={}): + def _fetch_and_cache_index(self, mirror_metadata: MirrorMetadata, cache_entry={}): """Fetch a buildcache index file from a remote mirror and cache it. If we already have a cached index from this mirror, then we first check if the hash has changed, and we avoid fetching it if not. Args: - url_and_version: Contains mirror base url and target binary cache layout version + mirror_metadata: Contains mirror base url and target binary cache layout version cache_entry (dict): Old cache metadata with keys ``index_hash``, ``index_path``, ``etag`` @@ -517,19 +460,22 @@ def _fetch_and_cache_index(self, url_and_version: MirrorURLAndVersion, cache_ent Throws: FetchIndexError + BuildcacheIndexNotExists """ - mirror_url = url_and_version.url - layout_version = url_and_version.version + mirror_url = mirror_metadata.url + mirror_view = mirror_metadata.view + layout_version = mirror_metadata.version # TODO: get rid of this request, handle 404 better scheme = urllib.parse.urlparse(mirror_url).scheme if scheme != "oci": cache_class = get_url_buildcache_class(layout_version=layout_version) - if not web_util.url_exists(cache_class.get_index_url(mirror_url)): - return False + index_url = cache_class.get_index_url(mirror_url, mirror_view) + if not web_util.url_exists(index_url): + raise BuildcacheIndexNotExists(f"Index not found in cache {index_url}") - fetcher: IndexFetcher = get_index_fetcher(scheme, url_and_version, cache_entry) + fetcher: IndexFetcher = get_index_fetcher(scheme, mirror_metadata, cache_entry) result = fetcher.conditional_fetch() # Nothing to do @@ -537,13 +483,13 @@ def _fetch_and_cache_index(self, url_and_version: MirrorURLAndVersion, cache_ent return False # Persist new index.json - url_hash = compute_hash(f"{mirror_url}/v{layout_version}") + url_hash = compute_hash(str(mirror_metadata)) cache_key = "{}_{}.json".format(url_hash[:10], result.hash[:10]) self._index_file_cache.init_entry(cache_key) with self._index_file_cache.write_transaction(cache_key) as (old, new): new.write(result.data) - self._local_index_cache[str(url_and_version)] = { + self._local_index_cache[str(mirror_metadata)] = { "index_hash": result.hash, "index_path": cache_key, "etag": result.etag, @@ -699,7 +645,7 @@ def select_signing_key() -> str: return keys[0] -def _push_index(db: BuildCacheDatabase, temp_dir: str, cache_prefix: str): +def _push_index(db: BuildCacheDatabase, temp_dir: str, cache_prefix: str, name: str = ""): 
"""Generate the index, compute its hash, and push the files to the mirror""" index_json_path = os.path.join(temp_dir, spack.database.INDEX_JSON_FILE) with open(index_json_path, "w", encoding="utf-8") as f: @@ -707,7 +653,11 @@ def _push_index(db: BuildCacheDatabase, temp_dir: str, cache_prefix: str): cache_class = get_url_buildcache_class(layout_version=CURRENT_BUILD_CACHE_LAYOUT_VERSION) cache_class.push_local_file_as_blob( - index_json_path, cache_prefix, "index", BuildcacheComponent.INDEX, compression="none" + index_json_path, + cache_prefix, + url_util.join(name, "index") if name else "index", + BuildcacheComponent.INDEX, + compression="none", ) cache_class.maybe_push_layout_json(cache_prefix) @@ -715,6 +665,8 @@ def _push_index(db: BuildCacheDatabase, temp_dir: str, cache_prefix: str): def _read_specs_and_push_index( file_list: List[str], read_method: Callable[[str], URLBuildcacheEntry], + name: str, + filter_fn: Callable[[str], bool], cache_prefix: str, db: BuildCacheDatabase, temp_dir: str, @@ -730,6 +682,16 @@ def _read_specs_and_push_index( temp_dir: Location to write index.json and hash for pushing """ for file in file_list: + # All supported versions of build caches put the hash as the last + # parameter before the extension + try: + x = file.split("/")[-1].split("-")[-1].split(".")[0] + except IndexError: + raise GenerateIndexError(f"Malformed metadata file name detected {file}") + + if not filter_fn(x): + continue + cache_entry: Optional[URLBuildcacheEntry] = None try: cache_entry = read_method(file) @@ -744,10 +706,16 @@ def _read_specs_and_push_index( db.add(fetched_spec) db.mark(fetched_spec, "in_buildcache", True) - _push_index(db, temp_dir, cache_prefix) + _push_index(db, temp_dir, cache_prefix, name) -def _url_generate_package_index(url: str, tmpdir: str): +def _url_generate_package_index( + url: str, + tmpdir: str, + db: Optional[BuildCacheDatabase] = None, + name: str = "", + filter_fn: Callable[[str], bool] = lambda x: True, +): """Create or replace the build cache index on the given mirror. The buildcache index contains an entry for each binary package under the cache_prefix. @@ -760,19 +728,23 @@ def _url_generate_package_index(url: str, tmpdir: str): """ with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpspecsdir: try: - file_list, read_fn = get_entries_from_cache( + filename_to_mtime_mapping, read_fn = get_entries_from_cache( url, tmpspecsdir, component_type=BuildcacheComponent.SPEC ) + file_list = list(filename_to_mtime_mapping.keys()) except ListMirrorSpecsError as e: raise GenerateIndexError(f"Unable to generate package index: {e}") from e tty.debug(f"Retrieving spec descriptor files from {url} to build index") - db = BuildCacheDatabase(tmpdir) - db._write() + if not db: + db = BuildCacheDatabase(tmpdir) + db._write() try: - _read_specs_and_push_index(file_list, read_fn, url, db, str(db.database_directory)) + _read_specs_and_push_index( + file_list, read_fn, name, filter_fn, url, db, str(db.database_directory) + ) except Exception as e: raise GenerateIndexError( f"Encountered problem pushing package index to {url}: {e}" @@ -782,7 +754,7 @@ def _url_generate_package_index(url: str, tmpdir: str): def generate_key_index(mirror_url: str, tmpdir: str) -> None: """Create the key index page. - Creates (or replaces) the "index.json" page at the location given in mirror_url. This page + Creates (or replaces) the ``index.json`` page at the location given in mirror_url. This page contains an entry for each key under mirror_url. 
""" @@ -1086,6 +1058,8 @@ def tag(self, tag: str, roots: List[spack.spec.Spec]): self._base_images, self._checksums, tagged_image, self.tmpdir, None, None, *roots ) + tty.info(f"Tagged {tagged_image}") + class URLUploader(Uploader): def __init__( @@ -1121,7 +1095,7 @@ def make_uploader( base_image: Optional[str] = None, ) -> Uploader: """Builder for the appropriate uploader based on the mirror type""" - if mirror.push_url.startswith("oci://"): + if spack.oci.image.is_oci_url(mirror.push_url): return OCIUploader( mirror=mirror, force=force, update_index=update_index, base_image=base_image ) @@ -1705,17 +1679,17 @@ def try_fetch(url_to_fetch): def download_tarball( - spec: spack.spec.Spec, unsigned: Optional[bool] = False, mirrors_for_spec=None + spec: spack.spec.Spec, + unsigned: Optional[bool] = False, + mirrors_for_spec: Optional[List[MirrorMetadata]] = None, ) -> Optional[spack.stage.Stage]: """Download binary tarball for given package Args: spec: a concrete spec unsigned: if ``True`` or ``False`` override the mirror signature verification defaults - mirrors_for_spec (list): Optional list of concrete specs and mirrors - obtained by calling binary_distribution.get_mirrors_for_spec(). - These will be checked in order first before looking in other - configured mirrors. + mirrors_for_spec: Optional list of mirrors known to have the spec. These will be checked + in order first before looking in other configured mirrors. Returns: ``None`` if the tarball could not be downloaded, the signature verified @@ -1737,23 +1711,26 @@ def download_tarball( # look in all configured mirrors if needed, as maybe the spec # we need was in an un-indexed mirror. No need to check any # mirror for the spec twice though. - try_first = [i.url_and_version for i in mirrors_for_spec] if mirrors_for_spec else [] - - try_next = [] - for try_layout in SUPPORTED_LAYOUT_VERSIONS: - try_next.extend([MirrorURLAndVersion(i.fetch_url, try_layout) for i in configured_mirrors]) + try_first = mirrors_for_spec or [] + try_next = [ + MirrorMetadata(mirror.fetch_url, layout, mirror.fetch_view) + for mirror in configured_mirrors + for layout in mirror.supported_layout_versions + ] urls_and_versions = try_first + [uv for uv in try_next if uv not in try_first] # TODO: turn `mirrors_for_spec` into a list of Mirror instances, instead of doing that here. - def fetch_url_to_mirror(url_and_version): - url = url_and_version.url - layout_version = url_and_version.version + def fetch_url_to_mirror( + mirror_metadata: MirrorMetadata, + ) -> Tuple[spack.mirrors.mirror.Mirror, int]: + url = mirror_metadata.url + layout_version = mirror_metadata.version for mirror in configured_mirrors: if mirror.fetch_url == url: return mirror, layout_version return spack.mirrors.mirror.Mirror(url), layout_version - mirrors = [fetch_url_to_mirror(url_and_version) for url_and_version in urls_and_versions] + mirrors = [fetch_url_to_mirror(mirror_metadata) for mirror_metadata in urls_and_versions] for mirror, layout_version in mirrors: # Override mirror's default if @@ -1763,10 +1740,8 @@ def fetch_url_to_mirror(url_and_version): fetch_url = mirror.fetch_url # TODO: refactor this to some "nice" place. 
- if fetch_url.startswith("oci://"): - ref = spack.oci.image.ImageReference.from_string(fetch_url[len("oci://") :]).with_tag( - _oci_default_tag(spec) - ) + if spack.oci.image.is_oci_url(fetch_url): + ref = ImageReference.from_url(fetch_url).with_tag(_oci_default_tag(spec)) # Fetch the manifest try: @@ -2031,6 +2006,8 @@ def _tar_strip_component(tar: tarfile.TarFile, prefix: str): def extract_buildcache_tarball(tarfile_path: str, destination: str) -> None: with closing(tarfile.open(tarfile_path, "r")) as tar: + # For consistent behavior across all supported Python versions + tar.extraction_filter = lambda member, path: member # Remove common prefix from tarball entries and directly extract them to the install dir. tar.extractall( path=destination, members=_tar_strip_component(tar, prefix=_ensure_common_prefix(tar)) @@ -2179,15 +2156,17 @@ def install_single_spec(spec, unsigned=False, force=False): install_root_node(node, unsigned=unsigned, force=force) -def try_direct_fetch(spec, mirrors=None): - """ - Try to find the spec directly on the configured mirrors - """ - found_specs: List[MirrorForSpec] = [] - binary_mirrors = spack.mirrors.mirror.MirrorCollection(mirrors=mirrors, binary=True).values() +def try_direct_fetch(spec: spack.spec.Spec) -> List[MirrorMetadata]: + """Try to find the spec directly on the configured mirrors""" + found_specs: List[MirrorMetadata] = [] + binary_mirrors = spack.mirrors.mirror.MirrorCollection(binary=True).values() + + for mirror in binary_mirrors: + # TODO: OCI-support + if spack.oci.image.is_oci_url(mirror.fetch_url): + continue - for layout_version in SUPPORTED_LAYOUT_VERSIONS: - for mirror in binary_mirrors: + for layout_version in mirror.supported_layout_versions: # layout_version could eventually come from the mirror config cache_class = get_url_buildcache_class(layout_version=layout_version) cache_entry = cache_class(mirror.fetch_url, spec) @@ -2204,42 +2183,31 @@ def try_direct_fetch(spec, mirrors=None): fetched_spec = spack.spec.Spec.from_dict(spec_dict) fetched_spec._mark_concrete() - found_specs.append( - MirrorForSpec(MirrorURLAndVersion(mirror.fetch_url, layout_version), fetched_spec) - ) + found_specs.append(MirrorMetadata(mirror.fetch_url, layout_version, mirror.fetch_view)) return found_specs -def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False): +def get_mirrors_for_spec(spec: spack.spec.Spec, index_only: bool = False) -> List[MirrorMetadata]: """ - Check if concrete spec exists on mirrors and return a list - indicating the mirrors on which it can be found + Check if concrete spec exists on mirrors and return a list indicating the mirrors on which it + can be found Args: - spec (spack.spec.Spec): The spec to look for in binary mirrors - mirrors_to_check (dict): Optionally override the configured mirrors - with the mirrors in this dictionary. - index_only (bool): When ``index_only`` is set to ``True``, only the local - cache is checked, no requests are made. - - Return: - A list of objects, each containing a ``mirror_url`` and ``spec`` key - indicating all mirrors where the spec can be found. + spec: The spec to look for in binary mirrors + index_only: When ``index_only`` is set to ``True``, only the local cache is checked, no + requests are made. 
""" - if spec is None: - return [] - - if not spack.mirrors.mirror.MirrorCollection(mirrors=mirrors_to_check, binary=True): + if not spack.mirrors.mirror.MirrorCollection(binary=True): tty.debug("No Spack mirrors are currently configured") - return {} + return [] - results = BINARY_INDEX.find_built_spec(spec, mirrors_to_check=mirrors_to_check) + results = BINARY_INDEX.find_built_spec(spec) # The index may be out-of-date. If we aren't only considering indices, try # to fetch directly since we know where the file should be. if not results and not index_only: - results = try_direct_fetch(spec, mirrors=mirrors_to_check) + results = try_direct_fetch(spec) # We found a spec by the direct fetch approach, we might as well # add it to our mapping. if results: @@ -2256,17 +2224,13 @@ def update_cache_and_get_specs(): local index cache (essentially a no-op if it has been done already and nothing has changed on the configured mirrors.) - Throws: + Raises: FetchCacheError """ BINARY_INDEX.update() return BINARY_INDEX.get_all_built_specs() -def clear_spec_cache(): - BINARY_INDEX.clear() - - def get_keys( install: bool = False, trust: bool = False, @@ -2280,12 +2244,12 @@ def get_keys( tty.die("Please add a spack mirror to allow " + "download of build caches.") for mirror in mirror_collection.values(): - for layout_version in SUPPORTED_LAYOUT_VERSIONS: - fetch_url = mirror.fetch_url - # TODO: oci:// does not support signing. - if fetch_url.startswith("oci://"): - continue + if not mirror.signed: + # Don't bother fetching keys for unsigned mirrors + continue + for layout_version in mirror.supported_layout_versions: + fetch_url = mirror.fetch_url if layout_version == 2: _get_keys_v2(fetch_url, install, trust, force) else: @@ -2577,6 +2541,10 @@ class BuildcacheIndexError(spack.error.SpackError): """Raised when a buildcache cannot be read for any reason""" +class BuildcacheIndexNotExists(Exception): + """Buildcache does not contain an index""" + + FetchIndexResult = collections.namedtuple("FetchIndexResult", "etag hash data fresh") @@ -2588,7 +2556,7 @@ def get_index_manifest(self, manifest_response) -> BlobRecord: """Read the response of the manifest request and return a BlobRecord""" cache_class = get_url_buildcache_class(CURRENT_BUILD_CACHE_LAYOUT_VERSION) try: - result = codecs.getreader("utf-8")(manifest_response).read() + result = io.TextIOWrapper(manifest_response, encoding="utf-8").read() except (ValueError, OSError) as e: raise FetchIndexError(f"Remote index {manifest_response.url} is invalid", e) from e @@ -2666,7 +2634,7 @@ def conditional_fetch(self) -> FetchIndexResult: raise FetchIndexError(f"Could not fetch index from {url_index}", e) from e try: - result = codecs.getreader("utf-8")(response).read() + result = io.TextIOWrapper(response, encoding="utf-8").read() except (ValueError, OSError) as e: raise FetchIndexError(f"Remote index {url_index} is invalid") from e @@ -2718,7 +2686,7 @@ def conditional_fetch(self) -> FetchIndexResult: raise FetchIndexError(f"Could not fetch index {url}", e) from e try: - result = codecs.getreader("utf-8")(response).read() + result = io.TextIOWrapper(response, encoding="utf-8").read() except (ValueError, OSError) as e: raise FetchIndexError(f"Remote index {url} is invalid", e) from e @@ -2735,14 +2703,9 @@ def conditional_fetch(self) -> FetchIndexResult: class OCIIndexFetcher(IndexFetcher): - def __init__(self, url_and_version: MirrorURLAndVersion, local_hash, urlopen=None) -> None: + def __init__(self, mirror_metadata: MirrorMetadata, local_hash, urlopen=None) 
-> None: self.local_hash = local_hash - - url = url_and_version.url - - # Remove oci:// prefix - assert url.startswith("oci://") - self.ref = spack.oci.image.ImageReference.from_string(url[6:]) + self.ref = spack.oci.image.ImageReference.from_url(mirror_metadata.url) self.urlopen = urlopen or spack.oci.opener.urlopen def conditional_fetch(self) -> FetchIndexResult: @@ -2781,7 +2744,7 @@ def conditional_fetch(self) -> FetchIndexResult: headers={"Accept": "application/vnd.oci.image.layer.v1.tar+gzip"}, ) ) - result = codecs.getreader("utf-8")(response).read() + result = io.TextIOWrapper(response, encoding="utf-8").read() except (OSError, ValueError) as e: raise FetchIndexError(f"Remote index {url_manifest} is invalid", e) from e @@ -2795,16 +2758,17 @@ def conditional_fetch(self) -> FetchIndexResult: class DefaultIndexFetcher(IndexFetcher): """Fetcher for buildcache index, cache invalidation via manifest contents""" - def __init__(self, url_and_version: MirrorURLAndVersion, local_hash, urlopen=web_util.urlopen): - self.url = url_and_version.url - self.layout_version = url_and_version.version + def __init__(self, mirror_metadata: MirrorMetadata, local_hash, urlopen=web_util.urlopen): + self.url = mirror_metadata.url + self.view = mirror_metadata.view + self.layout_version = mirror_metadata.version self.local_hash = local_hash self.urlopen = urlopen self.headers = {"User-Agent": web_util.SPACK_USER_AGENT} def conditional_fetch(self) -> FetchIndexResult: cache_class = get_url_buildcache_class(layout_version=self.layout_version) - url_index_manifest = cache_class.get_index_url(self.url) + url_index_manifest = cache_class.get_index_url(self.url, self.view) try: response = self.urlopen( @@ -2841,27 +2805,29 @@ def conditional_fetch(self) -> FetchIndexResult: class EtagIndexFetcher(IndexFetcher): """Fetcher for buildcache index, cache invalidation via ETags headers - This class differs from the DefaultIndexFetcher in the following ways: 1) It - is provided with an etag value on creation, rather than an index checksum - value. Note that since we never start out with an etag, the default fetcher - must have been used initially and determined that the etag approach is valid. - 2) It provides this etag value in the 'If-None-Match' request header for the - index manifest. 3) It checks for special exception type and response code - indicating the index manifest is not modified, exiting early and returning - 'Fresh', if encountered. 4) If it needs to actually read the manifest, it - does not need to do any checks of the url scheme to determine whether an - etag should be included in the return value.""" - - def __init__(self, url_and_version: MirrorURLAndVersion, etag, urlopen=web_util.urlopen): - self.url = url_and_version.url - self.layout_version = url_and_version.version + This class differs from the :class:`DefaultIndexFetcher` in the following ways: + + 1. It is provided with an etag value on creation, rather than an index checksum value. Note + that since we never start out with an etag, the default fetcher must have been used initially + and determined that the etag approach is valid. + 2. It provides this etag value in the ``If-None-Match`` request header for the + index manifest. + 3. It checks for special exception type and response code indicating the index manifest is not + modified, exiting early and returning ``Fresh``, if encountered. + 4. 
If it needs to actually read the manifest, it does not need to do any checks of the url + scheme to determine whether an etag should be included in the return value.""" + + def __init__(self, mirror_metadata: MirrorMetadata, etag, urlopen=web_util.urlopen): + self.url = mirror_metadata.url + self.view = mirror_metadata.view + self.layout_version = mirror_metadata.version self.etag = etag self.urlopen = urlopen def conditional_fetch(self) -> FetchIndexResult: # Do a conditional fetch of the index manifest (i.e. using If-None-Match header) cache_class = get_url_buildcache_class(layout_version=self.layout_version) - manifest_url = cache_class.get_index_url(self.url) + manifest_url = cache_class.get_index_url(self.url, self.view) headers = {"User-Agent": web_util.SPACK_USER_AGENT, "If-None-Match": f'"{self.etag}"'} try: @@ -2894,25 +2860,25 @@ def conditional_fetch(self) -> FetchIndexResult: def get_index_fetcher( - scheme: str, url_and_version: MirrorURLAndVersion, cache_entry: Dict[str, str] + scheme: str, mirror_metadata: MirrorMetadata, cache_entry: Dict[str, str] ) -> IndexFetcher: if scheme == "oci": # TODO: Actually etag and OCI are not mutually exclusive... - return OCIIndexFetcher(url_and_version, cache_entry.get("index_hash", None)) + return OCIIndexFetcher(mirror_metadata, cache_entry.get("index_hash", None)) elif cache_entry.get("etag"): - if url_and_version.version < 3: - return EtagIndexFetcherV2(url_and_version.url, cache_entry["etag"]) + if mirror_metadata.version < 3: + return EtagIndexFetcherV2(mirror_metadata.url, cache_entry["etag"]) else: - return EtagIndexFetcher(url_and_version, cache_entry["etag"]) + return EtagIndexFetcher(mirror_metadata, cache_entry["etag"]) else: - if url_and_version.version < 3: + if mirror_metadata.version < 3: return DefaultIndexFetcherV2( - url_and_version.url, local_hash=cache_entry.get("index_hash", None) + mirror_metadata.url, local_hash=cache_entry.get("index_hash", None) ) else: return DefaultIndexFetcher( - url_and_version, local_hash=cache_entry.get("index_hash", None) + mirror_metadata, local_hash=cache_entry.get("index_hash", None) ) diff --git a/lib/spack/spack/bootstrap/__init__.py b/lib/spack/spack/bootstrap/__init__.py index ae7b1f798dd73a..1df4d011e7d294 100644 --- a/lib/spack/spack/bootstrap/__init__.py +++ b/lib/spack/spack/bootstrap/__init__.py @@ -10,20 +10,22 @@ ensure_core_dependencies, ensure_gpg_in_path_or_raise, ensure_patchelf_in_path_or_raise, + ensure_winsdk_external_or_raise, ) from .environment import BootstrapEnvironment, ensure_environment_dependencies from .status import status_message __all__ = [ - "is_bootstrapping", + "all_core_root_specs", + "BootstrapEnvironment", "ensure_bootstrap_configuration", + "ensure_clingo_importable_or_raise", "ensure_core_dependencies", + "ensure_environment_dependencies", "ensure_gpg_in_path_or_raise", - "ensure_clingo_importable_or_raise", "ensure_patchelf_in_path_or_raise", - "all_core_root_specs", - "ensure_environment_dependencies", - "BootstrapEnvironment", + "ensure_winsdk_external_or_raise", + "is_bootstrapping", "status_message", "store_path", ] diff --git a/lib/spack/spack/bootstrap/clingo.py b/lib/spack/spack/bootstrap/clingo.py index 5a0460914467d8..eac32e18656e82 100644 --- a/lib/spack/spack/bootstrap/clingo.py +++ b/lib/spack/spack/bootstrap/clingo.py @@ -11,14 +11,16 @@ """ import pathlib import sys -from typing import Dict, Optional, Tuple +from typing import Dict, Optional, Tuple, Type import spack.vendor.archspec.cpu import spack.compilers.config import 
spack.compilers.libraries import spack.config +import spack.package_base import spack.platforms +import spack.repo import spack.spec import spack.traverse import spack.version @@ -26,8 +28,28 @@ from .config import spec_for_current_python +def _select_best_version( + pkg_cls: Type["spack.package_base.PackageBase"], node: spack.spec.Spec, valid_versions: str +) -> None: + """Try to attach the best known version to a node""" + constraint = spack.version.from_string(valid_versions) + allowed_versions = [v for v in pkg_cls.versions if v.satisfies(constraint)] + try: + best_version = spack.package_base.sort_by_pkg_preference(allowed_versions, pkg=pkg_cls)[0] + except (KeyError, ValueError, IndexError): + return + node.versions.versions = [spack.version.from_string(f"={best_version}")] + + +def _add_compilers_if_missing() -> None: + arch = spack.spec.ArchSpec.default_arch() + if not spack.compilers.config.compilers_for_arch(arch): + spack.compilers.config.find_compilers() + + class ClingoBootstrapConcretizer: def __init__(self, configuration): + _add_compilers_if_missing() self.host_platform = spack.platforms.host() self.host_os = self.host_platform.default_operating_system() self.host_target = spack.vendor.archspec.cpu.host().family @@ -119,14 +141,40 @@ def concretize(self) -> "spack.spec.Spec": s = spack.spec.Spec.from_specfile(str(self.prototype_path())) s._mark_concrete(False) - # Tweak it to conform to the host architecture + # These are nodes in the cmake stack, whose versions are frequently deprecated for + # security reasons. In case there is no external cmake on this machine, we'll update + # their versions to the most preferred, within the valid range, according to the + # repository we know. + to_be_updated = { + pkg_name: (spack.repo.PATH.get_pkg_class(pkg_name), valid_versions) + for pkg_name, valid_versions in { + "ca-certificates-mozilla": ":", + "openssl": "3:3", + "curl": "8:8", + "cmake": "3.16:3", + "libiconv": "1:1", + "ncurses": "6:6", + "m4": "1.4", + }.items() + } + + # Tweak it to conform to the host architecture + update the version of a few dependencies for node in s.traverse(): + # Clear patches, we'll compute them correctly later + node.patches.clear() + if "patches" in node.variants: + del node.variants["patches"] + node.architecture.os = str(self.host_os) node.architecture = self.host_architecture if node.name == "gcc-runtime": node.versions = self.host_compiler.versions + if node.name in to_be_updated: + pkg_cls, valid_versions = to_be_updated[node.name] + _select_best_version(pkg_cls=pkg_cls, node=node, valid_versions=valid_versions) + # Can't use re2c@3.1 with Python 3.6 if self.host_python.satisfies("@3.6"): s["re2c"].versions.versions = [spack.version.from_string("=2.2")] @@ -147,6 +195,7 @@ def concretize(self) -> "spack.spec.Spec": if "libc" in edge.virtuals: edge.spec = self.host_libc + spack.spec._inject_patches_variant(s) s._finalize_concretization() # Work around the fact that the installer calls Spec.dependents() and diff --git a/lib/spack/spack/bootstrap/config.py b/lib/spack/spack/bootstrap/config.py index 1393f7588c7771..ef048e24d23022 100644 --- a/lib/spack/spack/bootstrap/config.py +++ b/lib/spack/spack/bootstrap/config.py @@ -8,14 +8,12 @@ import sys from typing import Any, Dict, Generator, MutableSequence, Sequence -import spack.compilers.config import spack.config import spack.environment import spack.modules import spack.paths import spack.platforms import spack.repo -import spack.spec import spack.store import spack.util.path from 
spack.llnl.util import tty @@ -34,8 +32,9 @@ def spec_for_current_python() -> str: minor version (all patches are ABI compatible with the same minor). See: - https://www.python.org/dev/peps/pep-0513/ - https://stackoverflow.com/a/35801395/771663 + + * https://www.python.org/dev/peps/pep-0513/ + * https://stackoverflow.com/a/35801395/771663 """ version_str = ".".join(str(x) for x in sys.version_info[:2]) return f"python@{version_str}" @@ -137,12 +136,6 @@ def _bootstrap_config_scopes() -> Sequence["spack.config.ConfigScope"]: return config_scopes -def _add_compilers_if_missing() -> None: - arch = spack.spec.ArchSpec.default_arch() - if not spack.compilers.config.compilers_for_arch(arch): - spack.compilers.config.find_compilers() - - @contextlib.contextmanager def _ensure_bootstrap_configuration() -> Generator: spack.repo.PATH.repos # ensure this is instantiated from current config. @@ -160,8 +153,5 @@ def _ensure_bootstrap_configuration() -> Generator: spack.config.set("bootstrap", user_configuration["bootstrap"]) spack.config.set("config", user_configuration["config"]) spack.config.set("repos", user_configuration["repos"]) - # We may need to compile code from sources, so ensure we - # have compilers for the current platform - _add_compilers_if_missing() with spack.modules.disable_modules(), spack_python_interpreter(): yield diff --git a/lib/spack/spack/bootstrap/core.py b/lib/spack/spack/bootstrap/core.py index 5af9a83543d254..025088256acdb8 100644 --- a/lib/spack/spack/bootstrap/core.py +++ b/lib/spack/spack/bootstrap/core.py @@ -7,18 +7,18 @@ bootstrapping mirrors. The logic is quite different from an installation done from a Spack user, because of the following reasons: - 1. The binaries are all compiled on the same OS for a given platform (e.g. they are compiled on - ``centos7`` on ``linux``), but they will be installed and used on the host OS. They are also - targeted at the most generic architecture possible. That makes the binaries difficult to reuse - with other specs in an environment without ad-hoc logic. - 2. Bootstrapping has a fallback procedure where we try to install software by default from the - most recent binaries, and proceed to older versions of the mirror, until we try building from - sources as a last resort. This allows us not to be blocked on architectures where we don't - have binaries readily available, but is also not compatible with the working of environments - (they don't have fallback procedures). - 3. Among the binaries we have clingo, so we can't concretize that with clingo :-) - 4. clingo, GnuPG and patchelf binaries need to be verified by sha256 sum (all the other binaries - we might add on top of that in principle can be verified with GPG signatures). +1. The binaries are all compiled on the same OS for a given platform (e.g. they are compiled on + ``centos7`` on ``linux``), but they will be installed and used on the host OS. They are also + targeted at the most generic architecture possible. That makes the binaries difficult to reuse + with other specs in an environment without ad-hoc logic. +2. Bootstrapping has a fallback procedure where we try to install software by default from the + most recent binaries, and proceed to older versions of the mirror, until we try building from + sources as a last resort. This allows us not to be blocked on architectures where we don't + have binaries readily available, but is also not compatible with the working of environments + (they don't have fallback procedures). +3. 
Among the binaries we have clingo, so we can't concretize that with clingo :-) +4. clingo, GnuPG and patchelf binaries need to be verified by sha256 sum (all the other binaries + we might add on top of that in principle can be verified with GPG signatures). """ import copy @@ -294,8 +294,8 @@ def try_import(self, module: str, abstract_spec_str: str) -> bool: PackageInstaller( [concrete_spec.package], fail_fast=True, - package_use_cache=False, - dependencies_use_cache=False, + root_policy="source_only", + dependencies_policy="source_only", ).install() if _try_import_from_store(module, query_spec=concrete_spec, query_info=info): @@ -554,7 +554,9 @@ def ensure_winsdk_external_or_raise() -> None: """ if set(["win-sdk", "wgl"]).issubset(spack.config.get("packages").keys()): return - externals = spack.detection.by_path(["win-sdk", "wgl"]) + tty.debug("Detecting Windows SDK and WGL installations") + # find the externals sequentially to avoid subprocesses being spawned + externals = spack.detection.by_path(["win-sdk", "wgl"], max_workers=1) if not set(["win-sdk", "wgl"]) == externals.keys(): missing_packages_lst = [] if "wgl" not in externals: diff --git a/lib/spack/spack/bootstrap/environment.py b/lib/spack/spack/bootstrap/environment.py index d292570b01ba27..144bd0bd7cf019 100644 --- a/lib/spack/spack/bootstrap/environment.py +++ b/lib/spack/spack/bootstrap/environment.py @@ -109,7 +109,7 @@ def _write_spack_yaml_file(self) -> None: env = spack.tengine.make_environment() template = env.get_template("bootstrap/spack.yaml") context = { - "python_spec": spec_for_current_python(), + "python_spec": f"{spec_for_current_python()}+ctypes", "python_prefix": sys.exec_prefix, "architecture": spack.vendor.archspec.cpu.host().family, "environment_path": self.environment_root(), diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py index fe5f204aae87ae..24591e6a351913 100644 --- a/lib/spack/spack/build_environment.py +++ b/lib/spack/spack/build_environment.py @@ -165,7 +165,9 @@ def jobserver_enabled(): return "MAKEFLAGS" in os.environ and "--jobserver" in os.environ["MAKEFLAGS"] -def get_effective_jobs(jobs, parallel=True, supports_jobserver=False): +def get_effective_jobs( + jobs, parallel: bool = True, supports_jobserver: bool = False +) -> Optional[int]: """Return the number of jobs, or None if supports_jobserver and a jobserver is detected.""" if not parallel or jobs <= 1 or env_flag(SPACK_NO_PARALLEL_MAKE): return 1 @@ -249,7 +251,7 @@ def __call__( jobs_env_supports_jobserver: bool = False, **kwargs, ) -> Optional[str]: - """Runs this "make" executable in a subprocess. + """Runs this ``make`` executable in a subprocess. Args: parallel: if False, parallelism is disabled @@ -471,12 +473,12 @@ def optimization_flags(compiler, target): def set_wrapper_variables(pkg, env): """Set environment variables used by the Spack compiler wrapper (which have the prefix - `SPACK_`) and also add the compiler wrappers to PATH. + ``SPACK_``) and also add the compiler wrappers to PATH. This determines the injected -L/-I/-rpath options; each of these specifies a search order and this function computes these options in a manner that is intended to match the DAG traversal - order in `SetupContext`. TODO: this is not the case yet, we're using post order, SetupContext - is using topo order.""" + order in ``SetupContext``. 
TODO: this is not the case yet, we're using post order, + ``SetupContext`` is using topo order.""" # Set compiler flags injected from the spec set_wrapper_environment_variables_for_flags(pkg, env) @@ -1126,6 +1128,8 @@ def _setup_pkg_and_run( input_pipe: Optional[Connection], jsfd1: Optional[Connection], jsfd2: Optional[Connection], + stdout_pipe: Optional[Connection] = None, + stderr_pipe: Optional[Connection] = None, ): """Main entry point in the child process for Spack builds. @@ -1161,7 +1165,8 @@ def _setup_pkg_and_run( input_multiprocess_fd: stdin from the parent (not passed currently on Windows) jsfd1: gmake Jobserver file descriptor 1. jsfd2: gmake Jobserver file descriptor 2. - + stdout_pipe: pipe to redirect stdout to + stderr_pipe: pipe to redirect stderr to """ context: str = kwargs.get("context", "build") @@ -1173,6 +1178,12 @@ def _setup_pkg_and_run( # child, so we undo Python's precaution. closefd=False since Connection has ownership. if input_pipe is not None: sys.stdin = os.fdopen(input_pipe.fileno(), closefd=False) + if stdout_pipe is not None: + os.dup2(stdout_pipe.fileno(), sys.stdout.fileno()) + stdout_pipe.close() + if stderr_pipe is not None: + os.dup2(stderr_pipe.fileno(), sys.stderr.fileno()) + stderr_pipe.close() pkg = serialized_pkg.restore() @@ -1210,7 +1221,10 @@ def _setup_pkg_and_run( # 'pkg' is not defined yet pass elif context == "test": - logfile = os.path.join(pkg.test_suite.stage, pkg.test_suite.test_log_name(pkg.spec)) + logfile = os.path.join( + pkg.test_suite.stage, # type: ignore[union-attr] + pkg.test_suite.test_log_name(pkg.spec), # type: ignore[union-attr] + ) error_msg = str(e) if isinstance(e, (spack.multimethod.NoSuchMethodError, AttributeError)): @@ -1247,7 +1261,7 @@ class BuildProcess: """Class used to manage builds launched by Spack. Each build is launched in its own child process, and the main Spack process - tracks each child with a ``BuildProcess`` object. `BuildProcess`` is used to: + tracks each child with a ``BuildProcess`` object. ``BuildProcess`` is used to: - Start and monitor an active child process. - Clean up its processes and resources when the child process completes. - Kill the child process if needed. @@ -1343,6 +1357,8 @@ def child_fun(): """ read_pipe, write_pipe = multiprocessing.Pipe(duplex=False) input_fd = None + stdout_fd = None + stderr_fd = None jobserver_fd1 = None jobserver_fd2 = None @@ -1352,6 +1368,16 @@ def child_fun(): # Forward sys.stdin when appropriate, to allow toggling verbosity if sys.platform != "win32" and sys.stdin.isatty() and hasattr(sys.stdin, "fileno"): input_fd = Connection(os.dup(sys.stdin.fileno())) + + # If our process has redirected stdout/stderr after the forkserver was started, we need to + # make the forked processes use the new file descriptors. 
+ if multiprocessing.get_start_method() == "forkserver": + try: + stdout_fd = Connection(os.dup(sys.stdout.fileno())) + stderr_fd = Connection(os.dup(sys.stderr.fileno())) + except Exception: + pass + mflags = os.environ.get("MAKEFLAGS") if mflags is not None: m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags) @@ -1369,6 +1395,8 @@ def child_fun(): input_fd, jobserver_fd1, jobserver_fd2, + stdout_fd, + stderr_fd, ), read_pipe=read_pipe, timeout=timeout, @@ -1391,6 +1419,10 @@ def child_fun(): # Close the input stream in the parent process if input_fd is not None: input_fd.close() + if stdout_fd is not None: + stdout_fd.close() + if stderr_fd is not None: + stderr_fd.close() return p @@ -1408,18 +1440,17 @@ def exitcode_msg(process): typ = "exit" if process.exitcode >= 0 else "signal" return f"{typ} {abs(process.exitcode)}" - timeout = process.timeout - process.join(timeout=timeout) - if process.is_alive(): - warnings.warn(f"Terminating process, since the timeout of {timeout}s was exceeded") - process.terminate() - try: # Check if information from the read pipe has been received. child_result = process.read_pipe.recv() except EOFError: raise InstallError(f"The process has stopped unexpectedly ({exitcode_msg(process)})") - + finally: + timeout = process.timeout + process.join(timeout=timeout) + if process.is_alive(): + warnings.warn(f"Terminating process, since the timeout of {timeout}s was exceeded") + process.terminate() # If returns a StopPhase, raise it if isinstance(child_result, spack.error.StopPhase): raise child_result @@ -1515,7 +1546,7 @@ def make_stack(tb, stack=None): class ChildError(InstallError): """Special exception class for wrapping exceptions from child processes - in Spack's build environment. + in Spack's build environment. The main features of a ChildError are: @@ -1536,11 +1567,11 @@ class ChildError(InstallError): The long_message of a ChildError displays one of two things: - 1. If the original error was a ProcessError, indicating a command - died during the build, we'll show context from the build log. + 1. If the original error was a ProcessError, indicating a command + died during the build, we'll show context from the build log. - 2. If the original error was any other type of error, we'll show - context from the Python code. + 2. If the original error was any other type of error, we'll show + context from the Python code. SpackError handles displaying the special traceback if we're in debug mode with spack -d. 
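The stdout/stderr forwarding added to _setup_pkg_and_run and BuildProcess above comes down to one mechanism: duplicate the parent's pipe descriptor over the child's standard stream before the build runs. A minimal sketch of that mechanism, independent of Spack's plumbing (the child/parent names are illustrative only):

    import multiprocessing
    import os
    import sys

    def child(stdout_conn):
        # Point the child's stdout file descriptor at the pipe inherited
        # from the parent, then drop the now-redundant handle.
        os.dup2(stdout_conn.fileno(), sys.stdout.fileno())
        stdout_conn.close()
        print("captured by the parent", flush=True)

    if __name__ == "__main__":
        read_end, write_end = multiprocessing.Pipe(duplex=False)
        proc = multiprocessing.Process(target=child, args=(write_end,))
        proc.start()
        write_end.close()  # the parent keeps only the read end
        proc.join()
        print(os.read(read_end.fileno(), 1024).decode(), end="")

This is also why the patch performs the duplication only under the forkserver start method: a forkserver child inherits the descriptors the server saw at startup, not any redirection the parent applied afterwards.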
diff --git a/lib/spack/spack/buildcache_migrate.py b/lib/spack/spack/buildcache_migrate.py index cb646d9e120997..0782bba17f4525 100644 --- a/lib/spack/spack/buildcache_migrate.py +++ b/lib/spack/spack/buildcache_migrate.py @@ -2,14 +2,14 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import codecs +import io import json import os import pathlib import tempfile from typing import NamedTuple -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.database as spack_db import spack.error import spack.llnl.util.tty as tty @@ -105,7 +105,7 @@ def _migrate_spec( for meta_url in v2_metadata_urls: try: _, _, meta_file = web_util.read_from_url(meta_url) - spec_contents = codecs.getreader("utf-8")(meta_file).read() + spec_contents = io.TextIOWrapper(meta_file, encoding="utf-8").read() v2_spec_url = meta_url break except (web_util.SpackWebError, OSError): @@ -257,8 +257,11 @@ def migrate( signing_key = "" if not unsigned: try: - signing_key = bindist.select_signing_key() - except (bindist.NoKeyException, bindist.PickKeyException): + signing_key = spack.binary_distribution.select_signing_key() + except ( + spack.binary_distribution.NoKeyException, + spack.binary_distribution.PickKeyException, + ): raise MigrationException( "Signed migration requires exactly one secret key in keychain" ) @@ -277,7 +280,7 @@ def migrate( try: _, _, index_file = web_util.read_from_url(index_url) - contents = codecs.getreader("utf-8")(index_file).read() + contents = io.TextIOWrapper(index_file, encoding="utf-8").read() except (web_util.SpackWebError, OSError): raise MigrationException("Buildcache migration requires a buildcache index") @@ -286,13 +289,14 @@ def migrate( with open(index_path, "w", encoding="utf-8") as fd: fd.write(contents) - db = bindist.BuildCacheDatabase(tmpdir) + db = spack.binary_distribution.BuildCacheDatabase(tmpdir) db._read_from_file(pathlib.Path(index_path)) specs_to_migrate = [ s for s in db.query_local(installed=InstallRecordStatus.ANY) - if not s.external and db.query_local_by_spec_hash(s.dag_hash()).in_buildcache + # TODO: make it easier to get install records associated with specs + if not s.external and db._data[s.dag_hash()].in_buildcache ] # Run the tasks in parallel if possible @@ -329,13 +333,13 @@ def migrate( # Push the migrated mirror index index_tmpdir = os.path.join(tmpdir, "rebuild_index") os.mkdir(index_tmpdir) - bindist._push_index(db, index_tmpdir, mirror_url) + spack.binary_distribution._push_index(db, index_tmpdir, mirror_url) # Push the public part of the signing key if not unsigned: keys_tmpdir = os.path.join(tmpdir, "keys") os.mkdir(keys_tmpdir) - bindist._url_push_keys( + spack.binary_distribution._url_push_keys( mirror_url, keys=[signing_key], update_index=True, tmpdir=keys_tmpdir ) else: diff --git a/lib/spack/spack/buildcache_prune.py b/lib/spack/spack/buildcache_prune.py index 31f33044191798..b5d3a144e5176f 100644 --- a/lib/spack/spack/buildcache_prune.py +++ b/lib/spack/spack/buildcache_prune.py @@ -1,14 +1,18 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) - import os import pathlib +import re import tempfile +import uuid from concurrent.futures import Future, as_completed -from typing import Callable, Dict, List, Optional, Set, Tuple, cast +from fnmatch import fnmatch +from pathlib import Path +from typing import Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple, cast -import spack.binary_distribution as bindist +import spack.binary_distribution +import spack.error import spack.llnl.util.tty as tty import spack.stage import spack.util.parallel @@ -19,6 +23,7 @@ from .mirrors.mirror import Mirror from .url_buildcache import ( CURRENT_BUILD_CACHE_LAYOUT_VERSION, + BuildcacheComponent, URLBuildcacheEntry, get_entries_from_cache, get_url_buildcache_class, @@ -27,7 +32,7 @@ def _fetch_manifests( mirror: Mirror, tmpspecsdir: str -) -> Tuple[List[str], Callable[[str], URLBuildcacheEntry], List[str]]: +) -> Tuple[Dict[str, float], Callable[[str], URLBuildcacheEntry], List[str]]: """ Fetch all manifests from the buildcache for a given mirror. @@ -39,17 +44,23 @@ def _fetch_manifests( :return: A tuple with three elements - a list of manifest files in the mirror, a callable to read each manifest, and a list of blobs in the mirror. """ - manifests, read_fn = get_entries_from_cache(url=mirror.fetch_url, tmpspecsdir=tmpspecsdir) - url_to_list = url_util.join(mirror.fetch_url, bindist.buildcache_relative_blobs_path()) + manifest_file_to_mtime_mapping, read_fn = get_entries_from_cache( + mirror.fetch_url, tmpspecsdir, BuildcacheComponent.MANIFEST + ) + url_to_list = url_util.join( + mirror.fetch_url, spack.binary_distribution.buildcache_relative_blobs_path() + ) tty.debug(f"Listing blobs in {url_to_list}") blobs = web_util.list_url(url_to_list, recursive=True) or [] if not blobs: tty.warn(f"Unable to list blobs in {url_to_list}") blobs = [ - url_util.join(mirror.fetch_url, bindist.buildcache_relative_blobs_path(), blob_name) + url_util.join( + mirror.fetch_url, spack.binary_distribution.buildcache_relative_blobs_path(), blob_name + ) for blob_name in blobs ] - return manifests, read_fn, blobs + return manifest_file_to_mtime_mapping, read_fn, blobs def _delete_manifests_from_cache_aws( @@ -63,13 +74,12 @@ def _delete_manifests_from_cache_aws( cache_class = get_url_buildcache_class(layout_version=CURRENT_BUILD_CACHE_LAYOUT_VERSION) - include_pattern = cache_class.get_buildcache_component_include_pattern() + include_pattern = cache_class.get_buildcache_component_include_pattern( + BuildcacheComponent.MANIFEST + ) file_count_before_deletion = len(list(pathlib.Path(tmpspecsdir).rglob(include_pattern))) - # Add file:// prefix to URLs so that they are deleted properly by web_util.remove_url - urls_to_delete = {url_util.path_to_file_url(url) for url in urls_to_delete} - tty.debug(f"Deleting {len(urls_to_delete)} entries from cache at {url}") deleted = _delete_entries_from_cache_manual(tmpspecsdir, urls_to_delete) tty.debug(f"Deleted {deleted} entries from cache at {url}") @@ -116,6 +126,32 @@ def _delete_entries_from_cache_manual(url: str, urls_to_delete: Set[str]) -> int return pruned_objects +def _delete_entries_from_cache( + mirror: Mirror, tmpspecsdir: str, manifests_to_delete: Set[str], blobs_to_delete: Set[str] +) -> int: + pruned_manifests: Optional[int] = None + + if mirror.fetch_url.startswith("s3://"): + pruned_manifests = _delete_manifests_from_cache_aws( + url=mirror.fetch_url, tmpspecsdir=tmpspecsdir, urls_to_delete=manifests_to_delete + ) + + if pruned_manifests is None: + # If the AWS CLI 
deletion failed, we fall back to deleting both manifests + # and blobs with the fallback method. + objects_to_delete = blobs_to_delete.union(manifests_to_delete) + pruned_objects = 0 + else: + # If the AWS CLI deletion succeeded, we only need to worry about + # deleting the blobs, since the manifests have already been deleted. + objects_to_delete = blobs_to_delete + pruned_objects = pruned_manifests + + return pruned_objects + _delete_entries_from_cache_manual( + url=mirror.fetch_url, urls_to_delete=objects_to_delete + ) + + def _delete_object(url: str) -> int: try: web_util.remove_url(url=url) @@ -126,11 +162,43 @@ def _delete_object(url: str) -> int: return 0 +def _object_has_prunable_mtime(url: str, pruning_started_at: float) -> Tuple[str, bool]: + """Check if an object's modification time makes it eligible for pruning. + + Objects modified after pruning started should not be pruned to avoid + race conditions with concurrent uploads. + """ + stat_result = web_util.stat_url(url) + assert stat_result is not None + if stat_result[1] > pruning_started_at: + tty.verbose(f"Skipping deletion of {url} because it was modified after pruning started") + return url, False + return url, True + + +def _filter_new_specs(urls: Iterable[str], pruning_started_at: float) -> Iterator[str]: + """Filter out URLs that were modified after pruning started. + + Runs parallel modification time checks on all URLs and yields only + those that are old enough to be safely pruned. + """ + with spack.util.parallel.make_concurrent_executor() as executor: + futures = [] + for url in urls: + futures.append(executor.submit(_object_has_prunable_mtime, url, pruning_started_at)) + + for manifest_or_blob_future in as_completed(futures): + url, has_prunable_mtime = manifest_or_blob_future.result() + if has_prunable_mtime: + yield url + + def _prune_orphans( mirror: Mirror, manifests: List[str], read_fn: Callable[[str], URLBuildcacheEntry], blobs: List[str], + pruning_started_at: float, tmpspecsdir: str, dry_run: bool, ) -> int: @@ -198,11 +266,17 @@ def _prune_orphans( if not orphaned_blobs and not orphaned_manifests: return 0 + # Filter out any new specs that have been uploaded since the pruning started + orphaned_blobs = set(_filter_new_specs(orphaned_blobs, pruning_started_at)) + orphaned_manifests = set(_filter_new_specs(orphaned_manifests, pruning_started_at)) + if orphaned_blobs: tty.info(f"Found {len(orphaned_blobs)} blob(s) with no manifest") if orphaned_manifests: tty.info(f"Found {len(orphaned_manifests)} manifest(s) that are missing blobs") + # If dry run, just print the manifests and blobs that would be deleted + # and exit early. if dry_run: pruned_object_count = len(orphaned_blobs) + len(orphaned_manifests) for manifest in orphaned_manifests: @@ -213,27 +287,12 @@ def _prune_orphans( tty.info(f" Would prune blob: {blob}") return pruned_object_count - # Try to delete the orphaned manifests using the AWS CLI, - # if possible. - pruned_manifests: Optional[int] = None - if mirror.fetch_url.startswith("s3://"): - pruned_manifests = _delete_manifests_from_cache_aws( - url=mirror.fetch_url, tmpspecsdir=tmpspecsdir, urls_to_delete=orphaned_manifests - ) - - if pruned_manifests is None: - # If the AWS CLI deletion failed, we fall back to deleting both manifests - # and blobs with the fallback method. 
- orphans_to_delete = orphaned_blobs.union(orphaned_manifests) - pruned_object_count = 0 - else: - # If the AWS CLI deletion succeeded, we only need to worry about - # deleting the blobs, since the manifests have already been deleted. - orphans_to_delete = orphaned_blobs - pruned_object_count = pruned_manifests - - pruned_object_count += _delete_entries_from_cache_manual( - url=mirror.fetch_url, urls_to_delete=orphans_to_delete + # Otherwise, perform the deletions. + pruned_object_count = _delete_entries_from_cache( + mirror=mirror, + tmpspecsdir=tmpspecsdir, + manifests_to_delete=orphaned_manifests, + blobs_to_delete=orphaned_blobs, ) for manifest in orphaned_manifests: @@ -244,24 +303,135 @@ def _prune_orphans( return pruned_object_count -def prune(mirror: Mirror, dry_run: bool) -> None: +def prune_direct( + mirror: Mirror, keeplist_file: pathlib.Path, pruning_started_at: float, dry_run: bool +) -> None: + """ + Execute direct pruning for a given mirror using a keeplist file. + + This function reads a file containing spec hashes to keep, then deletes + all other spec manifests from the buildcache. + Note that this function does *not* prune the blobs associated with the manifests; + to do that, `prune_orphan` must be invoked to clean up the now-orphaned blobs. + + Args: + mirror: Mirror to prune + keeplist_file: Path to file containing newline-delimited hashes to keep + pruning_started_at: Timestamp of when the pruning started + dry_run: Whether to perform a dry run without actually deleting + """ + tty.info("Running Direct Pruning") + tty.debug(f"Direct pruning mirror: {mirror.fetch_url}" + (" (dry run)" if dry_run else "")) + + keep_hashes: Set[str] = set() + for line in keeplist_file.read_text().splitlines(): + keep_hash = line.strip().lstrip("/") + if len(keep_hash) != 32: + raise MalformedKeepListException(f"Found malformed hash in keeplist: {line}") + keep_hashes.add(keep_hash) + + if not keep_hashes: + raise BuildcachePruningException(f"No hashes found in keeplist file: {keeplist_file}") + + tty.info(f"Loaded {len(keep_hashes)} hashes to keep from {keeplist_file}") + total_pruned: Optional[int] = None + with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpspecsdir: + try: + manifest_to_mtime_mapping, read_fn, blob_list = _fetch_manifests(mirror, tmpspecsdir) + except Exception as e: + raise BuildcachePruningException("Error getting entries from buildcache") from e + + # Determine which manifests correspond to specs we want to prune + manifests_to_prune: List[str] = [] + specs_to_prune: List[str] = [] + + for manifest in manifest_to_mtime_mapping.keys(): + if not fnmatch( + manifest, + URLBuildcacheEntry.get_buildcache_component_include_pattern( + BuildcacheComponent.SPEC + ), + ): + tty.info(f"Found a non-spec manifest at {manifest}, skipping...") + continue + + # Attempt to regex match the manifest name in order to extract the name, version, + # and hash for the spec. + manifest_name = manifest.split("/")[-1] # strip off parent directories + regex_match = re.match(r"([^ ]+)-([^- ]+)[-_]([^-_\. ]+)", manifest_name) + + if regex_match is None: + # This should never happen, unless the buildcache is somehow corrupted + # and/or there is a bug. 
+ raise BuildcachePruningException( + "Unable to extract spec name, version, and hash from " + f'the manifest named "{manifest_name}"' + ) + + spec_name, spec_version, spec_hash = regex_match.groups() + + # Chop off any prefix/parent file path to get just the name + spec_name = pathlib.Path(spec_name).name + + if spec_hash not in keep_hashes: + manifests_to_prune.append(manifest) + specs_to_prune.append(f"{spec_name}/{spec_hash[:7]}") + + if not manifests_to_prune: + tty.info("No specs to prune - all specs are in the keeplist") + return + + tty.info(f"Found {len(manifests_to_prune)} spec(s) to prune") + + if dry_run: + for spec_name in specs_to_prune: + tty.info(f" Would prune: {spec_name}") + total_pruned = len(manifests_to_prune) + else: + manifests_to_delete = set(_filter_new_specs(manifests_to_prune, pruning_started_at)) + + total_pruned = _delete_entries_from_cache( + mirror=mirror, + tmpspecsdir=tmpspecsdir, + manifests_to_delete=manifests_to_delete, + blobs_to_delete=set(), + ) + + if dry_run: + tty.info(f"Would have pruned {total_pruned} objects from mirror: {mirror.fetch_url}") + else: + tty.info(f"Pruned {total_pruned} objects from mirror: {mirror.fetch_url}") + if total_pruned > 0: + tty.info( + "As a consequence of pruning, the buildcache index is now likely out of date." + ) + tty.info("Run `spack buildcache update-index` to update the index for this mirror.") + + +def prune_orphan(mirror: Mirror, pruning_started_at: float, dry_run: bool) -> None: """ Execute the pruning process for a given mirror. Currently, this function only performs the pruning of orphaned manifests and blobs. """ + tty.info("=== Orphan Pruning Phase ===") tty.debug(f"Pruning mirror: {mirror.fetch_url}" + (" (dry run)" if dry_run else "")) total_pruned = 0 with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpspecsdir: - manifest_list, read_fn, blob_list = _fetch_manifests(mirror, tmpspecsdir) + try: + manifest_to_mtime_mapping, read_fn, blob_list = _fetch_manifests(mirror, tmpspecsdir) + manifests = list(manifest_to_mtime_mapping.keys()) + except Exception as e: + raise BuildcachePruningException("Error getting entries from buildcache") from e while True: # Continue pruning until no more orphaned objects are found pruned = _prune_orphans( mirror=mirror, - manifests=manifest_list, + manifests=manifests, read_fn=read_fn, blobs=blob_list, + pruning_started_at=pruning_started_at, tmpspecsdir=tmpspecsdir, dry_run=dry_run, ) @@ -285,3 +455,73 @@ def prune(mirror: Mirror, dry_run: bool) -> None: tty.info( "Run `spack buildcache update-index` to update the index for this mirror." ) + + +def get_buildcache_normalized_time(mirror: Mirror) -> float: + """ + Get the current time as reported by the buildcache. + + This is necessary because different buildcache implementations may use different + time formats/time zones. This function creates a temporary file, calls `stat_url` + on it, and then deletes it. This guarantees that the time used for the beginning + of the pruning is consistent across all buildcache implementations. 
+ """ + with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as f: + tmpdir = Path(f) + touch_file = tmpdir / f".spack-prune-marker-{uuid.uuid4()}" + touch_file.touch() + remote_path = url_util.join(mirror.push_url, touch_file.name) + + web_util.push_to_url( + local_file_path=str(touch_file), remote_path=remote_path, keep_original=True + ) + + stat_info = web_util.stat_url(remote_path) + assert stat_info is not None + start_time = stat_info[1] + + web_util.remove_url(remote_path) + + return start_time + + +def prune_buildcache(mirror: Mirror, keeplist: Optional[str] = None, dry_run: bool = False): + """ + Runs buildcache pruning for a given mirror. + + Args: + mirror: Mirror to prune + keeplist_file: Path to file containing newline-delimited hashes to keep + dry_run: Whether to perform a dry run without actually deleting + """ + # Determine the time to use as the "started at" time for pruning. + # If a cache index exists, use that time. Otherwise, use the current time (normalized + # to the buildcache's time zone). + cache_index_url = URLBuildcacheEntry.get_index_url(mirror_url=mirror.fetch_url) + stat_result = web_util.stat_url(cache_index_url) + if stat_result is not None: + started_at = stat_result[1] + else: + started_at = get_buildcache_normalized_time(mirror) + + if keeplist: + prune_direct(mirror, pathlib.Path(keeplist), started_at, dry_run) + + prune_orphan(mirror, started_at, dry_run) + + +class BuildcachePruningException(spack.error.SpackError): + """ + Raised when pruning fails irrevocably + """ + + pass + + +class MalformedKeepListException(BuildcachePruningException): + """ + Raised when the keeplist passed to the direct pruner + is invalid or malformed in some way + """ + + pass diff --git a/lib/spack/spack/builder.py b/lib/spack/spack/builder.py index 1e435bd5ab601f..1a6bcc6feeb70c 100644 --- a/lib/spack/spack/builder.py +++ b/lib/spack/spack/builder.py @@ -20,7 +20,7 @@ from spack.error import SpackError from spack.util.prefix import Prefix -#: Builder classes, as registered by the "builder" decorator +#: Builder classes, as registered by the ``builder`` decorator BUILDER_CLS: Dict[str, Type["Builder"]] = {} #: Map id(pkg) to a builder, to avoid creating multiple @@ -424,17 +424,19 @@ class BaseBuilder(metaclass=BuilderMeta): class AnyBuilder(BaseBuilder): @run_after("install") def fixup_install(self): - # do something after the package is installed - pass + # do something after the package is installed + pass def setup_build_environment(self, env: EnvironmentModifications) -> None: - env.set("MY_ENV_VAR", "my_value") + env.set("MY_ENV_VAR", "my_value") + + + class CMakeBuilder(cmake.CMakeBuilder, AnyBuilder): + pass - class CMakeBuilder(cmake.CMakeBuilder, AnyBuilder): - pass - class AutotoolsBuilder(autotools.AutotoolsBuilder, AnyBuilder): - pass + class AutotoolsBuilder(autotools.AutotoolsBuilder, AnyBuilder): + pass """ def __init__(self, pkg: spack.package_base.PackageBase) -> None: @@ -507,7 +509,7 @@ class Builder(BaseBuilder, collections.abc.Sequence): """A builder is a class that, given a package object (i.e. associated with concrete spec), knows how to install it. - The builder behaves like a sequence, and when iterated over return the "phases" of the + The builder behaves like a sequence, and when iterated over return the ``phases`` of the installation in the correct order. """ @@ -636,13 +638,16 @@ class BuilderWithDefaults(Builder): def apply_macos_rpath_fixups(builder: Builder): """On Darwin, make installed libraries more easily relocatable. 
- Some build systems (handrolled, autotools, makefiles) can set their own - rpaths that are duplicated by spack's compiler wrapper. This fixup - interrogates, and postprocesses if necessary, all libraries installed - by the code. + Some build systems (handrolled, autotools, makefiles) can set their own rpaths that are + duplicated by spack's compiler wrapper. This fixup interrogates, and postprocesses if + necessary, all libraries installed by the code. + + It should be added as a :func:`~spack.phase_callbacks.run_after` to packaging systems (or + individual packages) that do not install relocatable libraries by default. + + Example:: - It should be added as a @run_after to packaging systems (or individual - packages) that do not install relocatable libraries by default. + run_after("install", when="platform=darwin")(apply_macos_rpath_fixups) Args: builder: builder that installed the package @@ -736,7 +741,7 @@ def install(self, pkg: Package, spec: Spec, prefix: Prefix) -> None: pass """ - #: A generic package has only the "install" phase + #: A generic package has only the ``install`` phase phases = ("install",) #: Names associated with package methods in the old build-system format diff --git a/lib/spack/spack/ci/__init__.py b/lib/spack/spack/ci/__init__.py index fa995f066e9723..8906e9aa6d966c 100644 --- a/lib/spack/spack/ci/__init__.py +++ b/lib/spack/spack/ci/__init__.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import base64 -import codecs +import io import json import os import pathlib @@ -14,11 +14,11 @@ import tempfile import zipfile from collections import namedtuple -from typing import Callable, Dict, List, Optional, Set, Tuple, Union +from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple from urllib.request import Request import spack -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.builder import spack.config as cfg import spack.environment as ev @@ -41,7 +41,6 @@ from spack.error import SpackError from spack.llnl.util.tty.color import cescape, colorize from spack.reporters.cdash import SPACK_CDASH_TIMEOUT -from spack.version import GitVersion, StandardVersion from .common import ( IS_WINDOWS, @@ -93,19 +92,17 @@ def get_change_revisions(path: str) -> Tuple[Optional[str], Optional[str]]: return None, None -def get_added_versions( - checksums_version_dict: Dict[str, Union[StandardVersion, GitVersion]], - path: str, - from_ref: str = "HEAD~1", - to_ref: str = "HEAD", -) -> List[Union[StandardVersion, GitVersion]]: - """Get a list of the versions added between `from_ref` and `to_ref`. +def filter_added_checksums( + checksums: Iterable[str], path: str, from_ref: str = "HEAD~1", to_ref: str = "HEAD" +) -> List[str]: + """Get a list of the version checksums added between ``from_ref`` and ``to_ref``. + Args: - checksums_version_dict (Dict): all package versions keyed by known checksums. 
- path (str): path to the package.py - from_ref (str): oldest git ref, defaults to `HEAD~1` - to_ref (str): newer git ref, defaults to `HEAD` - Returns: list of versions added between refs + checksums: an iterable of checksums to look for in the diff + path: path to the package.py + from_ref: oldest git ref, defaults to ``HEAD~1`` + to_ref: newer git ref, defaults to ``HEAD`` + Returns: list of version checksums added between refs """ git_exe = spack.util.git.git(required=True) @@ -115,13 +112,13 @@ def get_added_versions( # Store added and removed versions # Removed versions are tracked here to determine when versions are moved in a file # and show up as both added and removed in a git diff. - added_checksums = set() - removed_checksums = set() + added_checksums: Set[str] = set() + removed_checksums: Set[str] = set() # Scrape diff for modified versions and prune added versions if they show up # as also removed (which means they've actually just moved in the file and # we shouldn't need to rechecksum them) - for checksum in checksums_version_dict.keys(): + for checksum in checksums: for line in diff_lines: if checksum in line: if line.startswith("+"): @@ -129,13 +126,13 @@ def get_added_versions( if line.startswith("-"): removed_checksums.add(checksum) - return [checksums_version_dict[c] for c in added_checksums - removed_checksums] + return list(added_checksums - removed_checksums) def stack_changed(env_path: str) -> bool: """Given an environment manifest path, return whether or not the stack was changed. Returns True iff the environment manifest changed between the provided revisions (or - additionally if the `.gitlab-ci.yml` file itself changed).""" + additionally if the ``.gitlab-ci.yml`` file itself changed).""" # git returns posix paths always, normalize input to be compatible with that env_path = spack.llnl.path.convert_to_posix_path(os.path.dirname(env_path)) @@ -246,19 +243,19 @@ def create_already_built_pruner(check_index_only: bool = True) -> PrunerCallback """Return a filter that prunes specs already present on any configured mirrors""" try: - bindist.BINARY_INDEX.update() - except bindist.FetchCacheError as e: + spack.binary_distribution.BINARY_INDEX.update() + except spack.binary_distribution.FetchCacheError as e: tty.warn(e) def rebuild_filter(s: spack.spec.Spec) -> RebuildDecision: - spec_locations = bindist.get_mirrors_for_spec(spec=s, index_only=check_index_only) + spec_locations = spack.binary_distribution.get_mirrors_for_spec( + spec=s, index_only=check_index_only + ) if not spec_locations: return RebuildDecision(True, "not found anywhere") - urls = ",".join( - [f"{loc.url_and_version.url}@v{loc.url_and_version.version}" for loc in spec_locations] - ) + urls = ",".join(f"{loc.url}@v{loc.version}" for loc in spec_locations) message = f"up-to-date [{urls}]" return RebuildDecision(False, message) @@ -371,6 +368,7 @@ def collect_pipeline_options(env: ev.Environment, args) -> PipelineOptions: options.prune_unaffected = args.prune_unaffected options.prune_external = args.prune_externals options.check_index_only = args.index_only + options.forward_variables = args.forward_variable or [] ci_config = cfg.get("ci") @@ -518,16 +516,19 @@ def generate_pipeline(env: ev.Environment, args) -> None: # packagen name not in that list. 
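The filter_added_checksums rewrite above rests on a small set-difference idiom: a checksum that shows up on both '+' and '-' lines of the diff merely moved within the file and does not need re-verification. A self-contained sketch with made-up diff content:

    diff_lines = [
        '+    version("1.2.0", sha256="aaa111")',
        '-    version("1.1.0", sha256="bbb222")',
        '+    version("1.1.0", sha256="bbb222")',  # moved, not newly added
    ]
    checksums = ["aaa111", "bbb222"]
    added = {c for c in checksums for ln in diff_lines if c in ln and ln.startswith("+")}
    removed = {c for c in checksums for ln in diff_lines if c in ln and ln.startswith("-")}
    assert added - removed == {"aaa111"}  # only the genuinely new checksum survives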
unaffected_pruner = get_unaffected_pruners(env, options.untouched_pruning_dependent_depth) if unaffected_pruner: + tty.info("Enabling Unaffected Pruner") pruning_filters.append(unaffected_pruner) # Possibly prune specs that are already built on some configured mirror if options.prune_up_to_date: + tty.info("Enabling Up-to-date Pruner") pruning_filters.append( create_already_built_pruner(check_index_only=options.check_index_only) ) # Possibly prune specs that are external if options.prune_external: + tty.info("Enabling Externals Pruner") pruning_filters.append(create_external_pruner()) # Do all the pruning @@ -558,17 +559,14 @@ def generate_pipeline(env: ev.Environment, args) -> None: tty.warn("Unable to populate buildgroup without CDash credentials") -def import_signing_key(base64_signing_key): - """Given Base64-encoded gpg key, decode and import it to use for - signing packages. +def import_signing_key(base64_signing_key: str) -> None: + """Given Base64-encoded gpg key, decode and import it to use for signing packages. Arguments: - base64_signing_key (str): A gpg key including the secret key, - armor-exported and base64 encoded, so it can be stored in a - gitlab CI variable. For an example of how to generate such - a key, see: - - https://github.com/spack/spack-infrastructure/blob/main/gitlab-docker/files/gen-key + base64_signing_key: + A gpg key including the secret key, armor-exported and base64 encoded, so it can be + stored in a gitlab CI variable. For an example of how to generate such a key, see + https://github.com/spack/spack-infrastructure/blob/main/gitlab-docker/files/gen-key. """ if not base64_signing_key: tty.warn("No key found for signing/verifying packages") @@ -578,27 +576,25 @@ def import_signing_key(base64_signing_key): # This command has the side-effect of creating the directory referred # to as GNUPGHOME in setup_environment() - list_output = spack_gpg("list", output=str) + list_output = spack_gpg("list") tty.debug("spack gpg list:") tty.debug(list_output) - decoded_key = base64.b64decode(base64_signing_key) - if isinstance(decoded_key, bytes): - decoded_key = decoded_key.decode("utf8") + decoded_key = base64.b64decode(base64_signing_key).decode("utf-8") with tempfile.TemporaryDirectory() as tmpdir: sign_key_path = os.path.join(tmpdir, "signing_key") with open(sign_key_path, "w", encoding="utf-8") as fd: fd.write(decoded_key) - key_import_output = spack_gpg("trust", sign_key_path, output=str) + key_import_output = spack_gpg("trust", sign_key_path) tty.debug(f"spack gpg trust {sign_key_path}") tty.debug(key_import_output) # Now print the keys we have for verifying and signing - trusted_keys_output = spack_gpg("list", "--trusted", output=str) - signing_keys_output = spack_gpg("list", "--signing", output=str) + trusted_keys_output = spack_gpg("list", "--trusted") + signing_keys_output = spack_gpg("list", "--signing") tty.debug("spack gpg list --trusted") tty.debug(trusted_keys_output) @@ -629,13 +625,13 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b sign_binaries: If True, spack will attempt to sign binary package before pushing. 
""" tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})") - signing_key = bindist.select_signing_key() if sign_binaries else None + signing_key = spack.binary_distribution.select_signing_key() if sign_binaries else None mirror = spack.mirrors.mirror.Mirror.from_url(mirror_url) try: - with bindist.make_uploader(mirror, signing_key=signing_key) as uploader: + with spack.binary_distribution.make_uploader(mirror, signing_key=signing_key) as uploader: uploader.push_or_raise([spec]) return True - except bindist.PushToBuildCacheError as e: + except spack.binary_distribution.PushToBuildCacheError as e: tty.error(f"Problem writing to {mirror_url}: {e}") return False @@ -701,17 +697,15 @@ def copy_test_logs_to_artifacts(test_stage, job_test_dir): ) -def download_and_extract_artifacts(url, work_dir) -> str: +def download_and_extract_artifacts(url: str, work_dir: str) -> str: """Look for gitlab artifacts.zip at the given url, and attempt to download - and extract the contents into the given work_dir + and extract the contents into the given work_dir Arguments: + url: Complete url to artifacts.zip file + work_dir: Path to destination where artifacts should be extracted - url (str): Complete url to artifacts.zip file - work_dir (str): Path to destination where artifacts should be extracted - - Output: - + Returns: Artifacts root path relative to the archive root """ tty.msg(f"Fetching artifacts from: {url}") @@ -766,23 +760,27 @@ def get_spack_info(): return f"no git repo, use spack {spack.spack_version}" -def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None): - """Look in the local spack clone to find the checkout_commit, and if - provided, the merge_commit given as arguments. If those commits can - be found locally, then clone spack and attempt to recreate a merge - commit with the same parent commits as tested in gitlab. This looks - something like 1) git clone repo && cd repo 2) git checkout - 3) git merge . If there is no - merge_commit provided, then skip step (3). +def setup_spack_repro_version( + repro_dir: str, checkout_commit: str, merge_commit: Optional[str] = None +) -> bool: + """Look in the local spack clone to find the checkout_commit, and if provided, the + merge_commit given as arguments. If those commits can be found locally, then clone spack and + attempt to recreate a merge commit with the same parent commits as tested in gitlab. This looks + something like + + 1. ``git clone repo && cd repo`` + 2. ``git checkout `` + 3. ``git merge `` + + If there is no merge_commit provided, then skip step (3). Arguments: - repro_dir (str): Location where spack should be cloned - checkout_commit (str): SHA of PR branch commit - merge_commit (str): SHA of target branch parent + repro_dir: Location where spack should be cloned + checkout_commit: SHA of PR branch commit + merge_commit: SHA of target branch parent - Returns: True if git repo state was successfully recreated, or False - otherwise. 
+ Returns: True iff the git repo state was successfully recreated """ # figure out the path to the spack git version being used for the # reproduction @@ -863,7 +861,7 @@ def setup_spack_repro_version(repro_dir, checkout_commit, merge_commit=None): def reproduce_ci_job(url, work_dir, autostart, gpg_url, runtime, use_local_head): - """Given a url to gitlab artifacts.zip from a failed 'spack ci rebuild' job, + """Given a url to gitlab artifacts.zip from a failed ``spack ci rebuild`` job, attempt to setup an environment in which the failure can be reproduced locally. This entails the following: @@ -1312,7 +1310,7 @@ def read_broken_spec(broken_spec_url): tty.warn(f"Unable to read broken spec from {broken_spec_url}") return None - broken_spec_contents = codecs.getreader("utf-8")(fs).read() + broken_spec_contents = io.TextIOWrapper(fs, encoding="utf-8").read() return syaml.load(broken_spec_contents) diff --git a/lib/spack/spack/ci/common.py b/lib/spack/spack/ci/common.py index e3a7e3fa7c81ff..26b75f59b47988 100644 --- a/lib/spack/spack/ci/common.py +++ b/lib/spack/spack/ci/common.py @@ -17,7 +17,7 @@ from urllib.parse import quote, urlencode, urlparse from urllib.request import Request -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.config as cfg import spack.deptypes as dt import spack.environment as ev @@ -28,7 +28,6 @@ import spack.schema import spack.spec import spack.util.compression as compression -import spack.util.spack_yaml as syaml import spack.util.web as web_util from spack import traverse from spack.llnl.util.lang import memoized @@ -144,34 +143,6 @@ def ensure_expected_target_path(path: str) -> str: return path -def update_env_scopes( - env: ev.Environment, - cli_scopes: List[str], - output_file: str, - transform_windows_paths: bool = False, -) -> None: - """Add any config scopes from cli_scopes which aren't already included in the - environment, by reading the yaml, adding the missing includes, and writing the - updated yaml back to the same location. - """ - with open(env.manifest_path, "r", encoding="utf-8") as env_fd: - env_yaml_root = syaml.load(env_fd) - - # Add config scopes to environment - env_includes = env_yaml_root["spack"].get("include", []) - include_scopes: List[str] = [] - for scope in cli_scopes: - if scope not in include_scopes and scope not in env_includes: - include_scopes.insert(0, scope) - env_includes.extend(include_scopes) - env_yaml_root["spack"]["include"] = [ - ensure_expected_target_path(i) if transform_windows_paths else i for i in env_includes - ] - - with open(output_file, "w", encoding="utf-8") as fd: - syaml.dump_config(env_yaml_root, fd, default_flow_style=False) - - def write_pipeline_manifest(specs, src_prefix, dest_prefix, output_file): """Write out the file describing specs that should be copied""" buildcache_copies = {} @@ -179,7 +150,7 @@ def write_pipeline_manifest(specs, src_prefix, dest_prefix, output_file): for release_spec in specs: release_spec_dag_hash = release_spec.dag_hash() cache_class = get_url_buildcache_class( - layout_version=bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) buildcache_copies[release_spec_dag_hash] = { "src": cache_class.get_manifest_url(release_spec, src_prefix), @@ -232,9 +203,9 @@ def args(self): def build_name(self, spec: Optional[spack.spec.Spec] = None) -> Optional[str]: """Returns the CDash build name. 
- A name will be generated if the `spec` is provided, + A name will be generated if the ``spec`` is provided, otherwise, the value will be retrieved from the environment - through the `SPACK_CDASH_BUILD_NAME` variable. + through the ``SPACK_CDASH_BUILD_NAME`` variable. Returns: (str) given spec's CDash build name.""" if spec: @@ -402,6 +373,7 @@ def __init__( self.pipeline_type = pipeline_type self.require_signing = require_signing self.cdash_handler = cdash_handler + self.forward_variables: List[str] = [] class PipelineNode: @@ -542,7 +514,6 @@ def __init_job(self, release_spec): job_vars["SPACK_JOB_SPEC_COMPILER_VERSION"] = release_spec.format("{compiler.version}") job_vars["SPACK_JOB_SPEC_ARCH"] = release_spec.format("{architecture}") job_vars["SPACK_JOB_SPEC_VARIANTS"] = release_spec.format("{variants}") - return job_object def __is_named(self, section): @@ -582,7 +553,7 @@ def __apply_submapping(self, dest, spec, section): if _spec_matches(spec, match_string): matched = True if "build-job-remove" in match_attrs: - spack.config.remove_yaml(dest, attrs["build-job-remove"]) + cfg.remove_yaml(dest, attrs["build-job-remove"]) if "build-job" in match_attrs: spack.schema.merge_yaml(dest, attrs["build-job"]) break @@ -653,7 +624,7 @@ def generate_ir(self): def _apply_section(dest, src): if do_remove: - dest = spack.config.remove_yaml(dest, src[remove_job_name]) + dest = cfg.remove_yaml(dest, src[remove_job_name]) if do_merge: dest = copy.copy(spack.schema.merge_yaml(dest, src[merge_job_name])) diff --git a/lib/spack/spack/ci/gitlab.py b/lib/spack/spack/ci/gitlab.py index c3fa05795c9a0c..161608ca8a0448 100644 --- a/lib/spack/spack/ci/gitlab.py +++ b/lib/spack/spack/ci/gitlab.py @@ -4,17 +4,19 @@ import copy import os import shutil +import urllib from typing import List, Optional import spack.vendor.ruamel.yaml import spack -import spack.binary_distribution as bindist -import spack.config as cfg +import spack.binary_distribution +import spack.config import spack.llnl.util.tty as tty import spack.mirrors.mirror import spack.schema import spack.spec +import spack.util.path as path_util import spack.util.spack_yaml as syaml from .common import ( @@ -26,7 +28,6 @@ SpackCIError, ensure_expected_target_path, unpack_script, - update_env_scopes, write_pipeline_manifest, ) from .generator_registry import generator @@ -129,29 +130,52 @@ def generate_gitlab_yaml(pipeline: PipelineDag, spack_ci: SpackCIConfig, options # concrete environment directory, along with the spack.lock file. if not os.path.exists(concrete_env_dir): os.makedirs(concrete_env_dir) - shutil.copyfile(options.env.manifest_path, os.path.join(concrete_env_dir, "spack.yaml")) - shutil.copyfile(options.env.lock_path, os.path.join(concrete_env_dir, "spack.lock")) - update_env_scopes( - options.env, - [ - os.path.relpath(s.path, concrete_env_dir) - for s in cfg.scopes().values() - if not s.writable - and isinstance(s, (cfg.DirectoryConfigScope)) - and os.path.exists(s.path) - ], - os.path.join(concrete_env_dir, "spack.yaml"), - # Here transforming windows paths is only required in the special case - # of copy_only_pipelines, a unique scenario where the generate job and - # child pipelines are run on different platforms. To make this compatible - # w/ Windows, we cannot write Windows style path separators that will be - # consumed on by the Posix copy job runner. 
- # - # TODO (johnwparent): Refactor config + cli read/write to deal only in - # posix style paths - transform_windows_paths=(options.pipeline_type == PipelineType.COPY_ONLY), - ) + # Copy the manifest and handle relative included paths + with open(options.env.manifest_path, "r", encoding="utf-8") as fin, open( + os.path.join(concrete_env_dir, "spack.yaml"), "w", encoding="utf-8" + ) as fout: + data = syaml.load(fin) + if "spack" not in data: + raise spack.config.ConfigSectionError( + 'Missing top level "spack" section in environment' + ) + + def _rewrite_include(path, orig_root, new_root): + expanded_path = path_util.substitute_path_variables(path) + + # Skip non-local paths + parsed = urllib.parse.urlparse(expanded_path) + file_schemes = ["", "file"] + if parsed.scheme not in file_schemes: + return path + + if os.path.isabs(expanded_path): + return path + abs_path = path_util.canonicalize_path(path, orig_root) + return os.path.relpath(abs_path, start=new_root) + + # If there are includes, rewrite their relative paths; otherwise the manifest is copied as-is + if "include" in data["spack"]: + includes = data["spack"]["include"] + # If there are includes in the config, then we need to fix the relative paths + # to be relative from the concrete env dir used by downstream pipelines + env_root_path = os.path.dirname(os.path.abspath(options.env.manifest_path)) + fixed_includes = [] + for inc in includes: + if isinstance(inc, dict): + inc["path"] = _rewrite_include(inc["path"], env_root_path, concrete_env_dir) + else: + inc = _rewrite_include(inc, env_root_path, concrete_env_dir) + + fixed_includes.append(inc) + + data["spack"]["include"] = fixed_includes + + os.makedirs(concrete_env_dir, exist_ok=True) + syaml.dump(data, fout) + + shutil.copyfile(options.env.lock_path, os.path.join(concrete_env_dir, "spack.lock")) job_log_dir = os.path.join(pipeline_artifacts_dir, "logs") job_repro_dir = os.path.join(pipeline_artifacts_dir, "reproduction") @@ -238,7 +262,9 @@ def main_script_replacements(cmd): # Let downstream jobs know whether the spec needed rebuilding, regardless # whether DAG pruning was enabled or not.
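Aside: a small self-contained sketch of the relative-include rewriting added above, assuming plain local paths with no URL schemes or config variables involved.

    import os

    def rewrite_include(path: str, orig_root: str, new_root: str) -> str:
        if os.path.isabs(path):
            return path  # absolute includes pass through unchanged
        # resolve against the old environment root, then re-relativize
        return os.path.relpath(os.path.join(orig_root, path), start=new_root)

    print(rewrite_include("../configs/packages.yaml", "/env", "/env/ci/concrete"))
    # -> ../../../configs/packages.yaml on POSIX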
- already_built = bindist.get_mirrors_for_spec(spec=release_spec, index_only=True) + already_built = spack.binary_distribution.get_mirrors_for_spec( + spec=release_spec, index_only=True + ) job_vars["SPACK_SPEC_NEEDS_REBUILD"] = "False" if already_built else "True" if options.cdash_handler: @@ -293,8 +319,8 @@ def main_script_replacements(cmd): ) maybe_generate_manifest(pipeline, options, manifest_path) - relative_specs_url = bindist.buildcache_relative_specs_url() - relative_keys_url = bindist.buildcache_relative_keys_url() + relative_specs_url = spack.binary_distribution.buildcache_relative_specs_url() + relative_keys_url = spack.binary_distribution.buildcache_relative_keys_url() if options.pipeline_type == PipelineType.COPY_ONLY: stage_names.append("copy") @@ -394,6 +420,9 @@ def main_script_replacements(cmd): "SPACK_REBUILD_EVERYTHING": str(rebuild_everything), "SPACK_REQUIRE_SIGNING": str(options.require_signing), } + output_object["variables"].update( + dict([(v, os.environ[v]) for v in options.forward_variables if v in os.environ]) + ) if options.stack_name: output_object["variables"]["SPACK_CI_STACK_NAME"] = options.stack_name diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index f7d701d785f47a..3f621e902b66b6 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -9,11 +9,12 @@ import re import subprocess import sys +import textwrap from collections import Counter from typing import Generator, List, Optional, Sequence, Union import spack.concretize -import spack.config # breaks a cycle. +import spack.config import spack.environment as ev import spack.error import spack.extensions @@ -330,7 +331,11 @@ def ensure_single_spec_or_die(spec, matching_specs): if len(matching_specs) <= 1: return - format_string = "{name}{@version}{ arch=architecture} {%compiler.name}{@compiler.version}" + format_string = ( + "{name}{@version}" + "{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}" + "{%compiler.name}{@compiler.version}" + ) args = ["%s matches multiple packages." % spec, "Matching packages:"] args += [ colorize(" @K{%s} " % s.dag_hash(7)) + s.cformat(format_string) for s in matching_specs @@ -658,20 +663,20 @@ def require_active_env(cmd_name): ) -def find_environment(args): +def find_environment(args: argparse.Namespace) -> Optional[ev.Environment]: """Find active environment from args or environment variable. Check for an environment in this order: - 1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments) - 2. via a path in the spack.environment.spack_env_var environment variable. + + 1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments) + 2. via a path in the spack.environment.spack_env_var environment variable. If an environment is found, read it in. If not, return None. 
Arguments: - args (argparse.Namespace): argparse namespace with command arguments + args: argparse namespace with command arguments - Returns: - (spack.environment.Environment): a found environment, or ``None`` + Returns: a found environment, or ``None`` """ # treat env as a name @@ -701,9 +706,23 @@ def find_environment(args): raise ev.SpackEnvironmentError("no environment in %s" % env) -def first_line(docstring): +def doc_first_line(function: object) -> Optional[str]: """Return the first line of the docstring.""" - return docstring.split("\n")[0] + return function.__doc__.split("\n", 1)[0].strip() if function.__doc__ else None + + +if sys.version_info >= (3, 13): + # indent of __doc__ is automatically removed in 3.13+ + # see https://github.com/python/cpython/commit/2566b74b26bcce24199427acea392aed644f4b17 + def doc_dedented(function: object) -> Optional[str]: + """Return the docstring with leading indentation removed.""" + return function.__doc__ + +else: + + def doc_dedented(function: object) -> Optional[str]: + """Return the docstring with leading indentation removed.""" + return textwrap.dedent(function.__doc__) if function.__doc__ else None def converted_arg_length(arg: str): @@ -740,7 +759,7 @@ def group_arguments( max_group_size: max number of elements in any group (default 500) prefix_length: length of any additional arguments (including spaces) to be passed before the groups from args; default is 0 characters - max_group_length: max length of characters that if a group of args is joined by " " + max_group_length: max length in characters if a group of args is joined by ``" "`` On unix, this defaults to SC_ARG_MAX from sysconf. On Windows the default is the max usable for CreateProcess (32,768 chars) diff --git a/lib/spack/spack/cmd/arch.py b/lib/spack/spack/cmd/arch.py index 72e23b3516a8f5..ff3ffd6b4dbfb5 100644 --- a/lib/spack/spack/cmd/arch.py +++ b/lib/spack/spack/cmd/arch.py @@ -14,7 +14,7 @@ import spack.spec description = "print architecture information about this machine" -section = "system" +section = "config" level = "short" diff --git a/lib/spack/spack/cmd/audit.py b/lib/spack/spack/cmd/audit.py index ad8a7fc219b4db..cd7359af2670e0 100644 --- a/lib/spack/spack/cmd/audit.py +++ b/lib/spack/spack/cmd/audit.py @@ -11,7 +11,7 @@ import spack.repo description = "audit configuration files, packages, etc."
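Aside: a short sketch of why doc_dedented above needs the version check, and why the ci command docstrings below switch to the backslash-continuation style. With the continuation, every docstring line carries the source indentation, which Python 3.13+ strips at compile time and older interpreters leave for textwrap.dedent.

    import sys, textwrap

    def cmd():
        """\
        do something useful

        a longer description, indented to match the source
        """

    first = cmd.__doc__.split("\n", 1)[0].strip()  # -> "do something useful"
    body = cmd.__doc__ if sys.version_info >= (3, 13) else textwrap.dedent(cmd.__doc__)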
-section = "system" +section = "packaging" level = "short" diff --git a/lib/spack/spack/cmd/blame.py b/lib/spack/spack/cmd/blame.py index c7ce1a430435b4..6b51fa912f7489 100644 --- a/lib/spack/spack/cmd/blame.py +++ b/lib/spack/spack/cmd/blame.py @@ -21,7 +21,7 @@ from spack.util.executable import ProcessError description = "show contributors to packages" -section = "developer" +section = "query" level = "long" git = spack.util.git.git(required=True) diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py index f875328b3f750a..0de13d0d2fa2c1 100644 --- a/lib/spack/spack/cmd/bootstrap.py +++ b/lib/spack/spack/cmd/bootstrap.py @@ -12,24 +12,24 @@ import spack.bootstrap import spack.bootstrap.config import spack.bootstrap.core +import spack.cmd.mirror import spack.concretize import spack.config import spack.llnl.util.filesystem import spack.llnl.util.tty import spack.llnl.util.tty.color -import spack.mirrors.utils import spack.stage import spack.util.path import spack.util.spack_yaml from spack.cmd.common import arguments description = "manage bootstrap configuration" -section = "system" +section = "admin" level = "long" # Tarball to be downloaded if binary packages are requested in a local mirror -BINARY_TARBALL = "https://github.com/spack/spack-bootstrap-mirrors/releases/download/v0.6/bootstrap-buildcache-v3.tar.gz" +BINARY_TARBALL = "https://github.com/spack/spack-bootstrap-mirrors/releases/download/v2.2/bootstrap-buildcache.tar.gz" #: Subdirectory where to create the mirror LOCAL_MIRROR_DIR = "bootstrap_cache" @@ -51,9 +51,9 @@ }, } -CLINGO_JSON = "$spack/share/spack/bootstrap/github-actions-v0.6/clingo.json" -GNUPG_JSON = "$spack/share/spack/bootstrap/github-actions-v0.6/gnupg.json" -PATCHELF_JSON = "$spack/share/spack/bootstrap/github-actions-v0.6/patchelf.json" +CLINGO_JSON = "$spack/share/spack/bootstrap/github-actions-v2/clingo.json" +GNUPG_JSON = "$spack/share/spack/bootstrap/github-actions-v2/gnupg.json" +PATCHELF_JSON = "$spack/share/spack/bootstrap/github-actions-v2/patchelf.json" # Metadata for a generated source mirror SOURCE_METADATA = { @@ -111,7 +111,12 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: ) list = sp.add_parser("list", help="list all the sources of software to bootstrap Spack") - _add_scope_option(list) + list.add_argument( + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + help="configuration scope to read/modify", + ) add = sp.add_parser("add", help="add a new source for bootstrapping") _add_scope_option(add) @@ -188,6 +193,11 @@ def _reset(args): def _root(args): if args.path: spack.config.set("bootstrap:root", args.path, scope=args.scope) + elif args.scope: + if args.scope not in spack.config.existing_scope_names(): + spack.llnl.util.tty.die( + f"The argument --scope={args.scope} must refer to an existing scope." 
+ ) root = spack.config.get("bootstrap:root", default=None, scope=args.scope) if root: @@ -400,7 +410,9 @@ def _mirror(args): spack.llnl.util.tty.set_msg_enabled(False) spec = spack.concretize.concretize_one(spec_str) for node in spec.traverse(): - spack.mirrors.utils.create(mirror_dir, [node]) + if node.external: + continue + spack.cmd.mirror.create(mirror_dir, [node]) spack.llnl.util.tty.set_msg_enabled(True) if args.binary_packages: diff --git a/lib/spack/spack/cmd/build_env.py b/lib/spack/spack/cmd/build_env.py index e4c052c59e82b1..bfc5bc420c155e 100644 --- a/lib/spack/spack/cmd/build_env.py +++ b/lib/spack/spack/cmd/build_env.py @@ -5,9 +5,7 @@ import spack.cmd.common.env_utility as env_utility from spack.context import Context -description = ( - "run a command in a spec's install environment, or dump its environment to screen or file" -) +description = "dump the install environment for a spec,\nor run a command in that environment" section = "build" level = "long" diff --git a/lib/spack/spack/cmd/buildcache.py b/lib/spack/spack/cmd/buildcache.py index 3767a87b7b779e..f102806b6948f0 100644 --- a/lib/spack/spack/cmd/buildcache.py +++ b/lib/spack/spack/cmd/buildcache.py @@ -2,13 +2,15 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import argparse +import enum import glob import json +import os import sys import tempfile from typing import List, Optional, Tuple -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.cmd import spack.concretize import spack.config @@ -17,6 +19,7 @@ import spack.error import spack.llnl.util.tty as tty import spack.mirrors.mirror +import spack.oci.image import spack.oci.oci import spack.spec import spack.stage @@ -24,6 +27,7 @@ import spack.util.parallel import spack.util.web as web_util from spack import traverse +from spack.binary_distribution import BINARY_INDEX from spack.cmd import display_specs from spack.cmd.common import arguments from spack.llnl.string import plural @@ -31,13 +35,14 @@ from spack.spec import Spec, save_dependency_specfiles from ..buildcache_migrate import migrate -from ..buildcache_prune import prune +from ..buildcache_prune import prune_buildcache from ..enums import InstallRecordStatus from ..url_buildcache import ( BuildcacheComponent, BuildcacheEntryError, URLBuildcacheEntry, check_mirror_for_layout, + get_entries_from_cache, get_url_buildcache_class, ) @@ -46,6 +51,12 @@ level = "long" +class ViewUpdateMode(enum.Enum): + CREATE = enum.auto() + OVERWRITE = enum.auto() + APPEND = enum.auto() + + def setup_parser(subparser: argparse.ArgumentParser): setattr(setup_parser, "parser", subparser) subparsers = subparser.add_subparsers(help="buildcache sub-commands") @@ -191,6 +202,7 @@ def setup_parser(subparser: argparse.ArgumentParser): check.add_argument( "--scope", action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, default=lambda: spack.config.default_modify_scope(), help="configuration scope containing mirrors to check", ) @@ -215,6 +227,12 @@ def setup_parser(subparser: argparse.ArgumentParser): prune.add_argument( "mirror", type=arguments.mirror_name_or_url, help="mirror name, path, or URL" ) + prune.add_argument( + "-k", + "--keeplist", + default=None, + help="file containing newline-delimited list of package hashes to keep (optional)", + ) prune.add_argument( "--dry-run", action="store_true", @@ -272,6 +290,26 @@ def setup_parser(subparser: argparse.ArgumentParser): sync.set_defaults(func=sync_fn) + # Check the validity of a buildcache + check_index = 
subparsers.add_parser("check-index", help=check_index_fn.__doc__) + check_index.add_argument( + "--verify", + nargs="+", + choices=["exists", "manifests", "blobs", "all"], + default=["exists"], + help="List of items to verify along along with the index.", + ) + check_index.add_argument( + "--name", "-n", action="store", help="Name of the view index to check" + ) + check_index.add_argument( + "--output", "-o", action="store", help="File to write check details to" + ) + check_index.add_argument( + "mirror", type=arguments.mirror_name_or_url, help="mirror name, path, or URL" + ) + check_index.set_defaults(func=check_index_fn) + # Update buildcache index without copying any additional packages update_index = subparsers.add_parser( "update-index", aliases=["rebuild-index"], help=update_index_fn.__doc__ @@ -279,6 +317,30 @@ def setup_parser(subparser: argparse.ArgumentParser): update_index.add_argument( "mirror", type=arguments.mirror_name_or_url, help="destination mirror name, path, or URL" ) + update_index_view_args = update_index.add_argument_group("view arguments") + update_index_view_args.add_argument( + "sources", nargs="*", help="List of environments names or paths" + ) + update_index_view_args.add_argument( + "--name", "-n", action="store", help="Name of the view index to update" + ) + update_index_view_mode_args = update_index_view_args.add_mutually_exclusive_group( + required=False + ) + update_index_view_mode_args.add_argument( + "--append", + "-a", + action="store_true", + help="Append the listed specs to the current view index if it already exists. " + "This operation does not guarentee atomic write and should be run with care.", + ) + update_index_view_mode_args.add_argument( + "--force", + "-f", + action="store_true", + help="If a view index already exists, overwrite it and " + "suppress warnings (this is the default for non-view indices)", + ) update_index.add_argument( "-k", "--keys", @@ -286,6 +348,7 @@ def setup_parser(subparser: argparse.ArgumentParser): action="store_true", help="if provided, key index will be updated as well as package index", ) + arguments.add_common_arguments(update_index, ["yes_to_all"]) update_index.set_defaults(func=update_index_fn) # Migrate a buildcache from layout_version 2 to version 3 @@ -399,7 +462,7 @@ def push_fn(args): unsigned = not (args.key or args.signed) # For OCI images, we require dependencies to be pushed for now. - if mirror.push_url.startswith("oci://") and not unsigned: + if spack.oci.image.is_oci_url(mirror.push_url) and not unsigned: tty.warn( "Code signing is currently not supported for OCI images. " "Use --unsigned to silence this warning." @@ -407,7 +470,9 @@ def push_fn(args): unsigned = True # Select a signing key, or None if unsigned. 
- signing_key = None if unsigned else (args.key or bindist.select_signing_key()) + signing_key = ( + None if unsigned else (args.key or spack.binary_distribution.select_signing_key()) + ) specs = _specs_to_be_packaged( roots, @@ -435,10 +500,10 @@ def push_fn(args): ) # Warn about possible old binary mirror layout - if not mirror.push_url.startswith("oci://"): + if not spack.oci.image.is_oci_url(mirror.push_url): check_mirror_for_layout(mirror) - with bindist.make_uploader( + with spack.binary_distribution.make_uploader( mirror=mirror, force=args.force, update_index=args.update_index, @@ -447,39 +512,43 @@ def push_fn(args): ) as uploader: skipped, upload_errors = uploader.push(specs=specs) failed.extend(upload_errors) - if not upload_errors and args.tag: - uploader.tag(args.tag, roots) - if skipped: - if len(specs) == 1: - tty.info("The spec is already in the buildcache. Use --force to overwrite it.") - elif len(skipped) == len(specs): - tty.info("All specs are already in the buildcache. Use --force to overwrite them.") - else: - tty.info( - "The following {} specs were skipped as they already exist in the buildcache:\n" - " {}\n" - " Use --force to overwrite them.".format( - len(skipped), ", ".join(elide_list([_format_spec(s) for s in skipped], 5)) + if skipped: + if len(specs) == 1: + tty.info("The spec is already in the buildcache. Use --force to overwrite it.") + elif len(skipped) == len(specs): + tty.info("All specs are already in the buildcache. Use --force to overwrite them.") + else: + tty.info( + "The following {} specs were skipped as they already exist in the " + "buildcache:\n" + " {}\n" + " Use --force to overwrite them.".format( + len(skipped), ", ".join(elide_list([_format_spec(s) for s in skipped], 5)) + ) ) - ) - if failed: - if len(failed) == 1: - raise failed[0][1] + if failed: + if len(failed) == 1: + raise failed[0][1] + + raise spack.error.SpackError( + f"The following {len(failed)} errors occurred while pushing specs to the " + "buildcache", + "\n".join( + elide_list( + [ + f" {_format_spec(spec)}: {e.__class__.__name__}: {e}" + for spec, e in failed + ], + 5, + ) + ), + ) - raise spack.error.SpackError( - f"The following {len(failed)} errors occurred while pushing specs to the buildcache", - "\n".join( - elide_list( - [ - f" {_format_spec(spec)}: {e.__class__.__name__}: {e}" - for spec, e in failed - ], - 5, - ) - ), - ) + # Finally tag all roots as a single image if requested. 
+ if args.tag: + uploader.tag(args.tag, roots) def install_fn(args): @@ -487,17 +556,19 @@ def install_fn(args): if not args.specs: tty.die("a spec argument is required to install from a buildcache") - query = bindist.BinaryCacheQuery(all_architectures=args.otherarch) + query = spack.binary_distribution.BinaryCacheQuery(all_architectures=args.otherarch) matches = spack.store.find(args.specs, multiple=args.multiple, query_fn=query) for match in matches: - bindist.install_single_spec(match, unsigned=args.unsigned, force=args.force) + spack.binary_distribution.install_single_spec( + match, unsigned=args.unsigned, force=args.force + ) def list_fn(args): """list binary packages available from mirrors""" try: - specs = bindist.update_cache_and_get_specs() - except bindist.FetchCacheError as e: + specs = spack.binary_distribution.update_cache_and_get_specs() + except spack.binary_distribution.FetchCacheError as e: tty.die(e) if not args.allarch: @@ -520,7 +591,7 @@ def list_fn(args): def keys_fn(args): """get public keys available on mirrors""" - bindist.get_keys(args.install, args.trust, args.force) + spack.binary_distribution.get_keys(args.install, args.trust, args.force) def check_fn(args: argparse.Namespace): @@ -552,7 +623,12 @@ def check_fn(args: argparse.Namespace): tty.msg("No mirrors provided, exiting.") return - if bindist.check_specs_against_mirrors(configured_mirrors, specs, args.output_file) == 1: + if ( + spack.binary_distribution.check_specs_against_mirrors( + configured_mirrors, specs, args.output_file + ) + == 1 + ): sys.exit(1) @@ -568,7 +644,7 @@ def download_fn(args): if len(specs) != 1: tty.die("a single spec argument is required to download from a buildcache") - bindist.download_single_spec(specs[0], args.path) + spack.binary_distribution.download_single_spec(specs[0], args.path) def save_specfile_fn(args): @@ -598,7 +674,7 @@ def copy_buildcache_entry(cache_entry: URLBuildcacheEntry, destination_url: str) try: spec_dict = cache_entry.fetch_metadata() cache_entry.fetch_archive() - except bindist.BuildcacheEntryError as e: + except spack.binary_distribution.BuildcacheEntryError as e: tty.warn(f"Failed to retrieve buildcache for copying due to {e}") cache_entry.destroy() return @@ -709,7 +785,7 @@ def sync_fn(args): for s in specs_to_sync: tty.debug(" {0}{1}: {2}".format("* " if s in env.roots() else " ", s.name, s.dag_hash())) cache_class = get_url_buildcache_class( - layout_version=bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) src_cache_entry = cache_class(src_mirror_url, s, allow_unsigned=True) src_cache_entry.read_manifest() @@ -734,7 +810,7 @@ def manifest_copy( for spec_hash, copy_obj in deduped_manifest.items(): cache_class = get_url_buildcache_class( - layout_version=bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) src_cache_entry = cache_class( cache_class.get_base_url(copy_obj["src"]), allow_unsigned=True @@ -759,28 +835,275 @@ def update_index(mirror: spack.mirrors.mirror.Mirror, update_keys=False): with tempfile.TemporaryDirectory( dir=spack.stage.get_stage_root() ) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor: - bindist._oci_update_index(image_ref, tmpdir, executor) + spack.binary_distribution._oci_update_index(image_ref, tmpdir, executor) return # Otherwise, assume a normal mirror. 
url = mirror.push_url with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: - bindist._url_generate_package_index(url, tmpdir) + spack.binary_distribution._url_generate_package_index(url, tmpdir) + + if update_keys: + mirror_update_keys(mirror) + + +def mirror_update_keys(mirror: spack.mirrors.mirror.Mirror): + url = mirror.push_url + try: + with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: + spack.binary_distribution.generate_key_index(url, tmpdir) + except spack.binary_distribution.CannotListKeys as e: + # Do not error out if listing keys went wrong. This usually means that the _gpg path + # does not exist. TODO: distinguish between this and other errors. + tty.warn(f"did not update the key index: {e}") + + +def update_view( + mirror: spack.mirrors.mirror.Mirror, + update_mode: ViewUpdateMode, + *sources: str, + name: Optional[str] = None, + update_keys: bool = False, + yes_to_all: bool = False, +): + """update a buildcache view index""" + # OCI images do not support views. + try: + spack.oci.oci.image_from_mirror(mirror) + raise spack.error.SpackError("OCI build caches do not support index views") + except ValueError: + pass + + if update_mode == ViewUpdateMode.APPEND and not yes_to_all: + tty.warn( + "Appending to a view index does not guarantee an idempotent write when contending " + "with multiple writers. This feature is meant to be used by a single process." + ) + if not tty.get_yes_or_no("Do you want to proceed?", default=False): + tty.die("Aborted view index update") + + # Otherwise, assume a normal mirror. + url = mirror.push_url + + if (name and mirror.push_view) and not name == mirror.push_view: + tty.warn( + ( + f"Updating index view with name ({name}), which is different than " + f"the configured name ({mirror.push_view}) for the mirror {mirror.name}" + ) + ) + + name = name or mirror.push_view + if not name: + tty.die( + "Attempting to update a view but could not determine the view name.\n" + " Either pass --name or configure the view name in mirrors.yaml" + ) + + mirror_metadata = spack.binary_distribution.MirrorMetadata( + url, spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION, name + ) + + # Check if the index already exists; if it does, make sure there is a copy in the + # local cache. + index_exists = True + try: + BINARY_INDEX._fetch_and_cache_index(mirror_metadata) + except spack.binary_distribution.BuildcacheIndexNotExists: + index_exists = False + + if index_exists and update_mode == ViewUpdateMode.CREATE: + raise spack.error.SpackError( + "Index already exists.
To overwrite or update pass --force or --append respectively" + ) + + hashes = [] + if sources: + for source in sources: + tty.debug(f"reading specs from source: {source}") + env = ev.environment_from_name_or_dir(source) + hashes.extend(env.all_hashes()) + else: + # Get hashes in the current active environment + hashes = spack.cmd.require_active_env(cmd_name="buildcache update-view").all_hashes() + + if not hashes: + tty.warn("No specs found for view, creating an empty index") + + filter_fn = lambda x: x in hashes + + with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: + # Initialize a database + db = spack.binary_distribution.BuildCacheDatabase(tmpdir) + db._write() + + if update_mode == ViewUpdateMode.APPEND: + # Load the current state of the view index from the cache into the database + cache_index = BINARY_INDEX._local_index_cache.get(str(mirror_metadata)) + if cache_index: + cache_key = cache_index["index_path"] + db._read_from_file(BINARY_INDEX._index_file_cache.cache_path(cache_key)) + + spack.binary_distribution._url_generate_package_index(url, tmpdir, db, name, filter_fn) if update_keys: - try: - with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: - bindist.generate_key_index(url, tmpdir) - except bindist.CannotListKeys as e: - # Do not error out if listing keys went wrong. This usually means that the _gpg path - # does not exist. TODO: distinguish between this and other errors. - tty.warn(f"did not update the key index: {e}") + mirror_update_keys(mirror) + + +def check_index_fn(args): + """Check if a build cache index, manifests, and blobs are consistent""" + mirror = args.mirror + verify = set(args.verify) + + checking_view_index = (args.name or mirror.fetch_view) is not None + + if "all" in verify: + verify.update(["exists", "manifests", "blobs"]) + + try: + spack.oci.oci.image_from_mirror(mirror) + raise spack.error.SpackError("OCI build caches do not support index views") + except ValueError: + pass + + # Check if the index exists, and cache it locally for next operations + mirror_metadata = spack.binary_distribution.MirrorMetadata( + mirror.fetch_url, + spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION, + args.name or mirror.fetch_view, + ) + index_exists = True + missing_index_blob = False + try: + BINARY_INDEX._fetch_and_cache_index(mirror_metadata) + except spack.binary_distribution.BuildcacheIndexNotExists: + index_exists = False + except spack.binary_distribution.FetchIndexError: + # Here the index manifest exists, but the index blob did not + # We can still run some of the other validations here, so let's try + index_exists = False + missing_index_blob = True + + missing_specs = [] + unindexed_specs = [] + missing_blobs = {} + cache_hash_list = [] + index_hash_list = [] + # List the manifests and verify + with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir: + # Get listing of spec manifests in mirror + manifest_files = [] + if "manifests" in verify or "blobs" in verify: + manifest_files, read_fn = get_entries_from_cache( + mirror.fetch_url, tmpdir, BuildcacheComponent.SPEC + ) + if "manifests" in verify and index_exists: + # Read the index file + db = spack.binary_distribution.BuildCacheDatabase(tmpdir) + cache_entry = BINARY_INDEX._local_index_cache[str(mirror_metadata)] + cache_key = cache_entry["index_path"] + cache_path = BINARY_INDEX._index_file_cache.cache_path(cache_key) + with BINARY_INDEX._index_file_cache.read_transaction(cache_key): + db._read_from_file(cache_path) + + 
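# Aside: the spec-hash extraction used just below, demonstrated on a made-up
# manifest file name; everything after the last "-" and before the first "."
# is the spec's DAG hash.
_example = "zlib-1.3.1-abcdef1234567890.spec.manifest.json"
assert _example.rsplit("-", 1)[1].split(".", 1)[0] == "abcdef1234567890"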
index_hash_list = set( [ s.dag_hash() for s in db.query_local(installed=InstallRecordStatus.ANY) if db._data[s.dag_hash()].in_buildcache ] ) + + for spec_manifest in manifest_files: + + # Spec manifests have a naming format + # <name>-<version>-<hash>.spec.manifest.json + spec_hash = spec_manifest.rsplit("-", 1)[1].split(".", 1)[0] + if checking_view_index and spec_hash not in index_hash_list: + continue + + cache_hash_list.append(spec_hash) + if spec_hash not in index_hash_list: + unindexed_specs.append(spec_hash) + + if "blobs" in verify: + entry = read_fn(spec_manifest) + entry.read_manifest() + for record in entry.manifest.data: + if not entry.check_blob_exists(record): + blobs = missing_blobs.get(spec_hash, []) + blobs.append(record) + missing_blobs[spec_hash] = blobs + + for h in index_hash_list: + if h not in cache_hash_list: + missing_specs.append(h) + + # Print summary + summary_msg = "Build cache check:\n\t" + if "exists" in verify: + if index_exists: + summary_msg += f"Index exists in mirror: {mirror.name}" + else: + summary_msg += f"Index does not exist in mirror: {mirror.name}" + if mirror.fetch_view: + summary_msg += f"@{mirror.fetch_view}" + summary_msg += "\n" + if missing_index_blob: + tty.warn("The index blob is missing") + + if "manifests" in verify: + if checking_view_index: + count = "n/a" + else: + count = len(unindexed_specs) + summary_msg += f"\tUnindexed specs: {count}\n" + + if "manifests" in verify: + summary_msg += f"\tMissing specs: {len(missing_specs)}\n" + + if "blobs" in verify: + summary_msg += f"\tMissing blobs: {len(missing_blobs)}\n" + + if args.output: + os.makedirs(os.path.dirname(args.output), exist_ok=True) + with open(args.output, "w", encoding="utf-8") as fd: + json.dump( + { + "exists": index_exists, + "manifests": {"missing": missing_specs, "unindexed": unindexed_specs}, + "blobs": {"missing": missing_blobs}, + }, + fd, + ) + + tty.info(summary_msg) def update_index_fn(args): - """update a buildcache index""" - return update_index(args.mirror, update_keys=args.keys) + """update a buildcache index or index view if extra arguments are provided.""" + + update_view_index = ( + args.append or args.force or args.name or args.sources or args.mirror.push_view + ) + + if update_view_index: + update_mode = ViewUpdateMode.CREATE + if args.force: + update_mode = ViewUpdateMode.OVERWRITE + elif args.append: + update_mode = ViewUpdateMode.APPEND + + return update_view( + args.mirror, + update_mode, + *args.sources, + name=args.name, + update_keys=args.keys, + yes_to_all=args.yes_to_all, + ) + else: + return update_index(args.mirror, update_keys=args.keys) def migrate_fn(args): @@ -797,15 +1120,15 @@ def update_index(mirror: spack.mirrors.mirror.Mirror, update_keys=False): will attempt to verify the signatures on specs, and then re-sign them before migration, using whatever keys are already installed in your key ring. You can migrate a mirror of unsigned binaries (or convert a mirror of signed binaries - to unsigned) by providing the --unsigned argument. + to unsigned) by providing the ``--unsigned`` argument. By default spack will leave the original mirror contents (in the old layout) in place after migration. You can have spack remove the old contents by providing - the --delete-existing argument. Because migrating a mostly-already-migrated + the ``--delete-existing`` argument. Because migrating a mostly-already-migrated mirror should be fast, consider a workflow where you perform a default migration (i.e.
preserve the existing layout rather than deleting it) then evaluate the state of the migrated mirror by attempting to install from it, and finally - running the migration again with --delete-existing.""" + running the migration again with ``--delete-existing``.""" target_mirror = args.mirror unsigned = args.unsigned assert isinstance(target_mirror, spack.mirrors.mirror.Mirror) @@ -832,12 +1155,17 @@ def migrate_fn(args): def prune_fn(args): - """prune stale buildcache entries from the mirror""" + """prune buildcache entries from the mirror + + If a keeplist file is provided, performs direct pruning (deletes packages not in keeplist) + followed by orphan pruning. If no keeplist is provided, only performs orphan pruning. + """ mirror: spack.mirrors.mirror.Mirror = args.mirror + keeplist: Optional[str] = args.keeplist dry_run: bool = args.dry_run assert isinstance(mirror, spack.mirrors.mirror.Mirror) - prune(mirror, dry_run) + prune_buildcache(mirror=mirror, keeplist=keeplist, dry_run=dry_run) def buildcache(parser, args): diff --git a/lib/spack/spack/cmd/cd.py b/lib/spack/spack/cmd/cd.py index 9c4a5e7c31eb95..5ea3f4857f5985 100644 --- a/lib/spack/spack/cmd/cd.py +++ b/lib/spack/spack/cmd/cd.py @@ -8,7 +8,7 @@ import spack.cmd.location description = "cd to spack directories in the shell" -section = "developer" +section = "user environment" level = "long" diff --git a/lib/spack/spack/cmd/change.py b/lib/spack/spack/cmd/change.py index d7fc73cbb23ff2..576eeb6aaa718f 100644 --- a/lib/spack/spack/cmd/change.py +++ b/lib/spack/spack/cmd/change.py @@ -3,8 +3,10 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import argparse +import warnings import spack.cmd +import spack.environment import spack.spec from spack.cmd.common import arguments @@ -19,33 +21,66 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "--list-name", dest="list_name", default="specs", - help="name of the list to remove specs from", + help="name of the list to remove abstract specs from", ) subparser.add_argument( - "--match-spec", dest="match_spec", help="if name is ambiguous, supply a spec to match" + "--match-spec", + dest="match_spec", + help="change all specs matching match-spec (default is match by spec name)", ) subparser.add_argument( "-a", "--all", action="store_true", - help="change all matching specs (allow changing more than one spec)", + help="change all matching abstract specs (allow changing more than one abstract spec)", + ) + subparser.add_argument( + "-c", + "--concrete", + action="store_true", + default=False, + help="change concrete specs in the environment", + ) + subparser.add_argument( + "-C", + "--concrete-only", + action="store_true", + default=False, + help="change only concrete specs in the environment", ) arguments.add_common_arguments(subparser, ["specs"]) def change(parser, args): + if args.all and args.concrete_only: + warnings.warn("'spack change --all' argument is ignored with '--concrete-only'") + if args.list_name != "specs" and args.concrete_only: + warnings.warn("'spack change --list-name' argument is ignored with '--concrete-only'") + env = spack.cmd.require_active_env(cmd_name="change") + match_spec = None + if args.match_spec: + match_spec = spack.spec.Spec(args.match_spec) + specs = spack.cmd.parse_specs(args.specs) + with env.write_transaction(): - if args.match_spec: - match_spec = spack.spec.Spec(args.match_spec) - else: - match_spec = None - for spec in spack.cmd.parse_specs(args.specs): - env.change_existing_spec( - spec, - list_name=args.list_name, - 
match_spec=match_spec, - allow_changing_multiple_specs=args.all, - ) + if not args.concrete_only: + try: + for spec in specs: + env.change_existing_spec( + spec, + list_name=args.list_name, + match_spec=match_spec, + allow_changing_multiple_specs=args.all, + ) + except (ValueError, spack.environment.SpackEnvironmentError) as e: + msg = "Cannot change abstract specs." + msg += " Try again with '--concrete-only' to change concrete specs only." + raise ValueError(msg) from e + + if args.concrete or args.concrete_only: + for spec in specs: + env.mutate(selector=match_spec or spack.spec.Spec(spec.name), mutator=spec) + env.write() diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py index bba133781c6d62..283f7a8db40a40 100644 --- a/lib/spack/spack/cmd/checksum.py +++ b/lib/spack/spack/cmd/checksum.py @@ -7,7 +7,6 @@ import sys from typing import Dict, Optional, Tuple -import spack.cmd import spack.llnl.string import spack.llnl.util.lang import spack.repo @@ -70,7 +69,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: modes_parser.add_argument( "--verify", action="store_true", default=False, help="verify known package checksums" ) - subparser.add_argument("package", help="name or spec (e.g. `cmake` or `cmake@3.18`)") + subparser.add_argument("package", help="name or spec (e.g. ``cmake`` or ``cmake@3.18``)") subparser.add_argument( "versions", nargs="*", @@ -175,7 +174,7 @@ def checksum(parser, args): sys.exit(0) # convert dict into package.py version statements - version_lines = get_version_lines(version_hashes, url_dict) + version_lines = get_version_lines(version_hashes) print() print(version_lines) print() diff --git a/lib/spack/spack/cmd/ci.py b/lib/spack/spack/cmd/ci.py index 5f557a22a28adf..13ae4509793e36 100644 --- a/lib/spack/spack/cmd/ci.py +++ b/lib/spack/spack/cmd/ci.py @@ -7,10 +7,10 @@ import os import shutil import sys -from typing import Dict +from typing import Dict, List from urllib.parse import urlparse, urlunparse -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.ci as spack_ci import spack.cmd import spack.cmd.buildcache as buildcache @@ -27,13 +27,15 @@ import spack.repo import spack.spec import spack.stage -import spack.util.executable +import spack.util.git import spack.util.gpg as gpg_util import spack.util.timer as timer import spack.util.url as url_util import spack.util.web as web_util -import spack.version from spack.llnl.util import tty +from spack.version import StandardVersion + +from . 
import doc_dedented, doc_first_line description = "manage continuous integration pipelines" section = "build" @@ -60,9 +62,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Dynamic generation of the jobs yaml from a spack environment generate = subparsers.add_parser( - "generate", - description=deindent(ci_generate.__doc__), - help=spack.cmd.first_line(ci_generate.__doc__), + "generate", description=doc_dedented(ci_generate), help=doc_first_line(ci_generate) ) generate.add_argument( "--output-file", @@ -111,8 +111,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: action="store_true", dest="prune_externals", default=True, - help="skip external specs\n\n" - "do not generate jobs for specs that are marked as external", + help="skip external specs\n\ndo not generate jobs for specs that are marked as external", ) prune_ext_group.add_argument( "--no-prune-externals", @@ -144,6 +143,12 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "should specify a name that can safely be used for artifacts within your project " "directory.", ) + generate.add_argument( + "--forward-variable", + action="append", + help="environment variables to forward from the generate environment " + "to the generated jobs.", + ) generate.set_defaults(func=ci_generate) spack.cmd.common.arguments.add_concretizer_args(generate) @@ -152,17 +157,13 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Rebuild the buildcache index associated with the mirror in the # active, gitlab-enabled environment. index = subparsers.add_parser( - "rebuild-index", - description=deindent(ci_reindex.__doc__), - help=spack.cmd.first_line(ci_reindex.__doc__), + "rebuild-index", description=doc_dedented(ci_reindex), help=doc_first_line(ci_reindex) ) index.set_defaults(func=ci_reindex) # Handle steps of a ci build/rebuild rebuild = subparsers.add_parser( - "rebuild", - description=deindent(ci_rebuild.__doc__), - help=spack.cmd.first_line(ci_rebuild.__doc__), + "rebuild", description=doc_dedented(ci_rebuild), help=doc_first_line(ci_rebuild) ) rebuild.add_argument( "-t", @@ -171,11 +172,19 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: default=False, help="run stand-alone tests after the build", ) - rebuild.add_argument( + rebuild_ff_group = rebuild.add_mutually_exclusive_group() + rebuild_ff_group.add_argument( + "--no-fail-fast", + action="store_false", + default=True, + dest="fail_fast", + help="continue build/stand-alone tests after the first failure", + ) + rebuild_ff_group.add_argument( "--fail-fast", action="store_true", - default=False, - help="stop stand-alone tests after the first failure", + dest="fail_fast", + help="stop build/stand-alone tests after the first failure", ) rebuild.add_argument( "--timeout", @@ -189,8 +198,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Facilitate reproduction of a failed CI build job reproduce = subparsers.add_parser( "reproduce-build", - description=deindent(ci_reproduce.__doc__), - help=spack.cmd.first_line(ci_reproduce.__doc__), + description=doc_dedented(ci_reproduce), + help=doc_first_line(ci_reproduce), ) reproduce.add_argument( "job_url", help="URL of GitLab job web page or artifact", type=_gitlab_artifacts_url ) @@ -227,8 +236,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Verify checksums inside of ci workflows verify_versions = subparsers.add_parser( "verify-versions", - description=deindent(ci_verify_versions.__doc__), - help=spack.cmd.first_line(ci_verify_versions.__doc__),
+ description=doc_dedented(ci_verify_versions), + help=doc_first_line(ci_verify_versions), ) verify_versions.add_argument("from_ref", help="git ref from which to start looking at changes") verify_versions.add_argument("to_ref", help="git ref to end looking at changes") @@ -236,7 +245,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: def ci_generate(args): - """generate jobs file from a CI-aware spack file + """\ + generate jobs file from a CI-aware spack file if you want to report the results on CDash, you will need to set the SPACK_CDASH_AUTH_TOKEN before invoking this command. the value must be the CDash authorization token needed to create @@ -247,7 +257,8 @@ def ci_generate(args): def ci_reindex(args): - """rebuild the buildcache index for the remote mirror + """\ + rebuild the buildcache index for the remote mirror use the active, gitlab-enabled environment to rebuild the buildcache index for the associated mirror @@ -267,7 +278,8 @@ def ci_reindex(args): def ci_rebuild(args): - """rebuild a spec if it is not on the remote mirror + """\ + rebuild a spec if it is not on the remote mirror check a single spec against the remote mirror, and rebuild it from source if the mirror does not contain the hash @@ -429,7 +441,11 @@ def ci_rebuild(args): fd.write(spack_info.encode("utf8")) fd.write(b"\n") - matches = None if full_rebuild else bindist.get_mirrors_for_spec(job_spec, index_only=False) + matches = ( + None + if full_rebuild + else spack.binary_distribution.get_mirrors_for_spec(job_spec, index_only=False) + ) if matches: # Got a hash match on at least one configured mirror. All @@ -440,7 +456,7 @@ def ci_rebuild(args): # jobs in subsequent stages. tty.msg("No need to rebuild {0}, found hash match at: ".format(job_spec_pkg_name)) for match in matches: - tty.msg(" {0}".format(match.url_and_version.url)) + tty.msg(" {0}".format(match.url)) # Now we are done and successful return 0 @@ -455,7 +471,7 @@ def ci_rebuild(args): spack_cmd.append("-k") install_args = [ - f'--use-buildcache={spack_ci.common.win_quote("package:never,dependencies:only")}' + f"--use-buildcache={spack_ci.common.win_quote('package:never,dependencies:only')}" ] can_verify = spack_ci.can_verify_binaries() @@ -466,6 +482,10 @@ def ci_rebuild(args): if args.jobs: install_args.append(f"-j{args.jobs}") + fail_fast = os.environ.get("SPACK_CI_FAIL_FAST", str(args.fail_fast)).lower() in ("1", "true", "yes") + if fail_fast: + install_args.append("--fail-fast") + slash_hash = spack_ci.common.win_quote("/" + job_spec.dag_hash()) # Arguments when installing the root from sources @@ -544,7 +564,7 @@ def ci_rebuild(args): spack_ci.run_standalone_tests( cdash=cdash_handler, job_spec=job_spec, - fail_fast=args.fail_fast, + fail_fast=fail_fast, log_file=log_file, repro_dir=repro_dir, timeout=args.timeout, @@ -583,8 +603,8 @@ def ci_rebuild(args): if not result.success: install_exit_code = FAILED_CREATE_BUILDCACHE_CODE (tty.msg if result.success else tty.error)( - f'{"Pushed" if result.success else "Failed to push"} ' - f'{job_spec.format("{name}{@version}{/hash:7}", color=clr.get_color_when())} ' + f"{'Pushed' if result.success else 'Failed to push'} " + f"{job_spec.format('{name}{@version}{/hash:7}', color=clr.get_color_when())} " f"to {result.url}" ) @@ -645,7 +665,8 @@ def ci_rebuild(args): def ci_reproduce(args): - """generate instructions for reproducing the spec rebuild job + """\ + generate instructions for reproducing the spec rebuild job artifacts of the provided gitlab pipeline rebuild job's URL will be used to derive instructions for
reproducing the build locally @@ -707,18 +728,21 @@ def _gitlab_artifacts_url(url: str) -> str: def validate_standard_versions( - pkg: spack.package_base.PackageBase, versions: spack.version.VersionList + pkg: spack.package_base.PackageBase, versions: List[StandardVersion] ) -> bool: """Get and test the checksum of a package version based on a tarball. Args: - pkg spack.package_base.PackageBase: Spack package for which to validate a version checksum - versions spack.version.VersionList: list of package versions to validate - Returns: bool: result of the validation. True is valid and false is failed. + pkg: Spack package for which to validate a version checksum + versions: list of package versions to validate + Returns: True if all versions are valid, False if any version is invalid. """ - url_dict: Dict[spack.version.StandardVersion, str] = {} + url_dict: Dict[StandardVersion, str] = {} for version in versions: url = pkg.find_valid_url_for_version(version) + assert ( + url is not None + ), f"Package {pkg.name} does not have a valid URL for version {version}" url_dict[version] = url version_hashes = spack.stage.get_checksums_for_versions( @@ -742,17 +766,18 @@ def validate_standard_versions( def validate_git_versions( - pkg: spack.package_base.PackageBase, versions: spack.version.VersionList + pkg: spack.package_base.PackageBase, versions: List[StandardVersion] ) -> bool: """Get and test the commit and tag of a package version based on a git repository. Args: - pkg spack.package_base.PackageBase: Spack package for which to validate a version - versions spack.version.VersionList: list of package versions to validate - Returns: bool: result of the validation. True is valid and false is failed. + pkg: Spack package for which to validate a version + versions: list of package versions to validate + Returns: True if all versions are valid, False if any version is invalid. """ valid_commit = True for version in versions: fetcher = spack.fetch_strategy.for_package_version(pkg, version) + assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy) with spack.stage.Stage(fetcher) as stage: known_commit = pkg.versions[version]["commit"] try: @@ -770,12 +795,9 @@ def validate_git_versions( # commit that is located in the package.py file. if "tag" in pkg.versions[version]: tag = pkg.versions[version]["tag"] - try: - with fs.working_dir(stage.source_path): - found_commit = fetcher.git( - "rev-list", "-n", "1", tag, output=str, error=str - ).strip() - except spack.util.executable.ProcessError: + url = pkg.version_or_package_attr("git", version) + found_commit = spack.util.git.get_commit_sha(url, tag) + if not found_commit: tty.error( f"Invalid tag for {pkg.name}@{version}\n" f" {tag} could not be found in the git repository." @@ -801,7 +823,8 @@ def validate_git_versions( def ci_verify_versions(args): - """validate version checksum & commits between git refs + """\ + validate version checksum & commits between git refs This command takes from_ref and to_ref arguments and then parses the git diff between the two to determine which packages have been modified and verifies the new checksums inside of them.
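Aside: a self-contained sketch of the "which versions are new" filtering used below. Each version maps to its checksum, the set of checksums added between the two refs is computed (faked here), and only versions whose checksum is genuinely new are kept.

    version_to_checksum = {"1.2.3": "sha-aaa", "1.2.4": "sha-bbb"}
    added_checksums = {"sha-bbb"}  # stand-in for the git-diff scan of package.py
    new_versions = [v for v, c in version_to_checksum.items() if c in added_checksums]
    assert new_versions == ["1.2.4"]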
@@ -812,7 +835,7 @@ def ci_verify_versions(args): "AC", spack.repo.builtin_repo(), args.from_ref, args.to_ref ) - failed_version = False + success = True for pkg_name in pkgs: spec = spack.spec.Spec(pkg_name) pkg = spack.repo.PATH.get_pkg_class(spec.name)(spec) @@ -824,37 +847,39 @@ def ci_verify_versions(args): continue # Store versions checksums / commits for future loop - checksums_version_dict = {} - commits_version_dict = {} + url_version_to_checksum: Dict[StandardVersion, str] = {} + git_version_to_checksum: Dict[StandardVersion, str] = {} for version in pkg.versions: # If the package version defines a sha256 we'll use that as the high entropy # string to detect which versions have been added between from_ref and to_ref if "sha256" in pkg.versions[version]: - checksums_version_dict[pkg.versions[version]["sha256"]] = version + url_version_to_checksum[version] = pkg.versions[version]["sha256"] # If a package version instead defines a commit we'll use that as a # high entropy string to detect new versions. elif "commit" in pkg.versions[version]: - commits_version_dict[pkg.versions[version]["commit"]] = version + git_version_to_checksum[version] = pkg.versions[version]["commit"] # TODO: enforce every version have a commit or a sha256 defined if not - # an infinite version (there are a lot of package's where this doesn't work yet.) + # an infinite version (there are a lot of packages where this doesn't work yet.) - with fs.working_dir(os.path.dirname(path)): - added_checksums = spack_ci.get_added_versions( - checksums_version_dict, path, from_ref=args.from_ref, to_ref=args.to_ref - ) - added_commits = spack_ci.get_added_versions( - commits_version_dict, path, from_ref=args.from_ref, to_ref=args.to_ref + def filter_added_versions(versions: Dict[StandardVersion, str]) -> List[StandardVersion]: + added_checksums = spack_ci.filter_added_checksums( + versions.values(), path, from_ref=args.from_ref, to_ref=args.to_ref ) + return [v for v, c in versions.items() if c in added_checksums] + + with fs.working_dir(os.path.dirname(path)): + new_url_versions = filter_added_versions(url_version_to_checksum) + new_git_versions = filter_added_versions(git_version_to_checksum) - if added_checksums: - failed_version = not validate_standard_versions(pkg, added_checksums) or failed_version + if new_url_versions: + success &= validate_standard_versions(pkg, new_url_versions) - if added_commits: - failed_version = not validate_git_versions(pkg, added_commits) or failed_version + if new_git_versions: + success &= validate_git_versions(pkg, new_git_versions) - if failed_version: + if not success: sys.exit(1) diff --git a/lib/spack/spack/cmd/clean.py b/lib/spack/spack/cmd/clean.py index 3adba595a2f86b..d6ec5795f2a864 100644 --- a/lib/spack/spack/cmd/clean.py +++ b/lib/spack/spack/cmd/clean.py @@ -23,10 +23,10 @@ class AllClean(argparse.Action): - """Activates flags -s -d -f -m and -p simultaneously""" + """Activates flags -s -d -f -m -p and -b simultaneously""" def __call__(self, parser, namespace, values, option_string=None): - parser.parse_args(["-sdfmp"], namespace=namespace) + parser.parse_args(["-sdfmpb"], namespace=namespace) def setup_parser(subparser: argparse.ArgumentParser) -> None: @@ -61,11 +61,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: help="remove software and configuration needed to bootstrap Spack", ) subparser.add_argument( - "-a", - "--all", - action=AllClean, - help="equivalent to -sdfmp (does not include --bootstrap)", - nargs=0, + "-a", "--all", action=AllClean, 
help="equivalent to ``-sdfmpb``", nargs=0 ) arguments.add_common_arguments(subparser, ["specs"]) diff --git a/lib/spack/spack/cmd/commands.py b/lib/spack/spack/cmd/commands.py index ee5699c1197b2e..e88fb359749f31 100644 --- a/lib/spack/spack/cmd/commands.py +++ b/lib/spack/spack/cmd/commands.py @@ -22,7 +22,7 @@ from spack.main import section_descriptions description = "list available spack commands" -section = "developer" +section = "config" level = "long" @@ -31,7 +31,7 @@ #: standard arguments for updating completion scripts -#: we iterate through these when called with --update-completion +#: we iterate through these when called with ``--update-completion`` update_completion_args: Dict[str, Dict[str, Any]] = { "bash": { "aliases": True, diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py index 47f4d16b299f6f..300d979c34b972 100644 --- a/lib/spack/spack/cmd/common/arguments.py +++ b/lib/spack/spack/cmd/common/arguments.py @@ -2,15 +2,16 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) - import argparse import os import textwrap +from typing import Any, Optional import spack.cmd import spack.config import spack.deptypes as dt import spack.environment as ev +import spack.llnl.util.tty as tty import spack.mirrors.mirror import spack.mirrors.utils import spack.reporters @@ -129,6 +130,40 @@ def __call__(self, parser, namespace, concurrent_packages, option_string): setattr(namespace, "concurrent_packages", concurrent_packages) +class DeprecatedStoreTrueAction(argparse.Action): + """Like the builtin store_true, but prints a deprecation warning.""" + + def __init__( + self, + option_strings, + dest: str, + default: Optional[Any] = False, + required: bool = False, + help: Optional[str] = None, + removed_in: Optional[str] = None, + instructions: Optional[str] = None, + ): + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=True, + required=required, + help=help, + default=default, + ) + self.removed_in = removed_in + self.instructions = instructions + + def __call__(self, parser, namespace, value, option_string=None): + instructions = [] if not self.instructions else [self.instructions] + tty.warn( + f"{option_string} is deprecated and will be removed in {self.removed_in}.", + *instructions, + ) + setattr(namespace, self.dest, self.const) + + class DeptypeAction(argparse.Action): """Creates a flag of valid dependency types from a deptype argument.""" @@ -167,6 +202,15 @@ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) +def config_scope_readable_validator(value): + if value not in spack.config.existing_scope_names(): + raise ValueError( + f"Invalid scope argument {value} " + "for config read operation, scope context does not exist" + ) + return value + + def _cdash_reporter(namespace): """Helper function to create a CDash reporter. This function gets an early reference to the argparse namespace under construction, so it can later use it to create the object. 
@@ -429,6 +473,16 @@ def no_install_status(): ) +@arg +def show_non_defaults(): + return Args( + "--non-defaults", + action="store_true", + default=False, + help="highlight non-default versions or variants", + ) + + @arg def no_checksum(): return Args( @@ -648,15 +702,13 @@ def add_argument_string_or_variable(parser, arg: str, *, deprecate_str: bool = T "--s3-access-key-id", help="ID string to use to connect to this S3 mirror", ) - add_argument_string_or_variable( - s3_connection_parser, - "--s3-access-key-secret", - help="secret string to use to connect to this S3 mirror", + s3_connection_parser.add_argument( + "--s3-access-key-secret-variable", + help="environment variable containing secret string to use to connect to this S3 mirror", ) - add_argument_string_or_variable( - s3_connection_parser, - "--s3-access-token", - help="access token to use to connect to this S3 mirror", + s3_connection_parser.add_argument( + "--s3-access-token-variable", + help="environment variable containing access token to use to connect to this S3 mirror", ) s3_connection_parser.add_argument( "--s3-profile", help="S3 profile name to use to connect to this S3 mirror", default=None @@ -673,10 +725,9 @@ def add_argument_string_or_variable(parser, arg: str, *, deprecate_str: bool = T deprecate_str=False, help="username to use to connect to this OCI mirror", ) - add_argument_string_or_variable( - oci_connection_parser, - "--oci-password", - help="password to use to connect to this OCI mirror", + oci_connection_parser.add_argument( + "--oci-password-variable", + help="environment variable containing password to use to connect to this OCI mirror", ) diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py index 0a22893be545c6..abe59a95cc589a 100644 --- a/lib/spack/spack/cmd/compiler.py +++ b/lib/spack/spack/cmd/compiler.py @@ -4,7 +4,7 @@ import argparse import sys -import warnings +from typing import List, Optional import spack.binary_distribution import spack.compilers.config @@ -16,9 +16,10 @@ from spack.llnl.util.lang import index_by from spack.llnl.util.tty.colify import colify from spack.llnl.util.tty.color import colorize +from spack.spec import Spec description = "manage compilers" -section = "system" +section = "config" level = "long" @@ -31,19 +32,6 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: aliases=["add"], help="search the system for compilers to add to Spack configuration", ) - mixed_toolchain_group = find_parser.add_mutually_exclusive_group() - mixed_toolchain_group.add_argument( - "--mixed-toolchain", - action="store_true", - default=False, - help="(DEPRECATED) Allow mixed toolchains (for example: clang, clang++, gfortran)", - ) - mixed_toolchain_group.add_argument( - "--no-mixed-toolchain", - action="store_false", - dest="mixed_toolchain", - help="(DEPRECATED) Do not allow mixed toolchains (for example: clang, clang++, gfortran)", - ) find_parser.add_argument("add_paths", nargs=argparse.REMAINDER) find_parser.add_argument( "--scope", @@ -66,7 +54,10 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # List list_parser = sp.add_parser("list", aliases=["ls"], help="list available compilers") list_parser.add_argument( - "--scope", action=arguments.ConfigScope, help="configuration scope to read from" + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + help="configuration scope to read from", ) list_parser.add_argument( "--remote", action="store_true", help="list also compilers from registered buildcaches" 
@@ -76,7 +67,13 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: info_parser = sp.add_parser("info", help="show compiler paths") info_parser.add_argument("compiler_spec") info_parser.add_argument( - "--scope", action=arguments.ConfigScope, help="configuration scope to read from" + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + help="configuration scope to read from", + ) + info_parser.add_argument( + "--remote", action="store_true", help="list also compilers from registered buildcaches" ) @@ -84,12 +81,6 @@ def compiler_find(args): """Search either $PATH or a list of paths OR MODULES for compilers and add them to Spack's configuration. """ - if args.mixed_toolchain: - warnings.warn( - "The '--mixed-toolchain' option has been deprecated in Spack v0.23, and currently " - "has no effect. The option will be removed in Spack v1.1" - ) - paths = args.add_paths or None new_compilers = spack.compilers.config.find_compilers( path_hints=paths, scope=args.scope, max_workers=args.jobs @@ -135,58 +126,59 @@ def compiler_remove(args): def compiler_info(args): """Print info about all compilers matching a spec.""" + all_compilers = _all_available_compilers(scope=args.scope, remote=args.remote) query = spack.spec.Spec(args.compiler_spec) - all_compilers = spack.compilers.config.all_compilers(scope=args.scope, init_config=False) - compilers = [x for x in all_compilers if x.satisfies(query)] if not compilers: tty.die(f"No compilers match spec {query.cformat()}") - else: - for c in compilers: - print(f"{c.cformat()}:") - print(f" prefix: {c.external_path}") - extra_attributes = getattr(c, "extra_attributes", {}) - if "compilers" in extra_attributes: - print(" compilers:") - for language, exe in extra_attributes.get("compilers", {}).items(): - print(f" {language}: {exe}") - if "flags" in extra_attributes: - print(" flags:") - for flag, flag_value in extra_attributes["flags"].items(): - print(f" {flag} = {flag_value}") - if "environment" in extra_attributes: - environment = extra_attributes["environment"] - if len(environment.get("set", {})) != 0: - print("\tenvironment:") - print("\t set:") - for key, value in environment["set"].items(): - print(f"\t {key} = {value}") - if "extra_rpaths" in extra_attributes: - print(" extra rpaths:") - for extra_rpath in extra_attributes["extra_rpaths"]: - print(f" {extra_rpath}") - if getattr(c, "external_modules", []): - print(" modules: ") - for module in c.external_modules: - print(f" {module}") - print() - -def compiler_list(args): - supported_compilers = spack.compilers.config.supported_compilers() + compilers.sort(key=lambda x: (not x.external, x.name, x.version)) - def _is_compiler(x): - return x.name in supported_compilers and x.package.supported_languages and not x.external + for c in compilers: + exes = { + cname: getattr(c.package, cname) + for cname in ("cc", "cxx", "fortran") + if hasattr(c.package, cname) + } + if not exes: + tty.debug( + f"{__name__}: skipping {c.format()} from compiler list, " + f"since it has no executables" + ) + continue + + print(f"{c.tree(recurse_dependencies=False, status_fn=spack.spec.Spec.install_status)}") + print(f" prefix: {c.prefix}") + print(" compilers:") + for language, exe in exes.items(): + print(f" {language}: {exe}") + + extra_attributes = getattr(c, "extra_attributes", {}) + if "flags" in extra_attributes: + print(" flags:") + for flag, flag_value in extra_attributes["flags"].items(): + print(f" {flag} = {flag_value}") + if "environment" in extra_attributes: + 
environment = extra_attributes["environment"] + if len(environment.get("set", {})) != 0: + print("\tenvironment:") + print("\t set:") + for key, value in environment["set"].items(): + print(f"\t {key} = {value}") + if "extra_rpaths" in extra_attributes: + print(" extra rpaths:") + for extra_rpath in extra_attributes["extra_rpaths"]: + print(f" {extra_rpath}") + if getattr(c, "external_modules", []): + print(" modules: ") + for module in c.external_modules: + print(f" {module}") + print() - compilers_from_store = [x for x in spack.store.STORE.db.query() if _is_compiler(x)] - compilers_from_yaml = spack.compilers.config.all_compilers(scope=args.scope, init_config=False) - compilers = compilers_from_yaml + compilers_from_store - if args.remote: - compilers.extend( - [x for x in spack.binary_distribution.update_cache_and_get_specs() if _is_compiler(x)] - ) +def compiler_list(args): + compilers = _all_available_compilers(scope=args.scope, remote=args.remote) # If there are no compilers in any scope, and we're outputting to a tty, give a # hint to the user. @@ -227,6 +219,23 @@ def _is_compiler(x): colify(reversed(sorted(result))) +def _all_available_compilers(scope: Optional[str], remote: bool) -> List[Spec]: + supported_compilers = spack.compilers.config.supported_compilers() + + def _is_compiler(x): + return x.name in supported_compilers and x.package.supported_languages and not x.external + + compilers_from_store = [x for x in spack.store.STORE.db.query() if _is_compiler(x)] + compilers_from_yaml = spack.compilers.config.all_compilers(scope=scope, init_config=False) + compilers = compilers_from_yaml + compilers_from_store + + if remote: + compilers.extend( + [x for x in spack.binary_distribution.update_cache_and_get_specs() if _is_compiler(x)] + ) + return compilers + + def compiler(parser, args): action = { "add": compiler_find, diff --git a/lib/spack/spack/cmd/compilers.py b/lib/spack/spack/cmd/compilers.py index 87a31a5d309209..08225957898feb 100644 --- a/lib/spack/spack/cmd/compilers.py +++ b/lib/spack/spack/cmd/compilers.py @@ -8,13 +8,16 @@ from spack.cmd.compiler import compiler_list description = "list available compilers" -section = "system" +section = "config" level = "short" def setup_parser(subparser: argparse.ArgumentParser) -> None: subparser.add_argument( - "--scope", action=arguments.ConfigScope, help="configuration scope to read/modify" + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + help="configuration scope to read/modify", ) subparser.add_argument( "--remote", action="store_true", help="list also compilers from registered buildcaches" diff --git a/lib/spack/spack/cmd/concretize.py b/lib/spack/spack/cmd/concretize.py index 87290c34909f15..1d8884f7408616 100644 --- a/lib/spack/spack/cmd/concretize.py +++ b/lib/spack/spack/cmd/concretize.py @@ -27,7 +27,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: ) spack.cmd.common.arguments.add_concretizer_args(subparser) - spack.cmd.common.arguments.add_common_arguments(subparser, ["jobs"]) + spack.cmd.common.arguments.add_common_arguments(subparser, ["jobs", "show_non_defaults"]) def concretize(parser, args): @@ -45,7 +45,10 @@ def concretize(parser, args): if not args.quiet: if concretized_specs: tty.msg(f"Concretized {plural(len(concretized_specs), 'spec')}:") - ev.display_specs([concrete for _, concrete in concretized_specs]) + ev.display_specs( + [concrete for _, concrete in concretized_specs], + highlight_non_defaults=args.non_defaults, + ) else: tty.msg("No new 
specs to concretize.") env.write() diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py index 44ecc359696b28..04bddc186ae1f4 100644 --- a/lib/spack/spack/cmd/config.py +++ b/lib/spack/spack/cmd/config.py @@ -13,10 +13,12 @@ import spack.error import spack.llnl.util.filesystem as fs import spack.llnl.util.tty as tty +import spack.llnl.util.tty.color as color import spack.schema import spack.schema.env import spack.spec import spack.store +import spack.util.spack_json as sjson import spack.util.spack_yaml as syaml from spack.cmd.common import arguments from spack.llnl.util.tty.colify import colify_table @@ -43,6 +45,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: metavar="section", choices=spack.config.SECTION_SCHEMAS, ) + get_parser.add_argument("--json", action="store_true", help="output configuration as JSON") blame_parser = sp.add_parser( "blame", help="print configuration annotated with source file:line" @@ -88,9 +91,17 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: choices=("all", "env", "include", "internal", "path"), help="list only scopes of the specified type(s)\n\noptions: %(choices)s", ) + scopes_parser.add_argument( + "-v", + "--verbose", + dest="scopes_verbose", # spack has -v as well + action="store_true", + default=False, + help="show scope types and whether scopes are overridden", + ) scopes_parser.add_argument( "section", - help="tailor scope path information to the specified section (implies -p|--paths)" + help="tailor scope path information to the specified section (implies ``--paths``)" "\n\noptions: %(choices)s", metavar="section", nargs="?", @@ -167,17 +178,21 @@ def _get_scope_and_section(args): def print_configuration(args, *, blame: bool) -> None: + if args.scope and args.scope not in spack.config.existing_scope_names(): + tty.die(f"the argument --scope={args.scope} must refer to an existing scope.") if args.scope and args.section is None: tty.die(f"the argument --scope={args.scope} requires specifying a section.") + yaml = blame or not args.json + if args.section is not None: - spack.config.CONFIG.print_section(args.section, blame=blame, scope=args.scope) + spack.config.CONFIG.print_section(args.section, yaml=yaml, blame=blame, scope=args.scope) return - print_flattened_configuration(blame=blame) + print_flattened_configuration(blame=blame, yaml=yaml) -def print_flattened_configuration(*, blame: bool) -> None: +def print_flattened_configuration(*, blame: bool, yaml: bool) -> None: """Prints to stdout a flattened version of the configuration. Args: @@ -195,7 +210,11 @@ def print_flattened_configuration(*, blame: bool) -> None: for config_section in spack.config.SECTION_SCHEMAS: current = spack.config.get(config_section) flattened[spack.schema.env.TOP_LEVEL_KEY][config_section] = current - syaml.dump_config(flattened, stream=sys.stdout, default_flow_style=False, blame=blame) + if blame or yaml: + syaml.dump_config(flattened, stream=sys.stdout, default_flow_style=False, blame=blame) + else: + sjson.dump(flattened, sys.stdout) + sys.stdout.write("\n") def config_get(args): @@ -219,7 +238,16 @@ def config_edit(args): the active environment. 
""" spack_env = os.environ.get(ev.spack_env_var) - if spack_env and not args.scope: + env_error = ev.environment._active_environment_error + + if env_error and args.scope: + # Cannot use scopes beyond the environment itself with a failed environment + raise env_error + elif env_error: + # The rest of the config system wasn't set up fully, but spack.main was allowed + # to progress so the user can open the malformed environment file + config_file = env_error.filename + elif spack_env and not args.scope: # Don't use the scope object for envs, as `config edit` can be called # for a malformed environment. Use SPACK_ENV to find spack.yaml. config_file = ev.manifest_file(spack_env) @@ -244,47 +272,79 @@ def config_list(args): print(" ".join(list(spack.config.SECTION_SCHEMAS))) -def _config_scope_info(args, scope): - scope_path = None - if (args.section or args.paths) and hasattr(scope, "path"): - section_path = scope.get_section_filename(args.section) if args.section else None - scope_path = ( - section_path - if section_path and os.path.exists(section_path) - else f"{scope.path}{os.sep}" - ) - return (scope.name, scope_path or " ") +def _config_scope_info(args, scope, active, included): + result = [scope.name] # always print the name + + if args.scopes_verbose: + result.append(",".join(_config_basic_scope_types(scope, included))) + if scope.name not in active: + scope_status = "override" + elif args.section and not spack.config.CONFIG.get_config(args.section, scope=scope.name): + scope_status = "absent" + else: + scope_status = "active" + result.append(scope_status) + + section_path = None + if args.section or args.paths: + if hasattr(scope, "path"): + section_path = scope.get_section_filename(args.section) if args.section else None + result.append( + section_path + if section_path and os.path.exists(section_path) + else f"{scope.path}{os.sep}" + ) + else: + result.append(" ") + if args.scopes_verbose and scope_status in ("absent", "override"): + result = [color.colorize(f"@k{{{elt}}}") for elt in result] -def _config_basic_scope_types(scope): + return result + + +def _config_basic_scope_types(scope, included): types = [] if isinstance(scope, spack.config.InternalConfigScope): types.append("internal") - elif hasattr(scope, "yaml_path") and scope.yaml_path == [spack.schema.env.TOP_LEVEL_KEY]: + if hasattr(scope, "yaml_path") and scope.yaml_path == [spack.schema.env.TOP_LEVEL_KEY]: types.append("env") if hasattr(scope, "path"): types.append("path") + if scope.name in included: + types.append("include") return sorted(types) def config_scopes(args): """List configured scopes in descending order of precedence.""" - - included_scopes = list( - i.name for s in spack.config.scopes().reversed_values() for i in s.included_scopes - ) - - scopes = list( + included = list(i.name for s in spack.config.scopes().values() for i in s.included_scopes) + active = [s.name for s in spack.config.CONFIG.active_scopes] + scopes = [ s for s in spack.config.scopes().reversed_values() if ( "include" in args.type - and s.name in included_scopes - or any(i in ("all", *_config_basic_scope_types(s)) for i in args.type) + and s.name in included + or any(i in ("all", *_config_basic_scope_types(s, included)) for i in args.type) ) - ) + and (s.name in active or args.scopes_verbose) + ] + if scopes: - colify_table([_config_scope_info(args, s) for s in scopes]) + headers = ["Scope"] + if args.scopes_verbose: + headers += ["Type", "Status"] + if args.section or args.paths: + headers += ["Path"] + + table = [_config_scope_info(args, 
s, active, included) for s in scopes] + + # add headers if we have > 1 column + if len(headers) > 1: + table = [[color.colorize(f"@*C{{{colname}}}") for colname in headers]] + table + + colify_table(table) def config_add(args): diff --git a/lib/spack/spack/cmd/containerize.py b/lib/spack/spack/cmd/containerize.py index 6c888e41c23dbe..42bc6be14d438e 100644 --- a/lib/spack/spack/cmd/containerize.py +++ b/lib/spack/spack/cmd/containerize.py @@ -8,8 +8,8 @@ import spack.container.images import spack.llnl.util.tty -description = "creates recipes to build images for different container runtimes" -section = "container" +description = "create a container build recipe from an environment" +section = "environments" level = "long" diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py index c4b01b0f70902c..65f675de0a416b 100644 --- a/lib/spack/spack/cmd/create.py +++ b/lib/spack/spack/cmd/create.py @@ -6,7 +6,7 @@ import re import sys import urllib.parse -from typing import List, Optional +from typing import List, Optional, Tuple import spack.llnl.util.tty as tty import spack.repo @@ -53,6 +53,7 @@ # ---------------------------------------------------------------------------- {package_class_import} + from spack.package import * @@ -908,23 +909,22 @@ def get_name(name, url): return result -def get_url(url): +def get_url(url: Optional[str]) -> str: """Get the URL to use. Use a default URL if none is provided. Args: - url (str): ``url`` argument to ``spack create`` + url: ``url`` argument to ``spack create`` - Returns: - str: The URL of the package + Returns: The URL of the package """ # Use the user-supplied URL or a default URL if none is present. return url or "https://www.example.com/example-1.2.3.tar.gz" -def get_versions(args, name): +def get_versions(args: argparse.Namespace, name: str) -> Tuple[str, BuildSystemAndLanguageGuesser]: """Returns a list of versions and hashes for a package. Also returns a BuildSystemAndLanguageGuesser object. @@ -932,11 +932,10 @@ def get_versions(args, name): Returns default values if no URL is provided. Args: - args (argparse.Namespace): The arguments given to ``spack create`` - name (str): The name of the package + args: The arguments given to ``spack create`` + name: The name of the package - Returns: - tuple: versions and hashes, and a BuildSystemAndLanguageGuesser object + Returns: Tuple of versions and hashes, and a BuildSystemAndLanguageGuesser object """ # Default version with hash @@ -983,7 +982,7 @@ def get_versions(args, name): url_dict, name, first_stage_function=guesser, keep_stage=args.keep_stage ) - versions = get_version_lines(version_hashes, url_dict) + versions = get_version_lines(version_hashes) else: versions = unhashed_versions @@ -1027,17 +1026,16 @@ def get_build_system( return selected_template -def get_repository(args, name): +def get_repository(args: argparse.Namespace, name: str) -> spack.repo.Repo: """Returns a Repo object that will allow us to determine the path where the new package file should be created. 
Args: - args (argparse.Namespace): The arguments given to ``spack create`` - name (str): The name of the package to create + args: The arguments given to ``spack create`` + name: The name of the package to create Returns: - spack.repo.Repo: A Repo object capable of determining the path to the - package file + A Repo object capable of determining the path to the package file """ spec = Spec(name) # Figure out namespace for spec @@ -1060,7 +1058,9 @@ def get_repository(args, name): if spec.namespace: repo = spack.repo.PATH.get_repo(spec.namespace) else: - repo = spack.repo.PATH.first_repo() + _repo = spack.repo.PATH.first_repo() + assert _repo is not None, "No package repository found" + repo = _repo # Set the namespace on the spec if it's not there already if not spec.namespace: diff --git a/lib/spack/spack/cmd/deconcretize.py b/lib/spack/spack/cmd/deconcretize.py index e8399853710e25..d1afec66bd86df 100644 --- a/lib/spack/spack/cmd/deconcretize.py +++ b/lib/spack/spack/cmd/deconcretize.py @@ -13,7 +13,7 @@ import spack.spec from spack.cmd.common import arguments -description = "remove specs from the concretized lockfile of an environment" +description = "remove specs from the lockfile of an environment" section = "environments" level = "long" diff --git a/lib/spack/spack/cmd/dependencies.py b/lib/spack/spack/cmd/dependencies.py index 2077e17444f042..5a6055f396131f 100644 --- a/lib/spack/spack/cmd/dependencies.py +++ b/lib/spack/spack/cmd/dependencies.py @@ -14,7 +14,7 @@ from spack.solver.input_analysis import create_graph_analyzer description = "show dependencies of a package" -section = "basic" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/dependents.py b/lib/spack/spack/cmd/dependents.py index 0eb4197ef15c50..4591489404e808 100644 --- a/lib/spack/spack/cmd/dependents.py +++ b/lib/spack/spack/cmd/dependents.py @@ -15,7 +15,7 @@ from spack.llnl.util.tty.colify import colify description = "show packages that depend on another" -section = "basic" +section = "query" level = "long" @@ -43,7 +43,7 @@ def inverted_dependencies(): names to possible dependencies. Virtual packages are included as sources, so that you can query - dependents of, e.g., `mpi`, but virtuals are not included as + dependents of, e.g., ``mpi``, but virtuals are not included as actual dependents. 
""" dag = collections.defaultdict(set) diff --git a/lib/spack/spack/cmd/dev_build.py b/lib/spack/spack/cmd/dev_build.py index c0ffc6f5e01cf3..26b03324bb99d0 100644 --- a/lib/spack/spack/cmd/dev_build.py +++ b/lib/spack/spack/cmd/dev_build.py @@ -16,7 +16,7 @@ from spack.cmd.common import arguments from spack.installer import PackageInstaller -description = "developer build: build from code in current working directory" +description = "build package from code in current working directory" section = "build" level = "long" diff --git a/lib/spack/spack/cmd/develop.py b/lib/spack/spack/cmd/develop.py index b105f5863a79e2..ec29462dbdfa33 100644 --- a/lib/spack/spack/cmd/develop.py +++ b/lib/spack/spack/cmd/develop.py @@ -42,12 +42,27 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: default=True, help=( "(default) clone the package unless the path already exists, " - "use --force to overwrite" + "use ``--force`` to overwrite" ), ) subparser.add_argument( - "-f", "--force", help="remove any files or directories that block cloning source code" + "--no-modify-concrete-specs", + action="store_false", + default=True, + dest="apply_changes", + help=( + "do not mutate concrete specs to have dev_path provenance." + " This requires a later `spack concretize --force` command to use develop specs" + ), + ) + + subparser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="remove any files or directories that block cloning source code", ) subparser.add_argument( @@ -145,6 +160,7 @@ def update_env( spec: spack.spec.Spec, specified_path: Optional[str] = None, build_dir: Optional[str] = None, + apply_changes: bool = True, ): """ Update the spack.yaml file with additions or changes from a develop call @@ -165,6 +181,10 @@ def update_env( # add develop spec and update path _update_config(spec, specified_path) + # If we are automatically mutating the concrete specs for dev provenance, do so + if apply_changes: + env.apply_develop(spec, _abs_code_path(env, spec, specified_path)) + def _clone(spec: spack.spec.Spec, abspath: str, force: bool = False): if os.path.exists(abspath): @@ -225,7 +245,8 @@ def _dev_spec_generator(args, env): for s in concrete_specs: for node_spec in s.traverse(direction="parents", root=True): tty.debug(f"Recursive develop for {node_spec.name}") - yield node_spec, _abs_code_path(env, node_spec, args.path) + dev_spec = spack.spec.Spec(node_spec.format("{name}@{versions}")) + yield dev_spec, _abs_code_path(env, node_spec, args.path) else: yield spec, _abs_code_path(env, spec, args.path) @@ -236,4 +257,4 @@ def develop(parser, args): for spec, abspath in _dev_spec_generator(args, env): assure_concrete_spec(env, spec) setup_src_code(spec, abspath, clone=args.clone, force=args.force) - update_env(env, spec, args.path, args.build_directory) + update_env(env, spec, args.path, args.build_directory, args.apply_changes) diff --git a/lib/spack/spack/cmd/diff.py b/lib/spack/spack/cmd/diff.py index 61e48aec1d576c..a0269f589b7308 100644 --- a/lib/spack/spack/cmd/diff.py +++ b/lib/spack/spack/cmd/diff.py @@ -15,7 +15,7 @@ from spack.llnl.util.tty.color import cprint, get_color_when description = "compare two specs" -section = "basic" +section = "query" level = "long" @@ -47,12 +47,12 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: ) -def shift(asp_function): +def shift(asp_function: asp.AspFunction) -> asp.AspFunction: """Transforms ``attr("foo", "bar")`` into ``foo("bar")``.""" - if not asp_function.args: + args = asp_function.args + if not 
args: raise ValueError(f"Can't shift ASP function with no arguments: {str(asp_function)}") - first, *rest = asp_function.args - return asp.AspFunction(first, rest) + return asp.AspFunction(args[0], args[1:]) def compare_specs(a, b, to_string=False, color=None, ignore_packages=None): diff --git a/lib/spack/spack/cmd/edit.py b/lib/spack/spack/cmd/edit.py index 6c73c69c932158..67fe9a15e84283 100644 --- a/lib/spack/spack/cmd/edit.py +++ b/lib/spack/spack/cmd/edit.py @@ -6,6 +6,7 @@ import errno import glob import os +from typing import Optional, Union import spack.cmd import spack.llnl.util.tty as tty @@ -13,31 +14,24 @@ import spack.repo import spack.util.editor -description = "open package files in $EDITOR" +description = "open package files in ``$EDITOR``" section = "packaging" level = "short" -class ComputeBuildSystemPathAction(argparse.Action): - """Compute the path to the build system directory. This is done lazily so that we use the - correct spack.repo.PATH when the command is run.""" - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, os.path.join(spack.repo.PATH.repos[0].root, "build_systems")) - - def setup_parser(subparser: argparse.ArgumentParser) -> None: excl_args = subparser.add_mutually_exclusive_group() # Various types of Spack files that can be edited # Edits package files by default + # build systems require separate logic to find excl_args.add_argument( "-b", "--build-system", dest="path", - action=ComputeBuildSystemPathAction, - nargs=0, - help="edit the build system with the supplied name", + action="store_const", + const="BUILD_SYSTEM", # placeholder for path that requires computing late + help="edit the build system with the supplied name or fullname", ) excl_args.add_argument( "-c", @@ -72,15 +66,21 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: help="edit the main spack module with the supplied name", ) - # Options for editing packages - excl_args.add_argument("-r", "--repo", default=None, help="path to repo to edit package in") - excl_args.add_argument("-N", "--namespace", default=None, help="namespace of package to edit") + # Options for editing packages and build systems + subparser.add_argument( + "-r", "--repo", default=None, help="path to repo to edit package or build system in" + ) + subparser.add_argument( + "-N", "--namespace", default=None, help="namespace of package or build system to edit" + ) subparser.add_argument("package", nargs="*", default=None, help="package name") -def locate_package(name: str, repo: spack.repo.Repo) -> str: - path = repo.filename_for_package_name(name) +def locate_package(name: str, repo: Optional[spack.repo.Repo]) -> str: + # if not given a repo, use the full repo path to choose one + repo_like: Union[spack.repo.Repo, spack.repo.RepoPath] = repo or spack.repo.PATH + path: str = repo_like.filename_for_package_name(name) try: with open(path, "r", encoding="utf-8"): @@ -91,6 +91,30 @@ def locate_package(name: str, repo: spack.repo.Repo) -> str: tty.die(f"Cannot edit package: {e}") +def locate_build_system(name: str, repo: Optional[spack.repo.Repo]) -> str: + # If given a fullname for a build system, split it into namespace and name + namespace = None + if "." 
in name: + namespace, name = name.rsplit(".", 1) + + # If given a namespace and a repo, they better match + if namespace and repo: + if repo.namespace != namespace: + msg = f"{namespace}.{name}: namespace conflicts with repo '{repo.namespace}'" + msg += " specified from --repo or --namespace argument" + raise ValueError(msg) + + if namespace: + repo = spack.repo.PATH.get_repo(namespace) + + # If not given a namespace, use the default + if not repo: + repo = spack.repo.PATH.first_repo() + + assert repo + return locate_file(name, repo.build_systems_path) + + def locate_file(name: str, path: str) -> str: # convert command names to python module name if path == spack.paths.command_path: @@ -124,19 +148,29 @@ def locate_file(name: str, path: str) -> str: def edit(parser, args): names = args.package - # If `--command`, `--test`, or `--module` is chosen, edit those instead - if args.path: + # If `--command`, `--test`, `--docs`, or `--module` is chosen, edit those instead + if args.path and args.path != "BUILD_SYSTEM": paths = [locate_file(name, args.path) for name in names] if names else [args.path] spack.util.editor.editor(*paths) - elif names: - if args.repo: - repo = spack.repo.from_path(args.repo) - elif args.namespace: - repo = spack.repo.PATH.get_repo(args.namespace) + return + + # Cannot set repo = spack.repo.PATH.first_repo() as default because packages and build_systems + # can include repo information as part of their fullname + repo = None + if args.namespace: + repo = spack.repo.PATH.get_repo(args.namespace) + elif args.repo: + repo = spack.repo.from_path(args.repo) + # default_repo used when no name provided + default_repo = repo or spack.repo.PATH.first_repo() + + if args.path == "BUILD_SYSTEM": + if names: + paths = [locate_build_system(n, repo) for n in names] else: - repo = spack.repo.PATH - paths = [locate_package(name, repo) for name in names] + paths = [default_repo.build_systems_path] spack.util.editor.editor(*paths) - else: - # By default open the directory where packages live - spack.util.editor.editor(spack.repo.PATH.repos[0].packages_path) + return + + paths = [locate_package(n, repo) for n in names] if names else [default_repo.packages_path] + spack.util.editor.editor(*paths) diff --git a/lib/spack/spack/cmd/env.py b/lib/spack/spack/cmd/env.py index dbfdb3fff2468a..5f75d1b328084d 100644 --- a/lib/spack/spack/cmd/env.py +++ b/lib/spack/spack/cmd/env.py @@ -30,12 +30,12 @@ from spack.llnl.util.tty.color import cescape, colorize from spack.util.environment import EnvironmentModifications -description = "manage virtual environments" +description = "manage environments" section = "environments" level = "short" -#: List of subcommands of `spack env` +#: List of subcommands of ``spack env`` subcommands: List[Tuple[str, ...]] = [ ("activate",), ("deactivate",), @@ -58,7 +58,8 @@ # env create # def env_create_setup_parser(subparser): - """create a new environment + """\ + create a new environment create a new environment or, optionally, copy an existing environment @@ -87,7 +88,7 @@ def env_create_setup_parser(subparser): "envfile", nargs="?", default=None, - help="manifest or lock file (ends with '.json' or '.lock')", + help="manifest or lock file (ends with '.json' or '.lock') or an environment name or path", ) subparser.add_argument( "--include-concrete", @@ -142,7 +143,7 @@ def _env_create( Arguments: name_or_path (str): name of the environment to create, or path to it init_file (str or file): optional initialization file -- can be - a JSON lockfile (*.lock, *.json) or YAML 
manifest file + a JSON lockfile (*.lock, *.json), YAML manifest file, or env dir dir (bool): if True, create an environment in a directory instead of a named environment keep_relative (bool): if True, develop paths are copied verbatim into @@ -289,7 +290,7 @@ def create_temp_env_directory(): def _tty_info(msg): """tty.info like function that prints the equivalent printf statement for eval.""" - decorated = f'{colorize("@*b{==>}")} {msg}\n' + decorated = f"{colorize('@*b{==>}')} {msg}\n" executor = "echo" if sys.platform == "win32" else "printf" print(f"{executor} {shlex.quote(decorated)};") @@ -575,7 +576,8 @@ def _env_untrack_or_remove( f"Really {'remove' if remove else 'untrack'} {environments} {envs}?", default=False ) if not answer: - tty.die("Will not remove any environments") + tty.msg(f"Will not remove environment(s) {envs}") + return # keep track of the environments we remove for later printing the exit code removed_env_names = [] @@ -607,7 +609,7 @@ def _env_untrack_or_remove( spack.environment.environment.environment_dir_from_name(bad_env_name, exists_ok=True) ) tty.msg(f"Successfully removed environment '{bad_env_name}'") - removed_env_names.append(env.name) + removed_env_names.append(bad_env_name) # Following the design of linux rm we should exit with a status of 1 # anytime we cannot delete every environment the user asks for. @@ -621,7 +623,7 @@ def _env_untrack_or_remove( # env untrack # def env_untrack_setup_parser(subparser): - """track an environment from a directory in Spack""" + """untrack an environment from a directory in Spack""" subparser.add_argument("env", nargs="+", help="tracked environment name") subparser.add_argument( "-f", "--force", action="store_true", help="force unlink even when environment is active" @@ -639,7 +641,8 @@ def env_untrack(args): # env remove # def env_remove_setup_parser(subparser): - """remove managed environment(s) + """\ + remove managed environment(s) remove existing environment(s) managed by Spack @@ -669,7 +672,8 @@ def env_remove(args): # env rename # def env_rename_setup_parser(subparser): - """rename an existing environment + """\ + rename an existing environment rename a managed environment or move an independent/directory environment @@ -779,7 +783,8 @@ def actions(): # env view # def env_view_setup_parser(subparser): - """manage the environment's view + """\ + manage the environment's view provide the path when enabling a view with a non-default path """ @@ -874,7 +879,8 @@ def env_loads(args): def env_update_setup_parser(subparser): - """update the environment manifest to the latest schema format + """\ + update the environment manifest to the latest schema format update the environment to the latest schema format, which may not be readable by older versions of spack @@ -919,7 +925,8 @@ def env_update(args): def env_revert_setup_parser(subparser): - """restore the environment manifest to its previous format + """\ + restore the environment manifest to its previous format revert the environment's manifest to the schema format from its last 'spack env update' @@ -966,7 +973,8 @@ def env_revert(args): def env_depfile_setup_parser(subparser): - """generate a depfile to exploit parallel builds across specs + """\ + generate a depfile to exploit parallel builds across specs requires the active environment to be concrete """ @@ -1083,8 +1091,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: subsubparser = sp.add_parser( name, aliases=aliases, - description=setup_parser_cmd.__doc__, - 
help=spack.cmd.first_line(setup_parser_cmd.__doc__), + description=spack.cmd.doc_dedented(setup_parser_cmd), + help=spack.cmd.doc_first_line(setup_parser_cmd), ) setup_parser_cmd(subsubparser) diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index 13311ac00e23b6..b8d80fa0f9d95b 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -14,7 +14,7 @@ from spack.llnl.util.tty.colify import colify description = "list extensions for package" -section = "extensions" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index bff7034fd6b158..9d877eb51dbde3 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -13,14 +13,17 @@ import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as color import spack.repo +import spack.solver.reuse import spack.spec import spack.store from spack.cmd.common import arguments +from spack.solver.reuse import create_external_parser +from spack.solver.runtimes import external_config_with_implicit_externals from ..enums import InstallRecordStatus description = "list and search installed packages" -section = "basic" +section = "query" level = "short" @@ -38,7 +41,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: action="store_const", dest="format", const="{/hash}", - help="same as '--format {/hash}'; use with xargs or $()", + help="same as ``--format {/hash}``; use with ``xargs`` or ``$()``", ) format_group.add_argument( "--json", @@ -87,12 +90,18 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: action="store_true", help="don't show full list of installed specs in an environment", ) - subparser.add_argument( + concretized_vs_packages = subparser.add_mutually_exclusive_group() + concretized_vs_packages.add_argument( "-c", "--show-concretized", action="store_true", help="show concretized specs in an environment", ) + concretized_vs_packages.add_argument( + "--show-configured-externals", + action="store_true", + help="show externals defined in the 'packages' section of the configuration", + ) subparser.add_argument( "-f", "--show-flags", @@ -119,6 +128,12 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: action="store_true", help="show only specs that were installed as dependencies", ) + subparser.add_argument( + "-e", + "--external", + action="store_true", + help="show only specs that are marked as externals", + ) subparser.add_argument( "-u", "--unknown", @@ -238,7 +253,7 @@ def decorator(spec, fmt): def display_env(env, args, decorator, results): """Display extra find output when running in an environment. - In an environment, `spack find` outputs a preliminary section + In an environment, ``spack find`` outputs a preliminary section showing the root specs of the environment (this is in addition to the section listing out specs matching the query parameters). 
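Editor's note: the ``spack.cmd.doc_dedented`` and ``spack.cmd.doc_first_line`` helpers used in the hunks above are referenced but not defined in this diff. Given the new ``"""\`` docstring style, where the backslash keeps the first line indented like the rest, they presumably amount to a uniform dedent plus a summary-line extraction; a rough sketch under that assumption::

    import textwrap
    from typing import Callable

    def doc_dedented(fn: Callable) -> str:
        # every docstring line carries the same indentation, so dedent applies uniformly
        return textwrap.dedent(fn.__doc__ or "")

    def doc_first_line(fn: Callable) -> str:
        # summary line only, suitable for an argparse one-line help string
        lines = doc_dedented(fn).strip().splitlines()
        return lines[0] if lines else ""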
@@ -315,8 +330,17 @@ def root_decorator(spec, string): def _find_query(args, env): q_args = query_arguments(args) - concretized_but_not_installed = list() - if env: + concretized_but_not_installed = [] + if args.show_configured_externals: + packages_with_externals = external_config_with_implicit_externals(spack.config.CONFIG) + completion_mode = spack.config.CONFIG.get("concretizer:externals:completion") + results = spack.solver.reuse.SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() + elif env: all_env_specs = env.all_specs() if args.constraint: init_specs = cmd.parse_specs(args.constraint) @@ -337,6 +361,9 @@ def _find_query(args, env): else: results = args.specs(**q_args) + if args.external: + results = [s for s in results if s.external] + # use groups by default except with format. if args.groups is None: args.groups = not args.format diff --git a/lib/spack/spack/cmd/graph.py b/lib/spack/spack/cmd/graph.py index dba419349f1d91..de310e72b0b3fb 100644 --- a/lib/spack/spack/cmd/graph.py +++ b/lib/spack/spack/cmd/graph.py @@ -12,7 +12,7 @@ from spack.llnl.util import tty description = "generate graphs of package dependency relationships" -section = "basic" +section = "query" level = "long" @@ -38,7 +38,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "-s", "--static", action="store_true", - help="graph static (possible) deps, don't concretize (implies --dot)", + help="graph static (possible) deps, don't concretize (implies ``--dot``)", ) subparser.add_argument( "-c", diff --git a/lib/spack/spack/cmd/help.py b/lib/spack/spack/cmd/help.py index 770774db726cd2..2c8acbb6b9e1be 100644 --- a/lib/spack/spack/cmd/help.py +++ b/lib/spack/spack/cmd/help.py @@ -16,7 +16,7 @@ # is only one on spec syntax. # spec_guide = """\ -spec expression syntax: +@*B{spec expression syntax:} package [constraints] [^dependency [constraints] ...] 
@@ -24,43 +24,43 @@ @K{/hash} unique prefix or full hash of installed package - constraints: - versions: + @*B{constraints:} + @*c{versions:} @c{@version} single version @c{@min:max} version range (inclusive) @c{@min:} version or higher @c{@:max} up to version (inclusive) @c{@=version} exact version - compilers: + @*c{compilers:} @g{%compiler} build with @g{%compiler@version} build with specific compiler version @g{%compiler@min:max} specific version range (see above) - compiler flags: + @*c{compiler flags:} @g{cflags="flags"} cppflags, cflags, cxxflags, fflags, ldflags, ldlibs @g{==} propagate flags to package dependencies - variants: + @*c{variants:} @B{+variant} enable @r{-variant} or @r{~variant} disable @B{variant=value} set non-boolean to @B{variant=value1,value2,value3} set multi-value values @B{++}, @r{--}, @r{~~}, @B{==} propagate variants to package dependencies - architecture variants: + @*c{architecture variants:} @m{platform=platform} linux, darwin, freebsd, windows @m{os=operating_system} specific @m{target=target} specific processor @m{arch=platform-os-target} shortcut for all three above - dependencies: + @*c{dependencies:} ^dependency [constraints] specify constraints on dependencies ^@K{/hash} build with a specific installed dependency - examples: + @*B{examples:} hdf5 any hdf5 configuration hdf5 @c{@1.10.1} hdf5 version 1.10.1 hdf5 @c{@1.8:} hdf5 1.8 or higher diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py index c1a5c8a9932714..9ea09d38114c3f 100644 --- a/lib/spack/spack/cmd/info.py +++ b/lib/spack/spack/cmd/info.py @@ -1,38 +1,89 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) +# mypy: disallow-untyped-defs import argparse +import collections +import shutil import sys import textwrap -from itertools import zip_longest +from argparse import Namespace +from typing import Any, Callable, Dict, Iterable, List, Optional, TextIO, Tuple import spack.builder +import spack.cmd +import spack.dependency import spack.deptypes as dt import spack.fetch_strategy as fs import spack.install_test import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as color +import spack.package_base import spack.repo import spack.spec import spack.variant +import spack.version from spack.cmd.common import arguments from spack.llnl.util.tty.colify import colify -from spack.package_base import preferred_version +from spack.package_base import PackageBase +from spack.util.typing import SupportsRichComparison description = "get detailed information on a particular package" -section = "basic" +section = "query" level = "short" header_color = "@*b" plain_format = "@." +#: Allow at least this much room for values when formatting definitions +#: Wrap after a long variant name/condition if we need to do so to preserve this width. +MIN_VALUES_WIDTH = 30 -def padder(str_list, extra=0): + +class Formatter: + """Generic formatter for elements displayed by `spack info`. + + Elements have four parts: name, values, when condition, and description. They can + be formatted two ways (shown here for variants): + + Grouped by when (default):: + + when +cuda + cuda_arch [none] none, 10, 100, 100a, 101, + 101a, 11, 12, 120, 120a, 13 + CUDA architecture + + Or, by name (each name has a when nested under it):: + + cuda_arch [none] none, 10, 100, 100a, 101, + 101a, 11, 12, 120, 120a, 13 + when +cuda + CUDA architecture + + The values and description will be wrapped if needed. 
The name (and any additional info) + will not (so they should be kept short). + + Subclasses are responsible for generating colorized text, but not wrapping, + indentation, or other formatting, for the name, values, and description. + + """ + + def format_name(self, element: Any) -> str: + return str(element) + + def format_values(self, element: Any) -> str: + return "" + + def format_description(self, element: Any) -> str: + return "" + + +def padder(str_list: Iterable, extra: int = 0) -> Callable: """Return a function to pad elements of a list.""" length = max(len(str(s)) for s in str_list) + extra - def pad(string): + def pad(string: str) -> str: string = str(string) padding = max(0, length - len(string)) return string + (padding * " ") @@ -45,6 +96,22 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "-a", "--all", action="store_true", default=False, help="output all package information" ) + by = subparser.add_mutually_exclusive_group() + by.add_argument( + "--by-name", + dest="by_name", + action="store_true", + default=True, + help="list variants, dependencies, etc. in name order, then by when condition", + ) + by.add_argument( + "--by-when", + dest="by_name", + action="store_false", + default=False, + help="group variants, dependencies, etc. first by when condition, then by name", + ) + options = [ ("--detectable", print_detectable.__doc__), ("--maintainers", print_maintainers.__doc__), @@ -56,108 +123,102 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: ("--tags", print_tags.__doc__), ("--tests", print_tests.__doc__), ("--virtuals", print_virtuals.__doc__), - ("--variants-by-name", "list variants in strict name order; don't group by condition"), ] for opt, help_comment in options: subparser.add_argument(opt, action="store_true", help=help_comment) - arguments.add_common_arguments(subparser, ["package"]) + # deprecated for the more generic --by-name, but still here until we can remove it + subparser.add_argument( + "--variants-by-name", + dest="by_name", + action=arguments.DeprecatedStoreTrueAction, + help=argparse.SUPPRESS, + removed_in="a future Spack release", + instructions="use --by-name instead", + ) + arguments.add_common_arguments(subparser, ["spec"]) -def section_title(s): +def section_title(s: str) -> str: return header_color + s + plain_format -def version(s): +def version(s: str) -> str: return spack.spec.VERSION_COLOR + s + plain_format -def variant(s): - return spack.spec.ENABLED_VARIANT_COLOR + s + plain_format +def format_deptype(depflag: int) -> str: + color_flags = zip("gcbm", dt.ALL_FLAGS) + return ", ".join( + color.colorize(f"@{c}{{{dt.flag_to_string(depflag & flag)}}}") + for c, flag in color_flags + if depflag & flag + ) -def license(s): - return spack.spec.VERSION_COLOR + s + plain_format +class DependencyFormatter(Formatter): + def format_name(self, dep: spack.dependency.Dependency) -> str: + return dep.spec._long_spec(color=color.get_color_when()) + def format_values(self, dep: spack.dependency.Dependency) -> str: + return str(format_deptype(dep.depflag)) -class VariantFormatter: - def __init__(self, pkg): - self.variants = pkg.variants - self.headers = ("Name [Default]", "When", "Allowed values", "Description") - # Don't let name or possible values be less than max widths - _, cols = tty.terminal_size() - max_name = min(self.column_widths[0], 30) - max_when = min(self.column_widths[1], 30) - max_vals = min(self.column_widths[2], 20) +def count_bool_variant_conditions( + when_indexed_dictionary: Dict[spack.spec.Spec, Any], +) ->
List[Tuple[int, Tuple[str, bool]]]: + """Counts boolean variants in whens in a dictionary. - # allow the description column to extend as wide as the terminal. - max_description = min( - self.column_widths[3], - # min width 70 cols, 14 cols of margins and column spacing - max(cols, 70) - max_name - max_vals - 14, - ) - self.column_widths = (max_name, max_when, max_vals, max_description) + Returns a list of the most used when conditions for boolean variants along with their value. + """ + top: Dict = collections.defaultdict(int) + for when, _ in when_indexed_dictionary.items(): + for v, variant in when.variants.items(): + if type(variant.value) is bool: + top[(variant.name, variant.value)] += 1 - # Compute the format - self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % ( - self.column_widths[0] + 4, - self.column_widths[1] + 4, - self.column_widths[2] + 4, - ) + # sorted by frequency, highest first + return list(reversed(sorted((n, t) for t, n in top.items()))) - def default(self, v): - s = "on" if v.default is True else "off" - if not isinstance(v.default, bool): - s = v.default - return s - @property - def lines(self): - if not self.variants: - yield " None" - return - - else: - yield " " + self.fmt % self.headers - underline = tuple([w * "=" for w in self.column_widths]) - yield " " + self.fmt % underline - yield "" - for k, e in sorted(self.variants.items()): - v, w = e - name = textwrap.wrap( - "{0} [{1}]".format(k, self.default(v)), width=self.column_widths[0] - ) - if all(spec == spack.spec.Spec() for spec in w): - w = "--" - when = textwrap.wrap(str(w), width=self.column_widths[1]) - allowed = v.allowed_values.replace("True, False", "on, off") - allowed = textwrap.wrap(allowed, width=self.column_widths[2]) - description = [] - for d_line in v.description.split("\n"): - description += textwrap.wrap(d_line, width=self.column_widths[3]) - for t in zip_longest(name, when, allowed, description, fillvalue=""): - yield " " + self.fmt % t - - -def print_dependencies(pkg, args): +def print_dependencies(pkg: PackageBase, args: Namespace) -> None: """output build, link, and run package dependencies""" + print_definitions(pkg, "Dependencies", pkg.dependencies, DependencyFormatter(), args.by_name) - for deptype in ("build", "link", "run"): - color.cprint("") - color.cprint(section_title("%s Dependencies:" % deptype.capitalize())) - deps = sorted(pkg.dependencies_of_type(dt.flag_from_string(deptype))) - if deps: - colify(deps, indent=4) - else: - color.cprint(" None") +def print_dependency_suggestion(pkg: PackageBase) -> None: + variant_counts = count_bool_variant_conditions(pkg.dependencies) + big_variants = [ + (name, val) + for n, (name, val) in variant_counts + # make a note of variants with large counts that aren't already toggled by the user. + if n >= 20 and not (name in pkg.spec.variants and pkg.spec.variants[name].value != val) + ] -def print_detectable(pkg, args): + if big_variants: + spec = spack.spec.Spec(pkg.name) + for name, val in big_variants: + # skip if user specified, or already saw a value (e.g. 
+def print_dependency_suggestion(pkg: PackageBase) -> None:
+    variant_counts = count_bool_variant_conditions(pkg.dependencies)
+    big_variants = [
+        (name, val)
+        for n, (name, val) in variant_counts
+        # make a note of variants with large counts that aren't already toggled by the user.
+        if n >= 20 and not (name in pkg.spec.variants and pkg.spec.variants[name].value != val)
+    ]

-def print_detectable(pkg, args):
+    if big_variants:
+        spec = spack.spec.Spec(pkg.name)
+        for name, val in big_variants:
+            # skip if user specified, or already saw a value (e.g. many +mpi and ~mpi)
+            if name in spec.variants or name in pkg.spec.variants:
+                continue
+            spec.variants[name] = spack.variant.BoolValuedVariant(name, not val)
+
+        # if there is new stuff to add beyond the input
+        if spec.variants:
+            spec.constrain(pkg.spec)  # include already specified constraints
+            print()
+            tty.info(
+                f"{pkg.name} has many complex dependencies; consider this for a simpler view:",
+                f"spack info {spec.format(color=tty.color.get_color_when())}",
+                format="y",
+            )
+
+
+def print_detectable(pkg: PackageBase, args: Namespace) -> None:
     """output information on external detection"""

     color.cprint("")
-    color.cprint(section_title("Externally Detectable: "))
+    color.cprint(section_title("Externally Detectable:"))

     # If the package has an 'executables' or 'libraries' field, it
     # can detect an installation
@@ -181,7 +242,7 @@ def print_detectable(pkg, args):
         color.cprint("    False")


-def print_maintainers(pkg, args):
+def print_maintainers(pkg: PackageBase, args: Namespace) -> None:
     """output package maintainers"""

     if len(pkg.maintainers) > 0:
@@ -190,7 +251,7 @@ def print_maintainers(pkg, args):
         color.cprint(section_title("Maintainers: ") + mnt)


-def print_namespace(pkg, args):
+def print_namespace(pkg: PackageBase, args: Namespace) -> None:
     """output package namespace"""

     repo = spack.repo.PATH.get_repo(pkg.namespace)
@@ -199,7 +260,7 @@ def print_namespace(pkg, args):
     color.cprint(f"    @c{{{repo.namespace}}} at {repo.root}")


-def print_phases(pkg, args):
+def print_phases(pkg: PackageBase, args: Namespace) -> None:
     """output installation phases"""

     builder = spack.builder.create(pkg)
@@ -213,7 +274,7 @@ def print_phases(pkg, args):
         color.cprint(phase_str)


-def print_tags(pkg, args):
+def print_tags(pkg: PackageBase, args: Namespace) -> None:
     """output package tags"""

     color.cprint("")
@@ -225,7 +286,7 @@ def print_tags(pkg, args):
         color.cprint("    None")


-def print_tests(pkg, args):
+def print_tests(pkg: PackageBase, args: Namespace) -> None:
     """output relevant build-time and stand-alone tests"""

     # Some built-in base packages (e.g., Autotools) define callback (e.g.,
@@ -263,66 +324,72 @@ def print_tests(pkg, args):
         color.cprint("    None")


-def _fmt_value(v):
-    if v is None or isinstance(v, bool):
-        return str(v).lower()
-    else:
-        return str(v)
-
+def _fmt_when(when: "spack.spec.Spec", indent: int) -> str:
+    return color.colorize(
+        f"{indent * ' '}@B{{when}} {color.cescape(when._long_spec(color=color.get_color_when()))}"
+    )

-def _fmt_name_and_default(variant):
-    """Print colorized name [default] for a variant."""
-    return color.colorize(f"@c{{{variant.name}}} @C{{[{_fmt_value(variant.default)}]}}")

+def _fmt_variant_value(v: Any) -> str:
+    return str(v).lower() if v is None or isinstance(v, bool) else str(v)

-def _fmt_when(when: "spack.spec.Spec", indent: int):
-    return color.colorize(f"{indent * ' '}@B{{when}} {color.cescape(str(when))}")

+def _print_definition(
+    name_field: str,
+    values_field: str,
+    description: str,
+    max_name_len: int,
+    indent: int,
+    when: Optional[spack.spec.Spec] = None,
+    out: Optional[TextIO] = None,
+) -> None:
+    """Print a definition entry for `spack info` output.

-def _fmt_variant_description(variant, width, indent):
-    """Format a variant's description, preserving explicit line breaks."""
-    return "\n".join(
-        textwrap.fill(
-            line, width=width, initial_indent=indent * " ", subsequent_indent=indent * " "
-        )
-        for line in variant.description.split("\n")
-    )
+    Arguments:
+        name_field: name and optional info, e.g. a default; should be short.
+        values_field: possible values for the entry; wrapped if long.
+        description: description of the field (wrapped if overly long)
+        max_name_len: max length of any definition to be printed
+        indent: size of leading indent for entry
+        when: optional when condition
+        out: stream to print to
+
+    Caller is expected to calculate the max name length in advance and pass it to
+    ``_print_definition``.

-def _fmt_variant(variant, max_name_default_len, indent, when=None, out=None):
+    """
     out = out or sys.stdout
+    cols = shutil.get_terminal_size().columns
+
+    # prevent values from being compressed by really long names
+    name_col_width = min(max_name_len, cols - MIN_VALUES_WIDTH - indent)
+    name_len = color.clen(name_field)
+
+    pad = 4  # min padding between name and values
+    value_indent = (indent + name_col_width + pad) * " "  # left edge of values
+
+    formatted_name_and_values = f"{indent * ' '}{name_field}"
+    if values_field:
+        formatted_values = "\n".join(
+            color.cwrap(
+                values_field,
+                width=cols - 2,
+                initial_indent=value_indent,
+                subsequent_indent=value_indent,
+            )
+        )

-    _, cols = tty.terminal_size()
-
-    name_and_default = _fmt_name_and_default(variant)
-    name_default_len = color.clen(name_and_default)
-
-    values = variant.values
-    if not isinstance(variant.values, (tuple, list, spack.variant.DisjointSetsOfValues)):
-        values = [variant.values]
-
-    # put 'none' first, sort the rest by value
-    sorted_values = sorted(values, key=lambda v: (v != "none", v))
-
-    pad = 4  # min padding between 'name [default]' and values
-    value_indent = (indent + max_name_default_len + pad) * " "  # left edge of values
+        if name_len > name_col_width:
+            # for overlong names, values appear aligned on next line
+            formatted_name_and_values += f"\n{formatted_values}"
+        else:
+            # for regular names, trim indentation to make room for name on same line
+            formatted_values = formatted_values[indent + name_len + pad :]

-    # This preserves any formatting (i.e., newlines) from how the description was
-    # written in package.py, but still wraps long lines for small terminals.
-    # This allows some packages to provide detailed help on their variants (see, e.g., gasnet).
-    formatted_values = "\n".join(
-        textwrap.wrap(
-            f"{', '.join(_fmt_value(v) for v in sorted_values)}",
-            width=cols - 2,
-            initial_indent=value_indent,
-            subsequent_indent=value_indent,
-        )
-    )
-    formatted_values = formatted_values[indent + name_default_len + pad :]
+            # e.g., name [default]    value1, value2, value3, ...
+            formatted_name_and_values += f"{pad * ' '}{formatted_values}"

-    # name [default]   value1, value2, value3, ...
-    padding = pad * " "
-    color.cprint(f"{indent * ' '}{name_and_default}{padding}@c{{{formatted_values}}}", stream=out)
+    out.write(f"{formatted_name_and_values}\n")

     # when
     description_indent = indent + 4
@@ -330,39 +397,62 @@ def _fmt_variant(variant, max_name_default_len, indent, when=None, out=None):
         out.write(_fmt_when(when, description_indent - 2))
         out.write("\n")

-    # description, preserving explicit line breaks from the way it's written in the package file
-    out.write(_fmt_variant_description(variant, cols - 2, description_indent))
-    out.write("\n")
+    # description, preserving explicit line breaks from the way it's written in the
+    # package file, but still wrapping long lines for small terminals. This allows
+    # packages to provide detailed help in their descriptions (see, e.g., gasnet's variants).
+ if description: + formatted_description = "\n".join( + textwrap.fill( + line, + width=cols - 2, + initial_indent=description_indent * " ", + subsequent_indent=description_indent * " ", + ) + for line in description.split("\n") + ) + out.write(formatted_description) + out.write("\n") -def _print_variants_header(pkg): - """output variants""" +def print_header(header: str, when_indexed_dictionary: Dict, formatter: Formatter) -> bool: + color.cprint("") + color.cprint(section_title(f"{header}:")) - if not pkg.variants: + if not when_indexed_dictionary: print(" None") - return + return False + return True - color.cprint("") - color.cprint(section_title("Variants:")) - - # Calculate the max length of the "name [default]" part of the variant display - # This lets us know where to print variant values. - max_name_default_len = max( - color.clen(_fmt_name_and_default(variant)) - for name in pkg.variant_names() - for _, variant in pkg.variant_definitions(name) + +def max_name_length(when_indexed_dictionary: Dict, formatter: Formatter) -> int: + # Calculate the max length of the first field of the definition. Lets us know how + # much to pad other fields on the first line. + return max( + color.clen(formatter.format_name(definition)) + for subkey in spack.package_base._subkeys(when_indexed_dictionary) + for _, definition in spack.package_base._definitions(when_indexed_dictionary, subkey) ) - return max_name_default_len +def print_grouped_by_when( + pkg: PackageBase, header: str, when_indexed_dictionary: Dict, formatter: Formatter +) -> None: + """Generic method to print metadata grouped by when conditions.""" + if not print_header(header, when_indexed_dictionary, formatter): + return + + max_name_len = max_name_length(when_indexed_dictionary, formatter) -def print_variants_grouped_by_when(pkg): - max_name_default_len = _print_variants_header(pkg) + # ensure that items without conditions come first + unconditional_first = lambda item: (item[0] != spack.spec.Spec(), item) indent = 4 - for when, variants_by_name in pkg.variant_items(): - padded_values = max_name_default_len + 4 + for when, by_name in sorted(when_indexed_dictionary.items(), key=unconditional_first): + if not pkg.intersects(when): + continue + start_indent = indent + values_indent = max_name_len + 4 if when != spack.spec.Spec(): sys.stdout.write("\n") @@ -370,39 +460,122 @@ def print_variants_grouped_by_when(pkg): sys.stdout.write("\n") # indent names slightly inside 'when', but line up values - padded_values -= 2 start_indent += 2 + values_indent -= 2 + + for subkey, definition in sorted(by_name.items()): + _print_definition( + formatter.format_name(definition), + formatter.format_values(definition), + formatter.format_description(definition), + values_indent, + start_indent, + when=None, + out=sys.stdout, + ) - for name, variant in sorted(variants_by_name.items()): - _fmt_variant(variant, padded_values, start_indent, None, out=sys.stdout) +def print_by_name( + pkg: PackageBase, header: str, when_indexed_dictionary: Dict, formatter: Formatter +) -> None: + if not print_header(header, when_indexed_dictionary, formatter): + return -def print_variants_by_name(pkg): - max_name_default_len = _print_variants_header(pkg) - max_name_default_len += 4 + max_name_len = max_name_length(when_indexed_dictionary, formatter) + max_name_len += 4 indent = 4 - for name in pkg.variant_names(): - for when, variant in pkg.variant_definitions(name): - _fmt_variant(variant, max_name_default_len, indent, when, out=sys.stdout) + + def 
unconditional_first(definition: Any) -> SupportsRichComparison: + spec = getattr(definition, "spec", None) + if spec: + return (spec != spack.spec.Spec(spec.name), spec) + else: + return getattr(definition, "name", None) # type: ignore[return-value] + + for subkey in spack.package_base._subkeys(when_indexed_dictionary): + for when, definition in sorted( + spack.package_base._definitions(when_indexed_dictionary, subkey), + key=lambda t: unconditional_first(t[1]), + ): + if not pkg.intersects(when): + continue + + _print_definition( + formatter.format_name(definition), + formatter.format_values(definition), + formatter.format_description(definition), + max_name_len, + indent, + when=when, + out=sys.stdout, + ) sys.stdout.write("\n") -def print_variants(pkg, args): - """output variants""" - if args.variants_by_name: - print_variants_by_name(pkg) +def print_definitions( + pkg: PackageBase, + header: str, + when_indexed_dictionary: Dict, + formatter: Formatter, + by_name: bool, +) -> None: + # convert simple dictionaries to dicts of dicts before formatting. + # subkeys are ignored in formatting, so use stringified numbers. + values = when_indexed_dictionary.values() + if when_indexed_dictionary and not isinstance(next(iter(values)), dict): + when_indexed_dictionary = { + when: {str(i): element} + for i, (when, element) in enumerate(when_indexed_dictionary.items()) + } + + if by_name: + print_by_name(pkg, header, when_indexed_dictionary, formatter) else: - print_variants_grouped_by_when(pkg) + print_grouped_by_when(pkg, header, when_indexed_dictionary, formatter) -def print_versions(pkg, args): +class VariantFormatter(Formatter): + def format_name(self, variant: spack.variant.Variant) -> str: + return color.colorize( + f"@c{{{variant.name}}} @C{{[{_fmt_variant_value(variant.default)}]}}" + ) + + def format_values(self, variant: spack.variant.Variant) -> str: + values = ( + [variant.values] + if not isinstance(variant.values, (tuple, list, spack.variant.DisjointSetsOfValues)) + else variant.values + ) + + # put 'none' first, sort the rest by value + sorted_values = sorted(values, key=lambda v: (v != "none", v)) + + return color.colorize(f"@c{{{', '.join(_fmt_variant_value(v) for v in sorted_values)}}}") + + def format_description(self, variant: spack.variant.Variant) -> str: + return variant.description + + +def print_variants(pkg: PackageBase, args: Namespace) -> None: + """output variants""" + print_definitions(pkg, "Variants", pkg.variants, VariantFormatter(), args.by_name) + + +def print_licenses(pkg: PackageBase, args: Namespace) -> None: + """Output the licenses of the project.""" + print_definitions(pkg, "Licenses", pkg.licenses, Formatter(), args.by_name) + + +def print_versions(pkg: PackageBase, args: Namespace) -> None: """output versions""" color.cprint("") color.cprint(section_title("Preferred version: ")) - if not pkg.versions: + versions = [v for v in pkg.versions if pkg.spec.versions.intersects(v)] + + if not versions: color.cprint(version(" None")) color.cprint("") color.cprint(section_title("Safe versions: ")) @@ -411,14 +584,14 @@ def print_versions(pkg, args): color.cprint(section_title("Deprecated versions: ")) color.cprint(version(" None")) else: - pad = padder(pkg.versions, 4) + pad = padder(versions, 4) - preferred = preferred_version(pkg) + preferred = spack.package_base.preferred_version(pkg) - def get_url(version): + def get_url(version: spack.version.VersionType) -> str: try: - return fs.for_package_version(pkg, version) - except spack.fetch_strategy.InvalidArgsError: + 
return str(fs.for_package_version(pkg, version)) + except fs.InvalidArgsError: return "No URL" url = get_url(preferred) if pkg.has_code else "" @@ -429,7 +602,7 @@ def get_url(version): safe = [] deprecated = [] - for v in reversed(sorted(pkg.versions)): + for v in reversed(sorted(versions)): if pkg.has_code: url = get_url(v) if pkg.versions[v].get("deprecated", False): @@ -449,7 +622,7 @@ def get_url(version): color.cprint(line) -def print_virtuals(pkg, args): +def print_virtuals(pkg: PackageBase, args: Namespace) -> None: """output virtual packages""" color.cprint("") @@ -463,27 +636,16 @@ def print_virtuals(pkg, args): color.cprint(" None") -def print_licenses(pkg, args): - """Output the licenses of the project.""" - - color.cprint("") - color.cprint(section_title("Licenses: ")) +def info(parser: argparse.ArgumentParser, args: Namespace) -> None: + specs = spack.cmd.parse_specs(args.spec) + if len(specs) > 1: + tty.die(f"`spack info` requires exactly one spec. Parsed {len(specs)}") + if len(specs) == 0: + tty.die("`spack info` requires a spec.") - if len(pkg.licenses) == 0: - color.cprint(" None") - else: - pad = padder(pkg.licenses, 4) - for when_spec in pkg.licenses: - license_identifier = pkg.licenses[when_spec] - line = license(" {0}".format(pad(license_identifier))) + color.cescape( - str(when_spec) - ) - color.cprint(line) - - -def info(parser, args): - spec = spack.spec.Spec(args.package) + spec = specs[0] pkg_cls = spack.repo.PATH.get_pkg_class(spec.fullname) + pkg_cls.validate_variant_names(spec) pkg = pkg_cls(spec) # Output core package information @@ -498,7 +660,7 @@ def info(parser, args): color.cprint(" None") if getattr(pkg, "homepage"): - color.cprint(section_title("Homepage: ") + pkg.homepage) + color.cprint(section_title("Homepage: ") + str(pkg.homepage)) # Now output optional information in expected order sections = [ @@ -512,10 +674,12 @@ def info(parser, args): (args.all or not args.no_dependencies, print_dependencies), (args.all or args.virtuals, print_virtuals), (args.all or args.tests, print_tests), - (args.all or True, print_licenses), + (True, print_licenses), ] for print_it, func in sections: if print_it: func(pkg, args) + print_dependency_suggestion(pkg) + color.cprint("") diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index 11b4138bc19b0e..10a097f65d63fd 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -17,7 +17,7 @@ import spack.store from spack.cmd.common import arguments from spack.error import InstallError, SpackError -from spack.installer import PackageInstaller +from spack.installer import InstallPolicy from spack.llnl.string import plural from spack.llnl.util import tty @@ -26,14 +26,12 @@ level = "short" -# Determine value of cache flag -def cache_opt(default_opt, use_buildcache): - if use_buildcache == "auto": - return default_opt - elif use_buildcache == "only": - return True +def cache_opt(use_buildcache: str, default: InstallPolicy) -> InstallPolicy: + if use_buildcache == "only": + return "cache_only" elif use_buildcache == "never": - return False + return "source_only" + return default def install_kwargs_from_args(args): @@ -41,6 +39,12 @@ def install_kwargs_from_args(args): to the package installer. 
""" pkg_use_bc, dep_use_bc = args.use_buildcache + if args.cache_only: + default = "cache_only" + elif args.use_cache: + default = "auto" + else: + default = "source_only" return { "fail_fast": args.fail_fast, @@ -51,10 +55,8 @@ def install_kwargs_from_args(args): "verbose": args.verbose or args.install_verbose, "fake": args.fake, "dirty": args.dirty, - "package_use_cache": cache_opt(args.use_cache, pkg_use_bc), - "package_cache_only": cache_opt(args.cache_only, pkg_use_bc), - "dependencies_use_cache": cache_opt(args.use_cache, dep_use_bc), - "dependencies_cache_only": cache_opt(args.cache_only, dep_use_bc), + "root_policy": cache_opt(pkg_use_bc, default), + "dependencies_policy": cache_opt(dep_use_bc, default), "include_build_deps": args.include_build_deps, "stop_at": args.until, "unsigned": args.unsigned, @@ -384,9 +386,9 @@ def install_with_active_env(env: ev.Environment, args, install_kwargs, reporter) specs_to_install = env.all_matching_specs(*specs) if not specs_to_install: msg = ( - "Cannot install '{0}' because no matching specs are in the current environment." - " You can add specs to the environment with 'spack add {0}', or as part" - " of the install command with 'spack install --add {0}'" + "Cannot install '{0}' because no matching specs are in the current environment.\n" + " Specs can be added to the environment with 'spack add {0}',\n" + " or as part of the install command with 'spack install --add {0}'" ).format(" ".join(args.spec)) tty.die(msg) @@ -437,6 +439,11 @@ def install_without_active_env(args, install_kwargs, reporter): installs = [s.package for s in concrete_specs] install_kwargs["explicit"] = [s.dag_hash() for s in concrete_specs] + if spack.config.get("config:installer", "old") == "new": + from spack.new_installer import PackageInstaller + else: + from spack.installer import PackageInstaller + try: builder = PackageInstaller(installs, **install_kwargs) builder.install() diff --git a/lib/spack/spack/cmd/license.py b/lib/spack/spack/cmd/license.py index deba07f00074aa..09fa5ba7a31722 100644 --- a/lib/spack/spack/cmd/license.py +++ b/lib/spack/spack/cmd/license.py @@ -13,7 +13,7 @@ import spack.paths description = "list and check license headers on files in spack" -section = "developer" +section = "query" level = "long" #: SPDX license id must appear in the first lines of a file diff --git a/lib/spack/spack/cmd/list.py b/lib/spack/spack/cmd/list.py index 33d78162253fa9..eaa63f3ebeb02f 100644 --- a/lib/spack/spack/cmd/list.py +++ b/lib/spack/spack/cmd/list.py @@ -21,7 +21,7 @@ from spack.version import VersionList description = "list and search available packages" -section = "basic" +section = "query" level = "short" diff --git a/lib/spack/spack/cmd/load.py b/lib/spack/spack/cmd/load.py index c5cb3a1174f622..37f89617606523 100644 --- a/lib/spack/spack/cmd/load.py +++ b/lib/spack/spack/cmd/load.py @@ -71,7 +71,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "--list", action="store_true", default=False, - help="show loaded packages: same as `spack find --loaded`", + help="show loaded packages: same as ``spack find --loaded``", ) diff --git a/lib/spack/spack/cmd/location.py b/lib/spack/spack/cmd/location.py index 39fa6ed411dd6f..9d23aa6d9427e9 100644 --- a/lib/spack/spack/cmd/location.py +++ b/lib/spack/spack/cmd/location.py @@ -15,7 +15,7 @@ from spack.cmd.common import arguments description = "print out locations of packages and spack directories" -section = "basic" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/log_parse.py 
b/lib/spack/spack/cmd/log_parse.py
index 32bbc9ba042406..910908c246329d 100644
--- a/lib/spack/spack/cmd/log_parse.py
+++ b/lib/spack/spack/cmd/log_parse.py
@@ -9,7 +9,7 @@ from spack.util.log_parse import make_log_context, parse_log_events

 description = "filter errors and warnings from build logs"
-section = "build"
+section = "developer"
 level = "long"

 event_types = ("errors", "warnings")
diff --git a/lib/spack/spack/cmd/logs.py b/lib/spack/spack/cmd/logs.py
index 16f6b2e07dd941..94db8798bd3dec 100644
--- a/lib/spack/spack/cmd/logs.py
+++ b/lib/spack/spack/cmd/logs.py
@@ -14,10 +14,10 @@ import spack.spec
 import spack.util.compression as compression
 from spack.cmd.common import arguments
-from spack.main import SpackCommandError
+from spack.error import SpackError

 description = "print out logs for packages"
-section = "basic"
+section = "query"
 level = "long"

@@ -39,19 +39,19 @@ def _logs(cmdline_spec: spack.spec.Spec, concrete_spec: spack.spec.Spec):
         # combined log file is only written after the build is finished.
         log_path = concrete_spec.package.log_path
     else:
-        raise SpackCommandError(f"{cmdline_spec} is not installed or staged")
+        raise SpackError(f"{cmdline_spec} is not installed or staged")

     try:
         stream = open(log_path, "rb")
     except OSError as e:
         if e.errno == errno.ENOENT:
-            raise SpackCommandError(f"No logs are available for {cmdline_spec}") from e
-        raise SpackCommandError(f"Error reading logs for {cmdline_spec}: {e}") from e
+            raise SpackError(f"No logs are available for {cmdline_spec}") from e
+            raise SpackError(f"Error reading logs for {cmdline_spec}: {e}") from e

     with stream as f:
         ext = compression.extension_from_magic_numbers_by_stream(f, decompress=False)
         if ext and ext != "gz":
-            raise SpackCommandError(f"Unsupported storage format for {log_path}: {ext}")
+            raise SpackError(f"Unsupported storage format for {log_path}: {ext}")

         # If the log file is gzip compressed, wrap it with a decompressor
         _dump_byte_stream_to_stdout(gzip.GzipFile(fileobj=f) if ext == "gz" else f)
@@ -61,10 +61,10 @@ def logs(parser, args):
     specs = spack.cmd.parse_specs(args.spec)

     if not specs:
-        raise SpackCommandError("You must supply a spec.")
+        raise SpackError("You must supply a spec.")

     if len(specs) != 1:
-        raise SpackCommandError("Too many specs. Supply only one.")
+        raise SpackError("Too many specs. Supply only one.")

     concrete_spec = spack.cmd.matching_spec_from_env(specs[0])
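The logs command above decides whether to decompress by sniffing the stream's magic numbers rather than trusting the file extension. A minimal sketch of that approach using only the standard library (the helper name is hypothetical; Spack's real detection lives in spack.util.compression):

    import gzip
    import shutil
    import sys

    def dump_maybe_gzipped(path: str) -> None:
        """Stream a log file to stdout, transparently decompressing gzip.

        Detection is by magic number (0x1f 0x8b), not file extension."""
        with open(path, "rb") as f:
            magic = f.read(2)
            f.seek(0)
            stream = gzip.GzipFile(fileobj=f) if magic == b"\x1f\x8b" else f
            shutil.copyfileobj(stream, sys.stdout.buffer)
            sys.stdout.buffer.flush()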
diff --git a/lib/spack/spack/cmd/maintainers.py b/lib/spack/spack/cmd/maintainers.py
index d43f6c3d7fc94d..7d6afa4b891e54 100644
--- a/lib/spack/spack/cmd/maintainers.py
+++ b/lib/spack/spack/cmd/maintainers.py
@@ -11,7 +11,7 @@ from spack.llnl.util.tty.colify import colify

 description = "get information about package maintainers"
-section = "developer"
+section = "query"
 level = "long"

diff --git a/lib/spack/spack/cmd/mark.py b/lib/spack/spack/cmd/mark.py
index f58f82768f270d..726ec47122ef12 100644
--- a/lib/spack/spack/cmd/mark.py
+++ b/lib/spack/spack/cmd/mark.py
@@ -4,8 +4,10 @@

 import argparse
 import sys
+from typing import List, Union

 import spack.cmd
+import spack.spec
 import spack.store
 from spack.cmd.common import arguments
 from spack.llnl.util import tty
@@ -13,7 +15,7 @@ from ..enums import InstallRecordStatus

 description = "mark packages as explicitly or implicitly installed"
-section = "admin"
+section = "build"
 level = "long"

 error_message = """You can either:
@@ -51,16 +53,14 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None:
     )


-def find_matching_specs(specs, allow_multiple_matches=False):
-    """Returns a list of specs matching the not necessarily
-    concretized specs given from cli
+def find_matching_specs(
+    specs: List[Union[str, spack.spec.Spec]], allow_multiple_matches: bool = False
+) -> List[spack.spec.Spec]:
+    """Returns a list of specs matching the not necessarily concretized specs given from cli

     Args:
-        specs (list): list of specs to be matched against installed packages
-        allow_multiple_matches (bool): if True multiple matches are admitted
-
-    Return:
-        list of specs
+        specs: list of specs to be matched against installed packages
+        allow_multiple_matches: if True multiple matches are admitted
     """
     # List of specs that match expressions given via command line
     specs_from_cli = []
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 394518a70c6141..13c9ec8659697f 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -4,6 +4,7 @@

 import argparse
 import sys
+from concurrent.futures import as_completed

 import spack.caches
 import spack.cmd
@@ -17,9 +18,11 @@ import spack.mirrors.utils
 import spack.repo
 import spack.spec
+import spack.util.parallel
 import spack.util.web as web_util
 from spack.cmd.common import arguments
 from spack.error import SpackError
+from spack.llnl.string import comma_or

 description = "manage mirrors (source and binary)"
 section = "config"
@@ -36,7 +39,6 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None:
     create_parser.add_argument(
         "-d", "--directory", default=None, help="directory in which to create mirror"
     )
-
     create_parser.add_argument(
         "-a",
         "--all",
@@ -45,6 +47,13 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None:
         " in the current environment if there is an active environment"
         " (this requires significant time and space)",
     )
+    create_parser.add_argument(
+        "-j",
+        "--jobs",
+        type=int,
+        default=None,
+        help="use a given number of workers to make the mirror (used in combination with -a)",
+    )
     create_parser.add_argument("--file", help="file with specs of packages to put in mirror")
     create_parser.add_argument(
         "--exclude-file",
@@ -109,7 +118,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None:
         choices=("binary", "source"),
         help=(
             "specify the mirror type: for both binary "
-            "and source use `--type binary --type source` (default)"
+            "and source use ``--type
binary --type source`` (default)" ), ) add_parser.add_argument( @@ -134,17 +143,26 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: default=None, dest="signed", ) + add_parser.add_argument( + "--name", + "-n", + action="store", + dest="view_name", + help="Name of the index view for a binary mirror", + ) arguments.add_connection_args(add_parser, False) # Remove remove_parser = sp.add_parser("remove", aliases=["rm"], help=mirror_remove.__doc__) remove_parser.add_argument("name", help="mnemonic name for mirror", metavar="mirror") remove_parser.add_argument( - "--scope", - action=arguments.ConfigScope, - default=lambda: spack.config.default_modify_scope(), - help="configuration scope to modify", + "--scope", action=arguments.ConfigScope, default=None, help="configuration scope to modify" + ) + remove_parser.add_argument( + "--all-scopes", + action="store_true", + default=False, + help="remove from all config scopes (default: highest scope with matching mirror)", ) - # Set-Url set_url_parser = sp.add_parser("set-url", help=mirror_set_url.__doc__) set_url_parser.add_argument("name", help="mnemonic name for mirror", metavar="mirror") @@ -180,7 +198,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: choices=("binary", "source"), help=( "specify the mirror type: for both binary " - "and source use `--type binary --type source`" + "and source use ``--type binary --type source``" ), ) set_parser.add_argument("--url", help="url of mirror directory from 'spack mirror create'") @@ -231,18 +249,15 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: ) -def _configure_access_pair( - args, id_tok, id_variable_tok, secret_tok, secret_variable_tok, default=None -): +def _configure_access_pair(args, id_tok, id_variable_tok, secret_variable_tok, default=None): """Configure the access_pair options""" # Check if any of the arguments are set to update this access_pair. 
# If none are set, then skip computing the new access pair args_id = getattr(args, id_tok) args_id_variable = getattr(args, id_variable_tok) - args_secret = getattr(args, secret_tok) args_secret_variable = getattr(args, secret_variable_tok) - if not any([args_id, args_id_variable, args_secret, args_secret_variable]): + if not any([args_id, args_id_variable, args_secret_variable]): return None def _default_value(id_): @@ -261,7 +276,6 @@ def _default_variable(id_): id_ = None id_variable = None - secret = None secret_variable = None # Get the value/default value if the argument of the inverse @@ -269,31 +283,22 @@ def _default_variable(id_): id_ = getattr(args, id_tok) or _default_value("id") if not args_id: id_variable = getattr(args, id_variable_tok) or _default_variable("id") - if not args_secret_variable: - secret = getattr(args, secret_tok) or _default_value("secret") - if not args_secret: - secret_variable = getattr(args, secret_variable_tok) or _default_variable("secret") - - if (id_ or id_variable) and (secret or secret_variable): - if secret: - if not id_: - raise SpackError("Cannot add mirror with a variable id and text secret") - - return [id_, secret] - else: - return dict( - [ - (("id", id_) if id_ else ("id_variable", id_variable)), - ("secret_variable", secret_variable), - ] - ) + secret_variable = getattr(args, secret_variable_tok) or _default_variable("secret") + + if (id_ or id_variable) and secret_variable: + return dict( + [ + (("id", id_) if id_ else ("id_variable", id_variable)), + ("secret_variable", secret_variable), + ] + ) else: - if id_ or id_variable or secret or secret_variable is not None: + if id_ or id_variable or secret_variable is not None: id_arg_tok = id_tok.replace("_", "-") - secret_arg_tok = secret_tok.replace("_", "-") + secret_variable_arg_tok = secret_variable_tok.replace("_", "-") tty.warn( "Expected both parts of the access pair to be specified. " - f"(i.e. --{id_arg_tok} and --{secret_arg_tok})" + f"(i.e. --{id_arg_tok} and --{secret_variable_arg_tok})" ) return None @@ -303,16 +308,14 @@ def mirror_add(args): """add a mirror to Spack""" if ( args.s3_access_key_id - or args.s3_access_key_secret - or args.s3_access_token or args.s3_access_key_id_variable or args.s3_access_key_secret_variable or args.s3_access_token_variable or args.s3_profile or args.s3_endpoint_url + or args.view_name or args.type or args.oci_username - or args.oci_password or args.oci_username_variable or args.oci_password_variable or args.autopush @@ -320,29 +323,13 @@ def mirror_add(args): ): connection = {"url": args.url} # S3 Connection - if args.s3_access_key_secret: - tty.warn( - "Configuring mirror secrets as plain text with --s3-access-key-secret is " - "deprecated. Use --s3-access-key-secret-variable instead" - ) - if args.oci_password: - tty.warn( - "Configuring mirror secrets as plain text with --oci-password is deprecated. 
" - "Use --oci-password-variable instead" - ) access_pair = _configure_access_pair( - args, - "s3_access_key_id", - "s3_access_key_id_variable", - "s3_access_key_secret", - "s3_access_key_secret_variable", + args, "s3_access_key_id", "s3_access_key_id_variable", "s3_access_key_secret_variable" ) if access_pair: connection["access_pair"] = access_pair - if args.s3_access_token: - connection["access_token"] = args.s3_access_token - elif args.s3_access_token_variable: + if args.s3_access_token_variable: connection["access_token_variable"] = args.s3_access_token_variable if args.s3_profile: @@ -353,7 +340,7 @@ def mirror_add(args): # OCI Connection access_pair = _configure_access_pair( - args, "oci_username", "oci_username_variable", "oci_password", "oci_password_variable" + args, "oci_username", "oci_username_variable", "oci_password_variable" ) if access_pair: connection["access_pair"] = access_pair @@ -365,6 +352,9 @@ def mirror_add(args): connection["autopush"] = args.autopush if args.signed is not None: connection["signed"] = args.signed + if args.view_name: + connection["view"] = args.view_name + mirror = spack.mirrors.mirror.Mirror(connection, name=args.name) else: mirror = spack.mirrors.mirror.Mirror(args.url, name=args.name) @@ -373,7 +363,21 @@ def mirror_add(args): def mirror_remove(args): """remove a mirror by name""" - spack.mirrors.utils.remove(args.name, args.scope) + name = args.name + scopes = [args.scope] if args.scope else list(spack.config.CONFIG.scopes.keys()) + + removed = False + for scope in scopes: + removed_from_this_scope = spack.mirrors.utils.remove(name, scope) + if removed_from_this_scope: + tty.msg(f"Removed mirror {name} from {scope} scope") + + removed |= removed_from_this_scope + if removed and not args.all_scopes: + return + + if not removed: + tty.die(f"No mirror with name {name} in {comma_or(scopes)} scope") def _configure_mirror(args): @@ -394,14 +398,13 @@ def _configure_mirror(args): args, "s3_access_key_id", "s3_access_key_id_variable", - "s3_access_key_secret", "s3_access_key_secret_variable", default=default_access_pair, ) if access_pair: changes["access_pair"] = access_pair - if args.s3_access_token: - changes["access_token"] = args.s3_access_token + if getattr(args, "s3_access_token_variable", None): + changes["access_token_variable"] = args.s3_access_token_variable if args.s3_profile: changes["profile"] = args.s3_profile if args.s3_endpoint_url: @@ -410,7 +413,6 @@ def _configure_mirror(args): args, "oci_username", "oci_username_variable", - "oci_password", "oci_password_variable", default=default_access_pair, ) @@ -632,45 +634,89 @@ def mirror_create(args): # When no directory is provided, the source dir is used path = args.directory or spack.caches.fetch_cache_location() - mirror_specs, mirror_fn = _specs_and_action(args) - mirror_fn(mirror_specs, path=path, skip_unstable_versions=args.skip_unstable_versions) + mirror_specs = _specs_to_mirror(args) + workers = args.jobs + if workers is None: + if args.all: + workers = min( + 16, spack.config.determine_number_of_jobs(parallel=True), len(mirror_specs) + ) + else: + workers = 1 + + create_mirror_for_all_specs( + mirror_specs, + path=path, + skip_unstable_versions=args.skip_unstable_versions, + workers=workers, + ) -def _specs_and_action(args): +def _specs_to_mirror(args): include_fn = IncludeFilter(args) if args.all and not ev.active_environment(): mirror_specs = all_specs_with_all_versions() - mirror_fn = create_mirror_for_all_specs elif args.all and ev.active_environment(): mirror_specs = 
concrete_specs_from_environment() - mirror_fn = create_mirror_for_individual_specs else: mirror_specs = concrete_specs_from_user(args) - mirror_fn = create_mirror_for_individual_specs mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=include_fn) - return mirror_specs, mirror_fn + return mirror_specs + +def create_mirror_for_one_spec(candidate, mirror_cache): + pkg_cls = spack.repo.PATH.get_pkg_class(candidate.name) + pkg_obj = pkg_cls(spack.spec.Spec(candidate)) + mirror_stats = spack.mirrors.utils.MirrorStatsForOneSpec(candidate) + spack.mirrors.utils.create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats) + mirror_stats.finalize() + return mirror_stats -def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions): - mirror_cache, mirror_stats = spack.mirrors.utils.mirror_cache_and_stats( + +def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions, workers): + mirror_cache = spack.mirrors.utils.get_mirror_cache( path, skip_unstable_versions=skip_unstable_versions ) - for candidate in mirror_specs: - pkg_cls = spack.repo.PATH.get_pkg_class(candidate.name) - pkg_obj = pkg_cls(spack.spec.Spec(candidate)) - mirror_stats.next_spec(pkg_obj.spec) - spack.mirrors.utils.create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats) + mirror_stats = spack.mirrors.utils.MirrorStatsForAllSpecs() + with spack.util.parallel.make_concurrent_executor(jobs=workers) as executor: + # Submit tasks to the process pool + futures = [ + executor.submit(create_mirror_for_one_spec, candidate, mirror_cache) + for candidate in mirror_specs + ] + for mirror_future in as_completed(futures): + ext_mirror_stats = mirror_future.result() + mirror_stats.merge(ext_mirror_stats) + process_mirror_stats(*mirror_stats.stats()) + return mirror_stats -def create_mirror_for_individual_specs(mirror_specs, path, skip_unstable_versions): - present, mirrored, error = spack.mirrors.utils.create( - path, mirror_specs, skip_unstable_versions - ) - tty.msg("Summary for mirror in {}".format(path)) - process_mirror_stats(present, mirrored, error) +def create(path, specs, skip_unstable_versions=False): + """Create a directory to be used as a spack mirror, and fill it with + package archives. + + Arguments: + path: Path to create a mirror directory hierarchy in. + specs: Any package versions matching these specs will be added \ + to the mirror. + skip_unstable_versions: if true, this skips adding resources when + they do not have a stable archive checksum (as determined by + ``fetch_strategy.stable_target``) + + Returns: + A tuple of lists, each containing specs + + * present: Package specs that were already present. + * mirrored: Package specs that were successfully mirrored. + * error: Package specs that failed to mirror due to some error. + """ + # automatically spec-ify anything in the specs array. + specs = [s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s) for s in specs] + mirror_stats = create_mirror_for_all_specs(specs, path, skip_unstable_versions, workers=1) + return mirror_stats.stats() def mirror_destroy(args): diff --git a/lib/spack/spack/cmd/modules/__init__.py b/lib/spack/spack/cmd/modules/__init__.py index 7ae1125f576e2a..4b2454a3b09d89 100644 --- a/lib/spack/spack/cmd/modules/__init__.py +++ b/lib/spack/spack/cmd/modules/__init__.py @@ -356,9 +356,9 @@ def refresh(module_type, specs, args): #: Dictionary populated with the list of sub-commands. 
#: Each sub-command must be callable and accept 3 arguments: #: -#: - module_type: the type of module it refers to -#: - specs : the list of specs to be processed -#: - args : namespace containing the parsed command line arguments +#: - module_type: the type of module it refers to +#: - specs : the list of specs to be processed +#: - args : namespace containing the parsed command line arguments callbacks = {"refresh": refresh, "rm": rm, "find": find, "loads": loads} @@ -384,7 +384,9 @@ def modules_cmd(parser, args, module_type, callbacks=callbacks): for s in specs: spec_fmt = ( "{hash:7} {name}{@version}{compiler_flags}{variants}" - "{arch=architecture} {%compiler}" + "{ platform=architecture.platform}{ os=architecture.os}" + "{ target=architecture.target}" + "{%compiler}" ) msg += "\t" + s.cformat(spec_fmt) + "\n" tty.die(msg, "In this context exactly *one* match is needed.") diff --git a/lib/spack/spack/cmd/patch.py b/lib/spack/spack/cmd/patch.py index b3af42d87fb18d..8f6522560f83bb 100644 --- a/lib/spack/spack/cmd/patch.py +++ b/lib/spack/spack/cmd/patch.py @@ -12,7 +12,7 @@ import spack.traverse from spack.cmd.common import arguments -description = "patch expanded archive sources in preparation for install" +description = "patch expanded sources in preparation for install" section = "build" level = "long" diff --git a/lib/spack/spack/cmd/pkg.py b/lib/spack/spack/cmd/pkg.py index e9bac8fef6d917..82a8eb40849553 100644 --- a/lib/spack/spack/cmd/pkg.py +++ b/lib/spack/spack/cmd/pkg.py @@ -86,7 +86,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: def pkg_add(args): - """add a package to the git stage with `git add`""" + """add a package to the git stage with ``git add``""" spack.repo.add_package_to_git_stage(args.packages, spack.repo.builtin_repo()) @@ -168,7 +168,8 @@ def pkg_hash(args): def get_grep(required=False): """Get a grep command to use with ``spack pkg grep``.""" grep = exe.which(os.environ.get("SPACK_GREP") or "grep", required=required) - grep.ignore_quotes = True # allow `spack pkg grep '"quoted string"'` without warning + if grep: + grep.ignore_quotes = True # allow `spack pkg grep '"quoted string"'` without warning return grep diff --git a/lib/spack/spack/cmd/providers.py b/lib/spack/spack/cmd/providers.py index b1a7d764cbfad7..8a358db6f4c6b8 100644 --- a/lib/spack/spack/cmd/providers.py +++ b/lib/spack/spack/cmd/providers.py @@ -11,7 +11,7 @@ import spack.repo description = "list packages that provide a particular virtual package" -section = "basic" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/python.py b/lib/spack/spack/cmd/python.py index 3b87e6c7e76e64..1f99f0b9b199dc 100644 --- a/lib/spack/spack/cmd/python.py +++ b/lib/spack/spack/cmd/python.py @@ -17,6 +17,8 @@ section = "developer" level = "long" +IS_WINDOWS = sys.platform == "win32" + def setup_parser(subparser: argparse.ArgumentParser) -> None: subparser.add_argument( @@ -134,12 +136,14 @@ def python_interpreter(args): propagate_exceptions_from(console) console.runsource(args.python_command) else: - # Provides readline support, allowing user to use arrow keys - console.push("import readline") - # Provide tabcompletion - console.push("from rlcompleter import Completer") - console.push("readline.set_completer(Completer(locals()).complete)") - console.push('readline.parse_and_bind("tab: complete")') + # no readline module on Windows + if not IS_WINDOWS: + # Provides readline support, allowing user to use arrow keys + console.push("import readline") + # Provide tabcompletion + 
console.push("from rlcompleter import Completer") + console.push("readline.set_completer(Completer(locals()).complete)") + console.push('readline.parse_and_bind("tab: complete")') console.interact( "Spack version %s\nPython %s, %s %s" diff --git a/lib/spack/spack/cmd/repo.py b/lib/spack/spack/cmd/repo.py index 3191691cab8f4e..6525de9951a8f4 100644 --- a/lib/spack/spack/cmd/repo.py +++ b/lib/spack/spack/cmd/repo.py @@ -48,7 +48,10 @@ def setup_parser(subparser: argparse.ArgumentParser): # List list_parser = sp.add_parser("list", aliases=["ls"], help=repo_list.__doc__) list_parser.add_argument( - "--scope", action=arguments.ConfigScope, help="configuration scope to read from" + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + help="configuration scope to read from", ) output_group = list_parser.add_mutually_exclusive_group() output_group.add_argument("--names", action="store_true", help="show configuration names only") @@ -113,10 +116,13 @@ def setup_parser(subparser: argparse.ArgumentParser): "namespace_or_path", help="namespace or path of a Spack package repository" ) remove_parser.add_argument( - "--scope", - action=arguments.ConfigScope, - default=lambda: spack.config.default_modify_scope(), - help="configuration scope to modify", + "--scope", action=arguments.ConfigScope, default=None, help="configuration scope to modify" + ) + remove_parser.add_argument( + "--all-scopes", + action="store_true", + default=False, + help="remove from all config scopes (default: highest scope with matching repo)", ) # Migrate @@ -252,8 +258,18 @@ def repo_add(args): def repo_remove(args): """remove a repository from Spack's configuration""" - namespace_or_path = args.namespace_or_path - repos: Dict[str, str] = spack.config.get("repos", scope=args.scope) + scopes = [args.scope] if args.scope else list(spack.config.CONFIG.scopes.keys()) + found_and_removed = False + for scope in scopes: + found_and_removed |= _remove_repo(args.namespace_or_path, scope) + if found_and_removed and not args.all_scopes: + return + if not found_and_removed: + tty.die(f"No repository with path or namespace: {args.namespace_or_path}") + + +def _remove_repo(namespace_or_path, scope): + repos: Dict[str, str] = spack.config.get("repos", scope=scope) if namespace_or_path in repos: # delete by name (from config) @@ -262,7 +278,7 @@ def repo_remove(args): # delete by namespace or path (requires constructing the repo) canon_path = spack.util.path.canonicalize_path(namespace_or_path) descriptors = spack.repo.RepoDescriptors.from_config( - spack.repo.package_repository_lock(), spack.config.CONFIG, scope=args.scope + spack.repo.package_repository_lock(), spack.config.CONFIG, scope=scope ) for name, descriptor in descriptors.items(): descriptor.initialize(fetch=False) @@ -277,11 +293,12 @@ def repo_remove(args): key = name break else: - tty.die(f"No repository with path or namespace: {namespace_or_path}") + return False del repos[key] - spack.config.set("repos", repos, args.scope) - tty.msg(f"Removed repository '{namespace_or_path}'.") + spack.config.set("repos", repos, scope) + tty.msg(f"Removed repository '{namespace_or_path}' from scope '{scope}'.") + return True def repo_list(args): @@ -322,8 +339,9 @@ def repo_list(args): # Print aligned output for status, namespace, api, path in repo_info: + cpath = color.cescape(path) color.cprint( - f"{status} {namespace:<{max_namespace_width}} {api:<{max_api_width}} {path}" + f"{status} {namespace:<{max_namespace_width}} {api:<{max_api_width}} {cpath}" ) 
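The ``repo remove`` and ``mirror remove`` changes above share a pattern: walk the configuration scopes from highest to lowest precedence, remove the first match, and keep going only when ``--all-scopes`` is given. A standalone sketch of that control flow, with a plain dict standing in for Spack's config scopes (all names here are hypothetical):

    from typing import Dict, List

    def remove_entry(name: str, scopes: Dict[str, List[str]], all_scopes: bool = False) -> bool:
        """Remove `name` from the highest-precedence scope containing it,
        or from every scope when `all_scopes` is True."""
        removed = False
        for scope, entries in scopes.items():  # assumes highest-precedence scope first
            if name in entries:
                entries.remove(name)
                print(f"Removed {name} from {scope} scope")
                removed = True
                if not all_scopes:
                    break
        return removed

    scopes = {"user": ["builtin", "myrepo"], "site": ["myrepo"]}
    remove_entry("myrepo", scopes)  # removes only the 'user' copy
    assert scopes == {"user": ["builtin"], "site": ["myrepo"]}

Dying with an error only after all scopes came up empty, as both commands do, keeps the single-scope and all-scope paths on the same code route.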
diff --git a/lib/spack/spack/cmd/resource.py b/lib/spack/spack/cmd/resource.py index b25bb040d9f32f..1792b7e3e9f99b 100644 --- a/lib/spack/spack/cmd/resource.py +++ b/lib/spack/spack/cmd/resource.py @@ -9,8 +9,8 @@ import spack.llnl.util.tty.color as color import spack.repo -description = "list downloadable resources (tarballs, repos, patches, etc.)" -section = "basic" +description = "list downloadable resources (tarballs, repos, patches)" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/solve.py b/lib/spack/spack/cmd/solve.py index 03016fe07e58b6..4063f1c64ea109 100644 --- a/lib/spack/spack/cmd/solve.py +++ b/lib/spack/spack/cmd/solve.py @@ -14,6 +14,7 @@ import spack.hash_types as ht import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as color +import spack.package_base import spack.solver.asp as asp import spack.spec @@ -55,22 +56,34 @@ def _process_result(result, show, required_format, kwargs): opt, _, _ = min(result.answers) if ("opt" in show) and (not required_format): tty.msg("Best of %d considered solutions." % result.nmodels) - tty.msg("Optimization Criteria:") - - maxlen = max(len(s[2]) for s in result.criteria) - color.cprint("@*{ Priority Criterion %sInstalled ToBuild}" % ((maxlen - 10) * " ")) - - fmt = " @K{%%-8d} %%-%ds%%9s %%7s" % maxlen - for i, (installed_cost, build_cost, name) in enumerate(result.criteria, 1): - color.cprint( - fmt - % ( - i, - name, - "-" if build_cost is None else installed_cost, - installed_cost if build_cost is None else build_cost, - ) - ) + + print() + maxlen = max(len(s.name) for s in result.criteria) + color.cprint("@*{ Priority Value Criterion}") + + for i, criterion in enumerate(result.criteria, 1): + value = f"@K{{{criterion.value:>5}}}" + grey_out = True + if criterion.value > 0: + value = f"@*{{{criterion.value:>5}}}" + grey_out = False + + if grey_out: + lc = "@K" + elif criterion.kind == asp.OptimizationKind.CONCRETE: + lc = "@b" + elif criterion.kind == asp.OptimizationKind.BUILD: + lc = "@g" + else: + lc = "@y" + + color.cprint(f" @K{{{i:8}}} {value} {lc}{{{criterion.name:<{maxlen}}}}") + print() + print() + color.cprint(" @*{Legend:}") + color.cprint(" @g{Specs to be built}") + color.cprint(" @b{Reused specs}") + color.cprint(" @y{Other criteria}") print() # dump the solutions as concretized specs @@ -106,6 +119,12 @@ def solve(parser, args): "show_types": args.types, "status_fn": install_status_fn if args.install_status else None, "hashes": args.long or args.very_long, + "highlight_version_fn": ( + spack.package_base.non_preferred_version if args.non_defaults else None + ), + "highlight_variant_fn": ( + spack.package_base.non_default_variant if args.non_defaults else None + ), } # process output options diff --git a/lib/spack/spack/cmd/spec.py b/lib/spack/spack/cmd/spec.py index 7cb41101931b59..aeedb3e37a3b78 100644 --- a/lib/spack/spack/cmd/spec.py +++ b/lib/spack/spack/cmd/spec.py @@ -11,6 +11,7 @@ import spack.hash_types as ht import spack.llnl.util.lang as lang import spack.llnl.util.tty as tty +import spack.package_base import spack.spec import spack.store import spack.traverse @@ -59,6 +60,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: default=None, help="print concrete spec with the specified format string", ) + arguments.add_common_arguments(format_group, ["show_non_defaults"]) + subparser.add_argument( "-c", "--cover", @@ -120,5 +123,11 @@ def spec(parser, args): status_fn=install_status_fn if args.install_status else None, hashes=args.long or args.very_long, 
key=spack.traverse.by_dag_hash, + highlight_version_fn=( + spack.package_base.non_preferred_version if args.non_defaults else None + ), + highlight_variant_fn=( + spack.package_base.non_default_variant if args.non_defaults else None + ), ) ) diff --git a/lib/spack/spack/cmd/style.py b/lib/spack/spack/cmd/style.py index 7f5e3de36ee50e..49111afc1d145f 100644 --- a/lib/spack/spack/cmd/style.py +++ b/lib/spack/spack/cmd/style.py @@ -7,8 +7,8 @@ import re import sys import warnings -from itertools import islice, zip_longest -from typing import Callable, Dict, List, Optional +from itertools import zip_longest +from typing import Callable, Dict, List, Optional, Set import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as color @@ -38,7 +38,7 @@ def grouper(iterable, n, fillvalue=None): exclude_paths = [os.path.relpath(spack.paths.vendor_path, spack.paths.prefix)] #: Order in which tools should be run. flake8 is last so that it can -#: double-check the results of other tools (if, e.g., --fix was provided) +#: double-check the results of other tools (if, e.g., ``--fix`` was provided) #: The list maps an executable name to a method to ensure the tool is #: bootstrapped or present in the environment. tool_names = ["import", "isort", "black", "flake8", "mypy"] @@ -55,7 +55,7 @@ def is_package(f): """Whether flake8 should consider a file as a core file or a package. We run flake8 with different exceptions for the core and for - packages, since we allow `from spack.package import *` and poking globals + packages, since we allow ``from spack.package import *`` and poking globals into packages. """ return f.startswith("var/spack/") and f.endswith("package.py") @@ -205,7 +205,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: "--spec-strings", action="store_true", help="upgrade spec strings in Python, JSON and YAML files for compatibility with Spack " - "v1.0 and v0.x. Example: spack style --spec-strings $(git ls-files). Note: must be " + "v1.0 and v0.x. Example: spack style ``--spec-strings $(git ls-files)``. Note: must be " "used only on specs from spack v0.X.", ) @@ -401,15 +401,11 @@ def _run_import_check( is_use = re.compile(r"(? 
None: """Walk the AST of a Python file and apply handler to formatted spec strings.""" - has_constant = sys.version_info >= (3, 8) for node in ast.walk(tree): - if has_constant and isinstance(node, ast.Constant) and isinstance(node.value, str): - current_str = node.value - elif not has_constant and isinstance(node, ast.Str): + if sys.version_info >= (3, 8): + if isinstance(node, ast.Constant) and isinstance(node.value, str): + current_str = node.value + else: + continue + elif isinstance(node, ast.Str): current_str = node.s else: continue diff --git a/lib/spack/spack/cmd/tags.py b/lib/spack/spack/cmd/tags.py index 3ac74da9719447..39e6166c18a369 100644 --- a/lib/spack/spack/cmd/tags.py +++ b/lib/spack/spack/cmd/tags.py @@ -4,16 +4,16 @@ import argparse import io import sys +from typing import Dict, Iterable, List import spack.environment import spack.llnl.string import spack.llnl.util.tty as tty import spack.llnl.util.tty.colify as colify import spack.repo -import spack.tag description = "show package tags and associated packages" -section = "basic" +section = "query" level = "long" @@ -68,7 +68,7 @@ def tags(parser, args): return # unique list of available tags - available_tags = sorted(spack.repo.PATH.tag_index.keys()) + available_tags = sorted(spack.repo.PATH.tag_index.tags) if not available_tags: tty.msg("No tagged packages") return @@ -80,7 +80,7 @@ def tags(parser, args): if not args.installed: report_tags("available", available_tags) else: - tag_pkgs = spack.tag.packages_with_tags(available_tags, True, True) + tag_pkgs = packages_with_tags(available_tags, True, True) tags = tag_pkgs.keys() if tag_pkgs else [] report_tags("installed", tags) return @@ -90,7 +90,7 @@ def tags(parser, args): isatty = sys.stdout.isatty() tags = args.tag if args.tag else available_tags - tag_pkgs = spack.tag.packages_with_tags(tags, args.installed, False) + tag_pkgs = packages_with_tags(tags, args.installed, False) missing = "No installed packages" if args.installed else "None" for tag in sorted(tag_pkgs): # TODO: Remove the sorting once we're sure noone has an old @@ -105,3 +105,29 @@ def tags(parser, args): buffer.write(" {0}\n".format(missing)) buffer.write("\n") print(buffer.getvalue()) + + +def packages_with_tags( + tags: Iterable[str], installed: bool, skip_empty: bool +) -> Dict[str, List[str]]: + """ + Returns a dict, indexed by tag, containing lists of names of packages + containing the tag or, if no tags, for all available tags. + + Arguments: + tags: list of tags of interest or None for all + installed: True if want names of packages that are installed; + otherwise, False if want all packages with the tag + skip_empty: True if exclude tags with no associated packages; + otherwise, False if want entries for all tags even when no such + tagged packages + """ + tag_pkgs: Dict[str, List[str]] = {} + name_filter = {x.name for x in spack.environment.installed_specs()} if installed else None + for tag in tags: + packages = spack.repo.PATH.tag_index.get_packages(tag) + if name_filter is not None: + packages = [p for p in packages if p in name_filter] + if packages or not skip_empty: + tag_pkgs[tag] = packages + return tag_pkgs diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py index d63bcc658e271f..3b2085e68a89a9 100644 --- a/lib/spack/spack/cmd/test.py +++ b/lib/spack/spack/cmd/test.py @@ -20,8 +20,10 @@ from spack.llnl.util import tty from spack.llnl.util.tty import colify +from . 
import doc_dedented, doc_first_line + description = "run spack's tests for an install" -section = "admin" +section = "build" level = "long" @@ -30,7 +32,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Run run_parser = sp.add_parser( - "run", description=test_run.__doc__, help=spack.cmd.first_line(test_run.__doc__) + "run", description=doc_dedented(test_run), help=doc_first_line(test_run) ) run_parser.add_argument( @@ -77,7 +79,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # List list_parser = sp.add_parser( - "list", description=test_list.__doc__, help=spack.cmd.first_line(test_list.__doc__) + "list", description=doc_dedented(test_list), help=doc_first_line(test_list) ) list_parser.add_argument( "-a", @@ -91,7 +93,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Find find_parser = sp.add_parser( - "find", description=test_find.__doc__, help=spack.cmd.first_line(test_find.__doc__) + "find", description=doc_dedented(test_find), help=doc_first_line(test_find) ) find_parser.add_argument( "filter", @@ -101,7 +103,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Status status_parser = sp.add_parser( - "status", description=test_status.__doc__, help=spack.cmd.first_line(test_status.__doc__) + "status", description=doc_dedented(test_status), help=doc_first_line(test_status) ) status_parser.add_argument( "names", nargs=argparse.REMAINDER, help="test suites for which to print status" @@ -109,9 +111,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Results results_parser = sp.add_parser( - "results", - description=test_results.__doc__, - help=spack.cmd.first_line(test_results.__doc__), + "results", description=doc_dedented(test_results), help=doc_first_line(test_results) ) results_parser.add_argument( "-l", "--logs", action="store_true", help="print the test log for each matching package" @@ -138,7 +138,7 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: # Remove remove_parser = sp.add_parser( - "remove", description=test_remove.__doc__, help=spack.cmd.first_line(test_remove.__doc__) + "remove", description=doc_dedented(test_remove), help=doc_first_line(test_remove) ) arguments.add_common_arguments(remove_parser, ["yes_to_all"]) remove_parser.add_argument( @@ -147,7 +147,8 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: def test_run(args): - """run tests for the specified installed packages + """\ + run tests for the specified installed packages if no specs are listed, run tests for all packages in the current environment or all installed packages if there is no active environment @@ -253,7 +254,8 @@ def has_test_and_tags(pkg_class): def test_find(args): # TODO: merge with status (noargs) - """find tests that are running or have available results + """\ + find tests that are running or have available results displays aliases for tests that have them, otherwise test suite content hashes """ @@ -404,12 +406,13 @@ def test_results(args): def test_remove(args): - """remove results from Spack test suite(s) (default all) + """\ + remove results from Spack test suite(s) (default all) if no test suite is listed, remove results for all suites. 
removed tests can no longer be accessed for results or status, and will not - appear in `spack test list` results + appear in ``spack test list`` results """ if args.names: test_suites = [] diff --git a/lib/spack/spack/cmd/test_env.py b/lib/spack/spack/cmd/test_env.py index fcf2ec96adbe20..13a2773b57397b 100644 --- a/lib/spack/spack/cmd/test_env.py +++ b/lib/spack/spack/cmd/test_env.py @@ -6,9 +6,9 @@ from spack.context import Context description = ( - "run a command in a spec's test environment, or dump its environment to screen or file" + "run a command in a spec's test environment,\nor dump its environment to screen or file" ) -section = "admin" +section = "developer" level = "long" setup_parser = env_utility.setup_parser diff --git a/lib/spack/spack/cmd/tutorial.py b/lib/spack/spack/cmd/tutorial.py index faef4a59d1e9ab..afa2463ad63285 100644 --- a/lib/spack/spack/cmd/tutorial.py +++ b/lib/spack/spack/cmd/tutorial.py @@ -23,7 +23,7 @@ # tutorial configuration parameters -tutorial_branch = "releases/v1.0" +tutorial_branch = "releases/v1.1" tutorial_mirror = "file:///mirror" tutorial_key = os.path.join(spack.paths.share_path, "keys", "tutorial.pub") diff --git a/lib/spack/spack/cmd/undevelop.py b/lib/spack/spack/cmd/undevelop.py index 99446a50e6196e..93b900b857430f 100644 --- a/lib/spack/spack/cmd/undevelop.py +++ b/lib/spack/spack/cmd/undevelop.py @@ -15,6 +15,16 @@ def setup_parser(subparser: argparse.ArgumentParser) -> None: + subparser.add_argument( + "--no-modify-concrete-specs", + action="store_false", + dest="apply_changes", + help=( + "do not mutate concrete specs to remove dev_path provenance." + " This requires running `spack concretize -f` later to apply changes to concrete specs" + ), + ) + subparser.add_argument( "-a", "--all", action="store_true", help="remove all specs from (clear) the environment" ) @@ -51,6 +61,9 @@ def undevelop(parser, args): env = spack.cmd.require_active_env(cmd_name="undevelop") with env.write_transaction(): _update_config(remove_specs, remove_all) + if args.apply_changes: + for spec in remove_specs: + env.apply_develop(spec, path=None) updated_all_dev_specs = set(spack.config.get("develop")) remove_spec_names = set(x.name for x in remove_specs) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index 63cf3bd0f94cbd..aadc4282801719 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -83,17 +83,13 @@ def find_matching_specs( allow_multiple_matches: bool = False, origin=None, ) -> List[spack.spec.Spec]: - """Returns a list of specs matching the not necessarily - concretized specs given from cli + """Returns a list of specs matching the not necessarily concretized specs given on the command line Args: env: optional active environment specs: list of specs to be matched against installed packages allow_multiple_matches: if True multiple matches are admitted origin: origin of the spec - - Return: - list: list of specs """ # constrain uninstall resolution to current environment if one is active hashes = env.all_hashes() if env else None @@ -209,7 +205,7 @@ def get_uninstall_list(args, specs: List[spack.spec.Spec], env: Optional[ev.Envi """Returns unordered uninstall_list and remove_list: these may overlap (some things may be both uninstalled and removed from the current environment).
- It is assumed we are in an environment if --remove is specified (this + It is assumed we are in an environment if ``--remove`` is specified (this method raises an exception otherwise).""" if args.remove and not env: raise ValueError("Can only use --remove when in an environment") diff --git a/lib/spack/spack/cmd/url.py b/lib/spack/spack/cmd/url.py index 1300ea4f091a01..0ad6a0d12747f5 100644 --- a/lib/spack/spack/cmd/url.py +++ b/lib/spack/spack/cmd/url.py @@ -517,9 +517,9 @@ def version_parsed_correctly(pkg, version): def remove_prefix(pkg_name): - """Remove build system prefix ('py-', 'perl-', etc.) from a package name. + """Remove build system prefix (``'py-'``, ``'perl-'``, etc.) from a package name. - After determining a name, `spack create` determines a build system. + After determining a name, ``spack create`` determines a build system. Some build systems prepend a special string to the front of the name. Since this can't be guessed from the URL, it would be unfair to say that these names are incorrectly parsed, so we remove them. @@ -550,7 +550,7 @@ def remove_prefix(pkg_name): def remove_separators(version): - """Remove separator characters ('.', '_', and '-') from a version. + """Remove separator characters (``.``, ``_``, and ``-``) from a version. A version like 1.2.3 may be displayed as 1_2_3 in the URL. Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal. diff --git a/lib/spack/spack/cmd/verify.py b/lib/spack/spack/cmd/verify.py index 3b836dbe7e1740..051757d9849ff6 100644 --- a/lib/spack/spack/cmd/verify.py +++ b/lib/spack/spack/cmd/verify.py @@ -67,6 +67,11 @@ def setup_parser(subparser: argparse.ArgumentParser): arguments.add_common_arguments(libraries_subparser, ["constraint"]) + versions_subparser = sp.add_parser( + "versions", help=verify_versions.__doc__, description=verify_versions.__doc__ + ) + arguments.add_common_arguments(versions_subparser, ["constraint"]) + def verify(parser, args): cmd = args.verify_command @@ -74,9 +79,72 @@ def verify(parser, args): return verify_libraries(args) elif cmd == "manifest": return verify_manifest(args) + elif cmd == "versions": + return verify_versions(args) parser.error("invalid verify subcommand") +def verify_versions(args): + """Check that all versions of installed packages are known to Spack and non-deprecated. + + Reports errors for any of the following: + + 1. Installed package not loadable from the repo + 2. Installed package version not known by the package recipe + 3. Installed package version deprecated in the package recipe + """ + if args.specs: + specs = args.specs(installed=True) + else: + specs = spack.store.db.query(installed=True) + + msg_lines = _verify_version(specs) + if msg_lines: + tty.die("\n".join(msg_lines)) + + +def _verify_version(specs): + """Helper method for verify_versions.""" + missing_package = [] + unknown_version = [] + deprecated_version = [] + + for spec in specs: + try: + pkg = spec.package + except Exception as e: + tty.debug(str(e)) + missing_package.append(spec) + continue + + if spec.version not in pkg.versions: + unknown_version.append(spec) + continue + + if pkg.versions[spec.version].get("deprecated", False): + deprecated_version.append(spec) + + msg_lines = [] + if missing_package or unknown_version or deprecated_version: + errors = len(missing_package) + len(unknown_version) + len(deprecated_version) + msg_lines = [f"{errors} installed packages have unknown/deprecated versions\n"] + + msg_lines += [ + f" Cannot check version for {spec} at {spec.prefix}. Cannot load package." 
+ for spec in missing_package + ] + msg_lines += [ + f" Spec {spec} at {spec.prefix} has version {spec.version} unknown to Spack." + for spec in unknown_version + ] + msg_lines += [ + f" Spec {spec} at {spec.prefix} has deprecated version {spec.version}." + for spec in deprecated_version + ] + + return msg_lines + + def verify_libraries(args): """verify that shared libraries of install packages can be located in rpaths (Linux only)""" specs_from_db = [s for s in args.specs(installed=True) if not s.external] diff --git a/lib/spack/spack/cmd/versions.py b/lib/spack/spack/cmd/versions.py index 25f46cae8af4f5..ab7808bf1e2388 100644 --- a/lib/spack/spack/cmd/versions.py +++ b/lib/spack/spack/cmd/versions.py @@ -13,7 +13,7 @@ from spack.version import infinity_versions, ver description = "list available versions of a package" -section = "packaging" +section = "query" level = "long" diff --git a/lib/spack/spack/cmd/view.py b/lib/spack/spack/cmd/view.py index b58f5c206262b6..406fa279fd9f82 100644 --- a/lib/spack/spack/cmd/view.py +++ b/lib/spack/spack/cmd/view.py @@ -9,12 +9,12 @@ - specs resolved from the package names given by the user (the seeds) -- all dependencies of the seeds unless user specifies `--no-dependencies` +- all dependencies of the seeds unless user specifies ``--no-dependencies`` - less any specs with names matching the regular expressions given by - `--exclude` + ``--exclude`` -The `view` can be built and tore down via a number of methods (the "actions"): +The ``view`` can be built and torn down via a number of methods (the "actions"): - symlink :: a file system view which is a directory hierarchy that is the union of the hierarchies of the installed packages in the DAG @@ -25,8 +25,8 @@ - statlink :: a view producing a status report of a symlink or hardlink view. -The file system view concept is imspired by Nix, implemented by -brett.viren@gmail.com ca 2016. +The file system view concept is inspired by Nix, implemented by +Brett Viren ca. 2016. All operations on views are performed via proxy objects such as YamlFilesystemView. @@ -45,7 +45,7 @@ from spack.llnl.util.link_tree import MergeConflictError from spack.util import spack_yaml as s_yaml -description = "project packages to a compact naming scheme on the filesystem" +description = "manipulate view directories in the filesystem" section = "environments" level = "short" diff --git a/lib/spack/spack/compilers/adaptor.py b/lib/spack/spack/compilers/adaptor.py index 9de7a3b7309657..3ce38f6a2c2551 100644 --- a/lib/spack/spack/compilers/adaptor.py +++ b/lib/spack/spack/compilers/adaptor.py @@ -17,8 +17,8 @@ class Languages(enum.Enum): class CompilerAdaptor: - """Provides access to compiler attributes via `Package.compiler`. Useful for - packages which do not yet access compiler properties via `self.spec[language]`. + """Provides access to compiler attributes via ``Package.compiler``. Useful for + packages which do not yet access compiler properties via ``self.spec[language]``.
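+
+    A minimal usage sketch of both access styles (``pkg`` is assumed to be a
+    package instance whose concrete spec depends on the ``c`` language virtual)::
+
+        cc_path = pkg.compiler.cc    # legacy-style access through the adaptor
+        c_dep = pkg.spec["c"]        # preferred: query the language virtual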
""" def __init__( diff --git a/lib/spack/spack/compilers/config.py b/lib/spack/spack/compilers/config.py index de03b31f3ae045..f34551180b42c9 100644 --- a/lib/spack/spack/compilers/config.py +++ b/lib/spack/spack/compilers/config.py @@ -10,8 +10,6 @@ import warnings from typing import Any, Dict, List, Optional, Tuple -import spack.vendor.archspec.cpu - import spack.config import spack.detection import spack.detection.path @@ -22,6 +20,7 @@ import spack.platforms import spack.repo import spack.spec +from spack.externals import ExternalSpecsParser, external_spec from spack.operating_systems import windows_os from spack.util.environment import get_path @@ -195,8 +194,7 @@ def _mark_in_packages_yaml(self, match, candidate_scopes): continue def _partition_match(external_yaml): - s = CompilerFactory.from_external_yaml(external_yaml) - return not s.satisfies(match) + return not external_spec(external_yaml).satisfies(match) to_keep, to_remove = spack.llnl.util.lang.stable_partition( externals_config, _partition_match @@ -211,9 +209,7 @@ def _partition_match(external_yaml): continue self.marked_packages_yaml.append((current_scope, packages_yaml)) - all_removals.extend( - [CompilerFactory.from_external_yaml(x) for x in removed_from_scope] - ) + all_removals.extend([external_spec(x) for x in removed_from_scope]) return all_removals def flush(self): @@ -258,17 +254,14 @@ def name_os_target(spec: spack.spec.Spec) -> Tuple[str, str, str]: class CompilerFactory: """Class aggregating all ways of constructing a list of compiler specs from config entries.""" - _PACKAGES_YAML_CACHE: Dict[str, Optional[spack.spec.Spec]] = {} - _GENERIC_TARGET = None - @staticmethod def from_packages_yaml( configuration: spack.config.Configuration, *, scope: Optional[str] = None ) -> List[spack.spec.Spec]: """Returns the compiler specs defined in the "packages" section of the configuration""" - compilers = [] + externals_dicts = [] compiler_package_names = supported_compilers() - packages_yaml = configuration.get("packages", scope=scope) + packages_yaml = configuration.get_config("packages", scope=scope) for name, entry in packages_yaml.items(): if name not in compiler_package_names: continue @@ -277,52 +270,17 @@ def from_packages_yaml( if not externals_config: continue - compiler_specs = [] - for current_external in externals_config: - key = str(current_external) - if key not in CompilerFactory._PACKAGES_YAML_CACHE: - CompilerFactory._PACKAGES_YAML_CACHE[key] = CompilerFactory.from_external_yaml( - current_external - ) - - compiler = CompilerFactory._PACKAGES_YAML_CACHE[key] - if compiler: - compiler_specs.append(compiler) + for current in externals_config: + # If extra_attributes is not there don't use this entry as a compiler. + if _EXTRA_ATTRIBUTES_KEY not in current: + header = f"The external spec '{current['spec']}' cannot be used as a compiler" + tty.debug(f"[{__file__}] {header}: missing the '{_EXTRA_ATTRIBUTES_KEY}' key") + continue - compilers.extend(compiler_specs) - return compilers + externals_dicts.append(current) - @staticmethod - def from_external_yaml(config: Dict[str, Any]) -> Optional[spack.spec.Spec]: - """Returns a compiler spec from an external definition from packages.yaml.""" - # Allow `@x.y.z` instead of `@=x.y.z` - err_header = f"The external spec '{config['spec']}' cannot be used as a compiler" - # If extra_attributes is not there I might not want to use this entry as a compiler, - # therefore just leave a debug message, but don't be loud with a warning. 
- if _EXTRA_ATTRIBUTES_KEY not in config: - tty.debug(f"[{__file__}] {err_header}: missing the '{_EXTRA_ATTRIBUTES_KEY}' key") - return None - extra_attributes = config[_EXTRA_ATTRIBUTES_KEY] - result = spack.spec.Spec( - str(spack.spec.parse_with_version_concrete(config["spec"])), - external_path=config.get("prefix"), - external_modules=config.get("modules"), - ) - result.extra_attributes = extra_attributes - CompilerFactory._finalize_external_concretization(result) - return result - - @staticmethod - def _finalize_external_concretization(abstract_spec): - if CompilerFactory._GENERIC_TARGET is None: - CompilerFactory._GENERIC_TARGET = spack.vendor.archspec.cpu.host().family - - if abstract_spec.architecture: - abstract_spec.architecture.complete_with_defaults() - else: - abstract_spec.constrain(spack.spec.Spec.default_arch()) - abstract_spec.architecture.target = CompilerFactory._GENERIC_TARGET - abstract_spec._finalize_concretization() + external_parser = ExternalSpecsParser(externals_dicts) + return external_parser.all_specs() @staticmethod def from_legacy_yaml(compiler_dict: Dict[str, Any]) -> List[spack.spec.Spec]: diff --git a/lib/spack/spack/compilers/flags.py b/lib/spack/spack/compilers/flags.py index 60e8dcff206f4c..541269872e869e 100644 --- a/lib/spack/spack/compilers/flags.py +++ b/lib/spack/spack/compilers/flags.py @@ -7,8 +7,8 @@ def tokenize_flags(flags_values: str, propagate: bool = False) -> List[Tuple[str, bool]]: """Given a compiler flag specification as a string, this returns a list where the entries are the flags. For compiler options which set values - using the syntax "-flag value", this function groups flags and their - values together. Any token not preceded by a "-" is considered the + using the syntax ``-flag value``, this function groups flags and their + values together. Any token not preceded by a ``-`` is considered the value of a prior flag.""" tokens = flags_values.split() if not tokens: diff --git a/lib/spack/spack/compilers/libraries.py b/lib/spack/spack/compilers/libraries.py index 418a0461c73a7c..d0831a602eb674 100644 --- a/lib/spack/spack/compilers/libraries.py +++ b/lib/spack/spack/compilers/libraries.py @@ -317,10 +317,16 @@ def dynamic_linker_filter_for(node: spack.spec.Spec) -> Optional[DefaultDynamicL def compiler_spec(node: spack.spec.Spec) -> Optional[spack.spec.Spec]: - """Returns the compiler spec associated with the node passed as argument. + """Returns a compiler :class:`~spack.spec.Spec` associated with the node passed as argument. - The function looks for a "c", "cxx", and "fortran" compiler in that order, - and returns the first found. If none is found, returns None. + The function looks for a ``c``, ``cxx``, and ``fortran`` compiler in that order, + and returns the first found. If the node does not depend on any of these languages, + it returns :obj:`None`. + + Use of this function is *discouraged*, because a single spec can have multiple compilers + associated with it, and this function only returns one of them. It can be better to refer to + compilers on a per-language basis, through the language virtuals: ``spec["c"]``, + ``spec["cxx"]``, and ``spec["fortran"]``. 
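+
+    A short sketch of both styles (``node`` is assumed to be a concrete spec)::
+
+        legacy = compiler_spec(node)  # first of c/cxx/fortran found, or None
+        c_dep = node["c"]             # preferred: per-language lookup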
""" for language in ("c", "cxx", "fortran"): candidates = node.dependencies(virtuals=[language]) diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py index b6a0bbfd875e26..ca473535f1ff89 100644 --- a/lib/spack/spack/concretize.py +++ b/lib/spack/spack/concretize.py @@ -102,7 +102,11 @@ def concretize_separately( tests: list of package names for which to consider tests dependencies. If True, all nodes will have test dependencies. If False, test dependencies will be disregarded. """ - from spack.bootstrap import ensure_bootstrap_configuration, ensure_clingo_importable_or_raise + from spack.bootstrap import ( + ensure_bootstrap_configuration, + ensure_clingo_importable_or_raise, + ensure_winsdk_external_or_raise, + ) to_concretize = [abstract for abstract, concrete in spec_list if not concrete] args = [ @@ -118,6 +122,10 @@ def concretize_separately( with ensure_bootstrap_configuration(): ensure_clingo_importable_or_raise() + # ensure we don't try to detect winsdk in parallel + if sys.platform == "win32": + ensure_winsdk_external_or_raise() + # Ensure all the indexes have been built or updated, since # otherwise the processes in the pool may timeout on waiting # for a write lock. We do this indirectly by retrieving the @@ -138,11 +146,11 @@ def concretize_separately( ] # Solve the environment in parallel on Linux - # TODO: support parallel concretization on macOS and Windows num_procs = min(len(args), spack.config.determine_number_of_jobs(parallel=True)) msg = "Starting concretization" - if sys.platform not in ("darwin", "win32") and num_procs > 1: + # no parallel conc on Windows + if not sys.platform == "win32" and num_procs > 1: msg += f" pool with {num_procs} processes" tty.msg(msg) @@ -179,8 +187,8 @@ def concretize_one(spec: Union[str, Spec], tests: TestsType = False) -> Spec: """Return a concretized copy of the given spec. Args: - tests: if False disregard 'test' dependencies, if a list of names activate them for - the packages in the list, if True activate 'test' dependencies for all packages. + tests: if False disregard test dependencies, if a list of names activate them for + the packages in the list, if True activate test dependencies for all packages. """ from spack.solver.asp import Solver, SpecBuilder diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py index e290c607ede83d..8f14e958704ef1 100644 --- a/lib/spack/spack/config.py +++ b/lib/spack/spack/config.py @@ -6,12 +6,12 @@ This implements Spack's configuration system, which handles merging multiple scopes with different levels of precedence. See the documentation on :ref:`configuration-scopes` for details on how Spack's -configuration system behaves. The scopes are: +configuration system behaves. The scopes set up here are: - #. ``default`` - #. ``system`` - #. ``site`` - #. ``user`` +#. ``spack`` in ``$spack/etc/spack`` - controls all built-in spack scopes, + except default +#. 
``defaults`` in ``$spack/etc/spack/defaults`` - defaults that Spack + needs to function Important functions in this module are: @@ -31,10 +31,12 @@ import functools import os import os.path +import pathlib import re import sys from collections import defaultdict -from typing import Any, Callable, Dict, Generator, List, NamedTuple, Optional, Tuple, Union +from itertools import chain +from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union from spack.vendor import jsonschema @@ -60,7 +62,11 @@ import spack.schema.toolchains import spack.schema.upstreams import spack.schema.view +import spack.util.executable +import spack.util.git +import spack.util.hash import spack.util.remote_file_cache as rfc_util +import spack.util.spack_json as sjson import spack.util.spack_yaml as syaml from spack.llnl.util import filesystem, lang, tty from spack.util.cpus import cpus_available @@ -91,10 +97,12 @@ # Same as above, but including keys for environments # this allows us to unify config reading between configs and environments -_ALL_SCHEMAS: Dict[str, Any] = copy.deepcopy(SECTION_SCHEMAS) -_ALL_SCHEMAS.update({spack.schema.env.TOP_LEVEL_KEY: spack.schema.env.schema}) +_ALL_SCHEMAS: Dict[str, Any] = { + **SECTION_SCHEMAS, + spack.schema.env.TOP_LEVEL_KEY: spack.schema.env.schema, +} -#: Path to the default configuration +#: Path to the main configuration scope CONFIGURATION_DEFAULTS_PATH = ("defaults", os.path.join(spack.paths.etc_path, "defaults")) #: Hard-coded default values for some key configuration options. @@ -110,7 +118,8 @@ "build_jobs": min(16, cpus_available()), "build_stage": "$tempdir/spack-stage", "license_dir": spack.paths.default_license_dir, - } + }, + "concretizer": {"externals": {"completion": "default_variants"}}, } #: metavar to use for commands that accept scopes @@ -137,6 +146,7 @@ def __init__(self, name: str) -> None: self.name = name self.writable = False self.sections = syaml.syaml_dict() + self.prefer_modify = False #: names of any included scopes self._included_scopes: Optional[List["ConfigScope"]] = None @@ -150,13 +160,43 @@ def included_scopes(self) -> List["ConfigScope"]: includes = self.get_section("include") if includes: include_paths = [included_path(data) for data in includes["include"]] - for path in include_paths: - included_scope = include_path_scope(path, self.name) - if included_scope: + included_scopes = chain(*[include.scopes(self) for include in include_paths]) + + # Do not include duplicate scopes + for included_scope in included_scopes: + if any([included_scope.name == scope.name for scope in self._included_scopes]): + tty.warn(f"Ignoring duplicate included scope: {included_scope.name}") + continue + + if included_scope not in self._included_scopes: + self._included_scopes.append(included_scope) return self._included_scopes + @property + def exists(self) -> bool: + """Whether the config object indicated by the scope can be read""" + return True + + def override_include(self): + """Whether the ``include::`` section of this scope should override lower scopes.""" + include = self.sections.get("include") + if not include: + return False + + # override if this has an include section and there is an override attribute on + # the include key in the dict and it is set to True.
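+        # For example (illustrative), a scope whose config contains
+        #
+        #   include::
+        #   - path: /replacement/configs
+        #
+        # causes includes from lower-priority scopes to be ignored.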
+ return getattr(next(iter(include.keys()), None), "override", False) + + def transitive_includes(self, _names: Optional[Set[str]] = None) -> Set[str]: + """Get name of this scope and names of its transitively included scopes.""" + if _names is None: + _names = _set() + _names.add(self.name) + for scope in self.included_scopes: + _names |= scope.transitive_includes(_names=_names) + return _names + def get_section_filename(self, section: str) -> str: raise NotImplementedError @@ -177,10 +217,17 @@ def __repr__(self) -> str: class DirectoryConfigScope(ConfigScope): """Config scope backed by a directory containing one file per section.""" - def __init__(self, name: str, path: str, *, writable: bool = True) -> None: + def __init__( + self, name: str, path: str, *, writable: bool = True, prefer_modify: bool = True + ) -> None: super().__init__(name) self.path = path self.writable = writable + self.prefer_modify = prefer_modify + + @property + def exists(self) -> bool: + return os.path.exists(self.path) def get_section_filename(self, section: str) -> str: """Returns the filename associated with a given section""" @@ -188,7 +235,14 @@ def get_section_filename(self, section: str) -> str: return os.path.join(self.path, f"{section}.yaml") def get_section(self, section: str) -> Optional[YamlConfigDict]: - """Returns the data associated with a given section""" + """Returns the data associated with a given section if the scope exists""" + if not self.exists: + tty.debug(f"Attempting to read from missing scope: {self} at {self.path}") + return {} + return self._get_section(section) + + def _get_section(self, section: str) -> Optional[YamlConfigDict]: + """get_section but without the existence check""" if section not in self.sections: path = self.get_section_filename(section) schema = SECTION_SCHEMAS[section] @@ -201,7 +255,7 @@ def _write_section(self, section: str) -> None: raise spack.error.ConfigError(f"Cannot write to immutable scope {self}") filename = self.get_section_filename(section) - data = self.get_section(section) + data = self._get_section(section) if data is None: return @@ -226,6 +280,7 @@ def __init__( *, yaml_path: Optional[List[str]] = None, writable: bool = True, + prefer_modify: bool = True, ) -> None: """Similar to ``ConfigScope`` but can be embedded in another schema. @@ -249,8 +304,13 @@ def __init__( self.schema = schema self.path = path self.writable = writable + self.prefer_modify = prefer_modify self.yaml_path = yaml_path or [] + @property + def exists(self) -> bool: + return os.path.exists(self.path) + def get_section_filename(self, section) -> str: return self.path @@ -281,6 +341,10 @@ def get_section(self, section: str) -> Optional[YamlConfigDict]: # } # } + if not self.exists: + tty.debug(f"Attempting to read from missing scope: {self} at {self.path}") + return {} + # This bit ensures we have read the file and have # the raw data in memory if self._raw_data is None: @@ -424,7 +488,7 @@ def _config_mutator(method): @functools.wraps(method) def _method(self, *args, **kwargs): - self._get_config_memoized.cache.clear() + self._get_config_memoized.cache_clear() return method(self, *args, **kwargs) return _method @@ -453,16 +517,22 @@ def highest(self) -> ConfigScope: return next(self.scopes.reversed_values()) # type: ignore @_config_mutator - def push_scope( + def push_scope_incremental( self, scope: ConfigScope, priority: Optional[int] = None, _depth: int = 0 - ) -> None: + ) -> Generator["Configuration", None, None]: """Adds a scope to the Configuration, at a given priority. 
+ ``push_scope_incremental`` yields included scopes incrementally, so that their + data can be used by higher priority scopes during config initialization. If you + push a scope that includes other, low-priority scopes, they will be pushed on + first, before the scope that included them. + If a priority is not given, it is assumed to be the current highest priority. Args: scope: scope to be added priority: priority of the scope + """ # TODO: As a follow on to #48784, change this to create a graph of the # TODO: includes AND ensure properly sorted such that the order included @@ -480,9 +550,31 @@ def push_scope( # record this inclusion so that remove_scope() can use it self.push_scope(included_scope, priority=priority, _depth=_depth + 1) + yield self tty.debug(f"[CONFIGURATION: PUSH SCOPE]: {str(scope)}, priority={priority}", level=2) self.scopes.add(scope.name, value=scope, priority=priority) + yield self + + @_config_mutator + def push_scope( + self, scope: ConfigScope, priority: Optional[int] = None, _depth: int = 0 + ) -> None: + """Add a scope to the Configuration, at a given priority. + + If a priority is not given, it is assumed to be the current highest priority. + + Args: + scope: scope to be added + priority: priority of the scope + + """ + # Use push_scope_incremental to do the real work. It returns a generator, which needs + # to be consumed to get each of the yielded scopes added to the scope stack. + # It will usually yield one scope, but if there are includes it will yield those first, + # before the scope we're actually pushing. + for _ in self.push_scope_incremental(scope=scope, priority=priority, _depth=_depth): + pass @_config_mutator def remove_scope(self, scope_name: str) -> Optional[ConfigScope]: @@ -509,16 +601,31 @@ def writable_scopes(self) -> Generator[ConfigScope, None, None]: """Generator of writable scopes with an associated file.""" return (s for s in self.scopes.values() if s.writable) + @property + def existing_scopes(self) -> Generator[ConfigScope, None, None]: + """Generator of existing scopes. These are self.scopes where the + scope has a representation on the filesystem or is internal""" + return (s for s in self.scopes.values() if s.exists) + def highest_precedence_scope(self) -> ConfigScope: """Writable scope with the highest precedence.""" - return next(s for s in self.scopes.reversed_values() if s.writable) + scope = next(s for s in self.scopes.reversed_values() if s.writable) + + # if a scope prefers that we edit another, respect that. + while scope: + preferred = scope + scope = next( + (s for s in scope.included_scopes if s.writable and s.prefer_modify), None + ) + + return preferred def matching_scopes(self, reg_expr) -> List[ConfigScope]: """ List of all scopes whose names match the provided regular expression. - For example, matching_scopes(r'^command') will return all scopes - whose names begin with `command`. + For example, ``matching_scopes(r'^command')`` will return all scopes + whose names begin with ``command``. 
""" return [s for s in self.scopes.values() if re.search(reg_expr, s.name)] @@ -638,10 +745,46 @@ def get_config( """ return self._get_config_memoized(section, scope=scope, _merged_scope=_merged_scope) + def deepcopy_as_builtin( + self, section: str, scope: Optional[str] = None, *, line_info: bool = False + ) -> Dict[str, Any]: + """Get a deep copy of a section with native Python types, excluding YAML metadata.""" + return syaml.deepcopy_as_builtin( + self.get_config(section, scope=scope), line_info=line_info + ) + + def _filter_overridden(self, scopes: List[ConfigScope]): + """Filter out overridden scopes. + + NOTE: this does not yet handle diamonds or nested `include::` in lists. It is + sufficient for include::[] in an env, which allows isolation. + """ + # find last override in scopes + i = next((i for i, s in reversed(list(enumerate(scopes))) if s.override_include()), -1) + if i < 0: + return scopes # no overrides + + keep = scopes[i].transitive_includes() + keep |= _set(s.name for s in self.scopes.priority_values(ConfigScopePriority.DEFAULTS)) + keep |= _set(s.name for s in scopes[i:]) + + # return scopes to keep, with order preserved + return [s for s in scopes if s.name in keep] + + @property + def active_scopes(self) -> List[ConfigScope]: + """Return a list of scopes that have not been overridden by include::.""" + return self._filter_overridden([s for s in self.scopes.values()]) + @lang.memoized def _get_config_memoized( self, section: str, scope: Optional[str], _merged_scope: Optional[str] ) -> YamlConfigDict: + """Memoized helper for ``get_config()``. + + Note that the memoization cache for this function is cleared whenever + any function decorated with ``@_config_mutator`` is called. + """ _validate_section_name(section) if scope is not None and _merged_scope is not None: @@ -655,11 +798,22 @@ def _get_config_memoized( else: scopes = list(self.scopes.values()) + # filter any scopes overridden by `include::` + scopes = self._filter_overridden(scopes) + merged_section: Dict[str, Any] = syaml.syaml_dict() updated_scopes = [] for config_scope in scopes: # read potentially cached data from the scope. data = config_scope.get_section(section) + if data and section == "include": + # Include overrides are handled by `_filter_overridden` above. Any remaining + # includes at this point are *not* actually overridden -- they're scopes with + # ConfigScopePriority.DEFAULT, which we currently do *not* remove with + # `include::`, because these scopes are needed for Spack to function correctly. + # So, we ignore :: here. + data = data.copy() + data["include"] = data.pop("include") # strip override # Skip empty configs if not isinstance(data, dict) or section not in data: @@ -688,13 +842,13 @@ def get(self, path: str, default: Optional[Any] = None, scope: Optional[str] = N """Get a config section or a single value from one. Accepts a path syntax that allows us to grab nested config map - entries. Getting the 'config' section would look like:: + entries. Getting the ``config`` section would look like:: - spack.config.get('config') + spack.config.get("config") and the ``dirty`` section in the ``config`` scope would be:: - spack.config.get('config:dirty') + spack.config.get("config:dirty") We use ``:`` as the separator, like YAML objects. 
""" @@ -758,12 +912,26 @@ def __iter__(self): """Iterate over scopes in this configuration.""" yield from self.scopes.values() - def print_section(self, section: str, blame: bool = False, *, scope=None) -> None: - """Print a configuration to stdout.""" + def print_section( + self, section: str, yaml: bool = True, blame: bool = False, *, scope: Optional[str] = None + ) -> None: + """Print a configuration to stdout. + + Arguments: + section: The configuration section to print. + yaml: If True, output in YAML format, otherwise JSON (ignored when blame is True). + blame: Whether to include source locations for each entry. + scope: The configuration scope to use. + """ try: data = syaml.syaml_dict() data[section] = self.get_config(section, scope=scope) - syaml.dump_config(data, stream=sys.stdout, default_flow_style=False, blame=blame) + if yaml or blame: + syaml.dump_config(data, stream=sys.stdout, default_flow_style=False, blame=blame) + else: + sjson.dump(data, sys.stdout) + sys.stdout.write("\n") + except (syaml.SpackYAMLError, OSError) as e: raise spack.error.ConfigError(f"cannot read '{section}' configuration") from e @@ -812,71 +980,364 @@ def override( #: Class for the relevance of an optional path conditioned on a limited #: python code that evaluates to a boolean and or explicit specification #: as optional. -class IncludePath(NamedTuple): - path: str +class OptionalInclude: + """Base properties for all includes.""" + + name: str when: str - sha256: str optional: bool + prefer_modify: bool + _scopes: List[ConfigScope] + def __init__(self, entry: dict): + self.name = entry.get("name", "") + self.when = entry.get("when", "") + self.optional = entry.get("optional", False) + self.prefer_modify = entry.get("prefer_modify", False) + self._scopes = [] -def included_path(entry: Union[str, dict]) -> IncludePath: - """Convert the included path entry into an IncludePath. + def _scope( + self, path: str, config_path: str, parent_scope: ConfigScope + ) -> Optional[ConfigScope]: + """Instantiate a configuration scope for the configuration path. - Args: - entry: include configuration entry + Args: + path: raw include path + config_path: configuration path + parent_scope: including scope - Returns: converted entry, where an empty ``when`` means the path is - not conditionally included - """ - if isinstance(entry, str): - return IncludePath(path=entry, sha256="", when="", optional=False) + Returns: configuration scopes + + Raises: + ValueError: the required configuration path does not exist + """ + assert self._valid_parent_scope( + parent_scope + ), "Optional includes must have valid parent_scope object" + + # use specified name if there is one + config_name = self.name + if not config_name: + # Try to use the relative path to create the included scope name + parent_path = getattr(parent_scope, "path", None) + if parent_path and str(parent_path) == os.path.commonprefix( + [parent_path, config_path] + ): + included_name = os.path.relpath(config_path, parent_path) + else: + included_name = config_path - path = entry["path"] - sha256 = entry.get("sha256", "") - when = entry.get("when", "") - optional = entry.get("optional", False) - return IncludePath(path=path, sha256=sha256, when=when, optional=optional) + if sys.platform == "win32": + # Clean windows path for use in config name that looks nicer + # ie. 
The path: C:\\some\\path\\to\\a\\file + # becomes C/some/path/to/a/file + included_name = included_name.replace("\\", "/") + included_name = included_name.replace(":", "") + config_name = f"{parent_scope.name}:{included_name}" -def include_path_scope(include: IncludePath, parent_name: str) -> Optional[ConfigScope]: - """Instantiate an appropriate configuration scope for the given path. + _, ext = os.path.splitext(config_path) + ext_is_yaml = ext == ".yaml" or ext == ".yml" + is_dir = os.path.isdir(config_path) + exists = os.path.exists(config_path) - Args: - include: optional include path - parent_name: name of including scope + if not exists and not self.optional: + dest = f" at ({config_path})" if config_path != path else "" + raise ValueError(f"Required path ({path}) does not exist{dest}") + + if (exists and not is_dir) or ext_is_yaml: + # files are assumed to be SingleFileScopes + tty.debug(f"Creating SingleFileScope {config_name} for '{config_path}'") + return SingleFileScope( + config_name, + config_path, + spack.schema.merged.schema, + prefer_modify=self.prefer_modify, + ) + + if ext and not is_dir: + raise ValueError( + f"File-based scope does not exist yet: should have a .yaml/.yml extension \ +for file scopes, or no extension for directory scopes (currently {ext})" + ) + + # directories are treated as regular ConfigScopes + # assign by "default" + tty.debug(f"Creating DirectoryConfigScope {config_name} for '{config_path}'") + return DirectoryConfigScope(config_name, config_path, prefer_modify=self.prefer_modify) + + def _valid_parent_scope(self, parent_scope: ConfigScope) -> bool: + """Validates that a parent scope is a valid configuration object""" + # enforced by type checking but those can always be # type: ignore'd + assert isinstance( + parent_scope, ConfigScope + ), f"Optional include must have valid parent scope,\ + of type ConfigScope; Type:{type(parent_scope)} is not valid." + # naive check that parent scope name isn't empty or just whitespace + return bool(re.sub(r"\s", "", parent_scope.name)) + + def evaluate_condition(self) -> bool: + # circular dependencies + import spack.spec + + return (not self.when) or spack.spec.eval_conditional(self.when) - Returns: configuration scope + def scopes(self, parent_scope: ConfigScope) -> List[ConfigScope]: + """Instantiate configuration scopes. - Raises: - ValueError: included path has an unsupported URL scheme, is required - but does not exist; configuration stage directory argument is missing - ConfigFileError: unable to access remote configuration file(s) + Args: + parent_scope: including scope + + Returns: configuration scopes IF the when condition is satisfied; + otherwise, an empty list. 
+ + Raises: + ValueError: the required configuration path does not exist + """ + raise NotImplementedError("must be implemented in derived classes") + + @property + def paths(self) -> List[str]: + """Path(s) associated with the include.""" + + raise NotImplementedError("must be implemented in derived classes") + + +class IncludePath(OptionalInclude): + path: str + sha256: str + destination: Optional[str] + + def __init__(self, entry: dict): + super().__init__(entry) + path_override_env_var = entry.get("path_override_env_var", "") + if path_override_env_var and path_override_env_var in os.environ: + self.path = os.environ[path_override_env_var] + else: + self.path = entry.get("path", "") + self.sha256 = entry.get("sha256", "") + self.destination = None + + def __repr__(self): + return ( + f"IncludePath({self.path}, sha256={self.sha256}, " + f"when='{self.when}', optional={self.optional})" + ) + + def scopes(self, parent_scope: ConfigScope) -> List[ConfigScope]: + """Instantiate a configuration scope for the included path. + + Args: + parent_scope: including scope + + Returns: configuration scopes IF the when condition is satisfied; + otherwise, an empty list. + + Raises: + ConfigFileError: unable to access remote configuration file + ValueError: included path has an unsupported URL scheme, is required + but does not exist; configuration stage directory argument is missing + """ + if not self.evaluate_condition(): + tty.debug(f"Include condition is not satisfied in {self}") + return [] + + if self._scopes: + tty.debug(f"Using existing scopes: {[s.name for s in self._scopes]}") + return self._scopes + + # Make sure to use the proper (default) working directory when obtaining + # the local path for a local file. + def work_dir(): + if not os.path.isabs(self.path) and hasattr(parent_scope, "path"): + if os.path.isfile(parent_scope.path): + return os.path.dirname(parent_scope.path) + if os.path.isdir(parent_scope.path): + return parent_scope.path + return os.getcwd() + + with filesystem.working_dir(work_dir()): + config_path = rfc_util.local_path(self.path, self.sha256, _include_cache_location) + assert config_path + self.destination = config_path + + scope = self._scope(self.path, self.destination, parent_scope) + if scope is not None: + self._scopes = [scope] + + return self._scopes + + @property + def paths(self) -> List[str]: + """Path(s) associated with the include.""" + + return [self.path] + + +class GitIncludePaths(OptionalInclude): + repo: str + branch: str + commit: str + tag: str + _paths: List[str] + destination: Optional[str] + + def __init__(self, entry: dict): + super().__init__(entry) + self.repo = entry.get("git", "") + self.branch = entry.get("branch", "") + self.commit = entry.get("commit", "") + self.tag = entry.get("tag", "") + self._paths = entry.get("paths", []) + self.destination = None + + if not self.branch and not self.commit and not self.tag: + raise spack.error.ConfigError( + f"Git include paths ({self}) must specify one or more of: branch, commit, tag" + ) + + if not self._paths: + raise spack.error.ConfigError( + f"Git include paths ({self}) must include one or more relative paths" + ) + + def __repr__(self): + if self.branch: + identifier = f"branch={self.branch}" + else: + identifier = f"commit={self.commit}, tag={self.tag}" + + return ( + f"GitIncludePaths({self.repo}, paths={self.paths}, " + f"{identifier}, when='{self.when}', optional={self.optional})" + ) + + def _destination(self): + dir_name = spack.util.hash.b32_hash(self.repo)[-7:] + return os.path.join(_include_cache_location(), dir_name) + + def _clone(self) -> Optional[str]: + """Clone the repository.""" + if self.fetched(): + tty.debug(f"Repository ({self.repo}) already cloned to {self.destination}") + return self.destination + + destination = self._destination() + with filesystem.working_dir(destination, create=True): + if not os.path.exists(".git"): + try: + spack.util.git.init_git_repo(self.repo) + except spack.util.executable.ProcessError as e: + raise spack.error.ConfigError( + f"Unable to initialize repository ({self.repo}) under {destination}: {e}" + ) + + try: + if self.commit: + spack.util.git.pull_checkout_commit(self.commit) + elif self.tag: + spack.util.git.pull_checkout_tag(self.tag) + elif self.branch: + # if the branch already exists we should use the + # previously configured remote + try: + git = spack.util.git.git(required=True) + output = git("config", f"branch.{self.branch}.remote", output=str) + remote = output.strip() + except spack.util.executable.ProcessError: + remote = "origin" + spack.util.git.pull_checkout_branch(self.branch, remote=remote) + else: + raise spack.error.ConfigError(f"Missing or unsupported options in {self}") + + except spack.util.executable.ProcessError as e: + raise spack.error.ConfigError( + f"Unable to check out repository ({self}) in {destination}: {e}" + ) + + # only set the destination on successful clone/checkout + self.destination = destination + return self.destination + + def fetched(self): + """Whether the repository has already been cloned to its destination.""" + return self.destination is not None and os.path.exists( + os.path.join(self.destination, ".git") + ) + + def scopes(self, parent_scope: ConfigScope) -> List[ConfigScope]: + """Instantiate configuration scopes for the included paths. + + Args: + parent_scope: including scope + + Returns: configuration scopes IF the when condition is satisfied; + otherwise, an empty list. + + Raises: + ConfigFileError: unable to access remote configuration file(s) + ValueError: included path has an unsupported URL scheme, is required + but does not exist; configuration stage directory argument is missing + """ + if not self.evaluate_condition(): + tty.debug(f"Include condition is not satisfied in {self}") + return [] + + if self._scopes: + tty.debug(f"Using existing scopes: {[s.name for s in self._scopes]}") + return self._scopes + + destination = self._clone() + if destination is None: + raise spack.error.ConfigError(f"Unable to cache the include: {self}") + + scopes: List[ConfigScope] = [] + for relative_path in self.paths: + config_path = os.path.join(destination, relative_path) + scope = self._scope(relative_path, config_path, parent_scope) + if scope is not None: + scopes.append(scope) + + # cache the scopes if successfully able to process all of them + if scopes: + self._scopes = scopes + return self._scopes + + @property + def paths(self) -> List[str]: + """Path(s) associated with the include.""" + + return self._paths + + +def included_path(entry: Union[str, pathlib.Path, dict]) -> Union[IncludePath, GitIncludePaths]: + """Convert the included paths entry into the appropriate optional include.
+ + Args: + entry: include configuration entry + + Returns: converted entry, where an empty ``when`` means the path is not conditionally included """ - # circular dependencies - import spack.spec + if isinstance(entry, (str, pathlib.Path)): + return IncludePath({"path": str(entry)}) - if (not include.when) or spack.spec.eval_conditional(include.when): - config_path = rfc_util.local_path(include.path, include.sha256, _include_cache_location) - if not config_path: - raise ConfigFileError(f"Unable to fetch remote configuration from {include.path}") + if entry.get("path", ""): + return IncludePath(entry) - if os.path.isdir(config_path): - # directories are treated as regular ConfigScopes - config_name = f"{parent_name}:{os.path.basename(config_path)}" - tty.debug(f"Creating DirectoryConfigScope {config_name} for '{config_path}'") - return DirectoryConfigScope(config_name, config_path) + return GitIncludePaths(entry) - if os.path.exists(config_path): - # files are assumed to be SingleFileScopes - config_name = f"{parent_name}:{config_path}" - tty.debug(f"Creating SingleFileScope {config_name} for '{config_path}'") - return SingleFileScope(config_name, config_path, spack.schema.merged.schema) - if not include.optional: - path = f" at ({config_path})" if config_path != include.path else "" - raise ValueError(f"Required path ({include.path}) does not exist{path}") +def paths_from_includes(includes: List[Union[str, dict]]) -> List[str]: + """The path(s) from the configured includes. + + Args: + includes: include configuration information + + Returns: list of paths or an empty list if there are none + """ - return None + paths = [] + for entry in includes: + include = included_path(entry) + paths.extend(include.paths) + return paths def config_paths_from_entry_points() -> List[Tuple[str, str]]: @@ -911,40 +1372,24 @@ def create_incremental() -> Generator[Configuration, None, None]: it. It is bundled inside a function so that configuration can be initialized lazily. """ - # first do the builtin, hardcoded defaults + # Default scopes are builtins and the default scope within the Spack instance. + # These are versioned with Spack and can be overridden by systems, sites or user scopes. cfg = create_from( - (ConfigScopePriority.BUILTIN, InternalConfigScope("_builtin", CONFIG_DEFAULTS)) + (ConfigScopePriority.DEFAULTS, InternalConfigScope("_builtin", CONFIG_DEFAULTS)), + (ConfigScopePriority.DEFAULTS, DirectoryConfigScope(*CONFIGURATION_DEFAULTS_PATH)), ) + yield cfg - # Builtin paths to configuration files in Spack - configuration_paths = [ - # Default configuration scope is the lowest-level scope. These are - # versioned with Spack and can be overridden by systems, sites or users - CONFIGURATION_DEFAULTS_PATH - ] + # Initial topmost scope is spack (the config scope in the spack instance). + # It includes the user, site, and system scopes. Environments and command + # line scopes go above this. + configuration_paths = [("spack", spack.paths.etc_path)] - disable_local_config = "SPACK_DISABLE_LOCAL_CONFIG" in os.environ - - # System configuration is per machine. - # This is disabled if user asks for no local configuration. - if not disable_local_config: - configuration_paths.append(("system", spack.paths.system_config_path)) - - # Site configuration is per spack instance, for sites or projects - # No site-level configs should be checked into spack by default.
- configuration_paths.append(("site", os.path.join(spack.paths.etc_path))) - - # Python package's can register configuration scopes via entry_points + # Python packages can register configuration scopes via entry_points configuration_paths.extend(config_paths_from_entry_points()) - # User configuration can override both spack defaults and site config - # This is disabled if user asks for no local configuration. - if not disable_local_config: - configuration_paths.append(("user", spack.paths.user_config_path)) - # add each scope for name, path in configuration_paths: - cfg.push_scope(DirectoryConfigScope(name, path), priority=ConfigScopePriority.CONFIG_FILES) # yield the config incrementally so that each config level's init code can get # data from the one below. This can be tricky, but it enables us to have a # single unified config system. @@ -954,7 +1399,9 @@ # config (which uses ssl and other config options) for some of the scopes, # to make the bootstrap issues more explicit, even if allowing config scope # init to reference lower scopes is more flexible. - yield cfg + yield from cfg.push_scope_incremental( + DirectoryConfigScope(name, path), priority=ConfigScopePriority.CONFIG_FILES + ) def create() -> Configuration: @@ -1054,6 +1501,9 @@ def get(path: str, default: Optional[Any] = None, scope: Optional[str] = None) - return CONFIG.get(path, default, scope) +_set = set #: save this before defining set -- maybe config.set was ill-advised :) + + def set(path: str, value: Any, scope: Optional[str] = None) -> None: """Convenience function for setting single values in config files. @@ -1075,10 +1525,24 @@ def writable_scopes() -> List[ConfigScope]: return scopes +def existing_scopes() -> List[ConfigScope]: + """Return the list of existing scopes: scopes Spack is aware of that + either have a representation on the filesystem or are internal. + Higher-priority scopes come first in the list.""" + scopes = [x for x in CONFIG.scopes.values() if x.exists] + scopes.reverse() + return scopes + + def writable_scope_names() -> List[str]: return list(x.name for x in writable_scopes()) +def existing_scope_names() -> List[str]: + return list(x.name for x in existing_scopes()) + + def matched_config(cfg_path: str) -> List[Tuple[str, Any]]: return [(scope, get(cfg_path, scope=scope)) for scope in writable_scope_names()] @@ -1278,7 +1742,7 @@ def remove_yaml(dest, source): """UnMerges source from dest; entries in source take precedence over dest. This routine may modify dest and should be assigned to dest, in - case dest was None to begin with, e.g.: + case dest was None to begin with, e.g.:: dest = remove_yaml(dest, source) @@ -1288,7 +1752,7 @@ def remove_yaml(dest, source): appear as keys in ``dest``. Config file authors can optionally end any attribute in a dict - with `::` instead of `:`, and the key will remove the entire section + with ``::`` instead of ``:``, and the key will remove the entire section from ``dest`` """ @@ -1370,7 +1834,7 @@ def _validate(path): # value (if it's valid).
try: syaml.load_config(path) - except spack.util.spack_yaml.SpackYAMLError as e: + except syaml.SpackYAMLError as e: raise ValueError( "Remainder of path is not a valid key" f" and does not parse as a value {path}" @@ -1417,7 +1881,7 @@ def process(path): quoted = True element = element.strip("'\"") - if any([append, prepend, override, quoted]): + if append or prepend or override or quoted: element = syaml.syaml_str(element) if append: element.append = True @@ -1432,27 +1896,29 @@ def process(path): def process_config_path(path: str) -> List[str]: - """Process a path argument to config.set() that may contain overrides ('::' or - trailing ':') + """Process a path argument to config.set() that may contain overrides (``::`` or + trailing ``:``) Colons will be treated as static strings if inside of quotes, - e.g. `this:is:a:path:'value:with:colon'` will yield: + e.g. ``this:is:a:path:'value:with:colon'`` will yield: + + .. code-block:: text - [this, is, a, path, value:with:colon] + [this, is, a, path, value:with:colon] - The path may consist only of keys (e.g. for a `get`) or may end in a value. + The path may consist only of keys (e.g. for a ``get``) or may end in a value. Keys are always strings: if a user encloses a key in quotes, the quotes should be removed. Values with quotes should be treated as strings, but without quotes, may be parsed as a different yaml object (e.g. - '{}' is a dict, but '"{}"' is a string). + ``'{}'`` is a dict, but ``'"{}"'`` is a string). This function does not know whether the final element of the path is a key or value, so: - * It must strip the quotes, in case it is a key (so we look for ``key`` and - not '"key"')) + * It must strip the quotes, in case it is a key (so we look for ``key`` and + not ``"key"``) * It must indicate somehow that the quotes were stripped, in case it is a - value (so that we don't process '"{}"' as a YAML dict) + value (so that we don't process ``"{}"`` as a YAML dict) Therefore, all elements with quotes are stripped, and then also converted to ``syaml_str`` (if treating the final element as a value, the caller diff --git a/lib/spack/spack/container/images.py b/lib/spack/spack/container/images.py index fbf251b2208c92..35d15efc28bf4b 100644 --- a/lib/spack/spack/container/images.py +++ b/lib/spack/spack/container/images.py @@ -35,7 +35,7 @@ def build_info(image, spack_version): Args: image (str): image to be used at run-time. Should be of the form - <image>:<tag> e.g. "ubuntu:18.04" + <image>:<tag> e.g. ``"ubuntu:18.04"`` spack_version (str): version of Spack that we want to use to build Returns: @@ -57,10 +57,10 @@ def os_package_manager_for(image): Args: image (str): image to be used at run-time. Should be of the form - <image>:<tag> e.g. "ubuntu:18.04" + <image>:<tag> e.g. ``"ubuntu:18.04"`` Returns: - Name of the package manager, e.g. "apt" or "yum" + Name of the package manager, e.g.
``"apt"`` or ``"yum"`` """ name = data()["images"][image]["os_package_manager"] return name diff --git a/lib/spack/spack/container/writers.py b/lib/spack/spack/container/writers.py index 7ed761e879bdff..d78a48527eeefc 100644 --- a/lib/spack/spack/container/writers.py +++ b/lib/spack/spack/container/writers.py @@ -196,7 +196,7 @@ def manifest(self): # Ensure that a few paths are where they need to be manifest.setdefault("config", syaml.syaml_dict()) - manifest["config"]["install_tree"] = self.paths.store + manifest["config"]["install_tree"] = {"root": self.paths.store} manifest["view"] = self.paths.view manifest = {"spack": manifest} diff --git a/lib/spack/spack/cray_manifest.py b/lib/spack/spack/cray_manifest.py index 48ae2760f334c9..49df96e902473d 100644 --- a/lib/spack/spack/cray_manifest.py +++ b/lib/spack/spack/cray_manifest.py @@ -34,7 +34,7 @@ def translated_compiler_name(manifest_compiler_name): """ When creating a Compiler object, Spack expects a name matching - one of the classes in `spack.compilers.config`. Names in the Cray manifest + one of the classes in :mod:`spack.compilers.config`. Names in the Cray manifest may differ; for cases where we know the name refers to a compiler in Spack, this function translates it automatically. @@ -182,6 +182,7 @@ def spec_from_entry(entry): spec._hashes_final = True spec.external_path = entry["prefix"] spec.origin = "external-db" + spec.namespace = pkg_cls.namespace spack.spec.Spec.ensure_valid_variants(spec) return spec diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 0f401d46f1825e..1e87a3fee5ed2f 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -5,12 +5,11 @@ The database serves two purposes: - 1. It implements a cache on top of a potentially very large Spack - directory hierarchy, speeding up many operations that would - otherwise require filesystem access. - - 2. It will allow us to track external installations as well as lost - packages and their dependencies. +1. It implements a cache on top of a potentially very large Spack + directory hierarchy, speeding up many operations that would + otherwise require filesystem access. +2. It will allow us to track external installations as well as lost + packages and their dependencies. Prior to the implementation of this store, a directory layout served as the authoritative database of packages in Spack. This module @@ -67,7 +66,7 @@ ) from spack.error import SpackError from spack.util.crypto import bit_length -from spack.util.socket import _getfqdn +from spack.util.socket import _gethostname from .enums import InstallRecordStatus @@ -178,8 +177,8 @@ class InstallRecord: install path, AND whether or not it is installed. We need the installed flag in case a user either: - a) blew away a directory, or - b) used spack uninstall -f to get rid of it + 1. blew away a directory, or + 2. used spack uninstall -f to get rid of it If, in either case, the package was removed but others still depend on it, we still need to track its spec, so we don't @@ -443,7 +442,7 @@ def _ensure_parent_directories(self) -> None: def clear(self, spec: "spack.spec.Spec", force: bool = False) -> None: """Removes any persistent and cached failure tracking for the spec. - see `mark()`. + see :meth:`mark`. 
Args: spec: the spec whose failure indicators are being removed @@ -649,11 +648,11 @@ def _ensure_parent_directories(self): self.database_directory.mkdir(parents=True, exist_ok=True) def write_transaction(self): - """Get a write lock context manager for use in a `with` block.""" + """Get a write lock context manager for use in a ``with`` block.""" return self._write_transaction_impl(self.lock, acquire=self._read, release=self._write) def read_transaction(self): - """Get a read lock context manager for use in a `with` block.""" + """Get a read lock context manager for use in a ``with`` block.""" return self._read_transaction_impl(self.lock, acquire=self._read) def _write_to_file(self, stream): @@ -725,10 +724,9 @@ def query_by_spec_hash( """Get a spec for hash, and whether it's installed upstream. Return: - (tuple): (bool, optional InstallRecord): bool tells us whether - the record is from an upstream. Its InstallRecord is also - returned if available (the record must be checked to know - whether the hash is installed). + Tuple of bool and optional InstallRecord. The bool tells us whether the record is from + an upstream. Its InstallRecord is also returned if available (the record must be + checked to know whether the hash is installed). If the record is available locally, this function will always have a preference for returning that, even if it is not installed locally @@ -745,12 +743,11 @@ def query_by_spec_hash( return True, db._data[hash_key] return False, None - def query_local_by_spec_hash(self, hash_key): + def query_local_by_spec_hash(self, hash_key: str) -> Optional[InstallRecord]: """Get a spec by hash in the local database Return: - (InstallRecord or None): InstallRecord when installed - locally, otherwise None.""" + InstallRecord when installed locally, otherwise None.""" with self.read_transaction(): return self._data.get(hash_key, None) @@ -810,7 +807,9 @@ def _read_from_file(self, filename: pathlib.Path, *, reindex: bool = False) -> N def check(cond, msg): if not cond: - raise CorruptDatabaseError(f"Spack database is corrupt: {msg}", self._index_path) + raise CorruptDatabaseError( + f"Spack database is corrupt: {msg}", str(self._index_path) + ) check("database" in fdata, "no 'database' attribute in JSON DB.") @@ -832,7 +831,7 @@ def invalid_record(hash_key, error): return CorruptDatabaseError( f"Invalid record in Spack database: hash: {hash_key}, cause: " f"{type(error).__name__}: {error}", - self._index_path, + str(self._index_path), ) # Build up the database in three passes: @@ -1104,7 +1103,7 @@ def _write(self, type=None, value=None, traceback=None): self._state_is_inconsistent = True return - temp_file = str(self._index_path) + (".%s.%s.temp" % (_getfqdn(), os.getpid())) + temp_file = str(self._index_path) + (".%s.%s.temp" % (_gethostname(), os.getpid())) # Write a temporary database file them move it into place try: @@ -1711,7 +1710,7 @@ def query( hashes: list of hashes used to restrict the search - install_tree: query 'all' (default), 'local', 'upstream', or upstream path + install_tree: query ``"all"`` (default), ``"local"``, ``"upstream"``, or upstream path origin: origin of the spec """ @@ -1762,7 +1761,7 @@ def query( ) results = list(local_results) + list(x for x in upstream_results if x not in local_results) - results.sort() # type: ignore[call-overload] + results.sort() # type: ignore[call-arg,call-overload] return results def query_one( diff --git a/lib/spack/spack/dependency.py b/lib/spack/spack/dependency.py index 9fab90dc869dc2..5415ddf4ae7424 100644 --- 
a/lib/spack/spack/dependency.py +++ b/lib/spack/spack/dependency.py @@ -2,11 +2,15 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Data structures that represent Spack's dependency relationships.""" -from typing import Dict, List, Type +from typing import TYPE_CHECKING, Dict, List, Type import spack.deptypes as dt import spack.spec +if TYPE_CHECKING: + import spack.package_base + import spack.patch + class Dependency: """Class representing metadata for a dependency on a package. @@ -35,10 +39,12 @@ class Dependency: """ + __slots__ = "pkg", "spec", "patches", "depflag" + def __init__( self, pkg: Type["spack.package_base.PackageBase"], - spec: "spack.spec.Spec", + spec: spack.spec.Spec, depflag: dt.DepFlag = dt.DEFAULT, ): """Create a new Dependency. @@ -48,14 +54,12 @@ def __init__( spec: Spec indicating dependency requirements type: strings describing dependency relationship """ - assert isinstance(spec, spack.spec.Spec) - self.pkg = pkg - self.spec = spec.copy() + self.spec = spec # This dict maps condition specs to lists of Patch objects, just # as the patches dict on packages does. - self.patches: Dict[spack.spec.Spec, "List[spack.patch.Patch]"] = {} + self.patches: Dict[spack.spec.Spec, List["spack.patch.Patch"]] = {} self.depflag = depflag @property @@ -63,19 +67,6 @@ def name(self) -> str: """Get the name of the dependency package.""" return self.spec.name - def merge(self, other: "Dependency"): - """Merge constraints, deptypes, and patches of other into self.""" - self.spec.constrain(other.spec) - self.depflag |= other.depflag - - # concatenate patch lists, or just copy them in - for cond, p in other.patches.items(): - if cond in self.patches: - current_list = self.patches[cond] - current_list.extend(p for p in other.patches[cond] if p not in current_list) - else: - self.patches[cond] = other.patches[cond] - def __repr__(self) -> str: types = dt.flag_to_chars(self.depflag) if self.patches: diff --git a/lib/spack/spack/deptypes.py b/lib/spack/spack/deptypes.py index 05b30d3548b197..0ebaaa2a21cbfe 100644 --- a/lib/spack/spack/deptypes.py +++ b/lib/spack/spack/deptypes.py @@ -51,12 +51,15 @@ def compatible(flag1: DepFlag, flag2: DepFlag) -> bool: non-build dependency. This separates our two process spaces, build time and run time. These dependency combinations are allowed: - single dep on name: [b], [l], [r], [bl], [br], [blr] - two deps on name: [b, l], [b, r], [b, lr] + + * single dep on name: ``[b]``, ``[l]``, ``[r]``, ``[bl]``, ``[br]``, ``[blr]`` + * two deps on name: ``[b, l]``, ``[b, r]``, ``[b, lr]`` but none of these make any sense: - two build deps: [b, b], [b, br], [b, bl], [b, blr] - any two deps that both have an l or an r, i.e. [l, l], [r, r], [l, r], [bl, l], [bl, r]""" + + * two build deps: ``[b, b]``, ``[b, br]``, ``[b, bl]``, ``[b, blr]`` + * any two deps that both have an ``l`` or an ``r``, i.e. ``[l, l]``, ``[r, r]``, ``[l, r]``, + ``[bl, l]``, ``[bl, r]``""" # Cannot have overlapping build types to two different dependencies if flag1 & flag2: return False @@ -139,9 +142,9 @@ def flag_to_string(x: DepFlag) -> DepType: def flag_to_chars(depflag: DepFlag) -> str: """Create a string representing deptypes for many dependencies. - The string will be some subset of 'blrt', like 'bl ', 'b t', or - ' lr ' where each letter in 'blrt' stands for 'build', 'link', - 'run', and 'test' (the dependency types). 
+ The string will be some subset of ``blrt``, like ``bl ``, ``b t``, or + `` lr `` where each letter in ``blrt`` stands for ``build``, ``link``, + ``run``, and ``test`` (the dependency types). For a single dependency, this just indicates that the dependency has the indicated deptypes. For a list of dependencies, this shows diff --git a/lib/spack/spack/detection/path.py b/lib/spack/spack/detection/path.py index 3fedef78a26243..183cb3d1fbba57 100644 --- a/lib/spack/spack/detection/path.py +++ b/lib/spack/spack/detection/path.py @@ -188,7 +188,7 @@ def libraries_in_ld_and_system_library_path( def libraries_in_windows_paths(path_hints: Optional[List[str]] = None) -> Dict[str, str]: """Get the paths of all libraries available from the system PATH paths. - For more details, see `libraries_in_ld_and_system_library_path` regarding + For more details, see ``libraries_in_ld_and_system_library_path`` regarding return type and contents. Args: diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py index 70f44da046bc2c..26bdb8d3cad79a 100644 --- a/lib/spack/spack/directives.py +++ b/lib/spack/spack/directives.py @@ -5,7 +5,7 @@ """This package contains directives that can be used within a package. Directives are functions that can be called inside a package -definition to modify the package, for example: +definition to modify the package, for example:: class OpenMpi(Package): depends_on("hwloc") @@ -16,25 +16,35 @@ class OpenMpi(Package): The available directives are: - * ``build_system`` - * ``conflicts`` - * ``depends_on`` - * ``extends`` - * ``license`` - * ``patch`` - * ``provides`` - * ``resource`` - * ``variant`` - * ``version`` - * ``requires`` - * ``redistribute`` - +* ``build_system`` +* ``conflicts`` +* ``depends_on`` +* ``extends`` +* ``license`` +* ``patch`` +* ``provides`` +* ``resource`` +* ``variant`` +* ``version`` +* ``requires`` +* ``redistribute`` + +They're implemented as functions that return partial functions that are later executed with a +package class as first argument:: + + @directive("example") + def example_directive(arg1, arg2): + return partial(_execute_example_directive, arg1=arg1, arg2=arg2) + + def _execute_example_directive(pkg, arg1, arg2): + # modify pkg.example based on arg1 and arg2 """ import collections import collections.abc import os import re import warnings +from functools import partial from typing import Any, Callable, List, Optional, Tuple, Type, Union import spack.deptypes as dt @@ -82,7 +92,8 @@ class OpenMpi(Package): SpecType = str DepType = Union[Tuple[str, ...], str] WhenType = Optional[Union[spack.spec.Spec, str, bool]] -Patcher = Callable[[Union[Type[spack.package_base.PackageBase], Dependency]], None] +PackageType = Type[spack.package_base.PackageBase] +Patcher = Callable[[Union[PackageType, Dependency]], None] PatchesType = Union[Patcher, str, List[Union[Patcher, str]]] @@ -163,6 +174,9 @@ def version( tag: Optional[str] = None, branch: Optional[str] = None, get_full_repo: Optional[bool] = None, + git_sparse_paths: Optional[ + Union[List[str], Callable[[spack.package_base.PackageBase], List[str]]] + ] = None, submodules: Union[SubmoduleCallback, Optional[bool]] = None, submodules_delete: Optional[bool] = None, # other version control @@ -178,8 +192,12 @@ def version( version("2.1", sha256="...") version("2.0", sha256="...", preferred=True) + + .. versionchanged:: v2.3 + + The ``git_sparse_paths`` parameter was added. 
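+
+        A hypothetical use of the new parameter, fetching only a subset of the
+        git repository (the version, branch, and paths shown here are
+        illustrative, not taken from this patch)::
+
+            version("develop", branch="develop", git_sparse_paths=["src", "CMakeLists.txt"])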
""" - kwargs = { + kwargs: dict = { key: value for key, value in ( ("sha256", sha256), @@ -197,6 +215,7 @@ def version( ("hg", hg), ("cvs", cvs), ("get_full_repo", get_full_repo), + ("git_sparse_paths", git_sparse_paths), ("branch", branch), ("submodules", submodules), ("submodules_delete", submodules_delete), @@ -211,18 +230,17 @@ def version( ) if value is not None } - return lambda pkg: _execute_version(pkg, ver, **kwargs) + return partial(_execute_version, ver=ver, kwargs=kwargs) -def _execute_version(pkg: Type[spack.package_base.PackageBase], ver: Union[str, int], **kwargs): +def _execute_version(pkg: PackageType, ver: Union[str, int], kwargs: dict): if ( (any(s in kwargs for s in spack.util.crypto.hashes) or "checksum" in kwargs) and hasattr(pkg, "has_code") and not pkg.has_code ): raise VersionChecksumError( - "{0}: Checksums not allowed in no-code packages " - "(see '{1}' version).".format(pkg.name, ver) + f"{pkg.name}: Checksums not allowed in no-code packages " f"(see '{ver}' version)." ) if not isinstance(ver, (int, str)): @@ -244,8 +262,66 @@ def _execute_version(pkg: Type[spack.package_base.PackageBase], ver: Union[str, pkg.versions[version] = kwargs -def _depends_on( - pkg: Type[spack.package_base.PackageBase], +@directive("conflicts") +def conflicts(conflict_spec: SpecType, when: WhenType = None, msg: Optional[str] = None): + """Declare a conflict for a package. + + A conflict is a spec that is known to be invalid. For example, a package that cannot build + with GCC 14 and above can declare:: + + conflicts("%gcc@14:") + + To express the same constraint only when the ``foo`` variant is activated:: + + conflicts("%gcc@14:", when="+foo") + + Args: + conflict_spec: constraint defining the known conflict + when: optional condition that triggers the conflict + msg: optional user defined message + """ + return partial(_execute_conflicts, conflict_spec=conflict_spec, when=when, msg=msg) + + +def _execute_conflicts(pkg: PackageType, conflict_spec, when, msg): + # If when is not specified the conflict always holds + when_spec = _make_when_spec(when) + if not when_spec: + return + + # Save in a list the conflicts and the associated custom messages + conflict_spec_list = pkg.conflicts.setdefault(when_spec, []) + msg_with_name = f"{pkg.name}: {msg}" if msg is not None else msg + conflict_spec_list.append((spack.spec.Spec(conflict_spec), msg_with_name)) + + +@directive("dependencies") +def depends_on( + spec: SpecType, + when: WhenType = None, + type: DepType = dt.DEFAULT_TYPES, + patches: Optional[PatchesType] = None, +): + """Declare a dependency on another package. + + Example:: + + depends_on("hwloc@2:", when="@1:", type="link") + + Args: + spec: dependency spec + when: condition when this dependency applies + type: One or more of ``"build"``, ``"run"``, ``"test"``, or ``"link"`` (either a string or + tuple). Defaults to ``("build", "link")``. 
+ patches: single result of :py:func:`patch` directive, a + ``str`` to be passed to ``patch``, or a list of these + """ + dep_spec = spack.spec.Spec(spec) + return partial(_execute_depends_on, spec=dep_spec, when=when, type=type, patches=patches) + + +def _execute_depends_on( + pkg: PackageType, spec: spack.spec.Spec, *, when: WhenType = None, @@ -257,7 +333,9 @@ def _depends_on( return if not spec.name: - raise DependencyError(f"Invalid dependency specification in package '{pkg.name}':", spec) + raise DependencyError( + f"Invalid dependency specification in package '{pkg.name}':", str(spec) + ) if pkg.name == spec.name: raise CircularReferenceError(f"Package '{pkg.name}' cannot depend on itself.") @@ -304,7 +382,9 @@ def _depends_on( dependency = Dependency(pkg, spec, depflag=depflag) deps_by_name[spec.name] = dependency else: - dependency.spec.constrain(spec, deps=False) + copy = dependency.spec.copy() + copy.constrain(spec, deps=False) + dependency.spec = copy dependency.depflag |= depflag # apply patches to the dependency @@ -312,68 +392,6 @@ def _depends_on( execute_patch(dependency) -@directive("conflicts") -def conflicts(conflict_spec: SpecType, when: WhenType = None, msg: Optional[str] = None): - """Declare a conflict for a package. - - A conflict is a spec that is known to be invalid. For example, a package that cannot build - with GCC 14 and above can declare:: - - conflicts("%gcc@14:") - - To express the same constraint only when the ``foo`` variant is activated:: - - conflicts("%gcc@14:", when="+foo") - - Args: - conflict_spec: constraint defining the known conflict - when: optional condition that triggers the conflict - msg: optional user defined message - """ - - def _execute_conflicts(pkg: Type[spack.package_base.PackageBase]): - # If when is not specified the conflict always holds - when_spec = _make_when_spec(when) - if not when_spec: - return - - # Save in a list the conflicts and the associated custom messages - conflict_spec_list = pkg.conflicts.setdefault(when_spec, []) - msg_with_name = f"{pkg.name}: {msg}" if msg is not None else msg - conflict_spec_list.append((spack.spec.Spec(conflict_spec), msg_with_name)) - - return _execute_conflicts - - -@directive(("dependencies")) -def depends_on( - spec: SpecType, - when: WhenType = None, - type: DepType = dt.DEFAULT_TYPES, - patches: Optional[PatchesType] = None, -): - """Declare a dependency on another package. - - Example:: - - depends_on("hwloc@2:", when="@1:", type="link") - - Args: - spec: dependency spec - when: condition when this dependency applies - type: One or more of ``"build"``, ``"run"``, ``"test"``, or ``"link"`` (either a string or - tuple). Defaults to ``("build", "link")``. - patches: single result of ``patch()`` directive, a - ``str`` to be passed to ``patch``, or a list of these - """ - dep_spec = spack.spec.Spec(spec) - - def _execute_depends_on(pkg: Type[spack.package_base.PackageBase]): - _depends_on(pkg, dep_spec, when=when, type=type, patches=patches) - - return _execute_depends_on - - @directive("disable_redistribute") def redistribute( source: Optional[bool] = None, binary: Optional[bool] = None, when: WhenType = None @@ -383,15 +401,11 @@ def redistribute( By default, packages allow source/binary distribution (in mirrors/build caches resp.). This directive allows users to explicitly disable redistribution for specs. 
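+
+    For example, a hypothetical package that must not redistribute its source
+    for versions ``2:`` could declare::
+
+        redistribute(source=False, when="@2:")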
""" - - return lambda pkg: _execute_redistribute(pkg, source, binary, when) + return partial(_execute_redistribute, source=source, binary=binary, when=when) def _execute_redistribute( - pkg: Type[spack.package_base.PackageBase], - source: Optional[bool], - binary: Optional[bool], - when: WhenType, + pkg: PackageType, source: Optional[bool], binary: Optional[bool], when: WhenType ): if source is None and binary is None: return @@ -427,7 +441,12 @@ def _execute_redistribute( @directive(("extendees", "dependencies")) -def extends(spec, when=None, type=("build", "run"), patches=None): +def extends( + spec: str, + when: WhenType = None, + type: DepType = ("build", "run"), + patches: Optional[PatchesType] = None, +): """Same as :func:`depends_on`, but also adds this package to the extendee list. In case of Python, also adds a dependency on ``python-venv``. @@ -436,26 +455,29 @@ def extends(spec, when=None, type=("build", "run"), patches=None): Notice that the default ``type`` is ``("build", "run")``, which is different from :func:`depends_on` where the default is ``("build", "link")``.""" - def _execute_extends(pkg): - when_spec = _make_when_spec(when) - if not when_spec: - return + return partial(_execute_extends, spec=spec, when=when, type=type, patches=patches) + - dep_spec = spack.spec.Spec(spec) +def _execute_extends( + pkg: PackageType, spec: str, when: WhenType, type: DepType, patches: Optional[PatchesType] +): + when_spec = _make_when_spec(when) + if not when_spec: + return - _depends_on(pkg, dep_spec, when=when, type=type, patches=patches) + dep_spec = spack.spec.Spec(spec) - # When extending python, also add a dependency on python-venv. This is done so that - # Spack environment views are Python virtual environments. - if dep_spec.name == "python" and not pkg.name == "python-venv": - _depends_on(pkg, spack.spec.Spec("python-venv"), when=when, type=("build", "run")) + _execute_depends_on(pkg, dep_spec, when=when, type=type, patches=patches) - pkg.extendees[dep_spec.name] = (dep_spec, when_spec) + # When extending python, also add a dependency on python-venv. This is done so that + # Spack environment views are Python virtual environments. + if dep_spec.name == "python" and not pkg.name == "python-venv": + _execute_depends_on(pkg, spack.spec.Spec("python-venv"), when=when, type=("build", "run")) - return _execute_extends + pkg.extendees[dep_spec.name] = (dep_spec, when_spec) -@directive(dicts=("provided", "provided_together")) +@directive(("provided", "provided_together")) def provides(*specs: SpecType, when: WhenType = None): """Declare that this package provides a virtual dependency. @@ -467,28 +489,29 @@ def provides(*specs: SpecType, when: WhenType = None): when: condition when this provides clause needs to be considered """ - def _execute_provides(pkg: Type[spack.package_base.PackageBase]): - when_spec = _make_when_spec(when) - if not when_spec: - return + return partial(_execute_provides, specs=specs, when=when) - # ``when`` specs for ``provides()`` need a name, as they are used - # to build the ProviderIndex. - when_spec.name = pkg.name - spec_objs = [spack.spec.Spec(x) for x in specs] - spec_names = [x.name for x in spec_objs] - if len(spec_names) > 1: - pkg.provided_together.setdefault(when_spec, []).append(set(spec_names)) +def _execute_provides(pkg: PackageType, specs: Tuple[SpecType, ...], when: WhenType): + when_spec = _make_when_spec(when) + if not when_spec: + return + + # ``when`` specs for ``provides()`` need a name, as they are used + # to build the ProviderIndex. 
+ when_spec.name = pkg.name - for provided_spec in spec_objs: - if pkg.name == provided_spec.name: - raise CircularReferenceError("Package '%s' cannot provide itself." % pkg.name) + spec_objs = [spack.spec.Spec(x) for x in specs] + spec_names = [x.name for x in spec_objs] + if len(spec_names) > 1: + pkg.provided_together.setdefault(when_spec, []).append(set(spec_names)) - provided_set = pkg.provided.setdefault(when_spec, set()) - provided_set.add(provided_spec) + for provided_spec in spec_objs: + if pkg.name == provided_spec.name: + raise CircularReferenceError("Package '%s' cannot provide itself." % pkg.name) - return _execute_provides + provided_set = pkg.provided.setdefault(when_spec, set()) + provided_set.add(provided_spec) @directive("splice_specs") @@ -505,25 +528,28 @@ def can_splice( target. match_variants: A list of variants that must match between target spec and current package, - with special value '*' which matches all variants. Example: a ``json`` variant is + with special value ``*`` which matches all variants. Example: a ``json`` variant is defined on two packages, and they are ABI-compatible whenever they agree on the json variant (regardless of whether it is turned on or off). Note that this cannot - be applied to multi-valued variants and multi-valued variants will be skipped by '*'. + be applied to multi-valued variants and multi-valued variants will be skipped by ``*``. """ - def _execute_can_splice(pkg: Type[spack.package_base.PackageBase]): - when_spec = _make_when_spec(when) - if isinstance(match_variants, str) and match_variants != "*": - raise ValueError( - "* is the only valid string for match_variants " - "if looking to provide a single variant, use " - f"[{match_variants}] instead" - ) - if when_spec is None: - return - pkg.splice_specs[when_spec] = (spack.spec.Spec(target), match_variants) + return partial(_execute_can_splice, target=target, when=when, match_variants=match_variants) - return _execute_can_splice + +def _execute_can_splice( + pkg: PackageType, target: SpecType, when: SpecType, match_variants: Union[None, str, List[str]] +): + when_spec = _make_when_spec(when) + if isinstance(match_variants, str) and match_variants != "*": + raise ValueError( + "* is the only valid string for match_variants " + "if looking to provide a single variant, use " + f"[{match_variants}] instead" + ) + if when_spec is None: + return + pkg.splice_specs[when_spec] = (spack.spec.Spec(target), match_variants) @directive("patches") @@ -555,51 +581,68 @@ def patch( compressed URL patches) """ - def _execute_patch( - pkg_or_dep: Union[Type[spack.package_base.PackageBase], Dependency], - ) -> None: - pkg = pkg_or_dep.pkg if isinstance(pkg_or_dep, Dependency) else pkg_or_dep + return partial( + _execute_patch, + when=when, + url_or_filename=url_or_filename, + level=level, + working_dir=working_dir, + reverse=reverse, + sha256=sha256, + archive_sha256=archive_sha256, + ) - if hasattr(pkg, "has_code") and not pkg.has_code: - raise UnsupportedPackageDirective( - "Patches are not allowed in {0}: package has no code.".format(pkg.name) - ) - when_spec = _make_when_spec(when) - if not when_spec: - return - - # If this spec is identical to some other, then append this - # patch to the existing list. 
- cur_patches = pkg_or_dep.patches.setdefault(when_spec, []) - - global _patch_order_index - ordering_key = (pkg.name, _patch_order_index) - _patch_order_index += 1 - - patch: spack.patch.Patch - if "://" in url_or_filename: - if sha256 is None: - raise ValueError("patch() with a url requires a sha256") - - patch = spack.patch.UrlPatch( - pkg, - url_or_filename, - level, - working_dir=working_dir, - reverse=reverse, - ordering_key=ordering_key, - sha256=sha256, - archive_sha256=archive_sha256, - ) - else: - patch = spack.patch.FilePatch( - pkg, url_or_filename, level, working_dir, reverse, ordering_key=ordering_key - ) +def _execute_patch( + pkg_or_dep: Union[PackageType, Dependency], + url_or_filename: str, + level: int, + when: WhenType, + working_dir: str, + reverse: bool, + sha256: Optional[str], + archive_sha256: Optional[str], +) -> None: + pkg = pkg_or_dep.pkg if isinstance(pkg_or_dep, Dependency) else pkg_or_dep + + if hasattr(pkg, "has_code") and not pkg.has_code: + raise UnsupportedPackageDirective( + "Patches are not allowed in {0}: package has no code.".format(pkg.name) + ) - cur_patches.append(patch) + when_spec = _make_when_spec(when) + if not when_spec: + return - return _execute_patch + # If this spec is identical to some other, then append this + # patch to the existing list. + cur_patches = pkg_or_dep.patches.setdefault(when_spec, []) + + global _patch_order_index + ordering_key = (pkg.name, _patch_order_index) + _patch_order_index += 1 + + patch: spack.patch.Patch + if "://" in url_or_filename: + if sha256 is None: + raise ValueError("patch() with a url requires a sha256") + + patch = spack.patch.UrlPatch( + pkg, + url_or_filename, + level, + working_dir=working_dir, + reverse=reverse, + ordering_key=ordering_key, + sha256=sha256, + archive_sha256=archive_sha256, + ) + else: + patch = spack.patch.FilePatch( + pkg, url_or_filename, level, working_dir, reverse, ordering_key=ordering_key + ) + + cur_patches.append(patch) def conditional(*values: Union[str, bool], when: Optional[WhenType] = None): @@ -643,6 +686,35 @@ def variant( Raises: spack.directives_meta.DirectiveError: If arguments passed to the directive are invalid """ + return partial( + _execute_variant, + name=name, + default=default, + description=description, + values=values, + multi=multi, + validator=validator, + when=when, + sticky=sticky, + ) + + +def _format_error(msg, pkg, name): + msg += " @*r{{[{0}, variant '{1}']}}" + return spack.llnl.util.tty.color.colorize(msg.format(pkg.name, name)) + + +def _execute_variant( + pkg: PackageType, + name: str, + default: Optional[Union[bool, str, Tuple[str, ...]]], + description: str, + values: Optional[Union[collections.abc.Sequence, Callable[[Any], bool]]], + multi: Optional[bool], + validator: Optional[Callable[[str, str, Tuple[Any, ...]], None]], + when: Optional[Union[str, bool]], + sticky: bool, +): # This validation can be removed at runtime and enforced with an audit in Spack v1.0. # For now it's a warning to let people migrate faster. 
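# ---------------------------------------------------------------------------
# Editor's note (illustration, not part of the patch): the hunks above replace
# directive closures with functools.partial. A minimal, runnable sketch of the
# underlying pattern -- a directive call queues a partial that is later
# executed with the package class as its first argument. All names below are
# illustrative stand-ins, not Spack's real API.
from functools import partial

_queued = []  # stands in for DirectiveMeta._directives_to_be_executed


def directive(maker):
    def wrapper(*args, **kwargs):
        # Queue the partial returned by the directive for later execution
        _queued.append(maker(*args, **kwargs))

    return wrapper


@directive
def maintainers(*names):
    return partial(_execute_maintainers, names=names)


def _execute_maintainers(pkg, names):
    pkg.maintainers = sorted(set(getattr(pkg, "maintainers", [])) | set(names))


class PackageMeta(type):
    def __new__(mcs, name, bases, attrs):
        cls = super().__new__(mcs, name, bases, attrs)
        while _queued:  # execute each queued partial with the new class
            _queued.pop(0)(cls)
        return cls


class MyPackage(metaclass=PackageMeta):
    maintainers("alice", "bob")  # queues a partial during class body execution


assert MyPackage.maintainers == ["alice", "bob"]
# ---------------------------------------------------------------------------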
@@ -662,17 +734,8 @@ def variant( category=spack.error.SpackAPIWarning, ) - def format_error(msg, pkg): - msg += " @*r{{[{0}, variant '{1}']}}" - return spack.llnl.util.tty.color.colorize(msg.format(pkg.name, name)) - if name in spack.variant.RESERVED_NAMES: - - def _raise_reserved_name(pkg): - msg = "The name '%s' is reserved by Spack" % name - raise DirectiveError(format_error(msg, pkg)) - - return _raise_reserved_name + raise DirectiveError(_format_error(f"The name '{name}' is reserved by Spack", pkg, name)) # Ensure we have a sequence of allowed variant values, or a # predicate for it. @@ -694,15 +757,14 @@ def _raise_reserved_name(pkg): # TODO: attributes and let a packager decide whether to use the fluent # TODO: interface or the directive argument if hasattr(values, argument) and locals()[argument] is not None: - - def _raise_argument_error(pkg): - msg = ( - "Remove specification of {0} argument: it is handled " - "by an attribute of the 'values' argument" + raise DirectiveError( + _format_error( + f"Remove specification of {argument} argument: it is handled " + "by an attribute of the 'values' argument", + pkg, + name, ) - raise DirectiveError(format_error(msg.format(argument), pkg)) - - return _raise_argument_error + ) # Allow for the object defining the allowed values to supply its own # default value and group validator, say if it supports multiple values. @@ -718,42 +780,32 @@ def _raise_argument_error(pkg): default = ",".join(default) if default is None or default == "": - - def _raise_default_not_set(pkg): - if default is None: - msg = "either a default was not explicitly set, or 'None' was used" - else: - msg = "the default cannot be an empty string" - raise DirectiveError(format_error(msg, pkg)) - - return _raise_default_not_set + if default is None: + msg = "either a default was not explicitly set, or 'None' was used" + else: + msg = "the default cannot be an empty string" + raise DirectiveError(_format_error(msg, pkg, name)) description = str(description).strip() + when_spec = _make_when_spec(when) - def _execute_variant(pkg): - when_spec = _make_when_spec(when) - - if not re.match(spack.spec.IDENTIFIER_RE, name): - directive = "variant" - msg = "Invalid variant name in {0}: '{1}'" - raise DirectiveError(directive, msg.format(pkg.name, name)) - - # variants are stored by condition then by name (so only the last variant of a - # given name takes precedence *per condition*). - # NOTE: variant defaults and values can conflict if when conditions overlap. - variants_by_name = pkg.variants.setdefault(when_spec, {}) - variants_by_name[name] = spack.variant.Variant( - name=name, - default=default, - description=description, - values=values, - multi=multi, - validator=validator, - sticky=sticky, - precedence=pkg.num_variant_definitions(), - ) + if not re.match(spack.spec.IDENTIFIER_RE, name): + raise DirectiveError("variant", f"Invalid variant name in {pkg.name}: '{name}'") - return _execute_variant + # variants are stored by condition then by name (so only the last variant of a + # given name takes precedence *per condition*). + # NOTE: variant defaults and values can conflict if when conditions overlap. 
+ variants_by_name = pkg.variants.setdefault(when_spec, {}) # type: ignore[arg-type] + variants_by_name[name] = spack.variant.Variant( + name=name, + default=default, + description=description, + values=values, + multi=multi, + validator=validator, + sticky=sticky, + precedence=pkg.num_variant_definitions(), + ) @directive("resources") @@ -780,34 +832,50 @@ def resource( """ - def _execute_resource(pkg): - when_spec = _make_when_spec(when) - if not when_spec: - return + return partial( + _execute_resource, + name=name, + destination=destination, + placement=placement, + when=when, + kwargs=kwargs, + ) + - # Check if the path is relative - if os.path.isabs(destination): - msg = "The destination keyword of a resource directive can't be an absolute path.\n" - msg += f"\tdestination : '{destination}\n'" - raise RuntimeError(msg) +def _execute_resource( + pkg: PackageType, + name: Optional[str], + destination: str, + placement: Optional[str], + when: WhenType, + # additional kwargs are as for `version()` + kwargs: dict, +): + when_spec = _make_when_spec(when) + if not when_spec: + return - # Check if the path falls within the main package stage area - test_path = "stage_folder_root" + # Check if the path is relative + if os.path.isabs(destination): + msg = "The destination keyword of a resource directive can't be an absolute path.\n" + msg += f"\tdestination : '{destination}\n'" + raise RuntimeError(msg) - # Normalized absolute path - normalized_destination = os.path.normpath(os.path.join(test_path, destination)) + # Check if the path falls within the main package stage area + test_path = "stage_folder_root" - if test_path not in normalized_destination: - msg = "Destination of a resource must be within the package stage directory.\n" - msg += f"\tdestination : '{destination}'\n" - raise RuntimeError(msg) + # Normalized absolute path + normalized_destination = os.path.normpath(os.path.join(test_path, destination)) - resources = pkg.resources.setdefault(when_spec, []) - resources.append( - Resource(name, spack.fetch_strategy.from_kwargs(**kwargs), destination, placement) - ) + if test_path not in normalized_destination: + msg = "Destination of a resource must be within the package stage directory.\n" + msg += f"\tdestination : '{destination}'\n" + raise RuntimeError(msg) - return _execute_resource + resources = pkg.resources.setdefault(when_spec, []) + resources.append( + Resource(name, spack.fetch_strategy.from_kwargs(**kwargs), destination, placement) + ) def build_system(*values, **kwargs): @@ -834,18 +902,35 @@ def maintainers(*names: str): Args: names: GitHub username for the maintainer """ + return partial(_execute_maintainer, names=names) - def _execute_maintainer(pkg): - maintainers = set(getattr(pkg, "maintainers", [])) - maintainers.update(names) - pkg.maintainers = sorted(maintainers) - return _execute_maintainer +def _execute_maintainer(pkg: PackageType, names: Tuple[str, ...]): + maintainers = set(getattr(pkg, "maintainers", [])) + maintainers.update(names) + pkg.maintainers = sorted(maintainers) -def _execute_license( - pkg: Type[spack.package_base.PackageBase], license_identifier: str, when: WhenType +@directive("licenses") +def license( + license_identifier: str, + checked_by: Optional[Union[str, List[str]]] = None, + when: Optional[Union[str, bool]] = None, ): + """Declare the license(s) the software is distributed under. + + Args: + license_identifier: SPDX identifier specifying the license(s) the software + is distributed under. 
+ checked_by: string or list of strings indicating which github user checked the + license (if any). + when: A spec specifying when the license applies. + """ + + return partial(_execute_license, license_identifier=license_identifier, when=when) + + +def _execute_license(pkg: PackageType, license_identifier: str, when: Optional[Union[str, bool]]): # If when is not specified the license always holds when_spec = _make_when_spec(when) if not when_spec: @@ -869,27 +954,13 @@ def _execute_license( pkg.licenses[when_spec] = license_identifier -@directive("licenses") -def license( - license_identifier: str, - checked_by: Optional[Union[str, List[str]]] = None, - when: Optional[Union[str, bool]] = None, -): - """Declare the license(s) the software is distributed under. - - Args: - license_identifiers: SPDX identifier specifying the license(s) the software - is distributed under. - checked_by: string or list of strings indicating which github user checked the - license (if any). - when: A spec specifying when the license applies. - """ - - return lambda pkg: _execute_license(pkg, license_identifier, when) - - @directive("requirements") -def requires(*requirement_specs: str, policy="one_of", when=None, msg=None): +def requires( + *requirement_specs: str, + policy: str = "one_of", + when: Optional[str] = None, + msg: Optional[str] = None, +): """Declare that a spec must be satisfied for a package. For instance, a package whose Fortran code can only be compiled with GCC can declare:: @@ -902,31 +973,42 @@ def requires(*requirement_specs: str, policy="one_of", when=None, msg=None): Args: requirement_specs: spec expressing the requirement + policy: either ``"one_of"`` or ``"any_of"``. If ``"one_of"``, exactly one of the + requirements must be satisfied. If ``"any_of"``, at least one of the requirements must + be satisfied. Defaults to ``"one_of"``. when: optional constraint that triggers the requirement. If None the requirement is applied unconditionally. 
- msg: optional user defined message """ - def _execute_requires(pkg: Type[spack.package_base.PackageBase]): - if policy not in ("one_of", "any_of"): - err_msg = ( - f"the 'policy' argument of the 'requires' directive in {pkg.name} is set " - f"to a wrong value (only 'one_of' or 'any_of' are allowed)" - ) - raise DirectiveError(err_msg) + return partial( + _execute_requires, requirement_specs=requirement_specs, policy=policy, when=when, msg=msg + ) - when_spec = _make_when_spec(when) - if not when_spec: - return - # Save in a list the requirements and the associated custom messages - requirement_list = pkg.requirements.setdefault(when_spec, []) - msg_with_name = f"{pkg.name}: {msg}" if msg is not None else msg - requirements = tuple(spack.spec.Spec(s) for s in requirement_specs) - requirement_list.append((requirements, policy, msg_with_name)) +def _execute_requires( + pkg: PackageType, + requirement_specs: Tuple[str, ...], + policy: str, + when: Optional[str], + msg: Optional[str], +): + if policy not in ("one_of", "any_of"): + err_msg = ( + f"the 'policy' argument of the 'requires' directive in {pkg.name} is set " + f"to a wrong value (only 'one_of' or 'any_of' are allowed)" + ) + raise DirectiveError(err_msg) + + when_spec = _make_when_spec(when) + if not when_spec: + return - return _execute_requires + # Save in a list the requirements and the associated custom messages + requirement_list = pkg.requirements.setdefault(when_spec, []) + msg_with_name = f"{pkg.name}: {msg}" if msg is not None else msg + requirements = tuple(spack.spec.Spec(s) for s in requirement_specs) + requirement_list.append((requirements, policy, msg_with_name)) class DependencyError(DirectiveError): diff --git a/lib/spack/spack/directives_meta.py b/lib/spack/spack/directives_meta.py index 9f3c2b6c92a9eb..174ef29ed4500f 100644 --- a/lib/spack/spack/directives_meta.py +++ b/lib/spack/spack/directives_meta.py @@ -100,6 +100,24 @@ def pop_default_args() -> dict: """Pop default arguments""" return DirectiveMeta._default_args.pop() + @staticmethod + def _remove_directives(arg): + # If any of the arguments are executors returned by a directive passed as an argument, + # don't execute them lazily. Instead, let the called directive handle them. This allows + # nested directive calls in packages. The caller can return the directive if it should be + # queued. Nasty, but it's the best way I can think of to avoid side effects if directive + # results are passed as args + directives = DirectiveMeta._directives_to_be_executed + if isinstance(arg, (list, tuple)): + # Descend into args that are lists or tuples + for a in arg: + DirectiveMeta._remove_directives(a) + else: + # Remove directives args from the exec queue + remove = next((d for d in directives if d is arg), None) + if remove is not None: + directives.remove(remove) + @staticmethod def directive(dicts: Optional[Union[Sequence[str], str]] = None) -> Callable: """Decorator for Spack directives. @@ -113,8 +131,8 @@ def directive(dicts: Optional[Union[Sequence[str], str]] = None) -> Callable: .. code-block:: python - @directive(dicts='versions') - version(pkg, ...): + @directive(dicts="versions") + def version(pkg, ...): ... This directive allows you write: @@ -126,15 +144,15 @@ class Foo(Package): The ``@directive`` decorator handles a couple things for you: - 1. Adds the class scope (pkg) as an initial parameter when - called, like a class method would. This allows you to modify - a package from within a directive, while the package is still - being defined. + 1. 
Adds the class scope (pkg) as an initial parameter when + called, like a class method would. This allows you to modify + a package from within a directive, while the package is still + being defined. - 2. It automatically adds a dictionary called "versions" to the - package so that you can refer to pkg.versions. + 2. It automatically adds a dictionary called ``versions`` to the + package so that you can refer to pkg.versions. - The ``(dicts='versions')`` part ensures that ALL packages in Spack + The ``(dicts="versions")`` part ensures that ALL packages in Spack will have a ``versions`` attribute after they're constructed, and that if no directive actually modified it, it will just be an empty dict. @@ -184,31 +202,14 @@ def _wrapper(*args, **_kwargs): ] if kwargs.get("when"): when_constraints.append(spack.spec.Spec(kwargs["when"])) - when_spec = spack.spec.merge_abstract_anonymous_specs(*when_constraints) + when_spec = spack.spec.Spec() + for current in when_constraints: + when_spec._constrain_symbolically(current, deps=True) kwargs["when"] = when_spec - # If any of the arguments are executors returned by a - # directive passed as an argument, don't execute them - # lazily. Instead, let the called directive handle them. - # This allows nested directive calls in packages. The - # caller can return the directive if it should be queued. - def remove_directives(arg): - directives = DirectiveMeta._directives_to_be_executed - if isinstance(arg, (list, tuple)): - # Descend into args that are lists or tuples - for a in arg: - remove_directives(a) - else: - # Remove directives args from the exec queue - remove = next((d for d in directives if d is arg), None) - if remove is not None: - directives.remove(remove) - - # Nasty, but it's the best way I can think of to avoid - # side effects if directive results are passed as args - remove_directives(args) - remove_directives(list(kwargs.values())) + DirectiveMeta._remove_directives(args) + DirectiveMeta._remove_directives(list(kwargs.values())) # A directive returns either something that is callable on a # package or a sequence of them diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index efb795be046c1e..77b9ac96db1120 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -327,8 +327,8 @@ def remove_install_directory(self, spec: "spack.spec.Spec", deprecated: bool = F path = os.path.dirname(path) def all_specs(self) -> List["spack.spec.Spec"]: - """Returns a list of all specs detected in self.root, detected by `.spack` directories. - Their prefix is set to the directory containing the `.spack` directory. Note that these + """Returns a list of all specs detected in self.root, detected by ``.spack`` directories. + Their prefix is set to the directory containing the ``.spack`` directory. 
Note that these specs may follow a different layout than the current layout if it was changed after installation.""" return specs_from_metadata_dirs(self.root) diff --git a/lib/spack/spack/enums.py b/lib/spack/spack/enums.py index 3a71d90fa881ba..ded48034637c69 100644 --- a/lib/spack/spack/enums.py +++ b/lib/spack/spack/enums.py @@ -17,8 +17,15 @@ class InstallRecordStatus(enum.Flag): class ConfigScopePriority(enum.IntEnum): """Priorities of the different kind of config scopes used by Spack""" - BUILTIN = 0 + DEFAULTS = 0 CONFIG_FILES = 1 ENVIRONMENT = 2 CUSTOM = 3 COMMAND_LINE = 4 + + +class PropagationPolicy(enum.Enum): + """Enum to specify the behavior of a propagated dependency""" + + NONE = enum.auto() + PREFERENCE = enum.auto() diff --git a/lib/spack/spack/environment/__init__.py b/lib/spack/spack/environment/__init__.py index 200f45e519da01..deb05370c35ef3 100644 --- a/lib/spack/spack/environment/__init__.py +++ b/lib/spack/spack/environment/__init__.py @@ -6,8 +6,8 @@ .. _lockfile-format: -`spack.lock` format -=================== +``spack.lock`` format +===================== Spack environments have existed since Spack ``v0.12.0``, and there have been different ``spack.lock`` formats since then. The formats are documented here. @@ -15,27 +15,27 @@ The high-level format of a Spack lockfile hasn't changed much between versions, but the contents have. Lockfiles are JSON-formatted and their top-level sections are: - 1. ``_meta`` (object): this contains details about the file format, including: - * ``file-type``: always ``"spack-lockfile"`` - * ``lockfile-version``: an integer representing the lockfile format version - * ``specfile-version``: an integer representing the spec format version (since - ``v0.17``) +1. ``_meta`` (object): this contains details about the file format, including: - 2. ``spack`` (object): optional, this identifies information about Spack - used to concretize the environment: - * ``type``: required, identifies form Spack version took (e.g., ``git``, ``release``) - * ``commit``: the commit if the version is from git - * ``version``: the Spack version + * ``file-type``: always ``"spack-lockfile"`` + * ``lockfile-version``: an integer representing the lockfile format version + * ``specfile-version``: an integer representing the spec format version (since + ``v0.17``) +2. ``spack`` (object): optional, this identifies information about Spack + used to concretize the environment: - 3. ``roots`` (list): an ordered list of records representing the roots of the Spack - environment. Each has two fields: - * ``hash``: a Spack spec hash uniquely identifying the concrete root spec - * ``spec``: a string representation of the abstract spec that was concretized + * ``type``: required, identifies the form the Spack version took (e.g., ``git``, ``release``) + * ``commit``: the commit if the version is from git + * ``version``: the Spack version +3. ``roots`` (list): an ordered list of records representing the roots of the Spack + environment. Each has two fields: - 4. ``concrete_specs``: a dictionary containing the specs in the environment. - 5. ``include_concrete`` (dictionary): an optional dictionary that includes the roots - and concrete specs from the included environments, keyed by the path to that - environment + * ``hash``: a Spack spec hash uniquely identifying the concrete root spec + * ``spec``: a string representation of the abstract spec that was concretized +4. ``concrete_specs``: a dictionary containing the specs in the environment. +5. 
``include_concrete`` (dictionary): an optional dictionary that includes the roots + and concrete specs from the included environments, keyed by the path to that + environment Compatibility ------------- @@ -549,6 +549,7 @@ TOP_LEVEL_KEY, Environment, SpackEnvironmentConfigError, + SpackEnvironmentDevelopError, SpackEnvironmentError, SpackEnvironmentViewError, activate, @@ -586,6 +587,7 @@ "TOP_LEVEL_KEY", "Environment", "SpackEnvironmentConfigError", + "SpackEnvironmentDevelopError", "SpackEnvironmentError", "SpackEnvironmentViewError", "activate", diff --git a/lib/spack/spack/environment/depfile.py b/lib/spack/spack/environment/depfile.py index f69bc15e4e18cb..46389b0c24ffa0 100644 --- a/lib/spack/spack/environment/depfile.py +++ b/lib/spack/spack/environment/depfile.py @@ -165,7 +165,11 @@ def __init__( item.target.safe_name(), " ".join(self._install_target(s.safe_name()) for s in item.prereqs), item.target.spec_hash(), - item.target.unsafe_format("{name}{@version}{variants}{ arch=architecture}"), + item.target.unsafe_format( + "{name}{@version}{variants}" + "{ platform=architecture.platform}{ os=architecture.os}" + "{ target=architecture.target}" + ), item.buildcache_flag, ) for item in adjacency_list diff --git a/lib/spack/spack/environment/environment.py b/lib/spack/spack/environment/environment.py index 53a64adeaa6169..dda534ea62a3e3 100644 --- a/lib/spack/spack/environment/environment.py +++ b/lib/spack/spack/environment/environment.py @@ -24,6 +24,7 @@ import spack.llnl.util.filesystem as fs import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as clr +import spack.package_base import spack.paths import spack.repo import spack.schema.env @@ -36,9 +37,9 @@ import spack.util.path import spack.util.spack_json as sjson import spack.util.spack_yaml as syaml +import spack.variant as vt from spack import traverse -from spack.installer import PackageInstaller -from spack.llnl.util.filesystem import islink, readlink, symlink +from spack.llnl.util.filesystem import copy_tree, islink, readlink, symlink from spack.llnl.util.link_tree import ConflictingSpecsError from spack.schema.env import TOP_LEVEL_KEY from spack.spec import Spec @@ -58,6 +59,10 @@ #: currently activated environment _active_environment: Optional["Environment"] = None +# This is used in spack.main to bypass env failures if the command is `spack config edit` +# It is used in spack.cmd.config to get the path to a failed env for `spack config edit` +#: Validation error for a currently active environment that failed to parse +_active_environment_error: Optional[spack.config.ConfigFormatError] = None #: default path where environments are stored in the spack tree default_env_path = os.path.join(spack.paths.var_path, "environments") @@ -85,14 +90,23 @@ def env_root_path() -> str: def environment_name(path: Union[str, pathlib.Path]) -> str: """Human-readable representation of the environment. - This is the path for directory environments, and just the name + This is the path for independent environments, and just the name for managed environments. """ - path_str = str(path) - if path_str.startswith(env_root_path()): - return os.path.basename(path_str) + env_root = pathlib.Path(env_root_path()).resolve() + path_path = pathlib.Path(path) + + # For a managed environment created in Spack, env.path is ENV_ROOT/NAME + # For a tracked environment from `spack env track`, the path is symlinked to ENV_ROOT/NAME + # So if ENV_ROOT/NAME resolves to env.path we know the environment is tracked/managed. 
+ # Otherwise, it is an independent environment and we return the path. + # + # We resolve both paths fully because the env_root itself could also be a symlink, + # and any directory in env.path could be a symlink. + if (env_root / path_path.name).resolve() == path_path.resolve(): + return path_path.name else: - return path_str + return str(path) def ensure_no_disallowed_env_config_mods(scope: spack.config.ConfigScope) -> None: @@ -124,8 +138,10 @@ def default_manifest_yaml(): ) +sep_re = re.escape(os.sep) + #: regex for validating environment names -valid_environment_name_re = r"^\w[\w-]*$" +valid_environment_name_re = rf"^\w[{sep_re}\w-]*$" #: version of the lockfile format. Must increase monotonically. lockfile_format_version = 6 @@ -169,11 +185,7 @@ def valid_env_name(name): def validate_env_name(name): if not valid_env_name(name): raise ValueError( - ( - "'%s': names must start with a letter, and only contain " - "letters, numbers, _, and -." - ) - % name + f"{name}: names may only contain letters, numbers, _, and -, and may not start with -." ) return name @@ -354,13 +366,16 @@ def create_in_dir( Args: root: directory where to create the environment. - init_file: either a lockfile, a manifest file, or None + init_file: either a lockfile, a manifest file, an env directory, or None with_view: whether a view should be maintained for the environment. If the value is a string, it specifies the path to the view keep_relative: if True, develop paths are copied verbatim into the new environment file, otherwise they are made absolute include_concrete: concrete environment names/paths to be included """ + # If the initfile is a named environment, get its path + if init_file and exists(str(init_file)): + init_file = read(str(init_file)).path initialize_environment_dir(root, envfile=init_file) if with_view is None and keep_relative: @@ -387,7 +402,12 @@ def create_in_dir( env = Environment(root) if init_file: - init_file_dir = os.path.abspath(os.path.dirname(init_file)) + if os.path.isdir(init_file): + init_file_dir = init_file + copied = True + else: + init_file_dir = os.path.abspath(os.path.dirname(init_file)) + copied = False if not keep_relative: if env.path != init_file_dir: @@ -395,13 +415,15 @@ def create_in_dir( # spack.yaml file in another directory, and moreover we want # dev paths in this environment to refer to their original # locations. 
- _rewrite_relative_dev_paths_on_relocation(env, init_file_dir) - _rewrite_relative_repos_paths_on_relocation(env, init_file_dir) + # If the full env was copied including internal files, only rewrite + # relative paths outside of env + _rewrite_relative_dev_paths_on_relocation(env, init_file_dir, copied_env=copied) + _rewrite_relative_repos_paths_on_relocation(env, init_file_dir, copied_env=copied) return env -def _rewrite_relative_dev_paths_on_relocation(env, init_file_dir): +def _rewrite_relative_dev_paths_on_relocation(env, init_file_dir, copied_env=False): """When initializing the environment from a manifest file and we plan to store the environment in a different directory, we have to rewrite relative paths to absolute ones.""" @@ -417,6 +439,10 @@ def _rewrite_relative_dev_paths_on_relocation(env, init_file_dir): if entry["path"] == expanded_path: continue + # If copied and it's inside the env, we copied it and don't need to relativize + if copied_env and expanded_path.startswith(init_file_dir): + continue + tty.debug("Expanding develop path for {0} to {1}".format(name, expanded_path)) dev_specs[name]["path"] = expanded_path @@ -429,7 +455,7 @@ def _rewrite_relative_dev_paths_on_relocation(env, init_file_dir): env._re_read() -def _rewrite_relative_repos_paths_on_relocation(env, init_file_dir): +def _rewrite_relative_repos_paths_on_relocation(env, init_file_dir, copied_env=False): """When initializing the environment from a manifest file and we plan to store the environment in a different directory, we have to rewrite relative repo paths to absolute ones and expand environment variables.""" @@ -448,6 +474,10 @@ def _rewrite_relative_repos_paths_on_relocation(env, init_file_dir): if entry == expanded_path: continue + # If copied and it's inside the env, we copied it and don't need to relativize + if copied_env and expanded_path.startswith(init_file_dir): + continue + tty.debug("Expanding repo path for {0} to {1}".format(entry, expanded_path)) repos_specs[name] = expanded_path @@ -535,7 +565,7 @@ def validate_included_envs_concrete(include_concrete: List[str]) -> None: non_concrete_envs.add(Environment(env_path).name) if non_concrete_envs: - msg = "The following environment(s) are not concrete: {0}\n" "Please run:".format( + msg = "The following environment(s) are not concrete: {0}\nPlease run:".format( ", ".join(non_concrete_envs) ) for env in non_concrete_envs: @@ -551,11 +581,22 @@ def all_environment_names(): if not os.path.exists(env_root_path()): return [] - candidates = sorted(os.listdir(env_root_path())) + env_root = pathlib.Path(env_root_path()).resolve() + + def yaml_paths(): + for root, dirs, files in os.walk(env_root, topdown=True, followlinks=True): + dirs[:] = [ + d + for d in dirs + if not d.startswith(".") and not env_root.samefile(os.path.join(root, d)) + ] + if manifest_name in files: + yield os.path.join(root, manifest_name) + names = [] - for candidate in candidates: - yaml_path = os.path.join(_root(candidate), manifest_name) - if valid_env_name(candidate) and os.path.exists(yaml_path): + for yaml_path in yaml_paths(): + candidate = str(pathlib.Path(yaml_path).relative_to(env_root).parent) + if valid_env_name(candidate): names.append(candidate) return names @@ -572,7 +613,7 @@ def _read_yaml(str_or_file): data = syaml.load_config(str_or_file) except syaml.SpackYAMLError as e: raise SpackEnvironmentConfigError( - f"Invalid environment configuration detected: {e.message}" + f"Invalid environment configuration detected: {e.message}", e.filename ) filename = 
getattr(str_or_file, "name", None) @@ -911,10 +952,6 @@ def _exclude_duplicate_runtimes(self, nodes): return [x for x in nodes if x.name not in all_runtimes or runtimes_by_name[x.name] == x] -def _create_environment(path): - return Environment(path) - - def env_subdir_path(manifest_dir: Union[str, pathlib.Path]) -> str: """Path to where the environment stores repos, logs, views, configs. @@ -994,8 +1031,18 @@ def unify(self): def unify(self, value): self._unify = value - def __reduce__(self): - return _create_environment, (self.path,) + def __getstate__(self): + state = self.__dict__.copy() + state.pop("txlock", None) + state.pop("_repo", None) + state.pop("repo_token", None) + state.pop("store_token", None) + return state + + def __setstate__(self, state): + self.__dict__.update(state) + self.txlock = lk.Lock(self._transaction_lock_path) + self._repo = None def _re_read(self): """Reinitialize the environment object.""" @@ -1014,7 +1061,7 @@ def _read(self): shutil.copy(self.lock_path, self._lock_backup_v1_path) def write_transaction(self): - """Get a write lock context manager for use in a `with` block.""" + """Get a write lock context manager for use in a ``with`` block.""" return lk.WriteTransaction(self.txlock, acquire=self._re_read) def _process_view(self, env_view: Optional[Union[bool, str, Dict]]): @@ -1277,12 +1324,12 @@ def destroy(self): """Remove this environment from Spack entirely.""" shutil.rmtree(self.path) - def add(self, user_spec, list_name=user_speclist_name): + def add(self, user_spec, list_name=user_speclist_name) -> bool: """Add a single user_spec (non-concretized) to the Environment Returns: - (bool): True if the spec was added, False if it was already - present and did not need to be added + True if the spec was added, False if it was already present and did not need to be + added """ spec = Spec(user_spec) @@ -1301,7 +1348,7 @@ def add(self, user_spec, list_name=user_speclist_name): list_to_change = self.spec_lists[list_name] existing = str(spec) in list_to_change.yaml_list if not existing: - list_to_change.add(str(spec)) + list_to_change.add(spec) if list_name == user_speclist_name: self.manifest.add_user_spec(str(user_spec)) else: @@ -1318,22 +1365,22 @@ def change_existing_spec( allow_changing_multiple_specs=False, ): """ - Find the spec identified by `match_spec` and change it to `change_spec`. + Find the spec identified by ``match_spec`` and change it to ``change_spec``. Arguments: change_spec: defines the spec properties that need to be changed. This will not change attributes of the - matched spec unless they conflict with `change_spec`. + matched spec unless they conflict with ``change_spec``. list_name: identifies the spec list in the environment that should be modified match_spec: if set, this identifies the spec that should be changed. If not set, it is assumed we are - looking for a spec with the same name as `change_spec`. + looking for a spec with the same name as ``change_spec``. 
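+
+        For example, a hypothetical call that changes an existing ``hdf5`` entry
+        to ``hdf5+mpi`` (the spec names are illustrative)::
+
+            env.change_existing_spec(Spec("hdf5+mpi"), match_spec=Spec("hdf5"))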
""" - if not (change_spec.name or (match_spec and match_spec.name)): + if not (change_spec.name or match_spec): raise ValueError( - "Must specify a spec name to identify a single spec" - " in the environment that will be changed" + "Must specify a spec name or match spec to identify a single spec" + " in the environment that will be changed (or multiple with '--all')" ) match_spec = match_spec or Spec(change_spec.name) @@ -1424,6 +1471,79 @@ def is_develop(self, spec): """Returns true when the spec is built from local sources""" return spec.name in self.dev_specs + def apply_develop(self, spec: spack.spec.Spec, path: Optional[str] = None): + """Mutate concrete specs to include dev_path provenance pointing to path. + + This will fail if any existing concrete spec for the same package does not satisfy the + + given develop spec.""" + selector = spack.spec.Spec(spec.name) + + mutator = spack.spec.Spec() + if path: + variant = vt.SingleValuedVariant("dev_path", path) + else: + variant = vt.VariantValueRemoval("dev_path") + mutator.variants["dev_path"] = variant + + msg = ( + f"Develop spec '{spec}' conflicts with concrete specs in environment." + " Try again with 'spack develop --no-modify-concrete-specs'" + " and run 'spack concretize --force' to apply your changes." + ) + self.mutate(selector, mutator, validator=spec, msg=msg) + + def mutate( + self, + selector: spack.spec.Spec, + mutator: spack.spec.Spec, + validator: Optional[spack.spec.Spec] = None, + msg: Optional[str] = None, + ): + """Mutate concrete specs of an environment + + Mutate any spec that matches ``selector``. Invalidate caches on parents of mutated specs. + If a validator spec is supplied, throw an error if a selected spec does not satisfy the + validator. + """ + # Find all specs that this mutation applies to + modify_specs = [] + modified_specs = [] + for dep in self.all_specs_generator(): + if dep.satisfies(selector): + if not dep.satisfies(validator or selector): + if not msg: + msg = f"spec {dep} satisfies selector {selector}" + msg += f" but not validator {validator}" + raise SpackEnvironmentDevelopError(msg) + modify_specs.append(dep) + + # Manipulate selected specs + for s in modify_specs: + modified = s.mutate(mutator, rehash=False) + if modified: + modified_specs.append(s) + + # Identify roots modified and invalidate all dependent hashes + modified_roots = [] + for parent in traverse.traverse_nodes(modified_specs, direction="parents"): + # record whether this parent is a root before we modify the hash + if parent.dag_hash() in self.specs_by_hash: + modified_roots.append((parent, parent.dag_hash())) + # modify the parent to invalidate hashes + parent._mark_root_concrete(False) + parent.clear_caches() + + # Compute new hashes and update the env list of specs + for root, old_hash in modified_roots: + root._finalize_concretization() + self.concretized_order[self.concretized_order.index(old_hash)] = root.dag_hash() + self.specs_by_hash.pop(old_hash) + self.specs_by_hash[root.dag_hash()] = root + + if modified_roots: + self.write() + def concretize( self, force: Optional[bool] = None, tests: Union[bool, Sequence] = False ) -> Sequence[SpecPair]: @@ -1603,8 +1723,6 @@ def _concretize_separately(self, tests: Union[bool, Sequence] = False): """Concretization strategy that concretizes separately one user spec after the other. 
""" - import spack.bootstrap - # keep any concretized specs whose user specs are still in the manifest old_concretized_user_specs = self.concretized_user_specs old_concretized_order = self.concretized_order @@ -1647,10 +1765,10 @@ def update_default_view(self, path_or_bool: Union[str, bool]) -> None: """Updates the path of the default view. If the argument passed as input is False the default view is deleted, if present. The - manifest will have an entry "view: false". + manifest will have an entry ``view: false``. If the argument passed as input is True a default view is created, if not already present. - The manifest will have an entry "view: true". If a default view is already declared, it + The manifest will have an entry ``view: true``. If a default view is already declared, it will be left untouched. If the argument passed as input is a path a default view pointing to that path is created, @@ -1894,8 +2012,14 @@ def install_specs(self, specs: Optional[List[Spec]] = None, **install_args): *(s.dag_hash() for s in roots), } + if spack.config.get("config:installer", "old") == "new": + from spack.new_installer import PackageInstaller + else: + from spack.installer import PackageInstaller # type: ignore[assignment] + + builder = PackageInstaller([spec.package for spec in specs], **install_args) + try: - builder = PackageInstaller([spec.package for spec in specs], **install_args) builder.install() finally: if reporter: @@ -1922,7 +2046,7 @@ def roots(self): """Specs explicitly requested by the user *in this environment*. Yields both added and installed specs that have user specs in - `spack.yaml`. + ``spack.yaml``. """ concretized = dict(self.concretized_specs()) for spec in self.user_specs: @@ -1998,8 +2122,8 @@ def matching_spec(self, spec): spec in the environment. The matching spec does not have to be installed in the environment, - but must be concrete (specs added with `spack add` without an - intervening `spack concretize` will not be matched). + but must be concrete (specs added with ``spack add`` without an + intervening ``spack concretize`` will not be matched). If there is a single root spec that matches the provided spec or a single dependency spec that matches the provided spec, then the @@ -2446,11 +2570,13 @@ def _equiv_dict(first, second): return same_values and same_keys_with_same_overrides -def display_specs(specs): +def display_specs(specs: List[spack.spec.Spec], *, highlight_non_defaults: bool = False) -> None: """Displays a list of specs traversed breadth-first, covering nodes, with install status. 
Args: - specs (list): list of specs + specs: list of specs to be displayed + highlight_non_defaults: if True, highlights non-default versions and variants in the specs + being displayed """ tree_string = spack.spec.tree( specs, @@ -2458,6 +2584,12 @@ def display_specs(specs): hashes=True, hashlen=7, status_fn=spack.spec.Spec.install_status, + highlight_version_fn=( + spack.package_base.non_preferred_version if highlight_non_defaults else None + ), + highlight_variant_fn=( + spack.package_base.non_default_variant if highlight_non_defaults else None + ), key=traverse.by_dag_hash, ) print(tree_string) @@ -2539,7 +2671,7 @@ def _top_level_key(data): Returns: Either 'spack' or 'env' """ - msg = 'cannot find top level attribute "spack" or "env"' "in the environment" + msg = 'cannot find top level attribute "spack" or "env" in the environment' assert any(x in data for x in ("spack", "env")), msg if "spack" in data: return "spack" @@ -2615,10 +2747,18 @@ def _ensure_env_dir(): return envfile = pathlib.Path(envfile) - if not envfile.exists() or not envfile.is_file(): + if not envfile.exists(): msg = f"cannot initialize environment, {envfile} is not a valid file" raise SpackEnvironmentError(msg) + if envfile.is_dir(): + # initialization file is an entire env directory + if not (envfile / "spack.yaml").is_file(): + msg = f"cannot initialize environment, {envfile} is not a valid environment" + raise SpackEnvironmentError(msg) + copy_tree(str(envfile), str(environment_dir)) + return + _ensure_env_dir() # When we have a lockfile we should copy that and produce a consistent default manifest @@ -2644,9 +2784,8 @@ def _ensure_env_dir(): # TODO: make this recursive includes = manifest[TOP_LEVEL_KEY].get("include", []) - for include in includes: - included_path = spack.config.included_path(include) - path = included_path.path + paths = spack.config.paths_from_includes(includes) + for path in paths: if os.path.isabs(path): continue @@ -2657,12 +2796,20 @@ def _ensure_env_dir(): continue orig_abspath = os.path.normpath(envfile.parent / path) + if os.path.isfile(orig_abspath): + fs.touchp(abspath) + shutil.copy(orig_abspath, abspath) + continue + if not os.path.exists(orig_abspath): - tty.warn(f"Included file does not exist; will not copy: '{path}'") + tty.warn(f"Skipping copy of non-existent include path: '{path}'") continue - fs.touchp(abspath) - shutil.copy(orig_abspath, abspath) + if os.path.exists(abspath): + tty.warn(f"Skipping copy of directory over existing path: {path}") + continue + + shutil.copytree(orig_abspath, abspath, symlinks=True) class EnvironmentManifestFile(collections.abc.Mapping): @@ -2830,10 +2977,7 @@ def remove_definition(self, user_spec: str, list_name: str) -> None: or the list does not exist """ defs = self.configuration.get("definitions", []) - msg = ( - f"cannot remove {user_spec} from the '{list_name}' definition, " - f"no valid list exists" - ) + msg = f"cannot remove {user_spec} from the '{list_name}' definition, no valid list exists" for idx, item in self._iterate_on_definitions(defs, list_name=list_name, err_msg=msg): try: @@ -2961,11 +3105,11 @@ def env_config_scope(self) -> spack.config.ConfigScope: ensure_no_disallowed_env_config_mods(self._env_config_scope) return self._env_config_scope - def prepare_config_scope( - self, priority: ConfigScopePriority = ConfigScopePriority.ENVIRONMENT - ) -> None: + def prepare_config_scope(self) -> None: """Add the manifest's scope to the global configuration search path.""" - spack.config.CONFIG.push_scope(self.env_config_scope, 
priority) + spack.config.CONFIG.push_scope( + self.env_config_scope, priority=ConfigScopePriority.ENVIRONMENT + ) def deactivate_config_scope(self) -> None: """Remove the manifest's scope from the global config path.""" @@ -3011,3 +3155,11 @@ class SpackEnvironmentViewError(SpackEnvironmentError): class SpackEnvironmentConfigError(SpackEnvironmentError): """Class for Spack environment-specific configuration errors.""" + + def __init__(self, msg, filename): + self.filename = filename + super().__init__(msg) + + +class SpackEnvironmentDevelopError(SpackEnvironmentError): + """Class for errors in applying develop information to an environment.""" diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py index e38be432ef0f47..de90ed053dfdf7 100644 --- a/lib/spack/spack/error.py +++ b/lib/spack/spack/error.py @@ -2,8 +2,8 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import inspect import sys +from typing import Optional import spack.llnl.util.tty as tty @@ -11,7 +11,7 @@ #: this is module-scoped because it needs to be set very early debug = 0 -#: whether to show a backtrace when an error is printed, enabled with --backtrace. +#: whether to show a backtrace when an error is printed, enabled with ``--backtrace``. SHOW_BACKTRACE = False @@ -24,7 +24,7 @@ class SpackError(Exception): Subclasses can be found in the modules they have to do with. """ - def __init__(self, message, long_message=None): + def __init__(self, message: str, long_message: Optional[str] = None) -> None: super().__init__() self.message = message self._long_message = long_message @@ -78,16 +78,13 @@ def die(self): sys.exit(1) def __str__(self): - msg = self.message if self._long_message: - msg += "\n %s" % self._long_message - return msg + return f"{self.message}\n {self._long_message}" + return self.message def __repr__(self): - args = [repr(self.message), repr(self.long_message)] - args = ",".join(args) - qualified_name = inspect.getmodule(self).__name__ + "." + type(self).__name__ - return qualified_name + "(" + args + ")" + qualified_name = type(self).__module__ + "." + type(self).__name__ + return f"{qualified_name}({repr(self.message)}, {repr(self.long_message)})" def __reduce__(self): return type(self), (self.message, self.long_message) @@ -219,3 +216,11 @@ def __init__(self, path, size, contents, algorithm, expected, computed): class CompilerError(SpackError): """Raised if something goes wrong when probing or querying a compiler.""" + + +class SpecFilenameError(SpecError): + """Raised when a spec file name is invalid.""" + + +class NoSuchSpecFileError(SpecFilenameError): + """Raised when a spec file doesn't exist.""" diff --git a/lib/spack/spack/extensions.py b/lib/spack/spack/extensions.py index f7256ed78b8ea9..19183ab17ca8d7 100644 --- a/lib/spack/spack/extensions.py +++ b/lib/spack/spack/extensions.py @@ -108,7 +108,7 @@ def ensure_package_creation(name): def load_extension(name: str) -> str: - """Loads a single extension into the 'spack.extensions' package. + """Loads a single extension into the ``spack.extensions`` package. Args: name: name of the extension @@ -132,6 +132,7 @@ def get_extension_paths(): return paths +@spack.llnl.util.lang.memoized def extension_paths_from_entry_points() -> List[str]: """Load extensions from a Python package's entry points. 
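The ``memoized`` decorator just added above caches the entry-point scan for the life of the process. The stdlib equivalent of that caching, with a stand-in for the expensive discovery step::

    import functools
    import time

    @functools.lru_cache(maxsize=None)
    def discover_extensions():
        time.sleep(0.1)  # stand-in for walking installed entry points
        return ("ext-a", "ext-b")

    assert discover_extensions() is discover_extensions()  # second call is a cache hit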
@@ -146,6 +147,8 @@ def extension_paths_from_entry_points() -> List[str]: The function ``get_spack_extensions`` returns paths to the package's spack extensions + This function assumes that the state of entry points doesn't change from the first time it's + called. E.g., it doesn't support any new installation of packages between two calls. """ extension_paths: List[str] = [] for entry_point in spack.llnl.util.lang.get_entry_points(group="spack.extensions"): diff --git a/lib/spack/spack/externals.py b/lib/spack/spack/externals.py new file mode 100644 index 00000000000000..aa1c66d9a15050 --- /dev/null +++ b/lib/spack/spack/externals.py @@ -0,0 +1,426 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +""" +This module turns the configuration data in the ``packages`` section into a list of concrete specs. + +This is mainly done by the ``ExternalSpecsParser`` class, which is responsible for: + + 1. Transforming an intermediate representation of the YAML configuration into a set of nodes + 2. Ensuring the dependency specifications are not ambiguous + 3. Inferring missing information about the external specs (e.g. architecture, deptypes) + 4. Wiring up the external specs to their dependencies + +The helper function ``extract_dicts_from_configuration`` is used to transform the configuration +into the intermediate representation. +""" +import re +import uuid +import warnings +from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union + +from spack.vendor.typing_extensions import TypedDict + +import spack.archspec +import spack.deptypes +import spack.repo +import spack.spec +from spack.error import SpackError +from spack.llnl.util import tty + + +class DependencyDict(TypedDict, total=False): + id: str + spec: str + deptypes: spack.deptypes.DepTypes + virtuals: str + + +class ExternalDict(TypedDict, total=False): + """Dictionary representation of an external spec. + + This representation mostly follows the one used in the configuration files, with a few + exceptions needed to support specific features. + """ + + spec: str + prefix: str + modules: List[str] + extra_attributes: Dict[str, Any] + id: str + dependencies: List[DependencyDict] + # Target requirement from configuration. Not in the external schema + required_target: str + + +def node_from_dict(external_dict: ExternalDict) -> spack.spec.Spec: + """Returns an external spec node from a dictionary representation.""" + extra_attributes = external_dict.get("extra_attributes", {}) + result = spack.spec.Spec( + # Allow `@x.y.z` instead of `@=x.y.z` + str(spack.spec.parse_with_version_concrete(external_dict["spec"])), + external_path=external_dict.get("prefix"), + external_modules=external_dict.get("modules"), + ) + if not result.versions.concrete: + raise ExternalSpecError( + f"The external spec '{external_dict['spec']}' doesn't have a concrete version" + ) + + result.extra_attributes = extra_attributes + if "required_target" in external_dict: + result.constrain(f"target={external_dict['required_target']}") + return result + + +def complete_architecture(node: spack.spec.Spec) -> None: + """Completes a node with architecture information. + + Undefined targets are set to the default host target family (e.g. ``x86_64``). + The operating system and platform are set based on the current host. 
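``complete_architecture`` above fills in only the parts of the architecture that the external spec left out, taking defaults from the host. A generic sketch of completing a partial record from host introspection (the field names are illustrative, not Spack's)::

    import platform

    def complete_arch(node: dict) -> dict:
        # fill only the fields the user left unspecified
        node.setdefault("platform", platform.system().lower())  # e.g. "linux"
        node.setdefault("target", platform.machine() or "x86_64")  # host fallback
        return node

    print(complete_arch({"target": "zen2"}))  # explicit target wins; platform is filled in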
+ """ + if node.architecture: + if not node.architecture.target: + node.architecture.target = spack.archspec.HOST_TARGET_FAMILY + node.architecture.complete_with_defaults() + else: + node.constrain(spack.spec.Spec.default_arch()) + node.architecture.target = spack.archspec.HOST_TARGET_FAMILY + + node.namespace = spack.repo.PATH.repo_for_pkg(node.name).namespace + for flag_type in spack.spec.FlagMap.valid_compiler_flags(): + node.compiler_flags.setdefault(flag_type, []) + + +def complete_variants_and_architecture(node: spack.spec.Spec) -> None: + """Completes a node with variants and architecture information. + + Architecture is completed first, delegating to ``complete_architecture``. + Variants are then added to the node, using their default value. + """ + complete_architecture(node) + pkg_class = spack.repo.PATH.get_pkg_class(node.name) + variants_dict = pkg_class.variants.copy() + changed = True + + while variants_dict and changed: + changed = False + items = list(variants_dict.items()) # copy b/c loop modifies dict + + for when, variants_by_name in items: + if not node.satisfies(when): + continue + variants_dict.pop(when) + for name, vdef in variants_by_name.items(): + if name not in node.variants: + # Cannot use Spec.constrain, because we lose information on the variant type + node.variants[name] = vdef.make_default() + changed = True + + +def extract_dicts_from_configuration(packages_yaml) -> List[ExternalDict]: + """Transforms the packages.yaml configuration into a list of external dictionaries. + + The default required target is extracted from ``packages:all:require``, if present. + Any package-specific required target overrides the default. + """ + result = [] + default_required_target = "" + if "all" in packages_yaml: + default_required_target = _required_target(packages_yaml["all"]) + + for name, entry in packages_yaml.items(): + pkg_required_target = _required_target(entry) or default_required_target + partial_result = [current for current in entry.get("externals", [])] + if pkg_required_target: + for partial in partial_result: + partial["required_target"] = pkg_required_target + result.extend(partial_result) + return result + + +def _line_info(config_dict: Any) -> str: + result = getattr(config_dict, "line_info", "") + return "" if not result else f" [{result}]" + + +_TARGET_RE = re.compile(r"target=([^\s:]+)") + + +def _required_target(entry) -> str: + """Parses the YAML configuration for a single external spec and returns the required target + if defined. Returns an empty string otherwise. + """ + if "require" not in entry: + return "" + + requirements = entry["require"] + if not isinstance(requirements, list): + requirements = [requirements] + + results = [] + for requirement in requirements: + if not isinstance(requirement, str): + continue + + matches = _TARGET_RE.match(requirement) + if matches: + results.append(matches.group(1)) + + if len(results) == 1: + return results[0] + + return "" + + +class ExternalSpecAndConfig(NamedTuple): + spec: spack.spec.Spec + config: ExternalDict + + +class ExternalSpecsParser: + """Transforms a list of external dicts into a list of specs.""" + + def __init__( + self, + external_dicts: List[ExternalDict], + *, + complete_node: Callable[[spack.spec.Spec], None] = complete_variants_and_architecture, + allow_nonexisting: bool = True, + ): + """Initializes a class to manage and process external specifications in ``packages.yaml``. + + Args: + external_dicts: list of ExternalDict objects to provide external specifications. 
+ complete_node: a callable that completes a node with missing variants, targets, etc. + Defaults to ``complete_variants_and_architecture``. + allow_nonexisting: whether to allow non-existing packages. Defaults to True. + + Raises: + spack.repo.UnknownPackageError: if a package does not exist, + and allow_nonexisting is False. + """ + self.external_dicts = external_dicts + self.specs_by_external_id: Dict[str, ExternalSpecAndConfig] = {} + self.specs_by_name: Dict[str, List[ExternalSpecAndConfig]] = {} + self.nodes: List[spack.spec.Spec] = [] + self.allow_nonexisting = allow_nonexisting + # Fill the data structures above (can be done lazily) + self.complete_node = complete_node + self._parse() + + def _parse(self) -> None: + # Parse all nodes without creating edges among them + self._parse_all_nodes() + # Map dependencies specified as specs to a single id + self._ensure_dependencies_have_single_id() + # Attach dependencies to externals + self._create_edges() + # Mark the specs as concrete + for node in self.nodes: + node._finalize_concretization() + + def _create_edges(self): + for eid, entry in self.specs_by_external_id.items(): + current_node, current_dict = entry.spec, entry.config + line_info = _line_info(current_dict) + spec_str = current_dict["spec"] + + # Compute the dependency types for this spec + pkg_class, deptypes_by_package = spack.repo.PATH.get_pkg_class(current_node.name), {} + for when, by_name in pkg_class.dependencies.items(): + if not current_node.satisfies(when): + continue + for name, dep in by_name.items(): + if name not in deptypes_by_package: + deptypes_by_package[name] = dep.depflag + deptypes_by_package[name] |= dep.depflag + + for dependency_dict in current_dict.get("dependencies", []): + dependency_id = dependency_dict.get("id") + if not dependency_id: + raise ExternalDependencyError( + f"A dependency for {spec_str} does not have an external id{line_info}" + ) + elif dependency_id not in self.specs_by_external_id: + raise ExternalDependencyError( + f"A dependency for {spec_str} has an external id " + f"{dependency_id} that cannot be found in packages.yaml{line_info}" + ) + + dependency_node = self.specs_by_external_id[dependency_id].spec + + # Compute dependency types and virtuals + depflag = spack.deptypes.NONE + if "deptypes" in dependency_dict: + depflag = spack.deptypes.canonicalize(dependency_dict["deptypes"]) + + virtuals: Tuple[str, ...] 
= () + if "virtuals" in dependency_dict: + virtuals = tuple(dependency_dict["virtuals"].split(",")) + + # Infer dependency types and virtuals if the user didn't specify them + if depflag == spack.deptypes.NONE and not virtuals: + # Infer the deptype if only '%' was used in the spec + inferred_virtuals = [] + for name, current_flag in deptypes_by_package.items(): + if not dependency_node.intersects(name): + continue + depflag |= current_flag + if spack.repo.PATH.is_virtual(name): + inferred_virtuals.append(name) + virtuals = tuple(inferred_virtuals) + elif depflag == spack.deptypes.NONE: + depflag = spack.deptypes.DEFAULT + + current_node._add_dependency(dependency_node, depflag=depflag, virtuals=virtuals) + + def _ensure_dependencies_have_single_id(self): + for eid, entry in self.specs_by_external_id.items(): + current_node, current_dict = entry.spec, entry.config + spec_str = current_dict["spec"] + line_info = _line_info(current_dict) + + if current_node.dependencies() and "dependencies" in current_dict: + raise ExternalSpecError( + f"the spec {spec_str} cannot specify dependencies both in the root spec and " + f"in the 'dependencies' field{line_info}" + ) + + # Transform inline entries like 'mpich %gcc' to a canonical form using 'dependencies' + for edge in current_node.edges_to_dependencies(): + entry: DependencyDict = {"spec": str(edge.spec)} + + # Handle entries with more options specified + if edge.depflag != 0: + entry["deptypes"] = spack.deptypes.flag_to_tuple(edge.depflag) + + if edge.virtuals: + entry["virtuals"] = ",".join(edge.virtuals) + + current_dict.setdefault("dependencies", []).append(entry) + current_node.clear_edges() + + # Map a spec: to id: + for dependency_dict in current_dict.get("dependencies", []): + if "id" in dependency_dict: + continue + + if "spec" not in dependency_dict: + raise ExternalDependencyError( + f"the spec {spec_str} needs to specify either the id or the spec " + f"of its dependencies{line_info}" + ) + + query_spec = spack.spec.Spec(dependency_dict["spec"]) + candidates = [ + x + for x in self.specs_by_name.get(query_spec.name, []) + if x.spec.satisfies(query_spec) + ] + if len(candidates) == 0: + raise ExternalDependencyError( + f"the spec '{spec_str}' depends on '{query_spec}', but there is no such " + f"external spec in packages.yaml{line_info}" + ) + elif len(candidates) > 1: + candidates_str = ( + f" [candidates are {', '.join([str(x.spec) for x in candidates])}]" + ) + raise ExternalDependencyError( + f"the spec '{spec_str}' depends on '{query_spec}', but there are multiple " + f"external specs that could satisfy the request{candidates_str}{line_info}" + ) + + dependency_dict["id"] = candidates[0].config["id"] + + def _parse_all_nodes(self) -> None: + """Parses all the nodes from the external dicts but doesn't add any edge.""" + for external_dict in self.external_dicts: + line_info = _line_info(external_dict) + try: + node = node_from_dict(external_dict) + except spack.spec.UnsatisfiableArchitectureSpecError: + spec_str, target_str = external_dict["spec"], external_dict["required_target"] + tty.debug( + f"[{__name__}]{line_info} Skipping external spec '{spec_str}' because it " + f"cannot be constrained with the required target '{target_str}'."
+ ) + continue + except ExternalSpecError as e: + warnings.warn(f"{e}{line_info}") + continue + + package_exists = spack.repo.PATH.exists(node.name) + + # If we allow non-existing packages, just continue + if not package_exists and self.allow_nonexisting: + continue + + if not package_exists and not self.allow_nonexisting: + raise ExternalSpecError(f"Package '{node.name}' does not exist{line_info}") + + eid = external_dict.setdefault("id", str(uuid.uuid4())) + if eid in self.specs_by_external_id: + other_node = self.specs_by_external_id[eid] + other_line_info = _line_info(other_node.config) + raise DuplicateExternalError( + f"Specs {node} and {other_node.spec} cannot have the same external id {eid}" + f"{line_info}{other_line_info}" + ) + + self.complete_node(node) + + # Add a Python dependency to Python extensions that don't specify it + pkg_class = spack.repo.PATH.get_pkg_class(node.name) + if ( + "dependencies" not in external_dict + and not node.dependencies() + and any([c.__name__ == "PythonExtension" for c in pkg_class.__mro__]) + ): + warnings.warn( + f"Spack is trying to attach a Python dependency to '{node}'. This feature is " + f"deprecated, and will be removed in v1.2. Please make the dependency " + f"explicit in your configuration." + ) + external_dict.setdefault("dependencies", []).append({"spec": "python"}) + + # Normalize internally so that each node has a unique id + spec_and_config = ExternalSpecAndConfig(spec=node, config=external_dict) + self.specs_by_external_id[eid] = spec_and_config + self.specs_by_name.setdefault(node.name, []).append(spec_and_config) + self.nodes.append(node) + + def get_specs_for_package(self, package_name: str) -> List[spack.spec.Spec]: + """Returns the external specs for a given package name.""" + result = self.specs_by_name.get(package_name, []) + return [x.spec for x in result] + + def all_specs(self) -> List[spack.spec.Spec]: + """Returns all the external specs.""" + return self.nodes + + def query(self, query: Union[str, spack.spec.Spec]) -> List[spack.spec.Spec]: + """Returns the external specs matching a query spec.""" + result = [] + for node in self.nodes: + if node.satisfies(query): + result.append(node) + return result + + +def external_spec(config: ExternalDict) -> spack.spec.Spec: + """Returns an external spec from a dictionary representation.""" + return ExternalSpecsParser([config]).all_specs()[0] + + +class DuplicateExternalError(SpackError): + """Raised when a duplicate external is detected.""" + + +class ExternalDependencyError(SpackError): + """Raised when a dependency on an external package is specified wrongly.""" + + +class ExternalSpecError(SpackError): + """Raised when an external spec is specified wrongly.""" diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py index e0dde4cc306db3..1d2d5b4d1b2297 100644 --- a/lib/spack/spack/fetch_strategy.py +++ b/lib/spack/spack/fetch_strategy.py @@ -6,20 +6,24 @@ Fetch strategies are used to download source code into a staging area in order to build it. They need to define the following methods: - * fetch() - This should attempt to download/check out source from somewhere. - * check() - Apply a checksum to the downloaded source code, e.g. for an archive. - May not do anything if the fetch method was safe to begin with. - * expand() - Expand (e.g., an archive) downloaded file to source, with the - standard stage source path as the destination directory. - * reset() - Restore original state of downloaded code. Used by clean commands. 
- This may just remove the expanded source and re-expand an archive, - or it may run something like git reset --hard. - * archive() - Archive a source directory, e.g. for creating a mirror. +``fetch()`` + This should attempt to download/check out source from somewhere. + +``check()`` + Apply a checksum to the downloaded source code, e.g. for an archive. + May not do anything if the fetch method was safe to begin with. + +``expand()`` + Expand (e.g., an archive) downloaded file to source, with the + standard stage source path as the destination directory. + +``reset()`` + Restore original state of downloaded code. Used by clean commands. + This may just remove the expanded source and re-expand an archive, + or it may run something like git reset ``--hard``. + +``archive()`` + Archive a source directory, e.g. for creating a mirror. """ import copy import functools @@ -30,34 +34,31 @@ import shutil import sys import time -import urllib.error import urllib.parse import urllib.request -import urllib.response from pathlib import PurePath -from typing import Callable, List, Mapping, Optional +from typing import Callable, List, Mapping, Optional, Type import spack.config import spack.error import spack.llnl.url -import spack.llnl.util import spack.llnl.util.filesystem as fs import spack.llnl.util.tty as tty import spack.oci.opener import spack.util.archive import spack.util.crypto as crypto +import spack.util.executable import spack.util.git import spack.util.url as url_util import spack.util.web as web_util import spack.version -import spack.version.git_ref_lookup from spack.llnl.string import comma_and, quote from spack.llnl.util.filesystem import get_single_file, mkdirp, symlink, temp_cwd, working_dir from spack.util.compression import decompressor_for from spack.util.executable import CommandNotFoundError, Executable, which #: List of all fetch strategies, created by FetchStrategy metaclass. -all_strategies = [] +all_strategies: List[Type["FetchStrategy"]] = [] def _needs_stage(fun): @@ -159,7 +160,7 @@ def source_id(self): the information available to them in the Spack package. The returned value is added to the content which determines the full - hash for a package using `str()`. + hash for a package using :class:`str`. 
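The ``_fetch_urllib`` rework in the next hunk sends an explicit ``Accept: */*`` header and streams the response in fixed-size chunks. A self-contained sketch of that download loop with plain ``urllib`` (URL, user agent, and destination are placeholders)::

    import urllib.request

    def fetch(url: str, dest: str, chunk_size: int = 65536) -> None:
        request = urllib.request.Request(
            url, headers={"User-Agent": "demo-fetcher", "Accept": "*/*"}
        )
        # stream to disk so a large archive never sits fully in memory
        with urllib.request.urlopen(request) as response, open(dest, "wb") as f:
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                f.write(chunk)

    fetch("https://example.com/", "/tmp/index.html")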
""" raise NotImplementedError @@ -429,14 +430,16 @@ def _check_headers(self, headers): def _fetch_urllib(self, url, chunk_size=65536): save_file = self.stage.save_filename - request = urllib.request.Request(url, headers={"User-Agent": web_util.SPACK_USER_AGENT}) + request = urllib.request.Request( + url, headers={"User-Agent": web_util.SPACK_USER_AGENT, "Accept": "*/*"} + ) if os.path.lexists(save_file): os.remove(save_file) try: response = web_util.urlopen(request) - tty.msg(f"Fetching {url}") + tty.verbose(f"Fetching {url}") progress = FetchProgress.from_headers(response.headers, enabled=sys.stdout.isatty()) with open(save_file, "wb") as f: while True: @@ -469,7 +472,7 @@ def _fetch_curl(self, url, config_args=[]): if self.stage.save_filename: save_file = self.stage.save_filename partial_file = self.stage.save_filename + ".part" - tty.msg(f"Fetching {url}") + tty.verbose(f"Fetching {url}") if partial_file: save_args = [ "-C", @@ -656,7 +659,7 @@ def fetch(self): try: response = self._urlopen(self.url) - tty.msg(f"Fetching {self.url}") + tty.verbose(f"Fetching {self.url}") with open(file, "wb") as f: shutil.copyfileobj(response, f) except OSError as e: @@ -726,12 +729,11 @@ def __repr__(self): @fetcher class GoFetchStrategy(VCSFetchStrategy): - """Fetch strategy that employs the `go get` infrastructure. + """Fetch strategy that employs the ``go get`` infrastructure. - Use like this in a package: + Use like this in a package:: - version('name', - go='github.com/monochromegane/the_platinum_searcher/...') + version("name", go="github.com/monochromegane/the_platinum_searcher/...") Go get does not natively support versions, they can be faked with git. @@ -798,20 +800,19 @@ def __str__(self): class GitFetchStrategy(VCSFetchStrategy): """ Fetch strategy that gets source code from a git repository. - Use like this in a package: + Use like this in a package:: - version('name', git='https://github.com/project/repo.git') + version("name", git="https://github.com/project/repo.git") - Optionally, you can provide a branch, or commit to check out, e.g.: + Optionally, you can provide a branch, or commit to check out, e.g.:: - version('1.1', git='https://github.com/project/repo.git', tag='v1.1') + version("1.1", git="https://github.com/project/repo.git", tag="v1.1") You can use these three optional attributes in addition to ``git``: - * ``branch``: Particular branch to build from (default is the - repository's default branch) - * ``tag``: Particular tag to check out - * ``commit``: Particular commit hash in the repo + * ``branch``: Particular branch to build from (default is the repository's default branch) + * ``tag``: Particular tag to check out + * ``commit``: Particular commit hash in the repo Repositories are cloned into the standard stage source path directory. 
""" @@ -825,10 +826,9 @@ class GitFetchStrategy(VCSFetchStrategy): "get_full_repo", "submodules_delete", "git_sparse_paths", + "skip_checkout", ] - git_version_re = r"git version (\S+)" - def __init__(self, **kwargs): self.commit: Optional[str] = None @@ -846,6 +846,9 @@ def __init__(self, **kwargs): self.submodules_delete = kwargs.get("submodules_delete", False) self.get_full_repo = kwargs.get("get_full_repo", False) self.git_sparse_paths = kwargs.get("git_sparse_paths", None) + # skipping checkout with a blobless clone is an efficient way to traverse meta-data + # see https://bhupesh.me/minimalist-guide-git-clone/ + self.skip_checkout = kwargs.get("skip_checkout", False) @property def git_version(self): @@ -856,9 +859,8 @@ def version_from_git(git_exe): """Given a git executable, return the Version (this will fail if the output cannot be parsed into a valid Version). """ - version_output = git_exe("--version", output=str) - m = re.search(GitFetchStrategy.git_version_re, version_output) - return spack.version.Version(m.group(1)) + version_string = ".".join(map(str, git_exe.version)) + return spack.version.Version(version_string) @property def git(self): @@ -887,6 +889,7 @@ def cachable(self): def source_id(self): # TODO: tree-hash would secure download cache and mirrors, commit only secures checkouts. + # TODO(psakiev): Tree-hash is part of the commit SHA computation, question comment validity return self.commit def mirror_id(self): @@ -922,10 +925,7 @@ def fetch(self): tty.debug(f"Already fetched {self.stage.source_path}") return - if self.git_sparse_paths: - self._sparse_clone_src() - else: - self._clone_src() + self._clone_src() self.submodule_operations() def bare_clone(self, dest: str) -> None: @@ -954,156 +954,30 @@ def _clone_src(self) -> None: dest = self.stage.source_path tty.debug(f"Cloning git repository: {self._repo_info()}") - git = self.git - debug = spack.config.get("config:debug") - - if self.commit: - # Need to do a regular clone and check out everything if - # they asked for a particular commit. - clone_args = ["clone", self.url] - if not debug: - clone_args.insert(1, "--quiet") - with temp_cwd(): - git(*clone_args) - repo_name = get_single_file(".") - if self.stage: - self.stage.srcdir = repo_name - shutil.copytree(repo_name, dest, symlinks=True) - shutil.rmtree( - repo_name, - ignore_errors=False, - onerror=fs.readonly_file_handler(ignore_errors=True), - ) - - with working_dir(dest): - checkout_args = ["checkout", self.commit] - if not debug: - checkout_args.insert(1, "--quiet") - git(*checkout_args) - - else: - # Can be more efficient if not checking out a specific commit. - args = ["clone"] - if not debug: - args.append("--quiet") - - # If we want a particular branch ask for it. - if self.branch: - args.extend(["--branch", self.branch]) - elif self.tag and self.git_version >= spack.version.Version("1.8.5.2"): - args.extend(["--branch", self.tag]) - - # Try to be efficient if we're using a new enough git. - # This checks out only one branch's history - if self.git_version >= spack.version.Version("1.7.10"): - if self.get_full_repo: - args.append("--no-single-branch") - else: - args.append("--single-branch") - - with temp_cwd(): - # Yet more efficiency: only download a 1-commit deep - # tree, if the in-use git and protocol permit it. 
- if ( - (not self.get_full_repo) - and self.git_version >= spack.version.Version("1.7.1") - and self.protocol_supports_shallow_clone() - ): - args.extend(["--depth", "1"]) - - args.extend([self.url]) - git(*args) - - repo_name = get_single_file(".") - if self.stage: - self.stage.srcdir = repo_name - shutil.move(repo_name, dest) - - with working_dir(dest): - # For tags, be conservative and check them out AFTER - # cloning. Later git versions can do this with clone - # --branch, but older ones fail. - if self.tag and self.git_version < spack.version.Version("1.8.5.2"): - # pull --tags returns a "special" error code of 1 in - # older versions that we have to ignore. - # see: https://github.com/git/git/commit/19d122b - pull_args = ["pull", "--tags"] - co_args = ["checkout", self.tag] - if not spack.config.get("config:debug"): - pull_args.insert(1, "--quiet") - co_args.insert(1, "--quiet") - - git(*pull_args, ignore_errors=1) - git(*co_args) - - def _sparse_clone_src(self, **kwargs): - """Use git's sparse checkout feature to clone portions of a git repository""" - dest = self.stage.source_path - git = self.git - - if self.git_version < spack.version.Version("2.26.0"): - # technically this should be supported for 2.25, but bumping for OS issues - # see https://github.com/spack/spack/issues/45771 - # code paths exist where the package is not set. Assure some indentifier for the - # package that was configured for sparse checkout exists in the error message - identifier = str(self.url) - if self.package: - identifier += f" ({self.package.name})" - tty.warn( - ( - f"{identifier} is configured for git sparse-checkout " - "but the git version is too old to support sparse cloning. " - "Cloning the full repository instead." - ) - ) - self._clone_src() - else: - # default to depth=2 to allow for retention of some git properties - depth = kwargs.get("depth", 2) - needs_fetch = self.branch or self.tag - git_ref = self.branch or self.tag or self.commit - - assert git_ref - - clone_args = ["clone"] - - if needs_fetch: - clone_args.extend(["--branch", git_ref]) - - if self.get_full_repo: - clone_args.append("--no-single-branch") - else: - clone_args.append("--single-branch") - - clone_args.extend( - [f"--depth={depth}", "--no-checkout", "--filter=blob:none", self.url] - ) + depth = None if self.get_full_repo else 1 + name = self.package.name if self.package else None + checkout_ref = self.commit or self.tag or self.branch + fetch_ref = self.tag or self.branch - sparse_args = ["sparse-checkout", "set"] + kwargs = {"debug": spack.config.get("config:debug"), "git_exe": self.git, "dest": name} - if callable(self.git_sparse_paths): - sparse_args.extend(self.git_sparse_paths()) + with temp_cwd(ignore_cleanup_errors=True): + if self.commit and name: + try: + spack.util.git.git_init_fetch(self.url, self.commit, depth, **kwargs) + except spack.util.executable.ProcessError: + spack.util.git.git_clone(self.url, fetch_ref, True, depth, **kwargs) else: - sparse_args.extend([p for p in self.git_sparse_paths]) - - sparse_args.append("--cone") - - checkout_args = ["checkout", git_ref] - - if not spack.config.get("config:debug"): - clone_args.insert(1, "--quiet") - checkout_args.insert(1, "--quiet") - - with temp_cwd(): - git(*clone_args) - repo_name = get_single_file(".") - if self.stage: - self.stage.srcdir = repo_name - shutil.move(repo_name, dest) + spack.util.git.git_clone(self.url, fetch_ref, self.get_full_repo, depth, **kwargs) + repo_name = get_single_file(".") + kwargs["dest"] = repo_name + if not 
self.skip_checkout: + spack.util.git.git_checkout(checkout_ref, self.git_sparse_paths, **kwargs) - with working_dir(dest): - git(*sparse_args) - git(*checkout_args) + if self.stage: + self.stage.srcdir = repo_name + shutil.copytree(repo_name, dest, symlinks=True) + return def submodule_operations(self): dest = self.stage.source_path @@ -1151,12 +1025,6 @@ def reset(self): self.git(*co_args) self.git(*clean_args) - def protocol_supports_shallow_clone(self): - """Shallow clone operations (--depth #) are not supported by the basic - HTTP protocol or by no-protocol file specifications. - Use (e.g.) https:// or file:// instead.""" - return not (self.url.startswith("http://") or self.url.startswith("/")) - def __str__(self): return f"[git] {self._repo_info()}" @@ -1164,16 +1032,17 @@ def __str__(self): @fetcher class CvsFetchStrategy(VCSFetchStrategy): """Fetch strategy that gets source code from a CVS repository. - Use like this in a package: + Use like this in a package:: - version('name', - cvs=':pserver:anonymous@www.example.com:/cvsroot%module=modulename') + version("name", cvs=":pserver:anonymous@www.example.com:/cvsroot%module=modulename") - Optionally, you can provide a branch and/or a date for the URL: + Optionally, you can provide a branch and/or a date for the URL:: - version('name', - cvs=':pserver:anonymous@www.example.com:/cvsroot%module=modulename', - branch='branchname', date='date') + version( + "name", + cvs=":pserver:anonymous@www.example.com:/cvsroot%module=modulename", + branch="branchname", date="date" + ) Repositories are checked out into the standard stage source path directory. """ @@ -1281,14 +1150,13 @@ def __str__(self): @fetcher class SvnFetchStrategy(VCSFetchStrategy): """Fetch strategy that gets source code from a subversion repository. - Use like this in a package: + Use like this in a package:: - version('name', svn='http://www.example.com/svn/trunk') + version("name", svn="http://www.example.com/svn/trunk") - Optionally, you can provide a revision for the URL: + Optionally, you can provide a revision for the URL:: - version('name', svn='http://www.example.com/svn/trunk', - revision='1641') + version("name", svn="http://www.example.com/svn/trunk", revision="1641") Repositories are checked out into the standard stage source path directory. """ @@ -1376,21 +1244,20 @@ def __str__(self): class HgFetchStrategy(VCSFetchStrategy): """ Fetch strategy that gets source code from a Mercurial repository. - Use like this in a package: + Use like this in a package:: - version('name', hg='https://jay.grs.rwth-aachen.de/hg/lwm2') + version("name", hg="https://jay.grs.rwth-aachen.de/hg/lwm2") - Optionally, you can provide a branch, or revision to check out, e.g.: + Optionally, you can provide a branch, or revision to check out, e.g.:: - version('torus', - hg='https://jay.grs.rwth-aachen.de/hg/lwm2', branch='torus') + version("torus", hg="https://jay.grs.rwth-aachen.de/hg/lwm2", branch="torus") - You can use the optional 'revision' attribute to check out a + You can use the optional ``revision`` attribute to check out a branch, tag, or particular revision in hg. To prevent non-reproducible builds, using a moving target like a branch is discouraged. - * ``revision``: Particular revision, branch, or tag. + * ``revision``: Particular revision, branch, or tag. Repositories are cloned into the standard stage source path directory. """ @@ -1588,22 +1455,21 @@ def from_url(url: str) -> URLFetchStrategy: """Given a URL, find an appropriate fetch strategy for it. 
Currently just gives you a URLFetchStrategy that uses curl. - TODO: make this return appropriate fetch strategies for other - types of URLs. + TODO: make this return appropriate fetch strategies for other types of URLs. """ return URLFetchStrategy(url=url) -def from_kwargs(**kwargs): +def from_kwargs(**kwargs) -> FetchStrategy: """Construct an appropriate FetchStrategy from the given keyword arguments. Args: - **kwargs: dictionary of keyword arguments, e.g. from a - ``version()`` directive in a package. + **kwargs: dictionary of keyword arguments, e.g. from a ``version()`` directive in a + package. Returns: - typing.Callable: The fetch strategy that matches the args, based - on attribute names (e.g., ``git``, ``hg``, etc.) + The fetch strategy that matches the args, based on attribute names (e.g., ``git``, ``hg``, + etc.) Raises: spack.error.FetchError: If no ``fetch_strategy`` matches the args. @@ -1692,6 +1558,18 @@ def _from_merged_attrs(fetcher, pkg, version): def for_package_version(pkg, version=None): + saved_versions = None + if version is not None: + saved_versions = pkg.spec.versions + + try: + return _for_package_version(pkg, version) + finally: + if saved_versions is not None: + pkg.spec.versions = saved_versions + + +def _for_package_version(pkg, version=None): """Determine a fetch strategy based on the arguments supplied to version() in the package description.""" @@ -1716,15 +1594,25 @@ def for_package_version(pkg, version=None): version = pkg.version # if it's a commit, we must use a GitFetchStrategy - commit_sha = pkg.spec.variants.get("commit", None) - if isinstance(version, spack.version.GitVersion) or commit_sha: + commit_var = pkg.spec.variants.get("commit", None) + commit = commit_var.value if commit_var else None + tag = None + if isinstance(version, spack.version.GitVersion) or commit: if not hasattr(pkg, "git"): raise spack.error.FetchError( f"Cannot fetch git version for {pkg.name}. 
Package has no 'git' attribute" ) - # Populate the version with comparisons to other commits if isinstance(version, spack.version.GitVersion): + # Populate the version with comparisons to other commits + from spack.version.git_ref_lookup import GitRefLookup + + version.attach_lookup(GitRefLookup(pkg.name)) + + if not commit and version.is_commit: + commit = version.ref + version_meta_data = pkg.versions.get(version.std_version) + else: + version_meta_data = pkg.versions.get(version) # For GitVersion, we have no way to determine whether a ref is a branch or tag # Fortunately, we handle branches and tags identically, except tags are @@ -1734,16 +1622,14 @@ def for_package_version(pkg, version=None): # Branches cannot be cached, so we tell the fetcher not to cache tags/branches # TODO(psakiev) eventually we should only need to clone based on the commit - ref_type = None - ref_value = None - if commit_sha: - ref_type = "commit" - ref_value = commit_sha.value - else: - ref_type = "commit" if version.is_commit else "tag" - ref_value = version.ref - kwargs = {ref_type: ref_value, "no_cache": ref_type != "commit"} + # commit stashed on version + if version_meta_data: + if not commit: + commit = version_meta_data.get("commit") + tag = version_meta_data.get("tag") or version_meta_data.get("branch") + + kwargs = {"commit": commit, "tag": tag, "no_cache": bool(not commit)} kwargs["git"] = pkg.version_or_package_attr("git", version) kwargs["submodules"] = pkg.version_or_package_attr("submodules", version, False) kwargs["git_sparse_paths"] = pkg.version_or_package_attr("git_sparse_paths", version, None) diff --git a/lib/spack/spack/filesystem_view.py b/lib/spack/spack/filesystem_view.py index 5010ddb02f3cd9..d331f65d79b2ca 100644 --- a/lib/spack/spack/filesystem_view.py +++ b/lib/spack/spack/filesystem_view.py @@ -165,10 +165,10 @@ def __init__( link_type: LinkType = "symlink", ): """ - Initialize a filesystem view under the given `root` directory with - corresponding directory `layout`. + Initialize a filesystem view under the given ``root`` directory with + corresponding directory ``layout``. - Files are linked by method `link` (spack.llnl.util.filesystem.symlink by default). + Files are linked by method ``link`` (spack.llnl.util.filesystem.symlink by default). """ self._root = root self.layout = layout @@ -188,14 +188,14 @@ def add_specs(self, *specs: spack.spec.Spec, **kwargs) -> None: """ Add given specs to view. - Should accept `with_dependencies` as keyword argument (default + Should accept ``with_dependencies`` as keyword argument (default True) to indicate whether or not dependencies should be activated as well. - Should except an `exclude` keyword argument containing a list of + Should accept an ``exclude`` keyword argument containing a list of regexps that filter out matching spec names. - This method should make use of `activate_standalone`. + This method should make use of ``activate_standalone``. """ raise NotImplementedError @@ -215,18 +215,18 @@ def remove_specs(self, *specs: spack.spec.Spec, **kwargs) -> None: """ Removes given specs from view. - Should accept `with_dependencies` as keyword argument (default + Should accept ``with_dependencies`` as keyword argument (default True) to indicate whether or not dependencies should be deactivated as well.
- Should accept `with_dependents` as keyword argument (default True) + Should accept ``with_dependents`` as keyword argument (default True) to indicate whether or not dependents on the deactivated specs should be removed as well. - Should except an `exclude` keyword argument containing a list of + Should accept an ``exclude`` keyword argument containing a list of regexps that filter out matching spec names. - This method should make use of `deactivate_standalone`. + This method should make use of ``deactivate_standalone``. """ raise NotImplementedError @@ -253,7 +253,7 @@ def get_spec(self, spec: spack.spec.Spec) -> Optional[spack.spec.Spec]: Return the actual spec linked in this view (i.e. do not look it up in the database by name). - `spec` can be a name or a spec from which the name is extracted. + ``spec`` can be a name or a spec from which the name is extracted. As there can only be a single version active for any spec the name is enough to identify the spec in the view. @@ -265,11 +265,12 @@ def print_status(self, *specs: spack.spec.Spec, **kwargs) -> None: """ Print a short summary about the given specs, detailing whether.. - * ..they are active in the view. - * ..they are active but the activated version differs. - * ..they are not activte in the view. - Takes `with_dependencies` keyword argument so that the status of + * ..they are active in the view. + * ..they are active but the activated version differs. + * ..they are not active in the view. + + Takes ``with_dependencies`` keyword argument so that the status of dependencies is printed as well. """ raise NotImplementedError diff --git a/lib/spack/spack/graph.py b/lib/spack/spack/graph.py index 8e61db2729d59c..37f3962a0fbdf8 100644 --- a/lib/spack/spack/graph.py +++ b/lib/spack/spack/graph.py @@ -7,33 +7,35 @@ (i.e. Spack specs). There are two main functions you probably care about: -graph_ascii() will output a colored graph of a spec in ascii format, -kind of like the graph git shows with "git log --graph", e.g.:: - - o mpileaks - |\ - | |\ - | o | callpath - |/| | - | |\| - | |\ \ - | | |\ \ - | | | | o adept-utils - | |_|_|/| - |/| | | | - o | | | | mpi - / / / / - | | o | dyninst - | |/| | - |/|/| | - | | |/ - | o | libdwarf - |/ / - o | libelf - / - o boost - -graph_dot() will output a graph of a spec (or multiple specs) in dot format. +:func:`graph_ascii` will output a colored graph of a spec in ascii format, +kind of like the graph git shows with ``git log --graph``, e.g. + +.. code-block:: text + + o mpileaks + |\ + | |\ + | o | callpath + |/| | + | |\| + | |\ \ + | | |\ \ + | | | | o adept-utils + | |_|_|/| + |/| | | | + o | | | | mpi + / / / / + | | o | dyninst + | |/| | + |/|/| | + | | |/ + | o | libdwarf + |/ / + o | libelf + / + o boost + +:func:`graph_dot` will output a graph of a spec (or multiple specs) in dot format. """ import enum import sys @@ -304,14 +306,10 @@ def write(self, spec, color=None, out=None): """Write out an ascii graph of the provided spec. Arguments: - spec -- spec to graph. This only handles one spec at a time. - - Optional arguments: - - out -- file object to write out to (default is sys.stdout) - - color -- whether to write in color. Default is to autodetect - based on output file. + spec: spec to graph. This only handles one spec at a time. + out: file object to write out to (default is sys.stdout) + color: whether to write in color. Default is to autodetect + based on output file.
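The rewritten argument list above ends with color autodetection based on the output file. A common way to implement that convention, sketched standalone (the ANSI coloring is a stand-in for the real formatting)::

    import sys
    from typing import IO, Optional

    def write_colored(text: str, out: Optional[IO] = None, color: Optional[bool] = None):
        out = out or sys.stdout
        if color is None:
            # autodetect: only emit ANSI codes when writing to a real terminal
            color = hasattr(out, "isatty") and out.isatty()
        if color:
            text = f"\033[32m{text}\033[0m"
        out.write(text + "\n")

    write_colored("o mpileaks")  # colored on a TTY, plain when piped to a file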
""" if out is None: diff --git a/lib/spack/spack/hash_types.py b/lib/spack/spack/hash_types.py index bdf1da3c593514..ba3e20f1d6bb3e 100644 --- a/lib/spack/spack/hash_types.py +++ b/lib/spack/spack/hash_types.py @@ -3,10 +3,13 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Definitions that control how Spack creates Spec hashes.""" +from typing import TYPE_CHECKING, Callable, List, Optional + import spack.deptypes as dt import spack.repo -HASHES = [] +if TYPE_CHECKING: + import spack.spec class SpecHashDescriptor: @@ -19,24 +22,27 @@ class SpecHashDescriptor: We currently use different hashes for different use cases.""" - def __init__(self, depflag: dt.DepFlag, package_hash, name, override=None): + __slots__ = "depflag", "package_hash", "name", "attr", "override" + + def __init__( + self, + depflag: dt.DepFlag, + package_hash: bool, + name: str, + override: Optional[Callable[["spack.spec.Spec"], str]] = None, + ) -> None: self.depflag = depflag self.package_hash = package_hash self.name = name - HASHES.append(self) + self.attr = f"_{name}" # Allow spec hashes to have an alternate computation method self.override = override - @property - def attr(self): - """Private attribute stored on spec""" - return "_" + self.name - - def __call__(self, spec): + def __call__(self, spec: "spack.spec.Spec") -> str: """Run this hash on the provided spec.""" return spec.spec_hash(self) - def __repr__(self): + def __repr__(self) -> str: return ( f"SpecHashDescriptor(depflag={self.depflag!r}, " f"package_hash={self.package_hash!r}, name={self.name!r}, override={self.override!r})" @@ -49,7 +55,7 @@ def __repr__(self): ) -def _content_hash_override(spec): +def _content_hash_override(spec: "spack.spec.Spec") -> str: pkg_cls = spack.repo.PATH.get_pkg_class(spec.name) pkg = pkg_cls(spec) return pkg.content_hash() @@ -72,3 +78,5 @@ def _content_hash_override(spec): build_hash = SpecHashDescriptor( depflag=dt.BUILD | dt.LINK | dt.RUN, package_hash=False, name="build_hash" ) + +HASHES: List["SpecHashDescriptor"] = [dag_hash, package_hash, full_hash, build_hash] diff --git a/lib/spack/spack/hooks/__init__.py b/lib/spack/spack/hooks/__init__.py index 8672866c611b87..02f597c4878fb2 100644 --- a/lib/spack/spack/hooks/__init__.py +++ b/lib/spack/spack/hooks/__init__.py @@ -10,10 +10,10 @@ Currently the following hooks are supported: - * pre_install(spec) - * post_install(spec, explicit) - * pre_uninstall(spec) - * post_uninstall(spec) +* ``pre_install(spec)`` +* ``post_install(spec, explicit)`` +* ``pre_uninstall(spec)`` +* ``post_uninstall(spec)`` This can be used to implement support for things like module systems (e.g. modules, lmod, etc.) 
or to add other custom diff --git a/lib/spack/spack/hooks/autopush.py b/lib/spack/spack/hooks/autopush.py index cf93fe7ee932b5..434bd26e8ae4ff 100644 --- a/lib/spack/spack/hooks/autopush.py +++ b/lib/spack/spack/hooks/autopush.py @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.llnl.util.tty as tty import spack.mirrors.mirror @@ -21,7 +21,9 @@ def post_install(spec, explicit): # Push the package to all autopush mirrors for mirror in spack.mirrors.mirror.MirrorCollection(binary=True, autopush=True).values(): - signing_key = bindist.select_signing_key() if mirror.signed else None - with bindist.make_uploader(mirror=mirror, force=True, signing_key=signing_key) as uploader: + signing_key = spack.binary_distribution.select_signing_key() if mirror.signed else None + with spack.binary_distribution.make_uploader( + mirror=mirror, force=True, signing_key=signing_key + ) as uploader: uploader.push_or_raise([spec]) tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'") diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py index f681d3bbef6aba..0ba85100e0c760 100644 --- a/lib/spack/spack/hooks/sbang.py +++ b/lib/spack/spack/hooks/sbang.py @@ -17,7 +17,7 @@ import spack.paths import spack.spec import spack.store -from spack.util.socket import _getfqdn +from spack.util.socket import _gethostname #: OS-imposed character limit for shebang line: 127 for Linux; 511 for Mac. #: Different Linux distributions have different limits, but 127 is the @@ -209,7 +209,7 @@ def install_sbang(): os.chown(sbang_bin_dir, os.stat(sbang_bin_dir).st_uid, grp.getgrnam(group_name).gr_gid) # copy over the fresh copy of `sbang` - sbang_tmp_path = os.path.join(sbang_bin_dir, f".sbang.{_getfqdn()}.{os.getpid()}.tmp") + sbang_tmp_path = os.path.join(sbang_bin_dir, f".sbang.{_gethostname()}.{os.getpid()}.tmp") shutil.copy(spack.paths.sbang_script, sbang_tmp_path) # set permissions on `sbang` (including group if set in configuration) diff --git a/lib/spack/spack/install_test.py b/lib/spack/spack/install_test.py index 5e58e3a10c1e62..c38f218a6b829b 100644 --- a/lib/spack/spack/install_test.py +++ b/lib/spack/spack/install_test.py @@ -12,7 +12,7 @@ import shutil import sys from collections import Counter, OrderedDict -from typing import Callable, Iterable, List, Optional, Tuple, Type, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union import spack.config import spack.error @@ -89,13 +89,12 @@ def get_escaped_text_output(filename: str) -> List[str]: return [re.escape(ln) for ln in expected.split("\n")] -def get_test_stage_dir(): +def get_test_stage_dir() -> str: """Retrieves the ``config:test_stage`` path to the configured test stage root directory Returns: - str: absolute path to the configured test stage root or, if none, - the default test stage path + absolute path to the configured test stage root or, if none, the default test stage path """ return spack.util.path.canonicalize_path( spack.config.get("config:test_stage", spack.paths.default_test_path) @@ -503,24 +502,6 @@ def test_part( # call from the error stack = traceback.extract_stack()[:-1] - # Package files have a line added at import time, so we re-read - # the file to make line numbers match. We have to subtract two - # from the line number because the original line number is - # inflated once by the import statement and the lines are - # displaced one by the import statement. 
-        for i, entry in enumerate(stack):
-            filename, lineno, function, text = entry
-            if spack.repo.is_package_file(filename):
-                with open(filename, encoding="utf-8") as f:
-                    lines = f.readlines()
-                new_lineno = lineno - 2
-                text = lines[new_lineno]
-                if isinstance(entry, tuple):
-                    new_entry = (filename, new_lineno, function, text)
-                    stack[i] = new_entry  # type: ignore[call-overload]
-                elif isinstance(entry, list):
-                    stack[i][1] = new_lineno  # type: ignore[index]
-
     # Format and print the stack
     out = traceback.format_list(stack)
     for line in out:
@@ -1131,17 +1112,17 @@ def write_reproducibility_data(self) -> None:

         write_test_suite_file(self)

-    def to_dict(self):
+    def to_dict(self) -> Dict[str, Any]:
         """Build a dictionary for the test suite.

         Returns:
-            dict: The dictionary contains entries for up to two keys:
+            The dictionary contains entries for up to two keys.

-                specs: list of the test suite's specs in dictionary form
-                alias: the alias, or name, given to the test suite if provided
+            * specs: list of the test suite's specs in dictionary form
+            * alias: the alias, or name, given to the test suite if provided
         """
         specs = [s.to_dict() for s in self.specs]
-        d = {"specs": specs}
+        d: Dict[str, Any] = {"specs": specs}
         if self.alias:
             d["alias"] = self.alias
         return d
@@ -1151,8 +1132,8 @@ def from_dict(d):
         """Instantiates a TestSuite based on a dictionary specs and an
         optional alias:

-            specs: list of the test suite's specs in dictionary form
-            alias: the test suite alias
+            * specs: list of the test suite's specs in dictionary form
+            * alias: the test suite alias

         Returns:
             TestSuite: Instance created from the specs
diff --git a/lib/spack/spack/installer.py b/lib/spack/spack/installer.py
index f60e7ebf265829..23a547007efef7 100644
--- a/lib/spack/spack/installer.py
+++ b/lib/spack/spack/installer.py
@@ -38,7 +38,9 @@
 import time
 from collections import defaultdict
 from gzip import GzipFile
-from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Set, Tuple, Union
+
+from spack.vendor.typing_extensions import Literal

 import spack.binary_distribution as binary_distribution
 import spack.build_environment
@@ -57,9 +59,7 @@
 import spack.repo
 import spack.report
 import spack.rewiring
-import spack.spec
 import spack.store
-import spack.util.executable
 import spack.util.path
 import spack.util.timer as timer
 from spack.llnl.string import ordinal
@@ -70,6 +70,9 @@
 from spack.util.environment import EnvironmentModifications, dump_environment
 from spack.util.executable import which

+if TYPE_CHECKING:
+    import spack.spec
+
 #: Counter to support unique spec sequencing that is used to ensure packages
 #: with the same priority are (initially) processed in the order in which they
 #: were added (see https://docs.python.org/2/library/heapq.html).
@@ -77,6 +80,9 @@

 _FAIL_FAST_ERR = "Terminating after first install failure"

+#: Type for specifying installation source modes
+InstallPolicy = Literal["auto", "cache_only", "source_only"]
+

 class BuildStatus(enum.Enum):
     """Different build (task) states."""
@@ -447,7 +453,7 @@ def _process_binary_cache_tarball(
         pkg: the package being installed
         explicit: the package was explicitly requested by the user
         unsigned: if ``True`` or ``False`` override the mirror signature verification defaults
-        mirrors_for_spec: Optional list of concrete specs and mirrors
+        mirrors_for_spec: Optional list of mirrors in which to look for the spec,
             obtained by calling binary_distribution.get_mirrors_for_spec().
timer: timer to keep track of binary install phases. @@ -503,10 +509,10 @@ def _try_install_from_binary_cache( tty.debug(f"Searching for binary cache of {package_id(pkg.spec)}") with timer.measure("search"): - matches = binary_distribution.get_mirrors_for_spec(pkg.spec, index_only=True) + mirrors = binary_distribution.get_mirrors_for_spec(pkg.spec, index_only=True) return _process_binary_cache_tarball( - pkg, explicit, unsigned, mirrors_for_spec=matches, timer=timer + pkg, explicit, unsigned, mirrors_for_spec=mirrors, timer=timer ) @@ -782,16 +788,14 @@ def _add_default_args(self) -> None: """Ensure standard install options are set to at least the default.""" for arg, default in [ ("context", "build"), # installs *always* build - ("dependencies_cache_only", False), - ("dependencies_use_cache", True), + ("dependencies_policy", "auto"), ("dirty", False), ("fail_fast", False), ("fake", False), ("install_deps", True), ("install_package", True), ("install_source", False), - ("package_cache_only", False), - ("package_use_cache", True), + ("root_policy", "auto"), ("keep_prefix", False), ("keep_stage", False), ("restage", False), @@ -815,14 +819,16 @@ def get_depflags(self, pkg: "spack.package_base.PackageBase") -> int: include_build_deps = self.install_args.get("include_build_deps") if self.pkg_id == package_id(pkg.spec): - cache_only = self.install_args.get("package_cache_only") + policy = self.install_args.get("root_policy", "auto") else: - cache_only = self.install_args.get("dependencies_cache_only") + policy = self.install_args.get("dependencies_policy", "auto") # Include build dependencies if pkg is going to be built from sources, or # if build deps are explicitly requested. if include_build_deps or not ( - cache_only or pkg.spec.installed and pkg.spec.dag_hash() not in self.overwrite + policy == "cache_only" + or pkg.spec.installed + and pkg.spec.dag_hash() not in self.overwrite ): depflag |= dt.BUILD if self.run_tests(pkg): @@ -1165,20 +1171,11 @@ def is_build_request(self) -> bool: return self.pkg == self.request.pkg @property - def use_cache(self) -> bool: - _use_cache = True - if self.is_build_request: - return self.request.install_args.get("package_use_cache", _use_cache) - else: - return self.request.install_args.get("dependencies_use_cache", _use_cache) - - @property - def cache_only(self) -> bool: - _cache_only = False + def install_policy(self) -> InstallPolicy: if self.is_build_request: - return self.request.install_args.get("package_cache_only", _cache_only) + return self.request.install_args.get("root_policy", "auto") else: - return self.request.install_args.get("dependencies_cache_only", _cache_only) + return self.request.install_args.get("dependencies_policy", "auto") @property def key(self) -> Tuple[int, int]: @@ -1212,8 +1209,8 @@ def check_db(spec: "spack.spec.Spec") -> Tuple[Optional[spack.database.InstallRe spec: spec whose database install status is being checked Return: - Tuple of optional database record, and a boolean installed_in_db - that's ``True`` iff the spec is considered installed + Tuple of optional database record, and a boolean installed_in_db that's ``True`` iff the + spec is considered installed """ try: rec = spack.store.STORE.db.get_record(spec) @@ -1262,11 +1259,12 @@ def start(self): # Use the binary cache to install if requested, # save result to be handled in BuildTask.complete() # TODO: change binary installs to occur in subprocesses rather than the main Spack process - if self.use_cache: + policy = self.install_policy + if policy != "source_only": 
            if _install_from_cache(pkg, self.explicit, unsigned):
                 self.success_result = ExecuteResult.SUCCESS
                 return
-            elif self.cache_only:
+            elif policy == "cache_only":
                 self.error_result = spack.error.InstallError(
                     "No binary found when cache-only was specified", pkg=pkg
                 )
@@ -1284,6 +1282,9 @@ def start(self):
             self._setup_install_dir(pkg)

         # Create a child process to do the actual installation.
+        self._start_build_process()
+
+    def _start_build_process(self):
         self.process_handle = spack.build_environment.start_build_process(
             self.pkg, build_process, self.request.install_args
         )
@@ -1385,6 +1386,26 @@ def terminate(self) -> None:
             self.process_handle.terminate()


+class MockBuildProcess:
+    def complete(self) -> bool:
+        return True
+
+    def terminate(self) -> None:
+        pass
+
+
+class FakeBuildTask(BuildTask):
+    """Blocking BuildTask executed directly in the main thread. Used for --fake installs."""
+
+    process_handle = MockBuildProcess()  # type: ignore[assignment]
+
+    def _start_build_process(self):
+        build_process(self.pkg, self.request.install_args)
+
+    def poll(self):
+        return True
+
+
 class RewireTask(Task):
     """Class for representing a rewire task for a package."""

@@ -1410,7 +1431,18 @@ def complete(self):
         try:
             install_args = self.request.install_args
             unsigned = install_args.get("unsigned")
-            _process_binary_cache_tarball(self.pkg, explicit=self.explicit, unsigned=unsigned)
+            success = _process_binary_cache_tarball(
+                self.pkg, explicit=self.explicit, unsigned=unsigned
+            )
+
+            if not success:
+                tty.msg(
+                    f"Failed to find binary for build spec, requeuing {self.pkg.spec} with a "
+                    "dependency install task for its build spec"
+                )
+                self.status = oldstatus
+                return ExecuteResult.MISSING_BUILD_SPEC
+
             _print_installed_pkg(self.pkg.prefix)
             self.record.succeed()
             return ExecuteResult.SUCCESS
@@ -1439,9 +1471,6 @@ def __init__(
         self,
         packages: List["spack.package_base.PackageBase"],
         *,
-        cache_only: bool = False,
-        dependencies_cache_only: bool = False,
-        dependencies_use_cache: bool = True,
         dirty: bool = False,
         explicit: Union[Set[str], bool] = False,
         overwrite: Optional[Union[List[str], Set[str]]] = None,
@@ -1453,17 +1482,16 @@
         install_source: bool = False,
         keep_prefix: bool = False,
         keep_stage: bool = False,
-        package_cache_only: bool = False,
-        package_use_cache: bool = True,
         restage: bool = False,
         skip_patch: bool = False,
         stop_at: Optional[str] = None,
         stop_before: Optional[str] = None,
         tests: Union[bool, List[str], Set[str]] = False,
         unsigned: Optional[bool] = None,
-        use_cache: bool = False,
         verbose: bool = False,
         concurrent_packages: Optional[int] = None,
+        root_policy: InstallPolicy = "auto",
+        dependencies_policy: InstallPolicy = "auto",
     ) -> None:
         """
         Arguments:
@@ -1485,9 +1513,10 @@
             stop_at: last installation phase to be executed (or None)
             tests: False to run no tests, True to test all packages, or a list of
                 package names to run tests for some
-            use_cache: Install from binary package, if available.
             verbose: Display verbose build output (by default, suppresses it)
             concurrent_packages: Max packages to be built concurrently
+            root_policy: install policy for the root spec (``"auto"``, ``"cache_only"``, or ``"source_only"``)
+            dependencies_policy: install policy for dependencies (``"auto"``, ``"cache_only"``, or ``"source_only"``)
""" if sys.platform == "win32": # No locks on Windows, we should always use 1 process @@ -1503,9 +1532,7 @@ def __init__( self.concurrent_packages = concurrent_packages install_args = { - "cache_only": cache_only, - "dependencies_cache_only": dependencies_cache_only, - "dependencies_use_cache": dependencies_use_cache, + "dependencies_policy": dependencies_policy, "dirty": dirty, "explicit": explicit, "fail_fast": fail_fast, @@ -1517,15 +1544,13 @@ def __init__( "keep_prefix": keep_prefix, "keep_stage": keep_stage, "overwrite": overwrite or [], - "package_cache_only": package_cache_only, - "package_use_cache": package_use_cache, + "root_policy": root_policy, "restage": restage, "skip_patch": skip_patch, "stop_at": stop_at, "stop_before": stop_before, "tests": tests, "unsigned": unsigned, - "use_cache": use_cache, "verbose": verbose, "concurrent_packages": self.concurrent_packages, } @@ -1601,7 +1626,12 @@ def _add_init_task( request: the associated install request all_deps: dictionary of all dependencies and associated dependents """ - cls = RewireTask if pkg.spec.spliced else BuildTask + cls: type[Task] = BuildTask + if pkg.spec.spliced: + cls = RewireTask + elif request.install_args.get("fake"): + cls = FakeBuildTask + task = cls(pkg, request=request, status=BuildStatus.QUEUED, installed=self.installed) for dep_id in task.dependencies: all_deps[dep_id].add(package_id(pkg.spec)) @@ -2000,6 +2030,10 @@ def _peek_ready_task(self) -> Optional[Task]: task = self.build_pq[0][1] return task if task.priority == 0 else None + def _tasks_installing_in_other_spack(self) -> bool: + """Whether any tasks in the build queue are installing in other spack processes.""" + return any(task.status == BuildStatus.INSTALLING for _, task in self.build_pq) + def _pop_task(self) -> Task: """Pop the first task off the queue and return it. @@ -2343,7 +2377,7 @@ def complete_task(self, task: Task, install_status: InstallStatus) -> Optional[T raise except BuildcacheEntryError as exc: - if task.cache_only: + if task.install_policy == "cache_only": raise # Checking hash on downloaded binary failed. @@ -2352,7 +2386,7 @@ def complete_task(self, task: Task, install_status: InstallStatus) -> Optional[T f"to {str(exc)}: Requeuing to install from source." ) # this overrides a full method, which is ugly. 
- task.use_cache = False # type: ignore[misc] + task.install_policy = "source_only" # type: ignore[misc] self._requeue_task(task, install_status) return None @@ -2402,6 +2436,8 @@ def complete_task(self, task: Task, install_status: InstallStatus) -> Optional[T # include downgrading the write to a read lock if pkg.spec.installed: self._cleanup_task(pkg) + # mark installed if we haven't yet - may be discovering installed for the first time + self._update_installed(task) return None @@ -2431,7 +2467,7 @@ def _install(self) -> None: ) # While a task is ready or tasks are running - while self._peek_ready_task() or active_tasks: + while self._peek_ready_task() or active_tasks or self._tasks_installing_in_other_spack(): # While there's space for more active tasks to start while len(active_tasks) < self.max_active_tasks: task = self._pop_ready_task() @@ -2448,7 +2484,8 @@ def _install(self) -> None: # handled in complete_task() task.error_result = e - time.sleep(0.1) + # 10 ms to avoid busy waiting + time.sleep(0.01) # Check if any tasks have completed and add to list done = [task for task in active_tasks if task.poll()] try: diff --git a/lib/spack/spack/llnl/path.py b/lib/spack/spack/llnl/path.py index 16f79a747b5728..30b9f2f24e2533 100644 --- a/lib/spack/spack/llnl/path.py +++ b/lib/spack/spack/llnl/path.py @@ -19,13 +19,13 @@ class Path: def format_os_path(path: str, mode: int = Path.unix) -> str: """Formats the input path to use consistent, platform specific separators. - Absolute paths are converted between drive letters and a prepended '/' as per platform + Absolute paths are converted between drive letters and a prepended ``/`` as per platform requirement. Parameters: path: the path to be normalized, must be a string or expose the replace method. mode: the path file separator style to normalize the passed path to. - Default is unix style, i.e. '/' + Default is unix style, i.e. ``/`` """ if not path: return path diff --git a/lib/spack/spack/llnl/string.py b/lib/spack/spack/llnl/string.py index baadff36573744..2f15051bf9634a 100644 --- a/lib/spack/spack/llnl/string.py +++ b/lib/spack/spack/llnl/string.py @@ -28,14 +28,14 @@ def comma_list(sequence: List[str], article: str = "") -> str: def comma_or(sequence: List[str]) -> str: """Return a string with all the elements of the input joined by comma, but the last - one (which is joined by 'or'). + one (which is joined by ``"or"``). """ return comma_list(sequence, "or") def comma_and(sequence: List[str]) -> str: """Return a string with all the elements of the input joined by comma, but the last - one (which is joined by 'and'). + one (which is joined by ``"and"``). 
""" return comma_list(sequence, "and") diff --git a/lib/spack/spack/llnl/url.py b/lib/spack/spack/llnl/url.py index af3eb384959712..6abc0552df35df 100644 --- a/lib/spack/spack/llnl/url.py +++ b/lib/spack/spack/llnl/url.py @@ -32,16 +32,16 @@ def find_list_urls(url: str) -> Set[str]: unique list URL different from the dirname of the download URL: ========= ======================================================= - GitHub https://github.com///releases - GitLab https://gitlab.\*///tags - BitBucket https://bitbucket.org///downloads/?tab=tags - CRAN https://\*.r-project.org/src/contrib/Archive/ - PyPI https://pypi.org/simple// - LuaRocks https://luarocks.org/modules// + GitHub ``https://github.com///releases`` + GitLab ``https://gitlab.\*///tags`` + BitBucket ``https://bitbucket.org///downloads/?tab=tags`` + CRAN ``https://\*.r-project.org/src/contrib/Archive/`` + PyPI ``https://pypi.org/simple//`` + LuaRocks ``https://luarocks.org/modules//`` ========= ======================================================= - Note: this function is called by `spack versions`, `spack checksum`, - and `spack create`, but not by `spack fetch` or `spack install`. + Note: this function is called by ``spack versions``, ``spack checksum``, + and ``spack create``, but not by ``spack fetch`` or ``spack install``. Parameters: url (str): The download URL for the package @@ -134,7 +134,7 @@ def strip_query_and_fragment(url: str) -> Tuple[str, str]: def split_url_on_sourceforge_suffix(url: str) -> Tuple[str, ...]: - """If the input is a sourceforge URL, returns base URL and "/download" suffix. Otherwise, + """If the input is a sourceforge URL, returns base URL and ``/download`` suffix. Otherwise, returns the input URL and an empty string. """ match = SOURCEFORGE_RE.search(url) @@ -206,9 +206,9 @@ def strip_extension(path_or_url: str, *, extension: Optional[str] = None) -> str def split_url_extension(url: str) -> Tuple[str, ...]: """Some URLs have a query string, e.g.: - 1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true - 2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz - 3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0 + 1. ``https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true`` + 2. ``http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz`` + 3. ``https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0`` In (1), the query string needs to be stripped to get at the extension, but in (2) & (3), the filename is IN a single final query @@ -371,7 +371,7 @@ def strip_version_suffixes(path_or_url: str) -> str: def expand_contracted_extension(extension: str) -> str: """Returns the expanded version of a known contracted extension. - This function maps extensions like ".tgz" to ".tar.gz". On unknown extensions, + This function maps extensions like ``.tgz`` to ``.tar.gz``. On unknown extensions, return the input unmodified. """ extension = extension.strip(".") @@ -408,7 +408,7 @@ def compression_ext_from_compressed_archive(extension: str) -> Optional[str]: def strip_compression_extension(path_or_url: str, ext: Optional[str] = None) -> str: """Strips the compression extension from the input, and returns it. For instance, - "foo.tgz" becomes "foo.tar". + ``"foo.tgz"`` becomes ``"foo.tar"``. If no extension is given, try a default list of extensions. 
@@ -439,7 +439,7 @@ def determine_url_file_extension(path: str) -> str: """This returns the type of archive a URL refers to. This is sometimes confusing because of URLs like: - (1) https://github.com/petdance/ack/tarball/1.93_02 + (1) ``https://github.com/petdance/ack/tarball/1.93_02`` Where the URL doesn't actually contain the filename. We need to know what type it is so that we can appropriately name files diff --git a/lib/spack/spack/llnl/util/argparsewriter.py b/lib/spack/spack/llnl/util/argparsewriter.py index e42e6722674791..8771b32e40f490 100644 --- a/lib/spack/spack/llnl/util/argparsewriter.py +++ b/lib/spack/spack/llnl/util/argparsewriter.py @@ -315,7 +315,7 @@ def positional(self, name: str, help: str) -> str: Positional argument description. """ return """\ -{0} +``{0}`` {1} """.format( diff --git a/lib/spack/spack/llnl/util/filesystem.py b/lib/spack/spack/llnl/util/filesystem.py index e508a637e44bf3..eba14481a01caf 100644 --- a/lib/spack/spack/llnl/util/filesystem.py +++ b/lib/spack/spack/llnl/util/filesystem.py @@ -399,13 +399,11 @@ class FileFilter: multiple times to perform search-and-replace operations using Python regular expressions, similar to ``sed``. - Example usage: + Example usage:: - .. code-block:: python - - foo_c = FileFilter("foo.c") - foo_c.filter(r"#define FOO", "#define BAR") - foo_c.filter(r"old_func", "new_func") + foo_c = FileFilter("foo.c") + foo_c.filter(r"#define FOO", "#define BAR") + foo_c.filter(r"old_func", "new_func") """ def __init__(self, *filenames): @@ -433,7 +431,7 @@ def filter( ) -def change_sed_delimiter(old_delim, new_delim, *filenames): +def change_sed_delimiter(old_delim: str, new_delim: str, *filenames: str) -> None: """Find all sed search/replace commands and change the delimiter. e.g., if the file contains seds that look like ``'s///'``, you can @@ -444,8 +442,8 @@ def change_sed_delimiter(old_delim, new_delim, *filenames): Handling those is left for future work. Parameters: - old_delim (str): The delimiter to search for - new_delim (str): The delimiter to replace with + old_delim: The delimiter to search for + new_delim: The delimiter to replace with *filenames: One or more files to search and replace """ assert len(old_delim) == 1 @@ -629,7 +627,7 @@ def chgrp(path, group, follow_symlinks=True): @system_path_filter(arg_slice=slice(1)) def chmod_x(entry, perms): """Implements chmod, treating all executable bits as set using the chmod - utility's `+X` option. + utility's ``+X`` option. """ mode = os.stat(entry).st_mode if os.path.isfile(entry): @@ -677,7 +675,7 @@ def copy(src: str, dest: str, _permissions: bool = False) -> None: Parameters: src: the file(s) to copy dest: the destination file or directory - _permissions (bool): for internal use only + _permissions: for internal use only Raises: OSError: if ``src`` does not match any files or directories @@ -708,7 +706,7 @@ def copy(src: str, dest: str, _permissions: bool = False) -> None: @system_path_filter -def install(src: str, dest: str): +def install(src: str, dest: str) -> None: """Install the file(s) ``src`` to the file or directory ``dest``. 
Same as :py:func:`copy` with the addition of setting proper @@ -860,13 +858,13 @@ def install_tree( @system_path_filter -def is_exe(path): +def is_exe(path) -> bool: """Returns :obj:`True` iff the specified path exists, is a regular file, and has executable permissions for the current process.""" return os.path.isfile(path) and os.access(path, os.X_OK) -def has_shebang(path): +def has_shebang(path) -> bool: """Returns whether a path has a shebang line. Returns False if the file cannot be opened.""" try: with open(path, "rb") as f: @@ -917,11 +915,11 @@ def mkdirp( if not provided group: optional group for permissions of final created directory -- use OS default if not provided. Only used if world write permissions are not set - default_perms: one of 'parents' or 'args'. The default permissions that are set for - directories that are not themselves an argument for mkdirp. 'parents' means + default_perms: one of ``"parents"`` or ``"args"``. The default permissions that are set for + directories that are not themselves an argument for mkdirp. ``"parents"`` means intermediate directories get the permissions of their direct parent directory, - 'args' means intermediate get the same permissions specified in the arguments to - mkdirp -- default value is 'args' + ``"args"`` means intermediate get the same permissions specified in the arguments to + mkdirp -- default value is ``"args"`` """ default_perms = default_perms or "args" paths = path_to_os_path(*paths) @@ -996,7 +994,7 @@ def longest_existing_parent(path: str) -> Tuple[str, List[str]]: @system_path_filter -def force_remove(*paths): +def force_remove(*paths: str) -> None: """Remove files without printing errors. Like ``rm -f``, does NOT remove directories.""" for path in paths: @@ -1015,13 +1013,11 @@ def working_dir(dirname: str, *, create: bool = False): dirname: the directory to change to create: if :obj:`True`, create the directory if it does not exist - Example usage: - - .. code-block:: python + Example usage:: - with working_dir("/path/to/dir"): - # do something in /path/to/dir - pass + with working_dir("/path/to/dir"): + # do something in /path/to/dir + pass """ if create: mkdirp(dirname) @@ -1154,7 +1150,7 @@ def touchp(path): @system_path_filter -def force_symlink(src, dest): +def force_symlink(src: str, dest: str) -> None: """Create a symlink at ``dest`` pointing to ``src``. 
Similar to ``ln -sf``.""" try: symlink(src, dest) @@ -1164,7 +1160,7 @@ def force_symlink(src, dest): @system_path_filter -def join_path(prefix, *args): +def join_path(prefix, *args) -> str: """Alias for :func:`os.path.join`""" path = str(prefix) for elt in args: @@ -1233,14 +1229,14 @@ def windows_sfn(path: os.PathLike): @contextmanager -def temp_cwd(): +def temp_cwd(ignore_cleanup_errors=False): tmp_dir = tempfile.mkdtemp() try: with working_dir(tmp_dir): yield tmp_dir finally: kwargs = {} - if sys.platform == "win32": + if sys.platform == "win32" or ignore_cleanup_errors: kwargs["ignore_errors"] = False kwargs["onerror"] = readonly_file_handler(ignore_errors=True) shutil.rmtree(tmp_dir, **kwargs) @@ -1279,16 +1275,16 @@ def traverse_tree( When called on dest, this yields:: - ('root', 'dest') - ('root/a', 'dest/a') - ('root/a/file1', 'dest/a/file1') - ('root/a/file2', 'dest/a/file2') - ('root/b', 'dest/b') - ('root/b/file3', 'dest/b/file3') + ("root", "dest") + ("root/a", "dest/a") + ("root/a/file1", "dest/a/file1") + ("root/a/file2", "dest/a/file2") + ("root/b", "dest/b") + ("root/b/file3", "dest/b/file3") Keyword Arguments: order (str): Whether to do pre- or post-order traversal. Accepted - values are 'pre' and 'post' + values are ``"pre"`` and ``"post"`` ignore (typing.Callable): function indicating which files to ignore. This will also ignore symlinks if they point to an ignored file (regardless of whether the symlink is explicitly ignored); note this only supports one layer of indirection (i.e. if @@ -1606,7 +1602,7 @@ def error_remove_readonly(func, path, exc): @system_path_filter -def remove_linked_tree(path): +def remove_linked_tree(path: str) -> None: """Removes a directory and its contents. If the directory is a symlink, follows the link and removes the real @@ -1615,9 +1611,9 @@ def remove_linked_tree(path): This method will force-delete files on Windows Parameters: - path (str): Directory to be removed + path: Directory to be removed """ - kwargs = {"ignore_errors": True} + kwargs: dict = {"ignore_errors": True} # Windows readonly files cannot be removed by Python # directly. @@ -1704,13 +1700,13 @@ def find_first(root: str, files: Union[Iterable[str], str], bfs_depth: int = 2) until depth bfs_depth, after which depth-first search is used. Parameters: - root (str): The root directory to start searching from - files (str or Iterable): File pattern(s) to search for - bfs_depth (int): (advanced) parameter that specifies at which + root: The root directory to start searching from + files: File pattern(s) to search for + bfs_depth: (advanced) parameter that specifies at which depth to switch to depth-first search. Returns: - str or None: The matching file or None when no file is found. + The matching file or :data:`None` when no file is found. """ if isinstance(files, str): files = [files] @@ -1729,7 +1725,7 @@ def find( matching file is returned only once at lowest depth in case multiple paths exist due to symlinked directories. - Accepts any glob characters accepted by fnmatch: + Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`: ========== ==================================== Pattern Meaning @@ -1924,41 +1920,41 @@ class FileList(collections.abc.Sequence): Provides a few convenience methods to manipulate file paths. 
""" - def __init__(self, files): + def __init__(self, files: Union[str, Iterable[str]]) -> None: if isinstance(files, str): files = [files] self.files = list(dedupe(files)) @property - def directories(self): + def directories(self) -> List[str]: """Stable de-duplication of the directories where the files reside. - >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/libc.a']) + >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/libc.a"]) >>> l.directories - ['/dir1', '/dir2'] - >>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h']) + ["/dir1", "/dir2"] + >>> h = HeaderList(["/dir1/a.h", "/dir1/b.h", "/dir2/c.h"]) >>> h.directories - ['/dir1', '/dir2'] + ["/dir1", "/dir2"] Returns: - list: A list of directories + A list of directories """ return list(dedupe(os.path.dirname(x) for x in self.files if os.path.dirname(x))) @property - def basenames(self): + def basenames(self) -> List[str]: """Stable de-duplication of the base-names in the list - >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.a']) + >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir3/liba.a"]) >>> l.basenames - ['liba.a', 'libb.a'] - >>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h']) + ["liba.a", "libb.a"] + >>> h = HeaderList(["/dir1/a.h", "/dir2/b.h", "/dir3/a.h"]) >>> h.basenames - ['a.h', 'b.h'] + ["a.h", "b.h"] Returns: - list: A list of base-names + A list of base-names """ return list(dedupe(os.path.basename(x) for x in self.files)) @@ -1980,7 +1976,7 @@ def __eq__(self, other): def __len__(self): return len(self.files) - def joined(self, separator=" "): + def joined(self, separator: str = " ") -> str: return separator.join(self.files) def __repr__(self): @@ -2010,7 +2006,7 @@ def __init__(self, files): self._directories = None @property - def directories(self): + def directories(self) -> List[str]: """Directories to be searched for header files.""" values = self._directories if values is None: @@ -2041,31 +2037,31 @@ def _default_directories(self): return values @property - def headers(self): + def headers(self) -> List[str]: """Stable de-duplication of the headers. 
Returns: - list: A list of header files + A list of header files """ return self.files @property - def names(self): + def names(self) -> List[str]: """Stable de-duplication of header names in the list without extensions - >>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h']) + >>> h = HeaderList(["/dir1/a.h", "/dir2/b.h", "/dir3/a.h"]) >>> h.names - ['a', 'b'] + ["a", "b"] Returns: - list: A list of files without extensions + A list of files without extensions """ names = [] for x in self.basenames: name = x - # Valid extensions include: ['.cuh', '.hpp', '.hh', '.h'] + # Valid extensions include: [".cuh", ".hpp", ".hh", ".h"] for ext in [".cuh", ".hpp", ".hh", ".h"]: i = name.rfind(ext) if i != -1: @@ -2078,84 +2074,84 @@ def names(self): return list(dedupe(names)) @property - def include_flags(self): + def include_flags(self) -> str: """Include flags - >>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h']) + >>> h = HeaderList(["/dir1/a.h", "/dir1/b.h", "/dir2/c.h"]) >>> h.include_flags - '-I/dir1 -I/dir2' + "-I/dir1 -I/dir2" Returns: - str: A joined list of include flags + A joined list of include flags """ return " ".join(["-I" + x for x in self.directories]) @property - def macro_definitions(self): + def macro_definitions(self) -> str: """Macro definitions - >>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h']) - >>> h.add_macro('-DBOOST_LIB_NAME=boost_regex') - >>> h.add_macro('-DBOOST_DYN_LINK') + >>> h = HeaderList(["/dir1/a.h", "/dir1/b.h", "/dir2/c.h"]) + >>> h.add_macro("-DBOOST_LIB_NAME=boost_regex") + >>> h.add_macro("-DBOOST_DYN_LINK") >>> h.macro_definitions - '-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK' + "-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK" Returns: - str: A joined list of macro definitions + A joined list of macro definitions """ return " ".join(self._macro_definitions) @property - def cpp_flags(self): + def cpp_flags(self) -> str: """Include flags + macro definitions - >>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h']) + >>> h = HeaderList(["/dir1/a.h", "/dir1/b.h", "/dir2/c.h"]) >>> h.cpp_flags - '-I/dir1 -I/dir2' - >>> h.add_macro('-DBOOST_DYN_LINK') + "-I/dir1 -I/dir2" + >>> h.add_macro("-DBOOST_DYN_LINK") >>> h.cpp_flags - '-I/dir1 -I/dir2 -DBOOST_DYN_LINK' + "-I/dir1 -I/dir2 -DBOOST_DYN_LINK" Returns: - str: A joined list of include flags and macro definitions + A joined list of include flags and macro definitions """ cpp_flags = self.include_flags if self.macro_definitions: cpp_flags += " " + self.macro_definitions return cpp_flags - def add_macro(self, macro): + def add_macro(self, macro: str) -> None: """Add a macro definition Parameters: - macro (str): The macro to add + macro: The macro to add """ self._macro_definitions.append(macro) -def find_headers(headers, root, recursive=False): +def find_headers(headers: Union[str, List[str]], root: str, recursive: bool = False) -> HeaderList: """Returns an iterable object containing a list of full paths to headers if found. - Accepts any glob characters accepted by fnmatch: + Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`: - ======= ==================================== - Pattern Meaning - ======= ==================================== - * matches everything - ? 
matches any single character
-    [seq]   matches any character in ``seq``
-    [!seq]  matches any character not in ``seq``
-    ======= ====================================
+    ==========  ====================================
+    Pattern     Meaning
+    ==========  ====================================
+    ``*``       matches zero or more characters
+    ``?``       matches any single character
+    ``[seq]``   matches any character in ``seq``
+    ``[!seq]``  matches any character not in ``seq``
+    ==========  ====================================

     Parameters:
-        headers (str or list): Header name(s) to search for
-        root (str): The root directory to start searching from
-        recursive (bool): if False search only root folder,
-            if True descends top-down from the root. Defaults to False.
+        headers: Header name(s) to search for
+        root: The root directory to start searching from
+        recursive: if :data:`False` search only root folder,
+            if :data:`True` descends top-down from the root. Defaults to :data:`False`.

     Returns:
-        HeaderList: The headers that have been found
+        The headers that have been found
     """
     if isinstance(headers, str):
         headers = [headers]
@@ -2189,12 +2185,12 @@ def find_headers(headers, root, recursive=False):


 @system_path_filter
-def find_all_headers(root):
+def find_all_headers(root: str) -> HeaderList:
     """Convenience function that returns the list of all headers found in the directory passed as argument.

     Args:
-        root (str): directory where to look recursively for header files
+        root: directory where to look recursively for header files

     Returns:
         List of all headers found in ``root`` and subdirectories.
@@ -2210,24 +2206,24 @@ class LibraryList(FileList):
     """

     @property
-    def libraries(self):
+    def libraries(self) -> List[str]:
         """Stable de-duplication of library files.

         Returns:
-            list: A list of library files
+            A list of library files
         """
         return self.files

     @property
-    def names(self):
+    def names(self) -> List[str]:
         """Stable de-duplication of library names in the list

-        >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.so'])
+        >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir3/liba.so"])
         >>> l.names
-        ['a', 'b']
+        ["a", "b"]

         Returns:
-            list: A list of library names
+            A list of library names
         """
         names = []

@@ -2253,46 +2249,46 @@ def names(self):
         return list(dedupe(names))

     @property
-    def search_flags(self):
+    def search_flags(self) -> str:
         """Search flags for the libraries

-        >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
+        >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
         >>> l.search_flags
-        '-L/dir1 -L/dir2'
+        "-L/dir1 -L/dir2"

         Returns:
-            str: A joined list of search flags
+            A joined list of search flags
         """
         return " ".join(["-L" + x for x in self.directories])

     @property
-    def link_flags(self):
+    def link_flags(self) -> str:
         """Link flags for the libraries

-        >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
+        >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
         >>> l.link_flags
-        '-la -lb'
+        "-la -lb"

         Returns:
-            str: A joined list of link flags
+            A joined list of link flags
         """
         return " ".join(["-l" + name for name in self.names])

     @property
-    def ld_flags(self):
+    def ld_flags(self) -> str:
         """Search flags + link flags

-        >>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
+        >>> l = LibraryList(["/dir1/liba.a", "/dir2/libb.a", "/dir1/liba.so"])
         >>> l.ld_flags
-        '-L/dir1 -L/dir2 -la -lb'
+        "-L/dir1 -L/dir2 -la -lb"

         Returns:
-            str: A joined list of search flags
+            A joined list of search flags
and link flags
         """
         return self.search_flags + " " + self.link_flags


-def find_system_libraries(libraries, shared=True):
+def find_system_libraries(libraries: Union[str, List[str]], shared: bool = True) -> LibraryList:
     """Searches the usual system library locations for ``libraries``.

     Search order is as follows:
@@ -2304,24 +2300,24 @@
     5. ``/usr/local/lib64``
     6. ``/usr/local/lib``

-    Accepts any glob characters accepted by fnmatch:
+    Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`:

-    ======= ====================================
-    Pattern Meaning
-    ======= ====================================
-    *       matches everything
-    ?       matches any single character
-    [seq]   matches any character in ``seq``
-    [!seq]  matches any character not in ``seq``
-    ======= ====================================
+    ==========  ====================================
+    Pattern     Meaning
+    ==========  ====================================
+    ``*``       matches zero or more characters
+    ``?``       matches any single character
+    ``[seq]``   matches any character in ``seq``
+    ``[!seq]``  matches any character not in ``seq``
+    ==========  ====================================

     Parameters:
-        libraries (str or list): Library name(s) to search for
-        shared (bool): if True searches for shared libraries,
-            otherwise for static. Defaults to True.
+        libraries: Library name(s) to search for
+        shared: if :data:`True` searches for shared libraries,
+            otherwise for static. Defaults to :data:`True`.

     Returns:
-        LibraryList: The libraries that have been found
+        The libraries that have been found
     """
     if isinstance(libraries, str):
         libraries = [libraries]
@@ -2331,7 +2327,7 @@
         message = message.format(find_system_libraries.__name__, type(libraries))
         raise TypeError(message)

-    libraries_found = []
+    libraries_found = LibraryList([])
     search_locations = [
         "/lib64",
         "/lib",
@@ -2352,37 +2348,42 @@


 def find_libraries(
-    libraries, root, shared=True, recursive=False, runtime=True, max_depth: Optional[int] = None
-):
+    libraries: Union[str, List[str]],
+    root: str,
+    shared: bool = True,
+    recursive: bool = False,
+    runtime: bool = True,
+    max_depth: Optional[int] = None,
+) -> LibraryList:
     """Returns an iterable of full paths to libraries found in a root dir.

-    Accepts any glob characters accepted by fnmatch:
+    Accepts any glob characters accepted by :py:func:`fnmatch.fnmatch`:

-    ======= ====================================
-    Pattern Meaning
-    ======= ====================================
-    *       matches everything
-    ?       matches any single character
-    [seq]   matches any character in ``seq``
-    [!seq]  matches any character not in ``seq``
-    ======= ====================================
+    ==========  ====================================
+    Pattern     Meaning
+    ==========  ====================================
+    ``*``       matches zero or more characters
+    ``?``       matches any single character
+    ``[seq]``   matches any character in ``seq``
+    ``[!seq]``  matches any character not in ``seq``
+    ==========  ====================================

     Parameters:
-        libraries (str or list): Library name(s) to search for
-        root (str): The root directory to start searching from
-        shared (bool): if True searches for shared libraries,
-            otherwise for static. Defaults to True.
-        recursive (bool): if False search only root folder,
-            if True descends top-down from the root. Defaults to False.
-        max_depth (int): if set, don't search below this depth.
Cannot be set - if recursive is False - runtime (bool): Windows only option, no-op elsewhere. If true, - search for runtime shared libs (.DLL), otherwise, search - for .Lib files. If shared is false, this has no meaning. - Defaults to True. + libraries: Library name(s) to search for + root: The root directory to start searching from + shared: if :data:`True` searches for shared libraries, + otherwise for static. Defaults to :data:`True`. + recursive: if :data:`False` search only root folder, + if :data:`True` descends top-down from the root. Defaults to :data:`False`. + max_depth: if set, don't search below this depth. Cannot be set + if recursive is :data:`False` + runtime: Windows only option, no-op elsewhere. If :data:`True`, + search for runtime shared libs (``.DLL``), otherwise, search + for ``.Lib`` files. If ``shared`` is :data:`False`, this has no meaning. + Defaults to :data:`True`. Returns: - LibraryList: The libraries that have been found + The libraries that have been found """ if isinstance(libraries, str): @@ -2443,29 +2444,31 @@ def find_libraries( return LibraryList(found_libs) -def find_all_shared_libraries(root, recursive=False, runtime=True): +def find_all_shared_libraries( + root: str, recursive: bool = False, runtime: bool = True +) -> LibraryList: """Convenience function that returns the list of all shared libraries found in the directory passed as argument. - See documentation for `spack.llnl.util.filesystem.find_libraries` for more information + See documentation for :py:func:`find_libraries` for more information """ return find_libraries("*", root=root, shared=True, recursive=recursive, runtime=runtime) -def find_all_static_libraries(root, recursive=False): +def find_all_static_libraries(root: str, recursive: bool = False) -> LibraryList: """Convenience function that returns the list of all static libraries found in the directory passed as argument. - See documentation for `spack.llnl.util.filesystem.find_libraries` for more information + See documentation for :py:func:`find_libraries` for more information """ return find_libraries("*", root=root, shared=False, recursive=recursive) -def find_all_libraries(root, recursive=False): +def find_all_libraries(root: str, recursive: bool = False) -> LibraryList: """Convenience function that returns the list of all libraries found in the directory passed as argument. - See documentation for `spack.llnl.util.filesystem.find_libraries` for more information + See documentation for :py:func:`find_libraries` for more information """ return find_all_shared_libraries(root, recursive=recursive) + find_all_static_libraries( @@ -2473,215 +2476,6 @@ def find_all_libraries(root, recursive=False): ) -class WindowsSimulatedRPath: - """Class representing Windows filesystem rpath analog - - One instance of this class is associated with a package (only on Windows) - For each lib/binary directory in an associated package, this class introduces - a symlink to any/all dependent libraries/binaries. This includes the packages - own bin/lib directories, meaning the libraries are linked to the binary directory - and vis versa. 
- """ - - def __init__( - self, - package, - base_modification_prefix: Optional[Union[str, pathlib.Path]] = None, - link_install_prefix: bool = True, - ): - """ - Args: - package (spack.package_base.PackageBase): Package requiring links - base_modification_prefix (str|pathlib.Path): Path representation indicating - the root directory in which to establish the simulated rpath, ie where the - symlinks that comprise the "rpath" behavior will be installed. - - Note: This is a mutually exclusive option with `link_install_prefix` using - both is an error. - - Default: None - link_install_prefix (bool): Link against package's own install or stage root. - Packages that run their own executables during build and require rpaths to - the build directory during build time require this option. - - Default: install - root - - Note: This is a mutually exclusive option with `base_modification_prefix`, using - both is an error. - """ - self.pkg = package - self._addl_rpaths: set[str] = set() - if link_install_prefix and base_modification_prefix: - raise RuntimeError( - "Invalid combination of arguments given to WindowsSimulated RPath.\n" - "Select either `link_install_prefix` to create an install prefix rpath" - " or specify a `base_modification_prefix` for any other link type. " - "Specifying both arguments is invalid." - ) - if not (link_install_prefix or base_modification_prefix): - raise RuntimeError( - "Insufficient arguments given to WindowsSimulatedRpath.\n" - "WindowsSimulatedRPath requires one of link_install_prefix" - " or base_modification_prefix to be specified." - " Neither was provided." - ) - - self.link_install_prefix = link_install_prefix - if base_modification_prefix: - self.base_modification_prefix = pathlib.Path(base_modification_prefix) - else: - self.base_modification_prefix = pathlib.Path(self.pkg.prefix) - self._additional_library_dependents: set[pathlib.Path] = set() - if not self.link_install_prefix: - tty.debug(f"Generating rpath for non install context: {base_modification_prefix}") - - @property - def library_dependents(self): - """ - Set of directories where package binaries/libraries are located. - """ - base_pths = set() - if self.link_install_prefix: - base_pths.add(pathlib.Path(self.pkg.prefix.bin)) - base_pths |= self._additional_library_dependents - return base_pths - - def add_library_dependent(self, *dest): - """ - Add paths to directories or libraries/binaries to set of - common paths that need to link against other libraries - - Specified paths should fall outside of a package's common - link paths, i.e. the bin - directories. 
- """ - for pth in dest: - if os.path.isfile(pth): - new_pth = pathlib.Path(pth).parent - else: - new_pth = pathlib.Path(pth) - path_is_in_prefix = new_pth.is_relative_to(self.base_modification_prefix) - if not path_is_in_prefix: - raise RuntimeError( - f"Attempting to generate rpath symlink out of rpath context:\ -{str(self.base_modification_prefix)}" - ) - self._additional_library_dependents.add(new_pth) - - @property - def rpaths(self): - """ - Set of libraries this package needs to link against during runtime - These packages will each be symlinked into the packages lib and binary dir - """ - dependent_libs = [] - for path in self.pkg.rpath: - dependent_libs.extend(list(find_all_shared_libraries(path, recursive=True))) - for extra_path in self._addl_rpaths: - dependent_libs.extend(list(find_all_shared_libraries(extra_path, recursive=True))) - return set([pathlib.Path(x) for x in dependent_libs]) - - def add_rpath(self, *paths): - """ - Add libraries found at the root of provided paths to runtime linking - - These are libraries found outside of the typical scope of rpath linking - that require manual inclusion in a runtime linking scheme. - These links are unidirectional, and are only - intended to bring outside dependencies into this package - - Args: - *paths (str): arbitrary number of paths to be added to runtime linking - """ - self._addl_rpaths = self._addl_rpaths | set(paths) - - def _link(self, path: pathlib.Path, dest_dir: pathlib.Path): - """Perform link step of simulated rpathing, installing - simlinks of file in path to the dest_dir - location. This method deliberately prevents - the case where a path points to a file inside the dest_dir. - This is because it is both meaningless from an rpath - perspective, and will cause an error when Developer - mode is not enabled""" - - def report_already_linked(): - # We have either already symlinked or we are encountering a naming clash - # either way, we don't want to overwrite existing libraries - already_linked = islink(str(dest_file)) - tty.debug( - "Linking library %s to %s failed, " % (str(path), str(dest_file)) - + "already linked." - if already_linked - else "library with name %s already exists at location %s." - % (str(file_name), str(dest_dir)) - ) - - file_name = path.name - dest_file = dest_dir / file_name - if not dest_file.exists() and dest_dir.exists() and not dest_file == path: - try: - symlink(str(path), str(dest_file)) - # For py2 compatibility, we have to catch the specific Windows error code - # associate with trying to create a file that already exists (winerror 183) - # Catch OSErrors missed by the SymlinkError checks - except OSError as e: - if sys.platform == "win32" and e.errno == errno.EEXIST: - report_already_linked() - else: - raise e - # catch errors we raise ourselves from Spack - except AlreadyExistsError: - report_already_linked() - - def establish_link(self): - """ - (sym)link packages to runtime dependencies based on RPath configuration for - Windows heuristics - """ - # from build_environment.py:463 - # The top-level package is always RPATHed. It hasn't been installed yet - # so the RPATHs are added unconditionally - - # for each binary install dir in self.pkg (i.e. 
pkg.prefix.bin, pkg.prefix.lib) - # install a symlink to each dependent library - - # do not rpath for system libraries included in the dag - # we should not be modifying libraries managed by the Windows system - # as this will negatively impact linker behavior and can result in permission - # errors if those system libs are not modifiable by Spack - if "windows-system" not in getattr(self.pkg, "tags", []): - for library, lib_dir in itertools.product(self.rpaths, self.library_dependents): - self._link(library, lib_dir) - - -def make_package_test_rpath(pkg, test_dir: Union[str, pathlib.Path]): - """Establishes a temp Windows simulated rpath for the pkg in the testing directory - so an executable can test the libraries/executables with proper access - to dependent dlls - - Note: this is a no-op on all other platforms besides Windows - - Args: - pkg (spack.package_base.PackageBase): the package for which the rpath should be computed - test_dir: the testing directory in which we should construct an rpath - """ - # link_install_prefix as false ensures we're not linking into the install prefix - mini_rpath = WindowsSimulatedRPath(pkg, link_install_prefix=False) - # add the testing directory as a location to install rpath symlinks - mini_rpath.add_library_dependent(test_dir) - - # check for whether build_directory is available, if not - # assume the stage root is the build dir - build_dir_attr = getattr(pkg, "build_directory", None) - build_directory = build_dir_attr if build_dir_attr else pkg.stage.path - # add the build dir & build dir bin - mini_rpath.add_rpath(os.path.join(build_directory, "bin")) - mini_rpath.add_rpath(os.path.join(build_directory)) - # construct rpath - mini_rpath.establish_link() - - @system_path_filter @memoized def can_access_dir(path): @@ -2890,12 +2684,17 @@ def remove_directory_contents(dir): @contextmanager @system_path_filter -def keep_modification_time(*filenames): +def keep_modification_time(*filenames: str) -> Generator[None, None, None]: """ Context manager to keep the modification timestamps of the input files. Tolerates and has no effect on non-existent files and files that are deleted by the nested code. + Example:: + + with keep_modification_time("file1.txt", "file2.txt"): + # do something that modifies file1.txt and file2.txt + Parameters: *filenames: one or more files that must have their modification timestamps unchanged @@ -2918,7 +2717,7 @@ def temporary_file_position(stream): @contextmanager -def current_file_position(stream: IO[str], loc: int, relative_to=io.SEEK_CUR): +def current_file_position(stream: IO, loc: int, relative_to=io.SEEK_CUR): with temporary_file_position(stream): stream.seek(loc, relative_to) yield @@ -3392,7 +3191,7 @@ def resolve_link_target_relative_to_the_link(link): rename = os.rename -class SymlinkError(RuntimeError): +class SymlinkError(OSError): """Exception class for errors raised while creating symlinks, junctions and hard links """ diff --git a/lib/spack/spack/llnl/util/lang.py b/lib/spack/spack/llnl/util/lang.py index 4050c235b96e0c..f12c2d7e47f8ce 100644 --- a/lib/spack/spack/llnl/util/lang.py +++ b/lib/spack/spack/llnl/util/lang.py @@ -107,62 +107,18 @@ def attr_setdefault(obj, name, value): return getattr(obj, name) -def union_dicts(*dicts): - """Use update() to combine all dicts into one. - - This builds a new dictionary, into which we ``update()`` each element - of ``dicts`` in order. Items from later dictionaries will override - items from earlier dictionaries. 
- - Args: - dicts (list): list of dictionaries - - Return: (dict): a merged dictionary containing combined keys and - values from ``dicts``. - - """ - result = {} - for d in dicts: - result.update(d) - return result - - -# Used as a sentinel that disambiguates tuples passed in *args from coincidentally -# matching tuples formed from kwargs item pairs. -_kwargs_separator = (object(),) - - -def stable_args(*args, **kwargs): - """A key factory that performs a stable sort of the parameters.""" - key = args - if kwargs: - key += _kwargs_separator + tuple(sorted(kwargs.items())) - return key - - def memoized(func): """Decorator that caches the results of a function, storing them in an attribute of that function. - """ - func.cache = {} - - @functools.wraps(func) - def _memoized_function(*args, **kwargs): - key = stable_args(*args, **kwargs) - try: - return func.cache[key] - except KeyError: - ret = func(*args, **kwargs) - func.cache[key] = ret - return ret - except TypeError as e: - # TypeError is raised when indexing into a dict if the key is unhashable. - raise UnhashableArguments( - "args + kwargs '{}' was not hashable for function '{}'".format(key, func.__name__) - ) from e + Example:: - return _memoized_function + @memoized + def expensive_computation(x): + # Some expensive computation + return result + """ + return functools.lru_cache(maxsize=None)(func) def list_modules(directory, **kwargs): @@ -322,12 +278,7 @@ def lazy_lexicographic_ordering(cls, set_hash=True): class Widget: def _cmp_key(self): - return ( - self.a, - self.b, - (self.c, self.d), - self.e - ) + return (self.a, self.b, (self.c, self.d), self.e) def __eq__(self, other): return self._cmp_key() == other._cmp_key() @@ -383,9 +334,11 @@ class Widget: def _cmp_iter(self): yield a yield b + def cd_fun(): yield c yield d + yield cd_fun yield e @@ -394,10 +347,10 @@ def _cmp_fast_eq(self, other): ``_cmp_fast_eq`` should return: - * ``True`` if ``self`` is equal to ``other``, - * ``False`` if ``self`` is not equal to ``other``, and - * ``None`` if it's not known whether they are equal, and the full - comparison should be done. + * ``True`` if ``self`` is equal to ``other``, + * ``False`` if ``self`` is not equal to ``other``, and + * ``None`` if it's not known whether they are equal, and the full + comparison should be done. ``lazy_lexicographic_ordering`` uses ``_cmp_fast_eq`` to short-circuit the comparison if the answer can be determined quickly. If you do not @@ -405,16 +358,16 @@ def _cmp_fast_eq(self, other): Some things to note: - * If a class already has ``__eq__``, ``__ne__``, ``__lt__``, - ``__le__``, ``__gt__``, ``__ge__``, or ``__hash__`` defined, this - decorator will overwrite them. + * If a class already has ``__eq__``, ``__ne__``, ``__lt__``, + ``__le__``, ``__gt__``, ``__ge__``, or ``__hash__`` defined, this + decorator will overwrite them. - * If ``set_hash`` is ``False``, this will not overwrite - ``__hash__``. + * If ``set_hash`` is ``False``, this will not overwrite + ``__hash__``. - * This class uses Python 2 None-comparison semantics. If you yield - None and it is compared to a non-None type, None will always be - less than the other object. + * This class uses Python 2 None-comparison semantics. If you yield + None and it is compared to a non-None type, None will always be + less than the other object. 
Raises: TypeError: If the class does not have a ``_cmp_iter`` method @@ -554,7 +507,7 @@ def dedupe(sequence, key=None): Examples: - Dedupe a list of integers: + Dedupe a list of integers:: [x for x in dedupe([1, 2, 1, 3, 2])] == [1, 2, 3] @@ -568,17 +521,15 @@ def dedupe(sequence, key=None): seen.add(x_key) -def pretty_date(time, now=None): +def pretty_date(time: Union[datetime, int], now: Optional[datetime] = None) -> str: """Convert a datetime or timestamp to a pretty, relative date. Args: - time (datetime.datetime or int): date to print prettily - now (datetime.datetime): datetime for 'now', i.e. the date the pretty date - is relative to (default is datetime.now()) + time: date to print prettily + now: the date the pretty date is relative to (default is ``datetime.now()``) Returns: - (str): pretty string like 'an hour ago', 'Yesterday', - '3 months ago', 'just now', etc. + pretty string like "an hour ago", "Yesterday", "3 months ago", "just now", etc. Adapted from https://stackoverflow.com/questions/1551382. @@ -603,51 +554,49 @@ def pretty_date(time, now=None): if second_diff < 10: return "just now" if second_diff < 60: - return str(second_diff) + " seconds ago" + return f"{second_diff} seconds ago" if second_diff < 120: return "a minute ago" if second_diff < 3600: - return str(second_diff // 60) + " minutes ago" + return f"{second_diff // 60} minutes ago" if second_diff < 7200: return "an hour ago" if second_diff < 86400: - return str(second_diff // 3600) + " hours ago" + return f"{second_diff // 3600} hours ago" if day_diff == 1: return "yesterday" if day_diff < 7: - return str(day_diff) + " days ago" + return f"{day_diff} days ago" if day_diff < 28: weeks = day_diff // 7 if weeks == 1: return "a week ago" else: - return str(day_diff // 7) + " weeks ago" + return f"{day_diff // 7} weeks ago" if day_diff < 365: months = day_diff // 30 if months == 1: return "a month ago" elif months == 12: months -= 1 - return str(months) + " months ago" + return f"{months} months ago" - diff = day_diff // 365 - if diff == 1: + year_diff = day_diff // 365 + if year_diff == 1: return "a year ago" - else: - return str(diff) + " years ago" + return f"{year_diff} years ago" -def pretty_string_to_date(date_str, now=None): +def pretty_string_to_date(date_str: str, now: Optional[datetime] = None) -> datetime: """Parses a string representing a date and returns a datetime object. Args: - date_str (str): string representing a date. This string might be + date_str: string representing a date. This string might be in different format (like ``YYYY``, ``YYYY-MM``, ``YYYY-MM-DD``, ``YYYY-MM-DD HH:MM``, ``YYYY-MM-DD HH:MM:SS``) or be a *pretty date* (like ``yesterday`` or ``two months ago``) - Returns: - (datetime.datetime): datetime object corresponding to ``date_str`` + Returns: datetime object corresponding to ``date_str`` """ pattern = {} @@ -694,8 +643,7 @@ def _n_xxx_ago(x): if bool(regexp.match(date_str)): return parser(date_str) - msg = 'date "{0}" does not match any valid format'.format(date_str) - raise ValueError(msg) + raise ValueError(f'date "{date_str}" does not match any valid format') def pretty_seconds_formatter(seconds): @@ -909,7 +857,7 @@ def uniq(sequence): def elide_list(line_list: List[str], max_num: int = 10) -> List[str]: """Takes a long list and limits it to a smaller number of elements, - replacing intervening elements with '...'. For example:: + replacing intervening elements with ``"..."``. 
For example:: elide_list(["1", "2", "3", "4", "5", "6"], 4) @@ -1002,12 +950,12 @@ def __set__(self, instance, value): class TypedMutableSequence(collections.abc.MutableSequence): """Base class that behaves like a list, just with a different type. - Client code can inherit from this base class: + Client code can inherit from this base class:: class Foo(TypedMutableSequence): pass - and later perform checks based on types: + and later perform checks based on types:: if isinstance(l, Foo): # do something @@ -1100,6 +1048,7 @@ class classproperty(Generic[ClassPropertyType]): def __init__(self, callback: Callable[[Any], ClassPropertyType]) -> None: self.callback = callback + self.__doc__ = callback.__doc__ def __get__(self, instance, owner) -> ClassPropertyType: return self.callback(owner) @@ -1190,6 +1139,12 @@ def reversed_values(self): """Iterates over values from the highest priority, to the lowest.""" yield from (self._data[key] for _, key in reversed(self._priorities)) + def priority_values(self, priority: int): + """Iterate over values of a given priority.""" + if not any(p == priority for p, _ in self._priorities): + raise KeyError(f"No such priority in PriorityOrderedMapping: {priority}") + yield from (self._data[k] for p, k in self._priorities if p == priority) + def _highest_priority(self) -> int: if not self._priorities: return 0 diff --git a/lib/spack/spack/llnl/util/link_tree.py b/lib/spack/spack/llnl/util/link_tree.py index 3a38db862bf760..bb302ef5d1add3 100644 --- a/lib/spack/spack/llnl/util/link_tree.py +++ b/lib/spack/spack/llnl/util/link_tree.py @@ -456,25 +456,27 @@ def unmerge_directories(self, dest_root, ignore): os.remove(marker) def merge( - self, dest_root, ignore_conflicts=False, ignore=None, link=fs.symlink, relative=False + self, + dest_root, + ignore_conflicts: bool = False, + ignore: Optional[Callable[[str], bool]] = None, + link: Callable = fs.symlink, + relative: bool = False, ): - """Link all files in src into dest, creating directories - if necessary. + """Link all files in src into dest, creating directories if necessary. 
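With the annotated signature above, ``merge`` call sites become self-describing. A sketch of typical usage, assuming only the signature shown here (paths and the ignore predicate are hypothetical)::

    from spack.llnl.util.link_tree import LinkTree

    tree = LinkTree("/opt/software/zlib")        # hypothetical source prefix
    tree.merge(
        "/opt/views/default",                    # hypothetical destination root
        ignore_conflicts=True,                   # collect conflicting files instead of raising
        ignore=lambda p: p.endswith(".spec"),    # skip files matching the predicate
        relative=True,                           # create relative symlinks
    )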
- Keyword Args: + Arguments: - ignore_conflicts (bool): if True, do not break when the target exists; - return a list of files that could not be linked + ignore_conflicts: if True, do not break when the target exists; return a list of files + that could not be linked - ignore (callable): callable that returns True if a file is to be - ignored in the merge (by default ignore nothing) + ignore: callable that returns True if a file is to be ignored in the merge (by default + ignore nothing) - link (callable): function to create links with - (defaults to spack.llnl.util.filesystem.symlink) - - relative (bool): create all symlinks relative to the target - (default False) + link: function to create links with (defaults to + ``spack.llnl.util.filesystem.symlink``) + relative: create all symlinks relative to the target (default False) """ if ignore is None: ignore = lambda x: False diff --git a/lib/spack/spack/llnl/util/lock.py b/lib/spack/spack/llnl/util/lock.py index f30f9b4f3a9283..63abafa6ece8aa 100644 --- a/lib/spack/spack/llnl/util/lock.py +++ b/lib/spack/spack/llnl/util/lock.py @@ -339,25 +339,22 @@ def _lock(self, op: int, timeout: Optional[float] = None) -> Tuple[float, int]: ) ) - poll_intervals = iter(Lock._poll_interval_generator()) - start_time = time.time() - num_attempts = 0 - while (not timeout) or (time.time() - start_time) < timeout: - num_attempts += 1 - if self._poll_lock(op): - total_wait_time = time.time() - start_time - return total_wait_time, num_attempts + start_time = time.monotonic() + end_time = float("inf") if not timeout else start_time + timeout + num_attempts = 1 + poll_intervals = Lock._poll_interval_generator() + while True: + if self._poll_lock(op): + return time.monotonic() - start_time, num_attempts + if time.monotonic() >= end_time: + break time.sleep(next(poll_intervals)) + num_attempts += 1 - # TBD: Is an extra attempt after timeout needed/appropriate? - num_attempts += 1 - if self._poll_lock(op): - total_wait_time = time.time() - start_time - return total_wait_time, num_attempts - - total_wait_time = time.time() - start_time - raise LockTimeoutError(op_str.lower(), self.path, total_wait_time, num_attempts) + raise LockTimeoutError( + op_str.lower(), self.path, time.monotonic() - start_time, num_attempts + ) def _poll_lock(self, op: int) -> bool: """Attempt to acquire the lock in a non-blocking manner. Return whether @@ -404,17 +401,7 @@ def _ensure_parent_directory(self) -> str: # relative paths to lockfiles in the current directory have no parent if not parent: return "." - - try: - os.makedirs(parent) - except OSError as e: - # os.makedirs can fail in a number of ways when the directory already exists. - # With EISDIR, we know it exists, and others like EEXIST, EACCES, and EROFS - # are fine if we ensure that the directory exists. - # Python 3 allows an exist_ok parameter and ignores any OSError as long as - # the directory exists. - if not (e.errno == errno.EISDIR or os.path.isdir(parent)): - raise + os.makedirs(parent, exist_ok=True) return parent def _read_log_debug_data(self) -> None: @@ -709,16 +696,13 @@ class LockTransaction: """Simple nested transaction context manager that uses a file lock. Arguments: - lock (Lock): underlying lock for this transaction to be acquired on - enter and released on exit - acquire (typing.Callable or contextlib.contextmanager): function to be called - after lock is acquired, or contextmanager to enter after acquire and leave - before release. - release (typing.Callable): function to be called before release. 
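The rewritten ``Lock._lock`` loop above follows a reusable pattern: compute a monotonic deadline once, then alternate attempts and sleeps until success or the deadline passes. A standalone sketch of that pattern, with a hypothetical ``try_acquire`` callable standing in for ``_poll_lock``::

    import time
    from typing import Callable, Iterator, Optional, Tuple

    def poll_until(
        try_acquire: Callable[[], bool],   # hypothetical acquisition attempt
        intervals: Iterator[float],        # backoff schedule, e.g. itertools.repeat(0.1)
        timeout: Optional[float] = None,
    ) -> Tuple[float, int]:
        # monotonic time is immune to wall-clock adjustments during the wait
        start = time.monotonic()
        end = float("inf") if not timeout else start + timeout
        attempts = 1
        while True:
            if try_acquire():
                return time.monotonic() - start, attempts
            if time.monotonic() >= end:
                raise TimeoutError(f"gave up after {attempts} attempts")
            time.sleep(next(intervals))
            attempts += 1

    # e.g. poll_until(check_fn, itertools.repeat(0.1), timeout=5.0)
    # retries every 100 ms for at most 5 seconds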
If - ``acquire`` is a contextmanager, this will be called *after* - exiting the nexted context and before the lock is released. - timeout (float): number of seconds to set for the timeout when - acquiring the lock (default no timeout) + lock: underlying lock for this transaction to be acquired on enter and released on exit + acquire: function to be called after lock is acquired, or contextmanager to enter after + acquire and leave before release. + release: function to be called before release. If ``acquire`` is a contextmanager, this + will be called *after* exiting the nested context and before the lock is released. + timeout: number of seconds to set for the timeout when acquiring the lock (default no + timeout) If the ``acquire_fn`` returns a value, it is used as the return value for ``__enter__``, allowing it to be passed as the ``as`` argument of a diff --git a/lib/spack/spack/llnl/util/tty/__init__.py b/lib/spack/spack/llnl/util/tty/__init__.py index 5a63a47cfd3692..775a040c56b0fe 100644 --- a/lib/spack/spack/llnl/util/tty/__init__.py +++ b/lib/spack/spack/llnl/util/tty/__init__.py @@ -5,17 +5,13 @@ import contextlib import io import os -import struct +import shutil import sys import textwrap import traceback from datetime import datetime -from sys import platform as _platform -from typing import Any, NoReturn, Tuple - -if _platform != "win32": - import fcntl - import termios +from types import TracebackType +from typing import Callable, Iterator, NoReturn, Optional, Type, Union from .color import cescape, clen, cprint, cwrite @@ -27,67 +23,67 @@ _msg_enabled = True _warn_enabled = True _error_enabled = True -_output_filter = lambda s: s +_output_filter: Callable[[str], str] = lambda s: s indent = " " -def debug_level(): +def debug_level() -> int: return _debug -def is_verbose(): +def is_verbose() -> bool: return _verbose -def is_debug(level=1): +def is_debug(level: int = 1) -> bool: return _debug >= level -def set_debug(level=0): +def set_debug(level: int = 0) -> None: global _debug assert level >= 0, "Debug level must be a positive value" _debug = level -def set_verbose(flag): +def set_verbose(flag: bool) -> None: global _verbose _verbose = flag -def set_timestamp(flag): +def set_timestamp(flag: bool) -> None: global _timestamp _timestamp = flag -def set_msg_enabled(flag): +def set_msg_enabled(flag: bool) -> None: global _msg_enabled _msg_enabled = flag -def set_warn_enabled(flag): +def set_warn_enabled(flag: bool) -> None: global _warn_enabled _warn_enabled = flag -def set_error_enabled(flag): +def set_error_enabled(flag: bool) -> None: global _error_enabled _error_enabled = flag -def msg_enabled(): +def msg_enabled() -> bool: return _msg_enabled -def warn_enabled(): +def warn_enabled() -> bool: return _warn_enabled -def error_enabled(): +def error_enabled() -> bool: return _error_enabled @contextlib.contextmanager -def output_filter(filter_fn): +def output_filter(filter_fn: Callable[[str], str]) -> Iterator[None]: """Context manager that applies a filter to all output.""" global _output_filter saved_filter = _output_filter @@ -99,9 +95,11 @@ def output_filter(filter_fn): class SuppressOutput: - """Class for disabling output in a scope using 'with' keyword""" + """Class for disabling output in a scope using ``with`` keyword""" - def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True): + def __init__( + self, msg_enabled: bool = True, warn_enabled: bool = True, error_enabled: bool = True + ) -> None: self._msg_enabled_initial = _msg_enabled 
self._warn_enabled_initial = _warn_enabled self._error_enabled_initial = _error_enabled @@ -110,24 +108,29 @@ def __init__(self, msg_enabled=True, warn_enabled=True, error_enabled=True): self._warn_enabled = warn_enabled self._error_enabled = error_enabled - def __enter__(self): + def __enter__(self) -> None: set_msg_enabled(self._msg_enabled) set_warn_enabled(self._warn_enabled) set_error_enabled(self._error_enabled) - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: set_msg_enabled(self._msg_enabled_initial) set_warn_enabled(self._warn_enabled_initial) set_error_enabled(self._error_enabled_initial) -def set_stacktrace(flag): +def set_stacktrace(flag: bool) -> None: global _stacktrace _stacktrace = flag -def process_stacktrace(countback): - """Gives file and line frame 'countback' frames from the bottom""" +def process_stacktrace(countback: int) -> str: + """Gives file and line frame ``countback`` frames from the bottom""" st = traceback.extract_stack() # Not all entries may be spack files, we have to remove those that aren't. file_list = [] @@ -139,25 +142,25 @@ def process_stacktrace(countback): root_dir = os.path.commonprefix(file_list) root_len = len(root_dir) st_idx = len(st) - countback - 1 - st_text = "%s:%i " % (st[st_idx][0][root_len:], st[st_idx][1]) + st_text = f"{st[st_idx][0][root_len:]}:{st[st_idx][1]:d} " return st_text -def show_pid(): +def show_pid() -> bool: return is_debug(2) -def get_timestamp(force=False): +def get_timestamp(force: bool = False) -> str: """Get a string timestamp""" if _debug or _timestamp or force: - # Note inclusion of the PID is useful for parallel builds. - pid = ", {0}".format(os.getpid()) if show_pid() else "" - return "[{0}{1}] ".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S.%f"), pid) + # Note the inclusion of the PID is useful for parallel builds. 
+ pid = f", {os.getpid()}" if show_pid() else "" + return f"[{datetime.now().strftime('%Y-%m-%d-%H:%M:%S.%f')}{pid}] " else: return "" -def msg(message: Any, *args: Any, newline: bool = True) -> None: +def msg(message: Union[Exception, str], *args: str, newline: bool = True) -> None: """Print a message to the console.""" if not msg_enabled(): return @@ -178,24 +181,27 @@ def msg(message: Any, *args: Any, newline: bool = True) -> None: print(indent + _output_filter(str(arg))) -def info(message, *args, **kwargs): +def info( + message: Union[Exception, str], + *args, + format: str = "*b", + stream: Optional[io.IOBase] = None, + wrap: bool = False, + break_long_words: bool = False, + countback: int = 3, +) -> None: """Print an informational message.""" if isinstance(message, Exception): - message = "%s: %s" % (message.__class__.__name__, str(message)) - - format = kwargs.get("format", "*b") - stream = kwargs.get("stream", sys.stdout) - wrap = kwargs.get("wrap", False) - break_long_words = kwargs.get("break_long_words", False) - st_countback = kwargs.get("countback", 3) + message = f"{message.__class__.__name__}: {str(message)}" + stream = stream or sys.stdout st_text = "" if _stacktrace: - st_text = process_stacktrace(st_countback) + st_text = process_stacktrace(countback) cprint( "@%s{%s==>} %s%s" % (format, st_text, get_timestamp(), cescape(_output_filter(str(message)))), - stream=stream, + stream=stream, # type: ignore[arg-type] ) for arg in args: if wrap: @@ -212,56 +218,64 @@ def info(message, *args, **kwargs): stream.flush() -def verbose(message, *args, **kwargs): +def verbose(message, *args, format: str = "c", **kwargs) -> None: """Print a verbose message if the verbose flag is set.""" if _verbose: - kwargs.setdefault("format", "c") - info(message, *args, **kwargs) + info(message, *args, format=format, **kwargs) -def debug(message, *args, **kwargs): +def debug( + message, *args, level: int = 1, format: str = "g", stream: Optional[io.IOBase] = None, **kwargs +) -> None: """Print a debug message if the debug level is set.""" - level = kwargs.get("level", 1) if is_debug(level): - kwargs.setdefault("format", "g") - kwargs.setdefault("stream", sys.stderr) - info(message, *args, **kwargs) + stream_arg = stream or sys.stderr + info(message, *args, format=format, stream=stream_arg, **kwargs) # type: ignore[arg-type] -def error(message, *args, **kwargs): +def error( + message, *args, format: str = "*r", stream: Optional[io.IOBase] = None, **kwargs +) -> None: """Print an error message.""" if not error_enabled(): return - kwargs.setdefault("format", "*r") - kwargs.setdefault("stream", sys.stderr) - info("Error: " + str(message), *args, **kwargs) + stream = stream or sys.stderr + info( + f"Error: {message}", + *args, + format=format, + stream=stream, # type: ignore[arg-type] + **kwargs, + ) -def warn(message, *args, **kwargs): +def warn(message, *args, format: str = "*Y", stream: Optional[io.IOBase] = None, **kwargs) -> None: """Print a warning message.""" if not warn_enabled(): return - kwargs.setdefault("format", "*Y") - kwargs.setdefault("stream", sys.stderr) - info("Warning: " + str(message), *args, **kwargs) + stream = stream or sys.stderr + info( + f"Warning: {message}", + *args, + format=format, + stream=stream, # type: ignore[arg-type] + **kwargs, + ) -def die(message, *args, **kwargs) -> NoReturn: - kwargs.setdefault("countback", 4) - error(message, *args, **kwargs) +def die(message, *args, countback: int = 4, **kwargs) -> NoReturn: + error(message, *args, countback=countback, **kwargs) 
sys.exit(1) -def get_yes_or_no(prompt, **kwargs): - default_value = kwargs.get("default", None) - - if default_value is None: +def get_yes_or_no(prompt: str, default: Optional[bool] = None) -> Optional[bool]: + if default is None: prompt += " [y/n] " - elif default_value is True: + elif default is True: prompt += " [Y/n] " - elif default_value is False: + elif default is False: prompt += " [y/N] " else: raise ValueError("default for get_yes_no() must be True, False, or None.") @@ -271,7 +285,7 @@ def get_yes_or_no(prompt, **kwargs): msg(prompt, newline=False) ans = input().lower() if not ans: - result = default_value + result = default if result is None: print("Please enter yes or no.") else: @@ -282,21 +296,14 @@ def get_yes_or_no(prompt, **kwargs): return result -def hline(label=None, **kwargs): +def hline(label: Optional[str] = None, *, char: str = "-", max_width: int = 64) -> None: """Draw a labeled horizontal line. - Keyword Arguments: - char (str): Char to draw the line with. Default '-' - max_width (int): Maximum width of the line. Default is 64 chars. + Args: + char: char to draw the line with + max_width: maximum width of the line """ - char = kwargs.pop("char", "-") - max_width = kwargs.pop("max_width", 64) - if kwargs: - raise TypeError( - "'%s' is an invalid keyword argument for this function." % next(kwargs.iterkeys()) - ) - - rows, cols = terminal_size() + cols = shutil.get_terminal_size().columns if not cols: cols = max_width else: @@ -313,45 +320,3 @@ def hline(label=None, **kwargs): out.write(suffix) print(out.getvalue()) - - -def terminal_size() -> Tuple[int, int]: - """Gets the dimensions of the console: (rows, cols).""" - - def get_env_fallback() -> Tuple[int, int]: - """Get terminal size from environment variables with defaults.""" - rows = int(os.environ.get("LINES", 25)) - cols = int(os.environ.get("COLUMNS", 80)) - return rows, cols - - def is_valid_size(rows: int, cols: int) -> bool: - """Check if terminal dimensions are valid (positive integers).""" - return rows > 0 and cols > 0 - - # Try dynamic detection on Unix-like systems - if _platform != "win32": - - def ioctl_gwinsz(fd): - try: - rc = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234")) - return rc if is_valid_size(rc[0], rc[1]) else None - except (OSError, ValueError): - return None - - # Try standard file descriptors first - rc = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) - - # If that fails, try opening the controlling terminal - if not rc: - try: - with open(os.ctermid(), "rb") as f: - rc = ioctl_gwinsz(f.fileno()) - except (OSError, ValueError): - pass - - # Return dynamic size if valid, otherwise fall back to environment - if rc: - return rc - - # Fallback to environment variables (Windows or failed Unix detection) - return get_env_fallback() diff --git a/lib/spack/spack/llnl/util/tty/colify.py b/lib/spack/spack/llnl/util/tty/colify.py index cfc34fef810db1..beeb535887b0f0 100644 --- a/lib/spack/spack/llnl/util/tty/colify.py +++ b/lib/spack/spack/llnl/util/tty/colify.py @@ -7,26 +7,28 @@ """ import io import os +import shutil import sys from typing import IO, Any, List, Optional -from spack.llnl.util.tty import terminal_size from spack.llnl.util.tty.color import cextra, clen class ColumnConfig: - def __init__(self, cols): + def __init__(self, cols: int) -> None: self.cols = cols self.line_length = 0 self.valid = True self.widths = [0] * cols # does not include ansi colors - def __repr__(self): + def __repr__(self) -> str: attrs = [(a, getattr(self, a)) for a in dir(self) if not 
a.startswith("__")] - return "" % ", ".join("%s: %r" % a for a in attrs) + return f"" -def config_variable_cols(elts, console_width, padding, cols=0): +def config_variable_cols( + elts: List[str], console_width: int, padding: int, cols: int = 0 +) -> ColumnConfig: """Variable-width column fitting algorithm. This function determines the most columns that can fit in the @@ -35,7 +37,8 @@ def config_variable_cols(elts, console_width, padding, cols=0): the width of its own longest element. This packs elements more efficiently on screen. - If cols is nonzero, force + If cols is nonzero, force the table to use that many columns and + just add minimal padding between the columns. """ if cols < 0: raise ValueError("cols must be non-negative.") @@ -54,7 +57,8 @@ def config_variable_cols(elts, console_width, padding, cols=0): for i, length in enumerate(lengths): for conf in configs: if conf.valid: - col = i // ((len(elts) + conf.cols - 1) // conf.cols) + rows = (len(elts) + conf.cols - 1) // conf.cols + col = i // rows p = padding if col < (conf.cols - 1) else 0 if conf.widths[col] < (length + p): @@ -63,17 +67,29 @@ def config_variable_cols(elts, console_width, padding, cols=0): conf.valid = conf.line_length < console_width try: + # take the last valid config in the list (the one with most columns) config = next(conf for conf in reversed(configs) if conf.valid) except StopIteration: - # If nothing was valid the screen was too narrow -- just use 1 col. + # If nothing was valid, the screen was too narrow -- use 1 col if cols was not + # specified, otherwise, use the requested columns and overflow. config = configs[0] - + if cols: + rows = (len(lengths) + cols - 1) // cols + config.widths = [ + max(length for i, length in enumerate(lengths) if i // rows == c) + + (padding if c < cols - 1 else 0) + for c in range(cols) + ] + + # trim off any columns with nothing in them config.widths = [w for w in config.widths if w != 0] config.cols = len(config.widths) return config -def config_uniform_cols(elts, console_width, padding, cols=0): +def config_uniform_cols( + elts: List[str], console_width: int, padding: int, cols: int = 0 +) -> ColumnConfig: """Uniform-width column fitting algorithm. Determines the longest element in the list, and determines how @@ -97,6 +113,7 @@ def config_uniform_cols(elts, console_width, padding, cols=0): def colify( elts: List[Any], + *, cols: int = 0, output: Optional[IO] = None, indent: int = 0, @@ -110,7 +127,7 @@ def colify( uniform-width and variable-width (tighter) columns. If elts is not a list of strings, each element is first converted - using ``str()``. + using :class:`str`. Keyword Arguments: output: A file object to write to. Default is ``sys.stdout`` @@ -138,20 +155,19 @@ def colify( env_size = os.environ.get("COLIFY_SIZE") if env_size: try: - r, c = env_size.split("x") - console_rows, console_cols = int(r), int(c) + console_cols = int(env_size.partition("x")[2]) tty = True - except BaseException: + except ValueError: pass - # Use only one column if not a tty. - if not tty: + # Use only one column if not a tty, unless cols specified explicitly + if not cols and not tty: if tty is False or not output.isatty(): cols = 1 # Specify the number of character columns to use. 
if console_cols is None: - console_rows, console_cols = terminal_size() + console_cols = shutil.get_terminal_size().columns elif not isinstance(console_cols, int): raise ValueError("Number of columns must be an int") @@ -192,6 +208,7 @@ def colify( def colify_table( table: List[List[Any]], + *, output: Optional[IO] = None, indent: int = 0, padding: int = 2, @@ -235,6 +252,7 @@ def transpose(): def colified( elts: List[Any], + *, cols: int = 0, indent: int = 0, padding: int = 2, diff --git a/lib/spack/spack/llnl/util/tty/color.py b/lib/spack/spack/llnl/util/tty/color.py index 88592a59a12e6b..290ca8d70c2cbc 100644 --- a/lib/spack/spack/llnl/util/tty/color.py +++ b/lib/spack/spack/llnl/util/tty/color.py @@ -6,69 +6,71 @@ This file implements an expression syntax, similar to ``printf``, for adding ANSI colors to text. -See ``colorize()``, ``cwrite()``, and ``cprint()`` for routines that can +See :func:`colorize`, :func:`cwrite`, and :func:`cprint` for routines that can generate colored output. -``colorize`` will take a string and replace all color expressions with +:func:`colorize` will take a string and replace all color expressions with ANSI control codes. If the ``isatty`` keyword arg is set to False, then the color expressions will be converted to null strings, and the returned string will have no color. -``cwrite`` and ``cprint`` are equivalent to ``write()`` and ``print()`` +:func:`cwrite` and :func:`cprint` are equivalent to ``write()`` and ``print()`` calls in python, but they colorize their output. If the ``stream`` argument is not supplied, they write to ``sys.stdout``. Here are some example color expressions: -========== ============================================================ -Expression Meaning -========== ============================================================ -@r Turn on red coloring -@R Turn on bright red coloring -@*{foo} Bold foo, but don't change text color -@_{bar} Underline bar, but don't change text color -@*b Turn on bold, blue text -@_B Turn on bright blue text with an underline -@. Revert to plain formatting -@*g{green} Print out 'green' in bold, green text, then reset to plain. -@*ggreen@. Print out 'green' in bold, green text, then reset to plain. -========== ============================================================ +============== ============================================================ +Expression Meaning +============== ============================================================ +``@r`` Turn on red coloring +``@R`` Turn on bright red coloring +``@*{foo}`` Bold foo, but don't change text color +``@_{bar}`` Underline bar, but don't change text color +``@*b`` Turn on bold, blue text +``@_B`` Turn on bright blue text with an underline +``@.`` Revert to plain formatting +``@*g{green}`` Print out 'green' in bold, green text, then reset to plain. +``@*ggreen@.`` Print out 'green' in bold, green text, then reset to plain. +============== ============================================================ The syntax consists of: -========== ================================================= -color-expr '@' [style] color-code '{' text '}' | '@.' | '@@' -style '*' | '_' -color-code [krgybmcwKRGYBMCW] -text .* -========== ================================================= - -'@' indicates the start of a color expression. It can be followed -by an optional * or _ that indicates whether the font should be bold or -underlined. If * or _ is not provided, the text will be plain. Then -an optional color code is supplied. 
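To make the expression table concrete, here is how the public helpers render these expressions (the escape sequences shown in comments are illustrative, derived from the syntax described here)::

    from spack.llnl.util.tty.color import colorize, cprint

    s = colorize("@*g{OK}@. all tests passed", color=True)
    # s now contains ANSI escapes, e.g. "\x1b[1;32mOK\x1b[0m all tests passed"
    cprint("@R{FAILED}@. see the build log")         # colorized print, adds a newline
    plain = colorize("@_b{heading}@.", color=False)  # "heading": expressions become no-ops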
This can be [krgybmcw] or [KRGYBMCW], -where the letters map to black(k), red(r), green(g), yellow(y), blue(b), -magenta(m), cyan(c), and white(w). Lowercase letters denote normal ANSI +========== ===================================================== +color-expr ``'@' [style] color-code '{' text '}' | '@.' | '@@'`` +style ``'*' | '_'`` +color-code ``[krgybmcwKRGYBMCW]`` +text ``.*`` +========== ===================================================== + +``@`` indicates the start of a color expression. It can be followed +by an optional ``*`` or ``_`` that indicates whether the font should be bold or +underlined. If ``*`` or ``_`` is not provided, the text will be plain. Then +an optional color code is supplied. This can be ``[krgybmcw]`` or ``[KRGYBMCW]``, +where the letters map to ``black(k)``, ``red(r)``, ``green(g)``, ``yellow(y)``, ``blue(b)``, +``magenta(m)``, ``cyan(c)``, and ``white(w)``. Lowercase letters denote normal ANSI colors and capital letters denote bright ANSI colors. -Finally, the color expression can be followed by text enclosed in {}. If +Finally, the color expression can be followed by text enclosed in ``{}``. If braces are present, only the text in braces is colored. If the braces are NOT present, then just the control codes to enable the color will be output. -The console can be reset later to plain text with '@.'. +The console can be reset later to plain text with ``@.``. -To output an @, use '@@'. To output a } inside braces, use '}}'. +To output an ``@``, use ``@@``. To output a ``}`` inside braces, use ``}}``. """ +import io import os import re import sys +import textwrap from contextlib import contextmanager -from typing import Optional +from typing import Iterator, List, NamedTuple, Optional, Tuple, Union class ColorParseError(Exception): """Raised when a color format fails to parse.""" - def __init__(self, message): + def __init__(self, message: str) -> None: super().__init__(message) @@ -102,17 +104,19 @@ def __init__(self, message): color_when_values = {"always": True, "auto": None, "never": False} -def _color_when_value(when): +def _color_when_value(when: Union[str, bool, None]) -> Optional[bool]: """Raise a ValueError for an invalid color setting. Valid values are 'always', 'never', and 'auto', or equivalently, True, False, and None. 
""" - if when in color_when_values: - return color_when_values[when] - elif when not in color_when_values.values(): - raise ValueError("Invalid color setting: %s" % when) - return when + if isinstance(when, bool) or when is None: + return when + + elif when not in color_when_values: + raise ValueError(f"Invalid color setting: {when}") + + return color_when_values[when] def _color_from_environ() -> Optional[bool]: @@ -126,7 +130,7 @@ def _color_from_environ() -> Optional[bool]: _force_color = _color_from_environ() -def try_enable_terminal_color_on_windows(): +def try_enable_terminal_color_on_windows() -> None: """Turns coloring in Windows terminal by enabling VTP in Windows consoles (CMD/PWSH/CONHOST) Method based on the link below https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#example-of-enabling-virtual-terminal-processing @@ -181,47 +185,49 @@ def _err_check(result, func, args): _force_color = False -def get_color_when(): +def get_color_when() -> bool: """Return whether commands should print color or not.""" if _force_color is not None: return _force_color return sys.stdout.isatty() -def set_color_when(when): +def set_color_when(when: Union[str, bool, None]) -> None: """Set when color should be applied. Options are: - * True or 'always': always print color - * False or 'never': never print color - * None or 'auto': only print color if sys.stdout is a tty. + * True or ``"always"``: always print color + * False or ``"never"``: never print color + * None or ``"auto"``: only print color if sys.stdout is a tty. """ global _force_color _force_color = _color_when_value(when) @contextmanager -def color_when(value): +def color_when(value: Union[str, bool, None]) -> Iterator[None]: """Context manager to temporarily use a particular color setting.""" - old_value = value + old_value = _force_color set_color_when(value) yield set_color_when(old_value) -def _escape(s: str, color: bool, enclose: bool, zsh: bool) -> str: - """Returns a TTY escape sequence for a color""" - if color: - if zsh: - result = rf"\e[0;{s}m" - else: - result = f"\033[{s}m" +_ConvertibleToStr = Union[str, int, bool, None] - if enclose: - result = rf"\[{result}\]" - return result - else: +def _escape(s: _ConvertibleToStr, color: bool, enclose: bool, zsh: bool) -> str: + """Returns a TTY escape sequence for a color""" + if not color: return "" + elif zsh: + return f"\033[0;{s}m" + + result = f"\033[{s}m" + + if enclose: + result = rf"\[{result}\]" + + return result def colorize( @@ -231,20 +237,16 @@ def colorize( Args: string: The string to replace - - Returns: - The filtered string - - Keyword Arguments: color: If False, output will be plain text without control codes, for output to non-console devices (default: automatically choose color or not) - enclose: If True, enclose ansi color sequences with - square brackets to prevent misestimation of terminal width. + enclose: If True, enclose ansi color sequences with square brackets to prevent + misestimation of terminal width. zsh: If True, use zsh ansi codes instead of bash ones (for variables like PS1) """ - color = color if color is not None else get_color_when() + if color is None: + color = get_color_when() - def match_to_ansi(match): + def match_to_ansi(match) -> str: """Convert a match object generated by ``COLOR_RE`` into an ansi color code. This can be used as a handler in ``re.sub``. 
""" @@ -270,17 +272,104 @@ def match_to_ansi(match): return COLOR_RE.sub(match_to_ansi, string).replace("}}", "}") -def clen(string): +#: matches a standard ANSI color code +ANSI_CODE_RE = re.compile(r"\033[^m]*m") + + +def csub(string: str) -> str: + """Return the string with ANSI color sequences removed.""" + return ANSI_CODE_RE.sub("", string) + + +class ColorMapping(NamedTuple): + color: str #: color string + colors: List[str] #: ANSI color codes in the color string, in order + offsets: List[Tuple[int, int]] #: map indices in plain string to offsets in color string + + def plain_to_color(self, index: int) -> int: + """Convert plain string index to color index.""" + offset = 0 + for i, off in self.offsets: + if i > index: + break + offset = off + return index + offset + + +def cmapping(string: str) -> ColorMapping: + """Return a mapping for translating indices in a plain string to indices in colored text. + + The returned dictionary maps indices in the plain string to the offset of the cooresponding + indices in the colored string. + + """ + colors = [] + offsets = [] + color_offset = 0 + + for m in ANSI_CODE_RE.finditer(string): + start, end = m.start(), m.end() + start_offset = color_offset + color_offset += end - start + offsets.append((start - start_offset, color_offset)) + colors.append(m.group()) + + return ColorMapping(string, colors, offsets) + + +def cwrap( + string: str, *, initial_indent: str = "", subsequent_indent: str = "", **kwargs +) -> List[str]: + """Wrapper around ``textwrap.wrap()`` that handles ANSI color codes.""" + plain = csub(string) + lines = textwrap.wrap( + plain, initial_indent=initial_indent, subsequent_indent=subsequent_indent, **kwargs + ) + + # do nothing if string has no ANSI codes + if plain == string: + return lines + + # otherwise add colors back to lines after wrapping plain text + cmap = cmapping(string) + + clines = [] + start = 0 + for i, line in enumerate(lines): + # scan to find the actual start, skipping any whitespace from a prior line break + # can assume this b/c textwrap only collapses whitespace at line breaks + while start < len(plain) and plain[start].isspace(): + start += 1 + + # map the start and end positions in the plain string to the color string + cstart = cmap.plain_to_color(start) + + # rewind to include any color codes before cstart + while cstart and string[cstart - 1] == "m": + cstart = string.rfind("\033", 0, cstart - 1) + + indent = initial_indent if i == 0 else subsequent_indent + end = start + len(line) - len(indent) + cend = cmap.plain_to_color(end) + + # append the color line to the result + clines.append(indent + string[cstart:cend]) + start = end + + return clines + + +def clen(string: str) -> int: """Return the length of a string, excluding ansi color sequences.""" - return len(re.sub(r"\033[^m]*m", "", string)) + return len(csub(string)) -def cextra(string): +def cextra(string: str) -> int: """Length of extra color characters in a string""" return len("".join(re.findall(r"\033[^m]*m", string))) -def cwrite(string, stream=None, color=None): +def cwrite(string: str, stream: Optional[io.IOBase] = None, color: Optional[bool] = None) -> None: """Replace all color expressions in string with ANSI control codes and write the result to the stream. If color is False, this will write plain text with no color. 
If True, @@ -293,9 +382,8 @@ def cwrite(string, stream=None, color=None): stream.write(colorize(string, color=color)) -def cprint(string, stream=None, color=None): +def cprint(string: str, stream: Optional[io.IOBase] = None, color: Optional[bool] = None) -> None: """Same as cwrite, but writes a trailing newline to the stream.""" - stream = sys.stdout if stream is None else stream cwrite(string + "\n", stream, color) @@ -319,12 +407,11 @@ def cescape(string: str) -> str: class ColorStream: - def __init__(self, stream, color=None): + def __init__(self, stream: io.IOBase, color: Optional[bool] = None) -> None: self._stream = stream self._color = color - def write(self, string, **kwargs): - raw = kwargs.get("raw", False) + def write(self, string: str, *, raw: bool = False) -> None: raw_write = getattr(self._stream, "write") color = self._color @@ -334,8 +421,3 @@ def write(self, string, **kwargs): else: color = get_color_when() raw_write(colorize(string, color=color)) - - def writelines(self, sequence, **kwargs): - raw = kwargs.get("raw", False) - for string in sequence: - self.write(string, self.color, raw=raw) diff --git a/lib/spack/spack/llnl/util/tty/log.py b/lib/spack/spack/llnl/util/tty/log.py index 5d245c3b716549..5072624e5e305f 100644 --- a/lib/spack/spack/llnl/util/tty/log.py +++ b/lib/spack/spack/llnl/util/tty/log.py @@ -138,7 +138,7 @@ class keyboard_input(preserve_terminal_settings): the stream immediately, and they are not printed to the terminal. Typically, standard input is line-buffered, which means keypresses won't be sent until the user hits return. In this mode, a - user can hit, e.g., 'v', and it will be read on the other end of the + user can hit, e.g., ``v``, and it will be read on the other end of the pipe immediately but not printed. The handler takes care to ensure that terminal changes only take @@ -306,74 +306,6 @@ def __getattr__(self, attr): return getattr(self.stream, attr) -def _file_descriptors_work(*streams): - """Whether we can get file descriptors for the streams specified. - - This tries to call ``fileno()`` on all streams in the argument list, - and returns ``False`` if anything goes wrong. - - This can happen, when, e.g., the test framework replaces stdout with - a ``StringIO`` object. - - We have to actually try this to see whether it works, rather than - checking for the fileno attribute, because frameworks like pytest add - dummy fileno methods on their dummy file objects that return - ``UnsupportedOperationErrors``. - - """ - # test whether we can get fds for out and error - try: - for stream in streams: - stream.fileno() - return True - except BaseException: - return False - - -class FileWrapper: - """Represents a file. Can be an open stream, a path to a file (not opened - yet), or neither. When unwrapped, it returns an open file (or file-like) - object. - """ - - def __init__(self, file_like, append=False): - # This records whether the file-like object returned by "unwrap" is - # purely in-memory. In that case a subprocess will need to explicitly - # transmit the contents to the parent. 
- self.write_in_parent = False - - self.file_like = file_like - self.append = append - - if isinstance(file_like, str): - self.open = True - elif _file_descriptors_work(file_like): - self.open = False - else: - self.file_like = None - self.open = True - self.write_in_parent = True - - self.file = None - - def unwrap(self): - if self.open: - if self.file_like: - mode = "a" if self.append else "w" - self.file = open(self.file_like, mode, encoding="utf-8") - else: - self.file = io.StringIO() - return self.file - else: - # We were handed an already-open file object. In this case we also - # will not actually close the object when requested to. - return self.file_like - - def close(self): - if self.file: - self.file.close() - - def log_output(*args, **kwargs): """Context manager that logs its output to a file. @@ -417,17 +349,15 @@ class nixlog: process and the daemon. The daemon writes our output to both the file and to stdout (if echoing). The parent process can communicate with the daemon to tell it when and when not to echo; this is what - force_echo does. You can also enable/disable echoing by typing 'v'. + force_echo does. You can also enable/disable echoing by typing ``v``. - We try to use OS-level file descriptors to do the redirection, but if - stdout or stderr has been set to some Python-level file object, we - use Python-level redirection instead. This allows the redirection to - work within test frameworks like nose and pytest. + We use OS-level file descriptors to do the redirection, which + redirects output for subprocesses and system calls. """ def __init__( self, - file_like=None, + filename: str, echo=False, debug=0, buffer=False, @@ -438,8 +368,7 @@ def __init__( """Create a new output log context manager. Args: - file_like (str or stream): open file object or name of file where - output should be logged + filename (str): path to file where output should be logged echo (bool): whether to echo output in addition to logging it debug (int): positive to enable tty debug mode during logging buffer (bool): pass buffer=True to skip unbuffering output; note @@ -448,10 +377,8 @@ def __init__( line of output append (bool): whether to append to file ('a' mode) - log_output can take either a file object or a filename. If a - filename is passed, the file will be opened and closed entirely - within ``__enter__`` and ``__exit__``. If a file object is passed, - this assumes the caller owns it and will close it. + The filename will be opened and closed entirely within ``__enter__`` + and ``__exit__``. By default, we unbuffer sys.stdout and sys.stderr because the logger will include output from executed programs and from python @@ -461,7 +388,7 @@ def __init__( Logger daemon is not started until ``__enter__()``. """ - self.file_like = file_like + self.filename = filename self.echo = echo self.debug = debug self.buffer = buffer @@ -470,45 +397,10 @@ def __init__( self._active = False # used to prevent re-entry - def __call__(self, file_like=None, echo=None, debug=None, buffer=None): - """This behaves the same as init. It allows a logger to be reused. - - Arguments are the same as for ``__init__()``. Args here take - precedence over those passed to ``__init__()``. - - With the ``__call__`` function, you can save state between uses - of a single logger. 
This is useful if you want to remember, - e.g., the echo settings for a prior ``with log_output()``:: - - logger = log_output() - - with logger('foo.txt'): - # log things; user can change echo settings with 'v' - - with logger('bar.txt'): - # log things; logger remembers prior echo settings. - - """ - if file_like is not None: - self.file_like = file_like - if echo is not None: - self.echo = echo - if debug is not None: - self.debug = debug - if buffer is not None: - self.buffer = buffer - return self - def __enter__(self): if self._active: raise RuntimeError("Can't re-enter the same log_output!") - if self.file_like is None: - raise RuntimeError("file argument must be set by either __init__ or __call__") - - # set up a stream for the daemon to write to - self.log_file = FileWrapper(self.file_like, append=self.append) - # record parent color settings before redirecting. We do this # because color output depends on whether the *original* stdout # is a TTY. New stdout won't be a TTY so we force colorization. @@ -526,25 +418,32 @@ def __enter__(self): # Currently only used to save echo value between uses self.parent_pipe, child_pipe = multiprocessing.Pipe(duplex=False) - # Sets a daemon that writes to file what it reads from a pipe + stdin_fd = None + stdout_fd = None try: # need to pass this b/c multiprocessing closes stdin in child. - input_fd = None try: if sys.stdin.isatty(): - input_fd = Connection(os.dup(sys.stdin.fileno())) + stdin_fd = Connection(os.dup(sys.stdin.fileno())) except BaseException: # just don't forward input if this fails pass + # If our process has redirected stdout after the forkserver was started, we need to + # make the forked processes use the new file descriptors. + if multiprocessing.get_start_method() == "forkserver": + stdout_fd = Connection(os.dup(sys.stdout.fileno())) + self.process = multiprocessing.Process( target=_writer_daemon, args=( - input_fd, + stdin_fd, + stdout_fd, read_fd, self.write_fd, self.echo, - self.log_file, + self.filename, + self.append, child_pipe, self.filter_fn, ), @@ -553,8 +452,10 @@ def __enter__(self): self.process.start() finally: - if input_fd: - input_fd.close() + if stdin_fd: + stdin_fd.close() + if stdout_fd: + stdout_fd.close() read_fd.close() # Flush immediately before redirecting so that anything buffered @@ -563,34 +464,18 @@ def __enter__(self): sys.stderr.flush() # Now do the actual output redirection. - self.use_fds = _file_descriptors_work(sys.stdout, sys.stderr) - if self.use_fds: - # We try first to use OS-level file descriptors, as this - # redirects output for subprocesses and system calls. - - # Save old stdout and stderr file descriptors - self._saved_stdout = os.dup(sys.stdout.fileno()) - self._saved_stderr = os.dup(sys.stderr.fileno()) + # We use OS-level file descriptors, as this + # redirects output for subprocesses and system calls. + self._redirected_fds = {} - # redirect to the pipe we created above - os.dup2(self.write_fd.fileno(), sys.stdout.fileno()) - os.dup2(self.write_fd.fileno(), sys.stderr.fileno()) - self.write_fd.close() + # sys.stdout and sys.stderr may have been replaced with file objects under pytest, so + # redirect their file descriptors in addition to the original fds 1 and 2. + fds = {sys.stdout.fileno(), sys.stderr.fileno(), 1, 2} + for fd in fds: + self._redirected_fds[fd] = os.dup(fd) + os.dup2(self.write_fd.fileno(), fd) - else: - # Handle I/O the Python way. 
This won't redirect lower-level - # output, but it's the best we can do, and the caller - # shouldn't expect any better, since *they* have apparently - # redirected I/O the Python way. - - # Save old stdout and stderr file objects - self._saved_stdout = sys.stdout - self._saved_stderr = sys.stderr - - # create a file object for the pipe; redirect to it. - pipe_fd_out = os.fdopen(self.write_fd.fileno(), "w", closefd=False) - sys.stdout = pipe_fd_out - sys.stderr = pipe_fd_out + self.write_fd.close() # Unbuffer stdout and stderr at the Python level if not self.buffer: @@ -613,23 +498,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.flush() sys.stderr.flush() - # restore previous output settings, either the low-level way or - # the python way - if self.use_fds: - os.dup2(self._saved_stdout, sys.stdout.fileno()) - os.close(self._saved_stdout) - - os.dup2(self._saved_stderr, sys.stderr.fileno()) - os.close(self._saved_stderr) - else: - sys.stdout = self._saved_stdout - sys.stderr = self._saved_stderr - self.write_fd.close() - - # print log contents in parent if needed. - if self.log_file.write_in_parent: - string = self.parent_pipe.recv() - self.file_like.write(string) + # restore previous output settings using the OS-level way + for fd, saved_fd in self._redirected_fds.items(): + os.dup2(saved_fd, fd) + os.close(saved_fd) # recover and store echo settings from the child before it dies try: @@ -743,19 +615,18 @@ class winlog: Similar to nixlog, with underlying functionality ported to support Windows. - Does not support the use of 'v' toggling as nixlog does. + Does not support the use of ``v`` toggling as nixlog does. """ def __init__( - self, file_like=None, echo=False, debug=0, buffer=False, filter_fn=None, append=False + self, filename: str, echo=False, debug=0, buffer=False, filter_fn=None, append=False ): self.debug = debug self.echo = echo - self.logfile = file_like + self.logfile = filename self.stdout = StreamWrapper("stdout") self.stderr = StreamWrapper("stderr") self._active = False - self._ioflag = False self.old_stdout = sys.stdout self.old_stderr = sys.stderr self.append = append @@ -764,69 +635,55 @@ def __enter__(self): if self._active: raise RuntimeError("Can't re-enter the same log_output!") - if self.logfile is None: - raise RuntimeError("file argument must be set by __init__ ") - # Open both write and reading on logfile - if isinstance(self.logfile, io.StringIO): - self._ioflag = True - # cannot have two streams on tempfile, so we must make our own - sys.stdout = self.logfile - sys.stderr = self.logfile - else: - write_mode = "ab+" if self.append else "wb+" - self.writer = open(self.logfile, mode=write_mode) - self.reader = open(self.logfile, mode="rb+") - - # Dup stdout so we can still write to it after redirection - self.echo_writer = open(os.dup(sys.stdout.fileno()), "w", encoding=sys.stdout.encoding) - # Redirect stdout and stderr to write to logfile - self.stderr.redirect_stream(self.writer.fileno()) - self.stdout.redirect_stream(self.writer.fileno()) - self._kill = threading.Event() - - def background_reader(reader, echo_writer, _kill): - # for each line printed to logfile, read it - # if echo: write line to user - try: - while True: - is_killed = _kill.wait(0.1) - # Flush buffered build output to file - # stdout/err fds refer to log file - self.stderr.flush() - self.stdout.flush() - - line = reader.readline() - if self.echo and line: - echo_writer.write("{0}".format(line.decode())) - echo_writer.flush() - - if is_killed: - break - finally: - 
reader.close() - - self._active = True - self._thread = Thread( - target=background_reader, args=(self.reader, self.echo_writer, self._kill) - ) - self._thread.start() + write_mode = "ab+" if self.append else "wb+" + self.writer = open(self.logfile, mode=write_mode) + self.reader = open(self.logfile, mode="rb+") + + # Dup stdout so we can still write to it after redirection + self.echo_writer = open(os.dup(sys.stdout.fileno()), "w", encoding=sys.stdout.encoding) + # Redirect stdout and stderr to write to logfile + self.stderr.redirect_stream(self.writer.fileno()) + self.stdout.redirect_stream(self.writer.fileno()) + self._kill = threading.Event() + + def background_reader(reader, echo_writer, _kill): + # for each line printed to logfile, read it + # if echo: write line to user + try: + while True: + is_killed = _kill.wait(0.1) + # Flush buffered build output to file + # stdout/err fds refer to log file + self.stderr.flush() + self.stdout.flush() + + line = reader.readline() + if self.echo and line: + echo_writer.write("{0}".format(line.decode())) + echo_writer.flush() + + if is_killed: + break + finally: + reader.close() + + self._active = True + self._thread = Thread( + target=background_reader, args=(self.reader, self.echo_writer, self._kill) + ) + self._thread.start() return self def __exit__(self, exc_type, exc_val, exc_tb): - if self._ioflag: - sys.stdout = self.old_stdout - sys.stderr = self.old_stderr - self._ioflag = False - else: - self.writer.close() - self.echo_writer.flush() - self.stdout.flush() - self.stderr.flush() - self._kill.set() - self._thread.join() - self.stdout.close() - self.stderr.close() + self.writer.close() + self.echo_writer.flush() + self.stdout.flush() + self.stderr.flush() + self._kill.set() + self._thread.join() + self.stdout.close() + self.stderr.close() self._active = False @contextmanager @@ -839,10 +696,12 @@ def force_echo(self): def _writer_daemon( stdin_fd: Optional[Connection], + stdout_fd: Optional[Connection], read_fd: Connection, write_fd: Connection, echo: bool, - log_file_wrapper: FileWrapper, + log_filename: str, + append: bool, control_fd: Connection, filter_fn: Optional[Callable[[str], str]], ) -> None: @@ -876,17 +735,15 @@ def _writer_daemon( In addition to the input and output file descriptors, the daemon interacts with the parent via ``control_pipe``. It reports whether - ``stdout`` was enabled or disabled when it finished and, if the - ``log_file`` is a ``StringIO`` object, then the daemon also sends the - logged output back to the parent as a string, to be written to the - ``StringIO`` in the parent. This is mainly for testing. + ``stdout`` was enabled or disabled when it finished. 
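Because redirection now always happens at the file-descriptor level, output from subprocesses and C extensions is captured along with Python-level prints. A minimal usage sketch (log file name hypothetical)::

    import subprocess

    import spack.llnl.util.tty.log as log

    with log.log_output("build.out", echo=True):
        print("from python")                        # captured, and echoed to the terminal
        subprocess.run(["echo", "from a child"])    # captured too: fds 1 and 2 are redirected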
Arguments: stdin_fd: optional input from the terminal read_fd: pipe for reading from parent's redirected stdout echo: initial echo setting -- controlled by user and preserved across multiple writer daemons - log_file_wrapper: file to log all output + log_filename: filename where output should be logged + append: whether to append to the file or overwrite it control_pipe: multiprocessing pipe on which to send control information to the parent filter_fn: optional function to filter each line of output @@ -908,11 +765,14 @@ def _writer_daemon( else: stdin_file = None + if stdout_fd: + os.dup2(stdout_fd.fileno(), sys.stdout.fileno()) + stdout_fd.close() + # list of streams to select from istreams = [read_file, stdin_file] if stdin_file else [read_file] force_echo = False # parent can force echo for certain output - - log_file = log_file_wrapper.unwrap() + log_file = open(log_filename, mode="a" if append else "w", encoding="utf-8") try: with keyboard_input(stdin_file) as kb: @@ -923,7 +783,7 @@ def _writer_daemon( # wait for input from any stream. use a coarse timeout to # allow other checks while we wait for input - rlist, _, _ = _retry(select.select)(istreams, [], [], 1e-1) + rlist, _, _ = select.select(istreams, [], [], 0.1) # Allow user to toggle echo with 'v' key. # Currently ignores other chars. @@ -947,7 +807,7 @@ def _writer_daemon( try: while line_count < 100: # Handle output from the calling process. - line = _retry(read_file.readline)() + line = read_file.readline() if not line: return @@ -993,10 +853,7 @@ def _writer_daemon( traceback.print_exc() finally: - # send written data back to parent if we used a StringIO - if isinstance(log_file, io.StringIO): - control_fd.send(log_file.getvalue()) - log_file_wrapper.close() + log_file.close() read_fd.close() if stdin_fd: stdin_fd.close() @@ -1005,42 +862,5 @@ def _writer_daemon( control_fd.send(echo) -def _retry(function): - """Retry a call if errors indicating an interrupted system call occur. - - Interrupted system calls return -1 and set ``errno`` to ``EINTR`` if - certain flags are not set. Newer Pythons automatically retry them, - but older Pythons do not, so we need to retry the calls. - - This function converts a call like this: - - syscall(args) - - and makes it retry by wrapping the function like this: - - _retry(syscall)(args) - - This is a private function because EINTR is unfortunately raised in - different ways from different functions, and we only handle the ones - relevant for this file. - - """ - - def wrapped(*args, **kwargs): - while True: - try: - return function(*args, **kwargs) - except OSError as e: - if e.errno == errno.EINTR: - continue - raise - except select.error as e: - if e.args[0] == errno.EINTR: - continue - raise - - return wrapped - - def _input_available(f): return f in select.select([f], [], [], 0)[0] diff --git a/lib/spack/spack/llnl/util/tty/pty.py b/lib/spack/spack/llnl/util/tty/pty.py deleted file mode 100644 index 24db57dc095b48..00000000000000 --- a/lib/spack/spack/llnl/util/tty/pty.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -"""The pty module handles pseudo-terminals. - -Currently, the infrastructure here is only used to test spack.llnl.util.tty.log. - -If this is used outside a testing environment, we will want to reconsider -things like timeouts in ``ProcessController.wait()``, which are set to -get tests done quickly, not to avoid high CPU usage. 
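For reference, the primitives this removed module wrapped are available in the standard library; a minimal sketch using ``pty.fork`` (this is not the removed module's API)::

    import os
    import pty

    pid, controller_fd = pty.fork()
    if pid == 0:
        # child: stdio is attached to the pty, so it behaves like a terminal
        print("child isatty:", os.isatty(0))
        os._exit(0)
    # parent: read the child's output through the controller side of the pty
    print(os.read(controller_fd, 1024).decode(), end="")
    os.waitpid(pid, 0)
    os.close(controller_fd)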
- -Note: The functionality in this module is unsupported on Windows -""" -import multiprocessing -import os -import re -import signal -import sys -import time -import traceback - -import spack.llnl.util.tty.log as log -from spack.util.executable import which - -termios = None -try: - import termios as term_mod - - termios = term_mod -except ImportError: - pass - - -class ProcessController: - """Wrapper around some fundamental process control operations. - - This allows one process (the controller) to drive another (the - minion) similar to the way a shell would, by sending signals and I/O. - - """ - - def __init__(self, pid, controller_fd, timeout=1, sleep_time=1e-1, debug=False): - """Create a controller to manipulate the process with id ``pid`` - - Args: - pid (int): id of process to control - controller_fd (int): controller fd attached to pid's stdin - timeout (int): time in seconds for wait operations to time out - (default 1 second) - sleep_time (int): time to sleep after signals, to control the - signal rate of the controller (default 1e-1) - debug (bool): whether ``horizontal_line()`` and ``status()`` should - produce output when called (default False) - - ``sleep_time`` allows the caller to insert delays after calls - that signal or modify the controlled process. Python behaves very - poorly if signals arrive too fast, and drowning a Python process - with a Python handler with signals can kill the process and hang - our tests, so we throttle this a closer-to-interactive rate. - - """ - self.pid = pid - self.pgid = os.getpgid(pid) - self.controller_fd = controller_fd - self.timeout = timeout - self.sleep_time = sleep_time - self.debug = debug - - # we need the ps command to wait for process statuses - self.ps = which("ps", required=True) - - def get_canon_echo_attrs(self): - """Get echo and canon attributes of the terminal of controller_fd.""" - cfg = termios.tcgetattr(self.controller_fd) - return (bool(cfg[3] & termios.ICANON), bool(cfg[3] & termios.ECHO)) - - def horizontal_line(self, name): - """Labeled horizontal line for debugging.""" - if self.debug: - sys.stderr.write("------------------------------------------- %s\n" % name) - - def status(self): - """Print debug message with status info for the minion.""" - if self.debug: - canon, echo = self.get_canon_echo_attrs() - sys.stderr.write( - "canon: %s, echo: %s\n" % ("on" if canon else "off", "on" if echo else "off") - ) - sys.stderr.write("input: %s\n" % self.input_on()) - sys.stderr.write("bg: %s\n" % self.background()) - sys.stderr.write("\n") - - def input_on(self): - """True if keyboard input is enabled on the controller_fd pty.""" - return self.get_canon_echo_attrs() == (False, False) - - def background(self): - """True if pgid is in a background pgroup of controller_fd's tty.""" - return self.pgid != os.tcgetpgrp(self.controller_fd) - - def tstp(self): - """Send SIGTSTP to the controlled process.""" - self.horizontal_line("tstp") - os.killpg(self.pgid, signal.SIGTSTP) - time.sleep(self.sleep_time) - - def cont(self): - self.horizontal_line("cont") - os.killpg(self.pgid, signal.SIGCONT) - time.sleep(self.sleep_time) - - def fg(self): - self.horizontal_line("fg") - with log.ignore_signal(signal.SIGTTOU): - os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid)) - time.sleep(self.sleep_time) - - def bg(self): - self.horizontal_line("bg") - with log.ignore_signal(signal.SIGTTOU): - os.tcsetpgrp(self.controller_fd, os.getpgrp()) - time.sleep(self.sleep_time) - - def write(self, byte_string): - self.horizontal_line("write '%s'" 
% byte_string.decode("utf-8")) - os.write(self.controller_fd, byte_string) - - def wait(self, condition): - start = time.time() - while ((time.time() - start) < self.timeout) and not condition(): - time.sleep(1e-2) - assert condition() - - def wait_enabled(self): - self.wait(lambda: self.input_on() and not self.background()) - - def wait_disabled(self): - self.wait(lambda: not self.input_on() and self.background()) - - def wait_disabled_fg(self): - self.wait(lambda: not self.input_on() and not self.background()) - - def proc_status(self): - status = self.ps("-p", str(self.pid), "-o", "stat", output=str) - status = re.split(r"\s+", status.strip(), re.M) - return status[1] - - def wait_stopped(self): - self.wait(lambda: "T" in self.proc_status()) - - def wait_running(self): - self.wait(lambda: "T" not in self.proc_status()) - - -class PseudoShell: - """Sets up controller and minion processes with a PTY. - - You can create a ``PseudoShell`` if you want to test how some - function responds to terminal input. This is a pseudo-shell from a - job control perspective; ``controller_function`` and ``minion_function`` - are set up with a pseudoterminal (pty) so that the controller can drive - the minion through process control signals and I/O. - - The two functions should have signatures like this:: - - def controller_function(proc, ctl, **kwargs) - def minion_function(**kwargs) - - ``controller_function`` is spawned in its own process and passed three - arguments: - - proc - the ``multiprocessing.Process`` object representing the minion - ctl - a ``ProcessController`` object tied to the minion - kwargs - keyword arguments passed from ``PseudoShell.start()``. - - ``minion_function`` is only passed ``kwargs`` delegated from - ``PseudoShell.start()``. - - The ``ctl.controller_fd`` will have its ``controller_fd`` connected to - ``sys.stdin`` in the minion process. Both processes will share the - same ``sys.stdout`` and ``sys.stderr`` as the process instantiating - ``PseudoShell``. - - Here are the relationships between processes created:: - - ._________________________________________________________. - | Minion Process | pid 2 - | - runs minion_function | pgroup 2 - |_________________________________________________________| session 1 - ^ - | create process with controller_fd connected to stdin - | stdout, stderr are the same as caller - ._________________________________________________________. - | Controller Process | pid 1 - | - runs controller_function | pgroup 1 - | - uses ProcessController and controller_fd to | session 1 - | control minion | - |_________________________________________________________| - ^ - | create process - | stdin, stdout, stderr are the same as caller - ._________________________________________________________. - | Caller | pid 0 - | - Constructs, starts, joins PseudoShell | pgroup 0 - | - provides controller_function, minion_function | session 0 - |_________________________________________________________| - - """ - - def __init__(self, controller_function, minion_function): - self.proc = None - self.controller_function = controller_function - self.minion_function = minion_function - - # these can be optionally set to change defaults - self.controller_timeout = 3 - self.sleep_time = 0.1 - - def start(self, **kwargs): - """Start the controller and minion processes. - - Arguments: - kwargs (dict): arbitrary keyword arguments that will be - passed to controller and minion functions - - The controller process will create the minion, then call - ``controller_function``. 
The minion process will call - ``minion_function``. - - """ - self.proc = multiprocessing.Process( - target=PseudoShell._set_up_and_run_controller_function, - args=( - self.controller_function, - self.minion_function, - self.controller_timeout, - self.sleep_time, - ), - kwargs=kwargs, - ) - self.proc.start() - - def join(self): - """Wait for the minion process to finish, and return its exit code.""" - self.proc.join() - return self.proc.exitcode - - @staticmethod - def _set_up_and_run_minion_function( - tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs - ): - """Minion process wrapper for PseudoShell. - - Handles the mechanics of setting up a PTY, then calls - ``minion_function``. - - """ - # new process group, like a command or pipeline launched by a shell - os.setpgrp() - - # take controlling terminal and set up pty IO - stdin_fd = os.open(tty_name, os.O_RDWR) - os.dup2(stdin_fd, sys.stdin.fileno()) - os.dup2(stdout_fd, sys.stdout.fileno()) - os.dup2(stderr_fd, sys.stderr.fileno()) - os.close(stdin_fd) - - if kwargs.get("debug"): - sys.stderr.write("minion: stdin.isatty(): %s\n" % sys.stdin.isatty()) - - # tell the parent that we're really running - if kwargs.get("debug"): - sys.stderr.write("minion: ready!\n") - ready.value = True - - try: - minion_function(**kwargs) - except BaseException: - traceback.print_exc() - - @staticmethod - def _set_up_and_run_controller_function( - controller_function, minion_function, controller_timeout, sleep_time, **kwargs - ): - """Set up a pty, spawn a minion process, execute controller_function. - - Handles the mechanics of setting up a PTY, then calls - ``controller_function``. - - """ - os.setsid() # new session; this process is the controller - - controller_fd, minion_fd = os.openpty() - pty_name = os.ttyname(minion_fd) - - # take controlling terminal - pty_fd = os.open(pty_name, os.O_RDWR) - os.close(pty_fd) - - ready = multiprocessing.Value("i", False) - minion_process = multiprocessing.Process( - target=PseudoShell._set_up_and_run_minion_function, - args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(), ready, minion_function), - kwargs=kwargs, - ) - minion_process.start() - - # wait for subprocess to be running and connected. 
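The ready-flag handshake performed just below is a compact pattern worth seeing on its own; a minimal, self-contained sketch (illustrative, not part of the patch):

import multiprocessing
import time

def minion(ready):
    # ... pty and stdio setup would happen here ...
    ready.value = True  # tell the parent that setup is complete

if __name__ == "__main__":
    ready = multiprocessing.Value("i", False)
    proc = multiprocessing.Process(target=minion, args=(ready,))
    proc.start()
    while not ready.value:  # cheap busy-wait, mirroring the loop below
        time.sleep(1e-5)
    proc.join()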
- while not ready.value: - time.sleep(1e-5) - pass - - if kwargs.get("debug"): - sys.stderr.write("pid: %d\n" % os.getpid()) - sys.stderr.write("pgid: %d\n" % os.getpgrp()) - sys.stderr.write("sid: %d\n" % os.getsid(0)) - sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd)) - sys.stderr.write("\n") - - minion_pgid = os.getpgid(minion_process.pid) - sys.stderr.write("minion pid: %d\n" % minion_process.pid) - sys.stderr.write("minion pgid: %d\n" % minion_pgid) - sys.stderr.write("minion sid: %d\n" % os.getsid(minion_process.pid)) - sys.stderr.write("\n") - sys.stderr.flush() - # set up controller to ignore SIGTSTP, like a shell - signal.signal(signal.SIGTSTP, signal.SIG_IGN) - - # call the controller function once the minion is ready - try: - controller = ProcessController( - minion_process.pid, controller_fd, debug=kwargs.get("debug") - ) - controller.timeout = controller_timeout - controller.sleep_time = sleep_time - error = controller_function(minion_process, controller, **kwargs) - except BaseException: - error = 1 - traceback.print_exc() - - minion_process.join() - - # return whether either the parent or minion failed - return error or minion_process.exitcode diff --git a/lib/spack/spack/main.py b/lib/spack/spack/main.py index 08aa0129a935ec..5ec3d8fc574a59 100644 --- a/lib/spack/spack/main.py +++ b/lib/spack/spack/main.py @@ -8,22 +8,20 @@ after the system path is set up. """ import argparse - -# import spack.modules.common import inspect -import io import operator import os import pstats import re import shlex import signal -import subprocess as sp import sys import tempfile +import textwrap import traceback import warnings -from typing import Any, Callable, List, Tuple +from contextlib import contextmanager +from typing import Any, List, Optional, Set, Tuple import spack.vendor.archspec.cpu @@ -46,7 +44,6 @@ import spack.util.debug import spack.util.environment import spack.util.lock -from spack.llnl.util.tty.log import log_output from .enums import ConfigScopePriority @@ -57,26 +54,20 @@ levels = ["short", "long"] #: intro text for help at different levels -intro_by_level = { - "short": "These are common spack commands:", - "long": "Complete list of spack commands:", -} +intro_by_level = {"short": "Common spack commands:", "long": "Commands:"} #: control top-level spack options shown in basic vs. 
advanced help -options_by_level = {"short": ["h", "k", "V", "color"], "long": "all"} +options_by_level = {"short": ["e", "h", "k", "V", "color"], "long": "all"} #: Longer text for each section, to show in help section_descriptions = { - "admin": "administration", - "basic": "query packages", - "build": "build packages", - "config": "configuration", - "developer": "developer", + "query": "query packages", + "build": "build, install, and test packages", "environment": "environment", - "extensions": "extensions", - "help": "more help", + "config": "configuration", "packaging": "create packages", - "system": "system", + "admin": "administration", + "developer": "spack development", } #: preferential command order for some sections (e.g., build pipeline is @@ -148,6 +139,55 @@ def _format_actions_usage(self, actions, groups): usage = "[-%s] %s" % (chars, usage) return usage.strip() + def start_section(self, heading): + return super().start_section(color.colorize(f"@*B{{{heading}}}")) + + def _format_usage(self, usage, actions, groups, prefix=None): + # if no optionals or positionals are available, usage is just prog + if usage is None and not actions: + return super()._format_usage(usage, actions, groups, prefix) + + # add color *after* argparse aligns the text, so as not to interfere + result = super()._format_usage(usage, actions, groups, prefix) + escaped = color.cescape(result) + escaped = escaped.replace(self._prog, f"@.@*C{{{self._prog}}}@c") + return color.colorize(f"@B{escaped}@.") + + def add_argument(self, action): + if action.help is not argparse.SUPPRESS: + + # find all invocations + get_invocation = self._format_action_invocation + invocation_lengths = [color.clen(get_invocation(action)) + self._current_indent] + for subaction in self._iter_indented_subactions(action): + invocation_lengths.append( + color.clen(get_invocation(subaction)) + self._current_indent + ) + + # update the maximum item length + action_length = max(invocation_lengths) + self._action_max_length = max(self._action_max_length, action_length) + + # add the item to the list + self._add_item(self._format_action, [action]) + + def _format_action(self, action): + # this is where argparse aligns the help text next to each option + help_position = min(self._action_max_length + 2, self._max_help_position) + + result = super()._format_action(action) + + # add color *after* argparse aligns the text, so we don't interfere with lengths + if len(result) <= help_position: + header, rest = result, "" + elif result[help_position - 1] == " ": + header, rest = result[:help_position], result[help_position:] + else: + first_newline = result.index("\n") + header, rest = result[:first_newline], result[first_newline:] + + return color.colorize(f"@c{{{color.cescape(header)}}}{color.cescape(rest)}") + def add_arguments(self, actions): actions = sorted(actions, key=operator.attrgetter("option_strings")) super().add_arguments(actions) @@ -158,7 +198,7 @@ def format_help_sections(self, level): """Format help on sections for a particular verbosity level. Args: - level (str): 'short' or 'long' (more commands shown for long) + level (str): ``"short"`` or ``"long"`` (more commands shown for long) """ if level not in levels: raise ValueError("level must be one of: %s" % levels) @@ -200,24 +240,26 @@ def add_subcommand_group(title, commands): # select only the options for the particular level we're showing. 
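The colorize-after-alignment trick used above can be reproduced with plain ANSI escapes; a minimal sketch (illustrative only, using raw escape codes rather than Spack's color module):

import argparse

class ColorUsageFormatter(argparse.HelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix=None):
        # let argparse compute wrapping and alignment first, then add the
        # escape codes so they cannot skew the measured text widths
        result = super()._format_usage(usage, actions, groups, prefix)
        return f"\033[1m{result}\033[0m"

parser = argparse.ArgumentParser(prog="demo", formatter_class=ColorUsageFormatter)
parser.add_argument("-v", action="store_true", help="verbose output")
print(parser.format_help())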
show_options = options_by_level[level] + options = [ + opt + for group in self._action_groups + for opt in group._group_actions + if group.title not in ["positional arguments"] + ] + opts = {opt.option_strings[0].strip("-"): opt for opt in options} + actions = [o for o in opts.values()] if show_options != "all": - opts = dict( - (opt.option_strings[0].strip("-"), opt) for opt in self._optionals._group_actions - ) - - new_actions = [opts[letter] for letter in show_options] - self._optionals._group_actions = new_actions + actions = [opts[letter] for letter in show_options] # custom, more concise usage for top level - help_options = self._optionals._group_actions - help_options = help_options + [self._positionals._group_actions[-1]] + help_options = actions + [self._positionals._group_actions[-1]] formatter.add_usage(self.usage, help_options, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # start subcommands - formatter.add_text(intro_by_level[level]) + formatter.add_text(color.colorize(f"@*C{{{intro_by_level[level]}}}")) # add argument groups based on metadata in commands index = index_commands() @@ -242,21 +284,37 @@ def add_subcommand_group(title, commands): # add the group to the parser add_subcommand_group(group_description, commands) - # optionals - add_group(self._optionals) + # start subcommands + formatter.add_text(color.colorize("@*C{Options:}")) + + # optionals and user-defined groups + for group in sorted( + self._action_groups, key=lambda g: (g.title == "help", g.title != "general", g.title) + ): + if group.title == "positional arguments": + continue # handled by subcommand help above + + filtered_actions = [action for action in group._group_actions if action in actions] + if not filtered_actions: + continue + + formatter.start_section(group.title) + formatter.add_text(group.description) + + formatter.add_arguments(filtered_actions) + formatter.end_section() # epilog - formatter.add_text( + help_section = textwrap.dedent( """\ -{help}: - spack help --all list all commands and options - spack help help on a specific command - spack help --spec help on the package specification syntax - spack docs open https://spack.rtfd.io/ in a browser -""".format( - help=section_descriptions["help"] - ) + @*C{More help}: + @c{spack help --all} list all commands and options + @c{spack help } help on a specific command + @c{spack help --spec} help on the package specification syntax + @c{spack docs} open https://spack.rtfd.io/ in a browser + """ ) + formatter.add_text(color.colorize(help_section)) # determine help from format above return formatter.format_help() @@ -336,6 +394,7 @@ def _check_value(self, action, value): def make_argument_parser(**kwargs): """Create an basic argument parser without any subcommands added.""" parser = SpackArgumentParser( + prog="spack", formatter_class=SpackHelpFormatter, add_help=False, description=( @@ -345,10 +404,30 @@ def make_argument_parser(**kwargs): **kwargs, ) - # stat names in groups of 7, for nice wrapping. 
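The removed line just below packs stat names into fixed-size rows with a classic idiom: zipping several references to a single iterator yields consecutive chunks. A small sketch with three-element chunks (using the profiler stat names this code deals with):

stat_names = ["calls", "ncalls", "cumtime", "cumulative", "filename", "line", "module"]
# three references to one iterator -> items grouped in threes (remainder dropped)
print(list(zip(*(iter(stat_names),) * 3)))
# [('calls', 'ncalls', 'cumtime'), ('cumulative', 'filename', 'line')]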
- stat_lines = list(zip(*(iter(stat_names),) * 7)) - - parser.add_argument( + general = parser.add_argument_group("general") + general.add_argument( + "--color", + action="store", + default=None, + choices=("always", "never", "auto"), + help="when to colorize output (default: auto)", + ) + general.add_argument( + "-v", "--verbose", action="store_true", help="print additional output during builds" + ) + general.add_argument( + "-k", + "--insecure", + action="store_true", + help="do not check ssl certificates when downloading", + ) + general.add_argument( + "-b", "--bootstrap", action="store_true", help="use bootstrap config, store, and externals" + ) + general.add_argument( + "-V", "--version", action="store_true", help="show version number and exit" + ) + general.add_argument( "-h", "--help", dest="help", @@ -357,86 +436,92 @@ def make_argument_parser(**kwargs): default=None, help="show this help message and exit", ) - parser.add_argument( + general.add_argument( "-H", "--all-help", dest="help", action="store_const", const="long", default=None, - help="show help for all commands (same as spack help --all)", - ) - parser.add_argument( - "--color", - action="store", - default=None, - choices=("always", "never", "auto"), - help="when to colorize output (default: auto)", + help="show help for all commands (same as `spack help --all`)", ) - parser.add_argument( + + config = parser.add_argument_group("configuration and environments") + config.add_argument( "-c", "--config", default=None, action="append", dest="config_vars", - help="add one or more custom, one off config settings", + help="add one or more custom, one-off config settings", ) - parser.add_argument( + config.add_argument( "-C", "--config-scope", dest="config_scopes", action="append", metavar="DIR|ENV", - help="add directory or environment as read-only configuration scope, without activating " - "the environment.", - ) - parser.add_argument( - "-d", - "--debug", - action="count", - default=0, - help="write out debug messages\n\n(more d's for more verbosity: -d, -dd, -ddd, etc.)", + help="add directory or environment as read-only config scope", ) - parser.add_argument("--timestamp", action="store_true", help="add a timestamp to tty output") - parser.add_argument("--pdb", action="store_true", help="run spack under the pdb debugger") - - env_group = parser.add_mutually_exclusive_group() - env_group.add_argument( - "-e", - "--env", - dest="env", - metavar="ENV", - action=SetEnvironmentAction, - help="run with a specific environment (see spack env)", + envs = config # parser.add_argument_group("environments") + env_mutex = envs.add_mutually_exclusive_group() + env_mutex.add_argument( + "-e", "--env", dest="env", metavar="ENV", action="store", help="run with an environment" ) - env_group.add_argument( + env_mutex.add_argument( "-D", "--env-dir", dest="env_dir", metavar="DIR", - action=SetEnvironmentAction, - help="run with an environment directory (ignore managed environments)", + action="store", + help="run with environment in directory (ignore managed envs)", ) - env_group.add_argument( + env_mutex.add_argument( "-E", "--no-env", dest="no_env", action="store_true", help="run without any environments activated (see spack env)", ) - parser.add_argument( + envs.add_argument( "--use-env-repo", action="store_true", - help="when running in an environment, use its package repository", + help="when in an environment, use its package repository", ) - parser.add_argument( - "-k", - "--insecure", + debug = parser.add_argument_group("debug") + 
debug.add_argument( + "-d", + "--debug", + action="count", + default=0, + help="write out debug messages\n\n(more d's for more verbosity: -d, -dd, -ddd, etc.)", + ) + debug.add_argument( + "-t", + "--backtrace", action="store_true", - help="do not check ssl certificates when downloading", + default="SPACK_BACKTRACE" in os.environ, + help="always show backtraces for exceptions", + ) + debug.add_argument("--pdb", action="store_true", help="run spack under the pdb debugger") + debug.add_argument("--timestamp", action="store_true", help="add a timestamp to tty output") + debug.add_argument( + "-m", "--mock", action="store_true", help="use mock packages instead of real ones" ) - parser.add_argument( + debug.add_argument( + "--print-shell-vars", action="store", help="print info needed by setup-env.*sh" + ) + debug.add_argument( + "--stacktrace", + action="store_true", + default="SPACK_STACKTRACE" in os.environ, + help="add stacktraces to all printed statements", + ) + + locks = general + lock_mutex = locks.add_mutually_exclusive_group() + lock_mutex.add_argument( "-l", "--enable-locks", action="store_true", @@ -444,63 +529,36 @@ def make_argument_parser(**kwargs): default=None, help="use filesystem locking (default)", ) - parser.add_argument( + lock_mutex.add_argument( "-L", "--disable-locks", action="store_false", dest="locks", help="do not use filesystem locking (unsafe)", ) - parser.add_argument( - "-m", "--mock", action="store_true", help="use mock packages instead of real ones" - ) - parser.add_argument( - "-b", - "--bootstrap", - action="store_true", - help="use bootstrap configuration (bootstrap store, config, externals)", - ) - parser.add_argument( + + profile = parser.add_argument_group("profiling") + profile.add_argument( "-p", "--profile", action="store_true", dest="spack_profile", help="profile execution using cProfile", ) - parser.add_argument( + profile.add_argument("--profile-file", default=None, help="Filename to save profile data to.") + profile.add_argument( "--sorted-profile", default=None, metavar="STAT", - help=f"profile and sort\n\none or more of: {stat_lines[0]}", + help="profile and sort by STAT, which can be: calls, ncalls,\n" + "cumtime, cumulative, filename, line, module", ) - parser.add_argument( + profile.add_argument( "--lines", default=20, action="store", help="lines of profile output or 'all' (default: 20)", ) - parser.add_argument( - "-v", "--verbose", action="store_true", help="print additional output during builds" - ) - parser.add_argument( - "--stacktrace", - action="store_true", - default="SPACK_STACKTRACE" in os.environ, - help="add stacktraces to all printed statements", - ) - parser.add_argument( - "-t", - "--backtrace", - action="store_true", - default="SPACK_BACKTRACE" in os.environ, - help="always show backtraces for exceptions", - ) - parser.add_argument( - "-V", "--version", action="store_true", help="show version number and exit" - ) - parser.add_argument( - "--print-shell-vars", action="store", help="print info needed by setup-env.*sh" - ) return parser @@ -595,116 +653,114 @@ def _invoke_command(command, parser, args, unknown_args): class SpackCommand: - """Callable object that invokes a spack command (for testing). + """Callable object that invokes a Spack command (for testing). Example usage:: - install = SpackCommand('install') - install('-v', 'mpich') + install = SpackCommand("install") + install("-v", "mpich") - Use this to invoke Spack commands directly from Python and check - their output. 
- """ + Use this to invoke Spack commands directly from Python and check their output.""" - def __init__(self, command_name, subprocess=False): + def __init__(self, command_name: str) -> None: """Create a new SpackCommand that invokes ``command_name`` when called. Args: - command_name (str): name of the command to invoke - subprocess (bool): whether to fork a subprocess or not. Currently not supported on - Windows, where it is always False. + command_name: name of the command to invoke """ self.parser = make_argument_parser() self.command_name = command_name - # TODO: figure out how to support this on windows - self.subprocess = subprocess if sys.platform != "win32" else False - - def __call__(self, *argv, **kwargs): - """Invoke this SpackCommand. + #: Return code of the last command invocation + self.returncode: Any = None + #: Error raised during the last command invocation, if any + self.error: Optional[BaseException] = None + #: Binary output captured from the last command invocation + self.binary_output = b"" + #: Decoded output captured from the last command invocation + self.output = "" + + def __call__(self, *argv: str, capture: bool = True, fail_on_error: bool = True) -> str: + """Invoke this SpackCommand. Returns the combined stdout/stderr. Args: - argv (list): command line arguments. + argv: command line arguments. Keyword Args: - fail_on_error (optional bool): Don't raise an exception on error - global_args (optional list): List of global spack arguments: - simulates ``spack [global_args] [command] [*argv]`` + capture: Capture output from the command + fail_on_error: Don't raise an exception on error - Returns: - (str): combined output and error as a string - - On return, if ``fail_on_error`` is False, return value of command - is set in ``returncode`` property, and the error is set in the - ``error`` property. Otherwise, raise an error. - """ - # set these before every call to clear them out + On return, if ``fail_on_error`` is False, return value of command is set in ``returncode`` + property, and the error is set in the ``error`` property. 
Otherwise, raise an error.""" self.returncode = None self.error = None + self.binary_output = b"" + self.output = "" - prepend = kwargs["global_args"] if "global_args" in kwargs else [] - fail_on_error = kwargs.get("fail_on_error", True) - - if self.subprocess: - p = sp.Popen( - [spack.paths.spack_script] + prepend + [self.command_name] + list(argv), - stdout=sp.PIPE, - stderr=sp.STDOUT, - ) - out, self.returncode = p.communicate() - out = out.decode() - else: - command = self.parser.add_command(self.command_name) - args, unknown = self.parser.parse_known_args( - prepend + [self.command_name] + list(argv) - ) - - out = io.StringIO() + try: + with self.capture_output(enable=capture): + command = self.parser.add_command(self.command_name) + args, unknown = self.parser.parse_known_args([self.command_name, *argv]) + setup_main_options(args) + self.returncode = _invoke_command(command, self.parser, args, unknown) + except SystemExit as e: + # When the command calls sys.exit instead of returning an exit code + self.error = e + self.returncode = e.code + except BaseException as e: + # For other exceptions, raise the original exception if fail_on_error is True + self.error = e + if fail_on_error: + raise + finally: + self.output = self.binary_output.decode("utf-8", errors="replace") + + if fail_on_error and self.returncode not in (0, None): + raise SpackCommandError(self.returncode, self.output) from self.error + + return self.output + + @contextmanager + def capture_output(self, enable: bool = True): + """Captures stdout and stderr from the current process and all subprocesses. This uses a + temporary file and os.dup2 to redirect file descriptors.""" + if not enable: + yield self + return + with tempfile.TemporaryFile(mode="w+b") as tmp_file: + # sys.stdout and sys.stderr may have been replaced with file objects under pytest, so + # redirect their file descriptors in addition to the original fds 1 and 2. 
+ fds: Set[int] = {sys.stdout.fileno(), sys.stderr.fileno(), 1, 2} + saved_fds = {fd: os.dup(fd) for fd in fds} + sys.stdout.flush() + sys.stderr.flush() + for fd in fds: + os.dup2(tmp_file.fileno(), fd) try: - with log_output(out, echo=True): - self.returncode = _invoke_command(command, self.parser, args, unknown) - - except SystemExit as e: - self.returncode = e.code - - except BaseException as e: - tty.debug(e) - self.error = e - if fail_on_error: - self._log_command_output(out) - raise - out = out.getvalue() - - if fail_on_error and self.returncode not in (None, 0): - self._log_command_output(out) - raise SpackCommandError( - "Command exited with code %d: %s(%s)" - % (self.returncode, self.command_name, ", ".join("'%s'" % a for a in argv)) - ) - - return out - - def _log_command_output(self, out): - if tty.is_verbose(): - fmt = self.command_name + ": {0}" - for ln in out.getvalue().split("\n"): - if len(ln) > 0: - tty.verbose(fmt.format(ln.replace("==> ", ""))) - - -def _profile_wrapper(command, parser, args, unknown_args): + yield self + finally: + sys.stdout.flush() + sys.stderr.flush() + for fd, saved_fd in saved_fds.items(): + os.dup2(saved_fd, fd) + os.close(saved_fd) + tmp_file.seek(0) + self.binary_output = tmp_file.read() + + +def _profile_wrapper(command, main_args, parser, args, unknown_args): import cProfile try: - nlines = int(args.lines) + nlines = int(main_args.lines) except ValueError: - if args.lines != "all": - tty.die("Invalid number for --lines: %s" % args.lines) + if main_args.lines != "all": + tty.die("Invalid number for --lines: %s" % main_args.lines) nlines = -1 # allow comma-separated list of fields sortby = ["time"] - if args.sorted_profile: - sortby = args.sorted_profile.split(",") + if main_args.sorted_profile: + sortby = main_args.sorted_profile.split(",") for stat in sortby: if stat not in stat_names: tty.die("Invalid sort field: %s" % stat) @@ -718,6 +774,9 @@ def _profile_wrapper(command, parser, args, unknown_args): finally: pr.disable() + if main_args.profile_file: + pr.dump_stats(main_args.profile_file) + # print out profile stats. stats = pstats.Stats(pr, stream=sys.stderr) stats.sort_stats(*sortby) @@ -746,12 +805,12 @@ def print_setup_info(*info): Args: info (list): list of things to print: comma-separated list - of 'csh', 'sh', or 'modules' + of ``"csh"``, ``"sh"``, or ``"modules"`` This is in ``main.py`` to make it fast; the setup scripts need to invoke spack in login scripts, and it needs to be quick. """ - import spack.modules.common + from spack.modules.common import root_path shell = "csh" if "csh" in info else "sh" @@ -769,7 +828,7 @@ def shell_set(var, value): # print roots for all module systems module_to_roots = {"tcl": list(), "lmod": list()} for name in module_to_roots.keys(): - path = spack.modules.common.root_path(name, "default") + path = root_path(name, "default") module_to_roots[name].append(path) other_spack_instances = spack.config.get("upstreams") or {} @@ -802,10 +861,10 @@ def shell_set(var, value): def restore_macos_dyld_vars(): """ - Spack mutates DYLD_* variables in `spack load` and `spack env activate`. + Spack mutates ``DYLD_*`` variables in ``spack load`` and ``spack env activate``. Unlike Linux, macOS SIP clears these variables in new processes, meaning - that os.environ["DYLD_*"] in our Python process is not the same as the user's - shell. Therefore, we store the user's DYLD_* variables in SPACK_DYLD_* and + that ``os.environ["DYLD_*"]`` in our Python process is not the same as the user's + shell. 
Therefore, we store the user's ``DYLD_*`` variables in ``SPACK_DYLD_*`` and restore them here. """ if not sys.platform == "darwin": @@ -861,50 +920,19 @@ def resolve_alias(cmd_name: str, cmd: List[str]) -> Tuple[str, List[str]]: _ENV = object() -class SetEnvironmentAction(argparse.Action): - """Records an environment both in the ``env`` attribute and in the ``config_scopes`` list. - - We need to know where the environment appeared on the CLI set scope precedence. - - """ - - def __call__(self, parser, namespace, name_or_dir, option_string): - setattr(namespace, self.dest, name_or_dir) - - scopes = getattr(namespace, "config_scopes", None) - if scopes is None: - scopes = [] - scopes.append(_ENV) - namespace.config_scopes = scopes - - def add_command_line_scopes( - cfg: spack.config.Configuration, - command_line_scopes: List[Any], # str or _ENV but mypy can't type sentinels - add_environment: Callable[[ConfigScopePriority], None], + cfg: spack.config.Configuration, command_line_scopes: List[str] ) -> None: - """Add additional scopes from the --config-scope argument, either envs or dirs. + """Add additional scopes from the ``--config-scope`` argument, either envs or dirs. Args: cfg: configuration instance command_line_scopes: list of configuration scope paths - add_environment: method to add an environment scope if encountered Raises: spack.error.ConfigError: if the path is an invalid configuration scope """ - # remove all but the last _ENV from CLI scopes, because we can only - # have a single environment active. - for _ in range(command_line_scopes.count(_ENV) - 1): - command_line_scopes.remove(_ENV) - for i, path in enumerate(command_line_scopes): - # If an environment is set on the CLI, add its scope in the order it appears there. - # Subsequent custom scopes will override it, and it will override prior custom scopes. - if path is _ENV: - add_environment(ConfigScopePriority.CUSTOM) - continue - name = f"cmd_scope_{i}" scope = ev.environment_path_scope(name, path) if scope is None: @@ -982,25 +1010,30 @@ def _main(argv=None): if not args.no_env: try: env = spack.cmd.find_environment(args) - except spack.config.ConfigFormatError as e: + except (spack.config.ConfigFormatError, ev.SpackEnvironmentConfigError) as e: # print the context but delay this exception so that commands like # `spack config edit` can still work with a bad environment. e.print_context() env_format_error = e - def add_environment_scope(priority): + def add_environment_scope(): + if env_format_error: + # Allow command to continue without env in case it is `spack config edit` + # All other cases will raise in `finish_parse_and_run` + spack.environment.environment._active_environment_error = env_format_error + return # do not call activate here, as it has a lot of expensive function calls to deal # with mutation of spack.config.CONFIG -- but we are still building the config. 
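The scope layering assembled here (defaults, then environment, then command-line scopes, with an internal command_line scope pushed last) boils down to last-pushed-wins lookup; a generic sketch of that idea, not Spack's actual API:

scopes = [
    ("defaults", {"locks": True, "color": "auto"}),
    ("environment", {"color": "never"}),
    ("command_line", {"locks": False}),
]

def get(key):
    # later (higher-priority) scopes win, like the command_line scope above
    for _, config in reversed(scopes):
        if key in config:
            return config[key]
    return None

print(get("locks"), get("color"))  # False never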
-        env.manifest.prepare_config_scope(priority)
+        env.manifest.prepare_config_scope()
         spack.environment.environment._active_environment = env
 
-    # add the environment *first*, if it is coming from an environment variable
-    if env and _ENV not in (args.config_scopes or []):
-        add_environment_scope(priority=ConfigScopePriority.ENVIRONMENT)
+    # add the environment
+    if env:
+        add_environment_scope()
 
     # Push scopes from the command line last
     if args.config_scopes:
-        add_command_line_scopes(spack.config.CONFIG, args.config_scopes, add_environment_scope)
+        add_command_line_scopes(spack.config.CONFIG, args.config_scopes)
 
     spack.config.CONFIG.push_scope(
         spack.config.InternalConfigScope("command_line"), priority=ConfigScopePriority.COMMAND_LINE
     )
@@ -1062,8 +1095,8 @@ def finish_parse_and_run(parser, cmd_name, main_args, env_format_error):
     spack.paths.set_working_dir()
 
     # now we can actually execute the command.
-    if main_args.spack_profile or main_args.sorted_profile:
-        _profile_wrapper(command, parser, args, unknown)
+    if main_args.spack_profile or main_args.sorted_profile or main_args.profile_file:
+        _profile_wrapper(command, main_args, parser, args, unknown)
     elif main_args.pdb:
         import pdb
@@ -1158,4 +1191,9 @@ def _handle_solver_bug(
 class SpackCommandError(Exception):
-    """Raised when SpackCommand execution fails."""
+    """Raised when SpackCommand execution fails, replacing SystemExit."""
+
+    def __init__(self, code, output):
+        self.code = code
+        self.output = output
+        super().__init__(f"Spack command failed with exit code {code}")
diff --git a/lib/spack/spack/mirrors/layout.py b/lib/spack/spack/mirrors/layout.py
index 4e5e0292d29568..0d4d0e765c1c56 100644
--- a/lib/spack/spack/mirrors/layout.py
+++ b/lib/spack/spack/mirrors/layout.py
@@ -2,16 +2,18 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 import os
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
 
 import spack.fetch_strategy
 import spack.llnl.url
 import spack.oci.image
 import spack.repo
-import spack.spec
 from spack.error import MirrorError
 from spack.llnl.util.filesystem import mkdirp, symlink
 
+if TYPE_CHECKING:
+    import spack.spec
+
 
 class MirrorLayout:
     """A ``MirrorLayout`` object describes the relative path of a mirror entry."""
diff --git a/lib/spack/spack/mirrors/mirror.py b/lib/spack/spack/mirrors/mirror.py
index 0db5ac4f47ae9a..3ace9ec4743156 100644
--- a/lib/spack/spack/mirrors/mirror.py
+++ b/lib/spack/spack/mirrors/mirror.py
@@ -4,7 +4,7 @@
 import operator
 import os
 import urllib.parse
-from typing import Any, Dict, Mapping, Optional, Tuple, Union
+from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
 
 import spack.config
 import spack.llnl.util.tty as tty
@@ -13,9 +13,13 @@
 import spack.util.spack_yaml as syaml
 import spack.util.url as url_util
 from spack.error import MirrorError
+from spack.oci.image import is_oci_url
 
 #: What schemes do we support
-supported_url_schemes = ("file", "http", "https", "sftp", "ftp", "s3", "gs", "oci")
+supported_url_schemes = ("file", "http", "https", "sftp", "ftp", "s3", "gs", "oci", "oci+http")
+
+#: The layout versions spack can currently install
+SUPPORTED_LAYOUT_VERSIONS = (3, 2)
 
 
 def _url_or_path_to_url(url_or_path: str) -> str:
@@ -111,6 +115,11 @@ def source(self):
     @property
     def signed(self) -> bool:
+        # TODO: add signing support for OCI mirrors
+        # Only checked for fetch; push signing is handled by the OCI implementation
+        if is_oci_url(self.fetch_url):
+            return False
+
         return isinstance(self._data, str) or self._data.get("signed", True)
 
     @property
@@ -129,6 +138,16 @@ def
push_url(self):
         """Get the valid, canonicalized fetch URL"""
         return self.get_url("push")
 
+    @property
+    def fetch_view(self):
+        """Get the configured fetch view, if any"""
+        return self.get_view("fetch")
+
+    @property
+    def push_view(self):
+        """Get the configured push view, if any"""
+        return self.get_view("push")
+
     def ensure_mirror_usable(self, direction: str = "push"):
         access_pair = self._get_value("access_pair", direction)
         access_token_variable = self._get_value("access_token_variable", direction)
@@ -158,6 +177,25 @@ def ensure_mirror_usable(self, direction: str = "push"):
             msg += "\n    ".join(errors)
             raise MirrorError(msg)
 
+    @property
+    def supported_layout_versions(self) -> List[int]:
+        """List all of the layout versions this mirror can fetch from"""
+        # Only check the fetch configuration; pushes always use the latest mirror
+        # version, which should support all configurable features.
 
+        # All configured mirrors support the latest version
+        supported_versions = [SUPPORTED_LAYOUT_VERSIONS[0]]
+        has_view = self.fetch_view is not None
+
+        # Check if the mirror supports older layout versions
+        # OCI - Only return the newest version, the layout version is a dummy version since OCI
+        # has its own layout.
+        # Views - Only versions >=3 support the views feature
+        if not is_oci_url(self.fetch_url) and not has_view:
+            supported_versions.extend(SUPPORTED_LAYOUT_VERSIONS[1:])
+
+        return supported_versions
+
     def _update_connection_dict(self, current_data: dict, new_data: dict, top_level: bool):
         # Only allow one to exist in the config
         if "access_token" in current_data and "access_token_variable" in new_data:
@@ -304,6 +342,9 @@ def get_url(self, direction: str) -> str:
 
         return _url_or_path_to_url(url)
 
+    def get_view(self, direction: str):
+        return self._get_value("view", direction)
+
     def get_credentials(self, direction: str) -> Dict[str, Any]:
         """Get the mirror credentials from the mirror config
 
@@ -314,9 +355,10 @@ def get_credentials(self, direction: str) -> Dict[str, Any]:
             Dictionary from credential type string to value
 
         Credential Type Map:
-            access_token -> str
-            access_pair -> tuple(str,str)
-            profile -> str
+
+        * ``access_token``: ``str``
+        * ``access_pair``: ``Tuple[str, str]``
+        * ``profile``: ``str``
         """
         creddict: Dict[str, Any] = {}
         access_token = self.get_access_token(direction)
@@ -387,7 +429,7 @@ def __init__(
         mirrors_data = (
             mirrors.items()
             if mirrors is not None
-            else spack.config.get("mirrors", scope=scope).items()
+            else spack.config.CONFIG.get_config("mirrors", scope=scope).items()
         )
         mirrors = (Mirror(data=mirror, name=name) for name, mirror in mirrors_data)
diff --git a/lib/spack/spack/mirrors/utils.py b/lib/spack/spack/mirrors/utils.py
index 9e8337b73e90f0..ac720a1b96d11f 100644
--- a/lib/spack/spack/mirrors/utils.py
+++ b/lib/spack/spack/mirrors/utils.py
@@ -3,10 +3,10 @@
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 import os
 import traceback
+from collections import Counter
 
 import spack.caches
 import spack.config
-import spack.error
 import spack.llnl.util.tty as tty
 import spack.repo
 import spack.spec
@@ -89,45 +89,17 @@ def get_matching_versions(specs, num_versions=1):
     return matching
 
 
-def create(path, specs, skip_unstable_versions=False):
-    """Create a directory to be used as a spack mirror, and fill it with
-    package archives.
-
-    Arguments:
-        path: Path to create a mirror directory hierarchy in.
-        specs: Any package versions matching these specs will be added \
-            to the mirror.
- skip_unstable_versions: if true, this skips adding resources when - they do not have a stable archive checksum (as determined by - ``fetch_strategy.stable_target``) - - Return Value: - Returns a tuple of lists: (present, mirrored, error) - - * present: Package specs that were already present. - * mirrored: Package specs that were successfully mirrored. - * error: Package specs that failed to mirror due to some error. - """ - # automatically spec-ify anything in the specs array. - specs = [s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s) for s in specs] - - mirror_cache, mirror_stats = mirror_cache_and_stats(path, skip_unstable_versions) - for spec in specs: - mirror_stats.next_spec(spec) - create_mirror_from_package_object(spec.package, mirror_cache, mirror_stats) - - return mirror_stats.stats() - - -def mirror_cache_and_stats(path, skip_unstable_versions=False): - """Return both a mirror cache and a mirror stats, starting from the path - where a mirror ought to be created. +def get_mirror_cache(path, skip_unstable_versions=False): + """Returns a mirror cache, starting from the path where a mirror ought to be created. Args: path (str): path to create a mirror directory hierarchy in. skip_unstable_versions: if true, this skips adding resources when they do not have a stable archive checksum (as determined by - ``fetch_strategy.stable_target``) + ``fetch_strategy.stable_target``). + + Returns: + spack.caches.MirrorCache: mirror cache object for the given path. """ # Get the absolute path of the root before we start jumping around. if not os.path.isdir(path): @@ -136,8 +108,7 @@ def mirror_cache_and_stats(path, skip_unstable_versions=False): except OSError as e: raise MirrorError("Cannot create directory '%s':" % path, str(e)) mirror_cache = spack.caches.MirrorCache(path, skip_unstable_versions=skip_unstable_versions) - mirror_stats = MirrorStats() - return mirror_cache, mirror_stats + return mirror_cache def add(mirror: Mirror, scope=None): @@ -161,41 +132,28 @@ def remove(name, scope): if not mirrors: mirrors = syaml.syaml_dict() - if name not in mirrors: - tty.die("No mirror with name %s" % name) - - mirrors.pop(name) + removed = mirrors.pop(name, False) spack.config.set("mirrors", mirrors, scope=scope) - tty.msg("Removed mirror %s." 
% name) - + return bool(removed) -class MirrorStats: - def __init__(self): - self.present = {} - self.new = {} - self.errors = set() - self.current_spec = None +class MirrorStatsForOneSpec: + def __init__(self, spec): + self.present = Counter() + self.new = Counter() + self.errors = Counter() + self.spec = spec self.added_resources = set() self.existing_resources = set() - def next_spec(self, spec): - self._tally_current_spec() - self.current_spec = spec - - def _tally_current_spec(self): - if self.current_spec: + def finalize(self): + if self.spec: if self.added_resources: - self.new[self.current_spec] = len(self.added_resources) + self.new[self.spec] = len(self.added_resources) if self.existing_resources: - self.present[self.current_spec] = len(self.existing_resources) + self.present[self.spec] = len(self.existing_resources) self.added_resources = set() self.existing_resources = set() - self.current_spec = None - - def stats(self): - self._tally_current_spec() - return list(self.present), list(self.new), list(self.errors) def already_existed(self, resource): # If an error occurred after caching a subset of a spec's @@ -207,11 +165,35 @@ def added(self, resource): self.added_resources.add(resource) def error(self): - self.errors.add(self.current_spec) + if self.spec: + self.errors[self.spec] += 1 + + +class MirrorStatsForAllSpecs: + def __init__(self): + # Counter is used to easily merge mirror stats for one spec into mirror stats for all specs + self.present = Counter() + self.new = Counter() + self.errors = Counter() + + def merge(self, ext_mirror_stat: MirrorStatsForOneSpec): + # For the sake of parallelism we need a way to reduce/merge different + # MirrorStats objects. + self.present.update(ext_mirror_stat.present) + self.new.update(ext_mirror_stat.new) + self.errors.update(ext_mirror_stat.errors) + + def stats(self): + # Convert dictionary to list + present_list = list(self.present.keys()) + new_list = list(self.new.keys()) + errors_list = list(self.errors.keys()) + + return present_list, new_list, errors_list def create_mirror_from_package_object( - pkg_obj, mirror_cache: "spack.caches.MirrorCache", mirror_stats: MirrorStats + pkg_obj, mirror_cache: "spack.caches.MirrorCache", mirror_stats: MirrorStatsForOneSpec ) -> bool: """Add a single package object to a mirror. @@ -235,6 +217,7 @@ def create_mirror_from_package_object( pkg_stage.cache_mirror(mirror_cache, mirror_stats) break except Exception as e: + pkg_obj.stage.destroy() if num_retries + 1 == max_retries: if spack.config.get("config:debug"): traceback.print_exc() diff --git a/lib/spack/spack/mixins.py b/lib/spack/spack/mixins.py index 738341fb35e975..257be86a4e1781 100644 --- a/lib/spack/spack/mixins.py +++ b/lib/spack/spack/mixins.py @@ -2,16 +2,23 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -"""This module contains additional behavior that can be attached to any given -package. -""" +"""This module contains additional behavior that can be attached to any given package.""" import os +from typing import Optional import spack.llnl.util.filesystem import spack.phase_callbacks -def filter_compiler_wrappers(*files: str, **kwargs): +def filter_compiler_wrappers( + *files: str, + after: str = "install", + relative_root: Optional[str] = None, + ignore_absent: bool = True, + backup: bool = False, + recursive: bool = False, + **kwargs, # for compatibility with package api v2.0 +) -> None: """Registers a phase callback (e.g. 
post-install) to look for references to Spack's compiler wrappers in the given files and replace them with the underlying compilers. @@ -28,45 +35,26 @@ class MyPackage(Package): Args: *files: files to be filtered relative to the search root (install prefix by default). - - **kwargs: allowed keyword arguments - - after - specifies after which phase the files should be filtered (defaults to "install") - - relative_root - path relative to install prefix where to start searching for the files to be - filtered. If not set the install prefix will be used as the search root. - It is *highly recommended* to set this, as searching recursively from the - installation prefix can be very slow. - - ignore_absent, backup - these two keyword arguments, if present, will be forwarded to - :func:`~spack.llnl.util.filesystem.filter_file` - - recursive - this keyword argument, if present, will be forwarded to - :func:`~spack.llnl.util.filesystem.find` + after: specifies after which phase the files should be filtered (defaults to + ``"install"``). + relative_root: path relative to install prefix where to start searching for the files to be + filtered. If not set the install prefix will be used as the search root. It is *highly + recommended* to set this, as searching recursively from the installation prefix can be + very slow. + ignore_absent: if present, will be forwarded to + :func:`~spack.llnl.util.filesystem.filter_file` + backup: if present, will be forwarded to + :func:`~spack.llnl.util.filesystem.filter_file` + recursive: if present, will be forwarded to :func:`~spack.llnl.util.filesystem.find` """ - after = kwargs.get("after", "install") - relative_root = kwargs.get("relative_root", None) - - filter_kwargs = { - "ignore_absent": kwargs.get("ignore_absent", True), - "backup": kwargs.get("backup", False), - "string": True, - } - - find_kwargs = {"recursive": kwargs.get("recursive", False)} def _filter_compiler_wrappers_impl(pkg_or_builder): pkg = getattr(pkg_or_builder, "pkg", pkg_or_builder) # Compute the absolute path of the search root root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix - # Compute the absolute path of the files to be filtered and - # remove links from the list. - abs_files = spack.llnl.util.filesystem.find(root, files, **find_kwargs) + # Compute the absolute path of the files to be filtered and remove links from the list. + abs_files = spack.llnl.util.filesystem.find(root, files, recursive=recursive) abs_files = [x for x in abs_files if not os.path.islink(x)] x = spack.llnl.util.filesystem.FileFilter(*abs_files) @@ -86,14 +74,12 @@ def _filter_compiler_wrappers_impl(pkg_or_builder): # For example: # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper) # FC=/path/to/spack/lib/spack/env/cce/ftn - # Therefore, we perform the filtering in the reversed sorted order of - # the substituted strings. If, however, the strings are identical (e.g. - # both CC and FC are set using realpath), the filtering is done - # according to the order in compiler_vars. To achieve that, we populate - # the following array with tuples of three elements: path to the - # wrapper, negated index of the variable in compiler_vars, path to the - # real compiler. This way, the reversed sorted order of the resulting - # array is the order of replacements that we need. + # Therefore, we perform the filtering in the reversed sorted order of the substituted + # strings. If, however, the strings are identical (e.g. 
both CC and FC are set using + # realpath), the filtering is done according to the order in compiler_vars. To achieve + # that, we populate the following array with tuples of three elements: path to the wrapper, + # negated index of the variable in compiler_vars, path to the real compiler. This way, the + # reversed sorted order of the resulting array is the order of replacements that we need. replacements = [] for idx, (env_var, compiler_path) in enumerate(compiler_vars): @@ -105,18 +91,36 @@ def _filter_compiler_wrappers_impl(pkg_or_builder): replacements.append((wrapper_path, -idx, compiler_path)) for wrapper_path, _, compiler_path in sorted(replacements, reverse=True): - x.filter(wrapper_path, compiler_path, **filter_kwargs) + x.filter( + wrapper_path, + compiler_path, + ignore_absent=ignore_absent, + backup=backup, + string=True, + ) # Remove this linking flag if present (it turns RPATH into RUNPATH) for compiler_lang in ("c", "cxx", "fortran"): if compiler_lang not in pkg.spec: continue compiler_pkg = pkg.spec[compiler_lang].package - x.filter(f"{compiler_pkg.linker_arg}--enable-new-dtags", "", **filter_kwargs) + x.filter( + f"{compiler_pkg.linker_arg}--enable-new-dtags", + "", + ignore_absent=ignore_absent, + backup=backup, + string=True, + ) # NAG compiler is usually mixed with GCC, which has a different # prefix for linker arguments. if pkg.compiler.name == "nag": - x.filter("-Wl,--enable-new-dtags", "", **filter_kwargs) + x.filter( + "-Wl,--enable-new-dtags", + "", + ignore_absent=ignore_absent, + backup=backup, + string=True, + ) spack.phase_callbacks.run_after(after)(_filter_compiler_wrappers_impl) diff --git a/lib/spack/spack/modules/common.py b/lib/spack/spack/modules/common.py index a149f41a8516f6..7c7a1a5b2df686 100644 --- a/lib/spack/spack/modules/common.py +++ b/lib/spack/spack/modules/common.py @@ -6,22 +6,19 @@ This information maps **a single spec** to: - * a unique module filename - * the module file content +* a unique module filename +* the module file content and is divided among four classes: - * a configuration class that provides a convenient interface to query - details about the configuration for the spec under consideration. - - * a layout class that provides the information associated with module - file names and directories - - * a context class that provides the dictionary used by the template engine - to generate the module file - - * a writer that collects and uses the information above to either write - or remove the module file +* a configuration class that provides a convenient interface to query + details about the configuration for the spec under consideration. +* a layout class that provides the information associated with module + file names and directories +* a context class that provides the dictionary used by the template engine + to generate the module file +* a writer that collects and uses the information above to either write + or remove the module file Each of the four classes needs to be sub-classed when implementing a new module type. @@ -142,7 +139,7 @@ def dependencies(spec: spack.spec.Spec, request: str = "all") -> List[spack.spec Args: spec: spec to be analyzed - request: one of "none", "run", "direct", "all" + request: one of ``"none"``, ``"run"``, ``"direct"``, ``"all"`` Returns: list of requested dependencies @@ -212,7 +209,7 @@ def root_path(name, module_set_name): """Returns the root folder for module file installation. Args: - name: name of the module system to be used (e.g. 
'tcl') + name: name of the module system to be used (``"tcl"`` or ``"lmod"``) module_set_name: name of the set of module configs to use Returns: @@ -1038,19 +1035,19 @@ class ModuleNotFoundError(ModulesError): class DefaultTemplateNotDefined(AttributeError, ModulesError): - """Raised if the attribute 'default_template' has not been specified + """Raised if the attribute ``default_template`` has not been specified in the derived classes. """ class HideCmdFormatNotDefined(AttributeError, ModulesError): - """Raised if the attribute 'hide_cmd_format' has not been specified + """Raised if the attribute ``hide_cmd_format`` has not been specified in the derived classes. """ class ModulercHeaderNotDefined(AttributeError, ModulesError): - """Raised if the attribute 'modulerc_header' has not been specified + """Raised if the attribute ``modulerc_header`` has not been specified in the derived classes. """ diff --git a/lib/spack/spack/modules/lmod.py b/lib/spack/spack/modules/lmod.py index fb6af2cb58db10..1f958b0d15d0fe 100644 --- a/lib/spack/spack/modules/lmod.py +++ b/lib/spack/spack/modules/lmod.py @@ -154,7 +154,7 @@ def filter_hierarchy_specs(self): @lang.memoized def hierarchy_tokens(self): """Returns the list of tokens that are part of the modulefile - hierarchy. 'compiler' is always present. + hierarchy. ``compiler`` is always present. """ tokens = configuration(self.name).get("hierarchy", []) @@ -180,7 +180,7 @@ def hierarchy_tokens(self): def requires(self): """Returns a dictionary mapping all the requirements of this spec to the actual provider. - The 'compiler' key is always present among the requirements. + The ``compiler`` key is always present among the requirements. """ # If it's a core_spec, lie and say it requires a core compiler if ( @@ -506,12 +506,12 @@ class LmodModulefileWriter(BaseModuleFileWriter): class CoreCompilersNotFoundError(spack.error.SpackError, KeyError): - """Error raised if the key 'core_compilers' has not been specified + """Error raised if the key ``core_compilers`` has not been specified in the configuration file. """ class NonVirtualInHierarchyError(spack.error.SpackError, TypeError): """Error raised if non-virtual specs are used as hierarchy tokens in - the lmod section of 'modules.yaml'. + the lmod section of ``modules.yaml``. """ diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py index 4f0a8948b0c490..df96f4870b62ae 100644 --- a/lib/spack/spack/multimethod.py +++ b/lib/spack/spack/multimethod.py @@ -67,15 +67,16 @@ class SpecMultiMethod: decorator (see docs below) creates SpecMultiMethods and registers method versions with them. - To register a method, you can do something like this: + To register a method, you can do something like this:: + mm = SpecMultiMethod() mm.register("^chaos_5_x86_64_ib", some_method) The object registered needs to be a Spec or some string that will parse to be a valid spec. - When the mm is actually called, it selects a version of the - method to call based on the sys_type of the object it is + When the ``mm`` is actually called, it selects a version of the + method to call based on the ``sys_type`` of the object it is called on. See the docs for decorators below for more details. @@ -154,7 +155,8 @@ def __call__(self, package_or_builder_self, *args, **kwargs): class when: """This is a multi-purpose class, which can be used - 1. As a context manager to **group directives together** that share the same `when=` argument. + 1. 
As a context manager to **group directives together** that share the same ``when=`` + argument. 2. As a **decorator** for defining multi-methods (multiple methods with the same name are defined, but the version that is called depends on the condition of the package's spec) @@ -183,7 +185,7 @@ class when: depends_on("dependency", when="+foo +bar +baz") As a **decorator**, it allows packages to declare multiple versions of methods like - `install()` that depend on the package's spec. For example:: + ``install()`` that depend on the package's spec. For example:: class SomePackage(Package): ... @@ -201,7 +203,7 @@ def install(self, spec: Spec, prefix: Prefix): # This will be executed if the package's target is in # the aarch64 family - This allows each package to have a default version of install() AND + This allows each package to have a default version of ``install()`` AND specialized versions for particular platforms. The version that is called depends on the architecture of the instantiated package. @@ -223,9 +225,8 @@ def setup(self): @when("^openmpi") def setup(self): - # do something special when this is built with OpenMPI for - # its MPI implementations. - + # do something special when this is built with OpenMPI for its MPI implementations. + pass def install(self, prefix): # Do common install stuff diff --git a/lib/spack/spack/new_installer.py b/lib/spack/spack/new_installer.py new file mode 100644 index 00000000000000..404da25f7dfc2b --- /dev/null +++ b/lib/spack/spack/new_installer.py @@ -0,0 +1,1537 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +"""New installer that will ultimately replace installer.py. It features an event loop, non-blocking +I/O, and a POSIX jobserver to limit concurrency. It also has a more advanced terminal UI. It's +mostly self-contained to avoid interfering with the rest of Spack too much while it's being +developed and tested. + +The installer consists of a UI process that manages multiple build processes and handles updates +to the database. It detects or creates a jobserver, and then kicks off an event loop in which it +runs through a build queue, always running at least one build. Concurrent builds run as jobserver +tokens are obtained. This means only one -j flag is needed to control concurrency. + +The UI process has two modes: an overview mode where it shows the status of all builds, and a +mode where it follows the logs of a specific build. It listens to keyboard input to switch between +modes. + +The build process does an ordinary install, but also spawns a "tee" thread that forwards its build +output to both a log file and the UI process (if the UI process has requested it). 
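The jobserver mentioned above is presumably the GNU-make-style token pipe; a minimal sketch of acquiring and returning one token (illustrative, with a local pipe standing in for inherited jobserver fds):

import os

read_fd, write_fd = os.pipe()
os.write(write_fd, b"++")      # two tokens allow two *extra* concurrent jobs

token = os.read(read_fd, 1)    # blocks until a token is available
try:
    pass                       # ... one concurrent build would run here ...
finally:
    os.write(write_fd, token)  # always hand the token back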
This thread also +runs an event loop to listen for control messages from the UI process (to enable/disable echoing +of logs), and for output from the build process.""" + +import fcntl +import io +import json +import os +import re +import selectors +import shutil +import sys +import tempfile +import termios +import threading +import time +import traceback +import tty +from gzip import GzipFile +from multiprocessing import Pipe, Process +from multiprocessing.connection import Connection +from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Set, Tuple, Union + +from spack.vendor.typing_extensions import Literal + +import spack.binary_distribution +import spack.build_environment +import spack.builder +import spack.config +import spack.database +import spack.deptypes as dt +import spack.error +import spack.hooks +import spack.llnl.util.lock +import spack.llnl.util.tty +import spack.paths +import spack.report +import spack.spec +import spack.stage +import spack.store +import spack.traverse +import spack.url_buildcache +import spack.util.lock + +if TYPE_CHECKING: + import spack.package_base + +#: Type for specifying installation source modes +InstallPolicy = Literal["auto", "cache_only", "source_only"] + +#: How often to update a spinner in seconds +SPINNER_INTERVAL = 0.1 + +#: How long to display finished packages before graying them out +CLEANUP_TIMEOUT = 2.0 + +#: Size of the output buffer for child processes +OUTPUT_BUFFER_SIZE = 4096 + +#: Suffix for temporary backup during overwrite install +OVERWRITE_BACKUP_SUFFIX = ".old" + +#: Suffix for temporary cleanup during failed install +OVERWRITE_GARBAGE_SUFFIX = ".garbage" + + +class ChildInfo: + """Information about a child process.""" + + __slots__ = ("proc", "spec", "output_r_conn", "state_r_conn", "control_w_conn", "explicit") + + def __init__( + self, + proc: Process, + spec: spack.spec.Spec, + output_r_conn: Connection, + state_r_conn: Connection, + control_w_conn: Connection, + explicit: bool = False, + ) -> None: + self.proc = proc + self.spec = spec + self.output_r_conn = output_r_conn + self.state_r_conn = state_r_conn + self.control_w_conn = control_w_conn + self.explicit = explicit + + def cleanup(self, selector: selectors.BaseSelector) -> None: + """Unregister and close file descriptors, and join the child process.""" + try: + selector.unregister(self.output_r_conn.fileno()) + except KeyError: + pass + try: + selector.unregister(self.state_r_conn.fileno()) + except KeyError: + pass + try: + selector.unregister(self.proc.sentinel) + except (KeyError, ValueError): + pass + self.output_r_conn.close() + self.state_r_conn.close() + self.control_w_conn.close() + self.proc.join() + + +def send_state(state: str, state_pipe: io.TextIOWrapper) -> None: + """Send a state update message.""" + json.dump({"state": state}, state_pipe, separators=(",", ":")) + state_pipe.write("\n") + + +def send_progress(current: int, total: int, state_pipe: io.TextIOWrapper) -> None: + """Send a progress update message.""" + json.dump({"progress": current, "total": total}, state_pipe, separators=(",", ":")) + state_pipe.write("\n") + + +def tee(control_r: int, log_r: int, file_w: int, parent_w: int) -> None: + """Forward log_r to file_w and parent_w (if echoing is enabled). 
+ Echoing is enabled and disabled by reading from control_r.""" + echo_on = False + selector = selectors.DefaultSelector() + selector.register(log_r, selectors.EVENT_READ) + selector.register(control_r, selectors.EVENT_READ) + + try: + while True: + for key, _ in selector.select(): + if key.fd == log_r: + data = os.read(log_r, OUTPUT_BUFFER_SIZE) + if not data: # EOF: exit the thread + return + os.write(file_w, data) + if echo_on: + os.write(parent_w, data) + + elif key.fd == control_r: + control_data = os.read(control_r, 1) + if not control_data: + return + else: + echo_on = control_data == b"1" + except OSError: # do not raise + pass + finally: + os.close(log_r) + + +class Tee: + """Emulates ./build 2>&1 | tee build.log. The output is sent both to a log file and the parent + process (if echoing is enabled). The control connection is used to enable/disable echoing.""" + + def __init__(self, control: Connection, parent: Connection, log_fd: int) -> None: + self.control = control + self.parent = parent + #: The file descriptor of the log file + self.log_fd = log_fd + r, w = os.pipe() + self.tee_thread = threading.Thread( + target=tee, + args=(self.control.fileno(), r, self.log_fd, self.parent.fileno()), + daemon=True, + ) + self.tee_thread.start() + os.dup2(w, sys.stdout.fileno()) + os.dup2(w, sys.stderr.fileno()) + os.close(w) + + def close(self) -> None: + # Closing stdout and stderr should close the last reference to the write end of the pipe, + # causing the tee thread to wake up, flush the last data, and exit. + sys.stdout.flush() + sys.stderr.flush() + os.close(sys.stdout.fileno()) + os.close(sys.stderr.fileno()) + self.tee_thread.join() + # Only then close the other fds. + self.control.close() + self.parent.close() + os.close(self.log_fd) + + +def install_from_buildcache( + mirrors: List[spack.url_buildcache.MirrorMetadata], + spec: spack.spec.Spec, + unsigned: Optional[bool], + state_stream: io.TextIOWrapper, +) -> bool: + send_state("fetching from build cache", state_stream) + tarball_stage = spack.binary_distribution.download_tarball(spec.build_spec, unsigned, mirrors) + + if tarball_stage is None: + return False + + send_state("relocating", state_stream) + spack.binary_distribution.extract_tarball(spec, tarball_stage, force=False) + + if spec.spliced: # overwrite old metadata with new + spack.store.STORE.layout.write_spec(spec, spack.store.STORE.layout.spec_file_path(spec)) + + # now a block of curious things follows that should be fixed. + pkg = spec.package + if hasattr(pkg, "_post_buildcache_install_hook"): + pkg._post_buildcache_install_hook() + pkg.installed_from_binary_cache = True + + return True + + +class PrefixPivoter: + """Manages the installation prefix during overwrite installations.""" + + def __init__(self, prefix: str, overwrite: bool, keep_prefix: bool = False) -> None: + """Initialize the prefix pivoter.
+ + Args: + prefix: The installation prefix path + overwrite: Whether to allow overwriting an existing prefix + keep_prefix: Whether to keep a failed installation prefix (when not overwriting) + """ + self.prefix = prefix + #: Whether to allow installation when the prefix exists + self.overwrite = overwrite + #: Whether to keep a failed installation prefix + self.keep_prefix = keep_prefix + #: Temporary location for the original prefix during overwrite + self.tmp_prefix: Optional[str] = None + self.parent = os.path.dirname(prefix) + + def __enter__(self) -> "PrefixPivoter": + """Enter the context: move existing prefix to temporary location if needed.""" + if not self._lexists(self.prefix): + return self + if not self.overwrite: + raise spack.error.InstallError(f"Install prefix {self.prefix} already exists") + # Move the existing prefix to a temporary location + self.tmp_prefix = self._mkdtemp( + dir=self.parent, prefix=".", suffix=OVERWRITE_BACKUP_SUFFIX + ) + self._rename(self.prefix, self.tmp_prefix) + return self + + def __exit__( + self, exc_type: Optional[type], exc_val: Optional[BaseException], exc_tb: Optional[object] + ) -> None: + """Exit the context: cleanup on success, restore on failure.""" + if exc_type is None: + # Success: remove the backup in case of overwrite + if self.tmp_prefix is not None: + self._rmtree_ignore_errors(self.tmp_prefix) + return + + # Failure handling: + # Priority 1: If we're overwriting, always restore the original prefix + # Priority 2: If keep_prefix is False, remove the failed installation + + if self.overwrite and self.tmp_prefix is not None: + # Overwrite case: restore the original prefix if it existed + # The highest priority is to restore the original prefix, so we try to: + # rename prefix -> garbage: move failed dir out of the way + # rename tmp_prefix -> prefix: restore original prefix + # remove garbage (this is allowed to fail) + garbage = self._mkdtemp(dir=self.parent, prefix=".", suffix=OVERWRITE_GARBAGE_SUFFIX) + try: + self._rename(self.prefix, garbage) + has_failed_prefix = True + except FileNotFoundError: # prefix dir does not exist, so we don't have to delete it. 
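+ # (An illustrative scenario, not an exhaustive one: the build may have failed + # before the install directory was ever recreated.)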
has_failed_prefix = False + self._rename(self.tmp_prefix, self.prefix) + if has_failed_prefix: + self._rmtree_ignore_errors(garbage) + elif not self.keep_prefix and self._lexists(self.prefix): + # Not overwriting, keep_prefix is False: remove the failed installation + garbage = self._mkdtemp(dir=self.parent, prefix=".", suffix=OVERWRITE_GARBAGE_SUFFIX) + self._rename(self.prefix, garbage) + self._rmtree_ignore_errors(garbage) + # else: keep_prefix is True, leave the failed prefix in place + + def _lexists(self, path: str) -> bool: + return os.path.lexists(path) + + def _rename(self, src: str, dst: str) -> None: + os.rename(src, dst) + + def _mkdtemp(self, dir: str, prefix: str, suffix: str) -> str: + return tempfile.mkdtemp(dir=dir, prefix=prefix, suffix=suffix) + + def _rmtree_ignore_errors(self, path: str) -> None: + shutil.rmtree(path, ignore_errors=True) + + +def worker_function( + spec: spack.spec.Spec, + explicit: bool, + mirrors: List[spack.url_buildcache.MirrorMetadata], + unsigned: Optional[bool], + install_policy: InstallPolicy, + dirty: bool, + keep_stage: bool, + restage: bool, + overwrite: bool, + keep_prefix: bool, + skip_patch: bool, + state: Connection, + parent: Connection, + echo_control: Connection, + makeflags: str, + js1: Optional[Connection], + js2: Optional[Connection], + store: spack.store.Store, + config: spack.config.Configuration, +): + """ + Function run in the build child process. Installs the specified spec, sending state updates + and build output back to the parent process. + + Args: + spec: Spec to install + explicit: Whether the spec was explicitly requested by the user + mirrors: List of buildcache mirrors to try + unsigned: Whether to allow unsigned buildcache entries + install_policy: ``"auto"``, ``"cache_only"``, or ``"source_only"`` + dirty: Whether to preserve user environment in the build environment + keep_stage: Whether to keep the build stage after installation + restage: Whether to restage the source before building + overwrite: Whether to overwrite the existing install prefix + keep_prefix: Whether to keep a failed installation prefix + skip_patch: Whether to skip the patch phase + state: Connection to send state updates to + parent: Connection to send build output to + echo_control: Connection to receive echo control messages from + makeflags: MAKEFLAGS to set, so that the build process uses the POSIX jobserver + js1: Connection for old style jobserver read fd (if any). Unused, just to inherit fd. + js2: Connection for old style jobserver write fd (if any). Unused, just to inherit fd. + store: global store instance from parent + config: global config instance from parent + """ + + # TODO: don't start a build for external packages + if spec.external: + return + + os.environ["MAKEFLAGS"] = makeflags + spack.store.STORE = store + spack.config.CONFIG = config + spack.paths.set_working_dir() + + # Create a log file in the root of the stage dir. + log_fd, log_path = tempfile.mkstemp( + prefix=f"spack-stage-{spec.name}-{spec.dag_hash()}-", + suffix=".log", + dir=spack.stage.get_stage_root(), + ) + tee = Tee(echo_control, parent, log_fd) + + # Use closefd=False because of the connection objects. Use line buffering.
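+ # For reference, send_state/send_progress write newline-delimited JSON to this + # stream, e.g. (illustrative values): + #   {"state":"staging"} + #   {"progress":512,"total":1024}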
+ state_stream = os.fdopen(state.fileno(), "w", buffering=1, closefd=False) + exit_code = 0 + + try: + with PrefixPivoter(spec.prefix, overwrite, keep_prefix): + _install( + spec, + explicit, + mirrors, + unsigned, + install_policy, + dirty, + keep_stage, + restage, + skip_patch, + state_stream, + log_path, + store, + ) + except Exception: + traceback.print_exc() # log the traceback to the log file + exit_code = 1 + finally: + tee.close() + state_stream.close() + + if exit_code == 0 and not os.path.lexists(spec.package.install_log_path): + # Try to install the compressed log file + try: + with open(log_path, "rb") as f, open(spec.package.install_log_path, "wb") as g: + # Use GzipFile directly so we can omit filename / mtime in header + gzip_file = GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=g) + shutil.copyfileobj(f, gzip_file) + gzip_file.close() + os.unlink(log_path) + except Exception: + pass # don't fail the build just because log compression failed + + sys.exit(exit_code) + + +def _install( + spec: spack.spec.Spec, + explicit: bool, + mirrors: List[spack.url_buildcache.MirrorMetadata], + unsigned: Optional[bool], + install_policy: InstallPolicy, + dirty: bool, + keep_stage: bool, + restage: bool, + skip_patch: bool, + state_stream: io.TextIOWrapper, + log_path: str, + store: spack.store.Store = spack.store.STORE, +) -> None: + """Install a spec from build cache or source.""" + + # Create the stage and log file before starting the tee thread. + pkg = spec.package + + # Try to install from buildcache, unless user asked for source only + if install_policy != "source_only": + if mirrors and install_from_buildcache(mirrors, spec, unsigned, state_stream): + spack.hooks.post_install(spec, explicit) + return + elif install_policy == "cache_only": + # Binary required but not available + send_state("no binary available", state_stream) + raise spack.error.InstallError(f"No binary available for {spec}") + + spack.build_environment.setup_package(pkg, dirty=dirty) + store.layout.create_install_directory(spec) + + stage = pkg.stage + stage.keep = keep_stage + + # Then try a source build. + with stage: + if restage: + stage.destroy() + stage.create() + + os.symlink(log_path, pkg.log_path) + + send_state("staging", state_stream) + + if not skip_patch: + pkg.do_patch() + else: + pkg.do_stage() + + os.chdir(stage.source_path) + + spack.hooks.pre_install(spec) + + for phase in spack.builder.create(pkg): + send_state(phase.name, state_stream) + phase.execute() + + spack.hooks.post_install(spec, explicit) + + +class JobServer: + """Attach to an existing POSIX jobserver or create a FIFO-based one.""" + + def __init__(self, num_jobs: int) -> None: + #: Keep track of how many tokens Spack itself has acquired, which is used to release them. + self.tokens_acquired = 0 + self.num_jobs = num_jobs + self.fifo_path: Optional[str] = None + self.created = False + self._setup() + # Ensure that Executable()(...) in build processes ultimately inherit jobserver fds. + os.set_inheritable(self.r, True) + os.set_inheritable(self.w, True) + # r_conn and w_conn are used to make build processes inherit the jobserver fds if needed. + # Connection objects close the fd as they are garbage collected, so store them. + self.r_conn = Connection(self.r) + self.w_conn = Connection(self.w) + + def _setup(self) -> None: + + fifo_config = get_jobserver_config() + + if type(fifo_config) is str: + # FIFO-based jobserver. Try to open the FIFO. 
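+ # (get_jobserver_config returned a FIFO path parsed from something like + # MAKEFLAGS=" -j8 --jobserver-auth=fifo:/tmp/xyz/jobserver_fifo"; values illustrative.)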
+ open_attempt = open_existing_jobserver_fifo(fifo_config) + if open_attempt: + self.r, self.w = open_attempt + self.fifo_path = fifo_config + return + elif type(fifo_config) is tuple: + # Old style pipe-based jobserver. Validate the fds before using them. + r, w = fifo_config + if fcntl.fcntl(r, fcntl.F_GETFD) != -1 and fcntl.fcntl(w, fcntl.F_GETFD) != -1: + self.r, self.w = r, w + return + + # No existing jobserver we can connect to: create a FIFO-based one. + self.r, self.w, self.fifo_path = create_jobserver_fifo(self.num_jobs) + self.created = True + + def makeflags(self, gmake: Optional[spack.spec.Spec]) -> str: + """Return the MAKEFLAGS for a build process, depending on its gmake build dependency.""" + if self.fifo_path and (not gmake or gmake.satisfies("@4.4:")): + return f" -j{self.num_jobs} --jobserver-auth=fifo:{self.fifo_path}" + elif not gmake or gmake.satisfies("@4.0:"): + return f" -j{self.num_jobs} --jobserver-auth={self.r},{self.w}" + else: + return f" -j{self.num_jobs} --jobserver-fds={self.r},{self.w}" + + def acquire(self, jobs: int) -> int: + """Try and acquire at most 'jobs' tokens from the jobserver. Returns the number of + tokens actually acquired (may be less than requested, or zero).""" + try: + num_acquired = len(os.read(self.r, jobs)) + self.tokens_acquired += num_acquired + return num_acquired + except BlockingIOError: + return 0 + + def release(self) -> None: + """Release a token back to the jobserver.""" + # The last job to quit has an implicit token, so don't release if we have none. + if self.tokens_acquired == 0: + return + os.write(self.w, b"+") + self.tokens_acquired -= 1 + + def close(self) -> None: + # Remove the FIFO if we created it. + if self.created and self.fifo_path: + try: + os.unlink(self.fifo_path) + except OSError: + pass + try: + os.rmdir(os.path.dirname(self.fifo_path)) + except OSError: + pass + # TODO: implement a sanity check here: + # 1. did we release all tokens we acquired? + # 2. if we created the jobserver, did the children return all tokens? + self.r_conn.close() + self.w_conn.close() + + +def start_build( + spec: spack.spec.Spec, + explicit: bool, + mirrors: List[spack.url_buildcache.MirrorMetadata], + unsigned: Optional[bool], + install_policy: InstallPolicy, + dirty: bool, + keep_stage: bool, + restage: bool, + overwrite: bool, + keep_prefix: bool, + skip_patch: bool, + jobserver: JobServer, +) -> ChildInfo: + """Start a new build.""" + # Create pipes for the child's output, state reporting, and control. + state_r_conn, state_w_conn = Pipe(duplex=False) + output_r_conn, output_w_conn = Pipe(duplex=False) + control_r_conn, control_w_conn = Pipe(duplex=False) + + # Obtain the MAKEFLAGS to be set in the child process, and determine whether it's necessary + # for the child process to inherit our jobserver fds. + gmake = next(iter(spec.dependencies("gmake")), None) + makeflags = jobserver.makeflags(gmake) + fifo = "--jobserver-auth=fifo:" in makeflags + + proc = Process( + target=worker_function, + args=( + spec, + explicit, + mirrors, + unsigned, + install_policy, + dirty, + keep_stage, + restage, + overwrite, + keep_prefix, + skip_patch, + state_w_conn, + output_w_conn, + control_r_conn, + makeflags, + None if fifo else jobserver.r_conn, + None if fifo else jobserver.w_conn, + spack.store.STORE, + spack.config.CONFIG, + ), + ) + proc.start() + + # The parent process does not need the write ends of the main pipes or the read end of control. 
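+ # (If we kept the write ends open, reads in the parent would never see EOF + # after the child exits.)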
+ state_w_conn.close() + output_w_conn.close() + control_r_conn.close() + + # Set the read ends to non-blocking: in principle redundant with epoll/kqueue, but safer. + os.set_blocking(output_r_conn.fileno(), False) + os.set_blocking(state_r_conn.fileno(), False) + + return ChildInfo(proc, spec, output_r_conn, state_r_conn, control_w_conn, explicit) + + +def get_jobserver_config(makeflags: Optional[str] = None) -> Optional[Union[str, Tuple[int, int]]]: + """Parse MAKEFLAGS for jobserver. Either it's a FIFO or (r, w) pair of file descriptors. + + Args: + makeflags: MAKEFLAGS string to parse. If None, reads from os.environ. + """ + makeflags = os.environ.get("MAKEFLAGS", "") if makeflags is None else makeflags + if not makeflags: + return None + # We can have the following flags: + # --jobserver-fds=R,W (before GNU make 4.2) + # --jobserver-auth=fifo:PATH or --jobserver-auth=R,W (after GNU make 4.2) + # In case of multiple, the last one wins. + matches = re.findall(r" --jobserver-[^=]+=([^ ]+)", makeflags) + if not matches: + return None + last_match: str = matches[-1] + assert isinstance(last_match, str) + if last_match.startswith("fifo:"): + return last_match[5:] + parts = last_match.split(",", 1) + if len(parts) != 2: + return None + try: + return int(parts[0]), int(parts[1]) + except ValueError: + return None + + +def create_jobserver_fifo(num_jobs: int) -> Tuple[int, int, str]: + """Create a new jobserver FIFO with the specified number of job tokens.""" + tmpdir = tempfile.mkdtemp() + fifo_path = os.path.join(tmpdir, "jobserver_fifo") + + try: + os.mkfifo(fifo_path, 0o600) + read_fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK) + write_fd = os.open(fifo_path, os.O_WRONLY) + # write num_jobs - 1 tokens, because the first job is implicit + os.write(write_fd, b"+" * (num_jobs - 1)) + return read_fd, write_fd, fifo_path + except Exception: + try: + os.unlink(fifo_path) + except OSError as e: + spack.llnl.util.tty.debug(f"Failed to remove POSIX jobserver FIFO: {e}", level=3) + pass + try: + os.rmdir(tmpdir) + except OSError as e: + spack.llnl.util.tty.debug(f"Failed to remove POSIX jobserver FIFO dir: {e}", level=3) + pass + raise + + +def open_existing_jobserver_fifo(fifo_path: str) -> Optional[Tuple[int, int]]: + """Open an existing jobserver FIFO for reading and writing.""" + try: + read_fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK) + write_fd = os.open(fifo_path, os.O_WRONLY) + return read_fd, write_fd + except OSError: + return None + + +class FdInfo: + """Information about a file descriptor mapping.""" + + __slots__ = ("pid", "name") + + def __init__(self, pid: int, name: str) -> None: + self.pid = pid + self.name = name + + +class BuildInfo: + """Information about a package being built.""" + + __slots__ = ( + "state", + "explicit", + "version", + "hash", + "name", + "external", + "prefix", + "finished_time", + "progress_percent", + "control_w_conn", + ) + + def __init__(self, spec: spack.spec.Spec, explicit: bool, control_w_conn: Connection) -> None: + self.state: str = "starting" + self.explicit: bool = explicit + self.version: str = str(spec.version) + self.hash: str = spec.dag_hash(7) + self.name: str = spec.name + self.external: bool = spec.external + self.prefix: str = spec.prefix + self.finished_time: Optional[float] = None + self.progress_percent: Optional[int] = None + self.control_w_conn = control_w_conn + + +class BuildStatus: + """Tracks the build status display for terminal output.""" + + def __init__( + self, + total: int, + stdout: io.TextIOWrapper = 
sys.stdout, # type: ignore[assignment] + get_terminal_size: Callable[[], Tuple[int, int]] = os.get_terminal_size, + get_time: Callable[[], float] = time.monotonic, + is_tty: Optional[bool] = None, + ) -> None: + #: Ordered dict of build ID -> info + self.total = total + self.completed = 0 + self.builds: Dict[str, BuildInfo] = {} + self.finished_builds: List[BuildInfo] = [] + self.spinner_chars = ["|", "/", "-", "\\"] + self.spinner_index = 0 + self.dirty = True # Start dirty to draw initial state + self.active_area_rows = 0 + self.total_lines = 0 + self.next_spinner_update = 0.0 + self.next_update = 0.0 + self.overview_mode = True # Whether to draw the package overview + self.tracked_build_id = "" # identifier of the package whose logs we follow + self.search_term = "" + self.search_mode = False + + self.stdout = stdout + self.get_terminal_size = get_terminal_size + self.get_time = get_time + self.is_tty = is_tty if is_tty is not None else self.stdout.isatty() + + def add_build(self, spec: spack.spec.Spec, explicit: bool, control_w_conn: Connection) -> None: + """Add a new build to the display and mark the display as dirty.""" + self.builds[spec.dag_hash()] = BuildInfo(spec, explicit, control_w_conn) + self.dirty = True + + def toggle(self) -> None: + """Toggle between overview mode and following a specific build.""" + if self.overview_mode: + self.next() + else: + self.active_area_rows = 0 + self.search_term = "" + self.search_mode = False + self.overview_mode = True + self.dirty = True + try: + os.write(self.builds[self.tracked_build_id].control_w_conn.fileno(), b"0") + except (KeyError, OSError): + pass + self.tracked_build_id = "" + + def search_input(self, input: str) -> None: + """Handle keyboard input when in search mode""" + if input in ("\r", "\n"): + self.next(1) + elif input == "\x1b": # Escape + self.search_mode = False + self.search_term = "" + self.dirty = True + elif input in ("\x7f", "\b"): # Backspace + self.search_term = self.search_term[:-1] + self.dirty = True + elif input.isprintable(): + self.search_term += input + self.dirty = True + + def enter_search(self) -> None: + self.search_mode = True + self.dirty = True + + def _is_displayed(self, build: BuildInfo) -> bool: + """Returns true if the build matches the search term, or when no search term is set.""" + # When not in search mode, the search_term is "", which always evaluates to True below + return self.search_term in build.name or build.hash.startswith(self.search_term) + + def _get_next(self, direction: int) -> Optional[str]: + """Returns the next or previous unfinished build ID matching the search term, or None if + none found. Direction should be 1 for next, -1 for previous.""" + matching = [ + build_id + for build_id, build in self.builds.items() + if build.finished_time is None and self._is_displayed(build) + ] + if not matching: + return None + try: + idx = matching.index(self.tracked_build_id) + except ValueError: + return matching[0] if direction == 1 else matching[-1] + + return matching[(idx + direction) % len(matching)] + + def next(self, direction: int = 1) -> None: + """Follow the logs of the next build in the list.""" + new_build_id = self._get_next(direction) + + if not new_build_id or self.tracked_build_id == new_build_id: + return + + new_build = self.builds[new_build_id] + + if self.overview_mode: + self.overview_mode = False + + # Stop following the previous and start following the new build. 
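+ # The control pipe protocol is a single byte: b"1" makes the child's tee thread + # echo its logs to us, any other byte (we send b"0") stops the echoing.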
+ if self.tracked_build_id: + try: + os.write(self.builds[self.tracked_build_id].control_w_conn.fileno(), b"0") + except (KeyError, OSError): + pass + + self.tracked_build_id = new_build_id + + # Tell the user we're following new logs, and instruct the child to start sending them. + self.stdout.write( + f"\n==> Following logs of {new_build.name}" f"\033[0;36m@{new_build.version}\033[0m\n" + ) + self.stdout.flush() + try: + os.write(new_build.control_w_conn.fileno(), b"1") + except (KeyError, OSError): + pass + + def update_state(self, build_id: str, state: str) -> None: + """Update the state of a package and mark the display as dirty.""" + build_info = self.builds[build_id] + build_info.state = state + build_info.progress_percent = None + + if state in ("finished", "failed"): + self.completed += 1 + build_info.finished_time = self.get_time() + CLEANUP_TIMEOUT + + if build_id == self.tracked_build_id and not self.overview_mode: + self.toggle() + + self.dirty = True + + # For non-TTY output, print state changes immediately without colors + if not self.is_tty: + self.stdout.write( + f"{build_info.hash} {build_info.name}@{build_info.version}: {state}\n" + ) + self.stdout.flush() + + def update_progress(self, build_id: str, current: int, total: int) -> None: + """Update the progress of a package and mark the display as dirty.""" + percent = int((current / total) * 100) + build_info = self.builds[build_id] + if build_info.progress_percent != percent: + build_info.progress_percent = percent + self.dirty = True + + def update(self, finalize: bool = False) -> None: + """Redraw the interactive display.""" + if not self.is_tty or not self.overview_mode: + return + + now = self.get_time() + + # Avoid excessive redraws + if not finalize and now < self.next_update: + return + + # Only update the spinner if there are still running packages + if now >= self.next_spinner_update and any( + pkg.finished_time is None for pkg in self.builds.values() + ): + self.spinner_index = (self.spinner_index + 1) % len(self.spinner_chars) + self.dirty = True + self.next_spinner_update = now + SPINNER_INTERVAL + + for build_id in list(self.builds): + build_info = self.builds[build_id] + if build_info.state == "failed" or build_info.finished_time is None: + continue + + if finalize or now >= build_info.finished_time: + self.finished_builds.append(build_info) + del self.builds[build_id] + self.dirty = True + + if not self.dirty: + return + + # Build the overview output in a buffer and print all at once to avoid flickering. + buffer = io.StringIO() + + # Move cursor up to the start of the display area + if self.active_area_rows > 0: + buffer.write(f"\033[{self.active_area_rows}F") + + max_width, max_height = self.get_terminal_size() + + self.total_lines = 0 + total_finished = len(self.finished_builds) + + # First flush the finished builds. These are "persisted" in terminal history. + for build in self.finished_builds: + self._render_build(build, buffer, max_width) + self.finished_builds.clear() + + # Then a header followed by the active builds. This is the "mutable" part of the display. 
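+ # Width is checked against a plain-text copy of the header, since the ANSI escape + # sequences below (e.g. \033[1m bold, \033[36m cyan, \033[0m reset) are zero-width.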
+ long_header_len = len( + f"Progress: {self.completed}/{self.total} /: filter v: logs n/p: next/prev" + ) + if long_header_len < max_width: + self._println( + buffer, + f"\033[1mProgress:\033[0m {self.completed}/{self.total}" + " \033[36m/\033[0m: filter \033[36mv\033[0m: logs" + " \033[36mn\033[0m/\033[36mp\033[0m: next/prev", + ) + else: + self._println(buffer, f"\033[1mProgress:\033[0m {self.completed}/{self.total}") + + displayed_builds = ( + [b for b in self.builds.values() if self._is_displayed(b)] + if self.search_term + else self.builds.values() + ) + len_builds = len(displayed_builds) + + # Truncate if we have more builds than fit on the screen. In that case we have to reserve + # an additional line for the "N more..." message. + truncate_at = max_height - 3 if len_builds + 2 > max_height else len_builds + + for i, build in enumerate(displayed_builds, 1): + if i > truncate_at: + self._println(buffer, f"{len_builds - i + 1} more...") + break + self._render_build(build, buffer, max_width) + + if self.search_mode: + buffer.write(f"filter> {self.search_term}\033[K") + + # Clear any remaining lines from previous display + buffer.write("\033[0J") + + # Print everything at once to avoid flickering + self.stdout.write(buffer.getvalue()) + self.stdout.flush() + + # Update the number of lines drawn for next time. It reflects the number of active builds. + self.active_area_rows = self.total_lines - total_finished + self.dirty = False + + # Schedule next UI update + self.next_update = now + SPINNER_INTERVAL / 2 + + def _println(self, buffer: io.StringIO, line: str = "") -> None: + """Print a line to the buffer, handling line clearing and cursor movement.""" + self.total_lines += 1 + if line: + buffer.write(line) + if self.total_lines > self.active_area_rows: + buffer.write("\033[0m\033[K\n") # reset, clear to EOL, newline + else: + buffer.write("\033[0m\033[K\033[1E") # reset, clear to EOL, move down 1 line + + def print_logs(self, build_id: str, data: bytes) -> None: + # Discard logs we are not following. Generally this should not happen as we tell the child + # to only send logs when we are following it. It could maybe happen while transitioning + # between builds. + if self.overview_mode or build_id != self.tracked_build_id: + return + # TODO: drop initial bytes from data until first newline (?) + self.stdout.buffer.write(data) + self.stdout.flush() + + def _render_build(self, build_info: BuildInfo, buffer: io.StringIO, max_width: int) -> None: + line_width = 0 + for component in self._generate_line_components(build_info): + # ANSI escape sequence(s), does not contribute to width + if not component.startswith("\033"): + line_width += len(component) + if line_width > max_width: + break + buffer.write(component) + self._println(buffer) + + def _generate_line_components(self, build_info: BuildInfo) -> Generator[str, None, None]: + """Yield formatted line components for a package. 
Escape sequences are yielded as separate + strings so they do not contribute to the line width.""" + if build_info.external: + indicator = "[e]" + elif build_info.state == "finished": + indicator = "[+]" + elif build_info.state == "failed": + indicator = "[x]" + else: + indicator = f"[{self.spinner_chars[self.spinner_index]}]" + + if build_info.state == "failed": + yield "\033[31m" # red + elif build_info.state == "finished": + yield "\033[32m" # green + + yield indicator + yield "\033[0m" # reset + yield " " + yield "\033[0;90m" # dark gray + yield build_info.hash + yield "\033[0m" # reset + yield " " + + # Package name in bold white if explicit, default otherwise + if build_info.explicit: + yield "\033[1;37m" # bold white + yield build_info.name + yield "\033[0m" # reset + else: + yield build_info.name + + yield "\033[0;36m" # cyan + yield f"@{build_info.version}" + yield "\033[0m" # reset + + # progress or state + if build_info.progress_percent is not None: + yield " fetching" + yield f": {build_info.progress_percent}%" + elif build_info.state == "finished": + yield f" {build_info.prefix}" + else: + yield f" {build_info.state}" + + +Nodes = Dict[str, spack.spec.Spec] +Edges = Dict[str, Set[str]] + + +class BuildGraph: + """Represents the dependency graph for package installation.""" + + def __init__( + self, + specs: List[spack.spec.Spec], + root_policy: InstallPolicy, + dependencies_policy: InstallPolicy, + include_build_deps: bool, + install_package: bool, + install_deps: bool, + database: spack.database.Database, + overwrite_set: Optional[Set[str]] = None, + ): + """Construct a build graph from the given specs. This includes only packages that need to + be installed. Installed packages are pruned from the graph, and build dependencies are only + included when necessary.""" + self.roots = {s.dag_hash() for s in specs} + self.nodes = {s.dag_hash(): s for s in specs} + self.parent_to_child: Dict[str, Set[str]] = {} + self.child_to_parent: Dict[str, Set[str]] = {} + overwrite_set = overwrite_set or set() + specs_to_prune: Set[str] = set() + stack: List[Tuple[spack.spec.Spec, InstallPolicy]] = [ + (s, root_policy) for s in self.nodes.values() + ] + + with database.read_transaction(): + # Set the install prefix for each spec based on the db record or store layout + for s in spack.traverse.traverse_nodes(specs): + _, record = database.query_by_spec_hash(s.dag_hash()) + if record and record.path: + s.set_prefix(record.path) + else: + s.set_prefix(spack.store.STORE.layout.path_for_spec(s)) + + # Build the graph and determine which specs to prune + while stack: + spec, install_policy = stack.pop() + key = spec.dag_hash() + _, record = database.query_by_spec_hash(key) + + # Conditionally include build dependencies + if record and record.installed and key not in overwrite_set: + specs_to_prune.add(key) + dependencies = spec.dependencies(deptype=dt.LINK | dt.RUN) + elif install_policy == "cache_only" and not include_build_deps: + dependencies = spec.dependencies(deptype=dt.LINK | dt.RUN) + else: + dependencies = spec.dependencies(deptype=dt.BUILD | dt.LINK | dt.RUN) + + self.parent_to_child[key] = {d.dag_hash() for d in dependencies} + + # Enqueue new dependencies + for d in dependencies: + if d.dag_hash() in self.nodes: + continue + self.nodes[d.dag_hash()] = d + stack.append((d, dependencies_policy)) + + # Construct reverse lookup from child to parent + for parent, children in self.parent_to_child.items(): + for child in children: + if child in self.child_to_parent: + 
self.child_to_parent[child].add(parent) + else: + self.child_to_parent[child] = {parent} + + # If we're not installing the package itself, mark root specs for pruning too + if not install_package: + specs_to_prune.update(s.dag_hash() for s in specs) + + # Prune specs from the build graph. Their parents become parents of their children and + # their children become children of their parents. + for key in specs_to_prune: + for parent in self.child_to_parent.get(key, ()): + self.parent_to_child[parent].remove(key) + self.parent_to_child[parent].update(self.parent_to_child.get(key, ())) + for child in self.parent_to_child.get(key, ()): + self.child_to_parent[child].remove(key) + self.child_to_parent[child].update(self.child_to_parent.get(key, ())) + self.parent_to_child.pop(key, None) + self.child_to_parent.pop(key, None) + self.nodes.pop(key, None) + + # If we're not installing dependencies, verify that all remaining nodes in the build graph + # after pruning are roots. If there are any non-root nodes, it means there are uninstalled + # dependencies that we're not supposed to install. + if not install_deps: + non_root_spec = next((v for k, v in self.nodes.items() if k not in self.roots), None) + if non_root_spec is not None: + raise spack.error.InstallError( + f"Failed to install in package only mode: dependency {non_root_spec} is not " + "installed" + ) + + def enqueue_parents(self, dag_hash: str, pending_builds: List[str]) -> None: + """After a spec is installed, remove it from the graph and enqueue any parents that are + now ready to install. + + Args: + dag_hash: The dag_hash of the spec that was just installed + pending_builds: List to append parent specs that are ready to build + """ + # Remove node and edges from the node in the build graph + self.parent_to_child.pop(dag_hash, None) + self.nodes.pop(dag_hash, None) + parents = self.child_to_parent.pop(dag_hash, None) + + if not parents: + return + + # Enqueue any parents and remove edges to the installed child + for parent in parents: + children = self.parent_to_child[parent] + children.remove(dag_hash) + if not children: + pending_builds.append(parent) + + +class PackageInstaller: + + def __init__( + self, + packages: List["spack.package_base.PackageBase"], + *, + dirty: bool = False, + explicit: Union[Set[str], bool] = False, + overwrite: Optional[Union[List[str], Set[str]]] = None, + fail_fast: bool = False, + fake: bool = False, + include_build_deps: bool = False, + install_deps: bool = True, + install_package: bool = True, + install_source: bool = False, + keep_prefix: bool = False, + keep_stage: bool = False, + restage: bool = True, + skip_patch: bool = False, + stop_at: Optional[str] = None, + stop_before: Optional[str] = None, + tests: Union[bool, List[str], Set[str]] = False, + unsigned: Optional[bool] = None, + verbose: bool = False, + concurrent_packages: Optional[int] = None, + root_policy: InstallPolicy = "auto", + dependencies_policy: InstallPolicy = "auto", + ) -> None: + assert install_package or install_deps, "Must install package, dependencies or both" + + if fail_fast: + raise NotImplementedError("Fail-fast installs are not implemented") + elif fake: + raise NotImplementedError("Fake installs are not implemented") + elif install_source: + raise NotImplementedError("Installing sources is not implemented") + elif stop_at is not None: + raise NotImplementedError("Stopping at an install phase is not implemented") + elif stop_before is not None: + raise NotImplementedError("Stopping before an install phase is not 
implemented") + elif tests is not False: + raise NotImplementedError("Tests during install are not implemented") + # verbose and concurrent_packages are not worth erroring out for + + specs = [pkg.spec for pkg in packages] + + self.root_policy: InstallPolicy = root_policy + self.dependencies_policy: InstallPolicy = dependencies_policy + self.include_build_deps = include_build_deps + #: Set of DAG hashes to overwrite (if already installed) + self.overwrite: Set[str] = set(overwrite) if overwrite else set() + self.keep_prefix = keep_prefix + + # Buffer for incoming, partially received state data from child processes + self.state_buffers: Dict[int, str] = {} + + # Build the dependency graph + self.build_graph = BuildGraph( + specs, + root_policy, + dependencies_policy, + include_build_deps, + install_package, + install_deps, + spack.store.STORE.db, + self.overwrite, + ) + + #: check what specs we could fetch from binaries (checks against cache, not remotely) + spack.binary_distribution.BINARY_INDEX.update() + self.binary_cache_for_spec = { + s.dag_hash(): spack.binary_distribution.BINARY_INDEX.find_by_hash(s.dag_hash()) + for s in self.build_graph.nodes.values() + } + self.unsigned = unsigned + self.dirty = dirty + self.restage = restage + self.keep_stage = keep_stage + self.skip_patch = skip_patch + + #: queue of packages ready to install (no children) + self.pending_builds = [ + parent for parent, children in self.build_graph.parent_to_child.items() if not children + ] + + if explicit is True: + self.explicit = {spec.dag_hash() for spec in specs} + elif explicit is False: + self.explicit = set() + else: + self.explicit = explicit + + self.running_builds: Dict[int, ChildInfo] = {} + self.build_status = BuildStatus(len(self.build_graph.nodes)) + self.jobs = spack.config.determine_number_of_jobs(parallel=True) + self.reports: Dict[str, spack.report.RequestRecord] = {} + + def install(self) -> None: + # This installer does not implement per-spec exclusive locks during installation. + # Instead, take an exclusive lock on the entire range so that no other Spack install + # process starts installing the same specs. + lock = spack.util.lock.Lock( + str(spack.store.STORE.prefix_locker.lock_path), desc="prefix lock" + ) + lock.acquire_write() + try: + self._installer() + finally: + lock.release_write() + + def _installer(self) -> None: + jobserver = JobServer(self.jobs) + + # Put the terminal in cbreak mode for key press detection + if sys.stdin.isatty(): + old_stdin_settings = termios.tcgetattr(sys.stdin) + tty.setcbreak(sys.stdin.fileno()) + else: + old_stdin_settings = None + + selector = selectors.DefaultSelector() + selector.register(sys.stdin.fileno(), selectors.EVENT_READ, "stdin") + + # Setup the database write lock. TODO: clean this up + if isinstance(spack.store.STORE.db.lock, spack.util.lock.Lock): + spack.store.STORE.db.lock._ensure_parent_directory() + spack.store.STORE.db.lock._file = spack.llnl.util.lock.FILE_TRACKER.get_fh( + spack.store.STORE.db.lock.path + ) + + to_insert_in_database: List[ChildInfo] = [] + failures: List[spack.spec.Spec] = [] + + try: + # Start the first job immediately, as it does not require a jobserver token. + if self.pending_builds and not self.running_builds: + self._start(selector, jobserver) + + while self.pending_builds or self.running_builds or to_insert_in_database: + # Only monitor the jobserver if we have pending builds.
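+ # A readable jobserver fd means a token is available, so registering it with the + # selector wakes the event loop; unregistering avoids wakeups we cannot act on.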
+ if self.pending_builds and jobserver.r not in selector.get_map(): + selector.register(jobserver.r, selectors.EVENT_READ, "jobserver") + elif not self.pending_builds and jobserver.r in selector.get_map(): + selector.unregister(jobserver.r) + + jobserver_token_available = False + stdin_ready = False + + events = selector.select(timeout=SPINNER_INTERVAL) + + finished_pids = [] + + for key, _ in events: + data = key.data + if isinstance(data, FdInfo): + # Child output (logs and state updates) + child_info = self.running_builds[data.pid] + if data.name == "output": + self._handle_child_logs(key.fd, child_info, selector) + elif data.name == "state": + self._handle_child_state(key.fd, child_info, selector) + elif data.name == "sentinel": + finished_pids.append(data.pid) + elif data == "jobserver": + jobserver_token_available = True + elif data == "stdin": + stdin_ready = True + + for pid in finished_pids: + build = self.running_builds.pop(pid) + jobserver.release() + build.cleanup(selector) + if build.proc.exitcode == 0: + to_insert_in_database.append(build) + self.build_status.update_state(build.spec.dag_hash(), "finished") + else: + failures.append(build.spec) + self.build_status.update_state(build.spec.dag_hash(), "failed") + + if stdin_ready: + try: + char = sys.stdin.read(1) + except OSError: + continue + overview = self.build_status.overview_mode + if overview and self.build_status.search_mode: + self.build_status.search_input(char) + elif overview and char == "/": + self.build_status.enter_search() + elif char == "v" or char in ("q", "\x1b") and not overview: + self.build_status.toggle() + elif char == "n": + self.build_status.next(1) + elif char == "p" or char == "N": + self.build_status.next(-1) + + # Flush installed packages to the database and enqueue any parents that are now + # ready. + if to_insert_in_database and self._save_to_db(to_insert_in_database): + for entry in to_insert_in_database: + self.build_graph.enqueue_parents( + entry.spec.dag_hash(), self.pending_builds + ) + to_insert_in_database.clear() + + # Again, the first job should start immediately and does not require a token. + if self.pending_builds and not self.running_builds: + self._start(selector, jobserver) + + # For the rest we try to obtain tokens from the jobserver. + if self.pending_builds and jobserver_token_available: + # Then we try to schedule as many jobs as we can acquire tokens for. + max_new_jobs = len(self.pending_builds) + for _ in range(jobserver.acquire(max_new_jobs)): + self._start(selector, jobserver) + + # Finally update the UI + self.build_status.update() + except KeyboardInterrupt: + # Cleanup running builds. + for child in self.running_builds.values(): + child.proc.join() + raise + finally: + # Restore terminal settings + if old_stdin_settings: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_stdin_settings) + + # Clean up resources + # Final cleanup of any remaining finished packages before exit + self.build_status.overview_mode = True + self.build_status.update(finalize=True) + selector.close() + jobserver.close() + + if failures: + lines = [f"{s}: {s.package.log_path}" for s in failures] + raise spack.error.InstallError( + "The following packages failed to install:\n" + "\n".join(lines) + ) + + def _save_to_db(self, to_insert_in_database: List[ChildInfo]) -> bool: + db = spack.store.STORE.db + try: + # Only try to get the lock once (non-blocking). If it fails, try it next time. 
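+ # (The near-zero timeout turns acquire_write into a single non-blocking attempt.)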
+ if db.lock.acquire_write(timeout=1e-9): + db._read() + except spack.util.lock.LockTimeoutError: + return False + try: + for entry in to_insert_in_database: + db._add(entry.spec, explicit=entry.explicit) + return True + finally: + db.lock.release_write(db._write) + + def _start(self, selector: selectors.BaseSelector, jobserver: JobServer) -> None: + dag_hash = self.pending_builds.pop() + explicit = dag_hash in self.explicit + spec = self.build_graph.nodes[dag_hash] + is_develop = spec.is_develop + child_info = start_build( + spec, + explicit=explicit, + mirrors=self.binary_cache_for_spec[dag_hash], + unsigned=self.unsigned, + install_policy=( + self.root_policy + if dag_hash in self.build_graph.roots + else self.dependencies_policy + ), + dirty=self.dirty, + # keep_stage/restage logic taken from installer.py + keep_stage=self.keep_stage or is_develop, + restage=self.restage and not is_develop, + overwrite=dag_hash in self.overwrite, + keep_prefix=self.keep_prefix, + skip_patch=self.skip_patch, + jobserver=jobserver, + ) + pid = child_info.proc.pid + assert type(pid) is int + self.running_builds[pid] = child_info + selector.register( + child_info.output_r_conn.fileno(), selectors.EVENT_READ, FdInfo(pid, "output") + ) + selector.register( + child_info.state_r_conn.fileno(), selectors.EVENT_READ, FdInfo(pid, "state") + ) + selector.register(child_info.proc.sentinel, selectors.EVENT_READ, FdInfo(pid, "sentinel")) + self.build_status.add_build( + child_info.spec, explicit=explicit, control_w_conn=child_info.control_w_conn + ) + + def _handle_child_logs( + self, r_fd: int, child_info: ChildInfo, selector: selectors.BaseSelector + ) -> None: + """Handle reading output logs from a child process pipe.""" + try: + # There might be more data than OUTPUT_BUFFER_SIZE, but we will read that in the next + # iteration of the event loop to keep things responsive. + data = os.read(r_fd, OUTPUT_BUFFER_SIZE) + except OSError: + data = None + + if not data: # EOF or error + try: + selector.unregister(r_fd) + except KeyError: + pass + return + + self.build_status.print_logs(child_info.spec.dag_hash(), data) + + def _handle_child_state( + self, r_fd: int, child_info: ChildInfo, selector: selectors.BaseSelector + ) -> None: + """Handle reading state updates from a child process pipe.""" + try: + # There might be more data than OUTPUT_BUFFER_SIZE, but we will read that in the next + # iteration of the event loop to keep things responsive. + data = os.read(r_fd, OUTPUT_BUFFER_SIZE) + except OSError: + data = None + + if not data: # EOF or error + try: + selector.unregister(r_fd) + except KeyError: + pass + self.state_buffers.pop(r_fd, None) + return + + # Append new data to the buffer for this fd and process it + buffer = self.state_buffers.get(r_fd, "") + data.decode(errors="replace") + lines = buffer.split("\n") + + # The last element of split() will be a partial line or an empty string. + # We store it back in the buffer for the next read. 
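+ # E.g. receiving b'{"state":"staging"}\n{"progr' yields one complete message now + # and leaves '{"progr' buffered until the rest of the line arrives (illustrative).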
+ self.state_buffers[r_fd] = lines.pop() + + for line in lines: + if not line: + continue + message = json.loads(line) + if "state" in message: + self.build_status.update_state(child_info.spec.dag_hash(), message["state"]) + elif "progress" in message and "total" in message: + self.build_status.update_progress( + child_info.spec.dag_hash(), message["progress"], message["total"] + ) diff --git a/lib/spack/spack/oci/image.py b/lib/spack/spack/oci/image.py index 7ed1259047c895..f6389cfafe4452 100644 --- a/lib/spack/spack/oci/image.py +++ b/lib/spack/spack/oci/image.py @@ -48,6 +48,11 @@ def ensure_sha256_checksum(oci_blob: str): return checksum +def is_oci_url(url: str) -> bool: + """Check if the URL is an OCI URL.""" + return url.startswith("oci://") or url.startswith("oci+http://") + + class Digest: """Represents a digest in the format :. Currently only supports sha256 digests.""" @@ -80,18 +85,25 @@ class ImageReference: The digest is optional, and domain and tag are automatically filled out with defaults when parsed from string.""" - __slots__ = ["domain", "name", "tag", "digest"] + __slots__ = ["scheme", "domain", "name", "tag", "digest"] def __init__( - self, *, domain: str, name: str, tag: str = "latest", digest: Optional[Digest] = None + self, + *, + domain: str, + name: str, + tag: str = "latest", + digest: Optional[Digest] = None, + scheme: str = "https", ): + self.scheme = scheme self.domain = domain self.name = name self.tag = tag self.digest = digest @classmethod - def from_string(cls, string) -> "ImageReference": + def from_string(cls, string: str, *, scheme: str = "https") -> "ImageReference": match = referencePat.match(string) if not match: raise ValueError(f"Invalid image reference: {string}") @@ -142,36 +154,53 @@ def from_string(cls, string) -> "ImageReference": if isinstance(digest, str): digest = Digest.from_string(digest) - return cls(domain=domain, name=name, tag=tag, digest=digest) + return cls(domain=domain, name=name, tag=tag, digest=digest, scheme=scheme) + + @classmethod + def from_url(cls, url: str) -> "ImageReference": + """Parse an OCI URL into an ImageReference, either oci:// or oci+http://.""" + if url.startswith("oci://"): + img = url[6:] + scheme = "https" + elif url.startswith("oci+http://"): + img = url[11:] + scheme = "http" + else: + raise ValueError(f"Invalid OCI URL: {url}") + return cls.from_string(img, scheme=scheme) def manifest_url(self) -> str: digest_or_tag = self.digest or self.tag - return f"https://{self.domain}/v2/{self.name}/manifests/{digest_or_tag}" + return f"{self.scheme}://{self.domain}/v2/{self.name}/manifests/{digest_or_tag}" def blob_url(self, digest: Union[str, Digest]) -> str: if isinstance(digest, str): digest = Digest.from_string(digest) - return f"https://{self.domain}/v2/{self.name}/blobs/{digest}" + return f"{self.scheme}://{self.domain}/v2/{self.name}/blobs/{digest}" def with_digest(self, digest: Union[str, Digest]) -> "ImageReference": if isinstance(digest, str): digest = Digest.from_string(digest) - return ImageReference(domain=self.domain, name=self.name, tag=self.tag, digest=digest) + return ImageReference( + domain=self.domain, name=self.name, tag=self.tag, digest=digest, scheme=self.scheme + ) def with_tag(self, tag: str) -> "ImageReference": - return ImageReference(domain=self.domain, name=self.name, tag=tag, digest=self.digest) + return ImageReference( + domain=self.domain, name=self.name, tag=tag, digest=self.digest, scheme=self.scheme + ) def uploads_url(self, digest: Optional[Digest] = None) -> str: - url = 
f"https://{self.domain}/v2/{self.name}/blobs/uploads/" + url = f"{self.scheme}://{self.domain}/v2/{self.name}/blobs/uploads/" if digest: url += f"?digest={digest}" return url def tags_url(self) -> str: - return f"https://{self.domain}/v2/{self.name}/tags/list" + return f"{self.scheme}://{self.domain}/v2/{self.name}/tags/list" def endpoint(self, path: str = "") -> str: - return urllib.parse.urljoin(f"https://{self.domain}/v2/", path) + return urllib.parse.urljoin(f"{self.scheme}://{self.domain}/v2/", path) def __str__(self) -> str: s = f"{self.domain}/{self.name}" @@ -189,6 +218,7 @@ def __eq__(self, __value: object) -> bool: and self.name == __value.name and self.tag == __value.tag and self.digest == __value.digest + and self.scheme == __value.scheme ) diff --git a/lib/spack/spack/oci/oci.py b/lib/spack/spack/oci/oci.py index 16c2e098dfdf92..0f4c4bb2d13ee5 100644 --- a/lib/spack/spack/oci/oci.py +++ b/lib/spack/spack/oci/oci.py @@ -7,7 +7,6 @@ import os import urllib.error import urllib.parse -import urllib.request from http.client import HTTPResponse from typing import List, NamedTuple, Tuple from urllib.request import Request @@ -214,10 +213,7 @@ def upload_manifest( def image_from_mirror(mirror: spack.mirrors.mirror.Mirror) -> ImageReference: """Given an OCI based mirror, extract the URL and image name from it""" - url = mirror.push_url - if not url.startswith("oci://"): - raise ValueError(f"Mirror {mirror} is not an OCI mirror") - return ImageReference.from_string(url[6:]) + return ImageReference.from_url(mirror.push_url) def blob_exists( diff --git a/lib/spack/spack/oci/opener.py b/lib/spack/spack/oci/opener.py index abbf09cbcfb570..76e7c1a6cc023a 100644 --- a/lib/spack/spack/oci/opener.py +++ b/lib/spack/spack/oci/opener.py @@ -97,6 +97,14 @@ def __eq__(self, other: object) -> bool: and self.params == other.params ) + def matches_scheme(self, scheme: str) -> bool: + """Checks whether the challenge matches the given scheme, case-insensitive.""" + return self.scheme == scheme.lower() + + def get_param(self, key: str) -> Optional[str]: + """Get the value of an auth param by key, or None if not found.""" + return next((v for k, v in self.params if k == key.lower()), None) + def parse_www_authenticate(input: str): """Very basic parsing of www-authenticate parsing (RFC7235 section 4.1) @@ -119,7 +127,7 @@ def parse_www_authenticate(input: str): def extract_auth_param(input: str) -> Tuple[str, str]: key, value = input.split("=", 1) - key = key.rstrip() + key = key.rstrip().lower() value = value.lstrip() if value.startswith('"'): value = unquote(value) @@ -132,7 +140,7 @@ def extract_auth_param(input: str) -> Tuple[str, str]: if token.kind == WwwAuthenticateTokens.EOF: raise ValueError(token) elif token.kind == WwwAuthenticateTokens.TOKEN: - current_challenge.scheme = token.value + current_challenge.scheme = token.value.lower() mode = State.AUTH_PARAM_LIST_START else: raise ValueError(token) @@ -176,7 +184,7 @@ def extract_auth_param(input: str) -> Tuple[str, str]: raise ValueError(token) elif token.kind == WwwAuthenticateTokens.TOKEN: challenges.append(current_challenge) - current_challenge = Challenge(token.value) + current_challenge = Challenge(token.value.lower()) mode = State.AUTH_PARAM_LIST_START elif token.kind == WwwAuthenticateTokens.AUTH_PARAM: key, value = extract_auth_param(token.value) @@ -196,18 +204,25 @@ class UsernamePassword(NamedTuple): username: str password: str + @property + def basic_auth_header(self) -> str: + encoded = 
base64.b64encode(f"{self.username}:{self.password}".encode("utf-8")).decode( + "utf-8" + ) + return f"Basic {encoded}" -def get_bearer_challenge(challenges: List[Challenge]) -> Optional[RealmServiceScope]: - # Find a challenge that we can handle (currently only Bearer) - challenge = next((c for c in challenges if c.scheme == "Bearer"), None) + +def _get_bearer_challenge(challenges: List[Challenge]) -> Optional[RealmServiceScope]: + """Return the realm/service/scope for a Bearer auth challenge, or None if not found.""" + challenge = next((c for c in challenges if c.matches_scheme("Bearer")), None) if challenge is None: return None # Get realm / service / scope from challenge - realm = next((v for k, v in challenge.params if k == "realm"), None) - service = next((v for k, v in challenge.params if k == "service"), None) - scope = next((v for k, v in challenge.params if k == "scope"), None) + realm = challenge.get_param("realm") + service = challenge.get_param("service") + scope = challenge.get_param("scope") if realm is None or service is None or scope is None: return None @@ -215,6 +230,16 @@ def get_bearer_challenge(challenges: List[Challenge]) -> Optional[RealmServiceSc return RealmServiceScope(realm, service, scope) +def _get_basic_challenge(challenges: List[Challenge]) -> Optional[str]: + """Return the realm for a Basic auth challenge, or None if not found.""" + challenge = next((c for c in challenges if c.matches_scheme("Basic")), None) + + if challenge is None: + return None + + return challenge.get_param("realm") + + class OCIAuthHandler(urllib.request.BaseHandler): def __init__(self, credentials_provider: Callable[[str], Optional[UsernamePassword]]): """ @@ -223,53 +248,8 @@ def __init__(self, credentials_provider: Callable[[str], Optional[UsernamePasswo """ self.credentials_provider = credentials_provider - # Cached bearer tokens for a given domain. - self.cached_tokens: Dict[str, str] = {} - - def obtain_bearer_token(self, registry: str, challenge: RealmServiceScope, timeout) -> str: - # See https://docs.docker.com/registry/spec/auth/token/ - - query = urllib.parse.urlencode( - {"service": challenge.service, "scope": challenge.scope, "client_id": "spack"} - ) - - parsed = urllib.parse.urlparse(challenge.realm)._replace( - query=query, fragment="", params="" - ) - - # Don't send credentials over insecure transport. - if parsed.scheme != "https": - raise ValueError( - f"Cannot login to {registry} over insecure {parsed.scheme} connection" - ) - - request = Request(urllib.parse.urlunparse(parsed)) - - # I guess we shouldn't cache this, since we don't know - # the context in which it's used (may depend on config) - pair = self.credentials_provider(registry) - - if pair is not None: - encoded = base64.b64encode(f"{pair.username}:{pair.password}".encode("utf-8")).decode( - "utf-8" - ) - request.add_unredirected_header("Authorization", f"Basic {encoded}") - - # Do a GET request. - response = self.parent.open(request, timeout=timeout) - - # Read the response and parse the JSON - response_json = json.load(response) - - # Get the token from the response - token = response_json["token"] - - # Remember the last obtained token for this registry - # Note: we should probably take into account realm, service and scope - # so we can store multiple tokens for the same registry. - self.cached_tokens[registry] = token - - return token + # Cached authorization headers for a given domain. 
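+ # Values are complete header values, e.g. "Bearer eyJhbGciOi..." or + # "Basic dXNlcjpwYXNz" (illustrative tokens).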
+ self.cached_auth_headers: Dict[str, str] = {} def https_request(self, req: Request): # Eagerly add the bearer token to the request if no @@ -282,14 +262,65 @@ def https_request(self, req: Request): return req parsed = urllib.parse.urlparse(req.full_url) - token = self.cached_tokens.get(parsed.netloc) + auth_header = self.cached_auth_headers.get(parsed.netloc) - if not token: + if not auth_header: return req - req.add_unredirected_header("Authorization", f"Bearer {token}") + req.add_unredirected_header("Authorization", auth_header) return req + def _try_bearer_challenge( + self, + challenges: List[Challenge], + credentials: Optional[UsernamePassword], + timeout: Optional[float], + ) -> Optional[str]: + # Check whether a Bearer challenge is present in the WWW-Authenticate header + challenge = _get_bearer_challenge(challenges) + if not challenge: + return None + + # Get the token from the auth handler + query = urllib.parse.urlencode( + {"service": challenge.service, "scope": challenge.scope, "client_id": "spack"} + ) + parsed = urllib.parse.urlparse(challenge.realm)._replace( + query=query, fragment="", params="" + ) + + # Don't send credentials over insecure transport. + if parsed.scheme != "https": + raise ValueError(f"Cannot login over insecure {parsed.scheme} connection") + + request = Request(urllib.parse.urlunparse(parsed), method="GET") + + if credentials is not None: + request.add_unredirected_header("Authorization", credentials.basic_auth_header) + + # Do a GET request. + response = self.parent.open(request, timeout=timeout) + try: + response_json = json.load(response) + token = response_json.get("token") + if token is None: + token = response_json.get("access_token") + assert type(token) is str + except Exception as e: + raise ValueError(f"Malformed token response from {challenge.realm}") from e + return f"Bearer {token}" + + def _try_basic_challenge( + self, challenges: List[Challenge], credentials: UsernamePassword + ) -> Optional[str]: + # Check whether a Basic challenge is present in the WWW-Authenticate header + # A realm is required for Basic auth, although we don't use it here. Leave this as a + # validation step. 
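A quick aside on how these helper pieces compose. The following is a hedged, illustrative sketch (not part of this change) of resolving a 401 challenge by hand with the helpers introduced above (parse_www_authenticate, Challenge.matches_scheme, Challenge.get_param, and UsernamePassword.basic_auth_header); the header value and credentials are hypothetical:

    # Hypothetical WWW-Authenticate value from a 401 response
    header = 'Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:img:pull"'
    challenges = parse_www_authenticate(header)  # schemes and param keys come back lowercased

    bearer = next((c for c in challenges if c.matches_scheme("Bearer")), None)
    if bearer is not None:
        # realm/service/scope drive the token request, as in _try_bearer_challenge
        realm, service, scope = (bearer.get_param(k) for k in ("realm", "service", "scope"))
    else:
        # otherwise fall back to HTTP Basic, as in _try_basic_challenge
        auth_header = UsernamePassword("user", "secret").basic_auth_header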
+ realm = _get_basic_challenge(challenges) + if not realm: + return None + return credentials.basic_auth_header + def http_error_401(self, req: Request, fp, code, msg, headers): # Login failed, avoid infinite recursion where we go back and # forth between auth server and registry @@ -305,47 +336,46 @@ def http_error_401(self, req: Request, fp, code, msg, headers): req, code, "Cannot login to registry, missing WWW-Authenticate header", headers, fp ) - header_value = headers["WWW-Authenticate"] + www_auth_str = headers["WWW-Authenticate"] try: - challenge = get_bearer_challenge(parse_www_authenticate(header_value)) + challenges = parse_www_authenticate(www_auth_str) except ValueError as e: raise spack.util.web.DetailedHTTPError( req, code, - f"Cannot login to registry, malformed WWW-Authenticate header: {header_value}", + f"Cannot login to registry, malformed WWW-Authenticate header: {www_auth_str}", headers, fp, ) from e - # If there is no bearer challenge, we can't handle it - if not challenge: - raise spack.util.web.DetailedHTTPError( - req, - code, - f"Cannot login to registry, unsupported authentication scheme: {header_value}", - headers, - fp, - ) + registry = urllib.parse.urlparse(req.get_full_url()).netloc - # Get the token from the auth handler + credentials = self.credentials_provider(registry) + + # First try Bearer, then Basic try: - token = self.obtain_bearer_token( - registry=urllib.parse.urlparse(req.get_full_url()).netloc, - challenge=challenge, - timeout=req.timeout, - ) - except ValueError as e: + auth_header = self._try_bearer_challenge(challenges, credentials, req.timeout) + if not auth_header and credentials: + auth_header = self._try_basic_challenge(challenges, credentials) + except Exception as e: + raise spack.util.web.DetailedHTTPError( + req, code, f"Cannot login to registry: {e}", headers, fp + ) from e + + if not auth_header: raise spack.util.web.DetailedHTTPError( req, code, - f"Cannot login to registry, failed to obtain bearer token: {e}", + f"Cannot login to registry, unsupported authentication scheme: {www_auth_str}", headers, fp, - ) from e + ) - # Add the token to the request - req.add_unredirected_header("Authorization", f"Bearer {token}") + self.cached_auth_headers[registry] = auth_header + + # Add the authorization header to the request + req.add_unredirected_header("Authorization", auth_header) setattr(req, "login_attempted", True) return self.parent.open(req, timeout=req.timeout) @@ -367,10 +397,8 @@ def credentials_from_mirrors( continue url = mirror.get_url(direction) - if not url.startswith("oci://"): - continue try: - parsed = ImageReference.from_string(url[6:]) + parsed = ImageReference.from_url(url) except ValueError: continue if parsed.domain == domain: @@ -384,7 +412,8 @@ def create_opener(): for handler in [ urllib.request.ProxyHandler(), urllib.request.UnknownHandler(), - urllib.request.HTTPSHandler(context=spack.util.web.ssl_create_default_context()), + urllib.request.HTTPHandler(), + spack.util.web.SpackHTTPSHandler(context=spack.util.web.ssl_create_default_context()), spack.util.web.SpackHTTPDefaultErrorHandler(), urllib.request.HTTPRedirectHandler(), urllib.request.HTTPErrorProcessor(), diff --git a/lib/spack/spack/operating_systems/_operating_system.py b/lib/spack/spack/operating_systems/_operating_system.py index c0d8cb26e03b06..8d8c4e40b6605a 100644 --- a/lib/spack/spack/operating_systems/_operating_system.py +++ b/lib/spack/spack/operating_systems/_operating_system.py @@ -16,8 +16,8 @@ class OperatingSystem: There are two different 
types of compiler detection: - 1. Through the $PATH env variable (front-end detection) - 2. Through the module system. (back-end detection) + 1. Through the $PATH env variable (front-end detection) + 2. Through the module system. (back-end detection) Depending on which operating system is specified, the compiler will be detected using one of those methods. diff --git a/lib/spack/spack/operating_systems/mac_os.py b/lib/spack/spack/operating_systems/mac_os.py index a8fef4f0f95e01..e3401d3ded1112 100644 --- a/lib/spack/spack/operating_systems/mac_os.py +++ b/lib/spack/spack/operating_systems/mac_os.py @@ -8,13 +8,13 @@ import spack.llnl.util.lang from spack.util.executable import Executable -from spack.version import Version +from spack.version import StandardVersion, Version from ._operating_system import OperatingSystem @spack.llnl.util.lang.memoized -def macos_version(): +def macos_version() -> StandardVersion: """Get the current macOS version as a version object. This has three mechanisms for determining the macOS version, which is used @@ -43,7 +43,7 @@ def macos_version(): """ env_ver = os.environ.get("MACOSX_DEPLOYMENT_TARGET", None) if env_ver: - return Version(env_ver) + return StandardVersion.from_string(env_ver) try: output = Executable("sw_vers")(output=str, fail_on_error=False) @@ -53,11 +53,11 @@ def macos_version(): else: match = re.search(r"ProductVersion:\s*([0-9.]+)", output) if match: - return Version(match.group(1)) + return StandardVersion.from_string(match.group(1)) # Fall back to python-reported version, which can be inaccurate around # macOS 11 (e.g. showing 10.16 for macOS 12) - return Version(py_platform.mac_ver()[0]) + return StandardVersion.from_string(py_platform.mac_ver()[0]) @spack.llnl.util.lang.memoized @@ -142,6 +142,7 @@ def __init__(self): "13": "ventura", "14": "sonoma", "15": "sequoia", + "26": "tahoe", } version = macos_version() diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 365d9cb9907c4a..80a0c240e0d327 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -98,7 +98,6 @@ join_path, keep_modification_time, library_extensions, - make_package_test_rpath, mkdirp, path_contains_subdirectory, readlink, @@ -119,13 +118,7 @@ from spack.multimethod import default_args, when from spack.operating_systems.linux_distro import kernel_version from spack.operating_systems.mac_os import macos_version -from spack.package_base import ( - PackageBase, - build_system_flags, - env_flags, - inject_flags, - on_package_attributes, -) +from spack.package_base import PackageBase, make_package_test_rpath, on_package_attributes from spack.package_completions import ( bash_completion_path, fish_completion_path, @@ -235,6 +228,50 @@ def filter_system_paths(paths: Iterable[str]) -> List[str]: return _filter_system_paths(paths) +#: Assigning this to :attr:`spack.package_base.PackageBase.flag_handler` means that compiler flags +#: are passed to the build system. This can be used in any package that derives from a build system +#: class that implements :meth:`spack.package_base.PackageBase.flags_to_build_system_args`. +#: +#: See also :func:`env_flags` and :func:`inject_flags`. +#: +#: Example:: +#: +#: from spack.package import * +#: +#: class MyPackage(CMakePackage): +#: flag_handler = build_system_flags +build_system_flags = PackageBase.build_system_flags + +#: Assigning this to :attr:`spack.package_base.PackageBase.flag_handler` means that compiler flags +#: are set as canonical environment variables. 
+#: +#: See also :func:`build_system_flags` and :func:`inject_flags`. +#: +#: Example:: +#: +#: from spack.package import * +#: +#: class MyPackage(MakefilePackage): +#: flag_handler = env_flags +env_flags = PackageBase.env_flags + + +#: This is the default value of :attr:`spack.package_base.PackageBase.flag_handler`, which tells +#: Spack to inject compiler flags through the compiler wrappers, which means that the build system +#: will not see them directly. This is typically a good default, but in rare cases you may need to +#: use :func:`env_flags` or :func:`build_system_flags` instead. +#: +#: See also :func:`build_system_flags` and :func:`env_flags`. +#: +#: Example:: +#: +#: from spack.package import * +#: +#: class MyPackage(MakefilePackage): +#: flag_handler = inject_flags +inject_flags = PackageBase.inject_flags + + api: Dict[str, Tuple[str, ...]] = { "v2.0": ( "BaseBuilder", @@ -572,9 +609,6 @@ def filter_system_paths(paths: Iterable[str]) -> List[str]: "windows_sfn", ] -# Make sure `__all__` and `api` are consistent. -assert __all__ == [symbol for symbols in api.values() for symbol in symbols] - # These are just here for editor support; they may be set when the build env is set up. configure: Executable make_jobs: int diff --git a/lib/spack/spack/package_base.py b/lib/spack/spack/package_base.py index 4941a69a779aee..a53aa39aff3f06 100644 --- a/lib/spack/spack/package_base.py +++ b/lib/spack/spack/package_base.py @@ -1,21 +1,19 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -"""This is where most of the action happens in Spack. - -The spack package class structure is based strongly on Homebrew -(http://brew.sh/), mainly because Homebrew makes it very easy to create -packages. -""" +"""Base class for all Spack packages.""" import base64 import collections import copy +import errno import functools import glob import hashlib import io +import itertools import os +import pathlib import re import sys import textwrap @@ -55,9 +53,14 @@ from spack.compilers.adaptor import DeprecatedCompiler from spack.error import InstallError, NoURLError, PackageError from spack.filesystem_view import YamlFilesystemView -from spack.llnl.util.lang import ClassProperty, classproperty, memoized +from spack.llnl.util.filesystem import ( + AlreadyExistsError, + find_all_shared_libraries, + islink, + symlink, +) +from spack.llnl.util.lang import ClassProperty, classproperty, dedupe, memoized from spack.resource import Resource -from spack.solver.version_order import concretization_version_order from spack.util.package_hash import package_hash from spack.util.typing import SupportsRichComparison from spack.version import GitVersion, StandardVersion, VersionError, is_git_version @@ -99,7 +102,7 @@ def win_add_library_dependent(self): This method should be overridden by packages that produce binaries/libraries/python extension modules/etc that are installed into - directories outside a package's `bin`, `lib`, and `lib64` directories, + directories outside a package's ``bin``, ``lib``, and ``lib64`` directories, but still require linking against one of the packages dependencies, or other components of the package itself. No-op otherwise.
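Returning to the inject_flags/env_flags/build_system_flags aliases defined above: since all three share the (name, flags) -> (injected, env, build_system) contract, a package can also mix behaviors in a custom flag_handler. A hedged sketch with a hypothetical package:

    from spack.package import *


    class MyPackage(MakefilePackage):  # hypothetical package
        def flag_handler(self, name, flags):
            # inject C flags through the compiler wrappers; pass everything
            # else via canonical environment variables
            if name == "cflags":
                return flags + ["-O2"], None, None
            return None, flags, None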
@@ -129,7 +132,7 @@ def windows_establish_runtime_linkage(self): # Spack should in general not modify things it has not installed # we can reasonably expect externals to have their link interface properly established if sys.platform == "win32" and not self.spec.external: - win_rpath = fsys.WindowsSimulatedRPath(self) + win_rpath = WindowsSimulatedRPath(self) win_rpath.add_library_dependent(*self.win_add_library_dependent()) win_rpath.add_rpath(*self.win_add_rpath()) win_rpath.establish_link() @@ -443,14 +446,6 @@ def _num_definitions(when_indexed_dictionary: Dict[spack.spec.Spec, Dict[K, V]]) return sum(len(dictionary) for dictionary in when_indexed_dictionary.values()) -def _precedence(obj) -> int: - """Get either a 'precedence' attribute or item from an object.""" - precedence = getattr(obj, "precedence", None) - if precedence is None: - raise KeyError(f"Couldn't get precedence from {type(obj)}") - return precedence - - def _remove_overridden_defs(defs: List[Tuple[spack.spec.Spec, Any]]) -> None: """Remove definitions from the list if their when specs are satisfied by later ones. @@ -499,7 +494,7 @@ def _definitions( # With multiple definitions, ensure precedence order and simplify overrides if len(defs) > 1: - defs.sort(key=lambda v: _precedence(v[1])) + defs.sort(key=lambda v: getattr(v[1], "precedence", 0)) _remove_overridden_defs(defs) return defs @@ -513,147 +508,112 @@ def __init__(self, source, binary): class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta): - """This is the universal base class for all spack packages. - - ***The Package class*** - - At its core, a package consists of a set of software to be installed. - A package may focus on a piece of software and its associated software - dependencies or it may simply be a set, or bundle, of software. The - former requires defining how to fetch, verify (via, e.g., sha256), build, - and install that software and the packages it depends on, so that - dependencies can be installed along with the package itself. The latter, - sometimes referred to as a ``no-source`` package, requires only defining - the packages to be built. + """This is the universal base class for all Spack packages. - Packages are written in pure Python. + At its core, a package consists of a set of software to be installed. A package may focus on a + piece of software and its associated software dependencies or it may simply be a set, or + bundle, of software. The former requires defining how to fetch, verify (via, e.g., ``sha256``), + build, and install that software and the packages it depends on, so that dependencies can be + installed along with the package itself. The latter, sometimes referred to as a "no-source" + package, requires only defining the packages to be built. There are two main parts of a Spack package: - 1. **The package class**. Classes contain ``directives``, which are special functions, that - add metadata (versions, patches, dependencies, and other information) to packages (see - ``directives.py``). Directives provide the constraints that are used as input to the - concretizer. - - 2. **Package instances**. Once instantiated, a package can be passed to the PackageInstaller. - It calls methods like ``do_stage()`` on the ``Package`` object, and it uses those to drive - user-implemented methods like ``patch()``, ``install()``, and other build steps. To - install software, an instantiated package needs a *concrete* spec, which guides the - behavior of the various install methods. 
- - Packages are imported from repos (see ``repo.py``). - - **Package DSL** - - Look in ``lib/spack/docs`` or check https://spack.readthedocs.io for - the full documentation of the package domain-specific language. That - used to be partially documented here, but as it grew, the docs here - became increasingly out of date. + 1. **The package class**. Classes contain *directives*, which are functions such as + :py:func:`spack.package.version`, :py:func:`spack.package.patch`, and + :py:func:`spack.package.depends_on`, that store metadata on the package class. Directives + provide the constraints that are used as input to the concretizer. - **Package Lifecycle** + 2. **Package instances**. Once instantiated with a concrete spec, a package can be passed to + the :py:class:`spack.installer.PackageInstaller`. It calls methods like :meth:`do_stage` on + the package instance, and it uses those to drive user-implemented methods like ``def patch`` + and install phases like ``def configure`` and ``def install``. - A package's lifecycle over a run of Spack looks something like this: + Packages are imported from package repositories (see :py:mod:`spack.repo`). - .. code-block:: python + For most use cases, package creators typically just add attributes like ``homepage`` and, for + a code-based package, ``url``, or installation phases such as ``install()``. + There are many custom ``PackageBase`` subclasses in the ``spack_repo.builtin.build_systems`` + package that make things even easier for specific build systems. - p = Package() # Done for you by spack + .. note:: - p.do_fetch() # downloads tarball from a URL (or VCS) - p.do_stage() # expands tarball in a temp directory - p.do_patch() # applies patches to expanded source - p.do_uninstall() # removes install directory - - although packages that do not have code have nothing to fetch so omit - ``p.do_fetch()``. - - There are also some other commands that clean the build area: - - .. code-block:: python - - p.do_clean() # removes the stage directory entirely - p.do_restage() # removes the build directory and - # re-expands the archive. - - The convention used here is that a ``do_*`` function is intended to be - called internally by Spack commands (in ``spack.cmd``). These aren't for - package writers to override, and doing so may break the functionality - of the Package class. - - Package creators have a lot of freedom, and they could technically - override anything in this class. That is not usually required. - - For most use cases. Package creators typically just add attributes - like ``homepage`` and, for a code-based package, ``url``, or functions - such as ``install()``. - There are many custom ``Package`` subclasses in the - ``spack_repo.builtin.build_systems`` package that make things even easier for - specific build systems. - - """ + Many methods and attributes that appear to be public interface are not meant to be + overridden by packagers. They are "final", but we currently have not adopted the ``@final`` + decorator in the Spack codebase. For example, the ``do_*`` functions are intended only to be + called internally by Spack commands. These aren't for package writers to override, and + doing so may break the functionality of the ``PackageBase`` class.""" compiler = DeprecatedCompiler() - # - # These are default values for instance variables. - # - - # Declare versions dictionary as placeholder for values. - # This allows analysis tools to correctly interpret the class attributes. 
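The class-level dictionaries annotated below are populated by directives at class-definition time. A rough, hypothetical sketch of that correspondence (package name, hash, and shapes illustrative and abridged):

    from spack.package import *


    class Example(Package):  # hypothetical package
        version("1.2.0", sha256="<sha256>")
        variant("compress", default=True, description="enable compression")
        depends_on("zlib", when="+compress")

    # Roughly: Example.versions maps Version("1.2.0") -> {"sha256": "<sha256>"},
    # while Example.variants and Example.dependencies are keyed by when-spec
    # (here Spec("+compress")), matching the annotations below.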
+ #: Class level dictionary populated by :func:`~spack.directives.version` directives versions: dict + #: Class level dictionary populated by :func:`~spack.directives.resource` directives resources: Dict[spack.spec.Spec, List[Resource]] + #: Class level dictionary populated by :func:`~spack.directives.depends_on` and + #: :func:`~spack.directives.extends` directives dependencies: Dict[spack.spec.Spec, Dict[str, spack.dependency.Dependency]] + #: Class level dictionary populated by :func:`~spack.directives.extends` directives + extendees: Dict[str, Tuple[spack.spec.Spec, spack.spec.Spec]] + #: Class level dictionary populated by :func:`~spack.directives.conflicts` directives conflicts: Dict[spack.spec.Spec, List[Tuple[spack.spec.Spec, Optional[str]]]] + #: Class level dictionary populated by :func:`~spack.directives.requires` directives requirements: Dict[ spack.spec.Spec, List[Tuple[Tuple[spack.spec.Spec, ...], str, Optional[str]]] ] + #: Class level dictionary populated by :func:`~spack.directives.provides` directives provided: Dict[spack.spec.Spec, Set[spack.spec.Spec]] + #: Class level dictionary populated by :func:`~spack.directives.provides` directives provided_together: Dict[spack.spec.Spec, List[Set[str]]] + #: Class level dictionary populated by :func:`~spack.directives.patch` directives patches: Dict[spack.spec.Spec, List[spack.patch.Patch]] + #: Class level dictionary populated by :func:`~spack.directives.variant` directives variants: Dict[spack.spec.Spec, Dict[str, spack.variant.Variant]] - languages: Dict[spack.spec.Spec, Set[str]] + #: Class level dictionary populated by :func:`~spack.directives.license` directives licenses: Dict[spack.spec.Spec, str] + #: Class level dictionary populated by :func:`~spack.directives.can_splice` directives splice_specs: Dict[spack.spec.Spec, Tuple[spack.spec.Spec, Union[None, str, List[str]]]] - - #: Store whether a given Spec source/binary should not be redistributed. + #: Class level dictionary populated by :func:`~spack.directives.redistribute` directives disable_redistribute: Dict[spack.spec.Spec, DisableRedistribute] - #: Must be defined as a fallback for old specs that don't have the `build_system` variant + #: Must be defined as a fallback for old specs that don't have the ``build_system`` variant default_buildsystem: str - # Use :attr:`default_buildsystem` instead of this attribute, which is deprecated + #: Use :attr:`~spack.package_base.PackageBase.default_buildsystem` instead of this attribute, + #: which is deprecated legacy_buildsystem: str - #: Must be defined in derived classes. Used when reporting the build system to users - build_system_class: str + #: Used when reporting the build system to users + build_system_class: str = "PackageBase" #: By default, packages are not virtual #: Virtual packages override this attribute - virtual = False + virtual: bool = False #: Most Spack packages are used to install source or binary code while #: those that do not can be used to install a set of other Spack packages. - has_code = True + has_code: bool = True #: By default we build in parallel. Subclasses can override this. - parallel = True + parallel: bool = True #: By default do not run tests within package's install() - run_tests = False + run_tests: bool = False #: Most packages are NOT extendable. Set to True if you want extensions. - extendable = False + extendable: bool = False #: When True, add RPATHs for the entire DAG. When False, add RPATHs only #: for immediate dependencies. 
- transitive_rpaths = True + transitive_rpaths: bool = True #: List of shared objects that should be replaced with a different library at - #: runtime. Typically includes stub libraries like libcuda.so. When linking + #: runtime. Typically includes stub libraries like ``libcuda.so``. When linking #: against a library listed here, the dependent will only record its soname #: or filename, not its absolute path, so that the dynamic linker will search #: for it. Note: accepts both file names and directory names, for example - #: ``["libcuda.so", "stubs"]`` will ensure libcuda.so and all libraries in the - #: stubs directory are not bound by path.""" + #: ``["libcuda.so", "stubs"]`` will ensure ``libcuda.so`` and all libraries in the + #: ``stubs`` directory are not bound by path. non_bindable_shared_objects: List[str] = [] #: List of fnmatch patterns of library file names (specifically DT_NEEDED entries) that are not @@ -677,7 +637,7 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta): #: Boolean. Set to ``True`` for packages that require a manual download. #: This is currently used by package sanity tests and generation of a #: more meaningful fetch failure error. - manual_download = False + manual_download: bool = False #: Set of additional options used when fetching package versions. fetch_options: Dict[str, Any] = {} @@ -685,29 +645,29 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta): # # Set default licensing information # - #: Boolean. If set to ``True``, this software requires a license. + #: If set to ``True``, this software requires a license. #: If set to ``False``, all of the ``license_*`` attributes will #: be ignored. Defaults to ``False``. - license_required = False + license_required: bool = False - #: String. Contains the symbol used by the license manager to denote + #: Contains the symbol used by the license manager to denote #: a comment. Defaults to ``#``. - license_comment = "#" + license_comment: str = "#" - #: List of strings. These are files that the software searches for when + #: These are files that the software searches for when #: looking for a license. All file paths must be relative to the #: installation directory. More complex packages like Intel may require #: multiple licenses for individual components. Defaults to the empty list. license_files: List[str] = [] - #: List of strings. Environment variables that can be set to tell the + #: Environment variables that can be set to tell the #: software where to look for a license if it is not in the usual location. #: Defaults to the empty list. license_vars: List[str] = [] - #: String. A URL pointing to license setup instructions for the software. + #: A URL pointing to license setup instructions for the software. #: Defaults to the empty string. - license_url = "" + license_url: str = "" #: Verbosity level, preserved across installs. _verbose = None @@ -719,23 +679,23 @@ class PackageBase(WindowsRPath, PackageViewMixin, metaclass=PackageMeta): list_url: ClassProperty[Optional[str]] = None #: Link depth to which list_url should be searched for new versions - list_depth = 0 + list_depth: int = 0 - #: List of strings which contains GitHub usernames of package maintainers. + #: List of GitHub usernames of package maintainers. #: Do not include @ here in order not to unnecessarily ping the users. maintainers: List[str] = [] #: Set to ``True`` to indicate the stand-alone test requires a compiler. 
- #: It is used to ensure a compiler and build dependencies like 'cmake' + #: It is used to ensure a compiler and build dependencies like ``cmake`` #: are available to build a custom test code. test_requires_compiler: bool = False #: TestSuite instance used to manage stand-alone tests for 1+ specs. test_suite: Optional[Any] = None - def __init__(self, spec): + def __init__(self, spec: spack.spec.Spec) -> None: # this determines how the package should be built. - self.spec: spack.spec.Spec = spec + self.spec = spec # Allow custom staging paths for packages self.path = None @@ -752,7 +712,8 @@ def __init__(self, spec): # init internal variables self._stage: Optional[stg.StageComposite] = None - self._patch_stages = [] # need to track patch stages separately, in order to apply them + # need to track patch stages separately, in order to apply them + self._patch_stages: List[stg.Stage] = [] self._fetcher = None self._tester: Optional[Any] = None @@ -824,6 +785,19 @@ def get_variant(self, name: str) -> spack.variant.Variant: except StopIteration: raise ValueError(f"No variant '{name}' on spec: {self.spec}") + @classmethod + def validate_variant_names(cls, spec: spack.spec.Spec): + """Check that all variant names on Spec exist in this package. + + Raises ``UnknownVariantError`` if invalid variants are on the spec. + """ + names = cls.variant_names() + for v in spec.variants: + if v not in names: + raise spack.variant.UnknownVariantError( + f"No such variant '{v}' in package {cls.name}", [v] + ) + @classproperty def package_dir(cls): """Directory where the package.py file lives.""" @@ -924,10 +898,11 @@ def keep_werror(self) -> Optional[Literal["all", "specific", "none"]]: """Keep ``-Werror`` flags, matches ``config:flags:keep_werror`` to override config. Valid return values are: + * ``"all"``: keep all ``-Werror`` flags. * ``"specific"``: keep only ``-Werror=specific-warning`` flags. * ``"none"``: filter out all ``-Werror*`` flags. - * ``None``: respect the user's configuration (``"none"`` by default). + * :data:`None`: respect the user's configuration (``"none"`` by default). """ if self.spec.satisfies("%nvhpc@:23.3"): # Filtering works by replacing -Werror with -Wno-error, but older nvhpc and @@ -969,10 +944,9 @@ def nearest_url(self, version): This uses the following precedence order: - 1. Find the next lowest or equal version with a URL. - 2. If no lower URL, return the next *higher* URL. - 3. If no higher URL, return None. - + 1. Find the next lowest or equal version with a URL. + 2. If no lower URL, return the next *higher* URL. + 3. If no higher URL, return None. """ version_urls = self.version_urls() @@ -988,15 +962,11 @@ def nearest_url(self, version): return last_url - def url_for_version(self, version): - """Returns a URL from which the specified version of this package - may be downloaded. + def url_for_version(self, version: Union[str, StandardVersion]) -> str: + """Returns a URL from which the specified version of this package may be downloaded. - version: class Version - The version for which a URL is sought. - - See Class Version (version.py) - """ + Arguments: + version: The version for which a URL is sought.""" return self._implement_all_urls_for_version(version)[0] def _update_external_dependencies( @@ -1042,24 +1012,20 @@ def needs_commit(cls, version) -> bool: return False - def resolve_binary_provenance(self) -> None: - """ - Method to ensure concrete spec has binary provenance. - Base implementation will look up git commits when appropriate.
- Packages may override this implementation for custom implementations - """ + @classmethod + def _resolve_git_provenance(cls, spec) -> None: # early return cases, don't overwrite user intention # commit pre-assigned or develop specs don't need commits changed # since this would create un-necessary churn - if "commit" in self.spec.variants or self.spec.is_develop: + if "commit" in spec.variants or spec.is_develop: return - if is_git_version(str(self.spec.version)): - ref = self.spec.version.ref + if is_git_version(str(spec.version)): + ref = spec.version.ref else: - v_attrs = self.versions.get(self.spec.version, {}) + v_attrs = cls.versions.get(spec.version, {}) if "commit" in v_attrs: - self.spec.variants["commit"] = spack.variant.SingleValuedVariant( + spec.variants["commit"] = spack.variant.SingleValuedVariant( "commit", v_attrs["commit"] ) return @@ -1067,33 +1033,43 @@ def resolve_binary_provenance(self) -> None: if not ref: raise VersionError( - f"{self.name}'s version {str(self.spec.version)} " + f"{spec.name}'s version {str(spec.version)} " "is missing a git ref (commit, tag or branch)" ) # Look for commits in the following places: - # 1) stage, (cheap, local, static) - # 2) mirror archive file, (cheapish, local, staticish) - # 3) URL (cheap, remote, dynamic) - # If users pre-stage, or use a mirror they can expect consistent commit resolution + # 1) mirror archive file, (cheapish, local, staticish) + # 2) URL (cheap, remote, dynamic) + # + # If users pre-stage (_LOCAL_CACHE), or use a mirror they can expect + # consistent commit resolution sha = None - if self.stage.expanded: - sha = spack.util.git.get_commit_sha(self.stage.source_path, ref) - if not sha: - try: - self.do_fetch(mirror_only=True) - except spack.error.FetchError: - pass - if self.stage.archive_file: - sha = spack.util.archive.retrieve_commit_from_archive(self.stage.archive_file, ref) + # construct a package instance to get fetch/staging together + pkg_instance = cls(spec.copy()) + try: + pkg_instance.do_fetch(mirror_only=True) + except spack.error.FetchError: + pass + if pkg_instance.stage.archive_file: + sha = spack.util.archive.retrieve_commit_from_archive( + pkg_instance.stage.archive_file, ref + ) if not sha: - url = self.version_or_package_attr("git", self.spec.version) + url = cls.version_or_package_attr("git", spec.version) sha = spack.util.git.get_commit_sha(url, ref) if sha: - self.spec.variants["commit"] = spack.variant.SingleValuedVariant("commit", sha) + spec.variants["commit"] = spack.variant.SingleValuedVariant("commit", sha) + + def resolve_binary_provenance(self): + """ + Method to ensure concrete spec has binary provenance. + Base implementation will look up git commits when appropriate. + Packages may override this implementation for custom implementations + """ + self._resolve_git_provenance(self.spec) def all_urls_for_version(self, version: StandardVersion) -> List[str]: """Return all URLs derived from version_urls(), url, urls, and @@ -1159,15 +1135,13 @@ def sub_and_add(u: Optional[str]) -> None: return urls - def find_valid_url_for_version(self, version): - """Returns a URL from which the specified version of this package - may be downloaded after testing whether the url is valid. Will try - url, urls, and list_url before failing. + def find_valid_url_for_version(self, version: StandardVersion) -> Optional[str]: + """Returns a URL from which the specified version of this package may be downloaded after + testing whether the url is valid. 
Will try ``url``, ``urls``, and :attr:`list_url` + before failing. - version: class Version - The version for which a URL is sought. - - See Class Version (version.py) + Arguments: + version: The version for which a URL is sought. """ urls = self.all_urls_for_version(version) @@ -1425,10 +1399,8 @@ def dependencies_of_type(cls, deptypes: dt.DepFlag): # TODO: allow more than one active extendee. @property - def extendee_spec(self): - """ - Spec of the extendee of this package, or None if it is not an extension - """ + def extendee_spec(self) -> Optional[spack.spec.Spec]: + """Spec of the extendee of this package, or None if it is not an extension.""" if not self.extendees: return None @@ -1464,7 +1436,7 @@ def is_extension(self): # If not, then it's an extension if it *could* be an extension return bool(self.extendees) - def extends(self, spec): + def extends(self, spec: spack.spec.Spec) -> bool: """ Returns True if this package extends the given spec. @@ -1477,9 +1449,9 @@ def extends(self, spec): if spec.name not in self.extendees: return False s = self.extendee_spec - return s and spec.satisfies(s) + return s is not None and spec.satisfies(s) - def provides(self, vpkg_name): + def provides(self, vpkg_name: str) -> bool: """ True if this package provides a virtual package with the specified name """ @@ -1489,6 +1461,65 @@ def provides(self, vpkg_name): if self.spec.intersects(when_spec) ) + def intersects(self, spec: spack.spec.Spec) -> bool: + """Context-ful intersection that takes into account package information. + + By design, ``Spec.intersects()`` does not know anything about package metadata. + This avoids unnecessary package lookups and keeps things efficient where extra + information is not needed, and it decouples ``Spec`` from ``PackageBase``. + + In many cases, though, we can rule more cases out in ``intersects()`` if we + know, for example, that certain variants are always single-valued, or that + certain variants are conditional on other variants. This adds logic for such + cases when they are knowable. + + Note that because ``intersects()`` is conservative, it can only give false + positives (i.e., the two specs *may* overlap), not false negatives. This + method can fix false positives (i.e. it may return ``False`` when + ``Spec.intersects()`` would return ``True``, but it will never return ``True`` + when ``Spec.intersects()`` returns ``False``). + + """ + # Spec.intersects() is right when False + if not self.spec.intersects(spec): + return False + + def sv_variant_conflicts(spec, variant): + name = variant.name + return ( + variant.name in spec.variants + and all(not d[name].multi for when, d in self.variants.items() if name in d) + and spec.variants[name].value != variant.value + ) + + # Specs don't know if a variant is single- or multi-valued (concretization handles this) + # But, we know if the spec has a value for a single-valued variant, it *has* to equal the + # value in self.spec, if there is one. + for v, variant in spec.variants.items(): + if sv_variant_conflicts(self.spec, variant): + return False + + # if there is no intersecting condition for a conditional variant, it can't exist. e.g.: + # - cuda_arch= can't be satisfied when ~cuda.
+ # - generator= can't be satisfied when build_system=autotools + def mutually_exclusive(spec, variant_name): + return all( + not spec.intersects(when) + or any(sv_variant_conflicts(spec, wv) for wv in when.variants.values()) + for when, d in self.variants.items() + if variant_name in d + ) + + names = self.variant_names() + for v in set(itertools.chain(spec.variants, self.spec.variants)): + if v not in names: # treat unknown variants as intersecting + continue + + if mutually_exclusive(self.spec, v) or mutually_exclusive(spec, v): + return False + + return True + @property def virtuals_provided(self): """ @@ -1544,13 +1575,13 @@ def remove_prefix(self): spack.store.STORE.layout.remove_install_directory(self.spec) @property - def download_instr(self): + def download_instr(self) -> str: """ Defines the default manual download instructions. Packages can override the property to provide more information. Returns: - (str): default manual download instructions + default manual download instructions """ required = ( f"Manual download is required for {self.spec.name}. " if self.manual_download else "" @@ -1636,7 +1667,11 @@ def do_stage(self, mirror_only=False): self.stage.create() # Fetch/expand any associated code. - if self.has_code and not self.spec.external: + user_dev_path = spack.config.get(f"develop:{self.name}:path", None) + skip = user_dev_path and os.path.exists(user_dev_path) + if skip: + tty.debug("Skipping staging because develop path exists") + if self.has_code and not self.spec.external and not skip: self.do_fetch(mirror_only) self.stage.expand_archive() else: @@ -1707,7 +1742,11 @@ def do_patch(self): patch_path = patch.path spack.patch.apply_patch( - self.stage, patch_path, patch.level, patch.working_dir, patch.reverse + self.stage.source_path, + patch_path, + patch.level, + patch.working_dir, + patch.reverse, ) tty.msg(f"Applied patch {patch.path_or_url}") @@ -1789,9 +1828,10 @@ def content_hash(self, content: Optional[bytes] = None) -> str: """Create a hash based on the artifacts and patches used to build this package. This includes: - * source artifacts (tarballs, repositories) used to build; - * content hashes (``sha256``'s) of all patches applied by Spack; and - * canonicalized contents the ``package.py`` recipe used to build. + + * source artifacts (tarballs, repositories) used to build; + * content hashes (``sha256``'s) of all patches applied by Spack; and + * canonicalized contents the ``package.py`` recipe used to build. This hash is only included in Spack's DAG hash for concrete specs, but if it happens to be called on a package with an abstract spec, only applicable (i.e., @@ -1999,7 +2039,7 @@ def do_test(self, *, dirty=False, externals=False, timeout: Optional[int] = None self.tester.stand_alone_tests(kwargs, timeout=timeout) - def unit_test_check(self): + def unit_test_check(self) -> bool: """Hook for unit tests to assert things about package internals. Unit tests can override this function to perform checks after @@ -2008,39 +2048,29 @@ def unit_test_check(self): The overridden function may indicate that the install procedure should terminate early (before updating the database) by - returning ``False`` (or any value such that ``bool(result)`` is - ``False``). + returning :data:`False` (or any value such that ``bool(result)`` is + :data:`False`). 
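Returning to the package-aware intersects() added above, a hedged illustration of the difference it makes; the package and variant names are hypothetical, and we assume "mylib" declares cuda_arch as a conditional variant with when="+cuda":

    # pkg is a PackageBase instance whose pkg.spec is Spec("mylib~cuda")
    other = Spec("mylib cuda_arch=80")

    pkg.spec.intersects(other)  # True: Spec alone must stay conservative
    pkg.intersects(other)       # False: cuda_arch= cannot exist under ~cuda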
Return: - (bool): ``True`` to continue, ``False`` to skip ``install()`` + :data:`True` to continue, :data:`False` to skip ``install()`` """ return True @classmethod def inject_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE: - """ - flag_handler that injects all flags through the compiler wrapper. - """ + """See :func:`spack.package.inject_flags`.""" return flags, None, None @classmethod def env_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE: - """ - flag_handler that adds all flags to canonical environment variables. - """ + """See :func:`spack.package.env_flags`.""" return None, flags, None @classmethod def build_system_flags( cls: Type[Pb], name: str, flags: Iterable[str] ) -> FLAG_HANDLER_RETURN_TYPE: - """ - flag_handler that passes flags to the build system arguments. Any - package using `build_system_flags` must also implement - `flags_to_build_system_args`, or derive from a class that - implements it. Currently, AutotoolsPackage and CMakePackage - implement it. - """ + """See :func:`spack.package.build_system_flags`.""" return None, None, flags def setup_run_environment(self, env: spack.util.environment.EnvironmentModifications) -> None: @@ -2277,10 +2307,10 @@ def fetch_remote_versions( ) -> Dict[StandardVersion, str]: """Find remote versions of this package. - Uses ``list_url`` and any other URLs listed in the package file. + Uses :attr:`list_url` and any other URLs listed in the package file. Returns: - dict: a dictionary mapping versions to URLs + a dictionary mapping versions to URLs """ if not self.all_urls: return {} @@ -2325,9 +2355,212 @@ def rpath_args(self): return " ".join("-Wl,-rpath,%s" % p for p in self.rpath) -inject_flags = PackageBase.inject_flags -env_flags = PackageBase.env_flags -build_system_flags = PackageBase.build_system_flags +class WindowsSimulatedRPath: + """Class representing Windows filesystem rpath analog + + One instance of this class is associated with a package (only on Windows) + For each lib/binary directory in an associated package, this class introduces + a symlink to any/all dependent libraries/binaries. This includes the package's + own bin/lib directories, meaning the libraries are linked to the binary directory + and vice versa. + """ + + def __init__( + self, + package: PackageBase, + base_modification_prefix: Optional[Union[str, pathlib.Path]] = None, + link_install_prefix: bool = True, + ): + """ + Args: + package: Package requiring links + base_modification_prefix: Path representation indicating + the root directory in which to establish the simulated rpath, i.e. where the + symlinks that comprise the "rpath" behavior will be installed. + + Note: This is a mutually exclusive option with `link_install_prefix`, using + both is an error. + + Default: None + link_install_prefix: Link against package's own install or stage root. + Packages that run their own executables during build and require rpaths to + the build directory during build time require this option. + + Default: install + root + + Note: This is a mutually exclusive option with `base_modification_prefix`, using + both is an error. + """ + self.pkg = package + self._addl_rpaths: set[str] = set() + if link_install_prefix and base_modification_prefix: + raise RuntimeError( + "Invalid combination of arguments given to WindowsSimulatedRPath.\n" + "Select either `link_install_prefix` to create an install prefix rpath" + " or specify a `base_modification_prefix` for any other link type.
" + "Specifying both arguments is invalid." + ) + if not (link_install_prefix or base_modification_prefix): + raise RuntimeError( + "Insufficient arguments given to WindowsSimulatedRpath.\n" + "WindowsSimulatedRPath requires one of link_install_prefix" + " or base_modification_prefix to be specified." + " Neither was provided." + ) + + self.link_install_prefix = link_install_prefix + if base_modification_prefix: + self.base_modification_prefix = pathlib.Path(base_modification_prefix) + else: + self.base_modification_prefix = pathlib.Path(self.pkg.prefix) + self._additional_library_dependents: set[pathlib.Path] = set() + if not self.link_install_prefix: + tty.debug(f"Generating rpath for non install context: {base_modification_prefix}") + + @property + def library_dependents(self): + """ + Set of directories where package binaries/libraries are located. + """ + base_pths = set() + if self.link_install_prefix: + base_pths.add(pathlib.Path(self.pkg.prefix.bin)) + base_pths |= self._additional_library_dependents + return base_pths + + def add_library_dependent(self, *dest: Union[str, pathlib.Path]): + """ + Add paths to directories or libraries/binaries to set of + common paths that need to link against other libraries + + Specified paths should fall outside of a package's common + link paths, i.e. the bin + directories. + """ + for pth in dest: + if os.path.isfile(pth): + new_pth = pathlib.Path(pth).parent + else: + new_pth = pathlib.Path(pth) + path_is_in_prefix = new_pth.is_relative_to(self.base_modification_prefix) + if not path_is_in_prefix: + raise RuntimeError( + f"Attempting to generate rpath symlink out of rpath context:\ +{str(self.base_modification_prefix)}" + ) + self._additional_library_dependents.add(new_pth) + + @property + def rpaths(self): + """ + Set of libraries this package needs to link against during runtime + These packages will each be symlinked into the packages lib and binary dir + """ + dependent_libs = [] + for path in self.pkg.rpath: + dependent_libs.extend(list(find_all_shared_libraries(path, recursive=True))) + for extra_path in self._addl_rpaths: + dependent_libs.extend(list(find_all_shared_libraries(extra_path, recursive=True))) + return set([pathlib.Path(x) for x in dependent_libs]) + + def add_rpath(self, *paths: str): + """ + Add libraries found at the root of provided paths to runtime linking + + These are libraries found outside of the typical scope of rpath linking + that require manual inclusion in a runtime linking scheme. + These links are unidirectional, and are only + intended to bring outside dependencies into this package + + Args: + *paths : arbitrary number of paths to be added to runtime linking + """ + self._addl_rpaths = self._addl_rpaths | set(paths) + + def _link(self, path: pathlib.Path, dest_dir: pathlib.Path): + """Perform link step of simulated rpathing, installing + simlinks of file in path to the dest_dir + location. This method deliberately prevents + the case where a path points to a file inside the dest_dir. + This is because it is both meaningless from an rpath + perspective, and will cause an error when Developer + mode is not enabled""" + + def report_already_linked(): + # We have either already symlinked or we are encountering a naming clash + # either way, we don't want to overwrite existing libraries + already_linked = islink(str(dest_file)) + tty.debug( + "Linking library %s to %s failed, " % (str(path), str(dest_file)) + + "already linked." + if already_linked + else "library with name %s already exists at location %s." 
+ % (str(file_name), str(dest_dir)) + ) + + file_name = path.name + dest_file = dest_dir / file_name + if not dest_file.exists() and dest_dir.exists() and not dest_file == path: + try: + symlink(str(path), str(dest_file)) + # For py2 compatibility, we have to catch the specific Windows error code + # associated with trying to create a file that already exists (winerror 183) + # Catch OSErrors missed by the SymlinkError checks + except OSError as e: + if sys.platform == "win32" and e.errno == errno.EEXIST: + report_already_linked() + else: + raise e + # catch errors we raise ourselves from Spack + except AlreadyExistsError: + report_already_linked() + + def establish_link(self): + """ + (sym)link packages to runtime dependencies based on RPath configuration for + Windows heuristics + """ + # from build_environment.py:463 + # The top-level package is always RPATHed. It hasn't been installed yet + # so the RPATHs are added unconditionally + + # for each binary install dir in self.pkg (i.e. pkg.prefix.bin, pkg.prefix.lib) + # install a symlink to each dependent library + + # do not rpath for system libraries included in the dag + # we should not be modifying libraries managed by the Windows system + # as this will negatively impact linker behavior and can result in permission + # errors if those system libs are not modifiable by Spack + if "windows-system" not in getattr(self.pkg, "tags", []): + for library, lib_dir in itertools.product(self.rpaths, self.library_dependents): + self._link(library, lib_dir) + + +def make_package_test_rpath(pkg: PackageBase, test_dir: Union[str, pathlib.Path]) -> None: + """Establishes a temp Windows simulated rpath for the pkg in the testing directory so an + executable can test the libraries/executables with proper access to dependent dlls. + + Note: this is a no-op on all other platforms besides Windows + + Args: + pkg: the package for which the rpath should be computed + test_dir: the testing directory in which we should construct an rpath + """ + # link_install_prefix as false ensures we're not linking into the install prefix; + # the testing directory is the root of the simulated rpath instead + mini_rpath = WindowsSimulatedRPath(pkg, base_modification_prefix=test_dir, link_install_prefix=False) + # add the testing directory as a location to install rpath symlinks + mini_rpath.add_library_dependent(test_dir) + + # check for whether build_directory is available, if not + # assume the stage root is the build dir + build_dir_attr = getattr(pkg, "build_directory", None) + build_directory = build_dir_attr if build_dir_attr else pkg.stage.path + # add the build dir & build dir bin + mini_rpath.add_rpath(os.path.join(build_directory, "bin")) + mini_rpath.add_rpath(os.path.join(build_directory)) + # construct rpath + mini_rpath.establish_link() def deprecated_version(pkg: PackageBase, version: Union[str, StandardVersion]) -> bool: @@ -2344,18 +2577,79 @@ def deprecated_version(pkg: PackageBase, version: Union[str, StandardVersion]) - return details is not None and details.get("deprecated", False) -def preferred_version(pkg: PackageBase): - """ - Returns a sorted list of the preferred versions of the package. +def preferred_version( + pkg: Union[PackageBase, Type[PackageBase]], +) -> Union[StandardVersion, GitVersion]: + """Returns the preferred version of the package according to package.py. + + Accounts for version deprecation in the package recipe. Doesn't account for + any user configuration in packages.yaml. Arguments: pkg: The package whose versions are to be assessed.
""" - version, _ = max(pkg.versions.items(), key=concretization_version_order) + def _version_order(version_info): + version, info = version_info + deprecated_key = not info.get("deprecated", False) + return (deprecated_key, *concretization_version_order(version_info)) + + version, _ = max(pkg.versions.items(), key=_version_order) return version +def non_preferred_version(node: spack.spec.Spec) -> bool: + """Returns True if the spec version is not the preferred one, according to the package.py""" + if not node.versions.concrete: + return False + + try: + return node.version != preferred_version(node.package) + except ValueError: + return False + + +def non_default_variant(node: spack.spec.Spec, variant_name: str) -> bool: + """Returns True if the variant in the spec has a non-default value.""" + try: + default_variant = node.package.get_variant(variant_name).make_default() + return not node.satisfies(str(default_variant)) + except ValueError: + # This is the case for special variants like "patches" etc. + return False + + +def sort_by_pkg_preference( + versions: Iterable[Union[GitVersion, StandardVersion]], + *, + pkg: Union[PackageBase, Type[PackageBase]], +) -> List[Union[GitVersion, StandardVersion]]: + """Sorts the list of versions passed in input according to the preferences in the package. The + return value does not contain duplicate versions. Most preferred versions first. + """ + s = [(v, pkg.versions.get(v, {})) for v in dedupe(versions)] + return [v for v, _ in sorted(s, reverse=True, key=concretization_version_order)] + + +def concretization_version_order( + version_info: Tuple[Union[GitVersion, StandardVersion], dict], +) -> Tuple[bool, bool, bool, bool, Union[GitVersion, StandardVersion]]: + """Version order key for concretization, where preferred > not preferred, + finite > any infinite component; only if all are the same, do we use default version + ordering. + + Version deprecation needs to be accounted for separately. + """ + version, info = version_info + return ( + info.get("preferred", False), + not isinstance(version, GitVersion), + not version.isdevelop(), + not version.is_prerelease(), + version, + ) + + class PackageStillNeededError(InstallError): """Raised when package is still needed by another on uninstall.""" diff --git a/lib/spack/spack/package_prefs.py b/lib/spack/spack/package_prefs.py index d454a9df44548a..6bad354ed05ae7 100644 --- a/lib/spack/spack/package_prefs.py +++ b/lib/spack/spack/package_prefs.py @@ -22,29 +22,29 @@ def _spec_type(component): class PackagePrefs: """Defines the sort order for a set of specs. - Spack's package preference implementation uses PackagePrefss to + Spack's package preference implementation uses PackagePrefs to define sort order. The PackagePrefs class looks at Spack's packages.yaml configuration and, when called on a spec, returns a key that can be used to sort that spec in order of the user's preferences. - You can use it like this: + You can use it like this:: # key function sorts CompilerSpecs for `mpich` in order of preference - kf = PackagePrefs('mpich', 'compiler') + kf = PackagePrefs("mpich", "compiler") compiler_list.sort(key=kf) - Or like this: + Or like this:: # key function to sort VersionLists for OpenMPI in order of preference. - kf = PackagePrefs('openmpi', 'version') + kf = PackagePrefs("openmpi", "version") version_list.sort(key=kf) Optionally, you can sort in order of preferred virtual dependency - providers. 
To do that, provide 'providers' and a third argument - denoting the virtual package (e.g., ``mpi``): + providers. To do that, provide ``"providers"`` and a third argument + denoting the virtual package (e.g., ``mpi``):: - kf = PackagePrefs('trilinos', 'providers', 'mpi') + kf = PackagePrefs("trilinos", "providers", "mpi") provider_spec_list.sort(key=kf) """ @@ -93,8 +93,10 @@ def order_for_package(cls, pkgname, component, vpkg=None, all=True): if all: pkglist.append("all") + packages = spack.config.CONFIG.get_config("packages") + for pkg in pkglist: - pkg_entry = spack.config.get("packages").get(pkg) + pkg_entry = packages.get(pkg) if not pkg_entry: continue @@ -137,8 +139,9 @@ def has_preferred_targets(cls, pkg_name): @classmethod def preferred_variants(cls, pkg_name): """Return a VariantMap of preferred variants/values for a spec.""" + packages = spack.config.CONFIG.get_config("packages") for pkg_cls in (pkg_name, "all"): - variants = spack.config.get("packages").get(pkg_cls, {}).get("variants", "") + variants = packages.get(pkg_cls, {}).get("variants", "") if variants: break diff --git a/lib/spack/spack/package_test.py b/lib/spack/spack/package_test.py index 94cdc72f7e09cc..d2b403d5cf12ea 100644 --- a/lib/spack/spack/package_test.py +++ b/lib/spack/spack/package_test.py @@ -36,7 +36,7 @@ def compare_output(current_output: str, blessed_output: str) -> None: print("-" * 80) print(current_output) print("-" * 80) - raise RuntimeError("Ouput check failed.", "See spack_output.log for details") + raise RuntimeError("Output check failed.", "See spack_output.log for details") def compare_output_file(current_output: str, blessed_output_file: str) -> None: diff --git a/lib/spack/spack/patch.py b/lib/spack/spack/patch.py index 9eaef5d4be512d..8f9c5decd41e05 100644 --- a/lib/spack/spack/patch.py +++ b/lib/spack/spack/patch.py @@ -6,22 +6,24 @@ import os import pathlib import sys -from typing import Any, Dict, Optional, Tuple, Type, Union +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, Union import spack import spack.error import spack.fetch_strategy import spack.llnl.util.filesystem -import spack.repo -import spack.stage import spack.util.spack_json as sjson from spack.llnl.url import allowed_archive from spack.util.crypto import Checker, checksum from spack.util.executable import which, which_string +if TYPE_CHECKING: + import spack.package_base + import spack.repo + def apply_patch( - stage: "spack.stage.Stage", + source_path: str, patch_path: str, level: int = 1, working_dir: str = ".", @@ -59,7 +61,7 @@ def apply_patch( # has issues handling CRLF line endings unless the --binary # flag is passed. 
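As the hunk above shows, apply_patch now takes a plain source directory instead of a Stage. A hedged sketch of a direct call under the new signature; the paths are hypothetical:

    from spack.patch import apply_patch

    # equivalent of the old apply_patch(stage, ...) with stage.source_path
    apply_patch(
        "/path/to/expanded/source",
        "/path/to/fix-build.patch",
        level=1,
        working_dir=".",
    )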
patch = which("patch", required=True, path=git_utils_path) - with spack.llnl.util.filesystem.working_dir(stage.source_path): + with spack.llnl.util.filesystem.working_dir(source_path): patch(*args) @@ -278,7 +280,7 @@ def __init__( raise spack.error.PatchDirectiveError("URL patches require a sha256 checksum") self.sha256 = sha256 - def fetcher(self) -> "spack.fetch_strategy.FetchStrategy": + def fetcher(self) -> spack.fetch_strategy.FetchStrategy: """Construct a fetcher that can download (and unpack) this patch.""" # Two checksums, one for compressed file, one for its contents if self.archive_sha256 and self.sha256: @@ -303,9 +305,7 @@ def to_dict(self) -> Dict[str, Any]: return data -def from_dict( - dictionary: Dict[str, Any], repository: Optional["spack.repo.RepoPath"] = None -) -> Patch: +def from_dict(dictionary: Dict[str, Any], repository: "spack.repo.RepoPath") -> Patch: """Create a patch from json dictionary. Args: @@ -318,7 +318,6 @@ def from_dict( Raises: ValueError: If *owner* or *url*/*relative_path* are missing in the dictionary. """ - repository = repository or spack.repo.PATH owner = dictionary.get("owner") if owner is None: raise ValueError(f"Invalid patch dictionary: {dictionary}") diff --git a/lib/spack/spack/paths.py b/lib/spack/spack/paths.py index 0087a3830e5ce8..bfede02ea60ce4 100644 --- a/lib/spack/spack/paths.py +++ b/lib/spack/spack/paths.py @@ -116,9 +116,6 @@ def _get_user_cache_path(): #: transient caches for Spack data (virtual cache, patch sha256 lookup, etc.) default_misc_cache_path = os.path.join(user_cache_path, spack_instance_id, "cache") -#: concretization cache for Spack concretizations -default_conc_cache_path = os.path.join(default_misc_cache_path, "concretization") - # Below paths pull configuration from the host environment. # # There are three environment variables you can use to isolate spack from diff --git a/lib/spack/spack/platforms/_functions.py b/lib/spack/spack/platforms/_functions.py index 155bf5c7200cc0..217938834335da 100644 --- a/lib/spack/spack/platforms/_functions.py +++ b/lib/spack/spack/platforms/_functions.py @@ -26,7 +26,7 @@ def reset(): """The result of the host search is memoized. In case it needs to be recomputed we must clear the cache, which is what this function does. """ - _host.cache.clear() + _host.cache_clear() @spack.llnl.util.lang.memoized diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py index db5f5d23098dc7..886a89c0cb0831 100644 --- a/lib/spack/spack/provider_index.py +++ b/lib/spack/spack/provider_index.py @@ -2,14 +2,17 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Classes and functions to manage providers of virtual dependencies""" -from typing import Dict, List, Optional, Set +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Union import spack.error -import spack.spec import spack.util.spack_json as sjson +if TYPE_CHECKING: + import spack.repo + import spack.spec -class _IndexBase: + +class ProviderIndex: #: This is a dict of dicts used for finding providers of particular #: virtual dependencies. The dict of dicts looks like: #: @@ -26,28 +29,72 @@ class _IndexBase: #: Calling providers_for(spec) will find specs that provide a #: matching implementation of MPI. Derived class need to construct #: this attribute according to the semantics above. 
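To make the dict-of-dicts shape described above concrete, a small hypothetical instance; the virtual and provider names are illustrative:

    # { vpkg name : { provided vpkg spec : set of provider specs } }
    index.providers = {
        "mpi": {Spec("mpi@:3"): {Spec("openmpi@4.1"), Spec("mvapich2@2.3")}}
    }
    index.providers_for("mpi")  # -> both providers, in no guaranteed order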
- providers: Dict[str, Dict[str, Set[str]]] + providers: Dict[str, Dict["spack.spec.Spec", Set["spack.spec.Spec"]]] + + def __init__( + self, + repository: "spack.repo.RepoType", + specs: Optional[Iterable["spack.spec.Spec"]] = None, + restrict: bool = False, + ): + """Provider index based on a single mapping of providers. + + Args: + specs: if provided, will call update on each + single spec to initialize this provider index. + + restrict: "restricts" values to the verbatim input specs; do not + pre-apply package's constraints. + + TODO: rename this. It is intended to keep things as broad + TODO: as possible without overly restricting results, so it is + TODO: not the best name. + """ + self.repository = repository + self.restrict = restrict + self.providers = {} + + specs = specs or [] + for spec in specs: + if isinstance(spec, str): + from spack.spec import Spec - def providers_for(self, virtual_spec): - """Return a list of specs of all packages that provide virtual - packages with the supplied spec. + spec = Spec(spec) + + if self.repository.is_virtual_safe(spec.name): + continue + + self.update(spec) + + def providers_for(self, virtual: Union[str, "spack.spec.Spec"]) -> List["spack.spec.Spec"]: + """Return a list of specs of all packages that provide virtual packages with the supplied + spec. Args: - virtual_spec: virtual spec to be provided + virtual: either a Spec or a string name of a virtual package """ - result = set() - # Allow string names to be passed as input, as well as specs - if isinstance(virtual_spec, str): - virtual_spec = spack.spec.Spec(virtual_spec) + result: Set["spack.spec.Spec"] = set() + + if isinstance(virtual, str): + # In the common case where just a package name is passed, we can avoid running the + # spec parser and intersects, since intersects is always true. + if virtual.isalnum(): + if virtual in self.providers: + for p_spec, spec_set in self.providers[virtual].items(): + result.update(spec_set) + return list(result) + + from spack.spec import Spec + + virtual = Spec(virtual) # Add all the providers that satisfy the vpkg spec. - if virtual_spec.name in self.providers: - for p_spec, spec_set in self.providers[virtual_spec.name].items(): - if p_spec.intersects(virtual_spec, deps=False): + if virtual.name in self.providers: + for p_spec, spec_set in self.providers[virtual.name].items(): + if p_spec.intersects(virtual, deps=False): result.update(spec_set) - # Return providers in order. Defensively copy. - return sorted(s.copy() for s in result) + return list(result) def __contains__(self, name): return name in self.providers @@ -75,49 +122,16 @@ def __str__(self): def __repr__(self): return repr(self.providers) - -class ProviderIndex(_IndexBase): - def __init__( - self, - repository: "spack.repo.RepoType", - specs: Optional[List["spack.spec.Spec"]] = None, - restrict: bool = False, - ): - """Provider index based on a single mapping of providers. - - Args: - specs: if provided, will call update on each - single spec to initialize this provider index. - - restrict: "restricts" values to the verbatim input specs; do not - pre-apply package's constraints. - - TODO: rename this. It is intended to keep things as broad - TODO: as possible without overly restricting results, so it is - TODO: not the best name. 
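The rewritten `providers_for` above short-circuits when given a bare alphanumeric name, skipping the spec parser entirely. The guard is conservative because any input that would actually need `intersects()` contains at least one non-alphanumeric character; a few illustrative checks (the spec strings are examples, not taken from this diff):

```python
assert "mpi".isalnum()                 # plain name: fast dictionary lookup
assert not "mpi@3.1".isalnum()         # version constraint: full parser
assert not "mpi+shared".isalnum()      # variant syntax: full parser
assert not "lapack-shared".isalnum()   # even a hyphen falls back to the parser
```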
- """ - self.repository = repository - self.restrict = restrict - self.providers = {} - - specs = specs or [] - for spec in specs: - if not isinstance(spec, spack.spec.Spec): - spec = spack.spec.Spec(spec) - - if self.repository.is_virtual_safe(spec.name): - continue - - self.update(spec) - - def update(self, spec): + def update(self, spec: Union[str, "spack.spec.Spec"]) -> None: """Update the provider index with additional virtual specs. Args: spec: spec potentially providing additional virtual specs """ - if not isinstance(spec, spack.spec.Spec): - spec = spack.spec.Spec(spec) + if isinstance(spec, str): + from spack.spec import Spec + + spec = Spec(spec) if not spec.name: # Empty specs do not have a package @@ -126,8 +140,8 @@ def update(self, spec): msg = "cannot update an index passing the virtual spec '{}'".format(spec.name) assert not self.repository.is_virtual_safe(spec.name), msg - pkg_provided = self.repository.get_pkg_class(spec.name).provided - for provider_spec_readonly, provided_specs in pkg_provided.items(): + pkg_cls = self.repository.get_pkg_class(spec.name) + for provider_spec_readonly, provided_specs in pkg_cls.provided.items(): for provided_spec in provided_specs: # TODO: fix this comment. # We want satisfaction other than flags @@ -237,11 +251,13 @@ def from_json(stream, repository): index = ProviderIndex(repository=repository) providers = data["provider_index"]["providers"] + from spack.spec import SpecfileLatest + index.providers = _transform( providers, lambda vpkg, plist: ( - spack.spec.SpecfileLatest.from_node_dict(vpkg), - set(spack.spec.SpecfileLatest.from_node_dict(p) for p in plist), + SpecfileLatest.from_node_dict(vpkg), + set(SpecfileLatest.from_node_dict(p) for p in plist), ), ) return index diff --git a/lib/spack/spack/repo.py b/lib/spack/spack/repo.py index 6726d7fb327641..b6e1687cfc8a92 100644 --- a/lib/spack/spack/repo.py +++ b/lib/spack/spack/repo.py @@ -10,7 +10,6 @@ import importlib import importlib.machinery import importlib.util -import inspect import itertools import os import re @@ -22,6 +21,7 @@ import uuid import warnings from typing import ( + TYPE_CHECKING, Any, Callable, Dict, @@ -47,7 +47,6 @@ import spack.patch import spack.paths import spack.provider_index -import spack.spec import spack.tag import spack.util.executable import spack.util.file_cache @@ -59,6 +58,10 @@ import spack.util.spack_yaml as syaml from spack.llnl.util.filesystem import working_dir +if TYPE_CHECKING: + import spack.package_base + import spack.spec + PKG_MODULE_PREFIX_V1 = "spack.pkg." PKG_MODULE_PREFIX_V2 = "spack_repo." @@ -84,7 +87,7 @@ def is_package_module(fullname: str) -> bool: def namespace_from_fullname(fullname: str) -> str: """Return the repository namespace only for the full module name. - For instance: + For instance:: namespace_from_fullname("spack.pkg.builtin.hdf5") == "builtin" namespace_from_fullname("spack_repo.x.y.z.packages.pkg_name.package") == "x.y.z" @@ -253,9 +256,9 @@ def get_all_package_diffs(type: str, repo: "Repo", rev1="HEAD^1", rev2="HEAD") - Arguments: - type: String containing one or more of 'A', 'R', 'C' - rev1: Revision to compare against, default is 'HEAD^' - rev2: Revision to compare to rev1, default is 'HEAD' + type: String containing one or more of ``A``, ``R``, ``C``. 
+ rev1: Revision to compare against, default is ``"HEAD^"`` + rev2: Revision to compare to rev1, default is ``"HEAD"`` """ lower_type = type.lower() if not re.match("^[arc]*$", lower_type): @@ -291,7 +294,7 @@ def get_all_package_diffs(type: str, repo: "Repo", rev1="HEAD^1", rev2="HEAD") - def add_package_to_git_stage(packages: List[str], repo: "Repo") -> None: - """add a package to the git stage with `git add`""" + """add a package to the git stage with ``git add``""" git = GitExe(repo.packages_path) for pkg_name in packages: @@ -309,30 +312,15 @@ def autospec(function): @functools.wraps(function) def converter(self, spec_like, *args, **kwargs): - if not isinstance(spec_like, spack.spec.Spec): - spec_like = spack.spec.Spec(spec_like) + from spack.spec import Spec + + if not isinstance(spec_like, Spec): + spec_like = Spec(spec_like) return function(self, spec_like, *args, **kwargs) return converter -def is_package_file(filename): - """Determine whether we are in a package file from a repo.""" - # Package files are named `package.py` and are not in lib/spack/spack - # We have to remove the file extension because it can be .py and can be - # .pyc depending on context, and can differ between the files - import spack.package_base # break cycle - - filename_noext = os.path.splitext(filename)[0] - packagebase_filename_noext = os.path.splitext(inspect.getfile(spack.package_base.PackageBase))[ - 0 - ] - return ( - filename_noext != packagebase_filename_noext - and os.path.basename(filename_noext) == "package" - ) - - class SpackNamespace(types.ModuleType): """Allow lazy loading of modules.""" @@ -355,17 +343,27 @@ def __getattr__(self, name): return getattr(self, name) -class FastPackageChecker(Mapping[str, os.stat_result]): - """Cache that maps package names to the stats obtained on the - 'package.py' files associated with them. +@contextlib.contextmanager +def _directory_fd(path: str) -> Generator[Optional[int], None, None]: + if sys.platform == "win32": + yield None + return - For each repository a cache is maintained at class level, and shared among - all instances referring to it. Update of the global cache is done lazily - during instance initialization. - """ + fd = os.open(path, os.O_RDONLY) + try: + yield fd + finally: + os.close(fd) + + +class FastPackageChecker(Mapping[str, float]): + """Cache that maps package names to the modification times of their ``package.py`` files. + + For each repository a cache is maintained at class level, and shared among all instances + referring to it. 
Update of the global cache is done lazily during instance initialization.""" #: Global cache, reused by every instance - _paths_cache: Dict[str, Dict[str, os.stat_result]] = {} + _paths_cache: Dict[str, Dict[str, float]] = {} def __init__(self, packages_path: str, package_api: Tuple[int, int]) -> None: # The path of the repository managed by this instance @@ -377,39 +375,45 @@ def __init__(self, packages_path: str, package_api: Tuple[int, int]) -> None: self._paths_cache[packages_path] = self._create_new_cache() #: Reference to the appropriate entry in the global cache - self._packages_to_stats = self._paths_cache[packages_path] + self._packages_to_mtime = self._paths_cache[packages_path] def invalidate(self) -> None: """Regenerate cache for this checker.""" self._paths_cache[self.packages_path] = self._create_new_cache() - self._packages_to_stats = self._paths_cache[self.packages_path] + self._packages_to_mtime = self._paths_cache[self.packages_path] - def _create_new_cache(self) -> Dict[str, os.stat_result]: + def _create_new_cache(self) -> Dict[str, float]: """Create a new cache for packages in a repo. - The implementation here should try to minimize filesystem - calls. At the moment, it is O(number of packages) and makes - about one stat call per package. This is reasonably fast, and - avoids actually importing packages in Spack, which is slow. - """ + The implementation here should try to minimize filesystem calls. At the moment, it makes + one stat call per package. This is reasonably fast, and avoids actually importing packages + in Spack, which is slow.""" # Create a dictionary that will store the mapping between a - # package name and its stat info - cache: Dict[str, os.stat_result] = {} - with os.scandir(self.packages_path) as entries: + # package name and its mtime + cache: Dict[str, float] = {} + # Don't use os.path.join in the loop cause it's slow and redundant. + package_py_suffix = f"{os.path.sep}{package_file_name}" + + # Use a file descriptor for the packages directory to avoid repeated path resolution. + with _directory_fd(self.packages_path) as fd, os.scandir(self.packages_path) as entries: for entry in entries: # Construct the file name from the directory - pkg_file = os.path.join(entry.path, package_file_name) + if sys.platform == "win32": + pkg_file = f"{entry.path}{package_py_suffix}" + else: + pkg_file = f"{entry.name}{package_py_suffix}" try: - sinfo = os.stat(pkg_file) + sinfo = os.stat(pkg_file, dir_fd=fd) except OSError as e: if e.errno in (errno.ENOENT, errno.ENOTDIR): # No package.py file here. continue elif e.errno == errno.EACCES: + pkg_file = os.path.join(self.packages_path, entry.name, package_file_name) tty.warn(f"Can't read package file {pkg_file}.") continue - raise e + raise # If it's not a file, skip it. if not stat.S_ISREG(sinfo.st_mode): @@ -419,31 +423,32 @@ def _create_new_cache(self) -> Dict[str, os.stat_result]: # the current package API if not nm.valid_module_name(entry.name, self.package_api): x, y = self.package_api + pkg_file = os.path.join(self.packages_path, entry.name, package_file_name) tty.warn( f"Package {pkg_file} cannot be used because `{entry.name}` is not a valid " f"Spack package module name for Package API v{x}.{y}." ) continue - # Store the stat info by package name. - cache[nm.pkg_dir_to_pkg_name(entry.name, self.package_api)] = sinfo + # Store the mtime by package name. 
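The new `_create_new_cache` above opens the packages directory once and stats every `package.py` relative to that file descriptor, avoiding a full path resolution per package; on Windows, where `dir_fd` is unsupported, the code falls back to full paths. A self-contained sketch of the same technique, assuming POSIX and a `<name>/package.py` layout:

```python
import os
import stat
from typing import Dict


def package_mtimes(packages_path: str) -> Dict[str, float]:
    """Map package directory names to the mtime of their package.py (POSIX only)."""
    cache: Dict[str, float] = {}
    fd = os.open(packages_path, os.O_RDONLY)
    try:
        with os.scandir(packages_path) as entries:
            for entry in entries:
                try:
                    # Resolve "name/package.py" relative to the open directory fd.
                    info = os.stat(f"{entry.name}{os.sep}package.py", dir_fd=fd)
                except OSError:
                    continue  # no package.py in this entry
                if stat.S_ISREG(info.st_mode):
                    cache[entry.name] = info.st_mtime
    finally:
        os.close(fd)
    return cache
```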
+ cache[nm.pkg_dir_to_pkg_name(entry.name, self.package_api)] = sinfo.st_mtime return cache def last_mtime(self) -> float: - return max(sinfo.st_mtime for sinfo in self._packages_to_stats.values()) + return max(self._packages_to_mtime.values()) def modified_since(self, since: float) -> List[str]: - return [name for name, sinfo in self._packages_to_stats.items() if sinfo.st_mtime > since] + return [name for name, mtime in self._packages_to_mtime.items() if mtime > since] - def __getitem__(self, item: str) -> os.stat_result: - return self._packages_to_stats[item] + def __getitem__(self, item: str) -> float: + return self._packages_to_mtime[item] def __iter__(self) -> Iterator[str]: - return iter(self._packages_to_stats) + return iter(self._packages_to_mtime) def __len__(self) -> int: - return len(self._packages_to_stats) + return len(self._packages_to_mtime) class Indexer(metaclass=abc.ABCMeta): @@ -460,12 +465,11 @@ def create(self): def _create(self): """Create an empty index and return it.""" - def needs_update(self, pkg): + def needs_update(self, pkg) -> bool: """Whether an update is needed when the package file hasn't changed. Returns: - (bool): ``True`` if this package needs its index - updated, ``False`` otherwise. + ``True`` iff this package needs its index updated. We already automatically update indexes when package files change, but other files (like patches) may change underneath the @@ -492,14 +496,14 @@ def write(self, stream): class TagIndexer(Indexer): """Lifecycle methods for a TagIndex on a Repo.""" - def _create(self): - return spack.tag.TagIndex(self.repository) + def _create(self) -> spack.tag.TagIndex: + return spack.tag.TagIndex() def read(self, stream): - self.index = spack.tag.TagIndex.from_json(stream, self.repository) + self.index = spack.tag.TagIndex.from_json(stream) def update(self, pkg_fullname): - self.index.update_package(pkg_fullname.split(".")[-1]) + self.index.update_package(pkg_fullname.split(".")[-1], self.repository) def write(self, stream): self.index.to_json(stream) @@ -508,7 +512,7 @@ def write(self, stream): class ProviderIndexer(Indexer): """Lifecycle methods for virtual package providers.""" - def _create(self): + def _create(self) -> "spack.provider_index.ProviderIndex": return spack.provider_index.ProviderIndex(repository=self.repository) def read(self, stream): @@ -531,7 +535,7 @@ def write(self, stream): class PatchIndexer(Indexer): """Lifecycle methods for patch cache.""" - def _create(self): + def _create(self) -> spack.patch.PatchCache: return spack.patch.PatchCache(repository=self.repository) def needs_update(self): @@ -615,9 +619,9 @@ def _build_index(self, name: str, indexer: Indexer): """Determine which packages need an update, and update indexes.""" # Filename of the provider index cache (we assume they're all json) - cache_filename = ( - f"{name}/{self.namespace}-specfile_v{spack.spec.SPECFILE_FORMAT_VERSION}-index.json" - ) + from spack.spec import SPECFILE_FORMAT_VERSION + + cache_filename = f"{name}/{self.namespace}-specfile_v{SPECFILE_FORMAT_VERSION}-index.json" # Compute which packages needs to be updated in the cache index_mtime = self.cache.mtime(cache_filename) @@ -686,7 +690,7 @@ def from_config(config: spack.config.Configuration) -> "RepoPath": """Create a RepoPath from a configuration object.""" overrides = { pkg_name: data["package_attributes"] - for pkg_name, data in config.get("packages").items() + for pkg_name, data in config.get_config("packages").items() if pkg_name != "all" and "package_attributes" in data } @@ 
-802,7 +806,7 @@ def provider_index(self) -> spack.provider_index.ProviderIndex: def tag_index(self) -> spack.tag.TagIndex: """Merged TagIndex from all Repos in the RepoPath.""" if self._tag_index is None: - self._tag_index = spack.tag.TagIndex(repository=self) + self._tag_index = spack.tag.TagIndex() for repo in reversed(self.repos): self._tag_index.merge(repo.tag_index) return self._tag_index @@ -811,30 +815,34 @@ def tag_index(self) -> spack.tag.TagIndex: def patch_index(self) -> spack.patch.PatchCache: """Merged PatchIndex from all Repos in the RepoPath.""" if self._patch_index is None: - self._patch_index = spack.patch.PatchCache(repository=self) + from spack.patch import PatchCache + + self._patch_index = PatchCache(repository=self) for repo in reversed(self.repos): self._patch_index.update(repo.patch_index) return self._patch_index - @autospec - def providers_for(self, virtual_spec: "spack.spec.Spec") -> List["spack.spec.Spec"]: + def providers_for(self, virtual: Union[str, "spack.spec.Spec"]) -> List["spack.spec.Spec"]: + all_packages = self._all_package_names_set(include_virtuals=False) providers = [ spec - for spec in self.provider_index.providers_for(virtual_spec) - if spec.name in self._all_package_names_set(include_virtuals=False) + for spec in self.provider_index.providers_for(virtual) + if spec.name in all_packages ] if not providers: - raise UnknownPackageError(virtual_spec.fullname) + raise UnknownPackageError(virtual if isinstance(virtual, str) else virtual.fullname) return providers @autospec def extensions_for( self, extendee_spec: "spack.spec.Spec" ) -> List["spack.package_base.PackageBase"]: + from spack.spec import Spec + return [ - pkg_cls(spack.spec.Spec(pkg_cls.name)) + pkg_cls(Spec(pkg_cls.name)) for pkg_cls in self.all_package_classes() - if pkg_cls(spack.spec.Spec(pkg_cls.name)).extends(extendee_spec) + if pkg_cls(Spec(pkg_cls.name)).extends(extendee_spec) ] def last_mtime(self): @@ -845,7 +853,9 @@ def repo_for_pkg(self, spec: Union[str, "spack.spec.Spec"]) -> "Repo": """Given a spec, get the repository for its package.""" # We don't @_autospec this function b/c it's called very frequently # and we want to avoid parsing str's into Specs unnecessarily. - if isinstance(spec, spack.spec.Spec): + from spack.spec import Spec + + if isinstance(spec, Spec): namespace = spec.namespace name = spec.name else: @@ -874,8 +884,10 @@ def repo_for_pkg(self, spec: Union[str, "spack.spec.Spec"]) -> "Repo": def get(self, spec: "spack.spec.Spec") -> "spack.package_base.PackageBase": """Returns the package associated with the supplied spec.""" + from spack.spec import Spec + msg = "RepoPath.get can only be called on concrete specs" - assert isinstance(spec, spack.spec.Spec) and spec.concrete, msg + assert isinstance(spec, Spec) and spec.concrete, msg return self.repo_for_pkg(spec).get(spec) def python_paths(self) -> List[str]: @@ -908,12 +920,6 @@ def exists(self, pkg_name: str) -> bool: """ return any(repo.exists(pkg_name) for repo in self.repos) - def _have_name(self, pkg_name: str) -> bool: - have_name = pkg_name is not None - if have_name and not isinstance(pkg_name, str): - raise ValueError(f"is_virtual(): expected package name, got {type(pkg_name)}") - return have_name - def is_virtual(self, pkg_name: str) -> bool: """Return True if the package with this name is virtual, False otherwise. @@ -921,9 +927,9 @@ def is_virtual(self, pkg_name: str) -> bool: is used to construct the provider index use the ``is_virtual_safe`` function. 
Args: - pkg_name (str): name of the package we want to check + pkg_name: name of the package we want to check """ - have_name = self._have_name(pkg_name) + have_name = bool(pkg_name) return have_name and pkg_name in self.provider_index def is_virtual_safe(self, pkg_name: str) -> bool: @@ -932,9 +938,9 @@ def is_virtual_safe(self, pkg_name: str) -> bool: This function doesn't use the provider index. Args: - pkg_name (str): name of the package we want to check + pkg_name: name of the package we want to check """ - have_name = self._have_name(pkg_name) + have_name = bool(pkg_name) return have_name and (not self.exists(pkg_name) or self.get_pkg_class(pkg_name).virtual) def __contains__(self, pkg_name): @@ -1007,18 +1013,18 @@ def _validate_and_normalize_subdir(subdir: Any, root: str, package_api: Tuple[in class Repo: """Class representing a package repository in the filesystem. - Each package repository must have a top-level configuration file called `repo.yaml`. + Each package repository must have a top-level configuration file called ``repo.yaml``. It contains the following keys: - `namespace`: + ``namespace`` A Python namespace where the repository's packages should live. - `subdirectory`: + ``subdirectory`` An optional subdirectory name where packages are placed - `api`: - A string of the form vX.Y that indicates the Package API version. The default is "v1.0". + ``api`` + A string of the form vX.Y that indicates the Package API version. The default is ``v1.0``. For the repo to be compatible with the current version of Spack, the version must be greater than or equal to :py:data:`spack.min_package_api_version` and less than or equal to :py:data:`spack.package_api_version`. @@ -1061,6 +1067,7 @@ def check(condition, msg): config.get("subdirectory", packages_dir_name), root, self.package_api ) self.packages_path = os.path.join(self.root, self.subdirectory) + self.build_systems_path = os.path.join(self.root, "build_systems") check( os.path.isdir(self.packages_path), @@ -1150,9 +1157,10 @@ def real_name(self, import_name: str) -> Optional[str]: package names and Python module names, so there is no guessing. For Packge API v1.x we support the following one-to-many mappings: - num3proxy -> 3proxy - foo_bar -> foo_bar, foo-bar - foo_bar_baz -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz + + * ``num3proxy`` -> ``3proxy`` + * ``foo_bar`` -> ``foo_bar``, ``foo-bar`` + * ``foo_bar_baz`` -> ``foo_bar_baz``, ``foo-bar-baz``, ``foo_bar-baz``, ``foo-bar_baz`` """ if self.package_api >= (2, 0): if nm.pkg_dir_to_pkg_name(import_name, package_api=self.package_api) in self: @@ -1199,13 +1207,15 @@ def _read_config(self) -> Dict[str, Any]: def get(self, spec: "spack.spec.Spec") -> "spack.package_base.PackageBase": """Returns the package associated with the supplied spec.""" + from spack.spec import Spec + msg = "Repo.get can only be called on concrete specs" - assert isinstance(spec, spack.spec.Spec) and spec.concrete, msg + assert isinstance(spec, Spec) and spec.concrete, msg # NOTE: we only check whether the package is None here, not whether it # actually exists, because we have to load it anyway, and that ends up # checking for existence. We avoid constructing FastPackageChecker, # which will stat all packages. 
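The `real_name` docstring above enumerates the Package API v1.x one-to-many mappings from module names to package names. A hypothetical generator for the underscore/hyphen candidates (`candidate_names` is not a Spack function, and the leading-digit `num3proxy` case is omitted):

```python
import itertools
from typing import List


def candidate_names(import_name: str) -> List[str]:
    """All package names a v1 module name may correspond to."""
    parts = import_name.split("_")
    candidates = []
    # Each underscore in the module name may have been '_' or '-' originally.
    for seps in itertools.product("_-", repeat=len(parts) - 1):
        candidates.append(parts[0] + "".join(s + p for s, p in zip(seps, parts[1:])))
    return candidates


assert candidate_names("foo_bar") == ["foo_bar", "foo-bar"]
assert len(candidate_names("foo_bar_baz")) == 4  # matches the docstring above
```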
- if spec.name is None: + if not spec.name: raise UnknownPackageError(None, self) if spec.namespace and spec.namespace != self.namespace: @@ -1279,18 +1289,19 @@ def patch_index(self) -> spack.patch.PatchCache: """Index of patches and packages they're defined on.""" return self.index["patches"] - @autospec - def providers_for(self, vpkg_spec: "spack.spec.Spec") -> List["spack.spec.Spec"]: - providers = self.provider_index.providers_for(vpkg_spec) + def providers_for(self, virtual: Union[str, "spack.spec.Spec"]) -> List["spack.spec.Spec"]: + providers = self.provider_index.providers_for(virtual) if not providers: - raise UnknownPackageError(vpkg_spec.fullname) + raise UnknownPackageError(virtual if isinstance(virtual, str) else virtual.fullname) return providers @autospec def extensions_for( self, extendee_spec: "spack.spec.Spec" ) -> List["spack.package_base.PackageBase"]: - result = [pkg_cls(spack.spec.Spec(pkg_cls.name)) for pkg_cls in self.all_package_classes()] + from spack.spec import Spec + + result = [pkg_cls(Spec(pkg_cls.name)) for pkg_cls in self.all_package_classes()] return [x for x in result if x.extends(extendee_spec)] def dirname_for_package_name(self, pkg_name: str) -> str: @@ -1337,7 +1348,8 @@ def all_package_paths(self) -> Generator[str, None, None]: def packages_with_tags(self, *tags: str) -> Set[str]: v = set(self.all_package_names()) - v.intersection_update(*(self.tag_index[tag.lower()] for tag in tags)) + for tag in tags: + v.intersection_update(self.tag_index.get_packages(tag.lower())) return v def all_package_classes(self) -> Generator[Type["spack.package_base.PackageBase"], None, None]: @@ -1492,8 +1504,8 @@ def partition_package_name(pkg_name: str) -> Tuple[str, str]: If the package name is unqualified, the namespace is an empty string. Args: - pkg_name: a package name, either unqualified like "llvl", or - fully-qualified, like "builtin.llvm" + pkg_name: a package name, either unqualified like ``llvm``, or + fully-qualified, like ``builtin.llvm`` """ namespace, _, pkg_name = pkg_name.rpartition(".") return namespace, pkg_name @@ -1707,14 +1719,26 @@ def _clone_or_pull( # determine the default branch from ls-remote # (if no branch, tag, or commit is specified) if not (self.commit or self.tag or self.branch): - refs = git("ls-remote", "--symref", remote, "HEAD", output=str) - ref_match = re.search(r"refs/heads/(\S+)", refs) - if not ref_match: + # Get HEAD and all branches. On more recent versions of git, this can + # be done with a single call to `git ls-remote --symref remote HEAD`. 
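`partition_package_name` above leans on `str.rpartition`, which returns empty strings when the separator is absent, so unqualified names naturally fall out with an empty namespace:

```python
namespace, _, pkg = "builtin.llvm".rpartition(".")
assert (namespace, pkg) == ("builtin", "llvm")

namespace, _, pkg = "llvm".rpartition(".")
assert (namespace, pkg) == ("", "llvm")  # unqualified name, empty namespace
```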
+ refs = git("ls-remote", remote, "HEAD", "refs/heads/*", output=str) + head_match = re.search(r"^([0-9a-f]+)\s+HEAD$", refs, re.MULTILINE) + if not head_match: + self.error = f"Unable to locate HEAD for {self.repository}" + return + + head_sha = head_match.group(1) + + # Find the first branch that matches this SHA + branch_match = re.search( + rf"^{re.escape(head_sha)}\s+refs/heads/(\S+)$", refs, re.MULTILINE + ) + if not branch_match: self.error = ( f"Unable to locate a default branch for {self.repository}" ) return - self.branch = ref_match.group(1) + self.branch = branch_match.group(1) # determine the branch and remote if no config values exist elif not (self.commit or self.tag or self.branch): @@ -1722,11 +1746,13 @@ def _clone_or_pull( remote = git("config", f"branch.{self.branch}.remote", output=str).strip() if self.commit: - spack.util.git.pull_checkout_commit(self.commit, git_exe=git) + spack.util.git.pull_checkout_commit( + self.commit, remote=remote, depth=depth, git_exe=git + ) elif self.tag: spack.util.git.pull_checkout_tag( - self.tag, remote, depth=depth, git_exe=git + self.tag, remote=remote, depth=depth, git_exe=git ) elif self.branch: @@ -1873,7 +1899,7 @@ def from_config( return RepoDescriptors( { name: parse_config_descriptor(name, cfg, lock) - for name, cfg in config.get("repos", scope=scope).items() + for name, cfg in config.get_config("repos", scope=scope).items() } ) @@ -1921,7 +1947,7 @@ def parse_config_descriptor( Args: name: the name of the repository, used for error messages descriptor: the configuration for the repository, which can be a string (local path), - or a dictionary with 'git' key containing git URL and other options. + or a dictionary with ``git`` key containing git URL and other options. Returns: A RepoDescriptor instance, either LocalRepoDescriptor or RemoteRepoDescriptor. diff --git a/lib/spack/spack/reporters/cdash.py b/lib/spack/spack/reporters/cdash.py index de0e36962202db..358c121866c301 100644 --- a/lib/spack/spack/reporters/cdash.py +++ b/lib/spack/spack/reporters/cdash.py @@ -1,9 +1,9 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import codecs import collections import hashlib +import io import os import platform import posixpath @@ -78,10 +78,10 @@ class CDash(Reporter): ``spack install``:: spack install --cdash-upload-url=\\ - https://mydomain.com/cdash/submit.php?project=Spack + https://example.com/cdash/submit.php?project=Spack In this example, results will be uploaded to the *Spack* project on the - CDash instance hosted at https://mydomain.com/cdash. + CDash instance hosted at ``https://example.com/cdash``. 
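The `_clone_or_pull` hunk above resolves a remote's default branch without `--symref` by listing `HEAD` alongside all branch heads and matching SHAs. A standalone sketch of the same two-step match, assuming `git` is on `PATH` (the real code goes through Spack's executable wrapper):

```python
import re
import subprocess
from typing import Optional


def default_branch(remote: str) -> Optional[str]:
    """Resolve the default branch by matching HEAD's SHA against refs/heads/*."""
    refs = subprocess.run(
        ["git", "ls-remote", remote, "HEAD", "refs/heads/*"],
        check=True, capture_output=True, text=True,
    ).stdout
    head = re.search(r"^([0-9a-f]+)\s+HEAD$", refs, re.MULTILINE)
    if head is None:
        return None  # remote advertised no HEAD
    branch = re.search(
        rf"^{re.escape(head.group(1))}\s+refs/heads/(\S+)$", refs, re.MULTILINE
    )
    return branch.group(1) if branch else None
```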
""" def __init__(self, configuration: CDashConfiguration): @@ -454,7 +454,7 @@ def upload(self, filename): try: response = web_util.urlopen(request, timeout=SPACK_CDASH_TIMEOUT) if self.current_package_name not in self.buildIds: - resp_value = codecs.getreader("utf-8")(response).read() + resp_value = io.TextIOWrapper(response, encoding="utf-8").read() match = self.buildid_regexp.search(resp_value) if match: buildid = match.group(1) diff --git a/lib/spack/spack/rewiring.py b/lib/spack/spack/rewiring.py index ba3ba2344913f8..de7231254ad339 100644 --- a/lib/spack/spack/rewiring.py +++ b/lib/spack/spack/rewiring.py @@ -5,7 +5,7 @@ import os import tempfile -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.error import spack.hooks import spack.store @@ -34,11 +34,11 @@ def rewire_node(spec, explicit): # Copy spec.build_spec.prefix to spec.prefix through a temporary tarball tarball = os.path.join(tempdir, f"{spec.dag_hash()}.tar.gz") - bindist.create_tarball(spec.build_spec, tarball) + spack.binary_distribution.create_tarball(spec.build_spec, tarball) spack.hooks.pre_install(spec) - bindist.extract_buildcache_tarball(tarball, destination=spec.prefix) - bindist.relocate_package(spec) + spack.binary_distribution.extract_buildcache_tarball(tarball, destination=spec.prefix) + spack.binary_distribution.relocate_package(spec) # run post install hooks and add to db spack.hooks.post_install(spec, explicit) diff --git a/lib/spack/spack/schema/__init__.py b/lib/spack/spack/schema/__init__.py index 43bbd3e5acd1b0..bda7246f8f7daa 100644 --- a/lib/spack/spack/schema/__init__.py +++ b/lib/spack/spack/schema/__init__.py @@ -75,7 +75,7 @@ def _deprecated_properties(validator, deprecated, instance, schema): def _append(string: str) -> bool: """Test if a spack YAML string is an append. - See ``spack_yaml`` for details. Keys in Spack YAML can end in `+:`, + See ``spack_yaml`` for details. Keys in Spack YAML can end in ``+:``, and if they do, their values append lower-precedence configs. @@ -89,7 +89,7 @@ def _append(string: str) -> bool: def _prepend(string: str) -> bool: """Test if a spack YAML string is an prepend. - See ``spack_yaml`` for details. Keys in Spack YAML can end in `+:`, + See ``spack_yaml`` for details. Keys in Spack YAML can end in ``+:``, and if they do, their values prepend lower-precedence configs. @@ -102,7 +102,7 @@ def _prepend(string: str) -> bool: def override(string: str) -> bool: """Test if a spack YAML string is an override. - See ``spack_yaml`` for details. Keys in Spack YAML can end in `::`, + See ``spack_yaml`` for details. Keys in Spack YAML can end in ``::``, and if they do, their values completely replace lower-precedence configs instead of merging into them. @@ -114,7 +114,7 @@ def merge_yaml(dest, source, prepend=False, append=False): """Merges source into dest; entries in source take precedence over dest. This routine may modify dest and should be assigned to dest, in - case dest was None to begin with, e.g.: + case dest was None to begin with, e.g.:: dest = merge_yaml(dest, source) @@ -124,11 +124,11 @@ def merge_yaml(dest, source, prepend=False, append=False): appear before keys from ``dest``. Config file authors can optionally end any attribute in a dict - with `::` instead of `:`, and the key will override that of the + with ``::`` instead of ``:``, and the key will override that of the parent instead of merging. 
- `+:` will extend the default prepend merge strategy to include string concatenation - `-:` will change the merge strategy to append, it also includes string concatentation + ``+:`` will extend the default prepend merge strategy to include string concatenation + ``-:`` will change the merge strategy to append, it also includes string concatenation """ def they_are(t): diff --git a/lib/spack/spack/schema/bootstrap.py b/lib/spack/spack/schema/bootstrap.py index fa4a30737c00a6..33d384ed5b2657 100644 --- a/lib/spack/spack/schema/bootstrap.py +++ b/lib/spack/spack/schema/bootstrap.py @@ -7,7 +7,18 @@ #: Schema of a single source _source_schema: Dict[str, Any] = { "type": "object", - "properties": {"name": {"type": "string"}, "metadata": {"type": "string"}}, + "description": "Bootstrap source configuration", + "properties": { + "name": { + "type": "string", + "description": "Name of the bootstrap source (e.g., 'github-actions-v0.6', " + "'spack-install')", + }, + "metadata": { + "type": "string", + "description": "Path to metadata directory containing bootstrap source configuration", + }, + }, "additionalProperties": False, "required": ["name", "metadata"], } @@ -15,11 +26,28 @@ properties: Dict[str, Any] = { "bootstrap": { "type": "object", + "description": "Configure how Spack bootstraps its own dependencies when needed", "properties": { - "enable": {"type": "boolean"}, - "root": {"type": "string"}, - "sources": {"type": "array", "items": _source_schema}, - "trusted": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "boolean"}}}, + "enable": { + "type": "boolean", + "description": "Enable or disable bootstrapping entirely", + }, + "root": { + "type": "string", + "description": "Where to install bootstrapped dependencies", + }, + "sources": { + "type": "array", + "items": _source_schema, + "description": "List of bootstrap sources tried in order. 
Each method may " + "bootstrap different software depending on its type (e.g., pre-built binaries, " + "source builds)", + }, + "trusted": { + "type": "object", + "additionalProperties": {"type": "boolean"}, + "description": "Controls which sources are enabled for automatic bootstrapping", + }, }, } } diff --git a/lib/spack/spack/schema/buildcache_spec.py b/lib/spack/spack/schema/buildcache_spec.py index 81cde38901a8b5..c52af939e6cff8 100644 --- a/lib/spack/spack/schema/buildcache_spec.py +++ b/lib/spack/spack/schema/buildcache_spec.py @@ -14,11 +14,7 @@ properties: Dict[str, Any] = { # `buildinfo` is no longer needed as of Spack 0.21 "buildinfo": {"type": "object"}, - "spec": { - "type": "object", - "additionalProperties": True, - "items": spack.schema.spec.properties, - }, + "spec": {**spack.schema.spec.spec_node, "additionalProperties": True}, "buildcache_layout_version": {"type": "number"}, } diff --git a/lib/spack/spack/schema/cdash.py b/lib/spack/spack/schema/cdash.py index c1480cbf5649eb..49334c6c4ba94e 100644 --- a/lib/spack/spack/schema/cdash.py +++ b/lib/spack/spack/schema/cdash.py @@ -13,13 +13,16 @@ "cdash": { "type": "object", "additionalProperties": False, - # "required": ["build-group", "url", "project", "site"], "required": ["build-group"], - "patternProperties": { - r"build-group": {"type": "string"}, - r"url": {"type": "string"}, - r"project": {"type": "string"}, - r"site": {"type": "string"}, + "description": "Configuration for uploading build results to CDash", + "properties": { + "build-group": { + "type": "string", + "description": "Unique build group name for this stack", + }, + "url": {"type": "string", "description": "CDash server URL"}, + "project": {"type": "string", "description": "CDash project name"}, + "site": {"type": "string", "description": "Site identifier for CDash reporting"}, }, } } diff --git a/lib/spack/spack/schema/ci.py b/lib/spack/spack/schema/ci.py index 07a4ee1d06332f..4c694c8927250f 100644 --- a/lib/spack/spack/schema/ci.py +++ b/lib/spack/spack/schema/ci.py @@ -8,8 +8,6 @@ """ from typing import Any, Dict -from spack.llnl.util.lang import union_dicts - # Schema for script fields # List of lists and/or strings # This is similar to what is allowed in @@ -44,7 +42,7 @@ "tags": {"type": "array", "items": {"type": "string"}}, "variables": { "type": "object", - "patternProperties": {r"[\w\d\-_\.]+": {"type": ["string", "number"]}}, + "patternProperties": {r"^[\w\-\.]+$": {"type": ["string", "number"]}}, }, "before_script": script_schema, "script": script_schema, @@ -84,12 +82,12 @@ "required": ["endpoint"], "properties": { "name": {"type": "string"}, - # "endpoint" cannot have http patternProperties constaint as it is a required field + # "endpoint" cannot have http patternProperties constraint since it is required # Constrain is applied in code "endpoint": {"type": "string"}, "timeout": {"type": "integer", "minimum": 0}, "verify_ssl": {"type": "boolean", "default": False}, - "header": {"type": "object", "additionalProperties": False}, + "header": {"type": "object", "additionalProperties": {"type": "string"}}, "allow": {"type": "array", "items": {"type": "string"}}, "require": {"type": "array", "items": {"type": "string"}}, "ignore": {"type": "array", "items": {"type": "string"}}, @@ -124,18 +122,19 @@ def job_schema(name: str): }, } -core_shared_properties = union_dicts( - { - "pipeline-gen": pipeline_gen_schema, - "rebuild-index": {"type": "boolean"}, - "broken-specs-url": {"type": "string"}, - "broken-tests-packages": {"type": "array", "items": 
{"type": "string"}}, - "target": {"type": "string", "enum": ["gitlab"], "default": "gitlab"}, - } -) - #: Properties for inclusion in other schemas -properties: Dict[str, Any] = {"ci": core_shared_properties} +properties: Dict[str, Any] = { + "ci": { + "type": "object", + "properties": { + "pipeline-gen": pipeline_gen_schema, + "rebuild-index": {"type": "boolean"}, + "broken-specs-url": {"type": "string"}, + "broken-tests-packages": {"type": "array", "items": {"type": "string"}}, + "target": {"type": "string", "default": "gitlab"}, + }, + } +} #: Full schema with metadata schema = { diff --git a/lib/spack/spack/schema/compilers.py b/lib/spack/spack/schema/compilers.py index 4ad475b9a657cb..8d91505fce0d4e 100644 --- a/lib/spack/spack/schema/compilers.py +++ b/lib/spack/spack/schema/compilers.py @@ -13,21 +13,47 @@ flags: Dict[str, Any] = { "type": "object", "additionalProperties": False, + "description": "Flags to pass to the compiler during compilation and linking", "properties": { - "cflags": {"anyOf": [{"type": "string"}, {"type": "null"}]}, - "cxxflags": {"anyOf": [{"type": "string"}, {"type": "null"}]}, - "fflags": {"anyOf": [{"type": "string"}, {"type": "null"}]}, - "cppflags": {"anyOf": [{"type": "string"}, {"type": "null"}]}, - "ldflags": {"anyOf": [{"type": "string"}, {"type": "null"}]}, - "ldlibs": {"anyOf": [{"type": "string"}, {"type": "null"}]}, + "cflags": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags for C compiler, e.g. -std=c11", + }, + "cxxflags": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags for C++ compiler, e.g. -std=c++14", + }, + "fflags": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags for Fortran 77 compiler, e.g. -ffixed-line-length-none", + }, + "cppflags": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags for C preprocessor, e.g. -DFOO=1", + }, + "ldflags": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags passed to the compiler driver during linking, e.g. " + "-Wl,--gc-sections", + }, + "ldlibs": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "description": "Flags for linker libraries, e.g. 
-lpthread", + }, }, } -extra_rpaths: Dict[str, Any] = {"type": "array", "default": [], "items": {"type": "string"}} +extra_rpaths: Dict[str, Any] = { + "type": "array", + "default": [], + "items": {"type": "string"}, + "description": "List of extra rpaths to inject by Spack's compiler wrappers", +} implicit_rpaths: Dict[str, Any] = { - "anyOf": [{"type": "array", "items": {"type": "string"}}, {"type": "boolean"}] + "anyOf": [{"type": "array", "items": {"type": "string"}}, {"type": "boolean"}], + "description": "List of non-default link directories to register at runtime as rpaths", } #: Properties for inclusion in other schemas diff --git a/lib/spack/spack/schema/concretizer.py b/lib/spack/spack/schema/concretizer.py index 3dbe8d5f7934d5..52da5788dacb0d 100644 --- a/lib/spack/spack/schema/concretizer.py +++ b/lib/spack/spack/schema/concretizer.py @@ -14,22 +14,55 @@ "concretizer": { "type": "object", "additionalProperties": False, + "description": "Concretizer configuration that controls dependency selection, package " + "reuse, and solver behavior", "properties": { - "force": {"type": "boolean", "default": False}, + "force": { + "type": "boolean", + "default": False, + "description": "Force re-concretization when concretizing environments", + }, "reuse": { + "description": "Controls how aggressively Spack reuses installed packages and " + "build caches during concretization", "oneOf": [ - {"type": "boolean"}, - {"type": "string", "enum": ["dependencies"]}, + { + "type": "boolean", + "description": "If true, reuse installed packages and build caches for " + "all specs; if false, always perform fresh concretization", + }, + { + "type": "string", + "enum": ["dependencies"], + "description": "Reuse installed packages and build caches only for " + "dependencies, not root specs", + }, { "type": "object", + "description": "Advanced reuse configuration with fine-grained control " + "over which specs are reused", "properties": { - "roots": {"type": "boolean"}, - "include": LIST_OF_SPECS, - "exclude": LIST_OF_SPECS, + "roots": { + "type": "boolean", + "description": "If true, root specs are reused; if false, only " + "dependencies of root specs are reused", + }, + "include": { + **LIST_OF_SPECS, + "description": "List of spec constraints. Reusable specs must " + "match at least one constraint", + }, + "exclude": { + **LIST_OF_SPECS, + "description": "List of spec constraints. 
Reusable specs must " + "not match any constraint", + }, "from": { "type": "array", + "description": "List of sources from which reused specs are taken", "items": { "type": "object", + "description": "Source configuration for reusable specs", "properties": { "type": { "type": "string", @@ -39,63 +72,203 @@ "external", "environment", ], + "description": "Type of source: 'local' (installed " + "packages), 'buildcache' (remote binaries), " + "'external' (system packages), or 'environment' " + "(from specific environment)", + }, + "path": { + "type": "string", + "description": "Path to the source (for environment " + "type sources)", + }, + "include": { + **LIST_OF_SPECS, + "description": "Spec constraints that must be " + "matched for this source (overrides global include)", + }, + "exclude": { + **LIST_OF_SPECS, + "description": "Spec constraints that must not be " + "matched for this source (overrides global exclude)", }, - "path": {"type": "string"}, - "include": LIST_OF_SPECS, - "exclude": LIST_OF_SPECS, }, }, }, }, }, - ] + ], }, - "enable_node_namespace": {"type": "boolean"}, "targets": { "type": "object", + "description": "Controls which target microarchitectures are considered " + "during concretization", "properties": { - "host_compatible": {"type": "boolean"}, - "granularity": {"type": "string", "enum": ["generic", "microarchitectures"]}, + "host_compatible": { + "type": "boolean", + "description": "If true, only allow targets compatible with the " + "current host; if false, allow any target (e.g., concretize for icelake " + "while running on haswell)", + }, + "granularity": { + "type": "string", + "enum": ["generic", "microarchitectures"], + "description": "Target selection granularity: 'microarchitectures' " + "(e.g., haswell, skylake) or 'generic' (e.g., x86_64_v3, aarch64)", + }, }, }, "unify": { - "oneOf": [{"type": "boolean"}, {"type": "string", "enum": ["when_possible"]}] + "description": "Controls whether environment specs are concretized together " + "or separately", + "oneOf": [ + { + "type": "boolean", + "description": "If true, concretize environment root specs together " + "for unified dependencies; if false, concretize each spec independently", + }, + { + "type": "string", + "enum": ["when_possible"], + "description": "Maximizes reuse, while allowing multiple instances of the " + "same package", + }, + ], + }, + "compiler_mixing": { + "oneOf": [{"type": "boolean"}, {"type": "array"}], + "description": "Whether to allow compiler mixing between link/run dependencies", }, "splice": { "type": "object", "additionalProperties": False, + "description": "Configuration for spec splicing: replacing dependencies " + "with ABI-compatible alternatives to improve package reuse", "properties": { "explicit": { "type": "array", "default": [], + "description": "List of explicit splice configurations to replace " + "specific dependencies", "items": { "type": "object", "required": ["target", "replacement"], "additionalProperties": False, + "description": "Explicit splice configuration", "properties": { - "target": {"type": "string"}, - "replacement": {"type": "string"}, - "transitive": {"type": "boolean", "default": False}, + "target": { + "type": "string", + "description": "Abstract spec to be replaced (e.g., 'mpi' " + "or specific package)", + }, + "replacement": { + "type": "string", + "description": "Concrete spec with hash to use as " + "replacement (e.g., 'mpich/abcdef')", + }, + "transitive": { + "type": "boolean", + "default": False, + "description": "If true, use transitive 
splice (conflicts " + "resolved using replacement dependencies); if false, use " + "intransitive splice (conflicts resolved using original " + "dependencies)", + }, }, }, }, - "automatic": {"type": "boolean"}, + "automatic": { + "type": "boolean", + "description": "Enable automatic splicing for ABI-compatible packages " + "(experimental feature)", + }, }, }, "duplicates": { "type": "object", + "description": "Controls whether the dependency graph can contain multiple " + "configurations of the same package", "properties": { - "strategy": {"type": "string", "enum": ["none", "minimal", "full"]}, + "strategy": { + "type": "string", + "enum": ["none", "minimal", "full"], + "description": "Duplication strategy: 'none' (single config per " + "package), 'minimal' (allow build-tools duplicates), 'full' " + "(experimental: allow full build-tool stack separation)", + }, "max_dupes": { "type": "object", - "additional_properties": {"type": "integer", "minimum": 1}, + "description": "Maximum number of duplicates allowed per package when " + "using strategies that permit duplicates", + "additionalProperties": { + "type": "integer", + "minimum": 1, + "description": "Maximum number of duplicate instances for this " + "package", + }, }, }, }, - "static_analysis": {"type": "boolean"}, - "timeout": {"type": "integer", "minimum": 0}, - "error_on_timeout": {"type": "boolean"}, - "os_compatible": {"type": "object", "additionalProperties": {"type": "array"}}, + "static_analysis": { + "type": "boolean", + "description": "Enable static analysis to reduce concretization time by " + "generating smaller ASP problems", + }, + "timeout": { + "type": "integer", + "minimum": 0, + "description": "Maximum time in seconds for the solve phase (0 means no " + "time limit)", + }, + "error_on_timeout": { + "type": "boolean", + "description": "If true, timeout always results in error; if false, use best " + "suboptimal solution found before timeout (yields unreproducible results)", + }, + "os_compatible": { + "type": "object", + "additionalProperties": {"type": "array"}, + "description": "Compatibility mapping between operating systems for reuse of " + "compilers and packages (key: target OS, value: list of compatible source OSes)", + }, + "concretization_cache": { + "type": "object", + "description": "Configuration for caching solver outputs from successful " + "concretization runs", + "properties": { + "enable": { + "type": "boolean", + "description": "Whether to utilize a cache of solver outputs from " + "successful concretization runs", + }, + "url": { + "type": "string", + "description": "Path to the location where Spack will root the " + "concretization cache", + }, + "entry_limit": { + "type": "integer", + "minimum": 0, + "description": "Limit on the number of concretization results that " + "Spack will cache (0 disables pruning)", + }, + }, + }, + "externals": { + "type": "object", + "description": "Configuration for how Spack handles external packages during " + "concretization", + "properties": { + "completion": { + "type": "string", + "enum": ["architecture_only", "default_variants"], + "description": "Controls how missing information (variants, etc.) 
is " + "completed for external packages: 'architecture_only' completes only " + "mandatory architectural information; 'default_variants' also completes " + "missing variants using their default values", + } + }, + }, }, } } diff --git a/lib/spack/spack/schema/config.py b/lib/spack/spack/schema/config.py index 4282c2b0deeefd..1e3f8864f84d9c 100644 --- a/lib/spack/spack/schema/config.py +++ b/lib/spack/spack/schema/config.py @@ -10,123 +10,228 @@ import spack.schema import spack.schema.projections -from spack.llnl.util.lang import union_dicts #: Properties for inclusion in other schemas properties: Dict[str, Any] = { "config": { "type": "object", "default": {}, + "description": "Spack's basic configuration options", "properties": { "flags": { "type": "object", + "description": "Build flag configuration options", "properties": { - "keep_werror": {"type": "string", "enum": ["all", "specific", "none"]} + "keep_werror": { + "type": "string", + "enum": ["all", "specific", "none"], + "description": "Whether to keep -Werror flags active in package builds", + } }, }, "shared_linking": { + "description": "Control how shared libraries are located at runtime on Linux", "anyOf": [ {"type": "string", "enum": ["rpath", "runpath"]}, { "type": "object", "properties": { - "type": {"type": "string", "enum": ["rpath", "runpath"]}, - "bind": {"type": "boolean"}, - "missing_library_policy": {"enum": ["error", "warn", "ignore"]}, + "type": { + "type": "string", + "enum": ["rpath", "runpath"], + "description": "Whether to use RPATH or RUNPATH for runtime " + "library search paths", + }, + "bind": { + "type": "boolean", + "description": "Embed absolute paths of dependent libraries " + "directly in ELF binaries (experimental)", + }, + "missing_library_policy": { + "enum": ["error", "warn", "ignore"], + "description": "How to handle missing dynamic libraries after " + "installation", + }, }, }, - ] + ], }, "install_tree": { - "anyOf": [ - { - "type": "object", - "properties": union_dicts( - {"root": {"type": "string"}}, - { - "padded_length": { - "oneOf": [ - {"type": "integer", "minimum": 0}, - {"type": "boolean"}, - ] - } - }, - spack.schema.projections.properties, - ), - }, - {"type": "string"}, # deprecated - ] - }, - "concretization_cache": { "type": "object", + "description": "Installation tree configuration", "properties": { - "enable": {"type": "boolean"}, - "url": {"type": "string"}, - "entry_limit": {"type": "integer", "minimum": 0}, - "size_limit": {"type": "integer", "minimum": 0}, + "root": { + "type": "string", + "description": "The location where Spack will install packages and " + "their dependencies", + }, + "padded_length": { + "oneOf": [{"type": "integer", "minimum": 0}, {"type": "boolean"}], + "description": "Length to pad installation paths to allow better " + "relocation of binaries (true for max length, integer for specific " + "length)", + }, + **spack.schema.projections.properties, }, }, - "install_hash_length": {"type": "integer", "minimum": 1}, - "install_path_scheme": {"type": "string"}, # deprecated + "install_hash_length": { + "type": "integer", + "minimum": 1, + "description": "Length of hash used in installation directory names", + }, "build_stage": { - "oneOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}] - }, - "stage_name": {"type": "string"}, - "develop_stage_link": {"type": "string"}, - "test_stage": {"type": "string"}, - "extensions": {"type": "array", "items": {"type": "string"}}, - "template_dirs": {"type": "array", "items": {"type": "string"}}, - 
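Several schema hunks in this diff (ci.py above, config.py here) retire the `union_dicts` helper in favor of plain `**` unpacking when composing JSON-schema fragments. A small sketch of the idiom with illustrative fragments, not the real Spack schemas:

```python
from typing import Any, Dict

projections: Dict[str, Any] = {"projections": {"type": "object"}}

install_tree: Dict[str, Any] = {
    "type": "object",
    "properties": {
        "root": {"type": "string"},
        # Unpack the shared fragment in place; on a key collision the later
        # entry wins, matching a left-to-right dict merge.
        **projections,
    },
}
```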
"license_dir": {"type": "string"}, - "source_cache": {"type": "string"}, - "misc_cache": {"type": "string"}, - "environments_root": {"type": "string"}, - "connect_timeout": {"type": "integer", "minimum": 0}, - "verify_ssl": {"type": "boolean"}, - "ssl_certs": {"type": "string"}, - "suppress_gpg_warnings": {"type": "boolean"}, - "debug": {"type": "boolean"}, - "checksum": {"type": "boolean"}, - "deprecated": {"type": "boolean"}, - "locks": {"type": "boolean"}, - "dirty": {"type": "boolean"}, - "build_language": {"type": "string"}, - "build_jobs": {"type": "integer", "minimum": 1}, - "concurrent_packages": {"type": "integer", "minimum:": 1}, - "ccache": {"type": "boolean"}, - "db_lock_timeout": {"type": "integer", "minimum": 1}, + "oneOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}], + "description": "Temporary locations Spack can try to use for builds", + }, + "stage_name": { + "type": "string", + "description": "Name format for build stage directories", + }, + "develop_stage_link": { + "type": "string", + "description": "Name for development spec build stage directories", + }, + "test_stage": { + "type": "string", + "description": "Directory in which to run tests and store test results", + }, + "extensions": { + "type": "array", + "items": {"type": "string"}, + "description": "List of Spack extensions to load", + }, + "template_dirs": { + "type": "array", + "items": {"type": "string"}, + "description": "Locations where templates should be found", + }, + "license_dir": { + "type": "string", + "description": "Directory where licenses should be located", + }, + "source_cache": { + "type": "string", + "description": "Location to cache downloaded tarballs and repositories", + }, + "misc_cache": { + "type": "string", + "description": "Temporary directory to store long-lived cache files, such as " + "indices of packages", + }, + "environments_root": { + "type": "string", + "description": "Directory where Spack managed environments are created and stored", + }, + "connect_timeout": { + "type": "integer", + "minimum": 0, + "description": "Abort downloads after this many seconds if no data is received " + "(0 disables timeout)", + }, + "verify_ssl": { + "type": "boolean", + "description": "When true, Spack will verify certificates of remote hosts when " + "making SSL connections", + }, + "ssl_certs": { + "type": "string", + "description": "Path to custom certificates for SSL verification", + }, + "suppress_gpg_warnings": { + "type": "boolean", + "description": "Suppress GPG warnings from binary package verification", + }, + "debug": { + "type": "boolean", + "description": "Enable debug mode for additional logging", + }, + "checksum": { + "type": "boolean", + "description": "When true, Spack verifies downloaded source code using checksums", + }, + "deprecated": { + "type": "boolean", + "description": "If true, Spack will fetch deprecated versions without warning", + }, + "locks": { + "type": "boolean", + "description": "When true, concurrent instances of Spack will use locks to avoid " + "conflicts (strongly recommended)", + }, + "dirty": { + "type": "boolean", + "description": "When true, builds will NOT clean potentially harmful variables " + "from the environment", + }, + "build_language": { + "type": "string", + "description": "The language the build environment will use (C for English, " + "empty string for user's environment)", + }, + "build_jobs": { + "type": "integer", + "minimum": 1, + "description": "The maximum number of jobs to use for the build system (e.g. 
" + "make -j), defaults to 16", + }, + "concurrent_packages": { + "type": "integer", + "minimum": 1, + "description": "The maximum number of concurrent package builds a single Spack " + "instance will run", + }, + "ccache": { + "type": "boolean", + "description": "When true, Spack's compiler wrapper will use ccache when " + "compiling C and C++", + }, + "db_lock_timeout": { + "type": "integer", + "minimum": 1, + "description": "How long to wait to lock the Spack installation database", + }, "package_lock_timeout": { - "anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}] - }, - "allow_sgid": {"type": "boolean"}, - "install_status": {"type": "boolean"}, - "binary_index_root": {"type": "string"}, - "url_fetch_method": {"type": "string", "pattern": r"^urllib$|^curl( .*)*"}, - "additional_external_search_paths": {"type": "array", "items": {"type": "string"}}, - "binary_index_ttl": {"type": "integer", "minimum": 0}, - "aliases": {"type": "object", "patternProperties": {r"\w[\w-]*": {"type": "string"}}}, + "anyOf": [{"type": "integer", "minimum": 1}, {"type": "null"}], + "description": "How long to wait when attempting to modify a package (null for " + "never timeout)", + }, + "allow_sgid": { + "type": "boolean", + "description": "Allow installation on filesystems that don't allow setgid bit " + "manipulation", + }, + "install_status": { + "type": "boolean", + "description": "Whether to show status information in the terminal title during " + "the build", + }, + "url_fetch_method": { + "anyOf": [{"enum": ["urllib", "curl"]}, {"type": "string", "pattern": r"^curl "}], + "description": "The default URL fetch method to use (urllib or curl)", + }, + "additional_external_search_paths": { + "type": "array", + "items": {"type": "string"}, + "description": "Additional paths to search for external packages", + }, + "binary_index_ttl": { + "type": "integer", + "minimum": 0, + "description": "Number of seconds a buildcache's index.json is cached locally " + "before probing for updates", + }, + "aliases": { + "type": "object", + "additionalProperties": {"type": "string"}, + "description": "A mapping of aliases that can be used to define new " + "Spack commands", + }, + "installer": { + "type": "string", + "enum": ["old", "new"], + "description": "Which installer to use. The new installer is experimental.", + }, }, - "deprecatedProperties": [ - { - "names": ["concretizer"], - "message": "Spack supports only clingo as a concretizer from v0.23. " - "The config:concretizer config option is ignored.", - "error": False, - }, - { - "names": ["install_missing_compilers"], - "message": "The config:install_missing_compilers option has been deprecated in " - "Spack v0.23, and is currently ignored. It will be removed from config in " - "Spack v1.0.", - "error": False, - }, - { - "names": ["install_path_scheme"], - "message": "The config:install_path_scheme option was deprecated in Spack v0.16 " - "in favor of config:install_tree:projections:all. It will be removed in Spack " - "v1.0.", - "error": False, - }, - ], } } @@ -141,54 +246,20 @@ } -def update(data): +def update(data: dict) -> bool: """Update the data in place to remove deprecated properties. 
Args: - data (dict): dictionary to be updated + data: dictionary to be updated - Returns: - True if data was changed, False otherwise + Returns: True if data was changed, False otherwise """ - # currently deprecated properties are - # install_tree: - # install_path_scheme: - # updated: install_tree: {root: , - # projections: = high_fixed_priority_offset: - num_high_fixed += 1 + priorities_names.append((build_priority, name, OptimizationKind.BUILD)) else: - num_fixed += 1 + priorities_names.append((priority, name, OptimizationKind.OTHER)) # sort the criteria by priority priorities_names = sorted(priorities_names, reverse=True) @@ -232,34 +209,11 @@ def build_criteria_names(costs, arg_tuples): error_criteria = len(costs) - len(priorities_names) costs = costs[error_criteria:] - # split list into three parts: build criteria, fixed criteria, non-build criteria - num_criteria = len(priorities_names) - num_build = (num_criteria - num_fixed - num_high_fixed) // 2 - - build_start_idx = num_high_fixed - fixed_start_idx = num_high_fixed + num_build - installed_start_idx = num_high_fixed + num_build + num_fixed - - high_fixed = priorities_names[:build_start_idx] - build = priorities_names[build_start_idx:fixed_start_idx] - fixed = priorities_names[fixed_start_idx:installed_start_idx] - installed = priorities_names[installed_start_idx:] - - # mapping from priority to index in cost list - indices = dict((p, i) for i, (p, n) in enumerate(priorities_names)) - - # make a list that has each name with its build and non-build costs - criteria = [(cost, None, name) for cost, (p, name) in zip(costs[:build_start_idx], high_fixed)] - criteria += [ - (cost, None, name) - for cost, (p, name) in zip(costs[fixed_start_idx:installed_start_idx], fixed) + return [ + OptimizationCriteria(priority, value, name, status) + for (priority, name, status), value in zip(priorities_names, costs) ] - for (i, name), (b, _) in zip(installed, build): - criteria.append((costs[indices[i]], costs[indices[b]], name)) - - return criteria - def specify(spec): if isinstance(spec, spack.spec.Spec): @@ -267,9 +221,7 @@ def specify(spec): return spack.spec.Spec(spec) -def remove_facts( - *to_be_removed: str, -) -> Callable[[spack.spec.Spec, List[AspFunction]], List[AspFunction]]: +def remove_facts(*to_be_removed: str) -> TransformFunction: """Returns a transformation function that removes facts from the input list of facts.""" def _remove(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction]: @@ -278,20 +230,15 @@ def _remove(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction return _remove -def all_libcs() -> Set[spack.spec.Spec]: - """Return a set of all libc specs targeted by any configured compiler. 
If none, fall back to - libc determined from the current Python process if dynamically linked.""" - libcs = set() - for c in spack.compilers.config.all_compilers_from(spack.config.CONFIG): - candidate = CompilerPropertyDetector(c).default_libc() - if candidate is not None: - libcs.add(candidate) - - if libcs: - return libcs - - libc = spack.util.libc.libc_from_current_python_process() - return {libc} if libc else set() +def dag_closure_by_deptype(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction]: + edges = spec.edges_to_dependencies() + # Compute the "link" transitive closure with `when: root ^[deptypes=link] ` + if len(edges) == 1: + edge = edges[0] + if not edge.direct and edge.depflag == dt.LINK | dt.RUN: + root, leaf = edge.parent.name, edge.spec.name + return [fn.attr("closure", root, leaf, "linkrun")] + return facts def libc_is_compatible(lhs: spack.spec.Spec, rhs: spack.spec.Spec) -> bool: @@ -302,11 +249,6 @@ def libc_is_compatible(lhs: spack.spec.Spec, rhs: spack.spec.Spec) -> bool: ) -def using_libc_compatibility() -> bool: - """Returns True if we are currently using libc compatibility""" - return spack.platforms.host().name == "linux" - - def c_compiler_runs(compiler) -> bool: return CompilerPropertyDetector(compiler).compiler_verbose_output() is not None @@ -325,6 +267,32 @@ def extend_flag_list(flag_list, new_flags): flag_list.append(flag) +def _reorder_flags(flag_list: List[spack.spec.CompilerFlag]) -> List[spack.spec.CompilerFlag]: + """Reorder a list of flags to ensure that the order matches that of the flag group.""" + if not flag_list: + return [] + + if len({x.flag_group for x in flag_list}) != 1 or len({x.source for x in flag_list}) != 1: + raise InternalConcretizerError( + "internal solver error: cannot reorder compiler flags for concretized specs. " + "Please report a bug at https://github.com/spack/spack/issues" + ) + + flag_group = flag_list[0].flag_group + flag_source = flag_list[0].source + flag_propagate = flag_list[0].propagate + # Once we have the flag_group, no need to iterate over the flag_list because the + # group represents all of them + return [ + spack.spec.CompilerFlag( + flag, propagate=flag_propagate, flag_group=flag_group, source=flag_source + ) + for flag, propagate in spack.compilers.flags.tokenize_flags( + flag_group, propagate=flag_propagate + ) + ] + + def check_packages_exist(specs): """Ensure all packages mentioned in specs exist.""" repo = spack.repo.PATH @@ -351,12 +319,8 @@ def __init__(self, specs, asp=None): self.warnings = None self.nmodels = 0 - # Saved control object for reruns when necessary - self.control = None - # specs ordered by optimization level self.answers = [] - self.cores = [] # names of optimization criteria self.criteria = [] @@ -364,96 +328,16 @@ def __init__(self, specs, asp=None): # Abstract user requests self.abstract_specs = specs + # possible dependencies + self.possible_dependencies = None + # Concrete specs self._concrete_specs_by_input = None self._concrete_specs = None self._unsolved_specs = None - def format_core(self, core): - """ - Format an unsatisfiable core for human readability - - Returns a list of strings, where each string is the human readable - representation of a single fact in the core, including a newline. - - Modeled after traceback.format_stack. - """ - error_msg = ( - "Internal Error: ASP Result.control not populated. 
Please report to the spack" - " maintainers" - ) - assert self.control, error_msg - - symbols = dict((a.literal, a.symbol) for a in self.control.symbolic_atoms) - - core_symbols = [] - for atom in core: - sym = symbols[atom] - core_symbols.append(sym) - - return sorted(str(symbol) for symbol in core_symbols) - - def minimize_core(self, core): - """ - Return a subset-minimal subset of the core. - - Clingo cores may be thousands of lines when two facts are sufficient to - ensure unsatisfiability. This algorithm reduces the core to only those - essential facts. - """ - error_msg = ( - "Internal Error: ASP Result.control not populated. Please report to the spack" - " maintainers" - ) - assert self.control, error_msg - - min_core = core[:] - for fact in core: - # Try solving without this fact - min_core.remove(fact) - ret = self.control.solve(assumptions=min_core) - if not ret.unsatisfiable: - min_core.append(fact) - return min_core - - def minimal_cores(self): - """ - Return a list of subset-minimal unsatisfiable cores. - """ - return [self.minimize_core(core) for core in self.cores] - - def format_minimal_cores(self): - """List of facts for each core - - Separate cores are separated by an empty line - """ - string_list = [] - for core in self.minimal_cores(): - if string_list: - string_list.append("\n") - string_list.extend(self.format_core(core)) - return string_list - - def format_cores(self): - """List of facts for each core - - Separate cores are separated by an empty line - Cores are not minimized - """ - string_list = [] - for core in self.cores: - if string_list: - string_list.append("\n") - string_list.extend(self.format_core(core)) - return string_list - def raise_if_unsat(self): - """ - Raise an appropriate error if the result is unsatisfiable. - - The error is an SolverError, and includes the minimized cores - resulting from the solve, formatted to be human readable. 
- """ + """Raise a generic internal error if the result is unsatisfiable.""" if self.satisfiable: return @@ -461,8 +345,7 @@ def raise_if_unsat(self): if len(constraints) == 1: constraints = constraints[0] - conflicts = self.format_minimal_cores() - raise SolverError(constraints, conflicts=conflicts) + raise SolverError(constraints) @property def specs(self): @@ -539,7 +422,7 @@ def format_unsolved(unsolved_specs): msg += "\n\t(No candidate specs from solver)" return msg - def to_dict(self, test: bool = False) -> dict: + def to_dict(self) -> dict: """Produces dict representation of Result object Does not include anything related to unsatisfiability as we @@ -596,7 +479,11 @@ def _dict_to_spec(spec_dict): if spec_list: spec_list = [_str_to_spec(x) for x in spec_list] result = Result(spec_list, asp) - result.criteria = obj.get("criteria") + + criteria = obj.get("criteria") + result.criteria = ( + None if criteria is None else [OptimizationCriteria(*t) for t in criteria] + ) result.optimal = obj.get("optimal") result.warnings = obj.get("warnings") result.nmodels = obj.get("nmodels") @@ -618,6 +505,26 @@ def _dict_to_spec(spec_dict): result._concrete_specs.append(_dict_to_spec(spec)) return result + def __eq__(self, other): + eq = ( + self.asp == other.asp, + self.satisfiable == other.satisfiable, + self.optimal == other.optimal, + self.warnings == other.warnings, + self.nmodels == other.nmodels, + self.criteria == other.criteria, + self.answers == other.answers, + self.abstract_specs == other.abstract_specs, + self._concrete_specs_by_input == other._concrete_specs_by_input, + self._concrete_specs == other._concrete_specs, + self._unsolved_specs == other._unsolved_specs, + # Not considered for equality + # self.control + # self.possible_dependencies + # self.possible_dependencies + ) + return all(eq) + class ConcretizationCache: """Store for Spack concretization results and statistics @@ -628,137 +535,74 @@ class ConcretizationCache: """ def __init__(self, root: Union[str, None] = None): - root = root or spack.config.get( - "config:concretization_cache:url", spack.paths.default_conc_cache_path - ) + root = root or spack.config.get("concretizer:concretization_cache:url", None) + if root is None: + root = os.path.join(spack.caches.misc_cache_location(), "concretization") self.root = pathlib.Path(spack.util.path.canonicalize_path(root)) - self._fc = FileCache(self.root) - self._cache_manifest = ".cache_manifest" - self._manifest_queue: List[Tuple[pathlib.Path, int]] = [] + self.root.mkdir(parents=True, exist_ok=True) + self._lockfile = self.root / ".cc_lock" def cleanup(self): - """Prunes the concretization cache according to configured size and entry - count limits. 
Cleanup is done in FIFO ordering.""" - # TODO: determine a better default - entry_limit = spack.config.get("config:concretization_cache:entry_limit", 1000) - bytes_limit = spack.config.get("config:concretization_cache:size_limit", 3e8) - # lock the entire buildcache as we're removing a lot of data from the - # manifest and cache itself - with self._fc.read_transaction(self._cache_manifest) as f: - count, cache_bytes = self._extract_cache_metadata(f) - if not count or not cache_bytes: - return - entry_count = int(count) - manifest_bytes = int(cache_bytes) - # move beyond the metadata entry - f.readline() - if entry_count > entry_limit and entry_limit > 0: - with self._fc.write_transaction(self._cache_manifest) as (old, new): - # prune the oldest 10% or until we have removed 10% of - # total bytes starting from oldest entry - # TODO: make this configurable? - prune_count = entry_limit // 10 - lines_to_prune = f.readlines(prune_count) - for i, line in enumerate(lines_to_prune): - sha, cache_entry_bytes = self._parse_manifest_entry(line) - if sha and cache_entry_bytes: - cache_path = self._cache_path_from_hash(sha) - if self._fc.remove(cache_path): - entry_count -= 1 - manifest_bytes -= int(cache_entry_bytes) - else: - tty.warn( - f"Invalid concretization cache entry: '{line}' on line: {i+1}" - ) - self._write_manifest(f, entry_count, manifest_bytes) - - elif manifest_bytes > bytes_limit and bytes_limit > 0: - with self._fc.write_transaction(self._cache_manifest) as (old, new): - # take 10% of current size off - prune_amount = bytes_limit // 10 - total_pruned = 0 - i = 0 - while total_pruned < prune_amount: - sha, manifest_cache_bytes = self._parse_manifest_entry(f.readline()) - if sha and manifest_cache_bytes: - entry_bytes = int(manifest_cache_bytes) - cache_path = self.root / sha[:2] / sha - if self._safe_remove(cache_path): - entry_count -= 1 - entry_bytes -= entry_bytes - total_pruned += entry_bytes - else: - tty.warn( - "Invalid concretization cache entry " - f"'{sha} {manifest_cache_bytes}' on line: {i}" - ) - i += 1 - self._write_manifest(f, entry_count, manifest_bytes) - for cache_dir in self.root.iterdir(): - if cache_dir.is_dir() and not any(cache_dir.iterdir()): - self._safe_remove(cache_dir) + """Prunes the concretization cache according to configured entry + count limits. Cleanup is done in LRU ordering.""" + entry_limit = spack.config.get("concretizer:concretization_cache:entry_limit", 1000) - def cache_entries(self): - """Generator producing cache entries""" - for cache_dir in self.root.iterdir(): - # ensure component is cache entry directory - # not metadata file - if cache_dir.is_dir(): - for cache_entry in cache_dir.iterdir(): - if not cache_entry.is_dir(): - yield cache_entry - else: - raise RuntimeError( - "Improperly formed concretization cache. " - f"Directory {cache_entry.name} is improperly located " - "within the concretization cache." 
- ) + # determine if we even need to clean up + entries = list(self.cache_entries()) + if len(entries) <= entry_limit: + return - def _parse_manifest_entry(self, line): - """Returns parsed manifest entry lines - with handling for invalid reads.""" - if line: - cache_values = line.strip("\n").split(" ") - if len(cache_values) < 2: - tty.warn(f"Invalid cache entry at {line}") - return None, None - return None, None + # collect stat info for mod time about all entries + removal_queue = [] + for entry in entries: + try: + entry_stat_info = entry.stat() + # mtime will always be time of last use as we update it after + # each read and obviously after each write + mod_time = entry_stat_info.st_mtime + removal_queue.append((mod_time, entry)) + except FileNotFoundError: + # don't need to cleanup the file, it's not there! + pass - def _write_manifest(self, manifest_file, entry_count, entry_bytes): - """Writes new concretization cache manifest file. + removal_queue.sort() # sort items for removal, ascending, so oldest first - Arguments: - manifest_file: IO stream opened for readin - and writing wrapping the manifest file - with cursor at calltime set to location - where manifest should be truncated - entry_count: new total entry count - entry_bytes: new total entry bytes count + # Try to remove the oldest half of the cache. + for _, entry_to_rm in removal_queue[: entry_limit // 2]: + # cache bucket was removed by another process -- that's fine; move on + if not entry_to_rm.exists(): + continue - """ - persisted_entries = manifest_file.readlines() - manifest_file.truncate(0) - manifest_file.write(f"{entry_count} {entry_bytes}\n") - manifest_file.writelines(persisted_entries) + try: + with self.write_transaction(entry_to_rm, timeout=1e-6): + self._safe_remove(entry_to_rm) + except lk.LockTimeoutError: + # if we can't get a lock, it's either + # 1) being read, so it's been used recently, i.e. not a good candidate for LRU, + # 2) it's already being removed by another process, so we don't care, or + # 3) system is busy, but we don't really need to wait just for cache cleanup. + pass # so skip it - def _results_from_cache(self, cache_entry_buffer: IO[str]) -> Union[Result, None]: + def cache_entries(self): + """Generator producing cache entries within a bucket""" + for cache_entry in self.root.iterdir(): + # Lockfile starts with "." + # old style concretization cache entries are in directories + if not cache_entry.name.startswith(".") and cache_entry.is_file(): + yield cache_entry + + def _results_from_cache(self, cache_entry_file: str) -> Union[Result, None]: """Returns a Results object from the concretizer cache Reads the cache hit and uses `Result`'s own deserializer to produce a new Result object """ - with current_file_position(cache_entry_buffer, 0): - cache_str = cache_entry_buffer.read() - # TODO: Should this be an error if None? - # Same for _stats_from_cache - if cache_str: - cache_entry = json.loads(cache_str) - result_json = cache_entry["results"] - return Result.from_dict(result_json) - return None - - def _stats_from_cache(self, cache_entry_buffer: IO[str]) -> Union[List, None]: + cache_entry = json.loads(cache_entry_file) + result_json = cache_entry["results"] + return Result.from_dict(result_json) + + def _stats_from_cache(self, cache_entry_file: str) -> Union[Dict, None]: """Returns concretization statistic from the concretization associated with the cache. 
@@ -766,90 +610,75 @@ def _stats_from_cache(self, cache_entry_buffer: IO[str]) -> Union[List, None]: statistics covering the cached concretization run and returns the Python data structures """ - with current_file_position(cache_entry_buffer, 0): - cache_str = cache_entry_buffer.read() - if cache_str: - return json.loads(cache_str)["statistics"] - return None - - def _extract_cache_metadata(self, cache_stream: IO[str]): - """Extracts and returns cache entry count and bytes count from head of manifest - file""" - # make sure we're always reading from the beginning of the stream - # concretization cache manifest data lives at the top of the file - with current_file_position(cache_stream, 0): - return self._parse_manifest_entry(cache_stream.readline()) - - def _prefix_digest(self, problem: str) -> Tuple[str, str]: + return json.loads(cache_entry_file)["statistics"] + + def _prefix_digest(self, problem: str) -> str: """Return the first two characters of, and the full, sha256 of the given asp problem""" - prob_digest = hashlib.sha256(problem.encode()).hexdigest() - prefix = prob_digest[:2] - return prefix, prob_digest + return spack.util.hash.b32_hash(problem) def _cache_path_from_problem(self, problem: str) -> pathlib.Path: """Returns a Path object representing the path to the cache - entry for the given problem""" - prefix, digest = self._prefix_digest(problem) - return pathlib.Path(prefix) / digest - - def _cache_path_from_hash(self, hash: str) -> pathlib.Path: - """Returns a Path object representing the cache entry - corresponding to the given sha256 hash""" - return pathlib.Path(hash[:2]) / hash - - def _lock_prefix_from_cache_path(self, cache_path: str): - """Returns the bit location corresponding to a given cache entry path - for file locking""" - return spack.util.hash.base32_prefix_bits( - spack.util.hash.b32_hash(cache_path), spack.util.crypto.bit_length(sys.maxsize) - ) + entry for the given problem where the problem is the sha256 of the given asp problem""" + prefix = self._prefix_digest(problem) + return self.root / prefix - def flush_manifest(self): - """Updates the concretization cache manifest file after a cache write operation - Updates the current byte count and entry counts and writes to the head of the - manifest file""" - manifest_file = self.root / self._cache_manifest - manifest_file.touch(exist_ok=True) - with open(manifest_file, "r+", encoding="utf-8") as f: - # check if manifest is empty - count, cache_bytes = self._extract_cache_metadata(f) - if not count or not cache_bytes: - # cache is unintialized - count = 0 - cache_bytes = 0 - f.seek(0, io.SEEK_END) - for manifest_update in self._manifest_queue: - entry_path, entry_bytes = manifest_update - count += 1 - cache_bytes += entry_bytes - f.write(f"{entry_path.name} {entry_bytes}") - f.seek(0, io.SEEK_SET) - new_stats = f"{int(count)+1} {int(cache_bytes)}\n" - f.write(new_stats) - - def _register_cache_update(self, cache_path: pathlib.Path, bytes_written: int): - """Adds manifest entry to update queue for later updates to the manifest""" - self._manifest_queue.append((cache_path, bytes_written)) - - def _safe_remove(self, cache_dir: pathlib.Path): + def _safe_remove(self, cache_dir: pathlib.Path) -> bool: """Removes cache entries with handling for the case where the entry has been removed already or there are multiple cache entries in a directory""" try: - if cache_dir.is_dir(): - cache_dir.rmdir() - else: - cache_dir.unlink() + cache_dir.unlink() return True except FileNotFoundError: - # This is acceptable, removal is 
idempotent + # That's fine, removal is idempotent pass except OSError as e: - if e.errno == errno.ENOTEMPTY: - # there exists another cache entry in this directory, don't clean yet - pass + # Catch other timing/access related issues + tty.debug( + f"Exception occurred while attempting to remove Concretization Cache entry, {e}" + ) + pass return False - def store(self, problem: str, result: Result, statistics: List, test: bool = False): + def _lock(self, path: pathlib.Path) -> lk.Lock: + """Returns a lock over the byte range corresponding to the hash of the asp problem. + + ``path`` is a path to a file in the cache, and its basename is the hash of the problem. + + Args: + path: absolute or relative path to concretization cache entry to be locked + """ + return lk.Lock( + str(self._lockfile), + start=spack.util.hash.base32_prefix_bits( + path.name, spack.util.crypto.bit_length(sys.maxsize) + ), + length=1, + desc=f"Concretization cache lock for {path}", + ) + + def read_transaction( + self, path: pathlib.Path, timeout: Optional[float] = None + ) -> lk.ReadTransaction: + """Read transactions for concretization cache entries. + + Args: + path: absolute or relative path to the concretization cache entry to be locked + timeout: give up after this many seconds + """ + return lk.ReadTransaction(self._lock(path), timeout=timeout) + + def write_transaction( + self, path: pathlib.Path, timeout: Optional[float] = None + ) -> lk.WriteTransaction: + """Write transactions for concretization cache entries + + Args: + path: absolute or relative path to the concretization cache entry to be locked + timeout: give up after this many seconds + """ + return lk.WriteTransaction(self._lock(path), timeout=timeout) + + def store(self, problem: str, result: Result, statistics: List) -> None: """Creates entry in concretization cache for problem if none exists, storing the concretization Result object and statistics in the cache as serialized json joined as a single file. @@ -858,21 +687,17 @@ def store(self, problem: str, result: Result, statistics: List, test: bool = Fal problem. """ cache_path = self._cache_path_from_problem(problem) - if self._fc.init_entry(cache_path): - # if an entry for this conc hash exists already, we're don't want - # to overwrite, just exit - tty.debug(f"Cache entry {cache_path} exists, will not be overwritten") - return - with self._fc.write_transaction(cache_path) as (old, new): - if old: - # Entry for this conc hash exists already, do not overwrite - tty.debug(f"Cache entry {cache_path} exists, will not be overwritten") + with self.write_transaction(cache_path, timeout=30): + if cache_path.exists(): + # if cache path file exists, we already have a cache entry, likely created + # by another process. Exit early. return - cache_dict = {"results": result.to_dict(test=test), "statistics": statistics} - bytes_written = new.write(json.dumps(cache_dict)) - self._register_cache_update(cache_path, bytes_written) - def fetch(self, problem: str) -> Union[Tuple[Result, List], Tuple[None, None]]: + with gzip.open(cache_path, "xb", compresslevel=6) as cache_entry: + cache_dict = {"results": result.to_dict(), "statistics": statistics} + cache_entry.write(json.dumps(cache_dict).encode()) + + def fetch(self, problem: str) -> Union[Tuple[Result, Dict], Tuple[None, None]]: """Returns the concretization cache result for a lookup based on the given problem.
Checks the concretization cache for the given problem, and either returns the @@ -880,48 +705,42 @@ def fetch(self, problem: str) -> Union[Tuple[Result, List], Tuple[None, None]]: or returns none if no cache entry was found. """ cache_path = self._cache_path_from_problem(problem) - result, statistics = None, None - with self._fc.read_transaction(cache_path) as f: - if f: - result = self._results_from_cache(f) - statistics = self._stats_from_cache(f) - if result and statistics: - tty.debug(f"Concretization cache hit at {str(cache_path)}") - return result, statistics - tty.debug(f"Concretization cache miss at {str(cache_path)}") - return None, None - - -CONC_CACHE: ConcretizationCache = spack.llnl.util.lang.Singleton( - lambda: ConcretizationCache() -) # type: ignore - - -def _normalize_packages_yaml(packages_yaml): - normalized_yaml = copy.copy(packages_yaml) - for pkg_name in packages_yaml: - is_virtual = spack.repo.PATH.is_virtual(pkg_name) - if pkg_name == "all" or not is_virtual: - continue + if not cache_path.exists(): + return None, None # if exists is false, then there's no chance of a hit + + cache_content = None + try: + with self.read_transaction(cache_path, timeout=2): + try: + with gzip.open(cache_path, "rb", compresslevel=6) as f: + f.peek(1) # Try to read at least one byte + f.seek(0) + cache_content = f.read().decode("utf-8") - # Remove the virtual entry from the normalized configuration - data = normalized_yaml.pop(pkg_name) - is_buildable = data.get("buildable", True) - if not is_buildable: - for provider in spack.repo.PATH.providers_for(pkg_name): - entry = normalized_yaml.setdefault(provider.name, {}) - entry["buildable"] = False + except OSError: + # The cache entry may have been created before compression was added: check + # whether it is gzip and, if not, read it as plaintext; otherwise re-raise + with open(cache_path, "rb") as f: + # raise if this is a gzip file we failed to open + if GZipFileType().matches_magic(f): + raise + cache_content = f.read().decode() - externals = data.get("externals", []) + except FileNotFoundError: + pass # cache miss, already cleaned up + + except lk.LockTimeoutError: + pass # if the lock times out, skip the cache - def keyfn(x): - return spack.spec.Spec(x["spec"]).name + if not cache_content: + return None, None - for provider, specs in itertools.groupby(externals, key=keyfn): - entry = normalized_yaml.setdefault(provider, {}) - entry.setdefault("externals", []).extend(specs) + # update mod/access time for use w/ LRU cleanup + os.utime(cache_path) + return ( + self._results_from_cache(cache_content), + self._stats_from_cache(cache_content), + ) # type: ignore def _is_checksummed_git_version(v): @@ -931,7 +750,7 @@ def _is_checksummed_version(version_info: Tuple[GitOrStandardVersion, dict]): """Returns true iff the version is not a moving target""" version, info = version_info - if isinstance(version, spack.version.StandardVersion): + if isinstance(version, vn.StandardVersion): if any(h in info for h in spack.util.crypto.hashes.keys()) or "checksum" in info: return True return "commit" in info and len(info["commit"]) == 40 @@ -946,25 +765,6 @@ def _spec_with_default_name(spec_str, name): return spec -def _external_config_with_implicit_externals(configuration): - # Read packages.yaml and normalize it, so that it will not contain entries referring to - virtual packages.
- packages_yaml = _normalize_packages_yaml(configuration.get("packages")) - - # Add externals for libc from compilers on Linux - if not using_libc_compatibility(): - return packages_yaml - - seen = set() - for compiler in spack.compilers.config.all_compilers_from(configuration): - libc = CompilerPropertyDetector(compiler).default_libc() - if libc and libc not in seen: - seen.add(libc) - entry = {"spec": f"{libc}", "prefix": libc.external_path} - packages_yaml.setdefault(libc.name, {}).setdefault("externals", []).append(entry) - return packages_yaml - - class ErrorHandler: def __init__(self, model, input_specs: List[spack.spec.Spec]): self.model = model @@ -1102,36 +902,171 @@ def on_model(model): class PyclingoDriver: - def __init__(self, cores=True): + def __init__(self, conc_cache: Optional[ConcretizationCache] = None) -> None: """Driver for the Python clingo interface. - Arguments: - cores (bool): whether to generate unsatisfiable cores for better - error reporting. + Args: + conc_cache: concretization cache """ - self.cores = cores # This attribute will be reset at each call to solve - self.control = None + self.control: Any = None # TODO: fix typing of dynamic clingo import + self._conc_cache = conc_cache + + def _control_file_paths(self, control_files: List[str]) -> List[str]: + """Get absolute paths based on relative paths of control files. + + Right now the control files just live next to this file in the Spack tree. + """ + parent_dir = os.path.dirname(__file__) + return [os.path.join(parent_dir, rel_path) for rel_path in control_files] + + def _make_cache_key(self, asp_problem: List[str], control_file_paths: List[str]) -> str: + """Make a key for fetching a solve from the concretization cache. + + A key comprises the entire input to clingo, i.e., the problem instance plus the + control files. The problem instance is assumed to already be sorted and stripped of + comments and empty lines. + + The control files are stripped but not sorted, so changes to the control files will cause + cache misses if they modify any code. + + Arguments: + asp_problem: list of statements in the ASP program + control_file_paths: list of paths to control files we'll send to clingo + """ + lines = list(asp_problem) + for path in control_file_paths: + with open(path, "r", encoding="utf-8") as f: + lines.extend(strip_asp_problem(f.readlines())) + + return "\n".join(lines) + + def _run_clingo( + self, + specs: List[spack.spec.Spec], + setup: "SpackSolverSetup", + problem_str: str, + control_file_paths: List[str], + timer: spack.util.timer.Timer, + ) -> Result: + """Actually run clingo and generate a result. + + This is the core solve logic once the setup is done and once we know we can't + fetch a result from cache. See ``solve()`` for caching and setup logic. + """ + # We could just take the cache_key and add it to clingo (since it is the + # full problem representation), but we load control files separately as it + # makes clingo give us better, file-aware error messages. + with timer.measure("load"): + # Add the problem instance + self.control.add("base", [], problem_str) + # Load additional files + for path in control_file_paths: + self.control.load(path) + + # Grounding is the first step in the solve -- it turns our facts + # and first-order logic rules into propositional logic. + with timer.measure("ground"): + self.control.ground([("base", [])]) + + # With a grounded program, we can run the solve.
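# ----------------------------------------------------------------------------
# Editorial note (not part of this diff): for readers unfamiliar with the
# pyclingo API used in _run_clingo, here is a minimal, self-contained sketch of
# the same add/ground/solve pattern. The tiny logic program is hypothetical;
# the API names are from the real `clingo` Python package.
#
#     import clingo
#
#     ctl = clingo.Control()
#     ctl.add("base", [], "a. b :- a.")  # add a problem instance
#     ctl.ground([("base", [])])         # facts + rules -> propositional logic
#
#     models = []
#     with ctl.solve(
#         on_model=lambda m: models.append(m.symbols(shown=True)), async_=True
#     ) as handle:
#         handle.wait(1.0)        # True once solving finished; allows periodic
#                                 # interrupt checks, as in the loop below
#         result = handle.get()   # SolveResult, e.g. result.satisfiable
# ----------------------------------------------------------------------------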
+ models = [] # stable models if things go well + + def on_model(model): + models.append((model.cost, model.symbols(shown=True, terms=True))) + + timer.start("solve") + # A timeout of 0 means no timeout + time_limit = spack.config.CONFIG.get("concretizer:timeout", 0) + timeout_end = time.monotonic() + time_limit if time_limit > 0 else float("inf") + error_on_timeout = spack.config.CONFIG.get("concretizer:error_on_timeout", True) + with self.control.solve(on_model=on_model, async_=True) as handle: + # Allow handling of interrupts every second. + # + # pyclingo's `SolveHandle` blocks the calling thread for the duration of each + # `.wait()` call. Python also requires that signal handlers must be handled in + # the main thread, so any `KeyboardInterrupt` is postponed until after the + # `.wait()` call exits the control of pyclingo. + finished = False + while not finished and time.monotonic() < timeout_end: + finished = handle.wait(1.0) + + if not finished: + specs_str = ", ".join(spack.llnl.util.lang.elide_list([str(s) for s in specs], 4)) + header = f"Spack is taking more than {time_limit} seconds to solve for {specs_str}" + if error_on_timeout: + raise UnsatisfiableSpecError(f"{header}, stopping concretization") + warnings.warn(f"{header}, using the best configuration found so far") + handle.cancel() + + solve_result = handle.get() + timer.stop("solve") + + # once done, construct the solve result + result = Result(specs) + result.satisfiable = solve_result.satisfiable + + if result.satisfiable: + timer.start("construct_specs") + # get the best model + builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible) + min_cost, best_model = min(models) + + # first check for errors + error_handler = ErrorHandler(best_model, specs) + error_handler.raise_if_errors() + + # build specs from spec attributes in the model + spec_attrs = [(name, tuple(rest)) for name, *rest in extract_args(best_model, "attr")] + answers = builder.build_specs(spec_attrs) + + # add best spec to the results + result.answers.append((list(min_cost), 0, answers)) + + # get optimization criteria + criteria_args = extract_args(best_model, "opt_criterion") + result.criteria = build_criteria_names(min_cost, criteria_args) + + # record the number of models the solver considered + result.nmodels = len(models) + + # record the possible dependencies in the solve + result.possible_dependencies = setup.pkgs + timer.stop("construct_specs") + timer.stop() + + result.raise_if_unsat() + + if result.satisfiable and result.unsolved_specs and setup.concretize_everything: + raise OutputDoesNotSatisfyInputError(result.unsolved_specs) + + return result - def solve(self, setup, specs, reuse=None, output=None, control=None, allow_deprecated=False): + def solve( + self, + setup: "SpackSolverSetup", + specs: List[spack.spec.Spec], + reuse: Optional[List[spack.spec.Spec]] = None, + packages_with_externals=None, + output: Optional[OutputConfiguration] = None, + control: Optional[Any] = None, # TODO: figure out how to annotate clingo.Control + allow_deprecated: bool = False, + ) -> Tuple[Result, Optional[spack.util.timer.Timer], Optional[Dict]]: """Set up the input and solve for dependencies of ``specs``. Arguments: - setup (SpackSolverSetup): An object to set up the ASP problem. - specs (list): List of ``Spec`` objects to solve for. - reuse (None or list): list of concrete specs that can be reused - output (None or OutputConfiguration): configuration object to set - the output of this solve. 
- control (clingo.Control): configuration for the solver. If None, - default values will be used + setup: An object to set up the ASP problem. + specs: List of ``Spec`` objects to solve for. + reuse: list of concrete specs that can be reused + output: configuration object to set the output of this solve. + control: configuration for the solver. If None, default values will be used allow_deprecated: if True, allow deprecated versions in the solve Return: A tuple of the solve result, the timer for the different phases of the solve, and the internal statistics from clingo. """ - # avoid circular import - from spack.bootstrap.core import ensure_winsdk_external_or_raise + from spack.bootstrap import ensure_winsdk_external_or_raise output = output or DEFAULT_OUTPUT_CONFIGURATION timer = spack.util.timer.Timer() @@ -1143,8 +1078,10 @@ def solve(self, setup, specs, reuse=None, output=None, control=None, allow_depre # needs to modify active config scope, so cannot be run within # bootstrap config scope if sys.platform == "win32": - tty.debug("Ensuring basic dependencies {win-sdk, wgl} available") ensure_winsdk_external_or_raise() + + # assemble a list of the control files needed for this problem. Some are conditionally + # included depending on what features we're using in the solve. control_files = ["concretize.lp", "heuristic.lp", "display.lp", "direct_dependency.lp"] if not setup.concretize_everything: control_files.append("when_possible.lp") @@ -1156,137 +1093,61 @@ def solve(self, setup, specs, reuse=None, output=None, control=None, allow_depre control_files.append("splices.lp") timer.start("setup") - asp_problem = setup.setup(specs, reuse=reuse, allow_deprecated=allow_deprecated) - if output.out is not None: - output.out.write(asp_problem) - if output.setup_only: - return Result(specs), None, None + problem_builder = setup.setup( + specs, + reuse=reuse, + packages_with_externals=packages_with_externals, + allow_deprecated=allow_deprecated, + ) timer.stop("setup") - timer.start("cache-check") timer.start("ordering") - # ensure deterministic output - problem_repr = "\n".join(sorted(asp_problem.split("\n"))) - timer.stop("ordering") - parent_dir = os.path.dirname(__file__) - full_path = lambda x: os.path.join(parent_dir, x) - abs_control_files = [full_path(x) for x in control_files] - for ctrl_file in abs_control_files: - with open(ctrl_file, "r", encoding="utf-8") as f: - problem_repr += "\n" + f.read() - - result = None - conc_cache_enabled = spack.config.get("config:concretization_cache:enable", False) - if conc_cache_enabled: - result, concretization_stats = CONC_CACHE.fetch(problem_repr) - - timer.stop("cache-check") - if not result: - timer.start("load") - # Add the problem instance - self.control.add("base", [], asp_problem) - # Load the files - [self.control.load(lp) for lp in abs_control_files] - timer.stop("load") - - # Grounding is the first step in the solve -- it turns our facts - # and first-order logic rules into propositional logic. - timer.start("ground") - self.control.ground([("base", [])]) - timer.stop("ground") - - # With a grounded program, we can run the solve. 
- models = [] # stable models if things go well - cores = [] # unsatisfiable cores if they do not - - def on_model(model): - models.append((model.cost, model.symbols(shown=True, terms=True))) - - solve_kwargs = { - "assumptions": setup.assumptions, - "on_model": on_model, - "on_core": cores.append, - } - - if clingo_cffi(): - solve_kwargs["on_unsat"] = cores.append - - timer.start("solve") - time_limit = spack.config.CONFIG.get("concretizer:timeout", -1) - error_on_timeout = spack.config.CONFIG.get("concretizer:error_on_timeout", True) - # Spack uses 0 to set no time limit, clingo API uses -1 - if time_limit == 0: - time_limit = -1 - with self.control.solve(**solve_kwargs, async_=True) as handle: - finished = handle.wait(time_limit) - if not finished: - specs_str = ", ".join( - spack.llnl.util.lang.elide_list([str(s) for s in specs], 4) - ) - header = ( - f"Spack is taking more than {time_limit} seconds to solve for {specs_str}" - ) - if error_on_timeout: - raise UnsatisfiableSpecError(f"{header}, stopping concretization") - warnings.warn(f"{header}, using the best configuration found so far") - handle.cancel() - - solve_result = handle.get() - timer.stop("solve") - - # once done, construct the solve result - result = Result(specs) - result.satisfiable = solve_result.satisfiable - - if result.satisfiable: - timer.start("construct_specs") - # get the best model - builder = SpecBuilder(specs, hash_lookup=setup.reusable_and_possible) - min_cost, best_model = min(models) - - # first check for errors - error_handler = ErrorHandler(best_model, specs) - error_handler.raise_if_errors() - - # build specs from spec attributes in the model - spec_attrs = [ - (name, tuple(rest)) for name, *rest in extract_args(best_model, "attr") - ] - answers = builder.build_specs(spec_attrs) - - # add best spec to the results - result.answers.append((list(min_cost), 0, answers)) + # print the output with comments, etc. 
if the user asked + problem = problem_builder.asp_problem + if output.out is not None: + output.out.write("\n".join(problem)) - # get optimization criteria - criteria_args = extract_args(best_model, "opt_criterion") - result.criteria = build_criteria_names(min_cost, criteria_args) + if output.setup_only: + return Result(specs), None, None - # record the number of models the solver considered - result.nmodels = len(models) + # strip the problem of comments and empty lines + problem = strip_asp_problem(problem) + randomize = "SPACK_SOLVER_RANDOMIZATION" in os.environ + if randomize: + # create a shuffled copy -- useful for understanding performance variation + problem = random.sample(problem, len(problem)) + else: + problem.sort() # sort for deterministic output - # record the possible dependencies in the solve - result.possible_dependencies = setup.pkgs - timer.stop("construct_specs") - timer.stop() - elif cores: - result.control = self.control - result.cores.extend(cores) + timer.stop("ordering") - result.raise_if_unsat() + timer.start("cache-check") + # load control files to add to the input representation + control_file_paths = self._control_file_paths(control_files) + cache_key = self._make_cache_key(problem, control_file_paths) + + result, concretization_stats = None, None + conc_cache_enabled = spack.config.get("concretizer:concretization_cache:enable", False) + if conc_cache_enabled and self._conc_cache: + result, concretization_stats = self._conc_cache.fetch(cache_key) + timer.stop("cache-check") - if result.satisfiable and result.unsolved_specs and setup.concretize_everything: - raise OutputDoesNotSatisfyInputError(result.unsolved_specs) + # run the solver and store the result, if it wasn't cached already + if not result: + problem_repr = "\n".join(problem) + result = self._run_clingo(specs, setup, problem_repr, control_file_paths, timer) + if conc_cache_enabled and self._conc_cache: + self._conc_cache.store(cache_key, result, self.control.statistics) - if conc_cache_enabled: - CONC_CACHE.store(problem_repr, result, self.control.statistics, test=setup.tests) - concretization_stats = self.control.statistics if output.timers: timer.write_tty() print() + concretization_stats = concretization_stats or self.control.statistics if output.stats: print("Statistics:") pprint.pprint(concretization_stats) + return result, timer, concretization_stats @@ -1412,23 +1273,6 @@ def strip_type_suffix(source: str) -> Tuple[int, Optional[str]]: return -1, source -class SourceContext: - """Tracks context in which a Spec's clause-set is generated (i.e. - with ``SpackSolverSetup.spec_clauses``). - - Facts generated for the spec may include this context. - """ - - def __init__(self, *, source: Optional[str] = None): - # This can be "literal" for constraints that come from a user - # spec (e.g. from the command line); it can be the output of - # `ConstraintOrigin.append_type_suffix`; the default is "none" - # (which means it isn't important to keep track of the source - # in that case). - self.source = "none" if source is None else source - self.wrap_node_requirement: Optional[bool] = None - - class ConditionIdContext(SourceContext): """Derived from a ``ConditionContext``: for clause-sets generated by imposed/required specs, stores an associated transform. 
@@ -1442,12 +1286,12 @@ class ConditionIdContext(SourceContext): def __init__(self): super().__init__() - self.transform = None + self.transform: Optional[TransformFunction] = None class ConditionContext(SourceContext): """Tracks context in which a condition (i.e. ``SpackSolverSetup.condition``) - is generated (e.g. for a `depends_on`). + is generated (e.g. for a ``depends_on``). This may modify the required/imposed specs generated as relevant for the context. @@ -1457,10 +1301,10 @@ def __init__(self): super().__init__() # transformation applied to facts from the required spec. Defaults # to leave facts as they are. - self.transform_required = None + self.transform_required: Optional[TransformFunction] = None # transformation applied to facts from the imposed spec. Defaults # to removing "node" and "virtual_node" facts. - self.transform_imposed = None + self.transform_imposed: Optional[TransformFunction] = None # Whether to wrap direct dependency facts as node requirements, # imposed by the parent. If None, the default is used, which is: # - wrap head of rules @@ -1482,20 +1326,28 @@ def impose_context(self) -> ConditionIdContext: return ctxt +def _track_dependencies( + input_spec: spack.spec.Spec, requirements: List[AspFunction] +) -> List[AspFunction]: + return requirements + [fn.attr("track_dependencies", input_spec.name)] + + class SpackSolverSetup: """Class to set up and run a Spack concretization solve.""" - def __init__(self, tests: bool = False): + gen: "ProblemInstanceBuilder" + possible_versions: Dict[str, Dict[GitOrStandardVersion, List[Provenance]]] + + def __init__(self, tests: spack.concretize.TestsType = False): self.possible_graph = create_graph_analyzer() # these are all initialized in setup() - self.gen: "ProblemInstanceBuilder" = ProblemInstanceBuilder() self.requirement_parser = RequirementParser(spack.config.CONFIG) self.possible_virtuals: Set[str] = set() - self.assumptions: List[Tuple["clingo.Symbol", bool]] = [] # type: ignore[name-defined] - self.declared_versions: Dict[str, List[DeclaredVersion]] = collections.defaultdict(list) - self.possible_versions: Dict[str, Set[GitOrStandardVersion]] = collections.defaultdict(set) + # pkg_name -> version -> list of possible origins (package.py, installed, etc.) + self.possible_versions = collections.defaultdict(lambda: collections.defaultdict(list)) + self.versions_from_yaml: Dict[str, List[GitOrStandardVersion]] = {} self.git_commit_versions: Dict[str, Dict[GitOrStandardVersion, str]] = ( collections.defaultdict(dict) ) @@ -1539,38 +1391,28 @@ def __init__(self, tests: bool = False): # If true, we have to load the code for synthesizing splices self.enable_splicing: bool = spack.config.CONFIG.get("concretizer:splice:automatic") - def pkg_version_rules(self, pkg): - """Output declared versions of a package. - - This uses self.declared_versions so that we include any versions - that arise from a spec. 
- """ - - def key_fn(version): - # Origins are sorted by "provenance" first, see the Provenance enumeration above - return version.origin, version.idx - - if isinstance(pkg, str): - pkg = self.pkg_class(pkg) - - declared_versions = self.declared_versions[pkg.name] - partially_sorted_versions = sorted(set(declared_versions), key=key_fn) - - most_to_least_preferred = [] - for _, group in itertools.groupby(partially_sorted_versions, key=key_fn): - most_to_least_preferred.extend( - list(sorted(group, reverse=True, key=lambda x: vn.ver(x.version))) + def pkg_version_rules(self, pkg: Type[spack.package_base.PackageBase]) -> None: + """Declares known versions, their origins, and their weights.""" + version_provenance = self.possible_versions[pkg.name] + ordered_versions = spack.package_base.sort_by_pkg_preference( + self.possible_versions[pkg.name], pkg=pkg + ) + # Account for preferences in packages.yaml, if any + if pkg.name in self.versions_from_yaml: + ordered_versions = list( + spack.llnl.util.lang.dedupe(self.versions_from_yaml[pkg.name] + ordered_versions) ) - for weight, declared_version in enumerate(most_to_least_preferred): - self.gen.fact( - fn.pkg_fact( - pkg.name, - fn.version_declared( - declared_version.version, weight, str(declared_version.origin) - ), + # Set the deprecation penalty, according to the package. This should be enough to move the + # first version last if deprecated. + self.gen.fact(fn.pkg_fact(pkg.name, fn.version_deprecation_penalty(len(ordered_versions)))) + + for weight, declared_version in enumerate(ordered_versions): + self.gen.fact(fn.pkg_fact(pkg.name, fn.version_declared(declared_version, weight))) + for origin in version_provenance[declared_version]: + self.gen.fact( + fn.pkg_fact(pkg.name, fn.version_origin(declared_version, str(origin))) ) - ) for v in self.possible_versions[pkg.name]: if pkg.needs_commit(v): @@ -1582,50 +1424,53 @@ def key_fn(version): for v in sorted(deprecated): self.gen.fact(fn.pkg_fact(pkg.name, fn.deprecated_version(v))) - def spec_versions(self, spec): + def spec_versions(self, spec: spack.spec.Spec) -> List[AspFunction]: """Return list of clauses expressing spec's version constraints.""" - spec = specify(spec) + name = spec.name msg = "Internal Error: spec with no name occured. Please report to the spack maintainers." 
- assert spec.name, msg + assert name, msg if spec.concrete: - return [fn.attr("version", spec.name, spec.version)] + return [fn.attr("version", name, spec.version)] if spec.versions == vn.any_version: return [] # record all version constraints for later - self.version_constraints.add((spec.name, spec.versions)) - return [fn.attr("node_version_satisfies", spec.name, spec.versions)] + self.version_constraints.add((name, spec.versions)) + return [fn.attr("node_version_satisfies", name, spec.versions)] - def target_ranges(self, spec, single_target_fn): + def target_ranges(self, spec: spack.spec.Spec, single_target_fn) -> List[AspFunction]: + name = spec.name target = spec.architecture.target # Check if the target is a concrete target if str(target) in spack.vendor.archspec.cpu.TARGETS: - return [single_target_fn(spec.name, target)] + return [single_target_fn(name, target)] self.target_constraints.add(target) - return [fn.attr("node_target_satisfies", spec.name, target)] + return [fn.attr("node_target_satisfies", name, target)] def conflict_rules(self, pkg): for when_spec, conflict_specs in pkg.conflicts.items(): - when_spec_msg = f"conflict constraint {str(when_spec)}" + when_spec_msg = f"conflict constraint {when_spec}" when_spec_id = self.condition(when_spec, required_name=pkg.name, msg=when_spec_msg) + when_spec_str = str(when_spec) for conflict_spec, conflict_msg in conflict_specs: - conflict_spec = spack.spec.Spec(conflict_spec) + conflict_spec_str = str(conflict_spec) if conflict_msg is None: conflict_msg = f"{pkg.name}: " - if when_spec == spack.spec.Spec(): - conflict_msg += f"conflicts with '{conflict_spec}'" + if not when_spec_str: + conflict_msg += f"conflicts with '{conflict_spec_str}'" else: - conflict_msg += f"'{conflict_spec}' conflicts with '{when_spec}'" + conflict_msg += f"'{conflict_spec_str}' conflicts with '{when_spec_str}'" + + if not conflict_spec_str: + conflict_spec_msg = f"conflict is triggered when {pkg.name}" + else: + conflict_spec_msg = f"conflict is triggered when {conflict_spec_str}" - spec_for_msg = conflict_spec - if conflict_spec == spack.spec.Spec(): - spec_for_msg = spack.spec.Spec(pkg.name) - conflict_spec_msg = f"conflict is triggered when {str(spec_for_msg)}" conflict_spec_id = self.condition( conflict_spec, required_name=conflict_spec.name or pkg.name, @@ -1730,7 +1575,7 @@ def define_variant( # used to find a variant id from its variant definition (for variant values on specs) self.variant_ids_by_def_id[id(variant_def)] = vid - if when == spack.spec.Spec(): + if when == EMPTY_SPEC: # unconditional variant pkg_fact(fn.variant_definition(name, vid)) else: @@ -1745,34 +1590,42 @@ def define_variant( if variant_def.sticky: pkg_fact(fn.variant_sticky(vid)) - # define defaults for this variant definition + # Get the default values for this variant definition as a tuple + default_values: Tuple[Union[bool, str], ...] 
= (variant_def.default,) if variant_def.multi: - for val in sorted(variant_def.make_default().values): - pkg_fact(fn.variant_default_value_from_package_py(vid, val)) - else: - pkg_fact(fn.variant_default_value_from_package_py(vid, variant_def.default)) + default_values = variant_def.make_default().values - # define possible values for this variant definition - values = variant_def.values - if values is None: - values = [] + for val in default_values: + pkg_fact(fn.variant_default_value_from_package_py(vid, val)) - elif isinstance(values, vt.DisjointSetsOfValues): - union = set() - for sid, s in enumerate(sorted(values.sets)): - for value in sorted(s): + # Deal with variants that use validator functions + if variant_def.values_defined_by_validator(): + for value in default_values: + pkg_fact(fn.variant_possible_value(vid, value)) + self.gen.newline() + return + + values = variant_def.values or default_values + + # If we deal with disjoint sets of values, define the sets + if isinstance(values, vt.DisjointSetsOfValues): + for sid, s in enumerate(values.sets): + for value in s: pkg_fact(fn.variant_value_from_disjoint_sets(vid, value, sid)) - union.update(s) - values = union - # ensure that every variant has at least one possible value. - if not values: - values = [variant_def.default] + # Define penalties. Put default values first, otherwise keep the order + penalty = 1 + for v in default_values: + pkg_fact(fn.variant_penalty(vid, v, penalty)) + penalty += 1 - for value in sorted(values): - pkg_fact(fn.variant_possible_value(vid, value)) + for v in values: + if v not in default_values: + pkg_fact(fn.variant_penalty(vid, v, penalty)) + penalty += 1 - # we're done here for unconditional values + # Deal with conditional values + for value in values: if not isinstance(value, vt.ConditionalValue): continue @@ -1805,9 +1658,7 @@ def define_variant( msg=f"invalid variant value: {vstring}", ) constraint_id = self.condition( - spack.spec.Spec(), - required_name=pkg.name, - msg="empty (total) conflict constraint", + EMPTY_SPEC, required_name=pkg.name, msg="empty (total) conflict constraint" ) msg = f"variant value {vstring} is conditionally disabled" pkg_fact(fn.conflict(trigger_id, constraint_id, msg)) @@ -1935,7 +1786,7 @@ def condition( (required if imposed_spec is anonymous, ignored if not) msg: description of the condition context: if provided, indicates how to modify the clause-sets for the required/imposed - specs based on the type of constraint they are generated for (e.g. `depends_on`) + specs based on the type of constraint they are generated for (e.g. 
``depends_on``) Returns: int: id of the condition created by this function """ @@ -1952,14 +1803,6 @@ def condition( return condition_id - def impose(self, condition_id, imposed_spec, node=True, body=False): - imposed_constraints = self.spec_clauses(imposed_spec, body=body) - for pred in imposed_constraints: - # imposed "node"-like conditions are no-ops - if not node and pred.args[0] in ("node", "virtual_node"): - continue - self.gen.fact(fn.imposed_constraint(condition_id, *pred.args)) - def package_provider_rules(self, pkg): for vpkg_name in pkg.provided_virtual_names(): if vpkg_name not in self.possible_virtuals: @@ -1990,9 +1833,12 @@ def package_provider_rules(self, pkg): self.gen.newline() def package_dependencies_rules(self, pkg): - """Translate 'depends_on' directives into ASP logic.""" - for cond, deps_by_name in sorted(pkg.dependencies.items()): - for _, dep in sorted(deps_by_name.items()): + """Translate ``depends_on`` directives into ASP logic.""" + + for cond, deps_by_name in pkg.dependencies.items(): + cond_str = str(cond) + cond_str_suffix = f" when {cond_str}" if cond_str else "" + for _, dep in deps_by_name.items(): depflag = dep.depflag # Skip test dependencies if they're not requested if not self.tests: @@ -2007,16 +1853,16 @@ def package_dependencies_rules(self, pkg): if not depflag: continue - msg = f"{pkg.name} depends on {dep.spec}" - if cond != spack.spec.Spec(): - msg += f" when {cond}" - else: - pass - - def track_dependencies(input_spec, requirements): - return requirements + [fn.attr("track_dependencies", input_spec.name)] + msg = f"{pkg.name} depends on {dep.spec}{cond_str_suffix}" - def dependency_holds(input_spec, requirements): + def dependency_holds( + input_spec: spack.spec.Spec, requirements: List[AspFunction] + ) -> List[AspFunction]: + # TODO: `dependency_holds` is used as a cache key, and is a unique object in + # every iteration of the loop. This prevents deduplication of identical + # "effects" when different ``when`` specs impose the same dependency. We cannot move + # this out of the loop, because the effect cache is keyed only by a spec, and + # not by the dependency type.
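# ----------------------------------------------------------------------------
# Editorial note (not part of this diff): as a worked example with hypothetical
# names, suppose pkg.name == "foo" and this iteration handles
# depends_on("bar", type="link"). The transform below takes the clauses for the
# imposed spec "bar", drops its "node"/"virtual_node" facts, and appends one
# fact per requested dependency type, e.g.:
#
#     attr("dependency_holds", "foo", "bar", "link")
# ----------------------------------------------------------------------------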
result = remove_facts("node", "virtual_node")(input_spec, requirements) + [ fn.attr( "dependency_holds", pkg.name, input_spec.name, dt.flag_to_string(t) @@ -2032,7 +1878,7 @@ def dependency_holds(input_spec, requirements): context.source = ConstraintOrigin.append_type_suffix( pkg.name, ConstraintOrigin.DEPENDS_ON ) - context.transform_required = track_dependencies + context.transform_required = _track_dependencies context.transform_imposed = dependency_holds self.condition(cond, dep.spec, required_name=pkg.name, msg=msg, context=context) @@ -2127,29 +1973,42 @@ def package_splice_rules(self, pkg): self.gen.newline() - def virtual_preferences(self, pkg_name, func): - """Call func(vspec, provider, i) for each of pkg's provider prefs.""" - config = spack.config.get("packages") - pkg_prefs = config.get(pkg_name, {}).get("providers", {}) - for vspec, providers in pkg_prefs.items(): - if vspec not in self.possible_virtuals: + def virtual_requirements_and_weights(self): + virtual_preferences = spack.config.CONFIG.get("packages:all:providers", {}) + + self.gen.h1("Virtual requirements and weights") + for virtual_str in sorted(self.possible_virtuals): + self.gen.newline() + self.gen.h2(f"Virtual: {virtual_str}") + self.gen.fact(fn.virtual(virtual_str)) + + rules = self.requirement_parser.rules_from_virtual(virtual_str) + if not rules and virtual_str not in virtual_preferences: continue - for i, provider in enumerate(providers): + required, preferred, removed = [], [], set() + for rule in rules: + # We don't deal with conditional requirements + if rule.condition != EMPTY_SPEC: + continue + + if rule.origin == RequirementOrigin.PREFER_YAML: + preferred.extend(x.name for x in rule.requirements if x.name) + elif rule.origin == RequirementOrigin.REQUIRE_YAML: + required.extend(x.name for x in rule.requirements if x.name) + elif rule.origin == RequirementOrigin.CONFLICT_YAML: + conflict_spec = rule.requirements[0] + # For conflicts, we take action only if just a name is used + if spack.spec.Spec(conflict_spec.name).satisfies(conflict_spec): + removed.add(conflict_spec.name) + + current_preferences = required + preferred + virtual_preferences.get(virtual_str, []) + current_preferences = [x for x in current_preferences if x not in removed] + for i, provider in enumerate(spack.llnl.util.lang.dedupe(current_preferences)): provider_name = spack.spec.Spec(provider).name - func(vspec, provider_name, i) + self.gen.fact(fn.provider_weight_from_config(virtual_str, provider_name, i)) self.gen.newline() - def provider_defaults(self): - self.gen.h2("Default virtual providers") - self.virtual_preferences( - "all", lambda v, p, i: self.gen.fact(fn.default_provider_preference(v, p, i)) - ) - - def provider_requirements(self): - self.gen.h2("Requirements on virtual providers") - for virtual_str in sorted(self.possible_virtuals): - rules = self.requirement_parser.rules_from_virtual(virtual_str) if rules: self.emit_facts_from_requirement_rules(rules) self.trigger_rules() @@ -2166,13 +2025,16 @@ def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]): pkg_name, policy, requirement_grp = rule.pkg_name, rule.policy, rule.requirements requirement_weight = 0 - + # Propagated preferences have a higher penalty that normal preferences + weight_multiplier = 2 if rule.origin == RequirementOrigin.INPUT_SPECS else 1 # Write explicitly if a requirement is conditional or not - if rule.condition != spack.spec.Spec(): - msg = f"condition to activate requirement {requirement_grp_id}" + if rule.condition != EMPTY_SPEC: + 
msg = f"activate requirement {requirement_grp_id} if {rule.condition} holds" + context = ConditionContext() + context.transform_required = dag_closure_by_deptype try: main_condition_id = self.condition( - rule.condition, required_name=pkg_name, msg=msg + rule.condition, required_name=pkg_name, msg=msg, context=context ) except Exception as e: if rule.kind != RequirementKind.DEFAULT: @@ -2200,7 +2062,7 @@ def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]): when_spec = spec if virtual and spec.name != pkg_name: - when_spec = spack.spec.Spec(f"^[virtuals={pkg_name}] {spec.name}") + when_spec = spack.spec.Spec(f"^[virtuals={pkg_name}] {spec}") try: context = ConditionContext() @@ -2216,11 +2078,16 @@ def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]): # else: for virtuals we want to emit "node" and # "virtual_node" in imposed specs + info_msg = f"{input_spec} is a requirement for package {pkg_name}" + if rule.condition != EMPTY_SPEC: + info_msg += f" when {rule.condition}" + if rule.message: + info_msg += f" ({rule.message})" member_id = self.condition( required_spec=when_spec, imposed_spec=spec, required_name=pkg_name, - msg=f"{input_spec} is a requirement for package {pkg_name}", + msg=info_msg, context=context, ) @@ -2238,39 +2105,16 @@ def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]): continue self.gen.fact(fn.requirement_group_member(member_id, pkg_name, requirement_grp_id)) - self.gen.fact(fn.requirement_has_weight(member_id, requirement_weight)) + self.gen.fact( + fn.requirement_has_weight(member_id, requirement_weight * weight_multiplier) + ) self.gen.newline() requirement_weight += 1 - def external_packages(self): + def external_packages(self, packages_with_externals): """Facts on external packages, from packages.yaml and implicit externals.""" self.gen.h1("External packages") - spec_filters = [] - concretizer_yaml = spack.config.get("concretizer") - reuse_yaml = concretizer_yaml.get("reuse") - if isinstance(reuse_yaml, typing.Mapping): - default_include = reuse_yaml.get("include", []) - default_exclude = reuse_yaml.get("exclude", []) - for source in reuse_yaml.get("from", []): - if source["type"] != "external": - continue - - include = source.get("include", default_include) - if include: - # Since libcs are implicit externals, we need to implicitly include them - include = include + self.libcs - exclude = source.get("exclude", default_exclude) - spec_filters.append( - SpecFilter( - factory=lambda: [], - is_usable=lambda x: True, - include=include, - exclude=exclude, - ) - ) - - packages_yaml = _external_config_with_implicit_externals(spack.config.CONFIG) - for pkg_name, data in packages_yaml.items(): + for pkg_name, data in packages_with_externals.items(): if pkg_name == "all": continue @@ -2278,99 +2122,10 @@ def external_packages(self): if pkg_name not in self.pkgs: continue - # Check if the external package is buildable. If it is - # not then "external()" is a fact, unless we can - # reuse an already installed spec. 
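To make the provider-weight merge in ``virtual_requirements_and_weights`` concrete: ``require:`` entries come first, then ``prefer:`` entries, then ``packages:all:providers`` preferences; ``conflict:``-ed names are dropped and the result is deduplicated in order before weights are assigned. A hedged, standalone sketch (plain strings instead of Spec objects):

```python
# Hedged sketch: plain strings instead of Spec objects; dedupe mirrors
# spack.llnl.util.lang.dedupe (order-preserving).
from typing import Dict, List, Set


def dedupe(seq: List[str]) -> List[str]:
    seen: Set[str] = set()
    out = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out


def provider_weights(
    required: List[str], preferred: List[str], config_prefs: List[str], removed: Set[str]
) -> Dict[str, int]:
    """Lower weight = stronger preference; required providers come first."""
    merged = [p for p in required + preferred + config_prefs if p not in removed]
    return {provider: weight for weight, provider in enumerate(dedupe(merged))}


if __name__ == "__main__":
    print(
        provider_weights(
            required=["openmpi"],
            preferred=["mpich"],
            config_prefs=["mvapich2", "openmpi"],  # duplicate openmpi is dropped
            removed={"intel-mpi"},
        )
    )  # {'openmpi': 0, 'mpich': 1, 'mvapich2': 2}
```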
- external_buildable = data.get("buildable", True) - externals = data.get("externals", []) - if not external_buildable or externals: + if not data.get("buildable", True): self.gen.h2(f"External package: {pkg_name}") - - if not external_buildable: self.gen.fact(fn.buildable_false(pkg_name)) - # Read a list of all the specs for this package - candidate_specs = [ - spack.spec.parse_with_version_concrete(x["spec"]) for x in externals - ] - - selected_externals = set() - if spec_filters: - for current_filter in spec_filters: - current_filter.factory = lambda: candidate_specs - selected_externals.update(current_filter.selected_specs()) - - # Emit facts for externals specs. Note that "local_idx" is the index of the spec - # in packages::externals. This means: - # - # packages::externals[local_idx].spec == spec - external_versions = [] - for local_idx, spec in enumerate(candidate_specs): - msg = f"{spec.name} available as external when satisfying {spec}" - - if any(x.satisfies(spec) for x in self.rejected_compilers): - tty.debug( - f"[{__name__}]: not considering {spec} as external, since " - f"it's a non-working compiler" - ) - continue - - if spec_filters and spec not in selected_externals: - continue - - if not spec.versions.concrete: - warnings.warn(f"cannot use the external spec {spec}: needs a concrete version") - continue - - def external_requirement(input_spec, requirements): - result = [] - for asp_fn in requirements: - if asp_fn.args[0] == "depends_on": - continue - if asp_fn.args[1] != input_spec.name: - continue - result.append(asp_fn) - return result - - def external_imposition(input_spec, requirements): - result = [] - for asp_fn in requirements: - if asp_fn.args[0] == "depends_on": - continue - elif asp_fn.args[0] == "direct_dependency": - asp_fn.args = "external_build_requirement", *asp_fn.args[1:] - if asp_fn.args[1] != input_spec.name: - continue - result.append(asp_fn) - result.append(fn.attr("external_conditions_hold", input_spec.name, local_idx)) - return result - - try: - context = ConditionContext() - context.transform_required = external_requirement - context.transform_imposed = external_imposition - self.condition(spec, spec, msg=msg, context=context) - except (spack.error.SpecError, RuntimeError) as e: - warnings.warn(f"while setting up external spec {spec}: {e}") - continue - external_versions.append((spec.version, local_idx)) - self.possible_versions[spec.name].add(spec.version) - self.gen.newline() - - # Order the external versions to prefer more recent versions - # even if specs in packages.yaml are not ordered that way - external_versions = [ - (v, idx, external_id) - for idx, (v, external_id) in enumerate(sorted(external_versions, reverse=True)) - ] - for version, idx, external_id in external_versions: - self.declared_versions[pkg_name].append( - DeclaredVersion(version=version, idx=idx, origin=Provenance.EXTERNAL) - ) - - self.trigger_rules() - self.effect_rules() - def preferred_variants(self, pkg_name): """Facts on concretization preferences, as read from packages.yaml""" preferences = spack.package_prefs.PackagePrefs @@ -2425,7 +2180,7 @@ def spec_clauses( required_from: Optional[str] = None, context: Optional[SourceContext] = None, ) -> List[AspFunction]: - """Wrap a call to `_spec_clauses()` into a try/except block with better error handling. + """Wrap a call to ``_spec_clauses()`` into a try/except block with better error handling. Arguments are as for ``_spec_clauses()`` except ``required_from``. 
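For orientation, here is a toy illustration of what ``spec_clauses()`` produces: attribute-style facts about a spec's name, version range, and platform, wrapped differently depending on whether they sit in a rule head or body. This is a simplification with made-up tuple facts, not the real ``AspFunction``/``_Head``/``_Body`` machinery:

```python
# Toy illustration only: real clauses cover variants, flags, targets, hashes,
# and dependency edges, and body=True/False selects the _Body/_Head wrappers.
from typing import List, Optional, Tuple

Fact = Tuple[str, ...]


def toy_spec_clauses(
    name: str,
    version_range: Optional[str] = None,
    platform: Optional[str] = None,
    body: bool = False,
) -> List[Fact]:
    # "body" facts state conditions to match; "head" facts state effects to impose
    kind = "body" if body else "head"
    clauses: List[Fact] = [(kind, "node", name)]
    if version_range:
        clauses.append((kind, "node_version_satisfies", name, version_range))
    if platform:
        clauses.append((kind, "node_platform", name, platform))
    return clauses


if __name__ == "__main__":
    for clause in toy_spec_clauses("zlib", "@1.2:", "linux", body=True):
        print(clause)
```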
@@ -2459,6 +2214,7 @@ def _spec_clauses( concrete_build_deps: bool = False, include_runtimes: bool = False, context: Optional[SourceContext] = None, + seen: Optional[Set[int]] = None, ) -> List[AspFunction]: """Return a list of clauses that a spec mandates are true. @@ -2473,7 +2229,8 @@ def _spec_clauses( include_runtimes: generate full dependency clauses from runtime libraries that are omitted from the solve. context: tracks what constraint this clause set is generated for (e.g. a - `depends_on` constraint in a package.py file) + ``depends_on`` constraint in a package.py file) + seen: set of ids of specs that have already been processed (for internal use only) Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates hashes for the dependency requirements of concrete specs. If ``expand_hashes`` @@ -2482,17 +2239,18 @@ def _spec_clauses( for spec ``diff``). """ clauses = [] + seen = seen if seen is not None else set() + name = spec.name + seen.add(id(spec)) f: Union[Type[_Head], Type[_Body]] = _Body if body else _Head - if spec.name: + if name: clauses.append( - f.node(spec.name) - if not spack.repo.PATH.is_virtual(spec.name) - else f.virtual_node(spec.name) + f.node(name) if not spack.repo.PATH.is_virtual(name) else f.virtual_node(name) ) if spec.namespace: - clauses.append(f.namespace(spec.name, spec.namespace)) + clauses.append(f.namespace(name, spec.namespace)) clauses.extend(self.spec_versions(spec)) @@ -2501,9 +2259,9 @@ def _spec_clauses( arch = spec.architecture if arch: if arch.platform: - clauses.append(f.node_platform(spec.name, arch.platform)) + clauses.append(f.node_platform(name, arch.platform)) if arch.os: - clauses.append(f.node_os(spec.name, arch.os)) + clauses.append(f.node_os(name, arch.os)) if arch.target: clauses.extend(self.target_ranges(spec, f.node_target)) @@ -2517,22 +2275,22 @@ def _spec_clauses( for value in variant.values: # ensure that the value *can* be valid for the spec - if spec.name and not spec.concrete and not spack.repo.PATH.is_virtual(spec.name): + if name and not spec.concrete and not spack.repo.PATH.is_virtual(name): variant_defs = vt.prevalidate_variant_value( - self.pkg_class(spec.name), variant, spec + self.pkg_class(name), variant, spec ) # Record that this is a valid possible value.
Accounts for # int/str/etc., where valid values can't be listed in the package for variant_def in variant_defs: - self.variant_values_from_specs.add((spec.name, id(variant_def), value)) + self.variant_values_from_specs.add((name, id(variant_def), value)) if variant.propagate: - clauses.append(f.propagate(spec.name, fn.variant_value(vname, value))) - if self.pkg_class(spec.name).has_variant(vname): - clauses.append(f.variant_value(spec.name, vname, value)) + clauses.append(f.propagate(name, fn.variant_value(vname, value))) + if self.pkg_class(name).has_variant(vname): + clauses.append(f.variant_value(name, vname, value)) else: - variant_clause = f.variant_value(spec.name, vname, value) + variant_clause = f.variant_value(name, vname, value) if ( variant.concrete and variant.type == vt.VariantType.MULTI @@ -2544,9 +2302,7 @@ def _spec_clauses( *variant_clause.args[1:], ) else: - clauses.append( - fn.attr("concrete_variant_request", spec.name, vname, value) - ) + clauses.append(fn.attr("concrete_variant_request", name, vname, value)) clauses.append(variant_clause) # compiler flags @@ -2555,12 +2311,12 @@ def _spec_clauses( flag_group = " ".join(flags) for flag in flags: clauses.append( - f.node_flag(spec.name, fn.node_flag(flag_type, flag, flag_group, source)) + f.node_flag(name, fn.node_flag(flag_type, flag, flag_group, source)) ) if not spec.concrete and flag.propagate is True: clauses.append( f.propagate( - spec.name, + name, fn.node_flag(flag_type, flag, flag_group, source), fn.edge_types("link", "run"), ) @@ -2571,8 +2327,10 @@ def _spec_clauses( # older specs do not have package hashes, so we have to do this carefully package_hash = getattr(spec, "_package_hash", None) if package_hash: - clauses.append(fn.attr("package_hash", spec.name, package_hash)) - clauses.append(fn.attr("hash", spec.name, spec.dag_hash())) + clauses.append(fn.attr("package_hash", name, package_hash)) + clauses.append(fn.attr("hash", name, spec.dag_hash())) + if spec.external: + clauses.append(fn.attr("external", name)) edges = spec.edges_from_dependents() virtuals = sorted( @@ -2580,17 +2338,17 @@ def _spec_clauses( ) if not body and not spec.concrete: for virtual in virtuals: - clauses.append(fn.attr("provider_set", spec.name, virtual)) + clauses.append(fn.attr("provider_set", name, virtual)) clauses.append(fn.attr("virtual_node", virtual)) else: for virtual in virtuals: - clauses.append(fn.attr("virtual_on_incoming_edges", spec.name, virtual)) + clauses.append(fn.attr("virtual_on_incoming_edges", name, virtual)) # If the spec is external and concrete, we allow all the libcs on the system if spec.external and spec.concrete and using_libc_compatibility(): - clauses.append(fn.attr("needs_libc", spec.name)) + clauses.append(fn.attr("needs_libc", name)) for libc in self.libcs: - clauses.append(fn.attr("compatible_libc", spec.name, libc.name, libc.version)) + clauses.append(fn.attr("compatible_libc", name, libc.name, libc.version)) if not transitive: return clauses @@ -2599,7 +2357,7 @@ def _spec_clauses( edge_clauses = [] for dspec in spec.edges_to_dependencies(): # Ignore conditional dependencies, they are handled by caller - if dspec.when != spack.spec.Spec(): + if dspec.when != EMPTY_SPEC: continue dep = dspec.spec @@ -2609,7 +2367,7 @@ def _spec_clauses( # the possibility to reuse specs built against a different runtime. 
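The new ``seen`` parameter threads a set of ``id()`` values through the recursion so that a dependency shared by several parents is expanded only once when descending into dependencies further below. A minimal sketch of that guard on a toy DAG (not the real ``_spec_clauses`` signature):

```python
# Minimal sketch of the id()-based "seen" guard; Node is a toy stand-in for Spec.
from typing import List, Optional, Set


class Node:
    def __init__(self, name: str, deps: Optional[List["Node"]] = None) -> None:
        self.name = name
        self.deps = deps or []


def collect(node: Node, seen: Optional[Set[int]] = None) -> List[str]:
    """Visit every node exactly once, even when the DAG has shared dependencies."""
    seen = seen if seen is not None else set()  # same idiom as _spec_clauses
    seen.add(id(node))
    names = [node.name]
    for dep in node.deps:
        if id(dep) not in seen:  # the guard applied before descending
            names.extend(collect(dep, seen=seen))
    return names


if __name__ == "__main__":
    zlib = Node("zlib")
    root = Node("root", [Node("curl", [zlib]), Node("openssl", [zlib])])
    print(collect(root))  # ['root', 'curl', 'zlib', 'openssl'] -- zlib expanded once
```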
if dep.name == "gcc-runtime": edge_clauses.append( - fn.attr("compatible_runtime", spec.name, dep.name, f"{dep.version}:") + fn.attr("compatible_runtime", name, dep.name, f"{dep.version}:") ) constraint_spec = spack.spec.Spec(f"{dep.name}@{dep.version}") self.spec_versions(constraint_spec) @@ -2619,11 +2377,11 @@ def _spec_clauses( # libc is also solved again by clingo, but in this case the compatibility # is not encoded in the parent node - so we need to emit explicit facts if "libc" in dspec.virtuals: - edge_clauses.append(fn.attr("needs_libc", spec.name)) + edge_clauses.append(fn.attr("needs_libc", name)) for libc in self.libcs: if libc_is_compatible(libc, dep): edge_clauses.append( - fn.attr("compatible_libc", spec.name, libc.name, libc.version) + fn.attr("compatible_libc", name, libc.name, libc.version) ) if not include_runtimes: continue @@ -2636,11 +2394,11 @@ def _spec_clauses( # skip build dependencies of already-installed specs if concrete_build_deps or dtype != dt.BUILD: edge_clauses.append( - fn.attr("depends_on", spec.name, dep.name, dt.flag_to_string(dtype)) + fn.attr("depends_on", name, dep.name, dt.flag_to_string(dtype)) ) for virtual_name in dspec.virtuals: edge_clauses.append( - fn.attr("virtual_on_edge", spec.name, dep.name, virtual_name) + fn.attr("virtual_on_edge", name, dep.name, virtual_name) ) edge_clauses.append(fn.attr("virtual_node", virtual_name)) @@ -2650,23 +2408,24 @@ def _spec_clauses( edge_clauses.append(fn.attr("hash", dep.name, dep.dag_hash())) elif not concrete_build_deps and dspec.depflag: edge_clauses.append( - fn.attr("concrete_build_dependency", spec.name, dep.name, dep.dag_hash()) + fn.attr("concrete_build_dependency", name, dep.name, dep.dag_hash()) ) for virtual_name in dspec.virtuals: edge_clauses.append( - fn.attr("virtual_on_build_edge", spec.name, dep.name, virtual_name) + fn.attr("virtual_on_build_edge", name, dep.name, virtual_name) ) # if the spec is abstract, descend into dependencies. # if it's concrete, then the hashes above take care of dependency # constraints, but expand the hashes if asked for. 
- if not spec.concrete or expand_hashes: + if (not spec.concrete or expand_hashes) and id(dep) not in seen: dependency_clauses = self._spec_clauses( dep, body=body, expand_hashes=expand_hashes, concrete_build_deps=concrete_build_deps, context=context, + seen=seen, ) ### # Dependency expressed with "^" @@ -2679,9 +2438,7 @@ def _spec_clauses( # Direct dependencies expressed with "%" ### for dependency_type in dt.flag_to_tuple(dspec.depflag): - edge_clauses.append( - fn.attr("depends_on", spec.name, dep.name, dependency_type) - ) + edge_clauses.append(fn.attr("depends_on", name, dep.name, dependency_type)) # By default, wrap head of rules, unless the context says otherwise wrap_node_requirement = body is False @@ -2694,7 +2451,7 @@ def _spec_clauses( for clause in dependency_clauses: clause.name = "node_requirement" - edge_clauses.append(fn.attr("direct_dependency", spec.name, clause)) + edge_clauses.append(fn.attr("direct_dependency", name, clause)) clauses.extend(edge_clauses) return clauses @@ -2703,61 +2460,56 @@ def define_package_versions_and_validate_preferences( self, possible_pkgs: Set[str], *, require_checksum: bool, allow_deprecated: bool ): """Declare any versions in specs not declared in packages.""" - packages_yaml = spack.config.get("packages") + packages_yaml = spack.config.CONFIG.get_config("packages") for pkg_name in sorted(possible_pkgs): pkg_cls = self.pkg_class(pkg_name) # All the versions from the corresponding package.py file. Since concepts # like being a "develop" version or being preferred exist only at a # package.py level, sort them in this partial list here - package_py_versions = sorted( - pkg_cls.versions.items(), key=concretization_version_order, reverse=True - ) + from_package_py = list(pkg_cls.versions.items()) if require_checksum and pkg_cls.has_code: - package_py_versions = [ - x for x in package_py_versions if _is_checksummed_version(x) - ] + from_package_py = [x for x in from_package_py if _is_checksummed_version(x)] - for idx, (v, version_info) in enumerate(package_py_versions): + for v, version_info in from_package_py: if version_info.get("deprecated", False): self.deprecated_versions[pkg_name].add(v) if not allow_deprecated: continue - self.possible_versions[pkg_name].add(v) - self.declared_versions[pkg_name].append( - DeclaredVersion(version=v, idx=idx, origin=Provenance.PACKAGE_PY) - ) + self.possible_versions[pkg_name][v].append(Provenance.PACKAGE_PY) if pkg_name not in packages_yaml or "version" not in packages_yaml[pkg_name]: continue # TODO(psakiev) Need facts about versions # - requires_commit (associated with tag or branch) - version_defs: List[GitOrStandardVersion] = [] + from_packages_yaml: List[GitOrStandardVersion] = [] for vstr in packages_yaml[pkg_name]["version"]: v = vn.ver(vstr) if isinstance(v, vn.GitVersion): if not require_checksum or v.is_commit: - version_defs.append(v) + from_packages_yaml.append(v) else: matches = [x for x in self.possible_versions[pkg_name] if x.satisfies(v)] matches.sort(reverse=True) if not matches: raise spack.error.ConfigError( f"Preference for version {v} does not match any known " - f"version of {pkg_name} (in its package.py or any external)" + f"version of {pkg_name}" ) - version_defs.extend(matches) + from_packages_yaml.extend(matches) - for weight, vdef in enumerate(spack.llnl.util.lang.dedupe(version_defs)): - self.declared_versions[pkg_name].append( - DeclaredVersion(version=vdef, idx=weight, origin=Provenance.PACKAGES_YAML) - ) - self.possible_versions[pkg_name].add(vdef) + from_packages_yaml = 
list(spack.llnl.util.lang.dedupe(from_packages_yaml)) + for v in from_packages_yaml: + provenance = Provenance.PACKAGES_YAML + if isinstance(v, vn.GitVersion): + provenance = Provenance.PACKAGES_YAML_GIT_VERSION + self.possible_versions[pkg_name][v].append(provenance) + self.versions_from_yaml[pkg_name] = from_packages_yaml def define_ad_hoc_versions_from_specs( self, specs, origin, *, allow_deprecated: bool, require_checksum: bool @@ -2765,8 +2517,7 @@ def define_ad_hoc_versions_from_specs( """Add concrete versions to possible versions from lists of CLI/dev specs.""" for s in traverse.traverse_nodes(specs): # If there is a concrete version on the CLI *that we know nothing - # about*, add it to the known versions. Use idx=0, which is the - # best possible, so they're guaranteed to be used preferentially. + # about*, add it to the known versions. version = s.versions.concrete if version is None or (any((v == version) for v in self.possible_versions[s.name])): @@ -2780,9 +2531,7 @@ def define_ad_hoc_versions_from_specs( if not allow_deprecated and version in self.deprecated_versions[s.name]: continue - declared = DeclaredVersion(version=version, idx=0, origin=origin) - self.declared_versions[s.name].append(declared) - self.possible_versions[s.name].add(version) + self.possible_versions[s.name][version].append(origin) def _supported_targets(self, compiler_name, compiler_version, targets): """Get a list of which targets are supported by the compiler. @@ -2899,7 +2648,7 @@ def target_defaults(self, specs): self.gen.newline() - i = 0 # TODO compute per-target offset? + i = 0 for target in candidate_targets: self.gen.fact(fn.target(target.name)) self.gen.fact(fn.target_family(target.name, target.family.name)) @@ -2922,16 +2671,10 @@ def target_defaults(self, specs): self.default_targets = list(sorted(set(self.default_targets))) self.target_preferences() - def virtual_providers(self): - self.gen.h2("Virtual providers") - for vspec in sorted(self.possible_virtuals): - self.gen.fact(fn.virtual(vspec)) - self.gen.newline() - def define_version_constraints(self): """Define what version_satisfies(...) 
means in ASP logic.""" - for pkg_name, versions in sorted(self.possible_versions.items()): + for pkg_name, versions in self.possible_versions.items(): for v in versions: if v in self.git_commit_versions[pkg_name]: sha = self.git_commit_versions[pkg_name].get(v) @@ -2941,11 +2684,12 @@ def define_version_constraints(self): self.gen.fact(fn.pkg_fact(pkg_name, fn.version_needs_commit(v))) self.gen.newline() - for pkg_name, versions in sorted(self.version_constraints): + for pkg_name, versions in self.version_constraints: # generate facts for each package constraint and the version # that satisfies it - for v in sorted(v for v in self.possible_versions[pkg_name] if v.satisfies(versions)): - self.gen.fact(fn.pkg_fact(pkg_name, fn.version_satisfies(versions, v))) + for v in self.possible_versions[pkg_name]: + if v.satisfies(versions): + self.gen.fact(fn.pkg_fact(pkg_name, fn.version_satisfies(versions, v))) self.gen.newline() def collect_virtual_constraints(self): @@ -2977,7 +2721,7 @@ def versions_for(v): for pkg_name, versions in sorted(constraint_map.items()): possible_versions = set(sum([versions_for(v) for v in versions], [])) for version in sorted(possible_versions): - self.possible_versions[pkg_name].add(version) + self.possible_versions[pkg_name][version].append(Provenance.VIRTUAL_CONSTRAINT) def define_compiler_version_constraints(self): for constraint in sorted(self.compiler_version_constraints): @@ -3030,8 +2774,8 @@ def define_variant_values(self): variant definitions. """ - # Tell the concretizer about possible values from specs seen in spec_clauses(). - # We might want to order these facts by pkg and name if we are debugging. + # for determinism, sort by variant ids, not variant def ids (which are object ids) + def_info = [] for pkg_name, variant_def_id, value in sorted(self.variant_values_from_specs): try: vid = self.variant_ids_by_def_id[variant_def_id] @@ -3040,10 +2784,13 @@ def define_variant_values(self): f"[{__name__}] cannot retrieve id of the {value} variant from {pkg_name}" ) continue + def_info.append((pkg_name, vid, value)) + # Tell the concretizer about possible values from specs seen in spec_clauses(). + for pkg_name, vid, value in sorted(def_info): self.gen.fact(fn.pkg_fact(pkg_name, fn.variant_possible_value(vid, value))) - def register_concrete_spec(self, spec, possible): + def register_concrete_spec(self, spec, possible: set): # tell the solver about any installed packages that could # be dependencies (don't tell it about the others) if spec.name not in possible: @@ -3071,35 +2818,77 @@ def concrete_specs(self): # - Add versions to possible versions # - Add OS to possible OS's - # is traverse deterministic? 
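The ``version_satisfies`` fact generation in ``define_version_constraints`` above boils down to: for every recorded (package, constraint) pair, emit one fact per known possible version in range. A reduced sketch with numeric tuples standing in for real version objects:

```python
# Reduced sketch: version logic is faked with (major, minor) tuples; Spack uses
# spack.version objects and AspFunction facts.
from typing import Dict, List, Tuple

Version = Tuple[int, int]


def fmt(v: Version) -> str:
    return ".".join(str(part) for part in v)


def version_satisfies_facts(
    possible_versions: Dict[str, List[Version]],
    constraints: List[Tuple[str, Version, Version]],
) -> List[str]:
    """For each (pkg, lo, hi) constraint, emit a fact per version in range."""
    facts = []
    for pkg, lo, hi in constraints:
        for v in possible_versions.get(pkg, []):
            if lo <= v <= hi:
                facts.append(
                    f'pkg_fact("{pkg}", version_satisfies("{fmt(lo)}:{fmt(hi)}", "{fmt(v)}")).'
                )
    return facts


if __name__ == "__main__":
    versions = {"zlib": [(1, 2), (1, 3), (2, 0)]}
    for fact in version_satisfies_facts(versions, [("zlib", (1, 0), (1, 9))]):
        print(fact)  # facts for 1.2 and 1.3; 2.0 is out of range
```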
for dep in spec.traverse(): - self.possible_versions[dep.name].add(dep.version) + provenance = Provenance.INSTALLED if isinstance(dep.version, vn.GitVersion): - self.declared_versions[dep.name].append( - DeclaredVersion( - version=dep.version, idx=0, origin=Provenance.INSTALLED_GIT_VERSION - ) - ) - else: - self.declared_versions[dep.name].append( - DeclaredVersion(version=dep.version, idx=0, origin=Provenance.INSTALLED) - ) + provenance = Provenance.INSTALLED_GIT_VERSION + + self.possible_versions[dep.name][dep.version].append(provenance) self.possible_oses.add(dep.os) - def define_concrete_input_specs(self, specs, possible): + def define_concrete_input_specs(self, specs: tuple, possible: set): # any concrete specs in the input spec list for input_spec in specs: for spec in input_spec.traverse(): if spec.concrete: self.register_concrete_spec(spec, possible) + def impossible_dependencies_check(self, specs) -> None: + for edge in traverse.traverse_edges(specs): + possible_deps = self.pkgs + if spack.repo.PATH.is_virtual(edge.spec.name): + possible_deps = self.possible_virtuals + if edge.spec.name not in possible_deps and not str(edge.when): + raise InvalidDependencyError( + f"'{edge.spec.name}' is not a possible dependency of any root spec" + ) + + def input_spec_version_check(self, specs, allow_deprecated: bool) -> None: + """Raise an error early if no versions available in the solve can satisfy the inputs.""" + only_deprecated = [] + impossible = [] + + for spec in traverse.traverse_nodes(specs): + if spack.repo.PATH.is_virtual(spec.name): + continue + if spec.name not in self.pkgs: + continue # conditional dependency that won't be satisfied + + deprecated = self.deprecated_versions.get(spec.name, set()) + sat_deprecated = [v for v in deprecated if deprecated and v.satisfies(spec.versions)] + + possible: Iterable = self.possible_versions.get(spec.name, set()) + sat_possible = [v for v in possible if possible and v.satisfies(spec.versions)] + + if sat_deprecated and not sat_possible: + only_deprecated.append(spec) + + if not sat_deprecated and not sat_possible: + impossible.append(spec) + + if not allow_deprecated and only_deprecated: + raise DeprecatedVersionError( + "The following input specs can only be satisfied by deprecated versions:", + " " + + ", ".join(str(spec) for spec in only_deprecated) + + "\n" + + "Run with --deprecated to allow Spack to use these versions.", + ) + + if impossible: + raise InvalidVersionError( + "No version exists that satisfies these input specs:", + " " + ", ".join(str(spec) for spec in impossible), + ) + def setup( self, - specs: List[spack.spec.Spec], + specs: Sequence[spack.spec.Spec], *, reuse: Optional[List[spack.spec.Spec]] = None, + packages_with_externals=None, allow_deprecated: bool = False, - ) -> str: + ) -> "ProblemInstanceBuilder": """Generate an ASP program with relevant constraints for specs. This calls methods on the solve driver to set up the problem with @@ -3109,16 +2898,19 @@ def setup( Arguments: specs: list of Specs to solve reuse: list of concrete specs that can be reused + packages_with_externals: precomputed packages config with implicit externals allow_deprecated: if True adds deprecated versions into the solve + + Return: + A ProblemInstanceBuilder populated with facts and rules for an ASP solve. 
""" reuse = reuse or [] + if packages_with_externals is None: + packages_with_externals = external_config_with_implicit_externals(spack.config.CONFIG) check_packages_exist(specs) self.gen = ProblemInstanceBuilder() - # Compute possible compilers first, so we can record which dependencies they might inject - _ = spack.compilers.config.all_compilers(init_config=True) - - # Get compilers from buildcache only if injected through "reuse" specs + # Get compilers from buildcaches only if injected through "reuse" specs supported_compilers = spack.compilers.config.supported_compilers() compilers_from_reuse = { x for x in reuse if x.name in supported_compilers and not x.external @@ -3135,13 +2927,19 @@ def setup( candidate_compilers.update(compilers_from_reuse) self.possible_compilers = list(candidate_compilers) - self.possible_compilers.sort() # type: ignore[call-overload] + + # TODO: warning is because mypy doesn't know Spec supports rich comparison via decorator + self.possible_compilers.sort() # type: ignore[call-arg,call-overload] + + self.compiler_mixing() self.gen.h1("Runtimes") injected_dependencies = self.define_runtime_constraints() node_counter = create_counter( - specs + injected_dependencies, tests=self.tests, possible_graph=self.possible_graph + list(specs) + injected_dependencies, + tests=self.tests, + possible_graph=self.possible_graph, ) self.possible_virtuals = node_counter.possible_virtuals() self.pkgs = node_counter.possible_dependencies() @@ -3151,6 +2949,7 @@ def setup( if node.namespace is not None: self.explicitly_required_namespaces[node.name] = node.namespace + self.requirement_parser.parse_rules_from_input_specs(specs) self.gen.h1("Generic information") if using_libc_compatibility(): for libc in self.libcs: @@ -3203,10 +3002,8 @@ def setup( self.os_defaults(specs + dev_specs) self.target_defaults(specs + dev_specs) - self.virtual_providers() - self.provider_defaults() - self.provider_requirements() - self.external_packages() + self.virtual_requirements_and_weights() + self.external_packages(packages_with_externals) # TODO: make a config option for this undocumented feature checksummed = "SPACK_CONCRETIZER_REQUIRE_CHECKSUM" in os.environ @@ -3260,28 +3057,24 @@ def setup( self.gen.h1("Target Constraints") self.define_target_constraints() - self.gen.h1("Internal errors") - self.internal_errors() - - return self.gen.value() + # once we've done a full traversal and know possible versions, check that the + # requested solve is at least consistent. 
+ self.impossible_dependencies_check(specs) + self.input_spec_version_check(specs, allow_deprecated) - def internal_errors(self): - parent_dir = os.path.dirname(__file__) + return self.gen - def visit(node): - if ast_type(node) == clingo().ast.ASTType.Rule: - for term in node.body: - if ast_type(term) == clingo().ast.ASTType.Literal: - if ast_type(term.atom) == clingo().ast.ASTType.SymbolicAtom: - name = ast_sym(term.atom).name - if name == "internal_error": - arg = ast_sym(ast_sym(term.atom).arguments[0]) - symbol = AspFunction(name)(arg.string) - self.assumptions.append((parse_term(str(symbol)), True)) - self.gen.asp_problem.append(f"{{ {symbol} }}.\n") - - path = os.path.join(parent_dir, "concretize.lp") - parse_files([path], visit) + def compiler_mixing(self): + should_mix = spack.config.get("concretizer:compiler_mixing", True) + if should_mix is True: + return + # anything besides should_mix: true + for lang in ["c", "cxx", "fortran"]: + self.gen.fact(fn.no_compiler_mixing(lang)) + # user specified an allow-list + if isinstance(should_mix, list): + for pkg_name in should_mix: + self.gen.fact(fn.allow_mixing(pkg_name)) def define_runtime_constraints(self) -> List[spack.spec.Spec]: """Define the constraints to be imposed on the runtimes, and returns a list of @@ -3300,15 +3093,16 @@ def define_runtime_constraints(self) -> List[spack.spec.Spec]: # Inject default flags for compilers recorder("*").default_flags(compiler) - # FIXME (compiler as nodes): think of using isinstance(compiler_cls, WrappedCompiler) # Add a dependency on the compiler wrapper + compiler_str = f"{compiler.name} /{compiler.dag_hash()}" for language in ("c", "cxx", "fortran"): - compiler_str = f"{compiler.name}@{compiler.versions}" + # Using compiler.name causes a bit of duplication, but that is taken care of by + # clingo during grounding. recorder("*").depends_on( "compiler-wrapper", - when=f"%[deptypes=build virtuals={language}] {compiler_str}", + when=f"%[deptypes=build virtuals={language}] {compiler.name}", type="build", - description=f"Add the compiler wrapper when using {compiler} for {language}", + description=f"Add compiler wrapper when using {compiler.name} for {language}", ) if not using_libc_compatibility(): @@ -3326,9 +3120,9 @@ def define_runtime_constraints(self) -> List[spack.spec.Spec]: if current_libc: recorder("*").depends_on( "libc", - when=f"%[deptypes=build] {compiler_str}", + when=f"%[deptypes=build] {compiler.name}", type="link", - description=f"Add libc when using {compiler}", + description=f"Add libc when using {compiler.name}", ) recorder("*").depends_on( f"{current_libc.name}@={current_libc.version}", @@ -3402,13 +3196,15 @@ def generate_conditional_dep_conditions(self, spec: spack.spec.Spec, condition_i """ for dspec in spec.traverse_edges(): # Ignore unconditional deps - if dspec.when == spack.spec.Spec(): + if dspec.when == EMPTY_SPEC: continue # Cannot use "virtual_node" attr as key for condition # because reused specs do not track virtual nodes. # Instead, track whether the parent uses the virtual - def virtual_handler(input_spec, requirements): + def virtual_handler( + input_spec: spack.spec.Spec, requirements: List[AspFunction] + ) -> List[AspFunction]: ret = remove_facts("virtual_node")(input_spec, requirements) for edge in input_spec.traverse_edges(root=False, cover="edges"): if spack.repo.PATH.is_virtual(edge.spec.name): @@ -3444,14 +3240,14 @@ def validate_and_define_versions_from_requirements( versions. 
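The ``compiler_mixing`` hook above maps a config value to facts: ``true`` (the default) emits nothing, anything else disables mixing for c/cxx/fortran, and a list additionally allow-lists packages. A standalone restatement of that mapping (emitting facts as strings rather than through the problem builder):

```python
# Standalone restatement of compiler_mixing(); facts are returned as strings
# here instead of being emitted through the generator.
from typing import List, Union


def compiler_mixing_facts(should_mix: Union[bool, List[str]]) -> List[str]:
    if should_mix is True:  # default: mixing allowed, no facts at all
        return []
    facts = [f'no_compiler_mixing("{lang}").' for lang in ("c", "cxx", "fortran")]
    if isinstance(should_mix, list):  # user specified an allow-list
        facts += [f'allow_mixing("{pkg}").' for pkg in should_mix]
    return facts


if __name__ == "__main__":
    print(compiler_mixing_facts(True))       # []
    print(compiler_mixing_facts(False))      # three no_compiler_mixing facts
    print(compiler_mixing_facts(["mpich"]))  # ...plus allow_mixing("mpich")
```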
If they are abstract and statically have no match, then we need to throw an error. This function assumes all possible versions are already registered in self.possible_versions.""" - for pkg_name, d in spack.config.get("packages").items(): + for pkg_name, d in spack.config.CONFIG.get_config("packages").items(): if pkg_name == "all" or "require" not in d: continue for s in traverse.traverse_nodes(self._specs_from_requires(pkg_name, d["require"])): name, versions = s.name, s.versions - if name not in self.pkgs or versions == spack.version.any_version: + if name not in self.pkgs or versions == vn.any_version: continue s.attach_git_version_lookup() @@ -3477,10 +3273,7 @@ def validate_and_define_versions_from_requirements( # If concrete and not yet defined, conditionally define it, like we do for specs # from the command line. if not require_checksum or _is_checksummed_git_version(v): - self.declared_versions[name].append( - DeclaredVersion(version=v, idx=0, origin=Provenance.PACKAGE_REQUIREMENT) - ) - self.possible_versions[name].add(v) + self.possible_versions[name][v].append(Provenance.PACKAGE_REQUIREMENT) def _specs_from_requires(self, pkg_name, section): """Collect specs from a requirement rule""" @@ -3504,7 +3297,7 @@ def _specs_from_requires(self, pkg_name, section): for s in spec_group[key]: yield _spec_with_default_name(s, pkg_name) - def pkg_class(self, pkg_name: str) -> typing.Type[spack.package_base.PackageBase]: + def pkg_class(self, pkg_name: str) -> Type[spack.package_base.PackageBase]: request = pkg_name if pkg_name in self.explicitly_required_namespaces: namespace = self.explicitly_required_namespaces[pkg_name] @@ -3540,6 +3333,19 @@ class _Body: propagate = fn.attr("propagate") +def strip_asp_problem(asp_problem: Iterable[str]) -> List[str]: + """Remove comments and empty lines from an ASP program.""" + + def strip_statement(stmt: str) -> str: + lines = [line for line in stmt.split("\n") if not line.startswith("%")] + return "".join(line.strip() for line in lines if line) + + value = [strip_statement(stmt) for stmt in asp_problem] + value = [s for s in value if s] + + return value + + class ProblemInstanceBuilder: """Provides an interface to construct a problem instance. >>> problem_instance = builder.value() The problem instance can be added directly to the "control" structure of clingo.
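A quick demonstration of ``strip_asp_problem`` introduced above; the helper body is copied from the diff, and the ``__main__`` block shows that comment lines and blank entries are dropped while multi-line statements are compacted (note the join loses interior whitespace):

```python
# The helper body below is copied from the diff; the __main__ block is a demo.
from typing import Iterable, List


def strip_asp_problem(asp_problem: Iterable[str]) -> List[str]:
    """Remove comments and empty lines from an ASP program."""

    def strip_statement(stmt: str) -> str:
        lines = [line for line in stmt.split("\n") if not line.startswith("%")]
        return "".join(line.strip() for line in lines if line)

    value = [strip_statement(stmt) for stmt in asp_problem]
    value = [s for s in value if s]

    return value


if __name__ == "__main__":
    program = [
        "%===========",
        "% A header",
        "",
        'attr("node", "zlib").',
        "rule(X) :-\n  cond(X).",  # multi-line statement gets compacted
    ]
    print(strip_asp_problem(program))
    # ['attr("node", "zlib").', 'rule(X) :-cond(X).']
```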
+ """ - def __init__(self): - self.asp_problem = [] + def __init__(self) -> None: + self.asp_problem: List[str] = [] def fact(self, atom: AspFunction) -> None: - self.asp_problem.append(f"{atom}.\n") + self.asp_problem.append(f"{atom}.") def append(self, rule: str) -> None: self.asp_problem.append(rule) def title(self, header: str, char: str) -> None: - self.asp_problem.append("\n") - self.asp_problem.append("%" + (char * 76)) - self.asp_problem.append("\n") - self.asp_problem.append(f"% {header}\n") - self.asp_problem.append("%" + (char * 76)) - self.asp_problem.append("\n") + sep = char * 76 + self.newline() + self.asp_problem.append(f"%{sep}") + self.asp_problem.append(f"% {header}") + self.asp_problem.append(f"%{sep}") def h1(self, header: str) -> None: self.title(header, "=") @@ -3576,13 +3382,10 @@ def h2(self, header: str) -> None: self.title(header, "-") def h3(self, header: str): - self.asp_problem.append(f"% {header}\n") + self.asp_problem.append(f"% {header}") def newline(self): - self.asp_problem.append("\n") - - def value(self) -> str: - return "".join(self.asp_problem) + self.asp_problem.append("") def possible_compilers(*, configuration) -> Tuple[Set["spack.spec.Spec"], Set["spack.spec.Spec"]]: @@ -3625,246 +3428,7 @@ def possible_compilers(*, configuration) -> Tuple[Set["spack.spec.Spec"], Set["s return result, rejected -class RuntimePropertyRecorder: - """An object of this class is injected in callbacks to compilers, to let them declare - properties of the runtimes they support and of the runtimes they provide, and to add - runtime dependencies to the nodes using said compiler. - - The usage of the object is the following. First, a runtime package name or the wildcard - "*" are passed as an argument to __call__, to set which kind of package we are referring to. - Then we can call one method with a directive-like API. - - Examples: - >>> pkg = RuntimePropertyRecorder(setup) - >>> # Every package compiled with %gcc has a link dependency on 'gcc-runtime' - >>> pkg("*").depends_on( - ... "gcc-runtime", - ... when="%gcc", - ... type="link", - ... description="If any package uses %gcc, it depends on gcc-runtime" - ... ) - >>> # The version of gcc-runtime is the same as the %gcc used to "compile" it - >>> pkg("gcc-runtime").requires("@=9.4.0", when="%gcc@=9.4.0") - """ - - def __init__(self, setup): - self._setup = setup - self.rules = [] - self.runtime_conditions = set() - self.injected_dependencies = set() - # State of this object set in the __call__ method, and reset after - # each directive-like method - self.current_package = None - - def __call__(self, package_name: str) -> "RuntimePropertyRecorder": - """Sets a package name for the next directive-like method call""" - assert self.current_package is None, f"state was already set to '{self.current_package}'" - self.current_package = package_name - return self - - def reset(self): - """Resets the current state.""" - self.current_package = None - - def depends_on(self, dependency_str: str, *, when: str, type: str, description: str) -> None: - """Injects conditional dependencies on packages. - - Conditional dependencies can be either "real" packages or virtual dependencies. - - Args: - dependency_str: the dependency spec to inject - when: anonymous condition to be met on a package to have the dependency - type: dependency type - description: human-readable description of the rule for adding the dependency - """ - # TODO: The API for this function is not final, and is still subject to change. 
At - # TODO: the moment, we implemented only the features strictly needed for the - # TODO: functionality currently provided by Spack, and we assert nothing else is required. - msg = "the 'depends_on' method can be called only with pkg('*')" - assert self.current_package == "*", msg - - when_spec = spack.spec.Spec(when) - assert when_spec.name is None, "only anonymous when specs are accepted" - - dependency_spec = spack.spec.Spec(dependency_str) - if dependency_spec.versions != vn.any_version: - self._setup.version_constraints.add((dependency_spec.name, dependency_spec.versions)) - - self.injected_dependencies.add(dependency_spec) - body_str, node_variable = self.rule_body_from(when_spec) - - head_clauses = self._setup.spec_clauses(dependency_spec, body=False) - runtime_pkg = dependency_spec.name - is_virtual = head_clauses[0].args[0] == "virtual_node" - main_rule = ( - f"% {description}\n" - f'1 {{ attr("depends_on", {node_variable}, node(0..X-1, "{runtime_pkg}"), "{type}") :' - f' max_dupes("{runtime_pkg}", X)}} 1:-\n' - f"{body_str}.\n\n" - ) - if is_virtual: - main_rule = ( - f"% {description}\n" - f'attr("dependency_holds", {node_variable}, "{runtime_pkg}", "{type}") :-\n' - f"{body_str}.\n\n" - ) - - self.rules.append(main_rule) - for clause in head_clauses: - if clause.args[0] == "node": - continue - runtime_node = f'node(RuntimeID, "{runtime_pkg}")' - head_str = str(clause).replace(f'"{runtime_pkg}"', runtime_node) - depends_on_constraint = ( - f' attr("depends_on", {node_variable}, {runtime_node}, "{type}"),\n' - ) - if is_virtual: - depends_on_constraint = ( - f' attr("depends_on", {node_variable}, ProviderNode, "{type}"),\n' - f" provider(ProviderNode, {runtime_node}),\n" - ) - - rule = f"{head_str} :-\n" f"{depends_on_constraint}" f"{body_str}.\n\n" - self.rules.append(rule) - - self.reset() - - @staticmethod - def node_for(name: str) -> str: - return f'node(ID{name.replace("-", "_")}, "{name}")' - - def rule_body_from(self, when_spec: "spack.spec.Spec") -> Tuple[str, str]: - """Computes the rule body from a "when" spec, and returns it, along with the - node variable. - """ - - node_placeholder = "XXX" - node_variable = "node(ID, Package)" - when_substitutions = {} - for s in when_spec.traverse(root=False): - when_substitutions[f'"{s.name}"'] = self.node_for(s.name) - when_spec.name = node_placeholder - body_clauses = self._setup.spec_clauses(when_spec, body=True) - for clause in body_clauses: - if clause.args[0] == "virtual_on_incoming_edges": - # Substitute: attr("virtual_on_incoming_edges", ProviderNode, Virtual) - # with: attr("virtual_on_edge", ParentNode, ProviderNode, Virtual) - # (avoid adding virtuals everywhere, if a single edge needs it) - _, provider, virtual = clause.args - clause.args = "virtual_on_edge", node_placeholder, provider, virtual - body_str = ",\n".join(f" {x}" for x in body_clauses) - body_str += f",\n not external({node_variable})" - body_str = body_str.replace(f'"{node_placeholder}"', f"{node_variable}") - for old, replacement in when_substitutions.items(): - body_str = body_str.replace(old, replacement) - return body_str, node_variable - - def requires(self, impose: str, *, when: str): - """Injects conditional requirements on a given package. 
- - Args: - impose: constraint to be imposed - when: condition triggering the constraint - """ - msg = "the 'requires' method cannot be called with pkg('*') or without setting the package" - assert self.current_package is not None and self.current_package != "*", msg - - imposed_spec = spack.spec.Spec(f"{self.current_package}{impose}") - when_spec = spack.spec.Spec(f"{self.current_package}{when}") - - assert imposed_spec.versions.concrete, f"{impose} must have a concrete version" - - # Add versions to possible versions - for s in (imposed_spec, when_spec): - if not s.versions.concrete: - continue - self._setup.possible_versions[s.name].add(s.version) - self._setup.declared_versions[s.name].append( - DeclaredVersion(version=s.version, idx=0, origin=Provenance.RUNTIME) - ) - - self.runtime_conditions.add((imposed_spec, when_spec)) - self.reset() - - def propagate(self, constraint_str: str, *, when: str): - msg = "the 'propagate' method can be called only with pkg('*')" - assert self.current_package == "*", msg - - when_spec = spack.spec.Spec(when) - assert when_spec.name is None, "only anonymous when specs are accepted" - - when_substitutions = {} - for s in when_spec.traverse(root=False): - when_substitutions[f'"{s.name}"'] = self.node_for(s.name) - - body_str, node_variable = self.rule_body_from(when_spec) - constraint_spec = spack.spec.Spec(constraint_str) - - constraint_clauses = self._setup.spec_clauses(constraint_spec, body=False) - for clause in constraint_clauses: - if clause.args[0] == "node_version_satisfies": - self._setup.version_constraints.add( - (constraint_spec.name, constraint_spec.versions) - ) - args = f'"{constraint_spec.name}", "{constraint_spec.versions}"' - head_str = f"propagate({node_variable}, node_version_satisfies({args}))" - rule = f"{head_str} :-\n{body_str}.\n\n" - self.rules.append(rule) - - self.reset() - - def default_flags(self, spec: "spack.spec.Spec"): - if not spec.external or "flags" not in spec.extra_attributes: - self.reset() - return - - when_spec = spack.spec.Spec(f"%[deptypes=build] {spec}") - body_str, node_variable = self.rule_body_from(when_spec) - - node_placeholder = "XXX" - flags = spec.extra_attributes["flags"] - root_spec_str = f"{node_placeholder}" - for flag_type, default_values in flags.items(): - root_spec_str = f"{root_spec_str} {flag_type}='{default_values}'" - root_spec = spack.spec.Spec(root_spec_str) - head_clauses = self._setup.spec_clauses( - root_spec, body=False, context=SourceContext(source="compiler") - ) - self.rules.append(f"% Default compiler flags for {spec}\n") - for clause in head_clauses: - if clause.args[0] == "node": - continue - head_str = str(clause).replace(f'"{node_placeholder}"', f"{node_variable}") - rule = f"{head_str} :-\n{body_str}.\n\n" - self.rules.append(rule) - - self.reset() - - def consume_facts(self): - """Consume the facts collected by this object, and emits rules and - facts for the runtimes. 
- """ - self._setup.gen.h2("Runtimes: declarations") - runtime_pkgs = sorted( - {x.name for x in self.injected_dependencies if not spack.repo.PATH.is_virtual(x.name)} - ) - for runtime_pkg in runtime_pkgs: - self._setup.gen.fact(fn.runtime(runtime_pkg)) - self._setup.gen.newline() - - self._setup.gen.h2("Runtimes: rules") - self._setup.gen.newline() - for rule in self.rules: - self._setup.gen.append(rule) - self._setup.gen.newline() - - self._setup.gen.h2("Runtimes: requirements") - for imposed_spec, when_spec in sorted(self.runtime_conditions): - msg = f"{when_spec} requires {imposed_spec} at runtime" - _ = self._setup.condition(when_spec, imposed_spec=imposed_spec, msg=msg) - - self._setup.trigger_rules() - self._setup.effect_rules() +FunctionTupleT = Tuple[str, Tuple[Union[str, NodeArgument], ...]] class SpecBuilder: @@ -3879,7 +3443,6 @@ class SpecBuilder: r"^.*_set$", r"^compatible_libc$", r"^dependency_holds$", - r"^external_conditions_hold$", r"^package_hash$", r"^root$", r"^track_dependencies$", @@ -3966,42 +3529,6 @@ def node_flag(self, node, node_flag): node_flag.flag_type, node_flag.flag, False, node_flag.flag_group, node_flag.source ) - def external_spec_selected(self, node, idx): - """This means that the external spec and index idx has been selected for this package.""" - packages_yaml = _external_config_with_implicit_externals(spack.config.CONFIG) - spec_info = packages_yaml[node.pkg]["externals"][int(idx)] - self._specs[node].external_path = spec_info.get("prefix", None) - self._specs[node].external_modules = spack.spec.Spec._format_module_list( - spec_info.get("modules", None) - ) - self._specs[node].extra_attributes = spec_info.get("extra_attributes", {}) - - # Annotate compiler specs from externals - external_spec = spack.spec.Spec(spec_info["spec"]) - external_spec_deps = external_spec.dependencies() - if len(external_spec_deps) > 1: - raise InvalidExternalError( - f"external spec {spec_info['spec']} cannot have more than one dependency" - ) - elif len(external_spec_deps) == 1: - compiler_str = external_spec_deps[0] - self._specs[node].annotations.with_compiler(spack.spec.Spec(compiler_str)) - - # Packages that are external - but normally depend on python - - # get an edge inserted to python as a post-concretization step - package = spack.repo.PATH.get_pkg_class(self._specs[node].fullname)(self._specs[node]) - extendee_spec = package.extendee_spec - if ( - extendee_spec - and extendee_spec.name == "python" - # More-general criteria like "depends on Python" pulls in things - # we don't want to apply this logic to (in particular LLVM, which - # is now a common external because that's how we detect Clang) - and any([c.__name__ == "PythonExtension" for c in package.__class__.__mro__]) - ): - candidate_python_to_attach = self._specs.get(SpecBuilder.make_node(pkg="python")) - _attach_python_to_external(package, extendee_spec=candidate_python_to_attach) - def depends_on(self, parent_node, dependency_node, type): dependency_spec = self._specs[dependency_node] depflag = dt.flag_from_string(type) @@ -4022,7 +3549,7 @@ def reorder_flags(self): 1. Flags applied in compiler definitions should come first 2. Flags applied by dependents are ordered topologically (with a - dependency on `traverse` to resolve the partial order into a + dependency on ``traverse`` to resolve the partial order into a stable total order) 3. 
Flags from requirements are then applied (requirements always come from the package and never a parent) @@ -4030,11 +3557,9 @@ def reorder_flags(self): Additionally, for each source (requirements, compiler, command line, and dependents), flags from that source should retain their order and grouping: - e.g. for `y cflags="-z -a"` "-z" and "-a" should never have any intervening + e.g. for ``y cflags="-z -a"`` ``-z`` and ``-a`` should never have any intervening flags inserted, and should always appear in that order. """ - cmd_specs = {s.name: s for spec in self._command_line_specs for s in spec.traverse()} - for node, spec in self._specs.items(): # if bootstrapping, compiler is not in config and has no flags flagmap_from_compiler = { @@ -4042,6 +3567,19 @@ def reorder_flags(self): for flag_type, values in spec.compiler_flags.items() } + flagmap_from_cli = {} + for flag_type, values in spec.compiler_flags.items(): + if not values: + continue + + flags = [x for x in values if x.source == "literal"] + if not flags: + continue + + # For compiler flags from literal specs, reorder any flags to + # the input order from flag.flag_group + flagmap_from_cli[flag_type] = _reorder_flags(flags) + for flag_type in spec.compiler_flags.valid_compiler_flags(): ordered_flags = [] @@ -4104,9 +3642,8 @@ def _order_index(flag_group): extend_flag_list(ordered_flags, as_compiler_flags) # 3. Now put cmd-line flags last - if node.pkg in cmd_specs: - cmd_flags = cmd_specs[node.pkg].compiler_flags.get(flag_type, []) - extend_flag_list(ordered_flags, cmd_flags) + if flag_type in flagmap_from_cli: + extend_flag_list(ordered_flags, flagmap_from_cli[flag_type]) compiler_flags = spec.compiler_flags.get(flag_type, []) msg = f"{set(compiler_flags)} does not equal {set(ordered_flags)}" @@ -4131,36 +3668,21 @@ def splice_at_hash( ) self._splices.setdefault(parent_spec, []).append(splice) - @staticmethod - def sort_fn(function_tuple) -> Tuple[int, int]: - """Ensure attributes are evaluated in the correct order. - - hash attributes are handled first, since they imply entire concrete specs - node attributes are handled next, since they instantiate nodes - external_spec_selected attributes are handled last, so that external extensions can find - the concrete specs on which they depend because all nodes are fully constructed before we - consider which ones are external. - """ - name = function_tuple[0] - if name == "hash": - return (-5, 0) - elif name == "node": - return (-4, 0) - elif name == "node_flag": - return (-2, 0) - elif name == "external_spec_selected": - return (0, 0) # note out of order so this goes last - elif name == "virtual_on_edge": - return (1, 0) - else: - return (-1, 0) + def build_specs(self, function_tuples: List[FunctionTupleT]) -> List[spack.spec.Spec]: + + attr_key = { + # hash attributes are handled first, since they imply entire concrete specs + "hash": -5, + # node attributes are handled next, since they instantiate nodes + "node": -4, + # evaluated last, so all nodes are fully constructed + "virtual_on_edge": 2, + } - def build_specs(self, function_tuples): - # Functions don't seem to be in particular order in output. Sort them here so that - # directives that build objects, like node, are called in the right order. 
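The ``attr_key`` map above replaces the old ``sort_fn``: because Python's sort is stable, attributes not listed keep their relative order at priority 0, while ``hash``/``node`` float to the front and ``virtual_on_edge`` sinks to the end. A toy demonstration:

```python
# Toy demonstration of the stable priority sort used in build_specs().
attr_key = {"hash": -5, "node": -4, "virtual_on_edge": 2}

function_tuples = [
    ("virtual_on_edge", ("a", "b", "mpi")),
    ("variant_value", ("a", "shared", "true")),
    ("node", ("a",)),
    ("node_flag", ("a", "cflags")),
    ("hash", ("a", "abc123")),
]

function_tuples.sort(key=lambda x: attr_key.get(x[0], 0))
for name, args in function_tuples:
    print(name, args)
# hash and node come first; variant_value/node_flag keep their relative order
# at priority 0; virtual_on_edge is evaluated last.
```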
- self.function_tuples = sorted(set(function_tuples), key=self.sort_fn) + # Sort functions so that directives building objects are called in the right order + function_tuples.sort(key=lambda x: attr_key.get(x[0], 0)) self._specs = {} - for name, args in self.function_tuples: + for name, args in function_tuples: if SpecBuilder.ignored_attributes.match(name): continue @@ -4183,19 +3705,20 @@ def build_specs(self, function_tuples): # predicates on virtual packages. if name != "error": node = args[0] + assert isinstance(node, NodeArgument), ( + f"internal solver error: expected a node, but got a {type(args[0])}. " + "Please report a bug at https://github.com/spack/spack/issues" + ) + pkg = node.pkg if spack.repo.PATH.is_virtual(pkg): continue - # if we've already gotten a concrete spec for this pkg, - # do not bother calling actions on it except for node_flag_source, - # since node_flag_source is tracking information not in the spec itself - # we also need to keep track of splicing information. - spec = self._specs.get(args[0]) - if spec and spec.concrete: - do_not_ignore_attrs = ["node_flag_source", "splice_at_hash"] - if name not in do_not_ignore_attrs: - continue + # if we've already gotten a concrete spec for this pkg, we're done, unless + # we're splicing. `splice_at_hash()` is the only action we call on concrete specs. + spec = self._specs.get(node) + if spec and spec.concrete and name != "splice_at_hash": + continue action(*args) @@ -4208,7 +3731,7 @@ def build_specs(self, function_tuples): roots = [spec.root for spec in self._specs.values()] roots = dict((id(r), r) for r in roots) for root in roots.values(): - _inject_patches_variant(root) + spack.spec._inject_patches_variant(root) # Add external paths to specs with just external modules for s in self._specs.values(): @@ -4217,6 +3740,10 @@ def build_specs(self, function_tuples): for s in self._specs.values(): _develop_specs_from_env(s, ev.active_environment()) + # check for commits must happen after all version adaptations are complete + for s in self._specs.values(): + _specs_with_commits(s) + # mark concrete and assign hashes to all specs in the solve for root in roots.values(): root._finalize_concretization() @@ -4252,10 +3779,6 @@ def build_specs(self, function_tuples): spack.version.git_ref_lookup.GitRefLookup(spec.fullname) ) - # check for commits must happen after all version adaptations are complete - for s in self._specs.values(): - _specs_with_commits(s) - specs = self.execute_explicit_splices() return specs @@ -4297,20 +3820,22 @@ def execute_explicit_splices(self): def _specs_with_commits(spec): - if not spec.package.needs_commit(spec.version): + pkg_class = spack.repo.PATH.get_pkg_class(spec.fullname) + if not pkg_class.needs_commit(spec.version): return - if isinstance(spec.version, spack.version.GitVersion): + if isinstance(spec.version, vn.GitVersion): if "commit" not in spec.variants and spec.version.commit_sha: spec.variants["commit"] = vt.SingleValuedVariant("commit", spec.version.commit_sha) - spec.package.resolve_binary_provenance() + pkg_class._resolve_git_provenance(spec) if "commit" not in spec.variants: - tty.warn( - f"Unable to resolve the git commit for {spec.name}. " - "An installation of this binary won't have complete binary provenance." - ) + if not spec.is_develop: + tty.warn( + f"Unable to resolve the git commit for {spec.name}. " + "An installation of this binary won't have complete binary provenance." 
+ ) return # check integrity of user specified commit shas @@ -4321,173 +3846,6 @@ def _specs_with_commits(spec): assert vn.is_git_commit_sha(spec.variants["commit"].value), invalid_commit_msg -def _attach_python_to_external( - dependent_package, extendee_spec: Optional[spack.spec.Spec] = None -) -> None: - """ - Ensure all external python packages have a python dependency - - If another package in the DAG depends on python, we use that - python for the dependency of the external. If not, we assume - that the external PythonPackage is installed into the same - directory as the python it depends on. - """ - # TODO: Include this in the solve, rather than instantiating post-concretization - if "python" not in dependent_package.spec: - if extendee_spec: - python = extendee_spec - else: - python = _get_external_python_for_prefix(dependent_package) - if not python.concrete: - repo = spack.repo.PATH.repo_for_pkg(python) - python.namespace = repo.namespace - - # Ensure architecture information is present - if not python.architecture: - host_platform = spack.platforms.host() - host_os = host_platform.default_operating_system() - host_target = host_platform.default_target() - python.architecture = spack.spec.ArchSpec( - (str(host_platform), str(host_os), str(host_target)) - ) - else: - if not python.architecture.platform: - python.architecture.platform = spack.platforms.host() - platform = spack.platforms.by_name(python.architecture.platform) - if not python.architecture.os: - python.architecture.os = platform.default_operating_system() - if not python.architecture.target: - python.architecture.target = spack.vendor.archspec.cpu.host().family.name - - python.external_path = dependent_package.spec.external_path - python._mark_concrete() - dependent_package.spec.add_dependency_edge( - python, depflag=dt.BUILD | dt.LINK | dt.RUN, virtuals=() - ) - - -def _get_external_python_for_prefix(python_package): - """ - For an external package that extends python, find the most likely spec for the python - it depends on. 
- - First search: an "installed" external that shares a prefix with this package - Second search: a configured external that shares a prefix with this package - Third search: search this prefix for a python package - - Returns: - spack.spec.Spec: The external Spec for python most likely to be compatible with self.spec - """ - python_externals_installed = [ - s - for s in spack.store.STORE.db.query("python") - if s.prefix == python_package.spec.external_path - ] - if python_externals_installed: - return python_externals_installed[0] - - python_external_config = spack.config.get("packages:python:externals", []) - python_externals_configured = [ - spack.spec.parse_with_version_concrete(item["spec"]) - for item in python_external_config - if item["prefix"] == python_package.spec.external_path - ] - if python_externals_configured: - return python_externals_configured[0] - - python_externals_detection = spack.detection.by_path( - ["python"], path_hints=[python_package.spec.external_path], max_workers=1 - ) - - python_externals_detected = [ - spec - for spec in python_externals_detection.get("python", []) - if spec.external_path == python_package.spec.external_path - ] - python_externals_detected = [ - spack.spec.parse_with_version_concrete(str(x)) for x in python_externals_detected - ] - if python_externals_detected: - return list(sorted(python_externals_detected, key=lambda x: x.version))[-1] - - raise StopIteration( - "No external python could be detected for %s to depend on" % python_package.spec - ) - - -def _inject_patches_variant(root: spack.spec.Spec) -> None: - # This dictionary will store object IDs rather than Specs as keys - # since the Spec __hash__ will change as patches are added to them - spec_to_patches: Dict[int, Set[spack.patch.Patch]] = {} - for s in root.traverse(): - # After concretizing, assign namespaces to anything left. - # Note that this doesn't count as a "change". The repository - # configuration is constant throughout a spack run, and - # normalize and concretize evaluate Packages using Repo.get(), - # which respects precedence. So, a namespace assignment isn't - # changing how a package name would have been interpreted and - # we can do it as late as possible to allow as much - # compatibility across repositories as possible. - if s.namespace is None: - s.namespace = spack.repo.PATH.repo_for_pkg(s.name).namespace - - if s.concrete: - continue - - # Add any patches from the package to the spec. - node_patches = { - patch - for cond, patch_list in spack.repo.PATH.get_pkg_class(s.fullname).patches.items() - if s.satisfies(cond) - for patch in patch_list - } - if node_patches: - spec_to_patches[id(s)] = node_patches - - # Also record all patches required on dependencies by depends_on(..., patch=...) 
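Before the edge-patch pass that follows, a reduced sketch of the node-patch pass above: collect every patch whose ``when=`` condition the spec satisfies (string predicates stand in for real condition specs; this helper is illustrative only):

```python
# Illustrative only: string predicates stand in for condition specs, and the
# satisfies callback stands in for Spec.satisfies.
from typing import Callable, Dict, List, Set


def node_patches(
    spec_attrs: Set[str],
    patches_by_cond: Dict[str, List[str]],
    satisfies: Callable[[Set[str], str], bool],
) -> Set[str]:
    """Gather every patch whose condition the spec satisfies."""
    return {
        patch
        for cond, patch_list in patches_by_cond.items()
        if satisfies(spec_attrs, cond)
        for patch in patch_list
    }


if __name__ == "__main__":
    conds = {"+shared": ["fix-shared.patch"], "@1.0": ["fix-old.patch"]}
    print(node_patches({"+shared"}, conds, lambda attrs, cond: cond in attrs))
    # {'fix-shared.patch'}
```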
- for dspec in root.traverse_edges(deptype=dt.ALL, cover="edges", root=False): - if dspec.spec.concrete: - continue - - pkg_deps = spack.repo.PATH.get_pkg_class(dspec.parent.fullname).dependencies - - edge_patches: List[spack.patch.Patch] = [] - for cond, deps_by_name in pkg_deps.items(): - dependency = deps_by_name.get(dspec.spec.name) - if not dependency: - continue - - if not dspec.parent.satisfies(cond): - continue - - for pcond, patch_list in dependency.patches.items(): - if dspec.spec.satisfies(pcond): - edge_patches.extend(patch_list) - - if edge_patches: - spec_to_patches.setdefault(id(dspec.spec), set()).update(edge_patches) - - for spec in root.traverse(): - if id(spec) not in spec_to_patches: - continue - - patches = list(spec_to_patches[id(spec)]) - variant: vt.VariantValue = spec.variants.setdefault( - "patches", vt.MultiValuedVariant("patches", ()) - ) - variant.set(*(p.sha256 for p in patches)) - # FIXME: Monkey patches variant to store patches order - ordered_hashes = [(*p.ordering_key, p.sha256) for p in patches if p.ordering_key] - ordered_hashes.sort() - tty.debug( - f"Ordered hashes [{spec.name}]: " - + ", ".join("/".join(str(e) for e in t) for t in ordered_hashes) - ) - setattr( - variant, "_patches_in_order_of_appearance", [sha256 for _, _, sha256 in ordered_hashes] - ) - - def _ensure_external_path_if_external(spec: spack.spec.Spec) -> None: if not spec.external_modules or spec.external_path: return @@ -4522,291 +3880,6 @@ def _develop_specs_from_env(spec, env): assert spec.satisfies(dev_info["spec"]) -def _is_reusable(spec: spack.spec.Spec, packages, local: bool) -> bool: - """A spec is reusable if it's not a dev spec, it's imported from the cray manifest, it's not - external, or it's external with matching packages.yaml entry. The latter prevents two issues: - - 1. Externals in build caches: avoid installing an external on the build machine not - available on the target machine - 2. Local externals: avoid reusing an external if the local config changes. This helps in - particular when a user removes an external from packages.yaml, and expects that that - takes effect immediately. - - Arguments: - spec: the spec to check - packages: the packages configuration - """ - if "dev_path" in spec.variants: - return False - - if spec.name == "compiler-wrapper": - return False - - if not spec.external: - return _has_runtime_dependencies(spec) - - # Cray external manifest externals are always reusable - if local: - _, record = spack.store.STORE.db.query_by_spec_hash(spec.dag_hash()) - if record and record.origin == "external-db": - return True - - try: - provided = spack.repo.PATH.get(spec).provided_virtual_names() - except spack.repo.RepoError: - provided = [] - - for name in {spec.name, *provided}: - for entry in packages.get(name, {}).get("externals", []): - if ( - spec.satisfies(entry["spec"]) - and spec.external_path == entry.get("prefix") - and spec.external_modules == entry.get("modules") - ): - return True - - return False - - -def _has_runtime_dependencies(spec: spack.spec.Spec) -> bool: - # TODO (compiler as nodes): this function contains specific names from builtin, and should - # be made more general - if "gcc" in spec and "gcc-runtime" not in spec: - return False - - if "intel-oneapi-compilers" in spec and "intel-oneapi-runtime" not in spec: - return False - - return True - - -class SpecFilter: - """Given a method to produce a list of specs, this class can filter them according to - different criteria. 
- """ - - def __init__( - self, - factory: Callable[[], List[spack.spec.Spec]], - is_usable: Callable[[spack.spec.Spec], bool], - include: List[str], - exclude: List[str], - ) -> None: - """ - Args: - factory: factory to produce a list of specs - is_usable: predicate that takes a spec in input and returns False if the spec - should not be considered for this filter, True otherwise. - include: if present, a "good" spec must match at least one entry in the list - exclude: if present, a "good" spec must not match any entry in the list - """ - self.factory = factory - self.is_usable = is_usable - self.include = include - self.exclude = exclude - - def is_selected(self, s: spack.spec.Spec) -> bool: - if not self.is_usable(s): - return False - - if self.include and not any(s.satisfies(c) for c in self.include): - return False - - if self.exclude and any(s.satisfies(c) for c in self.exclude): - return False - - return True - - def selected_specs(self) -> List[spack.spec.Spec]: - return [s for s in self.factory() if self.is_selected(s)] - - @staticmethod - def from_store(configuration, *, include, exclude) -> "SpecFilter": - """Constructs a filter that takes the specs from the current store.""" - packages = _external_config_with_implicit_externals(configuration) - is_reusable = functools.partial(_is_reusable, packages=packages, local=True) - factory = functools.partial(_specs_from_store, configuration=configuration) - return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) - - @staticmethod - def from_buildcache(configuration, *, include, exclude) -> "SpecFilter": - """Constructs a filter that takes the specs from the configured buildcaches.""" - packages = _external_config_with_implicit_externals(configuration) - is_reusable = functools.partial(_is_reusable, packages=packages, local=False) - return SpecFilter( - factory=_specs_from_mirror, is_usable=is_reusable, include=include, exclude=exclude - ) - - @staticmethod - def from_environment(configuration, *, include, exclude, env) -> "SpecFilter": - packages = _external_config_with_implicit_externals(configuration) - is_reusable = functools.partial(_is_reusable, packages=packages, local=True) - factory = functools.partial(_specs_from_environment, env=env) - return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) - - @staticmethod - def from_environment_included_concrete( - configuration, - *, - include: List[str], - exclude: List[str], - env: ev.Environment, - included_concrete: str, - ) -> "SpecFilter": - packages = _external_config_with_implicit_externals(configuration) - is_reusable = functools.partial(_is_reusable, packages=packages, local=True) - factory = functools.partial( - _specs_from_environment_included_concrete, env=env, included_concrete=included_concrete - ) - return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) - - -def _specs_from_store(configuration): - store = spack.store.create(configuration) - with store.db.read_transaction(): - return store.db.query(installed=True) - - -def _specs_from_mirror(): - try: - return spack.binary_distribution.update_cache_and_get_specs() - except (spack.binary_distribution.FetchCacheError, IndexError): - # this is raised when no mirrors had indices. - # TODO: update mirror configuration so it can indicate that the - # TODO: source cache (or any mirror really) doesn't have binaries. - return [] - - -def _specs_from_environment(env): - """Return all concrete specs from the environment. 
This includes all included concrete"""
- if env:
- return [concrete for _, concrete in env.concretized_specs()]
- else:
- return []
-
-
-def _specs_from_environment_included_concrete(env, included_concrete):
- """Return only concrete specs from the environment included from the included_concrete"""
- if env:
- assert included_concrete in env.included_concrete_envs
- return [concrete for concrete in env.included_specs_by_hash[included_concrete].values()]
- else:
- return []
-
-
-class ReuseStrategy(enum.Enum):
- ROOTS = enum.auto()
- DEPENDENCIES = enum.auto()
- NONE = enum.auto()
-
-
-class ReusableSpecsSelector:
- """Selects specs that can be reused during concretization."""
-
- def __init__(self, configuration: spack.config.Configuration) -> None:
- self.configuration = configuration
- self.store = spack.store.create(configuration)
- self.reuse_strategy = ReuseStrategy.ROOTS
-
- reuse_yaml = self.configuration.get("concretizer:reuse", False)
- self.reuse_sources = []
- if not isinstance(reuse_yaml, typing.Mapping):
- if reuse_yaml is False:
- self.reuse_strategy = ReuseStrategy.NONE
- if reuse_yaml == "dependencies":
- self.reuse_strategy = ReuseStrategy.DEPENDENCIES
- self.reuse_sources.extend(
- [
- SpecFilter.from_store(
- configuration=self.configuration, include=[], exclude=[]
- ),
- SpecFilter.from_buildcache(
- configuration=self.configuration, include=[], exclude=[]
- ),
- SpecFilter.from_environment(
- configuration=self.configuration,
- include=[],
- exclude=[],
- env=ev.active_environment(), # includes all concrete includes
- ),
- ]
- )
- else:
- roots = reuse_yaml.get("roots", True)
- if roots is True:
- self.reuse_strategy = ReuseStrategy.ROOTS
- else:
- self.reuse_strategy = ReuseStrategy.DEPENDENCIES
- default_include = reuse_yaml.get("include", [])
- default_exclude = reuse_yaml.get("exclude", [])
- default_sources = [{"type": "local"}, {"type": "buildcache"}]
- for source in reuse_yaml.get("from", default_sources):
- include = source.get("include", default_include)
- exclude = source.get("exclude", default_exclude)
- if source["type"] == "environment" and "path" in source:
- env_dir = ev.as_env_dir(source["path"])
- active_env = ev.active_environment()
- if active_env and env_dir in active_env.included_concrete_envs:
- # If environment is included as a concrete environment, use the local copy
- # of specs in the active environment.
- # note: included concrete environments are only updated at concretization
- # time, and reuse needs to match the included specs.
- self.reuse_sources.append(
- SpecFilter.from_environment_included_concrete(
- self.configuration,
- include=include,
- exclude=exclude,
- env=active_env,
- included_concrete=env_dir,
- )
- )
- else:
- # If the environment is not included as a concrete environment, use the
- # current specs from its lockfile.
- self.reuse_sources.append(
- SpecFilter.from_environment(
- self.configuration,
- include=include,
- exclude=exclude,
- env=ev.environment_from_name_or_dir(env_dir),
- )
- )
- elif source["type"] == "environment":
- # reusing from the current environment implicitly reuses from all of the
- # included concrete environments
- self.reuse_sources.append(
- SpecFilter.from_environment(
- self.configuration,
- include=include,
- exclude=exclude,
- env=ev.active_environment(),
- )
- )
- elif source["type"] == "local":
- self.reuse_sources.append(
- SpecFilter.from_store(self.configuration, include=include, exclude=exclude)
- )
- elif source["type"] == "buildcache":
- self.reuse_sources.append(
- SpecFilter.from_buildcache(
- self.configuration, include=include, exclude=exclude
- )
- )
-
- def reusable_specs(self, specs: List[spack.spec.Spec]) -> List[spack.spec.Spec]:
- if self.reuse_strategy == ReuseStrategy.NONE:
- return []
-
- result = []
- for reuse_source in self.reuse_sources:
- result.extend(reuse_source.selected_specs())
- # If we only want to reuse dependencies, remove the root specs
- if self.reuse_strategy == ReuseStrategy.DEPENDENCIES:
- result = [spec for spec in result if not any(root in spec for root in specs)]
-
- return result
-
-
class Solver:
"""This is the main external interface class for solving.
@@ -4815,12 +3888,24 @@ class Solver:
"""
def __init__(self):
- self.driver = PyclingoDriver()
- self.selector = ReusableSpecsSelector(configuration=spack.config.CONFIG)
+ # Compute possible compilers first, so we see them as externals
+ _ = spack.compilers.config.all_compilers(init_config=True)
+
+ self._conc_cache = ConcretizationCache()
+ self.driver = PyclingoDriver(conc_cache=self._conc_cache)
+
+ # Compute packages configuration with implicit externals once and reuse it
+ self.packages_with_externals = external_config_with_implicit_externals(spack.config.CONFIG)
+ completion_mode = spack.config.CONFIG.get("concretizer:externals:completion")
+ self.selector = ReusableSpecsSelector(
+ configuration=spack.config.CONFIG,
+ external_parser=create_external_parser(self.packages_with_externals, completion_mode),
+ packages_with_externals=self.packages_with_externals,
+ )
@staticmethod
def _check_input_and_extract_concrete_specs(
- specs: List[spack.spec.Spec],
+ specs: Sequence[spack.spec.Spec],
) -> List[spack.spec.Spec]:
reusable: List[spack.spec.Spec] = []
analyzer = create_graph_analyzer()
@@ -4835,7 +3920,7 @@ def _check_input_and_extract_concrete_specs(
deps = {
edge.spec.name
for edge in s.edges_to_dependencies()
- if edge.direct and edge.when == spack.spec.Spec()
+ if edge.direct and edge.when == EMPTY_SPEC
}
if deps:
graph = analyzer.possible_dependencies(
@@ -4860,27 +3945,27 @@ def _check_input_and_extract_concrete_specs(
def solve_with_stats(
self,
- specs,
- out=None,
- timers=False,
- stats=False,
- tests=False,
- setup_only=False,
- allow_deprecated=False,
- ):
+ specs: Sequence[spack.spec.Spec],
+ out: Optional[io.IOBase] = None,
+ timers: bool = False,
+ stats: bool = False,
+ tests: spack.concretize.TestsType = False,
+ setup_only: bool = False,
+ allow_deprecated: bool = False,
+ ) -> Tuple[Result, Optional[spack.util.timer.Timer], Optional[Dict]]:
"""
Concretize a set of specs and track the timing and statistics for the solve
Arguments:
- specs (list): List of ``Spec`` objects to solve for.
+ specs: List of ``Spec`` objects to solve for.
out: Optionally write the generated ASP program to a file-like object.
- timers (bool): Print out coarse timers for different solve phases.
- stats (bool): Print out detailed stats from clingo.
- tests (bool or tuple): If True, concretize test dependencies for all packages.
+ timers: Print out coarse timers for different solve phases.
+ stats: Print out detailed stats from clingo.
+ tests: If True, concretize test dependencies for all packages.
If a tuple of package names, concretize test dependencies for named
packages (defaults to False: do not concretize test dependencies).
- setup_only (bool): if True, stop after setup and don't solve (default False).
- allow_deprecated (bool): allow deprecated version in the solve
+ setup_only: if True, stop after setup and don't solve (default False).
+ allow_deprecated: allow deprecated versions in the solve
"""
specs = [s.lookup_hash() for s in specs]
reusable_specs = self._check_input_and_extract_concrete_specs(specs)
@@ -4888,13 +3973,18 @@ def solve_with_stats(
setup = SpackSolverSetup(tests=tests)
output = OutputConfiguration(timers=timers, stats=stats, out=out, setup_only=setup_only)
- CONC_CACHE.flush_manifest()
- CONC_CACHE.cleanup()
- return self.driver.solve(
- setup, specs, reuse=reusable_specs, output=output, allow_deprecated=allow_deprecated
+ result = self.driver.solve(
+ setup,
+ specs,
+ reuse=reusable_specs,
+ packages_with_externals=self.packages_with_externals,
+ output=output,
+ allow_deprecated=allow_deprecated,
)
+ self._conc_cache.cleanup()
+ return result
- def solve(self, specs, **kwargs):
+ def solve(self, specs: Sequence[spack.spec.Spec], **kwargs) -> Result:
"""
Convenience function for concretizing a set of specs and ignoring timing
and statistics. Uses the same kwargs as solve_with_stats.
@@ -4904,8 +3994,14 @@ def solve(
return result
def solve_in_rounds(
- self, specs, out=None, timers=False, stats=False, tests=False, allow_deprecated=False
- ):
+ self,
+ specs: Sequence[spack.spec.Spec],
+ out: Optional[io.IOBase] = None,
+ timers: bool = False,
+ stats: bool = False,
+ tests: spack.concretize.TestsType = False,
+ allow_deprecated: bool = False,
+ ) -> Generator[Result, None, None]:
"""Solve for a stable model of specs in multiple rounds.
This relaxes the assumption of solve that everything must be consistent and
@@ -4937,6 +4033,7 @@ def solve_in_rounds(
setup,
input_specs,
reuse=reusable_specs,
+ packages_with_externals=self.packages_with_externals,
output=output,
allow_deprecated=allow_deprecated,
)
@@ -4955,8 +4052,7 @@ def solve_in_rounds(
for spec in result.specs:
reusable_specs.extend(spec.traverse())
- CONC_CACHE.flush_manifest()
- CONC_CACHE.cleanup()
+ self._conc_cache.cleanup()
class UnsatisfiableSpecError(spack.error.UnsatisfiableSpecError):
@@ -5000,16 +4096,14 @@ class SolverError(InternalConcretizerError):
get a solution.
"""
- def __init__(self, provided, conflicts):
+ def __init__(self, provided):
msg = (
- "Spack concretizer internal error. Please submit a bug report and include the "
- "command, environment if applicable and the following error message."
+ "Spack concretizer internal error. Please submit a bug report at "
+ "https://github.com/spack/spack and include the command and environment "
+ "if applicable."
f"\n {provided} is unsatisfiable" ) - if conflicts: - msg += ", errors are:" + "".join([f"\n {conflict}" for conflict in conflicts]) - super().__init__(msg) # Add attribute expected of the superclass interface @@ -5028,3 +4122,15 @@ class NoCompilerFoundError(spack.error.SpackError): class InvalidExternalError(spack.error.SpackError): """Raised when there is no possible compiler""" + + +class DeprecatedVersionError(spack.error.SpackError): + """Raised when user directly requests a deprecated version.""" + + +class InvalidVersionError(spack.error.SpackError): + """Raised when a version can't be satisfied by any possible versions.""" + + +class InvalidDependencyError(spack.error.SpackError): + """Raised when an explicit dependency is not a possible dependency.""" diff --git a/lib/spack/spack/solver/concretize.lp b/lib/spack/spack/solver/concretize.lp index e911cf398a790c..ea8d14072d2d65 100644 --- a/lib/spack/spack/solver/concretize.lp +++ b/lib/spack/spack/solver/concretize.lp @@ -16,67 +16,56 @@ { attr("node", node(0..X-1, Package)) } :- max_dupes(Package, X), not virtual(Package). { attr("virtual_node", node(0..X-1, Package)) } :- max_dupes(Package, X), virtual(Package). +%%%% +% Rules to prevent symmetry on duplicates +%%%% + +duplicate_penalty(node(ID1, Package), 1, "version") + :- attr("node", node(ID1, Package)), attr("node", node(ID2, Package)), + version_weight(node(ID1, Package), Weight1), version_weight(node(ID2, Package), Weight2), + multiple_unification_sets(Package), + ID2 = ID1 + 1, Weight2 < Weight1, + max_dupes(Package, X), X > 1, ID2 < X. + +% We can enforce this, and hope the grounder generates fewer rules +:- provider_weight(ProviderNode1, node(ID1, Virtual), Weight1), + provider_weight(ProviderNode2, node(ID2, Virtual), Weight2), + ID2 = ID1 + 1, Weight2 < Weight1, + max_dupes(Virtual, X), X > 1, ID2 < X. + % Integrity constraints on DAG nodes -:- attr("root", PackageNode), - not attr("node", PackageNode), - internal_error("Every root must be a node"). -:- attr("version", PackageNode, _), - not attr("node", PackageNode), - not attr("virtual_node", PackageNode), - internal_error("Only nodes and virtual_nodes can have versions"). -:- attr("node_version_satisfies", PackageNode, _), - not attr("node", PackageNode), - not attr("virtual_node", PackageNode), - internal_error("Only nodes and virtual_nodes can have version satisfaction"). -:- attr("hash", PackageNode, _), - not attr("node", PackageNode), - internal_error("Only nodes can have hashes"). -:- attr("node_platform", PackageNode, _), - not attr("node", PackageNode), - internal_error("Only nodes can have platforms"). -:- attr("node_os", PackageNode, _), not attr("node", PackageNode), - internal_error("Only nodes can have node_os"). -:- attr("node_target", PackageNode, _), not attr("node", PackageNode), - internal_error("Only nodes can have node_target"). -:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode), - internal_error("variant_value true for a non-node"). -:- attr("node_flag", PackageNode, _), not attr("node", PackageNode), - internal_error("node_flag assigned for non-node"). -:- attr("external_spec_selected", PackageNode, _), not attr("node", PackageNode), - internal_error("external_spec_selected for non-node"). -:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode), - internal_error("non-node depends on something"). -:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode), - internal_error("something depends_on a non-node"). 
-:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode), - internal_error("virtual node with no provider"). -:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode), - internal_error("provider with no virtual node"). -:- provider(PackageNode, _), not attr("node", PackageNode), - internal_error("provider with no real node"). -:- node_has_variant(PackageNode, _, _), not attr("node", PackageNode), - internal_error("node has variant for a non-node"). -:- attr("variant_set", PackageNode, _, _), not attr("node", PackageNode), - internal_error("variant_set for a non-node"). -:- variant_is_propagated(PackageNode, _), not attr("node", PackageNode), - internal_error("variant_is_propagated for a non-node"). - -:- attr("root", node(ID, PackageNode)), ID > min_dupe_id, - internal_error("root with a non-minimal duplicate ID"). +:- attr("root", PackageNode), not attr("node", PackageNode). +:- attr("version", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode). +:- attr("node_version_satisfies", PackageNode, _), not attr("node", PackageNode), not attr("virtual_node", PackageNode). +:- attr("hash", PackageNode, _), not attr("node", PackageNode). +:- attr("node_platform", PackageNode, _), not attr("node", PackageNode). +:- attr("node_os", PackageNode, _), not attr("node", PackageNode). +:- attr("node_target", PackageNode, _), not attr("node", PackageNode). +:- attr("variant_value", PackageNode, _, _), not attr("node", PackageNode). +:- attr("node_flag", PackageNode, _), not attr("node", PackageNode). +:- attr("depends_on", ParentNode, _, _), not attr("node", ParentNode). +:- attr("depends_on", _, ChildNode, _), not attr("node", ChildNode). +:- attr("virtual_node", VirtualNode), not provider(_, VirtualNode). +:- provider(_, VirtualNode), not attr("virtual_node", VirtualNode). +:- provider(PackageNode, _), not attr("node", PackageNode). +:- node_has_variant(PackageNode, _, _), not attr("node", PackageNode). +:- attr("variant_set", PackageNode, _, _), not attr("node", PackageNode). +:- variant_is_propagated(PackageNode, _), not attr("node", PackageNode). + +:- attr("root", node(ID, PackageNode)), ID > min_dupe_id. % Nodes in the "root" unification set cannot depend on non-root nodes if the dependency is "link" or "run" -:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "link"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("link dependency out of the root unification set"). -:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "run"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)), internal_error("run dependency out of the root unification set"). +:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "link"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)). +:- attr("depends_on", node(min_dupe_id, Package), node(ID, _), "run"), ID != min_dupe_id, unification_set("root", node(min_dupe_id, Package)). % Namespaces are statically assigned by a package fact if not otherwise set error(100, "{0} does not have a namespace", Package) :- attr("node", node(ID, Package)), - not attr("namespace", node(ID, Package), _), - internal_error("A node must have a namespace"). + not attr("namespace", node(ID, Package), _). 
+
error(100, "{0} cannot come from both {1} and {2} namespaces", Package, NS1, NS2)
:- attr("node", node(ID, Package)),
attr("namespace", node(ID, Package), NS1),
attr("namespace", node(ID, Package), NS2),
- NS1 != NS2,
- internal_error("A node cannot have two namespaces").
+ NS1 < NS2.
attr("namespace", node(ID, Package), Namespace)
:- attr("namespace_set", node(ID, Package), Namespace).
attr("namespace", node(ID, Package), Namespace)
@@ -86,21 +75,25 @@ attr("namespace", node(ID, Package), Namespace)
% Rules on "unification sets", i.e. on sets of nodes allowing a single configuration of any given package
unify(SetID, PackageName) :- unification_set(SetID, node(_, PackageName)).
-:- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName),
- internal_error("Cannot have multiple unification sets IDs for one set").
+
+error(1, "Cannot have multiple nodes for {0} in the same unification set {1}", PackageName, SetID)
+ :- 2 { unification_set(SetID, node(_, PackageName)) }, unify(SetID, PackageName).
unification_set("root", PackageNode) :- attr("root", PackageNode).
unification_set(SetID, ChildNode) :- attr("depends_on", ParentNode, ChildNode, Type), Type != "build", unification_set(SetID, ParentNode).
+build_only_dependency(ParentNode, node(X, Child)) :-
+ attr("depends_on", ParentNode, node(X, Child), "build"),
+ not attr("depends_on", ParentNode, node(X, Child), "link"),
+ not attr("depends_on", ParentNode, node(X, Child), "run").
+
unification_set(("build", node(X, Child)), node(X, Child))
- :- attr("depends_on", ParentNode, node(X, Child), Type),
- Type == "build",
+ :- build_only_dependency(ParentNode, node(X, Child)),
multiple_unification_sets(Child),
- unification_set(SetID, ParentNode).
+ unification_set(_, ParentNode).
unification_set("generic_build", node(X, Child))
- :- attr("depends_on", ParentNode, node(X, Child), Type),
- Type == "build",
+ :- build_only_dependency(ParentNode, node(X, Child)),
not multiple_unification_sets(Child),
unification_set(_, ParentNode).
@@ -108,14 +101,44 @@ unification_set(SetID, VirtualNode)
:- provider(PackageNode, VirtualNode),
unification_set(SetID, PackageNode).
+% Compute sub-sets of the nodes, if requested. These can be either the nodes connected
+% to another node by "link" edges, or the nodes connected to another node by "link" and
+% "run" edges.
+
+compute_closure(node(ID, Package), "linkrun") :-
+ condition_requirement(_, "closure", Package, _, "linkrun"),
+ attr("node", node(ID, Package)).
+
+attr("closure", PackageNode, DependencyNode, "linkrun") :-
+ attr("depends_on", PackageNode, DependencyNode, "link"),
+ not provider(DependencyNode, node(_, Language)) : language(Language);
+ compute_closure(PackageNode, "linkrun").
+
+attr("closure", PackageNode, DependencyNode, "linkrun") :-
+ attr("depends_on", PackageNode, DependencyNode, "run"),
+ not provider(DependencyNode, node(_, Language)) : language(Language);
+ compute_closure(PackageNode, "linkrun").
+
+attr("closure", PackageNode, DependencyNode, "linkrun") :-
+ attr("depends_on", ParentNode, DependencyNode, "link"),
+ not provider(DependencyNode, node(_, Language)) : language(Language);
+ attr("closure", PackageNode, ParentNode, "linkrun"),
+ compute_closure(PackageNode, "linkrun").
+
+attr("closure", PackageNode, DependencyNode, "linkrun") :-
+ attr("depends_on", ParentNode, DependencyNode, "run"),
+ not provider(DependencyNode, node(_, Language)) : language(Language);
+ attr("closure", PackageNode, ParentNode, "linkrun"),
+ compute_closure(PackageNode, "linkrun").
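% For illustration: the four "closure" rules above are ordinary transitive reachability,
% computed recursively over link/run edges. A minimal standalone sketch of the same
% pattern, with toy facts and predicate names rather than Spack's actual atoms, runnable
% with clingo:
%
%     link(a, b). link(b, c). run(c, d).
%     closure(X, Y) :- link(X, Y).
%     closure(X, Y) :- run(X, Y).
%     closure(X, Z) :- closure(X, Y), link(Y, Z).
%     closure(X, Z) :- closure(X, Y), run(Y, Z).
%     #show closure/2.
%
% As in the rules above, the recursion bottoms out on direct edges and grows one edge at
% a time; the language-provider exclusion is omitted from this sketch.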
+ +related(NodeA, NodeB) :- attr("closure", NodeA, NodeB, "linkrun"). + % Do not allow split dependencies, for now. This ensures that we don't construct graphs where e.g. % a python extension depends on setuptools@63.4 as a run dependency, but uses e.g. setuptools@68 % as a build dependency. % % We'll need to relax the rule before we get to actual cross-compilation -:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y, - internal_error("Cannot split link/build deptypes for a single edge (yet)"). - +:- depends_on(ParentNode, node(X, Dependency)), depends_on(ParentNode, node(Y, Dependency)), X < Y. #defined multiple_unification_sets/1. #defined runtime/1. @@ -125,24 +148,22 @@ unification_set(SetID, VirtualNode) %---- % In the "root" unification set only ID = 0 are allowed -:- unification_set("root", node(ID, _)), ID != 0, internal_error("root unification set has node with non-zero unification set ID"). +:- unification_set("root", node(ID, _)), ID != 0. % In the "root" unification set we allow only packages from the link-run possible subDAG -:- unification_set("root", node(_, Package)), not possible_in_link_run(Package), not virtual(Package), internal_error("package outside possible link/run graph in root unification set"). +:- unification_set("root", node(_, Package)), not possible_in_link_run(Package), not virtual(Package). % Each node must belong to at least one unification set -:- attr("node", PackageNode), not unification_set(_, PackageNode), internal_error("node belongs to no unification set"). +:- attr("node", PackageNode), not unification_set(_, PackageNode). % Cannot have a node with an ID, if lower ID of the same package are not used :- attr("node", node(ID1, Package)), not attr("node", node(ID2, Package)), - max_dupes(Package, X), ID1=0..X-1, ID2=0..X-1, ID2 < ID1, - internal_error("node skipped id number"). + max_dupes(Package, X), ID1=0..X-1, ID2=0..X-1, ID2 < ID1. :- attr("virtual_node", node(ID1, Package)), not attr("virtual_node", node(ID2, Package)), - max_dupes(Package, X), ID1=0..X-1, ID2=0..X-1, ID2 < ID1, - internal_error("virtual node skipped id number"). + max_dupes(Package, X), ID1=0..X-1, ID2=0..X-1, ID2 < ID1. % Prefer to assign lower ID to virtuals associated with a lower penalty provider :- not unification_set("root", node(X, Virtual)), @@ -165,6 +186,7 @@ node_attributes_with_custom_rules("virtual_on_edge"). node_attributes_with_custom_rules("provider_set"). node_attributes_with_custom_rules("concrete_variant_set"). node_attributes_with_custom_rules("concrete_variant_request"). +node_attributes_with_custom_rules("closure"). trigger_condition_holds(TriggerID, node(min_dupe_id, Package)) :- solve_literal(TriggerID), @@ -180,19 +202,24 @@ mentioned_in_literal(Root, Mentioned) :- mentioned_in_literal(TriggerID, Root, M literal_node(Root, node(min_dupe_id, Root)) :- mentioned_in_literal(Root, Root). 1 { literal_node(Root, node(0..Y-1, Mentioned)) : max_dupes(Mentioned, Y) } 1 :- - mentioned_in_literal(Root, Mentioned), Mentioned != Root, - internal_error("must have exactly one condition_set for literals"). + mentioned_in_literal(Root, Mentioned), Mentioned != Root. build_dependency_of_literal_node(LiteralNode, node(X, BuildDependency)) :- literal_node(Root, LiteralNode), build(LiteralNode), - not external(LiteralNode), build_requirement(LiteralNode, node(X, BuildDependency)), attr("direct_dependency", LiteralNode, node_requirement("node", BuildDependency)). 
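% For illustration: the split-dependency rule above is a bare integrity constraint. Any
% candidate answer set in which one parent reaches two duplicates of the same dependency
% is discarded outright. The mechanism in isolation, with toy predicates, runnable with
% clingo:
%
%     { uses(p, 0); uses(p, 1) }.        % freely choose duplicates of package p
%     :- uses(P, X), uses(P, Y), X < Y.  % but never two of them at once
%     #show uses/2.
%
% "clingo 0" enumerates three models: neither duplicate, or exactly one of the two.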
condition_set(node(min_dupe_id, Root), LiteralNode) :- literal_node(Root, LiteralNode). condition_set(LiteralNode, BuildNode) :- build_dependency_of_literal_node(LiteralNode, BuildNode). +% Generate a comprehensible error for cases like 'foo ^bar' where 'bar' is a build dependency +% of a transitive dependency of 'foo' +error(1, "{0} is not a direct 'build' or 'test' dependency, or transitive 'link' or 'run' dependency of any root", Literal) + :- literal_node(RootPackage, node(X, Literal)), + not depends_on(node(min_dupe_id, RootPackage), node(X, Literal)), + not unification_set("root", node(X, Literal)). + :- build_dependency_of_literal_node(LiteralNode, BuildNode), not attr("depends_on", LiteralNode, BuildNode, "build"). @@ -214,8 +241,7 @@ associated_with_root(RootNode, ChildNode) :- :- attr("root", RootNode), condition_set(RootNode, node(X, Package)), not virtual(Package), - not associated_with_root(RootNode, node(X, Package)), - internal_error("nodes in root condition set must be associated with root"). + not associated_with_root(RootNode, node(X, Package)). #defined concretize_everything/0. #defined literal/1. @@ -227,7 +253,7 @@ attr_single_value("node_os"). attr_single_value("node_target"). % Error when no attribute is selected -error(100, no_value_error, Attribute, Package) +error(10000, no_value_error, Attribute, Package) :- attr("node", node(ID, Package)), attr_single_value(Attribute), not attr(Attribute, node(ID, Package), _). @@ -242,23 +268,6 @@ error(100, multiple_values_error, Attribute, Package) % Version semantics %----------------------------------------------------------------------------- -% Versions are declared with a weight and an origin, which indicates where the -% version was declared (e.g. "package_py" or "external"). -pkg_fact(Package, version_declared(Version, Weight)) :- pkg_fact(Package, version_declared(Version, Weight, _)). - -% We can't emit the same version **with the same weight** from two different sources -:- pkg_fact(Package, version_declared(Version, Weight, Origin1)), - pkg_fact(Package, version_declared(Version, Weight, Origin2)), - Origin1 < Origin2, - internal_error("Two versions with identical weights"). - -% We cannot use a version declared for an installed package if we end up building it -:- pkg_fact(Package, version_declared(Version, Weight, "installed")), - attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - not attr("hash", node(ID, Package), _), - internal_error("Reuse version weight used for built package"). - % versions are declared w/priority -- declared with priority implies declared pkg_fact(Package, version_declared(Version)) :- pkg_fact(Package, version_declared(Version, _)). @@ -268,13 +277,19 @@ pkg_fact(Package, version_declared(Version)) :- pkg_fact(Package, version_declar % is not precisely one version chosen. Error facts are heavily optimized % against to ensure they cannot be inferred when a non-error solution is % possible -{ attr("version", node(ID, Package), Version) : pkg_fact(Package, version_declared(Version)) } - :- attr("node", node(ID, Package)). -% A virtual package may or may not have a version, but never has more than one -error(100, "Cannot select a single version for virtual '{0}'", Virtual) - :- attr("virtual_node", node(ID, Virtual)), - 2 { attr("version", node(ID, Virtual), Version) }. +% Pick a single version among the possible ones +1 { choose_version(node(ID, Package), Version) : pkg_fact(Package, version_declared(Version)) } 1 :- attr("node", node(ID, Package)). 
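% For illustration: the "1 { ... } 1" head above is a cardinality-bounded choice, so
% every node must pick exactly one declared version. The same idiom with a toy package
% and made-up versions, runnable with "clingo 0" to enumerate both models:
%
%     node(pkg).
%     version_declared(pkg, "2.1"). version_declared(pkg, "2.0").
%     1 { choose_version(P, V) : version_declared(P, V) } 1 :- node(P).
%     #show choose_version/2.
%
% Each answer set contains exactly one choose_version atom; which one survives in Spack
% is then decided by the optimization on version weights.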
+ +% To choose the "fake" version of virtual packages, we need a separate rule. +% Note that a virtual node may or may not have a version, but cannot have more than one. +{ choose_version(node(ID, Package), Version) : pkg_fact(Package, version_satisfies(Constraint, Version)) } 1 + :- attr("node_version_satisfies", node(ID, Package), Constraint), + attr("virtual_node", node(ID, Package)). + +#defined compiler_package/1. + +attr("version", node(ID, Package), Version) :- choose_version(node(ID, Package), Version). % If we select a deprecated version, mark the package as deprecated attr("deprecated", node(ID, Package), Version) :- @@ -285,54 +300,43 @@ attr("deprecated", node(ID, Package), Version) :- error(100, "Package '{0}' needs the deprecated version '{1}', and this is not allowed", Package, Version) :- deprecated_versions_not_allowed(), attr("version", node(ID, Package), Version), - not external(node(ID, Package)), - not concrete(node(ID, Package)), + build(node(ID, Package)), pkg_fact(Package, deprecated_version(Version)). -% we can't use the weight for an external version if we don't use the -% corresponding external spec. -:- attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - pkg_fact(Package, version_declared(Version, Weight, "external")), - not external(node(ID, Package)), - internal_error("External weight used for built package"). - % we can't use a weight from an installed spec if we are building it % and vice-versa -:- attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - pkg_fact(Package, version_declared(Version, Weight, "installed")), - build(node(ID, Package)), - internal_error("Reuse version weight used for build package"). -:- attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - not pkg_fact(Package, version_declared(Version, Weight, "installed")), - not pkg_fact(Package, version_declared(Version, Weight, "installed_git_version")), - concrete(node(ID, Package)), - internal_error("Build version weight used for reused package"). +1 { allowed_origins(Origin): pkg_fact(Package, version_origin(Version, Origin)), + Origin != "installed", Origin != "packages_yaml"} + :- attr("version", node(ID, Package), Version), + build(node(ID, Package)). + +% We cannot use a version declared for an installed package if we end up building it +:- not pkg_fact(Package, version_origin(Version, "installed")), + not pkg_fact(Package, version_origin(Version, "installed_git_version")), + attr("version", node(ID, Package), Version), + concrete(node(ID, Package)). -1 { version_weight(node(ID, Package), Weight) : pkg_fact(Package, version_declared(Version, Weight)) } 1 +version_weight(node(ID, Package), Weight) :- attr("version", node(ID, Package), Version), attr("node", node(ID, Package)), - internal_error("version weights must exist and be unique"). + pkg_fact(Package, version_declared(Version, Weight)). -% node_version_satisfies implies that exactly one of the satisfying versions -% is the package's version, and vice versa. -% While this choice rule appears redundant with the initial choice rule for -% versions, virtual nodes with version constraints require this rule to be -% able to choose versions -{ attr("version", node(ID, Package), Version) : pkg_fact(Package, version_satisfies(Constraint, Version)) } - :- attr("node_version_satisfies", node(ID, Package), Constraint). 
+version_deprecation_penalty(node(ID, Package), Penalty) + :- pkg_fact(Package, deprecated_version(Version)), + pkg_fact(Package, version_deprecation_penalty(Penalty)), + attr("node", node(ID, Package)), + attr("version", node(ID, Package), Version), + not external(node(ID, Package)). % More specific error message if the version cannot satisfy some constraint % Otherwise covered by `no_version_error` and `versions_conflict_error`. -error(1, "Cannot satisfy '{0}@{1}'", Package, Constraint) +error(10000, "Cannot satisfy '{0}@{1}' 1({2})", Package, Constraint, Version) :- attr("node_version_satisfies", node(ID, Package), Constraint), attr("version", node(ID, Package), Version), not pkg_fact(Package, version_satisfies(Constraint, Version)). -error(10, "Cannot satisfy '{0}@{1}'", Package, Constraint) +error(10000, "Cannot satisfy '{0}@{1}'", Package, Constraint) :- attr("node_version_satisfies", node(ID, Package), Constraint), not attr("version", node(ID, Package), _). @@ -356,10 +360,7 @@ error(10, "Commit '{0}' must match package.py value '{1}' for '{2}@={3}'", Vsha, pkg_fact(Package, version_has_commit(Version, Psha)), Vsha != Psha. -#defined version_satisfies/3. #defined deprecated_versions_not_allowed/0. -#defined deprecated_version/2. -#defined can_accept_commit/2. %----------------------------------------------------------------------------- % Spec conditions and imposed constraints @@ -390,40 +391,137 @@ condition_packages(ID, A1) :- condition_requirement(ID, _, A1, _, _, _). trigger_node(ID, node(PackageID, Package), node(PackageID, Package)) :- pkg_fact(Package, trigger_id(ID)), attr("node", node(PackageID, Package)). trigger_node(ID, node(PackageID, Package), node(VirtualID, Virtual)) :- pkg_fact(Virtual, trigger_id(ID)), provider(node(PackageID, Package), node(VirtualID, Virtual)). -condition_nodes(TriggerID, PackageNode, node(X, A1)) - :- condition_packages(TriggerID, A1), +% This is the "real node" that triggers the request, e.g. if the request started from "mpi" this is the mpi provider +trigger_real_node(ID, PackageNode) :- trigger_node(ID, PackageNode, _). + +% This is the requestor node, which may be a "real" or a "virtual" node +trigger_requestor_node(ID, RequestorNode) :- trigger_node(ID, _, RequestorNode). + +trigger_package_requirement(TriggerNode, A1) :- + trigger_real_node(ID, TriggerNode), + condition_packages(ID, A1). + +condition_nodes(PackageNode, node(X, A1)) + :- trigger_package_requirement(PackageNode, A1), condition_set(PackageNode, node(X, A1)), - not self_build_requirement(PackageNode, node(X, A1)), - trigger_node(TriggerID, PackageNode, _). + not self_build_requirement(PackageNode, node(X, A1)). cannot_hold(TriggerID, PackageNode) :- condition_packages(TriggerID, A1), not condition_set(PackageNode, node(_, A1)), - trigger_node(TriggerID, PackageNode, _). + trigger_real_node(TriggerID, PackageNode), + attr("node", PackageNode). 
+% Aggregates generic condition requirements with TriggerID, to see if a condition holds trigger_condition_holds(ID, RequestorNode) :- trigger_node(ID, PackageNode, RequestorNode); - attr(Name, node(X, A1)) : condition_requirement(ID, Name, A1), condition_nodes(ID, PackageNode, node(X, A1)); - attr(Name, node(X, A1), A2) : condition_requirement(ID, Name, A1, A2), condition_nodes(ID, PackageNode, node(X, A1)); - attr(Name, node(X, A1), A2, A3) : condition_requirement(ID, Name, A1, A2, A3), condition_nodes(ID, PackageNode, node(X, A1)), not node_attributes_with_custom_rules(Name); - attr(Name, node(X, A1), A2, A3, A4) : condition_requirement(ID, Name, A1, A2, A3, A4), condition_nodes(ID, PackageNode, node(X, A1)); - % Special cases - attr("depends_on", node(X, A1), node(Y, A2), A3) : condition_requirement(ID, "depends_on", A1, A2, A3), condition_nodes(ID, PackageNode, node(X, A1)), condition_nodes(ID, PackageNode, node(Y, A2)); + satisfied(trigger(PackageNode), condition_requirement(Name, A1)) : condition_requirement(ID, Name, A1); + satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2)) : condition_requirement(ID, Name, A1, A2); + satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2, A3)) : condition_requirement(ID, Name, A1, A2, A3); + satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2, A3, A4)) : condition_requirement(ID, Name, A1, A2, A3, A4); not cannot_hold(ID, PackageNode). + +%%%% +% Conditions verified on actual nodes in the DAG +%%%% + +% Here we project out the trigger ID from condition requirements, so that we can reduce the space +% of satisfied facts below. All we care about below is if a condition is met (e.g. a node exists +% in the DAG), we don't care instead about *which* rule is requesting that. +generic_condition_requirement(Name, A1) :- condition_requirement(ID, Name, A1). +generic_condition_requirement(Name, A1, A2) :- condition_requirement(ID, Name, A1, A2). +generic_condition_requirement(Name, A1, A2, A3) :- condition_requirement(ID, Name, A1, A2, A3). +generic_condition_requirement(Name, A1, A2, A3, A4) :- condition_requirement(ID, Name, A1, A2, A3, A4). + +satisfied(trigger(PackageNode), condition_requirement(Name, A1)) :- + attr(Name, node(X, A1)), + generic_condition_requirement(Name, A1), + condition_nodes(PackageNode, node(X, A1)). + +satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2)) :- + attr(Name, node(X, A1), A2), + generic_condition_requirement(Name, A1, A2), + condition_nodes(PackageNode, node(X, A1)). + +satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2, A3)) :- + attr(Name, node(X, A1), A2, A3), + generic_condition_requirement(Name, A1, A2, A3), + condition_nodes(PackageNode, node(X, A1)), + not node_attributes_with_custom_rules(Name). + +satisfied(trigger(PackageNode), condition_requirement(Name, A1, A2, A3, A4)) :- + attr(Name, node(X, A1), A2, A3, A4), + generic_condition_requirement(Name, A1, A2, A3, A4), + condition_nodes(PackageNode, node(X, A1)). + +satisfied(trigger(PackageNode), condition_requirement("depends_on", A1, A2, A3)) :- + attr("depends_on", node(X, A1), node(Y, A2), A3), + generic_condition_requirement("depends_on", A1, A2, A3), + condition_nodes(PackageNode, node(X, A1)), + condition_nodes(PackageNode, node(Y, A2)). + +satisfied(trigger(PackageNode), condition_requirement("concrete_variant_request", A1, A2, A3)) :- + generic_condition_requirement("concrete_variant_request", A1, A2, A3), + condition_nodes(PackageNode, node(X, A1)). 
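% For illustration: the rewritten trigger_condition_holds relies on conditional
% literals. Its body demands a satisfied(...) atom for every condition_requirement of
% the trigger, so a single unmet requirement blocks the whole condition. The idiom in
% isolation, with toy names, runnable with clingo:
%
%     requirement(t1, r1). requirement(t1, r2).
%     met(r1). met(r2).
%     trigger(t1).
%     holds(T) :- trigger(T); met(R) : requirement(T, R).
%     #show holds/1.
%
% holds(t1) is derived only while both met(r1) and met(r2) are present; deleting either
% fact removes it.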
+ +satisfied(trigger(PackageNode), condition_requirement("closure", A1, A2, A3)) :- + attr("closure", node(X, A1), node(_, A2), A3), + generic_condition_requirement("closure", A1, A2, A3), + condition_nodes(PackageNode, node(X, A1)). + +%%%% +% Conditions verified on pure build deps of reused nodes +%%%% + +satisfied(trigger(PackageNode), condition_requirement("node", A1)) :- + trigger_real_node(ID, PackageNode), + reused_provider(PackageNode, _), + condition_requirement(ID, "node", A1). + +satisfied(trigger(PackageNode), condition_requirement("virtual_node", A1)) :- + trigger_real_node(ID, PackageNode), + reused_provider(PackageNode, node(_, A1)), + condition_requirement(ID, "virtual_node", A1). + +satisfied(trigger(PackageNode), condition_requirement("virtual_on_incoming_edges", A1, A2)) :- + trigger_real_node(ID, PackageNode), + reused_provider(node(Hash, A1), node(Hash, A2)), + condition_requirement(ID, "virtual_on_incoming_edges", A1, A2). + +satisfied(trigger(node(Hash, Package)), condition_requirement(Name, Package, A1)) :- + trigger_real_node(ID, node(Hash, Package)), + reused_provider(node(Hash, Package), node(Hash, Language)), + hash_attr(Hash, Name, Package, A1), + condition_requirement(ID, Name, Package, A1). + +satisfied(trigger(node(Hash, Package)), condition_requirement("node_version_satisfies", Package, VersionConstraint)) :- + trigger_real_node(ID, node(Hash, Package)), + reused_provider(node(Hash, Package), node(Hash, Language)), + hash_attr(Hash, "version", Package, Version), + condition_requirement(ID, "node_version_satisfies", Package, VersionConstraint), + pkg_fact(Package, version_satisfies(VersionConstraint, Version)). + +satisfied(trigger(node(Hash, Package)), condition_requirement(Name, Package, A1, A2)) :- + trigger_real_node(ID, node(Hash, Package)), + reused_provider(node(Hash, Package), node(Hash, Language)), + hash_attr(Hash, Name, Package, A1, A2), + condition_requirement(ID, Name, Package, A1, A2). + + condition_with_concrete_variant(ID, Package, Variant) :- condition_requirement(ID, "concrete_variant_request", Package, Variant, _). cannot_hold(ID, PackageNode) :- not attr("variant_value", node(X, A1), Variant, Value), condition_with_concrete_variant(ID, A1, Variant), condition_requirement(ID, "concrete_variant_request", A1, Variant, Value), - condition_nodes(ID, PackageNode, node(X, A1)). + condition_nodes(PackageNode, node(X, A1)). cannot_hold(ID, PackageNode) :- attr("variant_value", node(X, A1), Variant, Value), condition_with_concrete_variant(ID, A1, Variant), not condition_requirement(ID, "concrete_variant_request", A1, Variant, Value), - condition_nodes(ID, PackageNode, node(X, A1)). + condition_nodes(PackageNode, node(X, A1)). condition_holds(ConditionID, node(X, Package)) :- pkg_fact(Package, condition_trigger(ConditionID, TriggerID)), @@ -442,7 +540,7 @@ trigger_and_effect(Package, TriggerID, EffectID) :- trigger_and_effect(Package, impose(EffectID, node(X, Package)) :- not subcondition(ConditionID, _), trigger_and_effect(Package, ConditionID, TriggerID, EffectID), - trigger_node(TriggerID, _, node(X, Package)), + trigger_requestor_node(TriggerID, node(X, Package)), trigger_condition_holds(TriggerID, node(X, Package)), not do_not_impose(EffectID, node(X, Package)). 
@@ -453,7 +551,7 @@ impose(EffectID, node(X, Package))
condition_holds(ConditionID, ConditionNode),
condition_set(ConditionNode, node(X, Package)),
trigger_and_effect(Package, SubconditionID, TriggerID, EffectID),
- trigger_node(TriggerID, _, node(X, Package)),
+ trigger_requestor_node(TriggerID, node(X, Package)),
trigger_condition_holds(TriggerID, node(X, Package)),
not do_not_impose(EffectID, node(X, Package)).
@@ -463,32 +561,27 @@ imposed_packages(ID, A1) :- imposed_constraint(ID, _, A1, _, _).
imposed_packages(ID, A1) :- imposed_constraint(ID, _, A1, _, _, _).
imposed_packages(ID, A1) :- imposed_constraint(ID, "depends_on", _, A1, _).
-imposed_nodes(EffectID, node(NodeID, Package), node(X, A1))
- :- trigger_and_effect(Package, TriggerID, EffectID),
- imposed_packages(EffectID, A1),
- condition_set(node(NodeID, Package), node(X, A1)),
- trigger_node(TriggerID, _, node(NodeID, Package)),
+imposed_nodes(node(NodeID, Package), node(X, A1))
+ :- condition_set(node(NodeID, Package), node(X, A1)),
% We don't want to add build requirements to imposed nodes, to avoid
% unsat problems when we deal with self-dependencies: gcc@14 %gcc@10
not self_build_requirement(node(NodeID, Package), node(X, A1)).
self_build_requirement(node(X, Package), node(Y, Package)) :- build_requirement(node(X, Package), node(Y, Package)).
-imposed_nodes(ConditionID, PackageNode, node(X, A1))
+imposed_nodes(PackageNode, node(X, A1))
:- imposed_packages(ConditionID, A1),
condition_set(PackageNode, node(X, A1)),
attr("hash", PackageNode, ConditionID).
-:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)),
- internal_error("Imposing constraint outside of condition set").
-:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(ID, PackageNode, node(_, A1)),
- internal_error("Imposing constraint outside of imposed_nodes").
+:- imposed_packages(ID, A1), impose(ID, PackageNode), not condition_set(PackageNode, node(_, A1)).
+:- imposed_packages(ID, A1), impose(ID, PackageNode), not imposed_nodes(PackageNode, node(_, A1)).
% Conditions that hold may impose constraints on other specs
-attr(Name, node(X, A1)) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1), imposed_nodes(ID, PackageNode, node(X, A1)).
-attr(Name, node(X, A1), A2) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2), imposed_nodes(ID, PackageNode, node(X, A1)), not node_attributes_with_custom_rules(Name).
-attr(Name, node(X, A1), A2, A3) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2, A3), imposed_nodes(ID, PackageNode, node(X, A1)), not node_attributes_with_custom_rules(Name).
-attr(Name, node(X, A1), A2, A3, A4) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2, A3, A4), imposed_nodes(ID, PackageNode, node(X, A1)).
+attr(Name, node(X, A1)) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1), imposed_nodes(PackageNode, node(X, A1)).
+attr(Name, node(X, A1), A2) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2), imposed_nodes(PackageNode, node(X, A1)), not node_attributes_with_custom_rules(Name).
+attr(Name, node(X, A1), A2, A3) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2, A3), imposed_nodes(PackageNode, node(X, A1)), not node_attributes_with_custom_rules(Name).
+attr(Name, node(X, A1), A2, A3, A4) :- impose(ID, PackageNode), imposed_constraint(ID, Name, A1, A2, A3, A4), imposed_nodes(PackageNode, node(X, A1)).
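% For illustration: imposition, as in the attr(...) rules just above, is simply copying
% a fired condition's payload facts onto the nodes it governs. Reduced to its skeleton
% with toy facts, runnable with clingo:
%
%     imposed_constraint(e1, "version", pkg, "2.0").
%     impose(e1, n1).
%     imposed_nodes(n1, pkg).
%     attr(Name, Pkg, Value) :- impose(ID, Node),
%                               imposed_constraint(ID, Name, Pkg, Value),
%                               imposed_nodes(Node, Pkg).
%     #show attr/3.
%
% This yields attr("version", pkg, "2.0"); the real rules additionally thread through
% duplicate IDs and the custom-rule exclusions.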
% Provider set is relevant only for literals, since it's the only place where `^[virtuals=foo] bar` % might appear in the HEAD of a rule @@ -505,15 +598,14 @@ provider(ProviderNode, VirtualNode) :- attr("provider_set", ProviderNode, Virtua % satisfy the dependency. 1 { attr("depends_on", node(X, A1), node(0..Y-1, A2), A3) : max_dupes(A2, Y) } 1 :- impose(ID, node(X, A1)), - imposed_constraint(ID, "depends_on", A1, A2, A3), - internal_error("Build deps must land in exactly one duplicate"). + imposed_constraint(ID, "depends_on", A1, A2, A3). % For := we must keep track of the origin of the fact, since we need to check % each condition separately, i.e. foo:=a,b in one place and foo:=c in another % should not make foo:=a,b,c possible attr("concrete_variant_set", node(X, A1), Variant, Value, ID) :- impose(ID, PackageNode), - imposed_nodes(ID, PackageNode, node(X, A1)), + imposed_nodes(PackageNode, node(X, A1)), imposed_constraint(ID, "concrete_variant_set", A1, Variant, Value). @@ -527,14 +619,13 @@ attr("concrete_variant_set", node(X, A1), Variant, Value, ID) unification_set("root", RootNode), condition_set(RootNode, node(A1_DUPE_ID, A1)), not self_build_requirement(RootNode, node(A1_DUPE_ID, A1)), - imposed_constraint(ID, "depends_on", A1, A2, A3), - internal_error("Build deps must land in exactly one duplicate"). + imposed_constraint(ID, "depends_on", A1, A2, A3). :- attr("direct_dependency", ParentNode, node_requirement("virtual_on_incoming_edges", ChildPkg, Virtual)), not attr("virtual_on_edge", ParentNode, node(_, ChildPkg), Virtual). % If the parent is built, then we have a build_requirement on another node. For concrete nodes, -% or external nodes, we don't since we are trimming their build dependencies. +% we don't since we are trimming their build dependencies. % Concrete nodes :- attr("direct_dependency", ParentNode, node_requirement("node", BuildDependency)), @@ -550,8 +641,26 @@ attr("concrete_variant_set", node(X, A1), Variant, Value, ID) :- attr("direct_dependency", ParentNode, node_requirement("provider_set", BuildDependency, Virtual)), concrete_build_requirement(ParentNode, BuildDependency), attr("concrete_build_dependency", ParentNode, BuildDependency, BuildDependencyHash), - attr("virtual_on_build_edge", ParentNode, BuildDependency, Virtual), - not 1 { pkg_fact(BuildDependency, version_satisfies(Constraint, Version)) : hash_attr(BuildDependencyHash, "version", BuildDependency, Version) } 1. + not attr("virtual_on_build_edge", ParentNode, BuildDependency, Virtual). + +% Give a penalty if reuse introduces a node compiled with a compiler that is not used otherwise +compiler_from_reuse(Hash, DependencyPackage) :- + attr("concrete_build_dependency", ParentNode, DependencyPackage, Hash), + attr("virtual_on_build_edge", ParentNode, DependencyPackage, Virtual), + language(Virtual). + +compiler_penalty_from_reuse(Hash) :- + compiler_from_reuse(Hash, DependencyPackage), + not node_compiler(_, node(_, DependencyPackage)), + % We don't want to give penalties if we're just installing binaries + will_build_packages(). + +compiler_penalty_from_reuse(Hash) :- + compiler_from_reuse(Hash, DependencyPackage), + not 1 { attr("hash", node(X, DependencyPackage), Hash) : node_compiler(_, node(X, DependencyPackage)) }, + % We don't want to give penalties if we're just installing binaries + will_build_packages(). 
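% For illustration: compiler_penalty_from_reuse only has an effect if its atoms carry
% weight in the optimization, which is not part of this hunk. The usual pattern is to
% feed such atoms into #minimize, so penalized models stay admissible but rank worse. A
% sketch with toy atoms, runnable with clingo:
%
%     1 { pick(reuse_with_foreign_compiler); pick(rebuild) } 1.
%     penalty(1) :- pick(reuse_with_foreign_compiler).
%     #minimize { W : penalty(W) }.
%     #show pick/1.
%
% The optimal model is pick(rebuild): the penalized alternative is still a model, just a
% worse one.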
+ error(100, "Cannot satisfy the request on {0} to have {1}={2}", BuildDependency, Variant, Value) :- attr("direct_dependency", ParentNode, node_requirement("variant_set", BuildDependency, Variant, Value)), @@ -584,23 +693,6 @@ error(100, "Cannot satisfy the request on {0} to have the following hash {1}", B attr("direct_dependency", ParentNode, node_requirement("hash", BuildDependency, BuildHash)), BuildHash != BuildDependencyHash. -% External nodes -:- attr("direct_dependency", ParentNode, node_requirement("node", BuildDependency)), - external(ParentNode), - not attr("external_build_requirement", ParentNode, node_requirement("node", BuildDependency)). - -candidate_external_version(Constraint, BuildDependency, Version) - :- attr("direct_dependency", ParentNode, node_requirement("node_version_satisfies", BuildDependency, Constraint)), - external(ParentNode), - pkg_fact(BuildDependency, version_satisfies(Constraint, Version)). - -error(100, "External {0} cannot satisfy both {1} and {2}", BuildDependency, LiteralConstraint, ExternalConstraint) - :- attr("direct_dependency", ParentNode, node_requirement("node_version_satisfies", BuildDependency, LiteralConstraint)), - external(ParentNode), - attr("external_build_requirement", ParentNode, node_requirement("node_version_satisfies", BuildDependency, ExternalConstraint)), - not 1 { pkg_fact(BuildDependency, version_satisfies(ExternalConstraint, Version)) : candidate_external_version(LiteralConstraint, BuildDependency, Version) }. - - % Asking for gcc@10 %gcc@9 shouldn't give us back an external gcc@10, just because of the hack % we have on externals :- attr("direct_dependency", node(X, Parent), node_requirement("node", BuildDependency)), @@ -614,10 +706,18 @@ error(100, "External {0} cannot satisfy both {1} and {2}", BuildDependency, Lite not attr("dependency_holds", ParentNode, Virtual,"run"), virtual(Virtual). +% The virtual build dependency must be on the correct duplicate +:- virtual_build_requirement(ParentNode, node(X, Virtual)), + provider(ProviderNode, node(X, Virtual)), + not depends_on(ParentNode, ProviderNode). + attr("virtual_node", VirtualNode) :- virtual_build_requirement(ParentNode, VirtualNode). -{ build_requirement(ParentNode, ProviderNode) } :- +build_requirement(ParentNode, ProviderNode) :- virtual_build_requirement(ParentNode, VirtualNode), - provider(ProviderNode, VirtualNode). + provider(ProviderNode, VirtualNode), + not attr("depends_on", ParentNode, ProviderNode, "link"), + not attr("depends_on", ParentNode, ProviderNode, "run"), + attr("depends_on", ParentNode, ProviderNode, "build"). % From cli we can have literal expressions like: % @@ -651,15 +751,31 @@ attr("node_platform_set", node(X, BuildDependency), NodePlatform) :- attr("direct_dependency", ParentNode, node_requirement("node_platform_set", BuildDependency, NodePlatform)), build_requirement(ParentNode, node(X, BuildDependency)). +attr("node_flag_set", node(X, BuildDependency), NodeFlag) :- + attr("direct_dependency", ParentNode, node_requirement("node_flag_set", BuildDependency, NodeFlag)), + build_requirement(ParentNode, node(X, BuildDependency)). + attr("hash", node(X, BuildDependency), BuildHash) :- attr("direct_dependency", ParentNode, node_requirement("hash", BuildDependency, BuildHash)), build_requirement(ParentNode, node(X, BuildDependency)). - +% For a spec like `hdf5 %cxx=gcc` we need to ensure that +% 1. gcc is a provider for cxx +% 2. 
hdf5 depends on that provider for cxx 1 { attr("provider_set", node(X, BuildDependency), node(0..Y-1, Virtual)) : max_dupes(Virtual, Y) } 1 :- attr("direct_dependency", ParentNode, node_requirement("provider_set", BuildDependency, Virtual)), direct_dependency(ParentNode, node(X, BuildDependency)). +error(10, "{0} cannot have a dependency on {1}", Package, Virtual) + :- attr("direct_dependency", node(ID, Package), node_requirement("provider_set", BuildDependency, Virtual)), + direct_dependency(node(ID, Package), node(X, BuildDependency)), + not attr("virtual_on_edge", node(ID, Package), node(X, BuildDependency), Virtual). + +% For a spec like `hdf5 %cxx` we need to ensure that the virtual is needed on a direct edge +error(10, "{0} cannot have a dependency on {1}", Package, Virtual) + :- attr("direct_dependency", node(ID, Package), node_requirement("virtual_node", Virtual)), + not attr("virtual_on_edge", node(ID, Package), _, Virtual). + % Reconstruct virtual dependencies for reused specs attr("virtual_on_edge", node(X, A1), node(Y, A2), Virtual) :- impose(ID, node(X, A1)), @@ -686,17 +802,15 @@ attr("uses_virtual", PackageNode, Virtual) :- :- attr("node", node(ID, Package)), attr("hash", node(ID, Package), Hash), attr("variant_value", node(ID, Package), Variant, Value), - not imposed_constraint(Hash, "variant_value", Package, Variant, Value), - internal_error("imposed hash without imposing all variant values"). + not imposed_constraint(Hash, "variant_value", Package, Variant, Value). % we cannot have additional flag values when we are working with concrete specs :- attr("node", node(ID, Package)), attr("hash", node(ID, Package), Hash), attr("node_flag", node(ID, Package), node_flag(FlagType, Flag, _, _)), - not imposed_constraint(Hash, "node_flag", Package, node_flag(FlagType, Flag, _, _)), - internal_error("imposed hash without imposing all flag values"). + not imposed_constraint(Hash, "node_flag", Package, node_flag(FlagType, Flag, _, _)). -#defined condition/2. +#defined condition/1. #defined subcondition/2. #defined condition_requirement/3. #defined condition_requirement/4. @@ -725,11 +839,9 @@ concrete(PackageNode) :- attr("hash", PackageNode, _), attr("node", PackageNode) % Dependencies of any type imply that one package "depends on" another depends_on(PackageNode, DependencyNode) :- attr("depends_on", PackageNode, DependencyNode, _). -% a dependency holds if its condition holds and if it is not external or -% concrete. We chop off dependencies for externals, and dependencies of -% concrete specs don't need to be resolved -- they arise from the concrete -% specs themselves. -attr("track_dependencies", Node) :- build(Node), not external(Node). +% a dependency holds if its condition holds and if it is not concrete. +% Dependencies of concrete specs don't need to be resolved -- they arise from the concrete specs themselves. +attr("track_dependencies", Node) :- build(Node). % If a dependency holds on a package node, there must be one and only one dependency node satisfying it 1 { attr("depends_on", PackageNode, node(0..Y-1, Dependency), Type) : max_dupes(Dependency, Y) } 1 @@ -796,30 +908,41 @@ error(100, "{0} and {1} must depend on the same {2}", ExtensionParent, Extension X != Y. -#defined dependency_type/2. 
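The 1 { ... } 1 cardinality heads above pin a dependency to exactly one duplicate node. A toy, standalone version of the pattern (the package name and max_dupes value are made up), runnable with the clingo Python API:

from clingo import Control

# A build dependency on "cmake" (max_dupes = 2) must land on exactly one of
# the candidate duplicates node(0, "cmake") and node(1, "cmake").
PROGRAM = """
max_dupes("cmake", 2).
1 { depends_on("root", node(0..Y-1, "cmake")) : max_dupes("cmake", Y) } 1.
"""

ctl = Control(["0"])  # "0" = enumerate all answer sets
ctl.add("base", [], PROGRAM)
ctl.ground([("base", [])])
ctl.solve(on_model=print)  # two models, one per candidate duplicate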
- %----------------------------------------------------------------------------- % Conflicts %----------------------------------------------------------------------------- -error(1, Msg) - :- attr("node", node(ID, Package)), - pkg_fact(Package, conflict(TriggerID, ConstraintID, Msg)), - % node(ID1, TriggerPackage) is node(ID2, Package) in most, but not all, cases - condition_holds(TriggerID, node(ID1, TriggerPackage)), - condition_holds(ConstraintID, node(ID2, Package)), - unification_set(X, node(ID2, Package)), - unification_set(X, node(ID1, TriggerPackage)), - not external(node(ID, Package)), % ignore conflicts for externals - not attr("hash", node(ID, Package), _). % ignore conflicts for installed packages + +% Most conflicts are internal to the same package +conflict_is_cross_package(Package, TriggerID) :- + pkg_fact(Package, conflict(TriggerID, _, _)), + pkg_fact(TriggerPackage, condition_trigger(TriggerID, _)), + TriggerPackage != Package. + +conflict_internal(Package, TriggerID, ConstraintID, Msg) :- + pkg_fact(Package, conflict(TriggerID, ConstraintID, Msg)), + not conflict_is_cross_package(Package, TriggerID). + +% Case 1: conflict is within the same package +error(1, Msg) :- + conflict_internal(Package, TriggerID, ConstraintID, Msg), + condition_holds(TriggerID, node(ID, Package)), + condition_holds(ConstraintID, node(ID, Package)), + build(node(ID, Package)). % ignore conflicts for installed packages + +% Case 2: Cross-package conflicts (Rare case - slower) +error(1, Msg) :- + build(node(ID, Package)), + conflict_is_cross_package(Package, TriggerID), + pkg_fact(Package, conflict(TriggerID, ConstraintID, Msg)), + condition_holds(TriggerID, node(ID1, TriggerPackage)), + condition_holds(ConstraintID, node(ID, Package)), + unification_set(X, node(ID, Package)), + unification_set(X, node(ID1, TriggerPackage)). %----------------------------------------------------------------------------- % Virtual dependencies %----------------------------------------------------------------------------- -% If the provider is set from the command line, its weight is 0 -possible_provider_weight(ProviderNode, VirtualNode, 0, "Set on the command line") - :- attr("provider_set", ProviderNode, VirtualNode). - % Enforces all virtuals to be provided, if multiple of them are provided together error(100, "Package '{0}' needs to provide both '{1}' and '{2}' together, but provides only '{1}'", Package, Virtual1, Virtual2) :- % This package provides 2 or more virtuals together @@ -839,8 +962,7 @@ error(100, "Package '{0}' needs to provide both '{1}' and '{2}' together, but pr % provider for that virtual then it depends on the provider node_depends_on_virtual(PackageNode, Virtual, Type) :- attr("dependency_holds", PackageNode, Virtual, Type), - virtual(Virtual), - not external(PackageNode). + virtual(Virtual). node_depends_on_virtual(PackageNode, Virtual) :- node_depends_on_virtual(PackageNode, Virtual, Type). @@ -850,15 +972,13 @@ node_depends_on_virtual(PackageNode, Virtual) :- node_depends_on_virtual(Package attr("virtual_on_edge", PackageNode, ProviderNode, Virtual) :- attr("dependency_holds", PackageNode, Virtual, Type), attr("depends_on", PackageNode, ProviderNode, Type), - provider(ProviderNode, node(_, Virtual)), - not external(PackageNode). + provider(ProviderNode, node(_, Virtual)). 
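The two-case conflict encoding above avoids the expensive cross-package join in the common case. The split itself is easy to state in plain Python; the data structures below are hypothetical stand-ins for the pkg_fact/2 relation:

from typing import Dict, List, Tuple

def partition_conflicts(
    conflicts: Dict[str, List[Tuple[int, int, str]]],  # package -> (trigger, constraint, msg)
    trigger_owner: Dict[int, str],  # trigger id -> package owning the trigger condition
) -> Tuple[List[tuple], List[tuple]]:
    internal, cross = [], []
    for package, facts in conflicts.items():
        for trigger_id, constraint_id, msg in facts:
            if trigger_owner[trigger_id] == package:
                # Case 1: trigger and constraint live on the same package, so
                # the rule can join on a single node(ID, Package)
                internal.append((package, trigger_id, constraint_id, msg))
            else:
                # Case 2: rare cross-package conflict; needs the slower
                # unification-set join
                cross.append((package, trigger_id, constraint_id, msg))
    return internal, cross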
% If a virtual node is in the answer set, it must be either a virtual root, % or used somewhere :- attr("virtual_node", node(_, Virtual)), not attr("virtual_on_incoming_edges", _, Virtual), - not attr("virtual_root", node(_, Virtual)), - internal_error("virtual node does not match incoming edge"). + not attr("virtual_root", node(_, Virtual)). attr("virtual_on_incoming_edges", ProviderNode, Virtual) :- attr("virtual_on_edge", _, ProviderNode, Virtual). @@ -921,100 +1041,45 @@ do_not_impose(EffectID, node(X, Package)) explicitly_requested_root(PossibleProvider), not self_build_requirement(PossibleProvider, ProviderNode), not explicitly_requested_root(ProviderNode), - not language(Virtual), - internal_error("If a root can provide a virtual, it must be the provider"). + not language(Virtual). % A package cannot be the actual provider for a virtual if it does not % fulfill the conditions to provide that virtual :- provider(PackageNode, node(VirtualID, Virtual)), - not virtual_condition_holds(PackageNode, Virtual), - internal_error("Virtual when provides not respected"). + not virtual_condition_holds(PackageNode, Virtual). -#defined provided_together/4. +#defined provided_together/3. %----------------------------------------------------------------------------- % Virtual dependency weights %----------------------------------------------------------------------------- -% A provider has different possible weights depending on its preference. This rule ensures that -% we select the weight, among the possible ones, that minimizes the overall objective function. -1 { provider_weight(DependencyNode, VirtualNode, Weight) : - possible_provider_weight(DependencyNode, VirtualNode, Weight, _) } 1 - :- provider(DependencyNode, VirtualNode), internal_error("Package provider weights must be unique"). - % Any configured provider has a weight based on index in the preference list -possible_provider_weight(node(ProviderID, Provider), node(VirtualID, Virtual), Weight, "default") +provider_weight(node(ProviderID, Provider), node(VirtualID, Virtual), Weight) :- provider(node(ProviderID, Provider), node(VirtualID, Virtual)), - default_provider_preference(Virtual, Provider, Weight). + provider_weight_from_config(Virtual, Provider, Weight). % Any non-configured provider has a default weight of 100 -possible_provider_weight(node(ProviderID, Provider), VirtualNode, 100, "fallback") - :- provider(node(ProviderID, Provider), VirtualNode). +provider_weight(node(ProviderID, Provider), node(VirtualID, Virtual), 100) + :- provider(node(ProviderID, Provider), node(VirtualID, Virtual)), + not provider_weight_from_config(Virtual, Provider, _). % do not warn if generated program contains none of these. #defined virtual/1. -#defined virtual_condition_holds/2. -#defined external/1. #defined buildable_false/1. -#defined default_provider_preference/3. +#defined provider_weight_from_config/3. %----------------------------------------------------------------------------- % External semantics %----------------------------------------------------------------------------- -% if a package is external its version must be one of the external versions -{ external_version(node(ID, Package), Version, Weight): - pkg_fact(Package, version_declared(Version, Weight, "external")) } - :- external(node(ID, Package)). - -error(100, "Attempted to use external for '{0}' which does not satisfy any configured external spec version", Package) - :- external(node(ID, Package)), - not external_version(node(ID, Package), _, _). 
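After the provider-weight refactor above, weights are deterministic: a configured weight wins, and everything else falls back to 100, with no choice rule left to the solver. A minimal sketch of that lookup, where the dictionary stands in for provider_weight_from_config/3 facts and the preference values are invented:

from typing import Dict, Tuple

def provider_weight(config: Dict[Tuple[str, str], int], virtual: str, provider: str) -> int:
    # Mirrors the two rules: configured weight if present, else the fallback 100
    return config.get((virtual, provider), 100)

weights = {("mpi", "openmpi"): 0, ("mpi", "mpich"): 1}  # hypothetical preferences
assert provider_weight(weights, "mpi", "mpich") == 1
assert provider_weight(weights, "mpi", "intel-mpi") == 100  # non-configured fallback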
- -error(100, "Attempted to use external for '{0}' which does not satisfy a unique configured external spec version", Package) - :- external(node(ID, Package)), - 2 { external_version(node(ID, Package), Version, Weight) }. - -version_weight(PackageNode, Weight) :- external_version(PackageNode, Version, Weight). -attr("version", PackageNode, Version) :- external_version(PackageNode, Version, Weight). +external(PackageNode) :- attr("external", PackageNode). -% if a package is not buildable, only externals or hashed specs are allowed -external(node(ID, Package)) +% if a package is not buildable, only concrete specs are allowed +error(1000, "Cannot build {0}, since it is configured `buildable:false` and no externals satisfy the request", Package) :- buildable_false(Package), attr("node", node(ID, Package)), - not attr("hash", node(ID, Package), _). - -% a package is a real_node if it is not external -real_node(PackageNode) :- attr("node", PackageNode), not external(PackageNode). - -% a package is external if we are using an external spec for it -external(PackageNode) :- attr("external_spec_selected", PackageNode, _). - -% we can't use the weight for an external version if we don't use the -% corresponding external spec. -:- attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - pkg_fact(Package, version_declared(Version, Weight, "external")), - not external(node(ID, Package)), - internal_error("External weight used for internal spec"). - -% determine if an external spec has been selected -attr("external_spec_selected", node(ID, Package), LocalIndex) :- - attr("external_conditions_hold", node(ID, Package), LocalIndex), - attr("node", node(ID, Package)), - not attr("hash", node(ID, Package), _). - -% At most a single external can be active for a given node -:- attr("node", node(ID, Package)), - 2 { attr("external_spec_selected", node(ID, Package), LocalIndex) }. - -% Allow clingo not to impose an external condition. This is needed to allow solving -% for externals that are indistinguishable besides compiler "metadata" e.g. -% mpich@4.2 %gcc vs. mpich@4.2 %clang -{ do_not_impose(EffectID, node(X, Package)) } :- - trigger_condition_holds(TriggerID, node(X, Package)), - trigger_and_effect(Package, TriggerID, EffectID), - imposed_constraint(EffectID, "external_conditions_hold", Package, _). + build(node(ID, Package)). % Account for compiler annotation on externals :- not attr("root", ExternalNode), @@ -1026,29 +1091,24 @@ attr("external_spec_selected", node(ID, Package), LocalIndex) :- attr("external_build_requirement", ExternalNode, node_requirement("node", Compiler)), attr("external_build_requirement", ExternalNode, node_requirement("node_version_satisfies", Compiler, Constraint)). -error(100, "Cannot use an external for {0}, because the {1} compiler is overspecified. Omit version requirement.", ExternalPackage, Compiler) :- - external(node(X, ExternalPackage)), - not attr("external_build_requirement", node(X, ExternalPackage), node_requirement("node_version_satisfies", Compiler, _)), - attr("external_build_requirement", ExternalNode, node_requirement("node", Compiler)), - attr("direct_dependency", node(X, ExternalPackage), node_requirement("node_version_satisfies", Compiler, Constraint)). 
- -% If we require a compiler on an external root, be sure it's mentioned in the external spec -:- external(ExternalNode), - not attr("external_build_requirement", ExternalNode, node_requirement("node", Compiler)), - attr("direct_dependency", ExternalNode, node_requirement("node",Compiler)). - -% it cannot happen that a spec is external, but none of the external specs -% conditions hold. -error(100, "Attempted to use external for '{0}' which does not satisfy any configured external spec", Package) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _). - %----------------------------------------------------------------------------- % Config required semantics %----------------------------------------------------------------------------- -package_in_dag(Node) :- attr("node", Node). -package_in_dag(Node) :- attr("virtual_node", Node). +package_in_dag(Node) :- attr("node", Node). +package_in_dag(Node) :- attr("virtual_node", Node). +package_in_dag(Node) :- attr("reused_virtual_node", Node). + +reused_provider(node(CompilerHash, CompilerName), node(CompilerHash, Virtual)) :- + language(Virtual), + attr("hash", PackageNode, Hash), + attr("concrete_build_dependency", PackageNode, CompilerName, CompilerHash), + attr("virtual_on_build_edge", PackageNode, CompilerName, Virtual). + +attr("reused_virtual_node", VirtualNode) :- reused_provider(_, VirtualNode). + +trigger_node(ID, node(PackageID, Package), node(VirtualID, Virtual)) :- pkg_fact(Virtual, trigger_id(ID)), reused_provider(node(PackageID, Package), node(VirtualID, Virtual)). + activate_requirement(node(ID, Package), X) :- package_in_dag(node(ID, Package)), @@ -1061,6 +1121,16 @@ activate_requirement(node(ID, Package), X) :- condition_holds(Y, node(ID, Package)), requirement_conditional(Package, X, Y). +activate_requirement(node(ID, Package), X) :- + package_in_dag(node(ID, Package)), + package_in_dag(node(CID, ConditionPackage)), + requirement_group(Package, X), + pkg_fact(ConditionPackage, condition(Y)), + related(node(CID, ConditionPackage), node(ID, Package)), + condition_holds(Y, node(CID, ConditionPackage)), + requirement_conditional(Package, X, Y), + ConditionPackage != Package. + requirement_group_satisfied(node(ID, Package), GroupID) :- 1 { requirement_is_met(GroupID, ConditionID, node(ID, Package)) } 1, requirement_policy(Package, GroupID, "one_of"), @@ -1104,6 +1174,15 @@ requirement_is_met(GroupID, ConditionID, node(X, Package)) :- subcondition(SubconditionID, ConditionID), pkg_fact(Package, condition_effect(SubconditionID, EffectID)). +% clingo decided not to impose a condition for a subcondition that holds +exclude_requirement_weight(ConditionID, Package, GroupID) :- + requirement_group_member(ConditionID, Package, GroupID), + condition_holds(ConditionID, node(X, Package)), + subcondition(SubconditionID, ConditionID), + condition_holds(SubconditionID, node(X, Package)), + do_not_impose(EffectID, node(X, Package)), + pkg_fact(Package, condition_effect(SubconditionID, EffectID)). + % Do not impose requirements, if the conditional requirement is not active do_not_impose(EffectID, node(ID, Package)) :- trigger_condition_holds(TriggerID, node(ID, Package)), @@ -1112,6 +1191,14 @@ do_not_impose(EffectID, node(ID, Package)) :- requirement_group_member(ConditionID , Package, RequirementID), not activate_requirement(node(ID, Package), RequirementID). 
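The reused_provider/2 rules above let a concrete compiler hash act as the provider node for a language virtual. Roughly, in Python (all inputs are hypothetical stand-ins for the corresponding solver facts):

from typing import Iterable, Set, Tuple

LANGUAGES = {"c", "cxx", "fortran"}  # stand-in for language/1 facts

def reused_providers(
    concrete_build_deps: Iterable[Tuple[str, str, str]],  # (package_node, compiler, hash)
    virtual_on_build_edge: Set[Tuple[str, str, str]],     # (package_node, compiler, virtual)
) -> Set[Tuple[Tuple[str, str], Tuple[str, str]]]:
    providers = set()
    for pkg, compiler, chash in concrete_build_deps:
        for virtual in LANGUAGES:
            if (pkg, compiler, virtual) in virtual_on_build_edge:
                # node(CompilerHash, CompilerName) provides node(CompilerHash, Virtual)
                providers.add(((chash, compiler), (chash, virtual)))
    return providers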
+do_not_impose(EffectID, node(Hash, Virtual)) :- + trigger_condition_holds(TriggerID, node(Hash, Virtual)), + pkg_fact(Virtual, condition_trigger(ConditionID, TriggerID)), + pkg_fact(Virtual, condition_effect(ConditionID, EffectID)), + requirement_group_member(ConditionID , Virtual, RequirementID), + reused_provider(_, node(Hash, Virtual)), + activate_requirement(node(Hash, Virtual), RequirementID). + % When we have a required provider, we need to ensure that the provider/2 facts respect % the requirement. This is particularly important for packages that could provide multiple % virtuals independently @@ -1124,6 +1211,7 @@ required_provider(Provider, Virtual) error(1, "Cannot use {1} for the {0} virtual, but that is required", Virtual, Provider) :- required_provider(Provider, Virtual), + not reused_provider(node(_, Provider), node(_, Virtual)), not provider(node(_, Provider), node(_, Virtual)). % TODO: the following choice rule allows the solver to add compiler @@ -1136,9 +1224,19 @@ error(1, "Cannot use {1} for the {0} virtual, but that is required", Virtual, Pr pkg_fact(Package, condition_effect(ConditionID, EffectID)), imposed_constraint(EffectID, "node_flag_set", Package, NodeFlag). +{ attr("node_flag", node(ID, Package), NodeFlag) } :- + requirement_group_member(ConditionID, Virtual, RequirementID), + activate_requirement(node(VirtualID, Virtual), RequirementID), + provider(node(ID, Package), VirtualNode), + pkg_fact(Virtual, condition_effect(ConditionID, EffectID)), + imposed_constraint(EffectID, "node_flag_set", Package, NodeFlag). + requirement_weight(node(ID, Package), Group, W) :- W = #min { - Z : requirement_has_weight(Y, Z), condition_holds(Y, node(ID, Package)), requirement_group_member(Y, Package, Group); + Z : requirement_has_weight(Y, Z), + condition_holds(Y, node(ID, Package)), + requirement_group_member(Y, Package, Group), + not exclude_requirement_weight(Y, Package, Group); % We need this to avoid an annoying warning during the solve % concretize.lp:1151:5-11: info: tuple ignored: % #sup@73 @@ -1147,14 +1245,31 @@ requirement_weight(node(ID, Package), Group, W) :- requirement_policy(Package, Group, "any_of"), requirement_group_satisfied(node(ID, Package), Group). -error(100, "cannot satisfy a requirement for package '{0}'.", Package) :- +requirement_penalty(node(ID, Package), Group, W) :- + requirement_weight(node(ID, Package), Group, W), + not language(Package). + +requirement_penalty(PackageNode, Language, Group, W) :- + requirement_weight(node(ID, Language), Group, W), + language(Language), + provider(ProviderNode, node(ID, Language)), + attr("virtual_on_edge", PackageNode, ProviderNode, Language). + +requirement_penalty(PackageNode, Language, Group, W) :- + requirement_weight(node(CompilerHash, Language), Group, W), + language(Language), + reused_provider(node(CompilerHash, CompilerName), node(CompilerHash, Language)), + attr("concrete_build_dependency", PackageNode, CompilerName, CompilerHash), + attr("virtual_on_build_edge", PackageNode, CompilerName, Language). + +error(60000, "cannot satisfy a requirement for package '{0}'.", Package) :- activate_requirement(node(ID, Package), X), requirement_group(Package, X), not requirement_message(Package, X, _), not requirement_group_satisfied(node(ID, Package), X). 
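The #min aggregate above now skips members flagged by exclude_requirement_weight. As a plain-Python approximation (ignoring the any_of fallback member used to silence the #sup warning), the group weight is the smallest weight among satisfied, non-excluded members:

from typing import Dict, Optional, Set

def requirement_weight(
    member_weights: Dict[int, int],  # condition id -> weight
    holds: Set[int],                 # condition ids that hold on the node
    excluded: Set[int],              # ids hit by exclude_requirement_weight
) -> Optional[int]:
    candidates = [w for cid, w in member_weights.items() if cid in holds and cid not in excluded]
    return min(candidates) if candidates else None  # None ~ no weight contributed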
-error(10, Message) :- +error(50000, Message) :- activate_requirement(node(ID, Package), X), requirement_group(Package, X), requirement_message(Package, X, Message), @@ -1204,8 +1319,7 @@ variant_defined(PackageNode, Name) :- variant_definition(PackageNode, Name, _). % for two or more variant definitions, this prefers the last one defined. :- node_has_variant(node(NodeID, Package), Name, SelectedVariantID), variant_definition(node(NodeID, Package), Name, VariantID), - VariantID > SelectedVariantID, - internal_error("If the solver picks a variant descriptor it must use that variant descriptor"). + VariantID > SelectedVariantID. % B: Associating applicable package rules with nodes @@ -1234,11 +1348,25 @@ variant_default_value(node(NodeID, Package), VariantName, Value) :- node_has_variant(node(NodeID, Package), VariantName, _), attr("variant_default_value_from_cli", node(min_dupe_id, Package), VariantName, Value). +variant_penalty(node(NodeID, Package), Variant, Value, Penalty) :- + node_has_variant(node(NodeID, Package), Variant, VariantID), + attr("variant_value", node(NodeID, Package), Variant, Value), + pkg_fact(Package, variant_penalty(VariantID, Value, Penalty)), + not variant_default_value(node(NodeID, Package), Variant, Value), + % variants set explicitly from a directive don't count as non-default + not attr("variant_set", node(NodeID, Package), Variant, _), + % variant values forced by propagation don't count as non-default + not propagate(node(NodeID, Package), variant_value(Variant, _, _)). + % -- Associate the definition's possible values with the node variant_possible_value(node(NodeID, Package), VariantName, Value) :- node_has_variant(node(NodeID, Package), VariantName, VariantID), pkg_fact(Package, variant_possible_value(VariantID, Value)). +variant_possible_value(node(NodeID, Package), VariantName, Value) :- + node_has_variant(node(NodeID, Package), VariantName, VariantID), + pkg_fact(Package, variant_penalty(VariantID, Value, _)). + variant_value_from_disjoint_sets(node(NodeID, Package), VariantName, Value1, Set1) :- node_has_variant(node(NodeID, Package), VariantName, VariantID), pkg_fact(Package, variant_value_from_disjoint_sets(VariantID, Value1, Set1)). @@ -1246,7 +1374,8 @@ variant_value_from_disjoint_sets(node(NodeID, Package), VariantName, Value1, Set % -- Associate definition's arity with the node variant_single_value(node(NodeID, Package), VariantName) :- node_has_variant(node(NodeID, Package), VariantName, VariantID), - not variant_type(VariantID, "multi"). + variant_type(VariantID, VariantType), + VariantType != "multi". % C: Determining variant values on each node @@ -1260,14 +1389,13 @@ attr("variant_selected", node(ID, Package), Variant, Value, VariantType, Variant build(node(ID, Package)). % we can choose variant values from all the possible values for the node -{ - attr("variant_selected", node(ID, Package), Variant, Value, VariantType, VariantID) - : variant_possible_value(node(ID, Package), Variant, Value) +1 { + attr("variant_selected", PackageNode, Variant, Value, VariantType, VariantID) + : variant_possible_value(PackageNode, Variant, Value) } :- - attr("node", node(ID, Package)), - node_has_variant(node(ID, Package), Variant, VariantID), + node_has_variant(PackageNode, Variant, VariantID), variant_type(VariantID, VariantType), - build(node(ID, Package)). + build(PackageNode). 
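variant_penalty/4 above generalizes the old boolean "non-default" flag to configurable per-value penalties. A small sketch of the guards it applies (the inputs are hypothetical stand-ins for the facts of the same names):

def variant_penalty(value, default, explicitly_set, propagated, penalties):
    """Penalty for choosing `value`; zero when the choice doesn't count as non-default."""
    if value == default or explicitly_set or propagated:
        return 0
    return penalties.get(value, 0)

assert variant_penalty("debug", "release", False, False, {"debug": 5}) == 5
assert variant_penalty("release", "release", False, False, {"debug": 5}) == 0
assert variant_penalty("debug", "release", True, False, {"debug": 5}) == 0  # set explicitly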
% variant_selected is only needed for reconstruction on the python side, so we can ignore it here attr("variant_value", PackageNode, Variant, Value) :- @@ -1286,7 +1414,7 @@ error(100, "Cannot set variant '{0}' for package '{1}' because the variant condi build(node(ID, Package)). % at most one variant value for single-valued variants. -error(100, "'{0}' requires conflicting variant values 'Spec({1}={2})' and 'Spec({1}={3})'", Package, Variant, Value1, Value2) +error(1000, "'{0}' requires conflicting variant values 'Spec({1}={2})' and 'Spec({1}={3})'", Package, Variant, Value1, Value2) :- attr("node", node(ID, Package)), node_has_variant(node(ID, Package), Variant, _), variant_single_value(node(ID, Package), Variant), @@ -1340,43 +1468,18 @@ error(100, "{0} variant '{1}' cannot have values '{2}' and '{3}' as they come fr :- attr("variant_set", node(ID, Package), Variant, Value), not attr("variant_value", node(ID, Package), Variant, Value). - internal_error("If a variant is set to a value it must have that value"). -% The rules below allow us to prefer default values for variants -% whenever possible. If a variant is set in a spec, or if it is -% specified in an external, we score it as if it was a default value. -variant_not_default(node(ID, Package), Variant, Value) - :- attr("variant_value", node(ID, Package), Variant, Value), - not variant_default_value(node(ID, Package), Variant, Value), - % variants set explicitly on the CLI don't count as non-default - not attr("variant_set", node(ID, Package), Variant, Value), - % variant values forced by propagation don't count as non-default - not propagate(node(ID, Package), variant_value(Variant, Value, _)), - % variants set on externals that we could use don't count as non-default - % this makes spack prefer to use an external over rebuilding with the - % default configuration - not external_with_variant_set(node(ID, Package), Variant, Value), - attr("node", node(ID, Package)). -% A default variant value that is not used +% A default variant value that is not used; this only makes sense for multi-valued variants variant_default_not_used(node(ID, Package), Variant, Value) :- variant_default_value(node(ID, Package), Variant, Value), - node_has_variant(node(ID, Package), Variant, _), + node_has_variant(node(ID, Package), Variant, VariantID), + variant_type(VariantID, VariantType), VariantType == "multi", not attr("variant_value", node(ID, Package), Variant, Value), not propagate(node(ID, Package), variant_value(Variant, _, _)), % variant set explicitly don't count for this metric not attr("variant_set", node(ID, Package), Variant, _), attr("node", node(ID, Package)). -% The variant is set in an external spec -external_with_variant_set(node(NodeID, Package), Variant, Value) - :- attr("variant_value", node(NodeID, Package), Variant, Value), - condition_requirement(TriggerID, "variant_value", Package, Variant, Value), - trigger_and_effect(Package, TriggerID, EffectID), - imposed_constraint(EffectID, "external_conditions_hold", Package, _), - external(node(NodeID, Package)), - attr("node", node(NodeID, Package)). - % Treat 'none' in a special way - it cannot be combined with other % values even if the variant is multi-valued error(100, "{0} variant '{1}' cannot have values '{2}' and 'none'", Package, Variant, Value) @@ -1397,7 +1500,8 @@ node_has_variant(PackageNode, Variant, VariantID) variant_single_value(PackageNode, Variant) :- node_has_variant(PackageNode, Variant, VariantID), auto_variant(Variant, VariantID), - not variant_type(VariantID, "multi"). 
+ variant_type(VariantID, VariantType), + VariantType != "multi". % to respect requirements/preferences we need to define that an auto_variant is set { attr("variant_set", node(ID, Package), Variant, VariantValue)} @@ -1410,9 +1514,8 @@ variant_single_value(PackageNode, Variant) % suppress warnings about this atom being unset. It's only set if some % spec or some package sets it, and without this, clingo will give % warnings like 'info: atom does not occur in any rule head'. -#defined variant_default_value/3. #defined variant_default_value_from_packages_yaml/3. -#defined variant_default_value_from_package_py/3. +#defined variant_default_value_from_package_py/2. %----------------------------------------------------------------------------- % Propagation semantics %----------------------------------------------------------------------------- @@ -1534,7 +1637,6 @@ error(100, "{0} and {1} cannot both propagate compiler flags '{2}' to {3}", Sour attr("node_version_satisfies", node(Y, Compiler), VersionRange) :- propagate(node(X, Package), node_version_satisfies(Compiler, VersionRange)), attr("depends_on", node(X, Package), node(Y, Compiler), "build"), - not external(node(X, Package)), not runtime(Package). attr("node_version_satisfies", node(X, Runtime), VersionRange) :- @@ -1554,16 +1656,69 @@ compiler_used_as_a_library(node(X, Child), Hash) :- attr("virtual_on_edge", PackageNode, CompilerNode2, "cxx"), CompilerNode1 != CompilerNode2. +% Compiler-unmixing: 1st rule +unification_set_compiler("root", CompilerNode, Language) :- + node_compiler(node(ID, Package), CompilerNode, Language), + no_compiler_mixing(Language), + not allow_mixing(Package), + unification_set("root", node(ID, Package)). + +% Compiler for a reused node +% This differs from compiler_from_reuse in that this is about x->y +% where y is a compiler and x is reused (compiler_from_reuse +% is concerned with reuse of the compiler itself) +reused_node_compiler(PackageNode, node(CompilerHash, Compiler), Language) :- + concrete(PackageNode), + attr("concrete_build_dependency", PackageNode, Compiler, CompilerHash), + attr("virtual_on_build_edge", PackageNode, Compiler, Language), + language(Language). + +% Compiler-unmixing: 2nd rule +% The compiler appears on a reused node as well as a built node. In +% that case there will be a generated node() with an ID. +% While easier to understand than rule 3, this rule in fact covers +% only a small set of use cases beyond rules 1 and 3: generally speaking, +% rule 1 ensures that all non-reused nodes get a consistent compiler, +% and rule 3 generates compiler IDs that almost always fail the count +% rule. Rule 3 does not "activate" in combination with rule 1 when +% it is possible to propagate a compiler to another built node in the +% unification set, so this rule is only really needed when the reused +% node compiler has a node(), but one associated with a different +% unification set. +unification_set_compiler("root", CompilerNode, Language) :- + reused_node_compiler(node(ID, Package), node(CompilerHash, Compiler), Language), + attr("hash", CompilerNode, CompilerHash), + no_compiler_mixing(Language), + not allow_mixing(Package), + unification_set("root", node(ID, Package)). + +% Compiler-unmixing: 3rd rule +% If the compiler only appears in reused nodes, then there is no node() +% for it; this will always generate an error unless all nodes in the +% root unification set are reused. 
+unification_set_compiler("root", node(CompilerHash, Compiler), Language) :- + reused_node_compiler(node(ID, Package), node(CompilerHash, Compiler), Language), + not attr("hash", _, CompilerHash), + no_compiler_mixing(Language), + not allow_mixing(Package), + unification_set("root", node(ID, Package)). + +#defined no_compiler_mixing/1. +#defined allow_mixing/1. + +% You can't have >1 compiler for a given language if mixing is disabled +error(100, "Compiler mixing is disabled") :- + #count { CompilerNode : unification_set_compiler("root", CompilerNode, Language) } > 1. %----------------------------------------------------------------------------- % Runtimes %----------------------------------------------------------------------------- % Check whether the DAG has any built package -has_built_packages() :- build(X), not external(X). +will_build_packages() :- build(X). % "gcc-runtime" is always built -:- concrete(node(X, "gcc-runtime")), has_built_packages(). +:- concrete(node(X, "gcc-runtime")), will_build_packages(). % The "gcc" linked to "gcc-runtime" must be used by at least another package :- attr("depends_on", node(X, "gcc-runtime"), node(Y, "gcc"), "build"), @@ -1578,7 +1733,7 @@ has_built_packages() :- build(X), not external(X). % NOTE: Currently we have a single allowed platform per DAG, therefore there is no % need to have additional optimization criteria. If we ever add cross-platform dags, % this needs to be changed. -:- 2 { allowed_platform(Platform) }, internal_error("More than one allowed platform detected"). +:- 2 { allowed_platform(Platform) }. 1 { attr("node_platform", PackageNode, Platform) : allowed_platform(Platform) } 1 :- attr("node", PackageNode). @@ -1627,7 +1782,7 @@ attr("node_os", PackageNode, OS) :- attr("node_os_set", PackageNode, OS), attr(" %----------------------------------------------------------------------------- % Each node has only one target chosen among the known targets -{ attr("node_target", PackageNode, Target) : target(Target) } :- attr("node", PackageNode). +1 { attr("node_target", PackageNode, Target) : target(Target) } 1 :- attr("node", PackageNode). % If a node must satisfy a target constraint, enforce it error(10, "'{0} target={1}' cannot satisfy constraint 'target={2}'", Package, Target, Constraint) @@ -1649,9 +1804,9 @@ error(100, "Cannot find compatible targets for {0} and {1}", Package, Dependency % Intermediate step for performance reasons % When the integrity constraint above was formulated including this logic % we suffered a substantial performance penalty -node_target_compatible(PackageNode, Target) - :- attr("node_target", PackageNode, MyTarget), - target_compatible(Target, MyTarget). +node_target_compatible(ChildNode, ParentTarget) + :- attr("node_target", ChildNode, ChildTarget), + target_compatible(ParentTarget, ChildTarget). #defined target_satisfies/2. compiler(Compiler) :- target_supported(Compiler, _, _). @@ -1665,8 +1820,7 @@ language_runtime("fortran-rt"). error(10, "Only external, or concrete, compilers are allowed for the {0} language", Language) :- provider(ProviderNode, node(_, Language)), language(Language), - not external(ProviderNode), - not concrete(ProviderNode). + build(ProviderNode). 
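The compiler-unmixing constraint rejects any answer set where more than one compiler serves the root unification set. A toy standalone version, simplified to a hard integrity constraint (the encoding above derives an error/2 atom instead), runnable with the clingo Python API:

from clingo import Control

# Two compilers are forced into the same set; the #count constraint,
# mirroring the one above, makes the program unsatisfiable.
PROGRAM = """
used("gcc"). used("clang").
:- #count { C : used(C) } > 1.
"""

ctl = Control()
ctl.add("base", [], PROGRAM)
ctl.ground([("base", [])])
print(ctl.solve())  # UNSAT: mixing rejected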
error(10, "{0} compiler '{2}@{3}' incompatible with 'target={1}'", Package, Target, Compiler, Version) :- attr("node_target", node(X, Package), Target), @@ -1712,6 +1866,12 @@ error(100, "'{0} target={1}' is not compatible with this machine", Package, Targ attr("node_flag", PackageNode, NodeFlag) :- attr("node_flag_set", PackageNode, NodeFlag). +% If we set "foo %bar cflags=A ^fee %bar cflags=B" we want two nodes for "bar" +error(100, "Cannot set multiple {0} values for {1} from cli", FlagType, Package) +:- attr("node_flag_set", node(X, Package), node_flag(FlagType, _, FlagGroup1, "literal")), + attr("node_flag_set", node(X, Package), node_flag(FlagType, _, FlagGroup2, "literal")), + FlagGroup1 < FlagGroup2. + %----------------------------------------------------------------------------- % Installed Packages %----------------------------------------------------------------------------- @@ -1729,8 +1889,7 @@ attr("node_flag", PackageNode, NodeFlag) :- attr("node_flag_set", PackageNode, N #defined hash_attr/7. { attr("hash", node(ID, PackageName), Hash): installed_hash(PackageName, Hash) } 1 :- - attr("node", node(ID, PackageName)), - internal_error("Package must resolve to at most 1 hash"). + attr("node", node(ID, PackageName)). % you can't choose an installed hash for a dev spec :- attr("hash", PackageNode, Hash), attr("variant_value", PackageNode, "dev_path", _). % You can't install a hash, if it is not installed @@ -1818,30 +1977,12 @@ build(PackageNode) :- attr("node", PackageNode), not concrete(PackageNode). % 100 - 199 Unshifted priorities. Currently only includes minimizing #builds and minimizing dupes. % 0 - 99 Priorities for non-built nodes. -treat_node_as_concrete(node(X, Package)) :- external(node(X, Package)). treat_node_as_concrete(node(X, Package)) :- attr("node", node(X, Package)), runtime(Package). build_priority(PackageNode, 200) :- build(PackageNode), attr("node", PackageNode), not treat_node_as_concrete(PackageNode). build_priority(PackageNode, 0) :- build(PackageNode), attr("node", PackageNode), treat_node_as_concrete(PackageNode). build_priority(PackageNode, 0) :- concrete(PackageNode), attr("node", PackageNode). -% don't assign versions from installed packages unless reuse is enabled -% NOTE: that "installed" means the declared version was only included because -% that package happens to be installed, NOT because it was asked for on the -% command line. If the user specifies a hash, the origin will be "spec". -% -% TODO: There's a slight inconsistency with this: if the user concretizes -% and installs `foo ^bar`, for some build dependency `bar`, and then later -% does a `spack install --fresh foo ^bar/abcde` (i.e.,the hash of `bar`, it -% currently *won't* force versions for `bar`'s build dependencies -- `--fresh` -% will instead build the latest bar. When we actually include transitive -% build deps in the solve, consider using them as a preference to resolve this. -:- attr("version", node(ID, Package), Version), - version_weight(node(ID, Package), Weight), - pkg_fact(Package, version_declared(Version, Weight, "installed")), - not optimize_for_reuse(). - - % This statement, which is a hidden feature of clingo, let us avoid cycles in the DAG #edge (A, B) : depends_on(A, B). @@ -1873,7 +2014,11 @@ opt_criterion(310, "requirement weight"). #minimize{ 0@310: #true }. #minimize { Weight@310,PackageNode,Group - : requirement_weight(PackageNode, Group, Weight) + : requirement_penalty(PackageNode, Group, Weight) +}. 
+#minimize { + Weight@310,PackageNode,Language,Group + : requirement_penalty(PackageNode, Language, Group, Weight) }. % Try hard to reuse installed packages (i.e., minimize the number built) @@ -1883,7 +2028,7 @@ opt_criterion(110, "number of packages to build (vs. reuse)"). opt_criterion(100, "number of nodes from the same package"). #minimize { 0@100: #true }. -#minimize { ID@100,Package : attr("node", node(ID, Package)) }. +#minimize { ID@100,Package : attr("node", node(ID, Package)), not self_build_requirement(_, node(ID, Package)) }. #minimize { ID@100,Package : attr("virtual_node", node(ID, Package)) }. #defined optimize_for_reuse/0. @@ -1911,13 +2056,19 @@ opt_criterion(70, "version badness (roots)"). version_weight(PackageNode, Weight), build_priority(PackageNode, Priority) }. +#minimize { + Penalty@70+Priority,PackageNode + : attr("root", PackageNode), + version_deprecation_penalty(PackageNode, Penalty), + build_priority(PackageNode, Priority) +}. -opt_criterion(65, "number of non-default variants (roots)"). +opt_criterion(65, "variant penalty (roots)"). #minimize{ 0@265: #true }. #minimize{ 0@65: #true }. #minimize { - 1@65+Priority,PackageNode,Variant,Value - : variant_not_default(PackageNode, Variant, Value), + Penalty@65+Priority,PackageNode,Variant,Value + : variant_penalty(PackageNode, Variant, Value, Penalty), attr("root", PackageNode), build_priority(PackageNode, Priority) }. @@ -1943,12 +2094,12 @@ opt_criterion(55, "default values of variants not being used (roots)"). }. % Try to use default variants or variants that have been set -opt_criterion(50, "number of non-default variants (non-roots)"). +opt_criterion(50, "variant penalty (non-roots)"). #minimize{ 0@250: #true }. #minimize{ 0@50: #true }. #minimize { - 1@50+Priority,PackageNode,Variant,Value - : variant_not_default(PackageNode, Variant, Value), + Penalty@50+Priority,PackageNode,Variant,Value + : variant_penalty(PackageNode, Variant, Value, Penalty), not attr("root", PackageNode), build_priority(PackageNode, Priority) }. @@ -1969,7 +2120,7 @@ opt_criterion(48, "preferred providers (non-roots)"). compiler_penalty(PackageNode, C-1) :- C = #count { CompilerNode : node_compiler(PackageNode, CompilerNode) }, - node_compiler(PackageNode, _). + node_compiler(PackageNode, _), C > 0. opt_criterion(46, "number of compilers used on the same node"). #minimize{ 0@246: #true }. @@ -1979,16 +2130,6 @@ opt_criterion(46, "number of compilers used on the same node"). : compiler_penalty(PackageNode, Penalty), build_priority(PackageNode, Priority) }. -% Minimize the ids of the providers, i.e. use as much as -% possible the first providers -opt_criterion(45, "number of duplicate virtuals needed"). -#minimize{ 0@245: #true }. -#minimize{ 0@45: #true }. -#minimize{ - Weight@45+Priority,ProviderNode,Virtual - : provider(ProviderNode, node(Weight, Virtual)), - build_priority(ProviderNode, Priority) -}. opt_criterion(40, "preferred compilers"). #minimize{ 0@240: #true }. @@ -2000,6 +2141,11 @@ opt_criterion(40, "preferred compilers"). build_priority(ProviderNode, Priority) }. +opt_criterion(41, "compiler penalty from reuse"). +#minimize{ 0@241: #true }. +#minimize{ 0@41: #true }. +#minimize{1@41,Hash : compiler_penalty_from_reuse(Hash)}. + opt_criterion(30, "non-preferred OS's"). #minimize{ 0@230: #true }. #minimize{ 0@30: #true }. @@ -2020,6 +2166,14 @@ opt_criterion(25, "version badness (non roots)"). not attr("root", node(X, Package)), not runtime(Package) }. 
+#minimize { + Penalty@25+Priority,node(X, Package) + : version_deprecation_penalty(node(X, Package), Penalty), + build_priority(node(X, Package), Priority), + not attr("root", node(X, Package)), + not runtime(Package) +}. + % Try to use all the default values of variants opt_criterion(20, "default values of variants not being used (non-roots)"). @@ -2084,28 +2238,38 @@ opt_criterion(3, "non-preferred targets (runtimes)"). runtime(Package) }. -% Choose more recent versions for nodes +% Prefer the most suitable providers as much as possible opt_criterion(2, "providers on edges"). #minimize{ 0@202: #true }. #minimize{ 0@2: #true }. #minimize{ - Weight@2,ParentNode,ProviderNode,Virtual - : provider_weight(ProviderNode, Virtual, Weight), + Weight@2,ParentNode,ProviderNode,node(X, Virtual) + : provider_weight(ProviderNode, node(X, Virtual), Weight), + max_dupes(Virtual, MaxDupes), MaxDupes > 1, not attr("root", ProviderNode), + language(Virtual), depends_on(ParentNode, ProviderNode) }. -% Choose more recent versions for nodes +% Try to use the latest versions of nodes as much as possible opt_criterion(1, "version badness on edges"). #minimize{ 0@201: #true }. #minimize{ 0@1: #true }. #minimize{ - Weight@1,ParentNode,PackageNode - : version_weight(PackageNode, Weight), - not attr("root", PackageNode), - depends_on(ParentNode, PackageNode) + Weight@1,ParentNode,node(X, Package) + : version_weight(node(X, Package), Weight), + multiple_unification_sets(Package), + not attr("root", node(X, Package)), + depends_on(ParentNode, node(X, Package)) }. +% Reduce symmetry on duplicates +opt_criterion(0, "penalty on symmetric duplicates"). +#minimize{ 0@200: #true }. +#minimize{ 0@0: #true }. +#minimize{ + Weight@0,PackageNode,Reason : duplicate_penalty(PackageNode, Weight, Reason) +}. %----------- diff --git a/lib/spack/spack/solver/core.py b/lib/spack/spack/solver/core.py index de6f69e74376d2..f31ad7c37c887c 100644 --- a/lib/spack/spack/solver/core.py +++ b/lib/spack/spack/solver/core.py @@ -1,12 +1,13 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) -"""Low-level wrappers around clingo API.""" +"""Low-level wrappers around clingo API and other basic functionality related to ASP""" import importlib import pathlib from types import ModuleType -from typing import Any, Callable, NamedTuple, Optional, Tuple, Union +from typing import Any, Callable, NamedTuple, Optional, Tuple +import spack.platforms from spack.llnl.util import lang @@ -27,30 +28,13 @@ def getter(node): ast_sym = _ast_getter("symbol", "term") -class AspObject: - """Object representing a piece of ASP code.""" - - -def _id(thing: Any) -> Union[str, int, AspObject]: - """Quote string if needed for it to be a valid identifier.""" - if isinstance(thing, bool): - return f'"{thing}"' - elif isinstance(thing, (AspObject, int)): - return thing - else: - if isinstance(thing, str): - # escape characters that cannot be in clingo strings - thing = thing.replace("\\", r"\\") - thing = thing.replace("\n", r"\n") - thing = thing.replace('"', r"\"") - return f'"{thing}"' - - -class AspVar(AspObject): +class AspVar: """Represents a variable in an ASP rule, allows for conditionally generating rules""" - def __init__(self, name: str): + __slots__ = ("name",) + + def __init__(self, name: str) -> None: self.name = name def __str__(self) -> str: @@ -58,16 +42,16 @@ def __str__(self) -> str: @lang.key_ordering -class AspFunction(AspObject): +class AspFunction: """A term in the ASP logic program""" - __slots__ = ["name", "args"] + __slots__ = ("name", "args") - def __init__(self, name: str, args: Optional[Tuple[Any, ...]] = None) -> None: + def __init__(self, name: str, args: Tuple[Any, ...] = ()) -> None: self.name = name - self.args = () if args is None else tuple(args) + self.args = args - def _cmp_key(self) -> Tuple[str, Optional[Tuple[Any, ...]]]: + def _cmp_key(self) -> Tuple[str, Tuple[Any, ...]]: return self.name, self.args def __call__(self, *args: Any) -> "AspFunction": @@ -94,15 +78,23 @@ def __call__(self, *args: Any) -> "AspFunction": return AspFunction(self.name, self.args + args) def __str__(self) -> str: - args = f"({','.join(str(_id(arg)) for arg in self.args)})" - return f"{self.name}{args}" + parts = [] + for arg in self.args: + if type(arg) is str: + arg = arg.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\"") + parts.append(f'"{arg}"') + elif type(arg) is AspFunction or type(arg) is int or type(arg) is AspVar: + parts.append(str(arg)) + else: + parts.append(f'"{arg}"') + return f"{self.name}({','.join(parts)})" def __repr__(self) -> str: return str(self) class _AspFunctionBuilder: - def __getattr__(self, name): + def __getattr__(self, name: str) -> AspFunction: return AspFunction(name) @@ -282,3 +274,25 @@ def extract_args(model, predicate_name): return their intermediate representation. """ return [intermediate_repr(sym.arguments) for sym in model if sym.name == predicate_name] + + +class SourceContext: + """Tracks context in which a Spec's clause-set is generated (i.e. + with ``SpackSolverSetup.spec_clauses``). + + Facts generated for the spec may include this context. + """ + + def __init__(self, *, source: Optional[str] = None): + # This can be "literal" for constraints that come from a user + # spec (e.g. from the command line); it can be the output of + # `ConstraintOrigin.append_type_suffix`; the default is "none" + # (which means it isn't important to keep track of the source + # in that case). 
+ self.source = "none" if source is None else source + self.wrap_node_requirement: Optional[bool] = None + + +def using_libc_compatibility() -> bool: + """Returns True if we are currently using libc compatibility""" + return spack.platforms.host().name == "linux" diff --git a/lib/spack/spack/solver/direct_dependency.lp b/lib/spack/spack/solver/direct_dependency.lp index 233a7e3f5cc352..ed2cd3352f1287 100644 --- a/lib/spack/spack/solver/direct_dependency.lp +++ b/lib/spack/spack/solver/direct_dependency.lp @@ -7,8 +7,7 @@ build_requirement(PackageNode, node(0..X-1, DirectDependency)) : max_dupes(DirectDependency, X); runtime_requirement(PackageNode, node(0..X-1, DirectDependency)) : max_dupes(DirectDependency, X) } 1 :- attr("direct_dependency", PackageNode, node_requirement("node", DirectDependency)), - not external(PackageNode), - not concrete(PackageNode). + build(PackageNode). 1 { concrete_build_requirement(PackageNode, DirectDependency); diff --git a/lib/spack/spack/solver/display.lp b/lib/spack/spack/solver/display.lp index 8c90e97063caf1..6fce3ac92785f3 100644 --- a/lib/spack/spack/solver/display.lp +++ b/lib/spack/spack/solver/display.lp @@ -41,13 +41,12 @@ #show node_has_variant/3. #show build/1. #show external/1. -#show external_version/3. #show trigger_and_effect/3. #show unification_set/2. #show provider/2. -#show condition_nodes/3. +#show condition_nodes/2. #show trigger_node/3. -#show imposed_nodes/3. +#show imposed_nodes/2. #show variant_single_value/2. % debug diff --git a/lib/spack/spack/solver/error_messages.lp b/lib/spack/spack/solver/error_messages.lp index 0027669648648e..0e83f3e293ce84 100644 --- a/lib/spack/spack/solver/error_messages.lp +++ b/lib/spack/spack/solver/error_messages.lp @@ -12,55 +12,58 @@ #program error_messages. -% Create a causal tree between trigger conditions by locating the effect conditions -% that are triggers for another condition. Condition2 is caused by Condition1 +% The following "condition_cause" rules create a causal tree between trigger +% conditions by locating the effect conditions that are triggers for another +% condition. In all these rules, Condition2 is caused by Condition1. + +% For condition_cause rules, it is not necessary to confirm that the attr is present +% on the node because `condition_holds` and `imposed_constraint` collectively +% guarantee it. +% We omit those facts to reduce the burden on the grounder/solver. + condition_cause(Condition2, ID2, Condition1, ID1) :- condition_holds(Condition2, node(ID2, Package2)), pkg_fact(Package2, condition_trigger(Condition2, Trigger)), condition_requirement(Trigger, Name, Package), - condition_nodes(Trigger, TriggerNode, node(ID, Package)), + condition_nodes(TriggerNode, node(ID, Package)), trigger_node(Trigger, TriggerNode, node(ID2, Package2)), - attr(Name, node(ID, Package)), condition_holds(Condition1, node(ID1, Package1)), pkg_fact(Package1, condition_effect(Condition1, Effect)), imposed_constraint(Effect, Name, Package), - imposed_nodes(Effect, node(ID1, Package1), node(ID, Package)). 
condition_cause(Condition2, ID2, Condition1, ID1) :- condition_holds(Condition2, node(ID2, Package2)), pkg_fact(Package2, condition_trigger(Condition2, Trigger)), condition_requirement(Trigger, Name, Package, A1), - condition_nodes(Trigger, TriggerNode, node(ID, Package)), + condition_nodes(TriggerNode, node(ID, Package)), trigger_node(Trigger, TriggerNode, node(ID2, Package2)), - attr(Name, node(ID, Package), A1), condition_holds(Condition1, node(ID1, Package1)), pkg_fact(Package1, condition_effect(Condition1, Effect)), imposed_constraint(Effect, Name, Package, A1), - imposed_nodes(Effect, node(ID1, Package1), node(ID, Package)). + imposed_nodes(node(ID1, Package1), node(ID, Package)). condition_cause(Condition2, ID2, Condition1, ID1) :- condition_holds(Condition2, node(ID2, Package2)), pkg_fact(Package2, condition_trigger(Condition2, Trigger)), condition_requirement(Trigger, Name, Package, A1, A2), - condition_nodes(Trigger, TriggerNode, node(ID, Package)), + condition_nodes(TriggerNode, node(ID, Package)), trigger_node(Trigger, TriggerNode, node(ID2, Package2)), - attr(Name, node(ID, Package), A1, A2), condition_holds(Condition1, node(ID1, Package1)), pkg_fact(Package1, condition_effect(Condition1, Effect)), imposed_constraint(Effect, Name, Package, A1, A2), - imposed_nodes(Effect, node(ID1, Package1), node(ID, Package)). + imposed_nodes(node(ID1, Package1), node(ID, Package)). condition_cause(Condition2, ID2, Condition1, ID1) :- condition_holds(Condition2, node(ID2, Package2)), pkg_fact(Package2, condition_trigger(Condition2, Trigger)), condition_requirement(Trigger, Name, Package, A1, A2, A3), - condition_nodes(Trigger, TriggerNode, node(ID, Package)), + condition_nodes(TriggerNode, node(ID, Package)), trigger_node(Trigger, TriggerNode, node(ID2, Package2)), - attr(Name, node(ID, Package), A1, A2, A3), condition_holds(Condition1, node(ID1, Package1)), pkg_fact(Package1, condition_effect(Condition1, Effect)), imposed_constraint(Effect, Name, Package, A1, A2, A3), - imposed_nodes(Effect, node(ID1, Package1), node(ID, Package)). + imposed_nodes(node(ID1, Package1), node(ID, Package)). % special condition cause for dependency conditions % we can't simply impose the existence of the node for dependency conditions @@ -69,30 +72,41 @@ condition_cause(Condition2, ID2, Condition1, ID1) :- condition_holds(Condition2, node(ID2, Package2)), pkg_fact(Package2, condition_trigger(Condition2, Trigger)), condition_requirement(Trigger, "node", Package), - condition_nodes(Trigger, TriggerNode, node(ID, Package)), + condition_nodes(TriggerNode, node(ID, Package)), trigger_node(Trigger, TriggerNode, node(ID2, Package2)), - attr("node", node(ID, Package)), condition_holds(Condition1, node(ID1, Package1)), pkg_fact(Package1, condition_effect(Condition1, Effect)), imposed_constraint(Effect, "dependency_holds", Parent, Package, Type), - imposed_nodes(Effect, node(ID1, Package1), node(ID, Package)), + imposed_nodes(node(ID1, Package1), node(ID, Package)), attr("depends_on", node(X, Parent), node(ID, Package), Type). % The literal startcauses is used to separate the variables that are part of the error from the % ones describing the causal tree of the error. After startcauses, each successive pair must be % a condition and a condition_set id for which it holds. +#defined choose_version/2. + % More specific error message if the version cannot satisfy some constraint % Otherwise covered by `no_version_error` and `versions_conflict_error`. 
-error(1, "Cannot satisfy '{0}@{1}'", Package, Constraint, startcauses, ConstraintCause, CauseID) +error(10000, "Cannot satisfy '{0}@{1}' 3({2})", Package, Constraint, Version, startcauses, ConstraintCause, CauseID) + :- attr("node_version_satisfies", node(ID, Package), Constraint), + pkg_fact(TriggerPkg, condition_effect(ConstraintCause, EffectID)), + imposed_constraint(EffectID, "node_version_satisfies", Package, Constraint), + condition_holds(ConstraintCause, node(CauseID, TriggerPkg)), + attr("version", node(ID, Package), Version), + not pkg_fact(Package, version_satisfies(Constraint, Version)), + choose_version(node(ID, Package), Version). + +error(100, "Cannot satisfy '{0}@{1}' 4({2})", Package, Constraint, Version, startcauses, ConstraintCause, CauseID) :- attr("node_version_satisfies", node(ID, Package), Constraint), pkg_fact(TriggerPkg, condition_effect(ConstraintCause, EffectID)), imposed_constraint(EffectID, "node_version_satisfies", Package, Constraint), condition_holds(ConstraintCause, node(CauseID, TriggerPkg)), attr("version", node(ID, Package), Version), - not pkg_fact(Package, version_satisfies(Constraint, Version)). + not pkg_fact(Package, version_satisfies(Constraint, Version)), + not choose_version(node(ID, Package), Version). -error(0, "Cannot satisfy '{0}@{1}' and '{0}@{2}", Package, Constraint1, Constraint2, startcauses, Cause1, C1ID, Cause2, C2ID) +error(0, "Cannot satisfy '{0}@{1}' and '{0}@{2}'", Package, Constraint1, Constraint2, startcauses, Cause1, C1ID, Cause2, C2ID) :- attr("node_version_satisfies", node(ID, Package), Constraint1), pkg_fact(TriggerPkg1, condition_effect(Cause1, EffectID1)), imposed_constraint(EffectID1, "node_version_satisfies", Package, Constraint1), @@ -106,7 +120,8 @@ error(0, "Cannot satisfy '{0}@{1}' and '{0}@{2}", Package, Constraint1, Constrai attr("version", node(ID, Package), Version), % version satisfies one but not the other pkg_fact(Package, version_satisfies(Constraint1, Version)), - not pkg_fact(Package, version_satisfies(Constraint2, Version)). + not pkg_fact(Package, version_satisfies(Constraint2, Version)), + Cause1 < Cause2. % causation tracking error for no or multiple virtual providers error(0, "Cannot find a valid provider for virtual {0}", Virtual, startcauses, Cause, CID) @@ -132,59 +147,6 @@ error(0, "'{0}' requires conflicting variant values 'Spec({1}={2})' and 'Spec({1 condition_holds(Cause2, node(X, TriggerPkg2)), Value1 < Value2. % see[1] in concretize.lp -% Externals have to specify external conditions -error(0, "Attempted to use external for {0} which does not satisfy any configured external spec version", Package, startcauses, ExternalCause, CID) - :- external(node(ID, Package)), - attr("external_spec_selected", node(ID, Package), Index), - imposed_constraint(EID, "external_conditions_hold", Package, Index), - pkg_fact(TriggerPkg, condition_effect(ExternalCause, EID)), - condition_holds(ExternalCause, node(CID, TriggerPkg)), - not external_version(node(ID, Package), _, _). - -error(0, "Attempted to build package {0} which is not buildable and does not have a satisfying external\n attr('{1}', '{2}') is an external constraint for {0} which was not satisfied", Package, Name, A1) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _), - imposed_constraint(EID, "external_conditions_hold", Package, _), - trigger_and_effect(Package, TID, EID), - condition_requirement(TID, Name, A1), - not attr(Name, node(_, A1)). 
- -error(0, "Attempted to build package {0} which is not buildable and does not have a satisfying external\n attr('{1}', '{2}', '{3}') is an external constraint for {0} which was not satisfied", Package, Name, A1, A2) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _), - imposed_constraint(EID, "external_conditions_hold", Package, _), - trigger_and_effect(Package, TID, EID), - condition_requirement(TID, Name, A1, A2), - not attr(Name, node(_, A1), A2). - -error(0, "Attempted to build package {0} which is not buildable and does not have a satisfying external\n attr('{1}', '{2}', '{3}', '{4}') is an external constraint for {0} which was not satisfied", Package, Name, A1, A2, A3) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _), - imposed_constraint(EID, "external_conditions_hold", Package, _), - trigger_and_effect(Package, TID, EID), - condition_requirement(TID, Name, A1, A2, A3), - not attr(Name, node(_, A1), A2, A3). - -error(0, "Attempted to build package {0} which is not buildable and does not have a satisfying external\n 'Spec({0} {1}={2})' is an external constraint for {0} which was not satisfied\n 'Spec({0} {1}={3})' required", Package, Variant, Value, OtherValue, startcauses, OtherValueCause, CID) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _), - imposed_constraint(EID, "external_conditions_hold", Package, _), - trigger_and_effect(Package, TID, EID), - condition_requirement(TID, "variant_value", Package, Variant, Value), - not attr("variant_value", node(ID, Package), Variant, Value), - attr("variant_value", node(ID, Package), Variant, OtherValue), - imposed_constraint(EID2, "variant_set", Package, Variant, OtherValue), - pkg_fact(TriggerPkg, condition_effect(OtherValueCause, EID2)), - condition_holds(OtherValueCause, node(CID, TriggerPkg)). - -error(0, "Attempted to build package {0} which is not buildable and does not have a satisfying external\n attr('{1}', '{2}', '{3}', '{4}', '{5}') is an external constraint for {0} which was not satisfied", Package, Name, A1, A2, A3, A4) - :- external(node(ID, Package)), - not attr("external_conditions_hold", node(ID, Package), _), - imposed_constraint(EID, "external_conditions_hold", Package, _), - trigger_and_effect(Package, TID, EID), - condition_requirement(TID, Name, A1, A2, A3, A4), - not attr(Name, node(_, A1), A2, A3, A4). - % error message with causes for conflicts error(0, Msg, startcauses, TriggerID, ID1, ConstraintID, ID2) :- attr("node", node(ID, Package)), @@ -194,8 +156,7 @@ error(0, Msg, startcauses, TriggerID, ID1, ConstraintID, ID2) condition_holds(ConstraintID, node(ID2, Package)), unification_set(X, node(ID2, Package)), unification_set(X, node(ID1, TriggerPackage)), - not external(node(ID, Package)), % ignore conflicts for externals - not attr("hash", node(ID, Package), _). % ignore conflicts for installed packages + build(node(ID, Package)). % ignore conflicts for concrete packages % variables to show #show error/2. @@ -233,6 +194,7 @@ error(0, Msg, startcauses, TriggerID, ID1, ConstraintID, ID2) #defined imposed_constraint/5. #defined imposed_constraint/6. #defined condition_cause/4. +#defined condition_nodes/2. #defined condition_requirement/3. #defined condition_requirement/4. #defined condition_requirement/5. @@ -244,5 +206,4 @@ error(0, Msg, startcauses, TriggerID, ID1, ConstraintID, ID2) #defined build/1. #defined node_has_variant/3. #defined provider/2. -#defined external_version/3. 
#defined variant_single_value/2. diff --git a/lib/spack/spack/solver/heuristic.lp b/lib/spack/spack/solver/heuristic.lp index 38b116a30b6fff..1baed82b7091f6 100644 --- a/lib/spack/spack/solver/heuristic.lp +++ b/lib/spack/spack/solver/heuristic.lp @@ -6,46 +6,35 @@ % Heuristic to speed-up solves %============================================================================= -#heuristic node_compiler(ParentNode, CompilerNode). [1200, init] -#heuristic node_compiler(ParentNode, CompilerNode). [ 6, factor] -#heuristic node_compiler(ParentNode, CompilerNode). [ -1, sign] -#heuristic node_compiler(ParentNode, CompilerNode) : attr("depends_on", ParentNode, CompilerNode, "build"), provider_weight(CompilerNode, Language, 0), language(Language). [1@2, sign] +% Decide about DAG atoms, before trying to guess facts used only in +% the internal representation +#heuristic attr("virtual_node", node(X, Virtual)). [80, level] +#heuristic attr("node", PackageNode). [80, level] +#heuristic attr("version", node(PackageID, Package), Version). [80, level] +#heuristic attr("variant_value", PackageNode, Variant, Value). [80, level] +#heuristic attr("node_target", node(PackageID, Package), Target). [80, level] #heuristic attr("virtual_node", node(X, Virtual)). [600, init] #heuristic attr("virtual_node", node(X, Virtual)). [-1, sign] #heuristic attr("virtual_node", node(0, Virtual)) : node_depends_on_virtual(PackageNode, Virtual). [1@2, sign] #heuristic attr("virtual_node", node(0, "c")). [1@3, sign] #heuristic attr("virtual_node", node(0, "cxx")). [1@3, sign] - -#heuristic unification_set(SetID, Node). [400, init] -#heuristic unification_set(SetID, Node). [ 4, factor] -#heuristic unification_set(SetID, Node). [ -1, sign] -#heuristic unification_set("root", node(0, "libc")). [ 1@2, sign] +#heuristic attr("virtual_node", node(0, "libc")). [1@3, sign] #heuristic attr("node", PackageNode). [300, init] #heuristic attr("node", PackageNode). [ 4, factor] #heuristic attr("node", PackageNode). [ -1, sign] #heuristic attr("node", node(0, Dependency)) : attr("dependency_holds", ParentNode, Dependency, Type), not virtual(Dependency). [1@2, sign] -#heuristic attr("depends_on", ParentNode, ChildNode, Type). [100, init] -#heuristic attr("depends_on", ParentNode, ChildNode, Type). [4, factor] -#heuristic attr("depends_on", ParentNode, ChildNode, Type). [-1, sign] -#heuristic attr("depends_on", ParentNode, node(0, Dependency), Type) : attr("dependency_holds", ParentNode, Dependency, Type), not virtual(Dependency). [1@2, sign] -#heuristic attr("depends_on", ParentNode, ProviderNode , Type) : node_depends_on_virtual(ParentNode, Virtual, Type), provider(ProviderNode, node(VirtualID, Virtual)). [1@2, sign] - #heuristic attr("version", node(PackageID, Package), Version). [30, init] #heuristic attr("version", node(PackageID, Package), Version). [-1, sign] #heuristic attr("version", node(PackageID, Package), Version) : pkg_fact(Package, version_declared(Version, 0)), attr("node", node(PackageID, Package)). [ 1@2, sign] -#heuristic version_weight(node(PackageID, Package), Weight). [30, init] -#heuristic version_weight(node(PackageID, Package), Weight). [-1 , sign] -#heuristic version_weight(node(PackageID, Package), 0 ) : attr("node", node(PackageID, Package)). [ 1@2, sign] +% Use default targets +#heuristic attr("node_target", node(PackageID, Package), Target). [-1, sign] +#heuristic attr("node_target", node(PackageID, Package), Target) : target_weight(Target, 0), attr("node", node(PackageID, Package)). 
[1@2, sign] % Use default variants #heuristic attr("variant_value", PackageNode, Variant, Value). [30, init] #heuristic attr("variant_value", PackageNode, Variant, Value). [-1, sign] #heuristic attr("variant_value", PackageNode, Variant, Value) : variant_default_value(PackageNode, Variant, Value), attr("node", PackageNode). [1@2, sign] - -% Use default targets -#heuristic attr("node_target", node(PackageID, Package), Target). [-1, sign] -#heuristic attr("node_target", node(PackageID, Package), Target) : target_weight(Target, 0), attr("node", node(PackageID, Package)). [1@2, sign] diff --git a/lib/spack/spack/solver/input_analysis.py b/lib/spack/spack/solver/input_analysis.py index 5679f7d7f8b37d..647bb5eb820d67 100644 --- a/lib/spack/spack/solver/input_analysis.py +++ b/lib/spack/spack/solver/input_analysis.py @@ -8,6 +8,7 @@ import spack.vendor.archspec.cpu import spack.binary_distribution +import spack.concretize import spack.config import spack.deptypes as dt import spack.platforms @@ -258,9 +259,9 @@ def __init__( store: spack.store.Store, binary_index: spack.binary_distribution.BinaryCacheIndex, ): - super().__init__(configuration=configuration, repo=repo) self.store = store self.binary_index = binary_index + super().__init__(configuration=configuration, repo=repo) @lang.memoized def providers_for(self, virtual_str: str) -> List[spack.spec.Spec]: @@ -363,7 +364,10 @@ class Counter: """ def __init__( - self, specs: List["spack.spec.Spec"], tests: bool, possible_graph: PossibleDependencyGraph + self, + specs: List[spack.spec.Spec], + tests: spack.concretize.TestsType, + possible_graph: PossibleDependencyGraph, ) -> None: self.possible_graph = possible_graph self.specs = specs @@ -426,7 +430,10 @@ def possible_packages_facts(self, gen: "spack.solver.asp.ProblemInstanceBuilder" class MinimalDuplicatesCounter(NoDuplicatesCounter): def __init__( - self, specs: List["spack.spec.Spec"], tests: bool, possible_graph: PossibleDependencyGraph + self, + specs: List[spack.spec.Spec], + tests: spack.concretize.TestsType, + possible_graph: PossibleDependencyGraph, ) -> None: super().__init__(specs, tests, possible_graph) self._link_run: Set[str] = set() @@ -465,10 +472,9 @@ def possible_packages_facts(self, gen, fn): gen.h2("Packages with multiple possible nodes (build-tools)") default = spack.config.CONFIG.get("concretizer:duplicates:max_dupes:default", 2) + duplicates = spack.config.CONFIG.get("concretizer:duplicates:max_dupes", {}) for package_name in sorted(self.possible_dependencies() & build_tools): - max_dupes = spack.config.CONFIG.get( - f"concretizer:duplicates:max_dupes:{package_name}", default - ) + max_dupes = duplicates.get(package_name, default) gen.fact(fn.max_dupes(package_name, max_dupes)) if max_dupes > 1: gen.fact(fn.multiple_unification_sets(package_name)) @@ -481,9 +487,7 @@ def possible_packages_facts(self, gen, fn): gen.h2("Maximum number of nodes (other virtuals)") for package_name in sorted(self.possible_virtuals() - self._link_run_virtuals): - max_dupes = spack.config.CONFIG.get( - f"concretizer:duplicates:max_dupes:{package_name}", default - ) + max_dupes = duplicates.get(package_name, default) gen.fact(fn.max_dupes(package_name, max_dupes)) gen.newline() @@ -528,7 +532,9 @@ def possible_packages_facts(self, gen, fn): def create_counter( - specs: List[spack.spec.Spec], tests: bool, possible_graph: PossibleDependencyGraph + specs: List[spack.spec.Spec], + tests: spack.concretize.TestsType, + possible_graph: PossibleDependencyGraph, ) -> Counter: strategy = 
spack.config.CONFIG.get("concretizer:duplicates:strategy", "none") if strategy == "full": diff --git a/lib/spack/spack/solver/libc_compatibility.lp b/lib/spack/spack/solver/libc_compatibility.lp index fe6aefda9584cd..1b3b7fdd7cb530 100644 --- a/lib/spack/spack/solver/libc_compatibility.lp +++ b/lib/spack/spack/solver/libc_compatibility.lp @@ -29,7 +29,7 @@ error(100, "Cannot reuse {0} since we cannot determine libc compatibility", Reus not attr("compatible_libc", node(R, ReusedPackage), _, _). % The libc provider must be one that a compiler can target -:- has_built_packages(), +:- will_build_packages(), provider(node(X, LibcPackage), node(0, "libc")), attr("node", node(X, LibcPackage)), attr("version", node(X, LibcPackage), LibcVersion), diff --git a/lib/spack/spack/solver/os_compatibility.lp b/lib/spack/spack/solver/os_compatibility.lp index 952dbc3a86ad43..e9c808469d23ca 100644 --- a/lib/spack/spack/solver/os_compatibility.lp +++ b/lib/spack/spack/solver/os_compatibility.lp @@ -11,6 +11,7 @@ %============================================================================= % macOS +os_compatible("tahoe", "sequoia"). os_compatible("sequoia", "sonoma"). os_compatible("sonoma", "ventura"). os_compatible("ventura", "monterey"). diff --git a/lib/spack/spack/solver/requirements.py b/lib/spack/spack/solver/requirements.py index b8bb9660810925..8f0de29f939d51 100644 --- a/lib/spack/spack/solver/requirements.py +++ b/lib/spack/spack/solver/requirements.py @@ -2,13 +2,15 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import enum -from typing import List, NamedTuple, Optional, Sequence +from typing import List, NamedTuple, Optional, Sequence, Tuple import spack.config import spack.error import spack.package_base import spack.repo import spack.spec +import spack.traverse +from spack.enums import PropagationPolicy from spack.llnl.util import tty from spack.util.spack_yaml import get_mark_from_yaml_data @@ -24,17 +26,76 @@ class RequirementKind(enum.Enum): PACKAGE = enum.auto() +class RequirementOrigin(enum.Enum): + """Origin of a requirement""" + + REQUIRE_YAML = enum.auto() + PREFER_YAML = enum.auto() + CONFLICT_YAML = enum.auto() + DIRECTIVE = enum.auto() + INPUT_SPECS = enum.auto() + + class RequirementRule(NamedTuple): """Data class to collect information on a requirement""" pkg_name: str policy: str + origin: RequirementOrigin requirements: Sequence[spack.spec.Spec] condition: spack.spec.Spec kind: RequirementKind message: Optional[str] +def preference( + pkg_name: str, + constraint: spack.spec.Spec, + condition: spack.spec.Spec = spack.spec.Spec(), + origin: RequirementOrigin = RequirementOrigin.PREFER_YAML, + kind: RequirementKind = RequirementKind.PACKAGE, + message: Optional[str] = None, +) -> RequirementRule: + """Returns a preference rule""" + # A strong preference is defined as: + # + # require: + # - any_of: [spec_str, "@:"] + return RequirementRule( + pkg_name=pkg_name, + policy="any_of", + requirements=[constraint, spack.spec.Spec("@:")], + kind=kind, + condition=condition, + origin=origin, + message=message, + ) + + +def conflict( + pkg_name: str, + constraint: spack.spec.Spec, + condition: spack.spec.Spec = spack.spec.Spec(), + origin: RequirementOrigin = RequirementOrigin.CONFLICT_YAML, + kind: RequirementKind = RequirementKind.PACKAGE, + message: Optional[str] = None, +) -> RequirementRule: + """Returns a conflict rule""" + # A conflict is defined as: + # + # require: + # - one_of: [spec_str, "@:"] + return RequirementRule( + pkg_name=pkg_name, + policy="one_of", + 
requirements=[constraint, spack.spec.Spec("@:")], + kind=kind, + condition=condition, + origin=origin, + message=message, + ) + + class RequirementParser: """Parses requirements from package.py files and configuration, and returns rules.""" @@ -42,15 +103,36 @@ def __init__(self, configuration: spack.config.Configuration): self.config = configuration self.runtime_pkgs = spack.repo.PATH.packages_with_tags("runtime") self.compiler_pkgs = spack.repo.PATH.packages_with_tags("compiler") + self.preferences_from_input: List[Tuple[spack.spec.Spec, str]] = [] def rules(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]: result = [] + result.extend(self.rules_from_input_specs(pkg)) result.extend(self.rules_from_package_py(pkg)) result.extend(self.rules_from_require(pkg)) result.extend(self.rules_from_prefer(pkg)) result.extend(self.rules_from_conflict(pkg)) return result + def parse_rules_from_input_specs(self, specs: Sequence[spack.spec.Spec]): + self.preferences_from_input.clear() + for edge in spack.traverse.traverse_edges(specs, root=False): + if edge.propagation == PropagationPolicy.PREFERENCE: + for constraint in _split_edge_on_virtuals(edge): + root_name = edge.parent.name + self.preferences_from_input.append((constraint, root_name)) + + def rules_from_input_specs(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]: + return [ + preference( + pkg.name, + constraint=s, + condition=spack.spec.Spec(f"{root_name} ^[deptypes=link,run]{pkg.name}"), + origin=RequirementOrigin.INPUT_SPECS, + ) + for s, root_name in self.preferences_from_input + ] + def rules_from_package_py(self, pkg: spack.package_base.PackageBase) -> List[RequirementRule]: rules = [] for when_spec, requirement_list in pkg.requirements.items(): @@ -63,6 +145,7 @@ def rules_from_package_py(self, pkg: spack.package_base.PackageBase) -> List[Req kind=RequirementKind.PACKAGE, condition=when_spec, message=message, + origin=RequirementOrigin.DIRECTIVE, ) ) return rules @@ -92,20 +175,9 @@ def _rules_from_preferences( ) -> List[RequirementRule]: result = [] for item in preferences: - spec, condition, message = self._parse_prefer_conflict_item(item) + spec, condition, msg = self._parse_prefer_conflict_item(item) result.append( - # A strong preference is defined as: - # - # require: - # - any_of: [spec_str, "@:"] - RequirementRule( - pkg_name=pkg_name, - policy="any_of", - requirements=[spec, spack.spec.Spec("@:")], - kind=kind, - message=message, - condition=condition, - ) + preference(pkg_name, constraint=spec, condition=condition, kind=kind, message=msg) ) return result @@ -118,20 +190,9 @@ def _rules_from_conflicts( ) -> List[RequirementRule]: result = [] for item in conflicts: - spec, condition, message = self._parse_prefer_conflict_item(item) + spec, condition, msg = self._parse_prefer_conflict_item(item) result.append( - # A conflict is defined as: - # - # require: - # - one_of: [spec_str, "@:"] - RequirementRule( - pkg_name=pkg_name, - policy="one_of", - requirements=[spec, spack.spec.Spec("@:")], - kind=kind, - message=message, - condition=condition, - ) + conflict(pkg_name, constraint=spec, condition=condition, kind=kind, message=msg) ) return result @@ -148,7 +209,7 @@ def _parse_prefer_conflict_item(self, item): return spec, condition, message def _raw_yaml_data(self, pkg_name: str, *, section: str, virtual: bool = False): - config = self.config.get("packages") + config = self.config.get_config("packages") data = config.get(pkg_name, {}).get(section, []) kind = RequirementKind.PACKAGE @@ -209,6 
+270,7 @@ def _rules_from_requirements( kind=kind, message=requirement.get("message"), condition=when, + origin=RequirementOrigin.REQUIRE_YAML, ) ) return rules @@ -239,13 +301,28 @@ def reject_requirement_constraint( except spack.error.SpackError as e: tty.debug( f"[{__name__}] Rejecting the default '{constraint}' requirement " - f"on '{pkg_name}': {str(e)}", - level=2, + f"on '{pkg_name}': {str(e)}" ) return True return False +def _split_edge_on_virtuals(edge: spack.spec.DependencySpec) -> List[spack.spec.Spec]: + """Splits the edge on virtuals and removes the parent.""" + if not edge.virtuals: + return [spack.spec.Spec(str(edge.copy(keep_parent=False)))] + + result = [] + # We split on virtuals so that "%%c,cxx=gcc" enforces "%%c=gcc" and "%%cxx=gcc" separately + for v in edge.virtuals: + t = edge.copy(keep_parent=False, keep_virtuals=False) + t.update_virtuals(v) + t.when = spack.spec.Spec(f"%{v}") + result.append(spack.spec.Spec(str(t))) + + return result + + def parse_spec_from_yaml_string(string: str, *, named: bool = False) -> spack.spec.Spec: """Parse a spec from YAML and add file/line info to errors, if it's available. diff --git a/lib/spack/spack/solver/reuse.py b/lib/spack/spack/solver/reuse.py new file mode 100644 index 00000000000000..81c2274f0b0362 --- /dev/null +++ b/lib/spack/spack/solver/reuse.py @@ -0,0 +1,389 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +import enum +import functools +from typing import Any, Callable, List, Mapping + +import spack.binary_distribution +import spack.config +import spack.environment +import spack.llnl.path +import spack.repo +import spack.spec +import spack.store +import spack.traverse +from spack.externals import ( + ExternalSpecsParser, + complete_architecture, + complete_variants_and_architecture, + extract_dicts_from_configuration, +) + +from .runtimes import all_libcs + + +class SpecFilter: + """Given a method to produce a list of specs, this class can filter them according to + different criteria. + """ + + def __init__( + self, + factory: Callable[[], List[spack.spec.Spec]], + is_usable: Callable[[spack.spec.Spec], bool], + include: List[str], + exclude: List[str], + ) -> None: + """ + Args: + factory: factory to produce a list of specs + is_usable: predicate that takes a spec as input and returns False if the spec + should not be considered for this filter, True otherwise.
+ include: if present, a "good" spec must match at least one entry in the list + exclude: if present, a "good" spec must not match any entry in the list + """ + self.factory = factory + self.is_usable = is_usable + self.include = include + self.exclude = exclude + + def is_selected(self, s: spack.spec.Spec) -> bool: + if not self.is_usable(s): + return False + + if self.include and not any(s.satisfies(c) for c in self.include): + return False + + if self.exclude and any(s.satisfies(c) for c in self.exclude): + return False + + return True + + def selected_specs(self) -> List[spack.spec.Spec]: + return [s for s in self.factory() if self.is_selected(s)] + + @staticmethod + def from_store(configuration, *, packages_with_externals, include, exclude) -> "SpecFilter": + """Constructs a filter that takes the specs from the current store.""" + is_reusable = functools.partial( + _is_reusable, packages_with_externals=packages_with_externals, local=True + ) + factory = functools.partial(_specs_from_store, configuration=configuration) + return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) + + @staticmethod + def from_buildcache(*, packages_with_externals, include, exclude) -> "SpecFilter": + """Constructs a filter that takes the specs from the configured buildcaches.""" + is_reusable = functools.partial( + _is_reusable, packages_with_externals=packages_with_externals, local=False + ) + return SpecFilter( + factory=_specs_from_mirror, is_usable=is_reusable, include=include, exclude=exclude + ) + + @staticmethod + def from_environment(*, packages_with_externals, include, exclude, env) -> "SpecFilter": + is_reusable = functools.partial( + _is_reusable, packages_with_externals=packages_with_externals, local=True + ) + factory = functools.partial(_specs_from_environment, env=env) + return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) + + @staticmethod + def from_environment_included_concrete( + *, + packages_with_externals, + include: List[str], + exclude: List[str], + env: spack.environment.Environment, + included_concrete: str, + ) -> "SpecFilter": + is_reusable = functools.partial( + _is_reusable, packages_with_externals=packages_with_externals, local=True + ) + factory = functools.partial( + _specs_from_environment_included_concrete, env=env, included_concrete=included_concrete + ) + return SpecFilter(factory=factory, is_usable=is_reusable, include=include, exclude=exclude) + + @staticmethod + def from_packages_yaml( + *, external_parser: ExternalSpecsParser, packages_with_externals, include, exclude + ) -> "SpecFilter": + is_reusable = functools.partial( + _is_reusable, packages_with_externals=packages_with_externals, local=True + ) + return SpecFilter( + external_parser.all_specs, is_usable=is_reusable, include=include, exclude=exclude + ) + + +def _has_runtime_dependencies(spec: spack.spec.Spec) -> bool: + # TODO (compiler as nodes): this function contains specific names from builtin, and should + # be made more general + if "gcc" in spec and "gcc-runtime" not in spec: + return False + + if "intel-oneapi-compilers" in spec and "intel-oneapi-runtime" not in spec: + return False + + return True + + +def _is_reusable(spec: spack.spec.Spec, packages_with_externals, local: bool) -> bool: + """A spec is reusable if it's not a dev spec, and it's either imported from the Cray + manifest, not external, or an external with a matching packages.yaml entry. The latter + prevents two issues: + + 1.
Externals in build caches: avoid installing an external that exists on the build machine but is + not available on the target machine + 2. Local externals: avoid reusing an external if the local config changes. This helps in + particular when a user removes an external from packages.yaml, and expects the change + to take effect immediately. + + Arguments: + spec: the spec to check + packages_with_externals: the pre-processed packages configuration + local: True if the specs come from a local source (store, environment, or + packages.yaml), False if they come from a build cache + """ + if "dev_path" in spec.variants: + return False + + if spec.name == "compiler-wrapper": + return False + + if not spec.external: + return _has_runtime_dependencies(spec) + + # Cray external manifest externals are always reusable + if local: + _, record = spack.store.STORE.db.query_by_spec_hash(spec.dag_hash()) + if record and record.origin == "external-db": + return True + + try: + provided = spack.repo.PATH.get(spec).provided_virtual_names() + except spack.repo.RepoError: + provided = [] + + for name in {spec.name, *provided}: + for entry in packages_with_externals.get(name, {}).get("externals", []): + expected_prefix = entry.get("prefix") + if expected_prefix is not None: + expected_prefix = spack.llnl.path.path_to_os_path(expected_prefix)[0] + if ( + spec.satisfies(entry["spec"]) + and spec.external_path == expected_prefix + and spec.external_modules == entry.get("modules") + ): + return True + + return False + + +def _specs_from_store(configuration): + store = spack.store.create(configuration) + with store.db.read_transaction(): + return store.db.query(installed=True) + + +def _specs_from_mirror(): + try: + return spack.binary_distribution.update_cache_and_get_specs() + except (spack.binary_distribution.FetchCacheError, IndexError): + # this is raised when no mirrors had indices. + # TODO: update mirror configuration so it can indicate that the + # TODO: source cache (or any mirror really) doesn't have binaries. + return [] + + +def _specs_from_environment(env): + """Return all concrete specs from the environment.
This includes all included concrete specs.""" + if env: + return list(spack.traverse.traverse_nodes([s for _, s in env.concretized_specs()])) + else: + return [] + + +def _specs_from_environment_included_concrete(env, included_concrete): + """Return only the concrete specs that the environment includes from included_concrete""" + if env: + assert included_concrete in env.included_concrete_envs + return [concrete for concrete in env.included_specs_by_hash[included_concrete].values()] + else: + return [] + + +class ReuseStrategy(enum.Enum): + ROOTS = enum.auto() + DEPENDENCIES = enum.auto() + NONE = enum.auto() + + +def create_external_parser( + packages_with_externals: Any, completion_mode: str +) -> ExternalSpecsParser: + """Get externals from a pre-processed packages.yaml (with implicit externals).""" + external_dicts = extract_dicts_from_configuration(packages_with_externals) + if completion_mode == "default_variants": + complete_fn = complete_variants_and_architecture + elif completion_mode == "architecture_only": + complete_fn = complete_architecture + else: + raise ValueError( + f"Unknown value for concretizer:externals:completion: {completion_mode!r}" + ) + return ExternalSpecsParser(external_dicts, complete_node=complete_fn) + + +class ReusableSpecsSelector: + """Selects specs that can be reused during concretization.""" + + def __init__( + self, + configuration: spack.config.Configuration, + external_parser: ExternalSpecsParser, + packages_with_externals: Any, + ) -> None: + self.configuration = configuration + self.store = spack.store.create(configuration) + self.reuse_strategy = ReuseStrategy.ROOTS + + reuse_yaml = self.configuration.get("concretizer:reuse", False) + self.reuse_sources = [] + if not isinstance(reuse_yaml, Mapping): + self.reuse_sources.append( + SpecFilter.from_packages_yaml( + external_parser=external_parser, + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ) + ) + if reuse_yaml is False: + self.reuse_strategy = ReuseStrategy.NONE + return + + if reuse_yaml == "dependencies": + self.reuse_strategy = ReuseStrategy.DEPENDENCIES + self.reuse_sources.extend( + [ + SpecFilter.from_store( + configuration=self.configuration, + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ), + SpecFilter.from_buildcache( + packages_with_externals=packages_with_externals, include=[], exclude=[] + ), + SpecFilter.from_environment( + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + env=spack.environment.active_environment(), # with all concrete includes + ), + ] + ) + else: + has_external_source = False + roots = reuse_yaml.get("roots", True) + if roots is True: + self.reuse_strategy = ReuseStrategy.ROOTS + else: + self.reuse_strategy = ReuseStrategy.DEPENDENCIES + default_include = reuse_yaml.get("include", []) + default_exclude = reuse_yaml.get("exclude", []) + default_sources = [{"type": "local"}, {"type": "buildcache"}] + for source in reuse_yaml.get("from", default_sources): + include = source.get("include", default_include) + exclude = source.get("exclude", default_exclude) + if source["type"] == "environment" and "path" in source: + env_dir = spack.environment.as_env_dir(source["path"]) + active_env = spack.environment.active_environment() + if active_env and env_dir in active_env.included_concrete_envs: + # If the environment is included as a concrete environment, use the + # local copy of specs in the active environment.
+ # note: included concrete environments are only updated at concretization + # time, and reuse needs to match the included specs. + self.reuse_sources.append( + SpecFilter.from_environment_included_concrete( + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + env=active_env, + included_concrete=env_dir, + ) + ) + else: + # If the environment is not included as a concrete environment, use the + # current specs from its lockfile. + self.reuse_sources.append( + SpecFilter.from_environment( + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + env=spack.environment.environment_from_name_or_dir(env_dir), + ) + ) + elif source["type"] == "environment": + # reusing from the current environment implicitly reuses from all of the + # included concrete environments + self.reuse_sources.append( + SpecFilter.from_environment( + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + env=spack.environment.active_environment(), + ) + ) + elif source["type"] == "local": + self.reuse_sources.append( + SpecFilter.from_store( + self.configuration, + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + ) + ) + elif source["type"] == "buildcache": + self.reuse_sources.append( + SpecFilter.from_buildcache( + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + ) + ) + elif source["type"] == "external": + has_external_source = True + if include: + # Since libcs are implicit externals, we need to implicitly include them + include = include + sorted(all_libcs()) # type: ignore[type-var] + self.reuse_sources.append( + SpecFilter.from_packages_yaml( + external_parser=external_parser, + packages_with_externals=packages_with_externals, + include=include, + exclude=exclude, + ) + ) + + # If "external" is not specified, we assume that all externals have to be included + if not has_external_source: + self.reuse_sources.append( + SpecFilter.from_packages_yaml( + external_parser=external_parser, + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ) + ) + + def reusable_specs(self, specs: List[spack.spec.Spec]) -> List[spack.spec.Spec]: + result = [] + for reuse_source in self.reuse_sources: + result.extend(reuse_source.selected_specs()) + # If we only want to reuse dependencies, remove the root specs + if self.reuse_strategy == ReuseStrategy.DEPENDENCIES: + result = [spec for spec in result if not any(root in spec for root in specs)] + + return result diff --git a/lib/spack/spack/solver/runtimes.py b/lib/spack/spack/solver/runtimes.py new file mode 100644 index 00000000000000..db54085c350faa --- /dev/null +++ b/lib/spack/spack/solver/runtimes.py @@ -0,0 +1,322 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +import itertools +from typing import Any, Dict, Set, Tuple + +import spack.compilers.config +import spack.compilers.libraries +import spack.config +import spack.repo +import spack.spec +import spack.util.libc +import spack.version + +from .core import SourceContext, fn, using_libc_compatibility +from .versions import Provenance + + +class RuntimePropertyRecorder: + """An object of this class is injected in callbacks to compilers, to let them declare + properties of the runtimes they support and of the runtimes they provide, and to add + runtime dependencies to the nodes using said compiler. + + The usage of the object is the following. 
First, a runtime package name or the wildcard + "*" is passed as an argument to __call__, to set which kind of package we are referring to. + Then we can call one method with a directive-like API. + + Examples: + >>> pkg = RuntimePropertyRecorder(setup) + >>> # Every package compiled with %gcc has a link dependency on 'gcc-runtime' + >>> pkg("*").depends_on( + ... "gcc-runtime", + ... when="%gcc", + ... type="link", + ... description="If any package uses %gcc, it depends on gcc-runtime" + ... ) + >>> # The version of gcc-runtime is the same as the %gcc used to "compile" it + >>> pkg("gcc-runtime").requires("@=9.4.0", when="%gcc@=9.4.0") + """ + + def __init__(self, setup): + self._setup = setup + self.rules = [] + self.runtime_conditions = set() + self.injected_dependencies = set() + # State of this object set in the __call__ method, and reset after + # each directive-like method + self.current_package = None + + def __call__(self, package_name: str) -> "RuntimePropertyRecorder": + """Sets a package name for the next directive-like method call""" + assert self.current_package is None, f"state was already set to '{self.current_package}'" + self.current_package = package_name + return self + + def reset(self): + """Resets the current state.""" + self.current_package = None + + def depends_on(self, dependency_str: str, *, when: str, type: str, description: str) -> None: + """Injects conditional dependencies on packages. + + Conditional dependencies can be either "real" packages or virtual dependencies. + + Args: + dependency_str: the dependency spec to inject + when: anonymous condition that a package must meet to get the dependency + type: dependency type + description: human-readable description of the rule for adding the dependency + """ + # TODO: The API for this function is not final, and is still subject to change. At + # TODO: the moment, we implemented only the features strictly needed for the + # TODO: functionality currently provided by Spack, and we assert nothing else is required. + msg = "the 'depends_on' method can be called only with pkg('*')" + assert self.current_package == "*", msg + + when_spec = spack.spec.Spec(when) + assert not when_spec.name, "only anonymous when specs are accepted" + + dependency_spec = spack.spec.Spec(dependency_str) + if dependency_spec.versions != spack.version.any_version: + self._setup.version_constraints.add((dependency_spec.name, dependency_spec.versions)) + + self.injected_dependencies.add(dependency_spec) + body_str, node_variable = self.rule_body_from(when_spec) + + head_clauses = self._setup.spec_clauses(dependency_spec, body=False) + runtime_pkg = dependency_spec.name + is_virtual = head_clauses[0].args[0] == "virtual_node" + main_rule = ( + f"% {description}\n" + f'1 {{ attr("depends_on", {node_variable}, node(0..X-1, "{runtime_pkg}"), "{type}") :' + f' max_dupes("{runtime_pkg}", X)}} 1:-\n' + f"{body_str}." + ) + if is_virtual: + main_rule = ( + f"% {description}\n" + f'attr("dependency_holds", {node_variable}, "{runtime_pkg}", "{type}") :-\n' + f"{body_str}."
+ ) + + self.rules.append(main_rule) + for clause in head_clauses: + if clause.args[0] == "node": + continue + runtime_node = f'node(RuntimeID, "{runtime_pkg}")' + head_str = str(clause).replace(f'"{runtime_pkg}"', runtime_node) + depends_on_constraint = ( + f' attr("depends_on", {node_variable}, {runtime_node}, "{type}"),\n' + ) + if is_virtual: + depends_on_constraint = ( + f' attr("depends_on", {node_variable}, ProviderNode, "{type}"),\n' + f" provider(ProviderNode, {runtime_node}),\n" + ) + + rule = f"{head_str} :-\n" f"{depends_on_constraint}" f"{body_str}." + self.rules.append(rule) + + self.reset() + + @staticmethod + def node_for(name: str) -> str: + return f'node(ID{name.replace("-", "_")}, "{name}")' + + def rule_body_from(self, when_spec: "spack.spec.Spec") -> Tuple[str, str]: + """Computes the rule body from a "when" spec, and returns it, along with the + node variable. + """ + + node_placeholder = "XXX" + node_variable = "node(ID, Package)" + when_substitutions = {} + for s in when_spec.traverse(root=False): + when_substitutions[f'"{s.name}"'] = self.node_for(s.name) + when_spec.name = node_placeholder + body_clauses = self._setup.spec_clauses(when_spec, body=True) + for clause in body_clauses: + if clause.args[0] == "virtual_on_incoming_edges": + # Substitute: attr("virtual_on_incoming_edges", ProviderNode, Virtual) + # with: attr("virtual_on_edge", ParentNode, ProviderNode, Virtual) + # (avoid adding virtuals everywhere, if a single edge needs it) + _, provider, virtual = clause.args + clause.args = "virtual_on_edge", node_placeholder, provider, virtual + + # Check for abstract hashes in the body + for s in when_spec.traverse(root=False): + if s.abstract_hash: + body_clauses.append(fn.attr("hash", s.name, s.abstract_hash)) + + body_str = ",\n".join(f" {x}" for x in body_clauses) + body_str = body_str.replace(f'"{node_placeholder}"', f"{node_variable}") + for old, replacement in when_substitutions.items(): + body_str = body_str.replace(old, replacement) + return body_str, node_variable + + def requires(self, impose: str, *, when: str): + """Injects conditional requirements on a given package. 
+ + Args: + impose: constraint to be imposed + when: condition triggering the constraint + """ + msg = "the 'requires' method cannot be called with pkg('*') or without setting the package" + assert self.current_package is not None and self.current_package != "*", msg + + imposed_spec = spack.spec.Spec(f"{self.current_package}{impose}") + when_spec = spack.spec.Spec(f"{self.current_package}{when}") + + assert imposed_spec.versions.concrete, f"{impose} must have a concrete version" + + # Add versions to possible versions + for s in (imposed_spec, when_spec): + if not s.versions.concrete: + continue + self._setup.possible_versions[s.name][s.version].append(Provenance.RUNTIME) + + self.runtime_conditions.add((imposed_spec, when_spec)) + self.reset() + + def propagate(self, constraint_str: str, *, when: str): + msg = "the 'propagate' method can be called only with pkg('*')" + assert self.current_package == "*", msg + + when_spec = spack.spec.Spec(when) + assert not when_spec.name, "only anonymous when specs are accepted" + + when_substitutions = {} + for s in when_spec.traverse(root=False): + when_substitutions[f'"{s.name}"'] = self.node_for(s.name) + + body_str, node_variable = self.rule_body_from(when_spec) + constraint_spec = spack.spec.Spec(constraint_str) + + constraint_clauses = self._setup.spec_clauses(constraint_spec, body=False) + for clause in constraint_clauses: + if clause.args[0] == "node_version_satisfies": + self._setup.version_constraints.add( + (constraint_spec.name, constraint_spec.versions) + ) + args = f'"{constraint_spec.name}", "{constraint_spec.versions}"' + head_str = f"propagate({node_variable}, node_version_satisfies({args}))" + rule = f"{head_str} :-\n{body_str}." + self.rules.append(rule) + + self.reset() + + def default_flags(self, spec: "spack.spec.Spec"): + if not spec.external or "flags" not in spec.extra_attributes: + self.reset() + return + + when_spec = spack.spec.Spec(f"%[deptypes=build] {spec}") + body_str, node_variable = self.rule_body_from(when_spec) + + node_placeholder = "XXX" + flags = spec.extra_attributes["flags"] + root_spec_str = f"{node_placeholder}" + for flag_type, default_values in flags.items(): + root_spec_str = f"{root_spec_str} {flag_type}='{default_values}'" + root_spec = spack.spec.Spec(root_spec_str) + head_clauses = self._setup.spec_clauses( + root_spec, body=False, context=SourceContext(source="compiler") + ) + self.rules.append(f"% Default compiler flags for {spec}\n") + for clause in head_clauses: + if clause.args[0] == "node": + continue + head_str = str(clause).replace(f'"{node_placeholder}"', f"{node_variable}") + rule = f"{head_str} :-\n{body_str}." + self.rules.append(rule) + + self.reset() + + def consume_facts(self): + """Consumes the facts collected by this object, and emits rules and + facts for the runtimes.
+ """ + self._setup.gen.h2("Runtimes: declarations") + runtime_pkgs = sorted( + {x.name for x in self.injected_dependencies if not spack.repo.PATH.is_virtual(x.name)} + ) + for runtime_pkg in runtime_pkgs: + self._setup.gen.fact(fn.runtime(runtime_pkg)) + self._setup.gen.newline() + + self._setup.gen.h2("Runtimes: rules") + self._setup.gen.newline() + for rule in self.rules: + self._setup.gen.append(rule) + self._setup.gen.newline() + + self._setup.gen.h2("Runtimes: requirements") + for imposed_spec, when_spec in sorted(self.runtime_conditions): + msg = f"{when_spec} requires {imposed_spec} at runtime" + _ = self._setup.condition(when_spec, imposed_spec=imposed_spec, msg=msg) + + self._setup.trigger_rules() + self._setup.effect_rules() + + +def _normalize_packages_yaml(packages_yaml: Dict[str, Any]) -> None: + for pkg_name in list(packages_yaml.keys()): + is_virtual = spack.repo.PATH.is_virtual(pkg_name) + if pkg_name == "all" or not is_virtual: + continue + + # Remove the virtual entry from the normalized configuration + data = packages_yaml.pop(pkg_name) + is_buildable = data.get("buildable", True) + if not is_buildable: + for provider in spack.repo.PATH.providers_for(pkg_name): + entry = packages_yaml.setdefault(provider.name, {}) + entry["buildable"] = False + + externals = data.get("externals", []) + + def keyfn(x): + return spack.spec.Spec(x["spec"]).name + + for provider, specs in itertools.groupby(externals, key=keyfn): + entry = packages_yaml.setdefault(provider, {}) + entry.setdefault("externals", []).extend(specs) + + +def external_config_with_implicit_externals( + configuration: spack.config.Configuration, +) -> Dict[str, Any]: + # Read packages.yaml and normalize it so that it will not contain entries referring to + # virtual packages. + packages_yaml = configuration.deepcopy_as_builtin("packages", line_info=True) + _normalize_packages_yaml(packages_yaml) + + # Add externals for libc from compilers on Linux + if not using_libc_compatibility(): + return packages_yaml + + seen = set() + for compiler in spack.compilers.config.all_compilers_from(configuration): + libc = spack.compilers.libraries.CompilerPropertyDetector(compiler).default_libc() + if libc and libc not in seen: + seen.add(libc) + entry = {"spec": f"{libc}", "prefix": libc.external_path} + packages_yaml.setdefault(libc.name, {}).setdefault("externals", []).append(entry) + return packages_yaml + + +def all_libcs() -> Set[spack.spec.Spec]: + """Return a set of all libc specs targeted by any configured compiler. 
If none, fall back to + libc determined from the current Python process if dynamically linked.""" + libcs = set() + for c in spack.compilers.config.all_compilers_from(spack.config.CONFIG): + candidate = spack.compilers.libraries.CompilerPropertyDetector(c).default_libc() + if candidate is not None: + libcs.add(candidate) + + if libcs: + return libcs + + libc = spack.util.libc.libc_from_current_python_process() + return {libc} if libc else set() diff --git a/lib/spack/spack/solver/splicing.py b/lib/spack/spack/solver/splicing.py index b29bca1b6eca62..060cca57336d47 100644 --- a/lib/spack/spack/solver/splicing.py +++ b/lib/spack/spack/solver/splicing.py @@ -14,7 +14,7 @@ class Splice(NamedTuple): splice_spec: Spec #: The name of the child that splice spec is replacing child_name: str - #: The hash of the child that `splice_spec` is replacing + #: The hash of the child that ``splice_spec`` is replacing child_hash: str diff --git a/lib/spack/spack/solver/version_order.py b/lib/spack/spack/solver/version_order.py deleted file mode 100644 index e3f56a8f256de6..00000000000000 --- a/lib/spack/spack/solver/version_order.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) -from typing import Tuple, Union - -from spack.version import GitVersion, StandardVersion - - -def concretization_version_order(version_info: Tuple[Union[GitVersion, StandardVersion], dict]): - """Version order key for concretization, where preferred > not preferred, - not deprecated > deprecated, finite > any infinite component; only if all are - the same, do we use default version ordering.""" - version, info = version_info - return ( - info.get("preferred", False), - not info.get("deprecated", False), - not version.isdevelop(), - not version.is_prerelease(), - version, - ) diff --git a/lib/spack/spack/solver/versions.py b/lib/spack/spack/solver/versions.py new file mode 100644 index 00000000000000..acac1ce32995a7 --- /dev/null +++ b/lib/spack/spack/solver/versions.py @@ -0,0 +1,32 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +import enum + + +class Provenance(enum.IntEnum): + """Enumeration of the possible provenances of a version.""" + + # A spec literal + SPEC = enum.auto() + # A dev spec literal + DEV_SPEC = enum.auto() + # The 'packages' section of the configuration + PACKAGES_YAML = enum.auto() + # A git version in the 'packages' section of the configuration + PACKAGES_YAML_GIT_VERSION = enum.auto() + # A package requirement + PACKAGE_REQUIREMENT = enum.auto() + # A 'package.py' file + PACKAGE_PY = enum.auto() + # An installed spec + INSTALLED = enum.auto() + # lower provenance for installed git refs so concretizer prefers StandardVersion installs + INSTALLED_GIT_VERSION = enum.auto() + # Synthetic versions for virtual packages + VIRTUAL_CONSTRAINT = enum.auto() + # A runtime injected from another package (e.g. 
a compiler) + RUNTIME = enum.auto() + + def __str__(self): + return f"{self._name_.lower()}" diff --git a/lib/spack/spack/solver/when_possible.lp b/lib/spack/spack/solver/when_possible.lp index 7c50b6d430bf8e..8f2c376b9f71ac 100644 --- a/lib/spack/spack/solver/when_possible.lp +++ b/lib/spack/spack/solver/when_possible.lp @@ -27,7 +27,7 @@ do_not_impose(EffectID, node(X, Package)) :- subcondition(SubconditionID, ParentConditionID), pkg_fact(Package, condition_effect(SubconditionID, EffectID)), trigger_and_effect(_, TriggerID, EffectID), - trigger_node(TriggerID, _, node(X, Package)). + trigger_requestor_node(TriggerID, node(X, Package)). opt_criterion(320, "number of input specs not concretized"). #minimize{ 0@320: #true }. diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index b713f91a32e92d..26929412d2d30a 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -14,38 +14,37 @@ $ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 target=zen 0 1 2 3 4 5 6 -The first part of this is the command, 'spack install'. The rest of the +The first part of this is the command, ``spack install``. The rest of the line is a spec for a particular installation of the mpileaks package. 0. The package to install -1. A dependency of the package, prefixed by ^ +1. A dependency of the package, prefixed by ``^`` 2. A version descriptor for the package. This can either be a specific - version, like "1.2", or it can be a range of versions, e.g. "1.2:1.4". + version, like ``1.2``, or it can be a range of versions, e.g. ``1.2:1.4``. If multiple specific versions or multiple ranges are acceptable, they can be separated by commas, e.g. if a package will only build with - versions 1.0, 1.2-1.4, and 1.6-1.8 of mvapich, you could say: + versions 1.0, 1.2-1.4, and 1.6-1.8 of mvapich, you could say:: depends_on("mvapich@1.0,1.2:1.4,1.6:1.8") 3. A compile-time variant of the package. If you need openmpi to be built in debug mode for your package to work, you can require it by - adding +debug to the openmpi spec when you depend on it. If you do - NOT want the debug option to be enabled, then replace this with -debug. + adding ``+debug`` to the openmpi spec when you depend on it. If you do + NOT want the debug option to be enabled, then replace this with ``-debug``. If you would like for the variant to be propagated through all your - package's dependencies use "++" for enabling and "--" or "~~" for disabling. + package's dependencies use ``++`` for enabling and ``--`` or ``~~`` for disabling. 4. The name of the compiler to build with. 5. The versions of the compiler to build with. Note that the identifier - for a compiler version is the same '@' that is used for a package version. - A version list denoted by '@' is associated with the compiler only if + for a compiler version is the same ``@`` that is used for a package version. + A version list denoted by ``@`` is associated with the compiler only + if it comes immediately after the compiler name. Otherwise it will be associated with the current package spec. -6. The architecture to build with. This is needed on machines where - cross-compilation is required +6. The architecture to build with.
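For a quick round trip of the syntax above, here is an illustrative sketch (the spec is abstract, so it parses without a package repository; the assertion is only a sanity check)::

    from spack.spec import Spec

    # Each token maps to the numbered items above: package, ^dependency,
    # @version range, +variant, %compiler (with its own @version), and target.
    s = Spec("mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 target=zen")
    assert s.name == "mpileaks"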
""" import collections import collections.abc @@ -89,12 +88,12 @@ import spack.llnl.util.lang as lang import spack.llnl.util.tty as tty import spack.llnl.util.tty.color as clr +import spack.patch import spack.paths import spack.platforms import spack.provider_index import spack.repo import spack.spec_parser -import spack.store import spack.traverse import spack.util.hash import spack.util.prefix @@ -104,7 +103,7 @@ import spack.version as vn import spack.version.git_ref_lookup -from .enums import InstallRecordStatus +from .enums import InstallRecordStatus, PropagationPolicy __all__ = [ "CompilerSpec", @@ -160,18 +159,23 @@ ARCHITECTURE_COLOR = "@m" #: color for highlighting architectures VARIANT_COLOR = "@B" #: color for highlighting variants HASH_COLOR = "@K" #: color for highlighting package hashes +HIGHLIGHT_COLOR = "@_R" #: color for highlighting spec parts on demand #: Default format for Spec.format(). This format can be round-tripped, so that: #: Spec(Spec("string").format()) == Spec("string)" DEFAULT_FORMAT = ( "{name}{@versions}{compiler_flags}" - "{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}" + "{variants}{ namespace=namespace_if_anonymous}" + "{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}" + "{/abstract_hash}" ) #: Display format, which eliminates extra `@=` in the output, for readability. DISPLAY_FORMAT = ( "{name}{@version}{compiler_flags}" - "{variants}{ namespace=namespace_if_anonymous}{ arch=architecture}{/abstract_hash}" + "{variants}{ namespace=namespace_if_anonymous}" + "{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}" + "{/abstract_hash}" "{compilers}" ) @@ -228,6 +232,8 @@ def _make_microarchitecture(name: str) -> spack.vendor.archspec.cpu.Microarchite class ArchSpec: """Aggregate the target platform, the operating system and the target microarchitecture.""" + ANY_TARGET = _make_microarchitecture("*") + @staticmethod def default_arch(): """Return the default architecture""" @@ -401,6 +407,11 @@ def satisfies(self, other: "ArchSpec") -> bool: for attribute in ("platform", "os"): other_attribute = getattr(other, attribute) self_attribute = getattr(self, attribute) + + # platform=* or os=* + if self_attribute and other_attribute == "*": + return True + if other_attribute and self_attribute != other_attribute: return False @@ -440,6 +451,10 @@ def _target_satisfies(self, other: "ArchSpec", strict: bool) -> bool: if self.target is None: return False + # self.target is not None, and other is target=* + if other.target == ArchSpec.ANY_TARGET: + return True + return bool(self._target_intersection(other)) def _target_constrain(self, other: "ArchSpec") -> bool: @@ -654,8 +669,8 @@ def versions(self): @property def display_str(self): - """Equivalent to {compiler.name}{@compiler.version} for Specs, without extra - @= for readability.""" + """Equivalent to ``{compiler.name}{@compiler.version}`` for Specs, without extra + ``@=`` for readability.""" if self.versions != vn.any_version: return self.spec.format("{name}{@version}") return self.spec.format("{name}") @@ -721,7 +736,7 @@ class DependencySpec: virtuals: virtual packages provided from child to parent node. 
""" - __slots__ = "parent", "spec", "depflag", "virtuals", "direct", "when" + __slots__ = "parent", "spec", "depflag", "virtuals", "direct", "when", "propagation" def __init__( self, @@ -731,13 +746,18 @@ def __init__( depflag: dt.DepFlag, virtuals: Tuple[str, ...], direct: bool = False, + propagation: PropagationPolicy = PropagationPolicy.NONE, when: Optional["Spec"] = None, ): + if direct is False and propagation != PropagationPolicy.NONE: + raise InvalidEdgeError("only direct dependencies can be propagated") + self.parent = parent self.spec = spec self.depflag = depflag self.virtuals = tuple(sorted(set(virtuals))) self.direct = direct + self.propagation = propagation self.when = when or Spec() def update_deptypes(self, depflag: dt.DepFlag) -> bool: @@ -761,13 +781,16 @@ def update_virtuals(self, virtuals: Union[str, Iterable[str]]) -> bool: self.virtuals = tuple(sorted(union)) return True - def copy(self) -> "DependencySpec": + def copy(self, *, keep_virtuals: bool = True, keep_parent: bool = True) -> "DependencySpec": """Return a copy of this edge""" + parent = self.parent if keep_parent else Spec() + virtuals = self.virtuals if keep_virtuals else () return DependencySpec( - self.parent, + parent, self.spec, depflag=self.depflag, - virtuals=self.virtuals, + virtuals=virtuals, + propagation=self.propagation, direct=self.direct, when=self.when, ) @@ -778,15 +801,25 @@ def _cmp_iter(self): yield self.depflag yield self.virtuals yield self.direct + yield self.propagation yield self.when def __str__(self) -> str: - parent = self.parent.name if self.parent else None - child = self.spec.name if self.spec else None - virtuals_string = f"virtuals={','.join(self.virtuals)}" if self.virtuals else "" - when_string = f"when='{self.when}'" if self.when != Spec() else "" - edge_attrs = filter(lambda x: bool(x), (virtuals_string, when_string)) - return f"{parent} {self.depflag}[{' '.join(edge_attrs)}] --> {child}" + return self.format() + + def __repr__(self) -> str: + keywords = [f"depflag={self.depflag}", f"virtuals={self.virtuals}"] + if self.direct: + keywords.append(f"direct={self.direct}") + + if self.when != Spec(): + keywords.append(f"when={self.when}") + + if self.propagation != PropagationPolicy.NONE: + keywords.append(f"propagation={self.propagation}") + + keywords_str = ", ".join(keywords) + return f"DependencySpec({self.parent.format()!r}, {self.spec.format()!r}, {keywords_str})" def format(self, *, unconditional: bool = False) -> str: """Returns a string, using the spec syntax, representing this edge @@ -795,8 +828,7 @@ def format(self, *, unconditional: bool = False) -> str: unconditional: if True, removes any condition statement from the representation """ - parent = self.parent.name if self.parent.name else "" - child = self.spec if self.spec else "" + parent_str, child_str = self.parent.format(), self.spec.format() virtuals_str = f"virtuals={','.join(self.virtuals)}" if self.virtuals else "" when_str = "" @@ -804,14 +836,17 @@ def format(self, *, unconditional: bool = False) -> str: when_str = f"when='{self.when}'" dep_sigil = "%" if self.direct else "^" - edge_attrs = filter(lambda x: bool(x), (virtuals_str, when_str)) + if self.propagation == PropagationPolicy.PREFERENCE: + dep_sigil = "%%" + + edge_attrs = [x for x in (virtuals_str, when_str) if x] if edge_attrs: - return f"{parent} {dep_sigil}[{' '.join(edge_attrs)}] {child}" - return f"{parent} {dep_sigil}{child}" + return f"{parent_str} {dep_sigil}[{' '.join(edge_attrs)}] {child_str}" + return f"{parent_str} 
{dep_sigil}{child_str}" def flip(self) -> "DependencySpec": - """Flip the dependency, and drop virtual and conditional information""" + """Flips the dependency and keeps its type. Drops all othe information.""" return DependencySpec( parent=self.spec, spec=self.parent, depflag=self.depflag, virtuals=() ) @@ -834,6 +869,10 @@ class CompilerFlag(str): for "-g" would indicate ``depends_on``. """ + propagate: bool + flag_group: str + source: str + def __new__(cls, value, **kwargs): obj = str.__new__(cls, value) obj.propagate = kwargs.pop("propagate", False) @@ -992,98 +1031,69 @@ def _sort_by_dep_types(dspec: DependencySpec): return dspec.depflag -class _EdgeMap(collections.abc.Mapping): - """Represent a collection of edges (DependencySpec objects) in the DAG. - - Objects of this class are used in Specs to track edges that are - outgoing towards direct dependencies, or edges that are incoming - from direct dependents. - - Edges are stored in a dictionary and keyed by package name. - """ - - __slots__ = "edges", "store_by_child" - - def __init__(self, store_by_child: bool = True) -> None: - self.edges: Dict[str, List[DependencySpec]] = {} - self.store_by_child = store_by_child - - def __getitem__(self, key: str) -> List[DependencySpec]: - return self.edges[key] - - def __iter__(self): - return iter(self.edges) +EdgeMap = Dict[str, List[DependencySpec]] - def __len__(self) -> int: - return len(self.edges) - def add(self, edge: DependencySpec) -> None: - key = edge.spec.name if self.store_by_child else edge.parent.name - if key in self.edges: - lst = self.edges[key] - lst.append(edge) - lst.sort(key=_sort_by_dep_types) - else: - self.edges[key] = [edge] - - def __str__(self) -> str: - return f"{{deps: {', '.join(str(d) for d in sorted(self.values()))}}}" +def _add_edge_to_map(edge_map: EdgeMap, key: str, edge: DependencySpec) -> None: + if key in edge_map: + lst = edge_map[key] + lst.append(edge) + lst.sort(key=_sort_by_dep_types) + else: + edge_map[key] = [edge] - def select( - self, - *, - parent: Optional[str] = None, - child: Optional[str] = None, - depflag: dt.DepFlag = dt.ALL, - virtuals: Optional[Union[str, Sequence[str]]] = None, - ) -> List[DependencySpec]: - """Selects a list of edges and returns them. - If an edge: +def _select_edges( + edge_map: EdgeMap, + *, + parent: Optional[str] = None, + child: Optional[str] = None, + depflag: dt.DepFlag = dt.ALL, + virtuals: Optional[Union[str, Sequence[str]]] = None, +) -> List[DependencySpec]: + """Selects a list of edges and returns them. - - Has *any* of the dependency types passed as argument, - - Matches the parent and/or child name - - Provides *any* of the virtuals passed as argument + If an edge: - then it is selected. + - Has *any* of the dependency types passed as argument, + - Matches the parent and/or child name + - Provides *any* of the virtuals passed as argument - The deptypes argument needs to be a flag, since the method won't - convert it for performance reason. + then it is selected. 
- Args: - parent: name of the parent package - child: name of the child package - depflag: allowed dependency types in flag form - virtuals: list of virtuals or specific virtual on the edge - """ - if not depflag: - return [] + Args: + edge_map: map of edges to select from + parent: name of the parent package + child: name of the child package + depflag: allowed dependency types in flag form + virtuals: list of virtuals or specific virtual on the edge + """ + if not depflag: + return [] - # Start from all the edges we store - selected = (d for d in itertools.chain.from_iterable(self.values())) + # Start from all the edges we store + selected: Iterable[DependencySpec] = itertools.chain.from_iterable(edge_map.values()) - # Filter by parent name - if parent: - selected = (d for d in selected if d.parent.name == parent) + # Filter by parent name + if parent: + selected = (d for d in selected if d.parent.name == parent) - # Filter by child name - if child: - selected = (d for d in selected if d.spec.name == child) + # Filter by child name + if child: + selected = (d for d in selected if d.spec.name == child) - # Filter by allowed dependency types + # Filter by allowed dependency types + if depflag != dt.ALL: selected = (dep for dep in selected if not dep.depflag or (depflag & dep.depflag)) - # Filter by virtuals - if virtuals is not None: - if isinstance(virtuals, str): - selected = (dep for dep in selected if virtuals in dep.virtuals) - else: - selected = (dep for dep in selected if any(v in dep.virtuals for v in virtuals)) - - return list(selected) + # Filter by virtuals + if virtuals is not None: + if isinstance(virtuals, str): + selected = (dep for dep in selected if virtuals in dep.virtuals) + else: + selected = (dep for dep in selected if any(v in dep.virtuals for v in virtuals)) - def clear(self): - self.edges.clear() + return list(selected) def _headers_default_handler(spec: "Spec"): @@ -1189,10 +1199,10 @@ def __get__(self, instance: "SpecBuildInterface", cls): The order of call is: 1. if the query was through the name of a virtual package try to - search for the attribute `{virtual_name}_{attribute_name}` + search for the attribute ``{virtual_name}_{attribute_name}`` in Package - 2. try to search for attribute `{attribute_name}` in Package + 2. try to search for attribute ``{attribute_name}`` in Package 3. try to call the default handler @@ -1342,6 +1352,8 @@ def tree( status_fn: Optional[Callable[["Spec"], InstallStatus]] = None, prefix: Optional[Callable[["Spec"], str]] = None, key: Callable[["Spec"], Any] = id, + highlight_version_fn: Optional[Callable[["Spec"], bool]] = None, + highlight_variant_fn: Optional[Callable[["Spec", str], bool]] = None, ) -> str: """Prints out specs and their dependencies, tree-formatted with indentation. 
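A sketch of the new highlight callbacks (``specs`` is assumed to be a list of concrete specs; both callables are optional, as in the signature above)::

    # Highlight the version of every zlib-ng node and every "shared" variant.
    output = tree(
        specs,
        highlight_version_fn=lambda node: node.name == "zlib-ng",
        highlight_variant_fn=lambda node, variant: variant == "shared",
    )
    print(output)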
@@ -1353,7 +1365,7 @@ def tree( depth: print the depth from the root hashes: if True, print the hash of each node hashlen: length of the hash to be printed - cover: either "nodes" or "edges" + cover: either ``"nodes"`` or ``"edges"`` indent: extra indentation for the tree being printed format: format to be used to print each node deptypes: dependency types to be represented in the tree @@ -1364,6 +1376,10 @@ def tree( installation status prefix: optional callable that takes a node as an argument and return its installation prefix + highlight_version_fn: optional callable that returns true on nodes where the version + needs to be highlighted + highlight_variant_fn: optional callable that returns true on variants that need + to be highlighted """ out = "" @@ -1419,7 +1435,15 @@ def tree( out += " " * d if d > 0: out += "^" - out += node.format(format, color=color) + "\n" + out += ( + node.format( + format, + color=color, + highlight_version_fn=highlight_version_fn, + highlight_variant_fn=highlight_variant_fn, + ) + + "\n" + ) # Check if we wanted just the first line if not recurse_dependencies: @@ -1448,7 +1472,7 @@ def __repr__(self) -> str: return result -def _anonymous_star(dep, dep_format): +def _anonymous_star(dep: DependencySpec, dep_format: str) -> str: """Determine if a spec needs a star to disambiguate it from an anonymous spec w/variants. Returns: @@ -1473,7 +1497,7 @@ def _anonymous_star(dep, dep_format): # booleans come first, and they don't need a star. key-value pairs do. If there are # no key value pairs, we're left with either an empty spec, which needs * as in # '^*', or we're left with arch, which is a key value pair, and needs a star. - if not any(v.type == spack.variant.VariantType.BOOL for v in dep.spec.variants.values()): + if not any(v.type == vt.VariantType.BOOL for v in dep.spec.variants.values()): return "*" return "*" if dep.spec.architecture else "" @@ -1508,13 +1532,13 @@ def __init__(self, spec_like=None, *, external_path=None, external_modules=None) return # init an empty spec that matches anything. - self.name = None - self.versions = vn.VersionList(":") + self.name: str = "" + self.versions = vn.VersionList.any() self.variants = VariantMap(self) self.architecture = None self.compiler_flags = FlagMap(self) - self._dependents = _EdgeMap(store_by_child=False) - self._dependencies = _EdgeMap(store_by_child=True) + self._dependents = {} + self._dependencies = {} self.namespace = None self.abstract_hash = None @@ -1540,7 +1564,7 @@ def __init__(self, spec_like=None, *, external_path=None, external_modules=None) self.external_modules = Spec._format_module_list(external_modules) # This attribute is used to store custom information for external specs. - self.extra_attributes: dict = {} + self.extra_attributes: Dict[str, Any] = {} # This attribute holds the original build copy of the spec if it is # deployed differently than it was built. None signals that the spec @@ -1588,7 +1612,7 @@ def external(self): @property def is_develop(self): """Return whether the Spec represents a user-developed package - in a Spack ``Environment`` (i.e. using `spack develop`). + in a Spack Environment (i.e. using ``spack develop``). 
""" return bool(self.variants.get("dev_path", False)) @@ -1613,12 +1637,12 @@ def detach(self, deptype="all"): for dep in self.dependencies(deptype=deptype): # Remove the spec from dependents if self.name in dep._dependents: - dependents_copy = dep._dependents.edges[self.name] - del dep._dependents.edges[self.name] + dependents_copy = dep._dependents[self.name] + del dep._dependents[self.name] for edge in dependents_copy: if edge.parent.dag_hash() == key: continue - dep._dependents.add(edge) + _add_edge_to_map(dep._dependents, edge.parent.name, edge) def _get_dependency(self, name): # WARNING: This function is an implementation detail of the @@ -1635,7 +1659,7 @@ def _get_dependency(self, name): def edges_from_dependents( self, - name=None, + name: Optional[str] = None, depflag: dt.DepFlag = dt.ALL, *, virtuals: Optional[Union[str, Sequence[str]]] = None, @@ -1644,17 +1668,15 @@ def edges_from_dependents( to parents. Args: - name (str): filter dependents by package name + name: filter dependents by package name depflag: allowed dependency types virtuals: allowed virtuals """ - return [ - d for d in self._dependents.select(parent=name, depflag=depflag, virtuals=virtuals) - ] + return _select_edges(self._dependents, parent=name, depflag=depflag, virtuals=virtuals) def edges_to_dependencies( self, - name=None, + name: Optional[str] = None, depflag: dt.DepFlag = dt.ALL, *, virtuals: Optional[Union[str, Sequence[str]]] = None, @@ -1666,9 +1688,7 @@ def edges_to_dependencies( depflag: allowed dependency types virtuals: allowed virtuals """ - return [ - d for d in self._dependencies.select(child=name, depflag=depflag, virtuals=virtuals) - ] + return _select_edges(self._dependencies, child=name, depflag=depflag, virtuals=virtuals) @property def edge_attributes(self) -> str: @@ -1702,7 +1722,7 @@ def edge_attributes(self) -> str: def dependencies( self, - name=None, + name: Optional[str] = None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL, *, virtuals: Optional[Union[str, Sequence[str]]] = None, @@ -1721,12 +1741,12 @@ def dependencies( ] def dependents( - self, name=None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL + self, name: Optional[str] = None, deptype: Union[dt.DepTypes, dt.DepFlag] = dt.ALL ) -> List["Spec"]: """Return a list of direct dependents (nodes in the DAG). Args: - name (str): filter dependents by package name + name: filter dependents by package name deptype: allowed dependency types """ if not isinstance(deptype, dt.DepFlag): @@ -1744,7 +1764,7 @@ def _dependencies_dict(self, depflag: dt.DepFlag = dt.ALL): """ _sort_fn = lambda x: (x.spec.name, _sort_by_dep_types(x)) _group_fn = lambda x: x.spec.name - selected_edges = self._dependencies.select(depflag=depflag) + selected_edges = _select_edges(self._dependencies, depflag=depflag) result = {} for key, group in itertools.groupby(sorted(selected_edges, key=_sort_fn), key=_group_fn): result[key] = list(group) @@ -1810,6 +1830,7 @@ def _add_dependency( depflag: dt.DepFlag, virtuals: Tuple[str, ...], direct: bool = False, + propagation: PropagationPolicy = PropagationPolicy.NONE, when: Optional["Spec"] = None, ): """Called by the parser to add another spec as a dependency. 
@@ -1818,6 +1839,7 @@ def _add_dependency( depflag: dependency type for this edge virtuals: virtuals on this edge direct: if True denotes a direct dependency (associated with the % sigil) + propagation: propagation policy for this edge when: optional condition under which dependency holds """ if when is None: @@ -1825,7 +1847,12 @@ def _add_dependency( if spec.name not in self._dependencies or not spec.name: self.add_dependency_edge( - spec, depflag=depflag, virtuals=virtuals, direct=direct, when=when + spec, + depflag=depflag, + virtuals=virtuals, + direct=direct, + when=when, + propagation=propagation, ) return @@ -1873,22 +1900,24 @@ def add_dependency_edge( depflag: dt.DepFlag, virtuals: Tuple[str, ...], direct: bool = False, + propagation: PropagationPolicy = PropagationPolicy.NONE, when: Optional["Spec"] = None, ): """Add a dependency edge to this spec. Args: dependency_spec: spec of the dependency - deptypes: dependency types for this edge + depflag: dependency type for this edge virtuals: virtuals provided by this edge direct: if True denotes a direct dependency + propagation: propagation policy for this edge when: if non-None, condition under which dependency holds """ if when is None: when = Spec() # Check if we need to update edges that are already present - selected = self._dependencies.select(child=dependency_spec.name) + selected = self._dependencies.get(dependency_spec.name, []) for edge in selected: has_errors, details = False, [] msg = f"cannot update the edge from {edge.parent.name} to {edge.spec.name}" @@ -1928,10 +1957,16 @@ def add_dependency_edge( return edge = DependencySpec( - self, dependency_spec, depflag=depflag, virtuals=virtuals, direct=direct, when=when + self, + dependency_spec, + depflag=depflag, + virtuals=virtuals, + direct=direct, + propagation=propagation, + when=when, ) - self._dependencies.add(edge) - dependency_spec._dependents.add(edge) + _add_edge_to_map(self._dependencies, edge.spec.name, edge) + _add_edge_to_map(dependency_spec._dependents, edge.parent.name, edge) # # Public interface @@ -1973,7 +2008,7 @@ def concrete(self): """A spec is concrete if it describes a single build of a package. More formally, a spec is concrete if concretize() has been called - on it and it has been marked `_concrete`. + on it and it has been marked ``_concrete``. Concrete specs either can be or have been built. 
All constraints have been resolved, optional dependencies have been added or @@ -2002,7 +2037,9 @@ def installed(self): try: # If the spec is in the DB, check the installed # attribute of the record - return spack.store.STORE.db.get_record(self).installed + from spack.store import STORE + + return STORE.db.get_record(self).installed except KeyError: # If the spec is not in the DB, the method # above raises a Key error @@ -2018,7 +2055,9 @@ def installed_upstream(self): if not self.concrete: return False - upstream, record = spack.store.STORE.db.query_by_spec_hash(self.dag_hash()) + from spack.store import STORE + + upstream, record = STORE.db.query_by_spec_hash(self.dag_hash()) return upstream and record and record.installed @overload @@ -2127,153 +2166,30 @@ def traverse_edges( visited=visited, ) - def _format_edge_attributes(self, dep: DependencySpec, deptypes=True, virtuals=True): - deptypes_str = ( - f"deptypes={','.join(dt.flag_to_tuple(dep.depflag))}" - if deptypes and dep.depflag - else "" - ) - when_str = f"when='{(dep.when)}'" if dep.when != Spec() else "" - virtuals_str = f"virtuals={','.join(dep.virtuals)}" if virtuals and dep.virtuals else "" - - attrs = " ".join(s for s in (when_str, deptypes_str, virtuals_str) if s) - if attrs: - attrs = f"[{attrs}] " - - return attrs - - def _format_dependencies( - self, - format_string: str = DEFAULT_FORMAT, - include: Optional[Callable[[DependencySpec], bool]] = None, - deptypes=True, - _force_direct=False, - ): - """Helper for formatting dependencies on specs. - - Arguments: - format_string: format string to use for each dependency - include: predicate to select which dependencies to include - deptypes: whether to format deptypes - _force_direct: if True, print all dependencies as direct dependencies - (to be removed when we have this metadata on concrete edges) - """ - include = include or (lambda dep: True) - parts = [] - if self.concrete: - direct = self.edges_to_dependencies() - transitive: List[DependencySpec] = [] - else: - direct, transitive = lang.stable_partition( - self.edges_to_dependencies(), predicate_fn=lambda x: x.direct - ) - - # helper for direct and transitive loops below - def format_edge(edge, sigil, dep_spec=None): - dep_spec = dep_spec or edge.spec - dep_format = dep_spec.format(format_string) - - edge_attributes = ( - self._format_edge_attributes(edge, deptypes=deptypes, virtuals=False) - if edge.depflag or edge.when != Spec() - else "" - ) - virtuals = f"{','.join(edge.virtuals)}=" if edge.virtuals else "" - star = _anonymous_star(edge, dep_format) - - return f"{sigil}{edge_attributes}{star}{virtuals}{dep_format}" - - # direct dependencies - for edge in sorted(direct, key=lambda x: x.spec.name): - if not include(edge): - continue - - # replace legacy compiler names - old_name = edge.spec.name - new_name = spack.aliases.BUILTIN_TO_LEGACY_COMPILER.get(old_name) - try: - # this is ugly but copies can be expensive - if new_name: - edge.spec.name = new_name - parts.append(format_edge(edge, "%", edge.spec)) - finally: - edge.spec.name = old_name - - if self.concrete: - # Concrete specs should go no further, as the complexity - # below is O(paths) - return " ".join(parts).strip() - - # transitive dependencies (with any direct dependencies) - for edge in sorted(transitive, key=lambda x: x.spec.name): - if not include(edge): - continue - sigil = "%" if _force_direct else "^" # hack til direct deps represented better - parts.append(format_edge(edge, sigil, edge.spec)) - - # also recursively add any direct dependencies of 
transitive dependencies - if edge.spec._dependencies: - parts.append( - edge.spec._format_dependencies( - format_string=format_string, - include=include, - deptypes=deptypes, - _force_direct=_force_direct, - ) - ) - - return " ".join(parts).strip() - - @property - def compilers(self): - # TODO: get rid of the space here and make formatting smarter - return " " + self._format_dependencies( - "{name}{@version}", - include=lambda dep: any(lang in dep.virtuals for lang in ("c", "cxx", "fortran")), - deptypes=False, - _force_direct=True, - ) - - @property - def long_spec(self): - """Returns a string of the spec with the dependencies completely enumerated.""" - if self.concrete: - return self.tree(format=DISPLAY_FORMAT) - return f"{self.format()} {self._format_dependencies()}".strip() - - @property - def short_spec(self): - """Returns a version of the spec with the dependencies hashed - instead of completely enumerated.""" - return self.format("{name}{@version}{variants}{ arch=architecture}{/hash:7}") - - @property - def cshort_spec(self): - """Returns an auto-colorized version of ``self.short_spec``.""" - return self.cformat("{name}{@version}{variants}{ arch=architecture}{/hash:7}") - @property def prefix(self) -> spack.util.prefix.Prefix: if not self._concrete: raise spack.error.SpecError(f"Spec is not concrete: {self}") if self._prefix is None: - _, record = spack.store.STORE.db.query_by_spec_hash(self.dag_hash()) + from spack.store import STORE + + _, record = STORE.db.query_by_spec_hash(self.dag_hash()) if record and record.path: self.set_prefix(record.path) else: - self.set_prefix(spack.store.STORE.layout.path_for_spec(self)) + self.set_prefix(STORE.layout.path_for_spec(self)) assert self._prefix is not None return self._prefix def set_prefix(self, value: str) -> None: self._prefix = spack.util.prefix.Prefix(spack.llnl.path.convert_to_platform_path(value)) - def spec_hash(self, hash): + def spec_hash(self, hash: ht.SpecHashDescriptor) -> str: """Utility method for computing different types of Spec hashes. Arguments: - hash (spack.hash_types.SpecHashDescriptor): type of hash to generate. + hash: type of hash to generate. """ # TODO: currently we strip build dependencies by default. Rethink # this when we move to using package hashing on all specs. @@ -2290,7 +2206,9 @@ def spec_hash(self, hash): return out[:-7] + self.build_spec.spec_hash(hash)[-7:] return out - def _cached_hash(self, hash, length=None, force=False): + def _cached_hash( + self, hash: ht.SpecHashDescriptor, length: Optional[int] = None, force: bool = False + ) -> str: """Helper function for storing a cached hash on the spec. This will run spec_hash() with the deptype and package_hash @@ -2298,22 +2216,19 @@ def _cached_hash(self, hash, length=None, force=False): in the supplied attribute on this spec. Arguments: - hash (spack.hash_types.SpecHashDescriptor): type of hash to generate. - length (int): length of hash prefix to return (default is full hash string) - force (bool): cache the hash even if spec is not concrete (default False) + hash: type of hash to generate. 
+ length: length of hash prefix to return (default is full hash string) + force: cache the hash even if spec is not concrete (default False) """ - if not hash.attr: - return self.spec_hash(hash)[:length] - hash_string = getattr(self, hash.attr, None) if hash_string: return hash_string[:length] - else: - hash_string = self.spec_hash(hash) - if force or self.concrete: - setattr(self, hash.attr, hash_string) - return hash_string[:length] + hash_string = self.spec_hash(hash) + if force or self.concrete: + setattr(self, hash.attr, hash_string) + + return hash_string[:length] def package_hash(self): """Compute the hash of the contents of the package for this node""" @@ -2342,16 +2257,17 @@ def dag_hash_bit_prefix(self, bits): def _lookup_hash(self): """Lookup just one spec with an abstract hash, returning a spec from the the environment, store, or finally, binary caches.""" - import spack.binary_distribution - import spack.environment + from spack.binary_distribution import BinaryCacheQuery + from spack.environment import active_environment + from spack.store import STORE - active_env = spack.environment.active_environment() + active_env = active_environment() # First env, then store, then binary cache matches = ( (active_env.all_matching_specs(self) if active_env else []) - or spack.store.STORE.db.query(self, installed=InstallRecordStatus.ANY) - or spack.binary_distribution.BinaryCacheQuery(True)(self) + or STORE.db.query(self, installed=InstallRecordStatus.ANY) + or BinaryCacheQuery(True)(self) ) if not matches: @@ -2405,55 +2321,60 @@ def replace_hash(self): self._dup(self.lookup_hash()) - def to_node_dict(self, hash=ht.dag_hash): + def to_node_dict(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]: """Create a dictionary representing the state of this Spec. - ``to_node_dict`` creates the content that is eventually hashed by - Spack to create identifiers like the DAG hash (see - ``dag_hash()``). Example result of ``to_node_dict`` for the + This method creates the content that is eventually hashed by Spack to create identifiers + like the DAG hash (see :meth:`dag_hash()`). Example result of this function for the ``sqlite`` package:: { - 'sqlite': { - 'version': '3.28.0', - 'arch': { - 'platform': 'darwin', - 'platform_os': 'mojave', - 'target': 'x86_64', - }, - 'namespace': 'builtin', - 'parameters': { - 'fts': 'true', - 'functions': 'false', - 'cflags': [], - 'cppflags': [], - 'cxxflags': [], - 'fflags': [], - 'ldflags': [], - 'ldlibs': [], + "name": "sqlite", + "version": "3.46.0", + "arch": {"platform": "linux", "platform_os": "ubuntu24.04", "target": "x86_64_v3"}, + "namespace": "builtin", + "parameters": { + "build_system": "autotools", + "column_metadata": True, + "dynamic_extensions": True, + "fts": True, + "functions": False, + "rtree": True, + "cflags": [], + "cppflags": [], + "cxxflags": [], + "fflags": [], + "ldflags": [], + "ldlibs": [], + }, + "package_hash": "umcghjlve5347o3q2odo7vfcso2zhxdzmfdba23nkdhe5jntlhia====", + "dependencies": [ + { + "name": "compiler-wrapper", + "hash": "c5bxlim3zge4snwrwtd6rzuvq2unek6s", + "parameters": {"deptypes": ("build",), "virtuals": ()}, }, - 'dependencies': { - 'readline': { - 'hash': 'zvaa4lhlhilypw5quj3akyd3apbq5gap', - 'type': ['build', 'link'], - } + { + "name": "gcc", + "hash": "6dzveld2rtt2dkhklxfnery5wbtb5uus", + "parameters": {"deptypes": ("build",), "virtuals": ("c",)}, }, - } + ... 
+ ], + "annotations": {"original_specfile_version": 5}, } - Note that the dictionary returned does *not* include the hash of - the *root* of the spec, though it does include hashes for each - dependency, and (optionally) the package file corresponding to - each node. - See ``to_dict()`` for a "complete" spec hash, with hashes for - each node and nodes for each dependency (instead of just their - hashes). + Note that the dictionary returned does *not* include the hash of the *root* of the spec, + though it does include hashes for each dependency and its own package hash. + + See :meth:`to_dict()` for a "complete" spec hash, with hashes for each node and nodes for + each dependency (instead of just their hashes). Arguments: - hash (spack.hash_types.SpecHashDescriptor) type of hash to generate. + hash: type of hash to generate. """ - d = {"name": self.name} + d: Dict[str, Any] = {"name": self.name} if self.versions: d.update(self.versions.to_dict()) @@ -2464,7 +2385,7 @@ def to_node_dict(self, hash=ht.dag_hash): if self.namespace: d["namespace"] = self.namespace - params = dict(sorted(v.yaml_entry() for v in self.variants.values())) + params: Dict[str, Any] = dict(sorted(v.yaml_entry() for v in self.variants.values())) # Only need the string compiler flag for yaml file params.update( @@ -2557,87 +2478,66 @@ def to_node_dict(self, hash=ht.dag_hash): return d - def to_dict(self, hash=ht.dag_hash): + def to_dict(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]: """Create a dictionary suitable for writing this spec to YAML or JSON. - This dictionaries like the one that is ultimately written to a - ``spec.json`` file in each Spack installation directory. For - example, for sqlite:: + This dictionary is like the one that is ultimately written to a ``spec.json`` file in each + Spack installation directory. For example, for sqlite:: { - "spec": { - "_meta": { - "version": 2 - }, - "nodes": [ - { - "name": "sqlite", - "version": "3.34.0", - "arch": { - "platform": "darwin", - "platform_os": "catalina", - "target": "x86_64" - }, - "compiler": { - "name": "apple-clang", - "version": "11.0.0" - }, - "namespace": "builtin", - "parameters": { - "column_metadata": true, - "fts": true, - "functions": false, - "rtree": false, - "cflags": [], - "cppflags": [], - "cxxflags": [], - "fflags": [], - "ldflags": [], - "ldlibs": [] - }, - "dependencies": [ - { - "name": "readline", - "hash": "4f47cggum7p4qmp3xna4hi547o66unva", - "type": [ - "build", - "link" - ] - }, - { - "name": "zlib", - "hash": "uvgh6p7rhll4kexqnr47bvqxb3t33jtq", - "type": [ - "build", - "link" - ] - } + "spec": { + "_meta": {"version": 5}, + "nodes": [ + { + "name": "sqlite", + "version": "3.46.0", + "arch": { + "platform": "linux", + "platform_os": "ubuntu24.04", + "target": "x86_64_v3" + }, + "namespace": "builtin", + "parameters": { + "build_system": "autotools", + "column_metadata": True, + "dynamic_extensions": True, + "fts": True, + "functions": False, + "rtree": True, + "cflags": [], + "cppflags": [], + "cxxflags": [], + "fflags": [], + "ldflags": [], + "ldlibs": [], + }, + "package_hash": "umcghjlve5347o...xdzmfdba23nkdhe5jntlhia====", + "dependencies": [ + { + "name": "compiler-wrapper", + "hash": "c5bxlim3zge4snwrwtd6rzuvq2unek6s", + "parameters": {"deptypes": ("build",), "virtuals": ()}, + }, + { + "name": "gcc", + "hash": "6dzveld2rtt2dkhklxfnery5wbtb5uus", + "parameters": {"deptypes": ("build",), "virtuals": ("c",)}, + }, + ... 
+ ], + "annotations": {"original_specfile_version": 5}, + "hash": "a2ubvvqnula6zdppckwqrjf3zmsdzpoh", + }, + ... ], - "hash": "tve45xfqkfgmzwcyfetze2z6syrg7eaf", - }, - # ... more node dicts for readline and its dependencies ... - ] + } } - Note that this dictionary starts with the 'spec' key, and what - follows is a list starting with the root spec, followed by its - dependencies in preorder. Each node in the list also has a - 'hash' key that contains the hash of the node *without* the hash - field included. - - In the example, the package content hash is not included in the - spec, but if ``package_hash`` were true there would be an - additional field on each node called ``package_hash``. - - ``from_dict()`` can be used to read back in a spec that has been - converted to a dictionary, serialized, and read back in. - - Arguments: - deptype (tuple or str): dependency types to include when - traversing the spec. - package_hash (bool): whether to include package content - hashes in the dictionary. + Note that this dictionary starts with the ``spec`` key, and what follows is a list starting + with the root spec, followed by its dependencies in preorder. + The method :meth:`from_dict()` can be used to read back in a spec that has been converted + to a dictionary, serialized, and read back in. """ node_list = [] # Using a list to preserve preorder traversal for hash. hash_set = set() @@ -2658,10 +2558,9 @@ def to_dict(self, hash=ht.dag_hash): return {"spec": {"_meta": {"version": SPECFILE_FORMAT_VERSION}, "nodes": node_list}} - def node_dict_with_hashes(self, hash=ht.dag_hash): - """Returns a node_dict of this spec with the dag hash added. If this - spec is concrete, the full hash is added as well. If 'build' is in - the hash_type, the build hash is also added.""" + def node_dict_with_hashes(self, hash: ht.SpecHashDescriptor = ht.dag_hash) -> Dict[str, Any]: + """Returns a node dict of this spec with the dag hash, and the provided hash (if not + the dag hash).""" node = self.to_node_dict(hash) # All specs have at least a DAG hash node[ht.dag_hash.name] = self.dag_hash() @@ -2718,7 +2617,7 @@ def override(init_spec, change_spec): return new_spec @staticmethod - def from_literal(spec_dict, normal=True): + def from_literal(spec_dict: dict, normal: bool = True) -> "Spec": """Builds a Spec from a dictionary containing the spec literal. The dictionary must have a single top level key, representing the root, @@ -2728,62 +2627,53 @@ def from_literal(spec_dict, normal=True): Spec and the dependency types. Args: - spec_dict (dict): the dictionary containing the spec literal - normal (bool): if True the same key appearing at different levels + spec_dict: the dictionary containing the spec literal + normal: if :data:`True` the same key appearing at different levels of the ``spec_dict`` will map to the same object in memory. Examples: - A simple spec ``foo`` with no dependencies: - - .. code-block:: python - - {'foo': None} + A simple spec ``foo`` with no dependencies:: - A spec ``foo`` with a ``(build, link)`` dependency ``bar``: + {"foo": None} - .. code-block:: python + A spec ``foo`` with a ``(build, link)`` dependency ``bar``:: - {'foo': - {'bar:build,link': None}} - - A spec with a diamond dependency and various build types: + {"foo": + {"bar:build,link": None} + } - .. 
code-block:: python + A spec with a diamond dependency and various build types:: - {'dt-diamond': { - 'dt-diamond-left:build,link': { - 'dt-diamond-bottom:build': None + {"dt-diamond": { + "dt-diamond-left:build,link": { + "dt-diamond-bottom:build": None }, - 'dt-diamond-right:build,link': { - 'dt-diamond-bottom:build,link,run': None + "dt-diamond-right:build,link": { + "dt-diamond-bottom:build,link,run": None } }} The same spec with a double copy of ``dt-diamond-bottom`` and - no diamond structure: + no diamond structure:: - .. code-block:: python - - {'dt-diamond': { - 'dt-diamond-left:build,link': { - 'dt-diamond-bottom:build': None + Spec.from_literal({"dt-diamond": { + "dt-diamond-left:build,link": { + "dt-diamond-bottom:build": None }, - 'dt-diamond-right:build,link': { - 'dt-diamond-bottom:build,link,run': None + "dt-diamond-right:build,link": { + "dt-diamond-bottom:build,link,run": None } }, normal=False} - Constructing a spec using a Spec object as key: - - .. code-block:: python + Constructing a spec using a Spec object as key:: - mpich = Spec('mpich') - libelf = Spec('libelf@1.8.11') + mpich = Spec("mpich") + libelf = Spec("libelf@1.8.11") expected_normalized = Spec.from_literal({ - 'mpileaks': { - 'callpath': { - 'dyninst': { - 'libdwarf': {libelf: None}, + "mpileaks": { + "callpath": { + "dyninst": { + "libdwarf": {libelf: None}, libelf: None }, mpich: None @@ -2976,19 +2866,18 @@ def _patches_assigned(self): return True @staticmethod - def ensure_no_deprecated(root): - """Raise if a deprecated spec is in the dag. - - Args: - root (Spec): root spec to be analyzed + def ensure_no_deprecated(root: "Spec") -> None: + """Raise if a deprecated spec is in the dag of the given root spec. Raises: spack.spec.SpecDeprecatedError: if any deprecated spec is found """ deprecated = [] - with spack.store.STORE.db.read_transaction(): + from spack.store import STORE + + with STORE.db.read_transaction(): for x in root.traverse(): - _, rec = spack.store.STORE.db.query_by_spec_hash(x.dag_hash()) + _, rec = STORE.db.query_by_spec_hash(x.dag_hash()) if rec and rec.deprecated_for: deprecated.append(rec) if deprecated: @@ -3006,6 +2895,8 @@ def _mark_root_concrete(self, value=True): return self._concrete = value self._validate_version() + for variant in self.variants.values(): + variant.concrete = True def _validate_version(self): # Specs that were concretized with just a git sha as version, without associated @@ -3044,23 +2935,23 @@ def _mark_concrete(self, value=True): def _finalize_concretization(self): """Assign hashes to this spec, and mark it concrete. - There are special semantics to consider for `package_hash`, because we can't + There are special semantics to consider for ``package_hash``, because we can't call it on *already* concrete specs, but we need to assign it *at concretization time* to just-concretized specs. So, the concretizer must assign the package hash *before* marking their specs concrete (so that we know which specs were already concrete before this latest concretization). - `dag_hash` is also tricky, since it cannot compute `package_hash()` lazily. - Because `package_hash` needs to be assigned *at concretization time*, - `to_node_dict()` can't just assume that it can compute `package_hash` itself - -- it needs to either see or not see a `_package_hash` attribute. + ``dag_hash`` is also tricky, since it cannot compute ``package_hash()`` lazily. 
+ Because ``package_hash`` needs to be assigned *at concretization time*, + ``to_node_dict()`` can't just assume that it can compute ``package_hash`` itself + -- it needs to either see or not see a ``_package_hash`` attribute. - Rules of thumb for `package_hash`: - 1. Old-style concrete specs from *before* `dag_hash` included `package_hash` - will not have a `_package_hash` attribute at all. - 2. New-style concrete specs will have a `_package_hash` assigned at + Rules of thumb for ``package_hash``: + 1. Old-style concrete specs from *before* ``dag_hash`` included ``package_hash`` + will not have a ``_package_hash`` attribute at all. + 2. New-style concrete specs will have a ``_package_hash`` assigned at concretization time. - 3. Abstract specs will not have a `_package_hash` attribute at all. + 3. Abstract specs will not have a ``_package_hash`` attribute at all. """ for spec in self.traverse(): @@ -3123,11 +3014,8 @@ def validate_or_raise(self): substitute_abstract_variants(spec) @staticmethod - def ensure_valid_variants(spec): - """Ensures that the variant attached to a spec are valid. - - Args: - spec (Spec): spec to be analyzed + def ensure_valid_variants(spec: "Spec") -> None: + """Ensures that the variant attached to the given spec are valid. Raises: spack.variant.UnknownVariantError: on the first unknown variant found @@ -3150,28 +3038,59 @@ def ensure_valid_variants(spec): f"No such variant {not_existing} for spec: '{spec}'", list(not_existing) ) - def constrain(self, other, deps=True): - """Intersect self with other in-place. Return True if self changed, False otherwise. + def constrain(self, other, deps=True) -> bool: + """Constrains self with other, and returns True if self changed, False otherwise. Args: other: constraint to be added to self - deps: if False, constrain only the root node, otherwise constrain dependencies - as well. + deps: if False, constrain only the root node, otherwise constrain dependencies as well Raises: spack.error.UnsatisfiableSpecError: when self cannot be constrained """ + return self._constrain(other, deps=deps, resolve_virtuals=True) + + def _constrain_symbolically(self, other, deps=True) -> bool: + """Constrains self with other, and returns True if self changed, False otherwise. + + This function has no notion of virtuals, so it does not need a repository. + + Args: + other: constraint to be added to self + deps: if False, constrain only the root node, otherwise constrain dependencies as well + + Raises: + spack.error.UnsatisfiableSpecError: when self cannot be constrained + + Examples: + >>> from spack.spec import Spec, UnsatisfiableDependencySpecError + >>> s = Spec("hdf5 ^mpi@4") + >>> t = Spec("hdf5 ^mpi=openmpi") + >>> try: + ... s.constrain(t) + ... except UnsatisfiableDependencySpecError as e: + ... print(e) + ... 
+ hdf5 ^mpi=openmpi does not satisfy hdf5 ^mpi@4 + >>> s._constrain_symbolically(t) + True + >>> s + hdf5 ^mpi@4 ^mpi=openmpi + """ + return self._constrain(other, deps=deps, resolve_virtuals=False) + + def _constrain(self, other, deps=True, *, resolve_virtuals: bool): # If we are trying to constrain a concrete spec, either the spec # already satisfies the constraint (and the method returns False) # or it raises an exception if self.concrete: - if self.satisfies(other): + if self._satisfies(other, resolve_virtuals=resolve_virtuals): return False else: raise spack.error.UnsatisfiableSpecError(self, other, "constrain a concrete spec") other = self._autospec(other) - if other.concrete and other.satisfies(self): + if other.concrete and other._satisfies(self, resolve_virtuals=resolve_virtuals): self._dup(other) return True @@ -3229,14 +3148,14 @@ def constrain(self, other, deps=True): changed = True if deps: - changed |= self._constrain_dependencies(other) + changed |= self._constrain_dependencies(other, resolve_virtuals=resolve_virtuals) if other.concrete and not self.concrete and other.satisfies(self): self._finalize_concretization() return changed - def _constrain_dependencies(self, other: "Spec") -> bool: + def _constrain_dependencies(self, other: "Spec", resolve_virtuals: bool = True) -> bool: """Apply constraints of other spec's dependencies to this spec.""" if not other._dependencies: return False @@ -3244,7 +3163,7 @@ def _constrain_dependencies(self, other: "Spec") -> bool: # TODO: might want more detail than this, e.g. specific deps # in violation. if this becomes a priority get rid of this # check and be more specific about what's wrong. - if not other._intersects_dependencies(self): + if not other._intersects_dependencies(self, resolve_virtuals=resolve_virtuals): raise UnsatisfiableDependencySpecError(other, self) if any(not d.name for d in other.traverse(root=False)): @@ -3259,34 +3178,24 @@ def _constrain_dependencies(self, other: "Spec") -> bool: existing[0].spec.constrain(edge.spec) existing[0].update_deptypes(edge.depflag) existing[0].update_virtuals(edge.virtuals) + existing[0].direct |= edge.direct else: self.add_dependency_edge( edge.spec, depflag=edge.depflag, virtuals=edge.virtuals, direct=edge.direct, + propagation=edge.propagation, when=edge.when, ) return self != reference_spec - def common_dependencies(self, other): - """Return names of dependencies that self and other have in common.""" - common = set(s.name for s in self.traverse(root=False)) - common.intersection_update(s.name for s in other.traverse(root=False)) - return common - def constrained(self, other, deps=True): """Return a constrained copy without modifying this spec.""" clone = self.copy(deps=deps) clone.constrain(other, deps) return clone - def direct_dep_difference(self, other): - """Returns dependencies in self that are not in other.""" - mine = set(dname for dname in self._dependencies) - mine.difference_update(dname for dname in other._dependencies) - return mine - def _autospec(self, spec_like): """ Used to convert arguments to specs. 
If spec_like is a spec, returns @@ -3308,16 +3217,21 @@ def intersects(self, other: Union[str, "Spec"], deps: bool = True) -> bool: other: spec to be checked for compatibility deps: if True check compatibility of dependency nodes too, if False only check root """ + return self._intersects(other=other, deps=deps, resolve_virtuals=True) + + def _intersects( + self, other: Union[str, "Spec"], deps: bool = True, resolve_virtuals: bool = True + ) -> bool: other = self._autospec(other) if other.concrete and self.concrete: return self.dag_hash() == other.dag_hash() elif self.concrete: - return self.satisfies(other) + return self._satisfies(other, resolve_virtuals=resolve_virtuals) elif other.concrete: - return other.satisfies(self) + return other._satisfies(self, resolve_virtuals=resolve_virtuals) # From here we know both self and other are not concrete self_hash = self.abstract_hash @@ -3332,6 +3246,9 @@ def intersects(self, other: Union[str, "Spec"], deps: bool = True) -> bool: # If the names are different, we need to consider virtuals if self.name != other.name and self.name and other.name: + if not resolve_virtuals: + return False + self_virtual = spack.repo.PATH.is_virtual(self.name) other_virtual = spack.repo.PATH.is_virtual(other.name) if self_virtual and other_virtual: @@ -3384,11 +3301,11 @@ def intersects(self, other: Union[str, "Spec"], deps: bool = True) -> bool: # If we need to descend into dependencies, do it, otherwise we're done. if deps: - return self._intersects_dependencies(other) + return self._intersects_dependencies(other, resolve_virtuals=resolve_virtuals) return True - def _intersects_dependencies(self, other): + def _intersects_dependencies(self, other, resolve_virtuals: bool = True): if not other._dependencies or not self._dependencies: # one spec *could* eventually satisfy the other return True @@ -3397,9 +3314,14 @@ def _intersects_dependencies(self, other): common_dependencies = {x.name for x in self.dependencies()} common_dependencies &= {x.name for x in other.dependencies()} for name in common_dependencies: - if not self[name].intersects(other[name], deps=True): + if not self[name]._intersects( + other[name], deps=True, resolve_virtuals=resolve_virtuals + ): return False + if not resolve_virtuals: + return True + # For virtual dependencies, we need to dig a little deeper. self_index = spack.provider_index.ProviderIndex( repository=spack.repo.PATH, specs=self.traverse(), restrict=True @@ -3434,7 +3356,20 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: Args: other: spec to be satisfied - deps: if True descend to dependencies, otherwise only check root node + deps: if True, descend to dependencies, otherwise only check root node + """ + return self._satisfies(other=other, deps=deps, resolve_virtuals=True) + + def _satisfies( + self, other: Union[str, "Spec"], deps: bool = True, resolve_virtuals: bool = True + ) -> bool: + """Return True if all concrete specs matching self also match other, otherwise False. + + Args: + other: spec to be satisfied + deps: if True, descend to dependencies, otherwise only check root node + resolve_virtuals: if True, resolve virtuals in self and other. This requires a + repository to be available. 
""" other = self._autospec(other) @@ -3452,7 +3387,7 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: return False # If the names are different, we need to consider virtuals - if self.name != other.name and self.name and other.name: + if self.name != other.name and self.name and other.name and resolve_virtuals: # A concrete provider can satisfy a virtual dependency. if not spack.repo.PATH.is_virtual(self.name) and spack.repo.PATH.is_virtual( other.name @@ -3514,21 +3449,20 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: lhs_edges: Dict[str, Set[DependencySpec]] = collections.defaultdict(set) mock_nodes_from_old_specfiles = set() for rhs_edge in other.traverse_edges(root=False, cover="edges"): - # The condition cannot be applied in any case, skip the edge - test_root = rhs_edge.parent.name in (None, self.name) - if test_root and not self.intersects(rhs_edge.when): - continue - - if ( - not test_root - and rhs_edge.parent.name in self - and not self[rhs_edge.parent.name].intersects(rhs_edge.when) + # Check satisfaction of the dependency only if its when condition can apply + if not rhs_edge.parent.name or rhs_edge.parent.name == self.name: + test_spec = self + elif rhs_edge.parent.name in self: + test_spec = self[rhs_edge.parent.name] + else: + test_spec = None + if test_spec and not test_spec._intersects( + rhs_edge.when, resolve_virtuals=resolve_virtuals ): continue # If we are checking for ^mpi we need to verify if there is any edge - is_virtual_node = spack.repo.PATH.is_virtual(rhs_edge.spec.name) - if is_virtual_node: + if resolve_virtuals and spack.repo.PATH.is_virtual(rhs_edge.spec.name): # Don't mutate objects in memory that may be referred elsewhere rhs_edge = rhs_edge.copy() rhs_edge.update_virtuals(virtuals=(rhs_edge.spec.name,)) @@ -3543,14 +3477,17 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: # # The same assumptions hold on Spec.constrain, and Spec.intersect current_node = self - if rhs_edge.parent.name is not None and rhs_edge.parent.name != rhs_edge.spec.name: + if rhs_edge.parent.name and rhs_edge.parent.name != rhs_edge.spec.name: try: current_node = self[rhs_edge.parent.name] except KeyError: return False if current_node.original_spec_format() < 5 or ( - current_node.original_spec_format() >= 5 and current_node.external + # If the current external node has dependencies, it has no annotations + current_node.original_spec_format() >= 5 + and current_node.external + and not current_node._dependencies ): compiler_spec = current_node.annotations.compiler_node_attribute if compiler_spec is None: @@ -3559,13 +3496,29 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: mock_nodes_from_old_specfiles.add(compiler_spec) # This checks that the single node compiler spec satisfies the request # of a direct dependency. The check is not perfect, but based on heuristic. 
- if not compiler_spec.satisfies(rhs_edge.spec): + if not compiler_spec._satisfies( + rhs_edge.spec, resolve_virtuals=resolve_virtuals + ): return False else: - name = rhs_edge.spec.name if not is_virtual_node else None - candidate_edges = current_node.edges_to_dependencies( - name=name, virtuals=rhs_edge.virtuals or None + # If the branch is % or ^, check if we have a corresponding + # branch in the lhs + candidate_edges = [] + if resolve_virtuals and spack.repo.PATH.is_virtual(rhs_edge.spec.name): + candidate_edges = current_node.edges_to_dependencies( + name=rhs_edge.spec.name + ) + + name = ( + None + if resolve_virtuals and spack.repo.PATH.is_virtual(rhs_edge.spec.name) + else rhs_edge.spec.name + ) + candidate_edges.extend( + current_node.edges_to_dependencies( + name=name, virtuals=rhs_edge.virtuals or None + ) ) # Select at least the deptypes on the rhs_edge, and conditional edges that # constrain a bigger portion of the search space (so it's rhs.when <= lhs.when) @@ -3573,9 +3526,14 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: lhs_edge.spec for lhs_edge in candidate_edges if ((lhs_edge.depflag & rhs_edge.depflag) ^ rhs_edge.depflag) == 0 - and rhs_edge.when.satisfies(lhs_edge.when) + and rhs_edge.when._satisfies( + lhs_edge.when, resolve_virtuals=resolve_virtuals + ) ] - if not candidates or not any(x.satisfies(rhs_edge.spec) for x in candidates): + if not candidates or not any( + x._satisfies(rhs_edge.spec, resolve_virtuals=resolve_virtuals) + for x in candidates + ): return False continue @@ -3602,10 +3560,10 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: # We don't have edges to this dependency current_dependency_name = rhs_edge.spec.name - if current_dependency_name is not None and current_dependency_name not in lhs_edges: + if current_dependency_name and current_dependency_name not in lhs_edges: return False - if current_dependency_name is None: + if not current_dependency_name: # Here we have an anonymous spec e.g. ^ dev_path=* candidate_edges = list(itertools.chain(*lhs_edges.values())) @@ -3613,19 +3571,25 @@ def satisfies(self, other: Union[str, "Spec"], deps: bool = True) -> bool: candidate_edges = [ lhs_edge for lhs_edge in lhs_edges[current_dependency_name] - if rhs_edge.when.satisfies(lhs_edge.when) + if rhs_edge.when._satisfies(lhs_edge.when, resolve_virtuals=resolve_virtuals) ] if not candidate_edges: return False for virtual in rhs_edge.virtuals: - has_virtual = any(virtual in edge.virtuals for edge in candidate_edges) + # Check the name because ^mpi has the "mpi" virtual + has_virtual = any( + virtual in edge.virtuals or virtual == edge.spec.name + for edge in candidate_edges + ) if not has_virtual: return False for lhs_edge in candidate_edges: - if lhs_edge.spec.satisfies(rhs_edge.spec, deps=False): + if lhs_edge.spec._satisfies( + rhs_edge.spec, deps=False, resolve_virtuals=resolve_virtuals + ): break else: return False @@ -3663,7 +3627,13 @@ def patches(self): return self._patches - def _dup(self, other: "Spec", deps: Union[bool, dt.DepTypes, dt.DepFlag] = True) -> bool: + def _dup( + self, + other: "Spec", + deps: Union[bool, dt.DepTypes, dt.DepFlag] = True, + *, + propagation: Optional[PropagationPolicy] = None, + ) -> bool: """Copies "other" into self, by overwriting all attributes. 
Args: @@ -3701,8 +3671,8 @@ def _dup(self, other: "Spec", deps: Union[bool, dt.DepTypes, dt.DepFlag] = True) self._build_spec = other._build_spec # Clear dependencies - self._dependents = _EdgeMap(store_by_child=False) - self._dependencies = _EdgeMap(store_by_child=True) + self._dependents = {} + self._dependencies = {} # FIXME: we manage _patches_in_order_of_appearance specially here # to keep it from leaking out of spec.py, but we should figure @@ -3726,7 +3696,7 @@ def _dup(self, other: "Spec", deps: Union[bool, dt.DepTypes, dt.DepFlag] = True) depflag = dt.ALL if isinstance(deps, (tuple, list, str)): depflag = dt.canonicalize(deps) - self._dup_deps(other, depflag) + self._dup_deps(other, depflag, propagation=propagation) self._prefix = other._prefix self._concrete = other._concrete @@ -3744,7 +3714,9 @@ def _dup(self, other: "Spec", deps: Union[bool, dt.DepTypes, dt.DepFlag] = True) return changed - def _dup_deps(self, other, depflag: dt.DepFlag): + def _dup_deps( + self, other, depflag: dt.DepFlag, propagation: Optional[PropagationPolicy] = None + ): def spid(spec): return id(spec) @@ -3759,10 +3731,12 @@ def spid(spec): if spid(edge.spec) not in new_specs: new_specs[spid(edge.spec)] = edge.spec.copy(deps=False) + edge_propagation = edge.propagation if propagation is None else propagation new_specs[spid(edge.parent)].add_dependency_edge( new_specs[spid(edge.spec)], depflag=edge.depflag, virtuals=edge.virtuals, + propagation=edge_propagation, direct=edge.direct, when=edge.when, ) @@ -3771,8 +3745,8 @@ def copy(self, deps: Union[bool, dt.DepTypes, dt.DepFlag] = True, **kwargs): """Make a copy of this spec. Args: - deps: Defaults to True. If boolean, controls - whether dependencies are copied (copied if True). If a + deps: Defaults to :data:`True`. If boolean, controls + whether dependencies are copied (copied if :data:`True`). If a DepTypes or DepFlag is provided, *only* matching dependencies are copied. kwargs: additional arguments for internal use (passed to ``_dup``). @@ -3791,7 +3765,7 @@ def copy(self, deps: Union[bool, dt.DepTypes, dt.DepFlag] = True, **kwargs): Only build and run dependencies:: - deps=('build', 'run'): + deps=("build", "run"): """ clone = Spec.__new__(Spec) @@ -4074,35 +4048,98 @@ def edges(): def namespace_if_anonymous(self): return self.namespace if not self.name else None - def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = False) -> str: - r"""Prints out attributes of a spec according to a format string. - - Using an ``{attribute}`` format specifier, any field of the spec can be + @property + def spack_root(self): + """Special field for using ``{spack_root}`` in :meth:`format`.""" + return spack.paths.spack_root + + @property + def spack_install(self): + """Special field for using ``{spack_install}`` in :meth:`format`.""" + from spack.store import STORE + + return STORE.layout.root + + def _format_default(self) -> str: + """Fast path for formatting with DEFAULT_FORMAT and no color. + + This method manually concatenates the string representation of spec attributes, + avoiding the regex parsing overhead of the general format() method. 
+ """ + parts = [] + + if self.name: + parts.append(self.name) + + if self.versions: + version_str = str(self.versions) + if version_str and version_str != ":": # only include if not full range + parts.append(f"@{version_str}") + + compiler_flags_str = str(self.compiler_flags) + if compiler_flags_str: + parts.append(compiler_flags_str) + + variants_str = str(self.variants) + if variants_str: + parts.append(variants_str) + + if not self.name and self.namespace: + parts.append(f" namespace={self.namespace}") + + if self.architecture: + if self.architecture.platform: + parts.append(f" platform={self.architecture.platform}") + if self.architecture.os: + parts.append(f" os={self.architecture.os}") + if self.architecture.target: + parts.append(f" target={self.architecture.target}") + + if self.abstract_hash: + parts.append(f"/{self.abstract_hash}") + + return "".join(parts).strip() + + def format( + self, + format_string: str = DEFAULT_FORMAT, + color: Optional[bool] = False, + *, + highlight_version_fn: Optional[Callable[["Spec"], bool]] = None, + highlight_variant_fn: Optional[Callable[["Spec", str], bool]] = None, + ) -> str: + r"""Prints out attributes of a spec according to a format string. + + Using an ``{attribute}`` format specifier, any field of the spec can be selected. Those attributes can be recursive. For example, ``s.format({compiler.version})`` will print the version of the compiler. If the attribute in a format specifier evaluates to ``None``, then the format specifier will evaluate to the empty string, ``""``. - Commonly used attributes of the Spec for format strings include:: + Commonly used attributes of the Spec for format strings include: + + .. code-block:: text - name - version - compiler_flags - compilers - variants - architecture - architecture.platform - architecture.os - architecture.target - prefix - namespace + name + version + compiler_flags + compilers + variants + architecture + architecture.platform + architecture.os + architecture.target + prefix + namespace - Some additional special-case properties can be added:: + Some additional special-case properties can be added: - hash[:len] The DAG hash with optional length argument - spack_root The spack root directory - spack_install The spack install directory + .. code-block:: text + + hash[:len] The DAG hash with optional length argument + spack_root The spack root directory + spack_install The spack install directory The ``^`` sigil can be used to access dependencies by name. ``s.format({^mpi.name})`` will print the name of the MPI implementation in the @@ -4110,22 +4147,22 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa The ``@``, ``%``, and ``/`` sigils can be used to include the sigil with the printed string. These sigils may only be used with the appropriate attributes, - listed below:: + listed below: - @ ``{@version}``, ``{@compiler.version}`` - % ``{%compiler}``, ``{%compiler.name}`` - / ``{/hash}``, ``{/hash:7}``, etc + * ``@``: ``{@version}``, ``{@compiler.version}`` + * ``%``: ``{%compiler}``, ``{%compiler.name}`` + * ``/``: ``{/hash}``, ``{/hash:7}``, etc The ``@`` sigil may also be used for any other property named ``version``. Sigils printed with the attribute string are only printed if the attribute string is non-empty, and are colored according to the color of the attribute. Variants listed by name naturally print with their sigil. 
For example, - ``spec.format('{variants.debug}')`` prints either ``+debug`` or ``~debug`` + ``spec.format("{variants.debug}")`` prints either ``+debug`` or ``~debug`` depending on the name of the variant. Non-boolean variants print as ``name=value``. To print variant names or values independently, use - ``spec.format('{variants..name}')`` or - ``spec.format('{variants..value}')``. + ``spec.format("{variants..name}")`` or + ``spec.format("{variants..value}")``. There are a few attributes on specs that can be specified as key-value pairs that are *not* variants, e.g.: ``os``, ``arch``, ``architecture``, ``target``, @@ -4138,9 +4175,11 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa attribute is not set, you can add whitespace to the key *inside* the braces of the format string, e.g.: - { namespace=namespace} + .. code-block:: text + + { namespace=namespace} - This evaluates to `` namespace=builtin`` if ``namespace`` is set to ``builtin``, + This evaluates to ``" namespace=builtin"`` if ``namespace`` is set to ``builtin``, and to ``""`` if ``namespace`` is ``None``. Spec format strings use ``\`` as the escape character. Use ``\{`` and ``\}`` for @@ -4149,8 +4188,15 @@ def format(self, format_string: str = DEFAULT_FORMAT, color: Optional[bool] = Fa Args: format_string: string containing the format to be expanded color: True for colorized result; False for no color; None for auto color. - + highlight_version_fn: optional callable that returns true on nodes where the version + needs to be highlighted + highlight_variant_fn: optional callable that returns true on variants that need + to be highlighted """ + # Fast path for the common case: default format with no color + if format_string == DEFAULT_FORMAT and color is False: + return self._format_default() + ensure_modern_format_string(format_string) def safe_color(sigil: str, string: str, color_fmt: Optional[str]) -> str: @@ -4173,7 +4219,8 @@ def format_attribute(match_object: Match) -> str: elif not close_brace: raise SpecFormatStringError(f"Missing close brace: '{format_string}'") - current = self if dep is None else self[dep] + current_node = self if dep is None else self[dep] + current = current_node # Hash attributes can return early. # NOTE: we currently treat abstract_hash like an attribute and ignore @@ -4260,31 +4307,43 @@ def format_attribute(match_object: Match) -> str: color = COMPILER_COLOR elif "version" in parts or "versions" in parts: color = VERSION_COLOR + if highlight_version_fn and highlight_version_fn(current_node): + color = HIGHLIGHT_COLOR # return empty string if the value of the attribute is None. 
if current is None: return "" + # Override the color for single variants, if need be + if color and highlight_variant_fn and isinstance(current, VariantMap): + bool_keys, kv_keys = current.partition_keys() + result = "" + + for key in bool_keys: + current_color = color + if highlight_variant_fn(current_node, key): + current_color = HIGHLIGHT_COLOR + + result += safe_color(sig, str(current[key]), current_color) + + for key in kv_keys: + current_color = color + if highlight_variant_fn(current_node, key): + current_color = HIGHLIGHT_COLOR + + # Don't highlight the space before the key/value pair + result += " " + safe_color(sig, f"{current[key]}", current_color) + + return result + # return colored output return safe_color(sig, str(current), color) return SPEC_FORMAT_RE.sub(format_attribute, format_string).strip() - def cformat(self, *args, **kwargs): - """Same as format, but color defaults to auto instead of False.""" - kwargs = kwargs.copy() - kwargs.setdefault("color", None) - return self.format(*args, **kwargs) - - @property - def spack_root(self): - """Special field for using ``{spack_root}`` in Spec.format().""" - return spack.paths.spack_root - - @property - def spack_install(self): - """Special field for using ``{spack_install}`` in Spec.format().""" - return spack.store.STORE.layout.root + def cformat(self, format_string: str = DEFAULT_FORMAT) -> str: + """Same as :meth:`format`, but color defaults to auto instead of False.""" + return self.format(format_string, color=None) def format_path( # self, format_string: str, _path_ctor: Optional[pathlib.PurePath] = None @@ -4292,16 +4351,15 @@ def format_path( format_string: str, _path_ctor: Optional[Callable[[Any], pathlib.PurePath]] = None, ) -> str: - """Given a `format_string` that is intended as a path, generate a string - like from `Spec.format`, but eliminate extra path separators introduced by - formatting of Spec properties. + """Given a ``format_string`` that is intended as a path, generate a string like from + :meth:`format`, but eliminate extra path separators introduced by formatting of Spec + properties. Path separators explicitly added to the string are preserved, so for example - "{name}/{version}" would generate a directory based on the Spec's name, and - a subdirectory based on its version; this function guarantees though that - the resulting string would only have two directories (i.e. that if under - normal circumstances that `str(Spec.version)` would contain a path - separator, it would not in this case). + ``{name}/{version}`` would generate a directory based on the Spec's name, and a + subdirectory based on its version; this function guarantees though that the resulting + string would only have two directories (i.e. that if under normal circumstances that + ``str(self.version)`` would contain a path separator, it would not in this case). 
""" format_component_with_sep = r"\{[^}]*[/\\][^}]*}" if re.search(format_component_with_sep, format_string): @@ -4329,26 +4387,176 @@ def format_path( ] return str(path_ctor(*output_path_components)) - def __str__(self): - if self._concrete: - return self.format("{name}{@version}{/hash}") + def _format_edge_attributes(self, dep: DependencySpec, deptypes=True, virtuals=True): + deptypes_str = ( + f"deptypes={','.join(dt.flag_to_tuple(dep.depflag))}" + if deptypes and dep.depflag + else "" + ) + when_str = f"when='{(dep.when)}'" if dep.when != Spec() else "" + virtuals_str = f"virtuals={','.join(dep.virtuals)}" if virtuals and dep.virtuals else "" - if not self._dependencies: - return self.format() + attrs = " ".join(s for s in (when_str, deptypes_str, virtuals_str) if s) + if attrs: + attrs = f"[{attrs}] " + + return attrs + + def _format_dependencies( + self, + format_string: str = DEFAULT_FORMAT, + include: Optional[Callable[[DependencySpec], bool]] = None, + deptypes: bool = True, + color: Optional[bool] = False, + _force_direct: bool = False, + ): + """Helper for formatting dependencies on specs. + + Arguments: + format_string: format string to use for each dependency + include: predicate to select which dependencies to include + deptypes: whether to format deptypes + color: colorize if True, don't colorize if False, auto-colorize if None + _force_direct: if True, print all dependencies as direct dependencies + (to be removed when we have this metadata on concrete edges) + """ + include = include or (lambda dep: True) + parts = [] + if self.concrete: + direct = self.edges_to_dependencies() + transitive: List[DependencySpec] = [] + else: + direct, transitive = lang.stable_partition( + self.edges_to_dependencies(), predicate_fn=lambda x: x.direct + ) - return self.long_spec + # helper for direct and transitive loops below + def format_edge(edge: DependencySpec, sigil: str, dep_spec: Optional[Spec] = None) -> str: + dep_spec = dep_spec or edge.spec + dep_format = dep_spec.format(format_string, color=color) + + edge_attributes = ( + self._format_edge_attributes(edge, deptypes=deptypes, virtuals=False) + if edge.depflag or edge.when != Spec() + else "" + ) + virtuals = f"{','.join(edge.virtuals)}=" if edge.virtuals else "" + star = _anonymous_star(edge, dep_format) + + return f"{sigil}{edge_attributes}{star}{virtuals}{dep_format}" + + # direct dependencies + for edge in sorted(direct, key=lambda x: x.spec.name): + if not include(edge): + continue + + # replace legacy compiler names + old_name = edge.spec.name + new_name = spack.aliases.BUILTIN_TO_LEGACY_COMPILER.get(old_name) + try: + # this is ugly but copies can be expensive + sigil = "%" + if new_name: + edge.spec.name = new_name + + if edge.propagation == PropagationPolicy.PREFERENCE: + sigil = "%%" + + parts.append(format_edge(edge, sigil=sigil, dep_spec=edge.spec)) + finally: + edge.spec.name = old_name + + if self.concrete: + # Concrete specs should go no further, as the complexity + # below is O(paths) + return " ".join(parts).strip() + + # transitive dependencies (with any direct dependencies) + for edge in sorted(transitive, key=lambda x: x.spec.name): + if not include(edge): + continue + sigil = "%" if _force_direct else "^" # hack til direct deps represented better + parts.append(format_edge(edge, sigil, edge.spec)) + + # also recursively add any direct dependencies of transitive dependencies + if edge.spec._dependencies: + parts.append( + edge.spec._format_dependencies( + format_string=format_string, + include=include, + 
deptypes=deptypes, + _force_direct=_force_direct, + ) + ) + + return " ".join(parts).strip() + + def _long_spec(self, color: Optional[bool] = False) -> str: + """Helper for :attr:`long_spec` and :attr:`clong_spec`.""" + if self.concrete: + return self.tree(format=DISPLAY_FORMAT, color=color) + return f"{self.format(color=color)} {self._format_dependencies(color=color)}".strip() + + def _short_spec(self, color: Optional[bool] = False) -> str: + """Helper for :attr:`short_spec` and :attr:`cshort_spec`.""" + return self.format( + "{name}{@version}{variants}" + "{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}" + "{/hash:7}", + color=color, + ) @property - def colored_str(self): - root_str = [self.cformat()] - sorted_dependencies = sorted( - self.traverse(root=False), key=lambda x: (x.name, x.abstract_hash) + def compilers(self): + # TODO: get rid of the space here and make formatting smarter + return " " + self._format_dependencies( + "{name}{@version}", + include=lambda dep: any(lang in dep.virtuals for lang in ("c", "cxx", "fortran")), + deptypes=False, + _force_direct=True, ) - sorted_dependencies = [ - d.cformat("{edge_attributes} " + DISPLAY_FORMAT) for d in sorted_dependencies - ] - spec_str = " ^".join(root_str + sorted_dependencies) - return spec_str.strip() + + @property + def long_spec(self): + """Long string of the spec, including dependencies.""" + return self._long_spec(color=False) + + @property + def clong_spec(self): + """Returns an auto-colorized version of :attr:`long_spec`.""" + return self._long_spec(color=None) + + @property + def short_spec(self): + """Short string of the spec, with hash and without dependencies.""" + return self._short_spec(color=False) + + @property + def cshort_spec(self): + """Returns an auto-colorized version of :attr:`short_spec`.""" + return self._short_spec(color=None) + + @property + def colored_str(self) -> str: + """Auto-colorized string representation of this spec.""" + return self._str(color=None) + + def _str(self, color: Optional[bool] = False) -> str: + """String representation of this spec. 
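+ + Concrete specs render as ``{name}{@version}{/hash}``; abstract specs without + dependencies use the plain :meth:`format` output, and all other specs the long + form that includes dependencies.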
+ Args: + color: colorize if True, don't colorize if False, auto-colorize if None + """ + if self._concrete: + return self.format("{name}{@version}{/hash}", color=color) + + if not self._dependencies: + return self.format(color=color) + + return self._long_spec(color=color) + + def __str__(self) -> str: + """String representation of this spec.""" + return self._str(color=False) def install_status(self) -> InstallStatus: """Helper for tree to print DB install status.""" @@ -4358,7 +4566,9 @@ def install_status(self) -> InstallStatus: if self.external: return InstallStatus.external - upstream, record = spack.store.STORE.db.query_by_spec_hash(self.dag_hash()) + from spack.store import STORE + + upstream, record = STORE.db.query_by_spec_hash(self.dag_hash()) if not record: return InstallStatus.absent elif upstream and record.installed: @@ -4373,7 +4583,9 @@ def _installed_explicitly(self): if not self.concrete: return None try: - record = spack.store.STORE.db.get_record(self) + from spack.store import STORE + + record = STORE.db.get_record(self) return record.explicit except KeyError: return None @@ -4395,6 +4607,8 @@ def tree( status_fn: Optional[Callable[["Spec"], InstallStatus]] = None, prefix: Optional[Callable[["Spec"], str]] = None, key=id, + highlight_version_fn: Optional[Callable[["Spec"], bool]] = None, + highlight_variant_fn: Optional[Callable[["Spec", str], bool]] = None, ) -> str: """Prints out this spec and its dependencies, tree-formatted with indentation. @@ -4407,7 +4621,7 @@ def tree( depth: print the depth from the root hashes: if True, print the hash of each node hashlen: length of the hash to be printed - cover: either "nodes" or "edges" + cover: either ``"nodes"`` or ``"edges"`` indent: extra indentation for the tree being printed format: format to be used to print each node deptypes: dependency types to be represented in the tree @@ -4418,6 +4632,10 @@ def tree( installation status prefix: optional callable that takes a node as an argument and return its installation prefix + highlight_version_fn: optional callable that returns true on nodes where the version + needs to be highlighted + highlight_variant_fn: optional callable that returns true on variants that need + to be highlighted """ return tree( [self], @@ -4435,6 +4653,8 @@ def tree( status_fn=status_fn, prefix=prefix, key=key, + highlight_version_fn=highlight_version_fn, + highlight_variant_fn=highlight_variant_fn, ) def __repr__(self): @@ -4462,16 +4682,16 @@ def build_spec(self, value): def trim(self, dep_name): """ - Remove any package that is or provides `dep_name` transitively + Remove any package that is or provides ``dep_name`` transitively from this tree. This can also remove other dependencies if - they are only present because of `dep_name`. + they are only present because of ``dep_name``. 
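+ + A short sketch (the virtual name is illustrative):: + + # drop the MPI provider and anything only reachable through it + spec.trim("mpi")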
""" for spec in list(self.traverse()): - new_dependencies = _EdgeMap() # A new _EdgeMap + new_dependencies = {} for pkg_name, edge_list in spec._dependencies.items(): for edge in edge_list: if (dep_name not in edge.virtuals) and (not dep_name == edge.spec.name): - new_dependencies.add(edge) + _add_edge_to_map(new_dependencies, edge.spec.name, edge) spec._dependencies = new_dependencies def _virtuals_provided(self, root): @@ -4534,8 +4754,8 @@ def _splice_detach_and_add_dependents(self, replacement, context): if edge.parent not in ancestors_in_context: continue - edge.parent._dependencies.edges[self.name].remove(edge) - self._dependents.edges[edge.parent.name].remove(edge) + edge.parent._dependencies[self.name].remove(edge) + self._dependents[edge.parent.name].remove(edge) edge.parent._add_dependency(replacement, depflag=edge.depflag, virtuals=edge.virtuals) def _splice_helper(self, replacement): @@ -4608,41 +4828,47 @@ def _splice_helper(self, replacement): break def splice(self, other: "Spec", transitive: bool = True) -> "Spec": - """Returns a new, spliced concrete Spec with the "other" dependency and, + """Returns a new, spliced concrete :class:`Spec` with the ``other`` dependency and, optionally, its dependencies. Args: other: alternate dependency transitive: include other's dependencies - Returns: a concrete, spliced version of the current Spec + Returns: a concrete, spliced version of the current :class:`Spec` - When transitive is "True", use the dependencies from "other" to reconcile - conflicting dependencies. When transitive is "False", use dependencies from self. + When transitive is :data:`True`, use the dependencies from ``other`` to reconcile + conflicting dependencies. When transitive is :data:`False`, use dependencies from self. For example, suppose we have the following dependency graph: - T - | \ - Z<-H + .. code-block:: text - Spec T depends on H and Z, and H also depends on Z. Now we want to use - a different H, called H'. This function can be used to splice in H' to - create a new spec, called T*. If H' was built with Z', then transitive - "True" will ensure H' and T* both depend on Z': + T + | \\ + Z<-H - T* - | \ - Z'<-H' + Spec ``T`` depends on ``H`` and ``Z``, and ``H`` also depends on ``Z``. Now we want to use + a different ``H``, called ``H'``. This function can be used to splice in ``H'`` to + create a new spec, called ``T*``. If ``H'`` was built with ``Z'``, then ``transitive=True`` + will ensure ``H'`` and ``T*`` both depend on ``Z'``: - If transitive is "False", then H' and T* will both depend on - the original Z, resulting in a new H'* + .. code-block:: text - T* - | \ - Z<-H'* + T* + | \\ + Z'<-H' - Provenance of the build is tracked through the "build_spec" property + If ``transitive=False``, then ``H'`` and ``T*`` will both depend on + the original ``Z``, resulting in a new ``H'*``: + + .. code-block:: text + + T* + | \\ + Z<-H'* + + Provenance of the build is tracked through the :attr:`build_spec` property of the spliced spec and any correspondingly modified dependency specs. The build specs are set to that of the original spec, so the original spec's provenance is preserved unchanged.""" @@ -4714,6 +4940,89 @@ def mask_build_deps(in_spec): return spec + def mutate(self, mutator, rehash=True) -> bool: + """Mutate concrete spec to match constraints represented by mutator. + + Mutation can modify the spec version, variants, compiler flags, and architecture. + Mutation cannot change the spec name, namespace, dependencies, or abstract_hash. 
+ Any attribute which is unset will not be touched. + Variant values can be replaced with the literal ``None`` to remove the variant. + ``None`` as a variant value is represented by ``VariantValue(..., (None,))``. + + If ``rehash`` is true, the concrete spec and its dependents have their hashes updated. + + Returns whether the spec was modified by the mutation.""" + assert self.concrete + + if mutator.name and mutator.name != self.name: + raise SpecMutationError(f"Cannot mutate spec name: spec {self} mutator {mutator}") + + if mutator.namespace and mutator.namespace != self.namespace: + raise SpecMutationError(f"Cannot mutate spec namespace: spec {self} mutator {mutator}") + + if len(mutator.dependencies()) > 0: + raise SpecMutationError(f"Cannot mutate dependencies: spec {self} mutator {mutator}") + + if ( + mutator.versions != vn.VersionList(":") + and not mutator.versions.concrete_range_as_version + ): + raise SpecMutationError( + f"Cannot mutate abstract version: spec {self} mutator {mutator}" + ) + + if mutator.abstract_hash and mutator.abstract_hash != self.abstract_hash: + raise SpecMutationError(f"Cannot mutate abstract_hash: spec {self} mutator {mutator}") + + changed = False + + if mutator.versions != vn.VersionList(":") and self.versions != mutator.versions: + self.versions = mutator.versions + changed = True + + for name, variant in mutator.variants.items(): + if variant == self.variants.get(name, None): + continue + + old_variant = self.variants.pop(name, None) + if not isinstance(variant, vt.VariantValueRemoval): # sentinel type for removing variant + if old_variant: + variant.type = old_variant.type # coerce variant type to match + self.variants[name] = variant + changed = True + + for name, flags in mutator.compiler_flags.items(): + if not flags or flags == self.compiler_flags[name]: + continue + self.compiler_flags[name] = flags + changed = True + + if mutator.architecture: + if mutator.platform and mutator.platform != self.architecture.platform: + self.architecture.platform = mutator.platform + changed = True + if mutator.os and mutator.os != self.architecture.os: + self.architecture.os = mutator.os + changed = True + if mutator.target and mutator.target != self.architecture.target: + self.architecture.target = mutator.target + changed = True + + if changed and rehash: + roots = [] + for parent in spack.traverse.traverse_nodes([self], direction="parents"): + if not parent.dependents(): + roots.append(parent) + # invalidate hashes + parent._mark_root_concrete(False) + parent.clear_caches() + + for root in roots: + # compute new hashes on full DAGs + root._finalize_concretization() + + return changed + def clear_caches(self, ignore: Tuple[str, ...] = ()) -> None: """ Clears all cached hashes in a Spec, while preserving other properties. @@ -4740,8 +5049,52 @@ def __hash__(self): # so we hope it only runs on abstract specs, which are small. return hash(lang.tuplify(self._cmp_iter)) - def __reduce__(self): - return Spec.from_dict, (self.to_dict(hash=ht.dag_hash),) + def __getstate__(self): + state = self.__dict__.copy() + # The package is lazily loaded upon demand. + state.pop("_package", None) + # As with to_dict, do not include dependents. This avoids serializing more than intended.
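+ # (the dependents map is rebuilt from the dependency edges in __setstate__ below)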
+ state.pop("_dependents", None) + + # Do not pickle attributes dynamically set by SpecBuildInterface + state.pop("wrapped_obj", None) + state.pop("token", None) + state.pop("last_query", None) + state.pop("indirect_spec", None) + + # Optimize variants and compiler_flags serialization + variants = state.pop("variants", None) + if variants: + state["_variants_data"] = variants.dict + flags = state.pop("compiler_flags", None) + if flags: + state["_compiler_flags_data"] = flags.dict + + return state + + def __setstate__(self, state): + variants_data = state.pop("_variants_data", None) + compiler_flags_data = state.pop("_compiler_flags_data", None) + self.__dict__.update(state) + self._package = None + + # Reconstruct variants and compiler_flags + self.variants = VariantMap(self) + self.compiler_flags = FlagMap(self) + if variants_data is not None: + self.variants.dict = variants_data + if compiler_flags_data is not None: + self.compiler_flags.dict = compiler_flags_data + + # Reconstruct dependents map + if not hasattr(self, "_dependents"): + self._dependents = {} + + for edges in self._dependencies.values(): + for edge in edges: + if not hasattr(edge.spec, "_dependents"): + edge.spec._dependents = {} + _add_edge_to_map(edge.spec._dependents, edge.parent.name, edge) def attach_git_version_lookup(self): # Add a git lookup method for GitVersions @@ -4900,19 +5253,10 @@ def __str__(self): if not self: return "" - # print keys in order - sorted_keys = sorted(self.keys()) - # Separate boolean variants from key-value pairs as they print # differently. All booleans go first to avoid ' ~foo' strings that # break spec reuse in zsh. - bool_keys = [] - kv_keys = [] - for key in sorted_keys: - if self[key].type == vt.VariantType.BOOL: - bool_keys.append(key) - else: - kv_keys.append(key) + bool_keys, kv_keys = self.partition_keys() # add spaces before and after key/value variants. string = io.StringIO() @@ -4926,9 +5270,16 @@ def __str__(self): return string.getvalue() + def partition_keys(self) -> Tuple[List[str], List[str]]: + """Partition the keys of the map into two lists: booleans and key-value pairs.""" + bool_keys, kv_keys = lang.stable_partition( + sorted(self.keys()), lambda x: self[x].type == vt.VariantType.BOOL + ) + return bool_keys, kv_keys + def substitute_abstract_variants(spec: Spec): - """Uses the information in `spec.package` to turn any variant that needs + """Uses the information in ``spec.package`` to turn any variant that needs it into a SingleValuedVariant or BoolValuedVariant. This method is best effort. All variants that can be substituted will be @@ -4991,35 +5342,6 @@ def parse_with_version_concrete(spec_like: Union[str, Spec]): return s -def merge_abstract_anonymous_specs(*abstract_specs: Spec): - """Merge the abstracts specs passed as input and return the result. - - The root specs must be anonymous, and it's duty of the caller to ensure that. - - This function merge the abstract specs based on package names. In particular - it doesn't try to resolve virtual dependencies. 
- - Args: - *abstract_specs: abstract specs to be merged - """ - merged_spec = Spec() - for current_spec_constraint in abstract_specs: - merged_spec.constrain(current_spec_constraint, deps=False) - - for name in merged_spec.common_dependencies(current_spec_constraint): - merged_spec[name].constrain(current_spec_constraint[name], deps=False) - - # Update with additional constraints from other spec - for name in current_spec_constraint.direct_dep_difference(merged_spec): - edge = next(iter(current_spec_constraint.edges_to_dependencies(name))) - - merged_spec._add_dependency( - edge.spec.copy(), depflag=edge.depflag, virtuals=edge.virtuals - ) - - return merged_spec - - def reconstruct_virtuals_on_edges(spec: Spec) -> None: """Reconstruct virtuals on edges. Used to read from old DB and reindex.""" virtuals_needed: Dict[str, Set[str]] = {} @@ -5076,7 +5398,8 @@ def from_node_dict(cls, node): for h in ht.HASHES: setattr(spec, h.attr, node.get(h.name, None)) - spec.name = name + # old anonymous spec files had name=None, we use name="" now + spec.name = name if isinstance(name, str) else "" spec.namespace = node.get("namespace", None) if "version" in node or "versions" in node: @@ -5460,6 +5783,73 @@ def eval_conditional(string): return eval(string, valid_variables) +def _inject_patches_variant(root: Spec) -> None: + # This dictionary will store object IDs rather than Specs as keys + # since the Spec __hash__ will change as patches are added to them + spec_to_patches: Dict[int, Set[spack.patch.Patch]] = {} + for s in root.traverse(): + assert s.namespace is not None, ( + f"internal error: {s.name} has no namespace after concretization. " + f"Please report a bug at https://github.com/spack/spack/issues" + ) + + if s.concrete: + continue + + # Add any patches from the package to the spec. + node_patches = { + patch + for cond, patch_list in spack.repo.PATH.get_pkg_class(s.fullname).patches.items() + if s.satisfies(cond) + for patch in patch_list + } + if node_patches: + spec_to_patches[id(s)] = node_patches + + # Also record all patches required on dependencies by depends_on(..., patch=...) 
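+ # these are declared on the parent package and applied to the child spec of the edge, + # conditional on both the parent (``cond``) and the child (``pcond``) specs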
+ for dspec in root.traverse_edges(deptype=dt.ALL, cover="edges", root=False): + if dspec.spec.concrete: + continue + + pkg_deps = spack.repo.PATH.get_pkg_class(dspec.parent.fullname).dependencies + + edge_patches: List[spack.patch.Patch] = [] + for cond, deps_by_name in pkg_deps.items(): + dependency = deps_by_name.get(dspec.spec.name) + if not dependency: + continue + + if not dspec.parent.satisfies(cond): + continue + + for pcond, patch_list in dependency.patches.items(): + if dspec.spec.satisfies(pcond): + edge_patches.extend(patch_list) + + if edge_patches: + spec_to_patches.setdefault(id(dspec.spec), set()).update(edge_patches) + + for spec in root.traverse(): + if id(spec) not in spec_to_patches: + continue + + patches = list(spec_to_patches[id(spec)]) + variant: vt.VariantValue = spec.variants.setdefault( + "patches", vt.MultiValuedVariant("patches", ()) + ) + variant.set(*(p.sha256 for p in patches)) + # FIXME: Monkey patches variant to store patches order + ordered_hashes = [(*p.ordering_key, p.sha256) for p in patches if p.ordering_key] + ordered_hashes.sort() + tty.debug( + f"Ordered hashes [{spec.name}]: " + + ", ".join("/".join(str(e) for e in t) for t in ordered_hashes) + ) + setattr( + variant, "_patches_in_order_of_appearance", [sha256 for _, _, sha256 in ordered_hashes] + ) + + class InvalidVariantForSpecError(spack.error.SpecError): """Raised when an invalid conditional variant is specified.""" @@ -5537,7 +5927,11 @@ def __init__(self, spec): class AmbiguousHashError(spack.error.SpecError): def __init__(self, msg, *specs): - spec_fmt = "{namespace}.{name}{@version}{variants}{ arch=architecture}{/hash:7}" + spec_fmt = ( + "{namespace}.{name}{@version}{variants}" + "{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}" + "{/hash:7}" + ) specs_str = "\n " + "\n ".join(spec.format(spec_fmt) for spec in specs) super().__init__(msg + specs_str) @@ -5549,14 +5943,6 @@ def __init__(self, spec, hash): super().__init__(msg) -class SpecFilenameError(spack.error.SpecError): - """Raised when a spec file name is invalid.""" - - -class NoSuchSpecFileError(SpecFilenameError): - """Raised when a spec file doesn't exist.""" - - class SpecFormatStringError(spack.error.SpecError): """Called for errors in Spec format strings.""" @@ -5610,3 +5996,11 @@ class InvalidSpecDetected(spack.error.SpecError): class SpliceError(spack.error.SpecError): """Raised when a splice is not possible due to dependency or provider satisfaction mismatch. The resulting splice would be unusable.""" + + +class InvalidEdgeError(spack.error.SpecError): + """Raised when an edge doesn't pass validation checks.""" + + +class SpecMutationError(spack.error.SpecError): + """Raised when a mutation is attempted with invalid attributes.""" diff --git a/lib/spack/spack/spec_parser.py b/lib/spack/spack/spec_parser.py index c1f0abdfbe3682..1d5d66691b46c8 100644 --- a/lib/spack/spack/spec_parser.py +++ b/lib/spack/spack/spec_parser.py @@ -43,45 +43,44 @@ vid = [a-zA-Z0-9_][a-zA-Z_0-9-.]* id = [a-zA-Z0-9_][a-zA-Z_0-9-]* -Identifiers using the = command, such as architectures and +Identifiers using the ``=`` command, such as architectures and compiler flags, require a space before the name. -There is one context-sensitive part: ids in versions may contain '.', while +There is one context-sensitive part: ids in versions may contain ``.``, while other ids may not. -There is one ambiguity: since '-' is allowed in an id, you need to put -whitespace space before -variant for it to be tokenized properly. 
You can -either use whitespace, or you can just use ~variant since it means the same -thing. Spack uses ~variant in directory names and in the canonical form of -specs to avoid ambiguity. Both are provided because ~ can cause shell +There is one ambiguity: since ``-`` is allowed in an id, you need to put +whitespace space before ``-variant`` for it to be tokenized properly. You can +either use whitespace, or you can just use ``~variant`` since it means the same +thing. Spack uses ``~variant`` in directory names and in the canonical form of +specs to avoid ambiguity. Both are provided because ``~`` can cause shell expansion when it is the first character in an id typed on the command line. """ import json import pathlib import re import sys -import traceback -import warnings -from typing import Dict, Iterator, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Tuple, Union import spack.config import spack.deptypes import spack.error -import spack.paths -import spack.spec -import spack.util.spack_yaml import spack.version from spack.aliases import LEGACY_COMPILER_TO_BUILTIN +from spack.enums import PropagationPolicy from spack.llnl.util.tty import color from spack.tokenize import Token, TokenBase, Tokenizer +if TYPE_CHECKING: + import spack.spec + #: Valid name for specs and variants. Here we are not using -#: the previous "w[\w.-]*" since that would match most +#: the previous ``w[\w.-]*`` since that would match most #: characters that can be part of a word in any language IDENTIFIER = r"(?:[a-zA-Z_0-9][a-zA-Z_0-9\-]*)" DOTTED_IDENTIFIER = rf"(?:{IDENTIFIER}(?:\.{IDENTIFIER})+)" GIT_HASH = r"(?:[A-Fa-f0-9]{40})" -#: Git refs include branch names, and can contain "." and "/" +#: Git refs include branch names, and can contain ``.`` and ``/`` GIT_REF = r"(?:[a-zA-Z_0-9][a-zA-Z_0-9./\-]*)" GIT_VERSION_PATTERN = rf"(?:(?:git\.(?:{GIT_REF}))|(?:{GIT_HASH}))" @@ -112,8 +111,8 @@ SPLIT_KVP = re.compile(rf"^({NAME})(:?==?)(.*)$") -#: A filename starts either with a "." or a "/" or a "{name}/, or on Windows, a drive letter -#: followed by a colon and "\" or "." or {name}\ +#: A filename starts either with a ``.`` or a ``/`` or a ``{name}/``, or on Windows, a drive letter +#: followed by a colon and ``\`` or ``.`` or ``{name}\`` WINDOWS_FILENAME = r"(?:\.|[a-zA-Z0-9-_]*\\|[a-zA-Z]:\\)(?:[a-zA-Z0-9-_\.\\]*)(?:\.json|\.yaml)" UNIX_FILENAME = r"(?:\.|\/|[a-zA-Z0-9-_]*\/)(?:[a-zA-Z0-9-_\.\/]*)(?:\.json|\.yaml)" FILENAME = WINDOWS_FILENAME if sys.platform == "win32" else UNIX_FILENAME @@ -133,9 +132,9 @@ class SpecTokens(TokenBase): """ # Dependency, with optional virtual assignment specifier - START_EDGE_PROPERTIES = r"(?:[\^%]\[)" + START_EDGE_PROPERTIES = r"(?:(?:\^|\%\%|\%)\[)" END_EDGE_PROPERTIES = rf"(?:\](?:\s*{VIRTUAL_ASSIGNMENT})?)" - DEPENDENCY = rf"(?:[\^\%](?:\s*{VIRTUAL_ASSIGNMENT})?)" + DEPENDENCY = rf"(?:(?:\^|\%\%|\%)(?:\s*{VIRTUAL_ASSIGNMENT})?)" # Version VERSION_HASH_PAIR = rf"(?:@(?:{GIT_VERSION_PATTERN})=(?:{VERSION}))" @@ -246,48 +245,24 @@ def __init__(self, tokens: List[Token], text: str): super().__init__(message) -def _warn_about_variant_after_compiler(literal_str: str, issues: List[str]): - """Issue a warning if variant or other token is preceded by a compiler token. 
The warning is - only issued if it's actionable: either we know the config file it originates from, or we have - call site that's not internal to Spack.""" - ignore = [spack.paths.lib_path, spack.paths.bin_path] - mark = spack.util.spack_yaml.get_mark_from_yaml_data(literal_str) - issue_str = ", ".join(issues) - error = f"{issue_str} in `{literal_str}`" - - # warning from config file - if mark: - warnings.warn(f"{mark.name}:{mark.line + 1}: {error}") - return - - # warning from hopefully package.py - for frame in reversed(traceback.extract_stack()): - if frame.lineno and not any(frame.filename.startswith(path) for path in ignore): - warnings.warn_explicit( - error, - category=spack.error.SpackAPIWarning, - filename=frame.filename, - lineno=frame.lineno, - ) - return - - def parse_virtual_assignment(context: TokenContext) -> Tuple[str]: """Look at subvalues and, if present, extract virtual and a push a substitute token. This handles things like: - * ^c=gcc - * ^c,cxx=gcc - * %[when=+bar] c=gcc - * %[when=+bar] c,cxx=gcc + + * ``^c=gcc`` + * ``^c,cxx=gcc`` + * ``%[when=+bar] c=gcc`` + * ``%[when=+bar] c,cxx=gcc`` Virtual assignment can happen anywhere a dependency node can appear. It is - shorthand for %[virtuals=c,cxx] gcc. + shorthand for ``%[virtuals=c,cxx] gcc``. - The virtuals=substitute key value pair appears in the subvalues of DEPENDENCY - and END_EDGE_PROPERTIES tokens. We extract the virutals and create a token from - the substitute, which is then pushed back on the parser stream so that the head - of the stream can be parsed like a regular node. + The ``virtuals=substitute`` key value pair appears in the subvalues of + :attr:`~spack.spec_parser.SpecTokens.DEPENDENCY` and + :attr:`~spack.spec_parser.SpecTokens.END_EDGE_PROPERTIES` tokens. We extract the virtuals and + create a token from the substitute, which is then pushed back on the parser stream so that the + head of the stream can be parsed like a regular node. Returns: the virtuals assigned, or None if there aren't any @@ -325,7 +300,7 @@ def __init__(self, literal_str: str): self.toolchains = {} configuration = getattr(spack.config, "CONFIG", None) if configuration is not None: - self.toolchains = configuration.get("toolchains", {}) + self.toolchains = configuration.get_config("toolchains") self.parsed_toolchains: Dict[str, "spack.spec.Spec"] = {} def tokens(self) -> List[Token]: @@ -343,7 +318,7 @@ def next_spec( initial_spec: object where to parse the spec. If None a new one will be created. 
- Return + Return: The spec that was parsed """ if not self.ctx.next_token: @@ -356,19 +331,27 @@ def add_dependency(dep, **edge_properties): except spack.error.SpecError as e: raise SpecParsingError(str(e), self.ctx.current_token, self.literal_str) from e - initial_spec = initial_spec or spack.spec.Spec() - root_spec, parser_warnings = SpecNodeParser(self.ctx, self.literal_str).parse(initial_spec) + if not initial_spec: + from spack.spec import Spec + + initial_spec = Spec() + root_spec = SpecNodeParser(self.ctx, self.literal_str).parse(initial_spec) current_spec = root_spec while True: if self.ctx.accept(SpecTokens.START_EDGE_PROPERTIES): is_direct = self.ctx.current_token.value[0] == "%" + propagation = PropagationPolicy.NONE + if is_direct and self.ctx.current_token.value.startswith("%%"): + propagation = PropagationPolicy.PREFERENCE + edge_properties = EdgeAttributeParser(self.ctx, self.literal_str).parse() edge_properties.setdefault("virtuals", ()) edge_properties["direct"] = is_direct edge_properties.setdefault("depflag", 0) + edge_properties["propagation"] = propagation - dependency, warnings = self._parse_node(root_spec) + dependency = self._parse_node(root_spec) if is_direct: target_spec = current_spec @@ -378,11 +361,15 @@ def add_dependency(dep, **edge_properties): current_spec = dependency target_spec = root_spec - parser_warnings.extend(warnings) add_dependency(dependency, **edge_properties) elif self.ctx.accept(SpecTokens.DEPENDENCY): is_direct = self.ctx.current_token.value[0] == "%" + propagation = PropagationPolicy.NONE + + if is_direct and self.ctx.current_token.value.startswith("%%"): + propagation = PropagationPolicy.PREFERENCE + virtuals = parse_virtual_assignment(self.ctx) # if no virtual assignment, check for a toolchain - look ahead to find the @@ -390,13 +377,20 @@ def add_dependency(dep, **edge_properties): if not virtuals and is_direct and self.ctx.next_token.value in self.toolchains: assert self.ctx.accept(SpecTokens.UNQUALIFIED_PACKAGE_NAME) try: - self._apply_toolchain(current_spec, self.ctx.current_token.value) + self._apply_toolchain( + current_spec, self.ctx.current_token.value, propagation=propagation + ) except spack.error.SpecError as e: raise SpecParsingError(str(e), self.ctx.current_token, self.literal_str) continue - edge_properties = {"direct": is_direct, "virtuals": virtuals, "depflag": 0} - dependency, warnings = self._parse_node(root_spec) + edge_properties = { + "direct": is_direct, + "virtuals": virtuals, + "depflag": 0, + "propagation": propagation, + } + dependency = self._parse_node(root_spec) if is_direct: target_spec = current_spec @@ -406,19 +400,15 @@ def add_dependency(dep, **edge_properties): current_spec = dependency target_spec = root_spec - parser_warnings.extend(warnings) add_dependency(dependency, **edge_properties) else: break - if parser_warnings: - _warn_about_variant_after_compiler(self.literal_str, parser_warnings) - return root_spec def _parse_node(self, root_spec: "spack.spec.Spec", root: bool = True): - dependency, parser_warnings = SpecNodeParser(self.ctx, self.literal_str).parse(root=root) + dependency = SpecNodeParser(self.ctx, self.literal_str).parse(root=root) if dependency is None: msg = ( "the dependency sigil and any optional edge attributes must be followed by a " @@ -426,15 +416,21 @@ def _parse_node(self, root_spec: "spack.spec.Spec", root: bool = True): ) raise SpecParsingError(msg, self.ctx.current_token, self.literal_str) if root_spec.concrete: - raise spack.error.SpecError(root_spec, "^" + str(dependency)) - 
return dependency, parser_warnings + raise spack.error.SpecError(str(root_spec), "^" + str(dependency)) + return dependency - def _apply_toolchain(self, spec: "spack.spec.Spec", name: str) -> None: + def _apply_toolchain( + self, spec: "spack.spec.Spec", name: str, *, propagation: PropagationPolicy + ) -> None: if name not in self.parsed_toolchains: toolchain = self._parse_toolchain(name) self.parsed_toolchains[name] = toolchain - toolchain = self.parsed_toolchains[name] + propagation_arg = None if propagation != PropagationPolicy.PREFERENCE else propagation + # Here we need to copy because we want "foo %toolc ^bar %toolc" to generate different + # objects for the toolc attached to foo and bar, since the solver depends on that to + # generate facts + toolchain = self.parsed_toolchains[name].copy(propagation=propagation_arg) spec.constrain(toolchain) def _parse_toolchain(self, name: str) -> "spack.spec.Spec": @@ -443,7 +439,9 @@ def _parse_toolchain(self, name: str) -> "spack.spec.Spec": toolchain = parse_one_or_raise(toolchain_config) self._ensure_all_direct_edges(toolchain) else: - toolchain = spack.spec.Spec() + from spack.spec import Spec + + toolchain = Spec() for entry in toolchain_config: toolchain_part = parse_one_or_raise(entry["spec"]) when = entry.get("when", "") @@ -480,24 +478,23 @@ def __init__(self, ctx, literal_str): def parse( self, initial_spec: Optional["spack.spec.Spec"] = None, root: bool = True - ) -> Tuple["spack.spec.Spec", List[str]]: + ) -> "spack.spec.Spec": """Parse a single spec node from a stream of tokens Args: initial_spec: object to be constructed root: True if we're parsing a root, False if dependency after ^ or % - Return + Return: The object passed as argument """ - parser_warnings: List[str] = [] - last_compiler = None - if initial_spec is None: - initial_spec = spack.spec.Spec() + from spack.spec import Spec + + initial_spec = Spec() if not self.ctx.next_token or self.ctx.expect(SpecTokens.DEPENDENCY): - return initial_spec, parser_warnings + return initial_spec # If we start with a package name we have a named spec, we cannot # accept another package name afterwards in a node @@ -514,7 +511,7 @@ def parse( initial_spec.namespace = namespace elif self.ctx.accept(SpecTokens.FILENAME): - return FileParser(self.ctx).parse(initial_spec), parser_warnings + return FileParser(self.ctx).parse(initial_spec) def raise_parsing_error(string: str, cause: Optional[Exception] = None): """Raise a spec parsing error with token context.""" @@ -527,12 +524,6 @@ def add_flag(name: str, value: Union[str, bool], propagate: bool, concrete: bool except Exception as e: raise_parsing_error(str(e), e) - def warn_if_after_compiler(token: str): - """Register a warning for %compiler followed by +variant that will in the future apply - to the compiler instead of the current root.""" - if last_compiler: - parser_warnings.append(f"`{token}` should go before `{last_compiler}`") - while True: if ( self.ctx.accept(SpecTokens.VERSION_HASH_PAIR) @@ -547,19 +538,16 @@ def warn_if_after_compiler(token: str): ) initial_spec.attach_git_version_lookup() self.has_version = True - warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.BOOL_VARIANT): name = self.ctx.current_token.value[1:].strip() variant_value = self.ctx.current_token.value[0] == "+" add_flag(name, variant_value, propagate=False, concrete=True) - warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.PROPAGATED_BOOL_VARIANT): name = 
self.ctx.current_token.value[2:].strip() variant_value = self.ctx.current_token.value[0:2] == "++" add_flag(name, variant_value, propagate=True, concrete=True) - warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.KEY_VALUE_PAIR): name, value = self.ctx.current_token.value.split("=", maxsplit=1) @@ -570,7 +558,6 @@ def warn_if_after_compiler(token: str): add_flag( name, strip_quotes_and_unescape(value), propagate=False, concrete=concrete ) - warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.PROPAGATED_KEY_VALUE_PAIR): name, value = self.ctx.current_token.value.split("==", maxsplit=1) @@ -578,19 +565,17 @@ def warn_if_after_compiler(token: str): if concrete: name = name[:-1] add_flag(name, strip_quotes_and_unescape(value), propagate=True, concrete=concrete) - warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.expect(SpecTokens.DAG_HASH): if initial_spec.abstract_hash: break self.ctx.accept(SpecTokens.DAG_HASH) initial_spec.abstract_hash = self.ctx.current_token.value[1:] - warn_if_after_compiler(self.ctx.current_token.value) else: break - return initial_spec, parser_warnings + return initial_spec class FileParser: @@ -607,19 +592,21 @@ def parse(self, initial_spec: "spack.spec.Spec") -> "spack.spec.Spec": Args: initial_spec: object where to parse the spec - Return + Return: The initial_spec passed as argument, once constructed """ file = pathlib.Path(self.ctx.current_token.value) if not file.exists(): - raise spack.spec.NoSuchSpecFileError(f"No such spec file: '{file}'") + raise spack.error.NoSuchSpecFileError(f"No such spec file: '{file}'") + + from spack.spec import Spec with file.open("r", encoding="utf-8") as stream: if str(file).endswith(".json"): - spec_from_file = spack.spec.Spec.from_json(stream) + spec_from_file = Spec.from_json(stream) else: - spec_from_file = spack.spec.Spec.from_yaml(stream) + spec_from_file = Spec.from_yaml(stream) initial_spec._dup(spec_from_file) return initial_spec @@ -730,9 +717,10 @@ def strip_quotes_and_unescape(string: str) -> str: def quote_if_needed(value: str) -> str: """Add quotes around the value if it requires quotes. - This will add quotes around the value unless it matches ``NO_QUOTES_NEEDED``. + This will add quotes around the value unless it matches :data:`NO_QUOTES_NEEDED`. This adds: + * single quotes by default * double quotes around any value that contains single quotes diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py index cfbff761f9aaba..94a6d9053be34f 100644 --- a/lib/spack/spack/stage.py +++ b/lib/spack/spack/stage.py @@ -1,8 +1,8 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) +import abc import errno -import getpass import glob import hashlib import io @@ -11,7 +11,7 @@ import stat import sys import tempfile -from typing import Callable, Dict, Generator, Iterable, List, Optional, Set +from typing import TYPE_CHECKING, Callable, Dict, Generator, Iterable, List, Optional, Set, Union import spack.caches import spack.config @@ -19,15 +19,13 @@ import spack.llnl.string import spack.llnl.util.lang import spack.llnl.util.tty as tty -import spack.mirrors.layout -import spack.mirrors.utils +import spack.oci.image import spack.resource import spack.spec import spack.util.crypto import spack.util.lock import spack.util.parallel import spack.util.path as sup -import spack.util.pattern as pattern import spack.util.url as url_util from spack import fetch_strategy as fs # breaks a cycle from spack.llnl.util.filesystem import ( @@ -48,6 +46,12 @@ from spack.util.editor import editor, executable from spack.version import StandardVersion, VersionList +if TYPE_CHECKING: + import spack.mirrors.layout + import spack.mirrors.mirror + import spack.mirrors.utils + + # The well-known stage source subdirectory name. _source_path_subdir = "spack-src" @@ -57,8 +61,14 @@ def compute_stage_name(spec): """Determine stage name given a spec""" - default_stage_structure = stage_prefix + "{name}-{version}-{hash}" - stage_name_structure = spack.config.get("config:stage_name", default=default_stage_structure) + spec_stage_structure = stage_prefix + if spec.concrete: + spec_stage_structure += "{name}-{version}-{hash}" + else: + spec_stage_structure += "{name}-{version}" + # TODO (psakiev, scheibelp) Technically a user could still reintroduce a hash via + # config:stage_name. This is a fix for how to handle staging an abstract spec (see #51305) + stage_name_structure = spack.config.get("config:stage_name", default=spec_stage_structure) return spec.format_path(format_string=stage_name_structure) @@ -71,7 +81,7 @@ def create_stage_root(path: str) -> None: user_uid = getuid() # Obtain lists of ancestor and descendant paths of the $user node, if any. - group_paths, user_node, user_paths = partition_path(path, getpass.getuser()) + group_paths, user_node, user_paths = partition_path(path, sup.get_user()) for p in group_paths: if not os.path.exists(p): @@ -151,7 +161,7 @@ def _resolve_paths(candidates): $user and appending $user if it is not present in the path. """ temp_path = sup.canonicalize_path("$tempdir") - user = getpass.getuser() + user = sup.get_user() tmp_has_usr = user in temp_path.split(os.path.sep) paths = [] @@ -208,13 +218,20 @@ def _mirror_roots(): ] -class LockableStagingDir: - """A directory whose lifetime can be managed with a context +class AbstractStage(abc.ABC): + """Abstract base class for all stage types. + + A stage is a directory whose lifetime can be managed with a context manager (but persists if the user requests it). Instances can have a specified name and if they do, then for all instances that have the same name, only one can enter the context manager at a time. + + This class defines the interface that all stage types must implement. """ + + #: Set to True to error out if patches fail + requires_patch_success = True + def __init__(self, name, path, keep, lock): # TODO: This uses a protected member of tempfile, but seemed the only # TODO: way to get a temporary name.
It won't be the same as the @@ -300,11 +317,68 @@ def create(self): ensure_access(self.path) self.created = True + @abc.abstractmethod def destroy(self): - raise NotImplementedError(f"{self.__class__.__name__} is abstract") + """Remove the stage directory and its contents.""" + ... + + @abc.abstractmethod + def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None: + """Fetch the source code or resources for this stage.""" + ... + + @abc.abstractmethod + def check(self): + """Check the integrity of the fetched resources.""" + ... + + @abc.abstractmethod + def expand_archive(self): + """Expand any downloaded archives.""" + ... + + @abc.abstractmethod + def restage(self): + """Remove the expanded source and re-expand it.""" + ... + + @abc.abstractmethod + def cache_local(self): + """Cache the resources locally.""" + ... + + @property + @abc.abstractmethod + def source_path(self) -> str: + """Return the path to the expanded source code.""" + ... + + @property + @abc.abstractmethod + def expanded(self) -> bool: + """Return True if the source has been expanded.""" + ... + + @property + @abc.abstractmethod + def archive_file(self) -> Optional[str]: + """Return the path to the archive file, or None.""" + ... + + def cache_mirror( + self, + mirror: "spack.caches.MirrorCache", + stats: "spack.mirrors.utils.MirrorStatsForOneSpec", + ) -> None: + """Cache the resources to a mirror (can be no-op).""" + pass + + def steal_source(self, dest: str) -> None: + """Copy source to another location (can be no-op).""" + pass -class Stage(LockableStagingDir): +class Stage(AbstractStage): """Manages a temporary stage directory for building. A Stage object is a context manager that handles a directory where @@ -418,9 +492,9 @@ def __init__( self.default_fetcher_only = False @property - def expected_archive_files(self): + def expected_archive_files(self) -> List[str]: """Possible archive file paths.""" - fnames = [] + fnames: List[str] = [] expanded = True if isinstance(self.default_fetcher, fs.URLFetchStrategy): expanded = self.default_fetcher.expand_archive @@ -447,7 +521,7 @@ def save_filename(self): return possible_filenames[0] @property - def archive_file(self): + def archive_file(self) -> Optional[str]: """Path to the source archive within this stage directory.""" for path in self.expected_archive_files: if os.path.exists(path): @@ -501,7 +575,7 @@ def _generate_fetchers(self, mirror_only=False) -> Generator["fs.FetchStrategy", extension=extension, ) for mirror in self.mirrors - if not mirror.fetch_url.startswith("oci://") # no support for mirrors yet + if not spack.oci.image.is_oci_url(mirror.fetch_url) # no support for mirrors yet ) if not self.default_fetcher_only and self.mirror_layout and self.default_fetcher.cachable: @@ -608,7 +682,9 @@ def cache_local(self): spack.caches.FETCH_CACHE.store(self.fetcher, self.mirror_layout.path) def cache_mirror( - self, mirror: "spack.caches.MirrorCache", stats: "spack.mirrors.utils.MirrorStats" + self, + mirror: "spack.caches.MirrorCache", + stats: "spack.mirrors.utils.MirrorStatsForOneSpec", ) -> None: """Perform a fetch if the resource is not already cached @@ -679,9 +755,25 @@ def __init__( fetch_strategy: "fs.FetchStrategy", root: Stage, resource: spack.resource.Resource, - **kwargs, + *, + name=None, + mirror_paths: Optional["spack.mirrors.layout.MirrorLayout"] = None, + mirrors: Optional[Iterable["spack.mirrors.mirror.Mirror"]] = None, + keep=False, + path=None, + lock=True, + search_fn=None, ): - super().__init__(fetch_strategy, 
**kwargs) + super().__init__( + fetch_strategy, + name=name, + mirror_paths=mirror_paths, + mirrors=mirrors, + keep=keep, + path=path, + lock=lock, + search_fn=search_fn, + ) self.root_stage = root self.resource = resource @@ -741,103 +833,165 @@ def _add_to_root_stage(self): install(src, destination_path) -class StageComposite(pattern.Composite): +class StageComposite: """Composite for Stage type objects. The first item in this composite is considered to be the root package, and operations that return a value are forwarded to it.""" - # - # __enter__ and __exit__ delegate to all stages in the composite. - # - def __init__(self): - super().__init__( - [ - "fetch", - "create", - "created", - "check", - "expand_archive", - "restage", - "destroy", - "cache_local", - "cache_mirror", - "steal_source", - "disable_mirrors", - ] - ) + self._stages: List[AbstractStage] = [] @classmethod - def from_iterable(cls, iterable: Iterable[Stage]) -> "StageComposite": + def from_iterable(cls, iterable: Iterable[AbstractStage]) -> "StageComposite": """Create a new composite from an iterable of stages.""" composite = cls() composite.extend(iterable) return composite + def append(self, stage: AbstractStage) -> None: + """Add a stage to the composite.""" + self._stages.append(stage) + + def extend(self, stages: Iterable[AbstractStage]) -> None: + """Add multiple stages to the composite.""" + self._stages.extend(stages) + + def __iter__(self): + """Iterate over stages.""" + return iter(self._stages) + + def __len__(self): + """Return the number of stages.""" + return len(self._stages) + + def __getitem__(self, index): + """Get a stage by index.""" + return self._stages[index] + + # Context manager methods - delegate to all stages def __enter__(self): - for item in self: - item.__enter__() + for stage in self._stages: + stage.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): - for item in reversed(self): - item.__exit__(exc_type, exc_val, exc_tb) + for stage in reversed(self._stages): + stage.__exit__(exc_type, exc_val, exc_tb) + + # Methods that delegate to all stages + def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None: + """Fetch all stages.""" + for stage in self._stages: + stage.fetch(mirror_only, err_msg) + + def create(self) -> None: + """Create all stages.""" + for stage in self._stages: + stage.create() + + def check(self) -> None: + """Check all stages.""" + for stage in self._stages: + stage.check() + + def expand_archive(self) -> None: + """Expand archives for all stages.""" + for stage in self._stages: + stage.expand_archive() + + def restage(self) -> None: + """Restage all stages.""" + for stage in self._stages: + stage.restage() + + def destroy(self) -> None: + """Destroy all stages.""" + for stage in self._stages: + stage.destroy() + + def cache_local(self) -> None: + """Cache all stages locally.""" + for stage in self._stages: + stage.cache_local() - # - # Below functions act only on the *first* stage in the composite. 
- # + def cache_mirror( + self, + mirror: "spack.caches.MirrorCache", + stats: "spack.mirrors.utils.MirrorStatsForOneSpec", + ) -> None: + """Cache all stages to mirror.""" + for stage in self._stages: + stage.cache_mirror(mirror, stats) + + def steal_source(self, dest: str) -> None: + """Steal source from all stages.""" + for stage in self._stages: + stage.steal_source(dest) + + def disable_mirrors(self) -> None: + """Disable mirrors for all stages that support it.""" + for stage in self._stages: + if isinstance(stage, Stage): + stage.default_fetcher_only = True + + # Properties that act only on the *first* stage in the composite @property def source_path(self): - return self[0].source_path + return self._stages[0].source_path @property def expanded(self): - return self[0].expanded + return self._stages[0].expanded @property def path(self): - return self[0].path + return self._stages[0].path @property def archive_file(self): - return self[0].archive_file + return self._stages[0].archive_file @property def requires_patch_success(self): - return self[0].requires_patch_success + return self._stages[0].requires_patch_success @property def keep(self): - return self[0].keep + return self._stages[0].keep @keep.setter def keep(self, value): - for item in self: - item.keep = value + for stage in self._stages: + stage.keep = value -class DevelopStage(LockableStagingDir): +class DevelopStage(AbstractStage): requires_patch_success = False def __init__(self, name, dev_path, reference_link): super().__init__(name=name, path=None, keep=False, lock=True) self.dev_path = dev_path - self.source_path = dev_path + self._source_path = dev_path # The path of a link that will point to this stage if os.path.isabs(reference_link): link_path = reference_link else: - link_path = os.path.join(self.source_path, reference_link) + link_path = os.path.join(self._source_path, reference_link) if not os.path.isdir(os.path.dirname(link_path)): raise StageError(f"The directory containing {link_path} must exist") self.reference_link = link_path + @property + def source_path(self): + """Returns the development source path.""" + return self._source_path + @property def archive_file(self): return None - def fetch(self, *args, **kwargs): + def fetch(self, mirror_only: bool = False, err_msg: Optional[str] = None) -> None: tty.debug("No fetching needed for develop stage.") def check(self): @@ -1092,7 +1246,7 @@ def get_checksums_for_versions( url_by_version: Dict[StandardVersion, str], package_name: str, *, - first_stage_function: Optional[Callable[[Stage, str], None]] = None, + first_stage_function: Optional[Callable[[str, str], None]] = None, keep_stage: bool = False, concurrency: Optional[int] = None, fetch_options: Optional[Dict[str, str]] = None, @@ -1107,8 +1261,8 @@ def get_checksums_for_versions( Args: url_by_version: URL keyed by version package_name: name of the package - first_stage_function: function that takes a Stage and a URL; this is run on the stage - of the first URL downloaded + first_stage_function: function that takes an archive file and a URL; this is run on the + stage of the first URL downloaded keep_stage: whether to keep staging area when command completes batch: whether to ask user how many versions to fetch (false) or fetch all versions (true) fetch_options: options used for the fetcher (such as timeout or cookies) @@ -1120,7 +1274,8 @@ def get_checksums_for_versions( versions = sorted(url_by_version.keys(), reverse=True) search_arguments = [(url_by_version[v], v) for v in versions] - version_hashes, 
errors = {}, [] + version_hashes: Dict[StandardVersion, str] = {} + errors: List[str] = [] # Don't spawn 16 processes when we need to fetch 2 urls if concurrency is not None: @@ -1133,25 +1288,24 @@ def get_checksums_for_versions( # can move this function call *after* having distributed the work to executors. if first_stage_function is not None: (url, version), search_arguments = search_arguments[0], search_arguments[1:] - checksum, error = _fetch_and_checksum(url, fetch_options, keep_stage, first_stage_function) - if error is not None: - errors.append(error) - - if checksum is not None: - version_hashes[version] = checksum + result = _fetch_and_checksum(url, fetch_options, keep_stage, first_stage_function) + if isinstance(result, Exception): + errors.append(str(result)) + else: + version_hashes[version] = result with spack.util.parallel.make_concurrent_executor(concurrency, require_fork=False) as executor: - results = [] - for url, version in search_arguments: - future = executor.submit(_fetch_and_checksum, url, fetch_options, keep_stage) - results.append((version, future)) + results = [ + (version, executor.submit(_fetch_and_checksum, url, fetch_options, keep_stage)) + for url, version in search_arguments + ] for version, future in results: - checksum, error = future.result() - if error is not None: - errors.append(error) - continue - version_hashes[version] = checksum + result = future.result() + if isinstance(result, Exception): + errors.append(str(result)) + else: + version_hashes[version] = result for msg in errors: tty.debug(msg) @@ -1165,13 +1319,14 @@ def get_checksums_for_versions( return version_hashes -def _fetch_and_checksum(url, options, keep_stage, action_fn=None): +def _fetch_and_checksum( + url: str, + options: Optional[dict], + keep_stage: bool, + action_fn: Optional[Callable[[str, str], None]] = None, +) -> Union[str, Exception]: try: - url_or_fs = url - if options: - url_or_fs = fs.URLFetchStrategy(url=url, fetch_options=options) - - with Stage(url_or_fs, keep=keep_stage) as stage: + with Stage(fs.URLFetchStrategy(url=url, fetch_options=options), keep=keep_stage) as stage: # Fetch the archive stage.fetch() archive = stage.archive_file @@ -1183,23 +1338,21 @@ def _fetch_and_checksum(url, options, keep_stage, action_fn=None): # Checksum the archive and add it to the list checksum = spack.util.crypto.checksum(hashlib.sha256, archive) - return checksum, None - except fs.FailedDownloadError: - return None, f"[WORKER] Failed to fetch {url}" + return checksum except Exception as e: - return None, f"[WORKER] Something failed on {url}, skipping. ({e})" + return Exception(f"[WORKER] Failed to fetch {url}: {e}") class StageError(spack.error.SpackError): - """ "Superclass for all errors encountered during staging.""" + """Superclass for all errors encountered during staging.""" class StagePathError(StageError): - """ "Error encountered with stage path.""" + """Error encountered with stage path.""" class RestageError(StageError): - """ "Error encountered during restaging.""" + """Error encountered during restaging.""" class VersionFetchError(StageError): diff --git a/lib/spack/spack/store.py b/lib/spack/spack/store.py index 724131e99dff18..4068398586a790 100644 --- a/lib/spack/spack/store.py +++ b/lib/spack/spack/store.py @@ -6,9 +6,8 @@ An install tree, or "build store" consists of two parts: - 1. A package database that tracks what is installed. - 2. A directory layout that determines how the installations - are laid out. +1. A package database that tracks what is installed. +2. 
A directory layout that determines how the installations are laid out. The store contains all the install prefixes for packages installed by Spack. The simplest store could just contain prefixes named by DAG hash, @@ -21,7 +20,7 @@ import pathlib import re import uuid -from typing import Any, Callable, Dict, Generator, List, Optional, Union +from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union import spack.config import spack.database @@ -37,17 +36,16 @@ DEFAULT_INSTALL_TREE_ROOT = os.path.join(spack.paths.opt_path, "spack") -def parse_install_tree(config_dict): +def parse_install_tree(config_dict: dict) -> Tuple[str, str, Dict[str, str]]: """Parse config settings and return values relevant to the store object. Arguments: - config_dict (dict): dictionary of config values, as returned from - spack.config.get('config') + config_dict: dictionary of config values, as returned from ``spack.config.get("config")`` Returns: - (tuple): triple of the install tree root, the unpadded install tree - root (before padding was applied), and the projections for the - install tree + triple of the install tree root, the unpadded install tree + root (before padding was applied), and the projections for the + install tree Encapsulate backwards compatibility capabilities for install_tree and deprecated values that are now parsed as part of install_tree. @@ -68,7 +66,7 @@ def parse_install_tree(config_dict): install_tree = config_dict.get("install_tree", {}) - padded_length = False + padded_length: Union[bool, int] = False if isinstance(install_tree, str): tty.warn("Using deprecated format for configuring install_tree") unpadded_root = install_tree @@ -209,13 +207,13 @@ def create(configuration: spack.config.Configuration) -> Store: configuration: configuration to create a store. """ configuration = configuration or spack.config.CONFIG - config_dict = configuration.get("config") + config_dict = configuration.get_config("config") root, unpadded_root, projections = parse_install_tree(config_dict) - hash_length = configuration.get("config:install_hash_length") + hash_length = config_dict.get("install_hash_length") install_roots = [ install_properties["install_tree"] - for install_properties in configuration.get("upstreams", {}).values() + for install_properties in configuration.get_config("upstreams").values() ] upstreams = _construct_upstream_dbs_from_install_roots(install_roots) @@ -332,7 +330,7 @@ def specfile_matches(filename: str, **kwargs) -> List["spack.spec.Spec"]: Args: filename: YAML or JSON file from which to read the query. - **kwargs: keyword arguments forwarded to "find" + **kwargs: keyword arguments forwarded to :func:`find` """ query = [spack.spec.Spec.from_specfile(filename)] return find(query, **kwargs) @@ -351,7 +349,7 @@ def use_store( Args: path: path to the store. - extra_data: extra configuration under "config:install_tree" to be + extra_data: extra configuration under ``config:install_tree`` to be taken into account. Yields: diff --git a/lib/spack/spack/subprocess_context.py b/lib/spack/spack/subprocess_context.py index 460a6ae2586a38..c48b642d7763e9 100644 --- a/lib/spack/spack/subprocess_context.py +++ b/lib/spack/spack/subprocess_context.py @@ -4,7 +4,7 @@ """ This module handles transmission of Spack state to child processes started -using the 'spawn' start method. Notably, installations are performed in a +using the ``"spawn"`` start method. 
Notably, installations are performed in a subprocess and require transmitting the Package object (in such a way that the repository is available for importing when it is deserialized); installations performed in Spack unit tests may include additional @@ -14,36 +14,32 @@ import importlib import io import multiprocessing +import multiprocessing.context import pickle -import pydoc from types import ModuleType -from typing import Any +from typing import TYPE_CHECKING, Optional import spack.config -import spack.environment import spack.paths import spack.platforms import spack.repo import spack.store -patches = None +if TYPE_CHECKING: + import spack.package_base +#: Used in tests to track monkeypatches that need to be restored in child processes +MONKEYPATCHES: list = [] -def append_patch(patch): - global patches - if not patches: - patches = list() - patches.append(patch) - -def serialize(pkg) -> io.BytesIO: +def serialize(pkg: "spack.package_base.PackageBase") -> io.BytesIO: serialized_pkg = io.BytesIO() pickle.dump(pkg, serialized_pkg) serialized_pkg.seek(0) return serialized_pkg -def deserialize(serialized_pkg: io.BytesIO) -> Any: +def deserialize(serialized_pkg: io.BytesIO) -> "spack.package_base.PackageBase": pkg = pickle.load(serialized_pkg) pkg.spec._package = pkg # ensure overwritten package class attributes get applied @@ -65,96 +61,95 @@ def create(self): class PackageInstallContext: - """Captures the in-memory process state of a package installation that - needs to be transmitted to a child process. - """ - - def __init__(self, pkg, *, ctx=None): + """Captures the in-memory process state of a package installation that needs to be transmitted + to a child process.""" + + def __init__( + self, + pkg: "spack.package_base.PackageBase", + *, + ctx: Optional[multiprocessing.context.BaseContext] = None, + ): ctx = ctx or multiprocessing.get_context() - self.serialize = ctx.get_start_method() != "fork" - if self.serialize: - self.serialized_pkg = serialize(pkg) - self.global_state = GlobalStateMarshaler() - self.test_patches = store_patches() - self.serialized_env = serialize(spack.environment.active_environment()) - else: - self.pkg = pkg - self.global_state = None - self.test_patches = None - self.env = spack.environment.active_environment() + self.global_state = GlobalStateMarshaler(ctx=ctx) + self.pkg = pkg if ctx.get_start_method() == "fork" else serialize(pkg) self.spack_working_dir = spack.paths.spack_working_dir - def restore(self): + def restore(self) -> "spack.package_base.PackageBase": spack.paths.spack_working_dir = self.spack_working_dir - # Activating the environment modifies the global configuration, so globals have to - # be restored afterward, in case other modifications were applied on top (e.g. from - # command line) - if self.serialize: - self.global_state.restore() - self.test_patches.restore() - - env = pickle.load(self.serialized_env) if self.serialize else self.env - if env: - spack.environment.activate(env) - - # Order of operation is important, since the package might be retrieved - # from a repo defined within the environment configuration - return deserialize(self.serialized_pkg) if self.serialize else self.pkg + self.global_state.restore() + return deserialize(self.pkg) if isinstance(self.pkg, io.BytesIO) else self.pkg class GlobalStateMarshaler: - """Class to serialize and restore global state for child processes. + """Class to serialize and restore global state for child processes if needed. 
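+ When the start method is ``fork`` the child inherits this state in memory and nothing is + captured; under ``spawn`` the marshaler snapshots the configuration, platform, store, test + patches, and the active environment.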
Spack may modify, in memory, state that is normally read from disk or the
    command line; this object is responsible for properly serializing that state
    to be applied to a subprocess.
    """

-    def __init__(self):
+    def __init__(
+        self, *, ctx: Optional[multiprocessing.context.BaseContext] = None
+    ) -> None:
+        ctx = ctx or multiprocessing.get_context()
+        self.is_forked = ctx.get_start_method() == "fork"
+        if self.is_forked:
+            return
+
+        from spack.environment import active_environment
+
        self.config = spack.config.CONFIG.ensure_unwrapped()
        self.platform = spack.platforms.host
        self.store = spack.store.STORE
+        self.test_patches = TestPatches.create()
+        self.env = active_environment()

    def restore(self):
+        if self.is_forked:
+            return
        spack.config.CONFIG = self.config
        spack.repo.enable_repo(spack.repo.RepoPath.from_config(self.config))
        spack.platforms.host = self.platform
        spack.store.STORE = self.store
+        self.test_patches.restore()
+        if self.env:
+            from spack.environment import activate
+
+            activate(self.env)


class TestPatches:
    def __init__(self, module_patches, class_patches):
-        self.module_patches = list((x, y, serialize(z)) for (x, y, z) in module_patches)
-        self.class_patches = list((x, y, serialize(z)) for (x, y, z) in class_patches)
+        self.module_patches = [(x, y, serialize(z)) for (x, y, z) in module_patches]
+        self.class_patches = [(x, y, serialize(z)) for (x, y, z) in class_patches]

    def restore(self):
+        if not self.module_patches and not self.class_patches:
+            return
+        # this code path is only followed in tests, so use inline imports
+        from pydoc import locate
+
        for module_name, attr_name, value in self.module_patches:
            value = pickle.load(value)
            module = importlib.import_module(module_name)
            setattr(module, attr_name, value)
        for class_fqn, attr_name, value in self.class_patches:
            value = pickle.load(value)
-            cls = pydoc.locate(class_fqn)
+            cls = locate(class_fqn)
            setattr(cls, attr_name, value)

-
-def store_patches():
-    module_patches = list()
-    class_patches = list()
-    if not patches:
-        return TestPatches(list(), list())
-    for target, name, _ in patches:
-        if isinstance(target, ModuleType):
-            new_val = getattr(target, name)
-            module_name = target.__name__
-            module_patches.append((module_name, name, new_val))
-        elif isinstance(target, type):
-            new_val = getattr(target, name)
-            class_fqn = ".".join([target.__module__, target.__name__])
-            class_patches.append((class_fqn, name, new_val))
-
-    return TestPatches(module_patches, class_patches)
-
-
-def clear_patches():
-    global patches
-    patches = None
+    @staticmethod
+    def create():
+        module_patches = []
+        class_patches = []
+        for target, name in MONKEYPATCHES:
+            if isinstance(target, ModuleType):
+                new_val = getattr(target, name)
+                module_name = target.__name__
+                module_patches.append((module_name, name, new_val))
+            elif isinstance(target, type):
+                new_val = getattr(target, name)
+                class_fqn = ".".join([target.__module__, target.__name__])
+                class_patches.append((class_fqn, name, new_val))
+
+        return TestPatches(module_patches, class_patches)
diff --git a/lib/spack/spack/tag.py b/lib/spack/spack/tag.py
index 8768ea39be03bc..0e40759337fdf3 100644
--- a/lib/spack/spack/tag.py
+++ b/lib/spack/spack/tag.py
@@ -2,64 +2,26 @@
 #
 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
 """Classes and functions to manage package tags"""
-import collections
-import copy
-from collections.abc import Mapping
+from typing import TYPE_CHECKING, Dict, List

 import spack.error
-import spack.repo
 import spack.util.spack_json as sjson

+if TYPE_CHECKING:
+    import spack.repo
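The rewritten ``TagIndex`` below collapses the old ``Mapping`` subclass into a plain
``tag -> [package names]`` dict. A minimal usage sketch of the new semantics
(illustration only, not part of the patch; it assumes the module imports as
``spack.tag`` exactly as patched):

    from spack.tag import TagIndex

    idx = TagIndex()
    idx.tags["mpi"] = ["mpich"]

    other = TagIndex()
    other.tags["mpi"] = ["openmpi", "mpich"]
    other.tags["build-tools"] = ["cmake"]

    # merge() takes the union per tag, deduplicated and sorted
    idx.merge(other)
    assert idx.tags["mpi"] == ["mpich", "openmpi"]
    assert idx.tags["build-tools"] == ["cmake"]
    assert idx.get_packages("no-such-tag") == []  # missing tags yield an empty list

Note that ``merge`` copies any list it adopts from ``other``, so later mutation of
one index does not leak into the other.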
-def _get_installed_package_names(): - """Returns names of packages installed in the active environment.""" - import spack.environment - specs = spack.environment.installed_specs() - return [spec.name for spec in specs] +class TagIndex: + """Maps tags to list of package names.""" + def __init__(self) -> None: + self.tags: Dict[str, List[str]] = {} -def packages_with_tags(tags, installed, skip_empty): - """ - Returns a dict, indexed by tag, containing lists of names of packages - containing the tag or, if no tags, for all available tags. - - Arguments: - tags (list or None): list of tags of interest or None for all - installed (bool): True if want names of packages that are installed; - otherwise, False if want all packages with the tag - skip_empty (bool): True if exclude tags with no associated packages; - otherwise, False if want entries for all tags even when no such - tagged packages - """ - tag_pkgs = collections.defaultdict(lambda: list) - spec_names = _get_installed_package_names() if installed else [] - keys = spack.repo.PATH.tag_index if tags is None else tags - for tag in keys: - packages = [ - name for name in spack.repo.PATH.tag_index[tag] if not installed or name in spec_names - ] - if packages or not skip_empty: - tag_pkgs[tag] = packages - return tag_pkgs - - -class TagIndex(Mapping): - """Maps tags to list of packages.""" - - def __init__(self, repository): - self._tag_dict = collections.defaultdict(list) - self.repository = repository - - @property - def tags(self): - return self._tag_dict - - def to_json(self, stream): - sjson.dump({"tags": self._tag_dict}, stream) + def to_json(self, stream) -> None: + sjson.dump({"tags": self.tags}, stream) @staticmethod - def from_json(stream, repository): + def from_json(stream) -> "TagIndex": d = sjson.load(stream) if not isinstance(d, dict): @@ -68,65 +30,47 @@ def from_json(stream, repository): if "tags" not in d: raise TagIndexError("TagIndex data does not start with 'tags'") - r = TagIndex(repository=repository) - + r = TagIndex() for tag, packages in d["tags"].items(): - r[tag].extend(packages) - + r.tags[tag] = packages return r - def __getitem__(self, item): - return self._tag_dict[item] - - def __iter__(self): - return iter(self._tag_dict) - - def __len__(self): - return len(self._tag_dict) - - def copy(self): - """Return a deep copy of this index.""" - clone = TagIndex(repository=self.repository) - clone._tag_dict = copy.deepcopy(self._tag_dict) - return clone - - def get_packages(self, tag): + def get_packages(self, tag: str) -> List[str]: """Returns all packages associated with the tag.""" - return self.tags[tag] if tag in self.tags else [] + return self.tags.get(tag, []) - def merge(self, other): + def merge(self, other: "TagIndex") -> None: """Merge another tag index into this one. Args: - other (TagIndex): tag index to be merged + other: tag index to be merged """ - other = other.copy() # defensive copy. - - for tag in other.tags: + for tag, pkgs in other.tags.items(): if tag not in self.tags: - self.tags[tag] = other.tags[tag] - continue - - spkgs, opkgs = self.tags[tag], other.tags[tag] - self.tags[tag] = sorted(list(set(spkgs + opkgs))) + self.tags[tag] = pkgs.copy() + else: + self.tags[tag] = sorted({*self.tags[tag], *pkgs}) - def update_package(self, pkg_name): + def update_package(self, pkg_name: str, repo: "spack.repo.Repo") -> None: """Updates a package in the tag index. 
Args: - pkg_name (str): name of the package to be removed from the index + pkg_name: name of the package to be updated """ - pkg_cls = self.repository.get_pkg_class(pkg_name) + pkg_cls = repo.get_pkg_class(pkg_name) # Remove the package from the list of packages, if present - for pkg_list in self._tag_dict.values(): + for pkg_list in self.tags.values(): if pkg_name in pkg_list: pkg_list.remove(pkg_name) # Add it again under the appropriate tags for tag in getattr(pkg_cls, "tags", []): tag = tag.lower() - self._tag_dict[tag].append(pkg_cls.name) + if tag not in self.tags: + self.tags[tag] = [pkg_cls.name] + else: + self.tags[tag].append(pkg_cls.name) class TagIndexError(spack.error.SpackError): diff --git a/lib/spack/spack/tengine.py b/lib/spack/spack/tengine.py index e88124e28b104b..28780819fe4bba 100644 --- a/lib/spack/spack/tengine.py +++ b/lib/spack/spack/tengine.py @@ -3,18 +3,19 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import itertools import textwrap -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import spack.config import spack.extensions import spack.llnl.util.lang from spack.util.path import canonicalize_path +if TYPE_CHECKING: + import spack.vendor.jinja2 + class ContextMeta(type): - """Meta class for Context. It helps reducing the boilerplate in - client code. - """ + """Metaclass for Context. It helps reduce the boilerplate in client code.""" #: Keeps track of the context properties that have been added #: by the class that is being defined @@ -54,27 +55,28 @@ def context_property(cls, func): class Context(metaclass=ContextMeta): - """Base class for context classes that are used with the template - engine. - """ + """Base class for context classes that are used with the template engine.""" - def to_dict(self): + context_properties: List[str] + + def to_dict(self) -> Dict[str, Any]: """Returns a dictionary containing all the context properties.""" - d = [(name, getattr(self, name)) for name in self.context_properties] - return dict(d) + return {name: getattr(self, name) for name in self.context_properties} -@spack.llnl.util.lang.memoized -def make_environment(dirs: Optional[Tuple[str, ...]] = None): +def make_environment(dirs: Optional[Tuple[str, ...]] = None) -> "spack.vendor.jinja2.Environment": """Returns a configured environment for template rendering.""" - # Import at this scope to avoid slowing Spack startup down - import spack.vendor.jinja2 - if dirs is None: # Default directories where to search for templates - builtins = spack.config.get("config:template_dirs", ["$spack/share/spack/templates"]) - extensions = spack.extensions.get_template_dirs() - dirs = tuple(canonicalize_path(d) for d in itertools.chain(builtins, extensions)) + dirs = default_template_dirs(spack.config.CONFIG) + + return make_environment_from_dirs(dirs) + + +@spack.llnl.util.lang.memoized +def make_environment_from_dirs(dirs: Tuple[str, ...]) -> "spack.vendor.jinja2.Environment": + # Import at this scope to avoid slowing Spack startup down + import spack.vendor.jinja2 # Loader for the templates loader = spack.vendor.jinja2.FileSystemLoader(dirs) @@ -85,7 +87,14 @@ def make_environment(dirs: Optional[Tuple[str, ...]] = None): return env -# Extra filters for template engine environment +def default_template_dirs(configuration: spack.config.Configuration) -> Tuple[str, ...]: + config_yaml = configuration.get_config("config") + builtins = config_yaml.get("template_dirs", ["$spack/share/spack/templates"]) + extensions = 
spack.extensions.get_template_dirs() + return tuple(canonicalize_path(d) for d in itertools.chain(builtins, extensions)) + + +# Extra filters for the template engine environment def prepend_to_line(text, token): diff --git a/lib/spack/spack/test/bindist.py b/lib/spack/spack/test/binary_distribution.py similarity index 75% rename from lib/spack/spack/test/bindist.py rename to lib/spack/spack/test/binary_distribution.py index c6dfb367b50eea..d677a53c35bf08 100644 --- a/lib/spack/spack/test/bindist.py +++ b/lib/spack/spack/test/binary_distribution.py @@ -8,10 +8,7 @@ import json import os import pathlib -import platform import re -import shutil -import sys import tarfile import urllib.error import urllib.request @@ -21,22 +18,18 @@ import pytest -import spack.binary_distribution as bindist -import spack.caches -import spack.compilers.config +import spack.binary_distribution import spack.concretize import spack.config -import spack.fetch_strategy +import spack.environment as ev import spack.hooks.sbang as sbang -import spack.llnl.util.filesystem as fs import spack.main import spack.mirrors.mirror import spack.oci.image -import spack.paths -import spack.repo import spack.spec import spack.stage import spack.store +import spack.url_buildcache import spack.util.gpg import spack.util.spack_yaml as syaml import spack.util.url as url_util @@ -44,8 +37,7 @@ from spack.binary_distribution import CannotListKeys, GenerateIndexError from spack.database import INDEX_JSON_FILE from spack.installer import PackageInstaller -from spack.llnl.util.filesystem import copy_tree, join_path, readlink -from spack.paths import test_path +from spack.llnl.util.filesystem import join_path, readlink, working_dir from spack.spec import Spec from spack.url_buildcache import ( INDEX_MANIFEST_FILE, @@ -54,6 +46,7 @@ URLBuildcacheEntry, URLBuildcacheEntryV2, compression_writer, + get_entries_from_cache, get_url_buildcache_class, get_valid_spec_file, ) @@ -65,110 +58,8 @@ uninstall_cmd = spack.main.SpackCommand("uninstall") buildcache_cmd = spack.main.SpackCommand("buildcache") -legacy_mirror_dir = os.path.join(test_path, "data", "mirrors", "legacy_yaml") - - -@pytest.fixture(scope="function") -def cache_directory(tmp_path: pathlib.Path): - fetch_cache_dir = tmp_path / "fetch_cache" - fetch_cache_dir.mkdir() - fsc = spack.fetch_strategy.FsCache(str(fetch_cache_dir)) - spack.caches.FETCH_CACHE, old_cache_path = fsc, spack.caches.FETCH_CACHE - - yield spack.caches.FETCH_CACHE - - shutil.rmtree(str(fetch_cache_dir)) - spack.caches.FETCH_CACHE = old_cache_path - - -@pytest.fixture(scope="module") -def config_directory(tmp_path_factory: pytest.TempPathFactory): - # Copy defaults to a temporary "site" scope - defaults_dir = tmp_path_factory.mktemp("test_configs") - config_path = pathlib.Path(spack.paths.etc_path) - copy_tree(str(config_path / "defaults"), str(defaults_dir / "site")) - - # Create a "user" scope - (defaults_dir / "user").mkdir() - - # Detect compilers - cfg_scopes = [ - spack.config.DirectoryConfigScope(name, str(defaults_dir / name)) - for name in [f"site/{platform.system().lower()}", "site", "user"] - ] - with spack.config.use_configuration(*cfg_scopes): - _ = spack.compilers.config.find_compilers(scope="site") - - yield defaults_dir - - shutil.rmtree(str(defaults_dir)) - - -@pytest.fixture(scope="function") -def default_config(tmp_path: pathlib.Path, config_directory, mock_packages_repo, install_mockery): - # This fixture depends on install_mockery to ensure - # there is a clear order of initialization. 
The substitution of the - # config scopes here is done on top of the substitution that comes with - # install_mockery - mutable_dir = tmp_path / "mutable_config" / "tmp" - mutable_dir.mkdir(parents=True) - copy_tree(str(config_directory), str(mutable_dir)) - - scopes = [ - spack.config.DirectoryConfigScope(name, str(mutable_dir / name)) - for name in [f"site/{platform.system().lower()}", "site", "user"] - ] - - with spack.config.use_configuration(*scopes): - njobs = spack.config.get("config:build_jobs") - if not njobs: - spack.config.set("config:build_jobs", 4, scope="user") - extensions = spack.config.get("config:template_dirs") - if not extensions: - spack.config.set( - "config:template_dirs", - [os.path.join(spack.paths.share_path, "templates")], - scope="user", - ) - - (mutable_dir / "build_stage").mkdir() - build_stage = spack.config.get("config:build_stage") - if not build_stage: - spack.config.set( - "config:build_stage", [str(mutable_dir / "build_stage")], scope="user" - ) - timeout = spack.config.get("config:connect_timeout") - if not timeout: - spack.config.set("config:connect_timeout", 10, scope="user") - with spack.repo.use_repositories(mock_packages_repo): - yield spack.config.CONFIG - - -@pytest.fixture(scope="function") -def install_dir_default_layout(tmp_path: pathlib.Path): - """Hooks a fake install directory with a default layout""" - opt_dir = tmp_path / "opt" - original_store, spack.store.STORE = spack.store.STORE, spack.store.Store(str(opt_dir)) - try: - yield spack.store - finally: - spack.store.STORE = original_store - - -@pytest.fixture(scope="function") -def install_dir_non_default_layout(tmp_path: pathlib.Path): - """Hooks a fake install directory with a non-default layout""" - opt_dir = tmp_path / "opt" - original_store, spack.store.STORE = spack.store.STORE, spack.store.Store( - str(opt_dir), projections={"all": "{name}-{version}-{hash:4}"} - ) - try: - yield spack.store - finally: - spack.store.STORE = original_store - -@pytest.fixture(scope="function") +@pytest.fixture def dummy_prefix(tmp_path: pathlib.Path): """Dummy prefix used for testing tarball creation, validation, extraction""" p = tmp_path / "prefix" @@ -199,56 +90,40 @@ def dummy_prefix(tmp_path: pathlib.Path): return str(p) -if sys.platform == "darwin": - required_executables = ["/usr/bin/clang++", "install_name_tool"] -else: - required_executables = ["/usr/bin/g++", "patchelf"] - - -@pytest.mark.requires_executables(*required_executables) @pytest.mark.maybeslow -@pytest.mark.usefixtures( - "default_config", - "cache_directory", - "install_dir_default_layout", - "temporary_mirror", - "mutable_mock_env_path", -) -def test_default_rpaths_create_install_default_layout(temporary_mirror_dir): +def test_buildcache_cmd_smoke_test(tmp_path: pathlib.Path, install_mockery): """ Test the creation and installation of buildcaches with default rpaths into the default directory layout scheme. 
""" - gspec = spack.concretize.concretize_one("garply") - cspec = spack.concretize.concretize_one("corge") - sy_spec = spack.concretize.concretize_one("symly") + mirror_cmd("add", "--type", "binary", "--unsigned", "test-mirror", str(tmp_path)) # Install 'corge' without using a cache - install_cmd("--no-cache", cspec.name) - install_cmd("--no-cache", sy_spec.name) + install_cmd("--fake", "--no-cache", "corge") + install_cmd("--fake", "--no-cache", "symly") # Create a buildache - buildcache_cmd("push", "-u", temporary_mirror_dir, cspec.name, sy_spec.name) + buildcache_cmd("push", "-u", str(tmp_path), "corge", "symly") # Test force overwrite create buildcache (-f option) - buildcache_cmd("push", "-uf", temporary_mirror_dir, cspec.name) + buildcache_cmd("push", "-uf", str(tmp_path), "corge") # Create mirror index - buildcache_cmd("update-index", temporary_mirror_dir) + buildcache_cmd("update-index", str(tmp_path)) # List the buildcaches in the mirror buildcache_cmd("list", "-alv") # Uninstall the package and deps - uninstall_cmd("-y", "--dependents", gspec.name) + uninstall_cmd("-y", "--dependents", "garply") # Test installing from build caches - buildcache_cmd("install", "-uo", cspec.name, sy_spec.name) + buildcache_cmd("install", "-uo", "corge", "symly") # This gives warning that spec is already installed - buildcache_cmd("install", "-uo", cspec.name) + buildcache_cmd("install", "-uo", "corge") # Test overwrite install - buildcache_cmd("install", "-ufo", cspec.name) + buildcache_cmd("install", "-ufo", "corge") buildcache_cmd("keys", "-f") buildcache_cmd("list") @@ -257,80 +132,6 @@ def test_default_rpaths_create_install_default_layout(temporary_mirror_dir): buildcache_cmd("list", "-l", "-v") -@pytest.mark.requires_executables(*required_executables) -@pytest.mark.maybeslow -@pytest.mark.nomockstage -@pytest.mark.usefixtures( - "default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror" -) -def test_default_rpaths_install_nondefault_layout(temporary_mirror_dir): - """ - Test the creation and installation of buildcaches with default rpaths - into the non-default directory layout scheme. - """ - cspec = spack.concretize.concretize_one("corge") - # This guy tests for symlink relocation - sy_spec = spack.concretize.concretize_one("symly") - - # Install some packages with dependent packages - # test install in non-default install path scheme - buildcache_cmd("install", "-uo", cspec.name, sy_spec.name) - - # Test force install in non-default install path scheme - buildcache_cmd("install", "-ufo", cspec.name) - - -@pytest.mark.requires_executables(*required_executables) -@pytest.mark.maybeslow -@pytest.mark.nomockstage -@pytest.mark.usefixtures( - "default_config", - "cache_directory", - "install_dir_default_layout", - "temporary_mirror", - "mutable_mock_env_path", -) -def test_relative_rpaths_install_default_layout(temporary_mirror_dir): - """ - Test the creation and installation of buildcaches with relative - rpaths into the default directory layout scheme. 
- """ - gspec = spack.concretize.concretize_one("garply") - cspec = spack.concretize.concretize_one("corge") - - # Install buildcache created with relativized rpaths - buildcache_cmd("install", "-ufo", cspec.name) - - # This gives warning that spec is already installed - buildcache_cmd("install", "-ufo", cspec.name) - - # Uninstall the package and deps - uninstall_cmd("-y", "--dependents", gspec.name) - - # Install build cache - buildcache_cmd("install", "-ufo", cspec.name) - - # Test overwrite install - buildcache_cmd("install", "-ufo", cspec.name) - - -@pytest.mark.requires_executables(*required_executables) -@pytest.mark.maybeslow -@pytest.mark.nomockstage -@pytest.mark.usefixtures( - "default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror" -) -def test_relative_rpaths_install_nondefault(temporary_mirror_dir): - """ - Test the installation of buildcaches with relativized rpaths - into the non-default directory layout scheme. - """ - cspec = spack.concretize.concretize_one("corge") - - # Test install in non-default install path scheme and relative path - buildcache_cmd("install", "-ufo", cspec.name) - - def test_push_and_fetch_keys(mock_gnupghome, tmp_path: pathlib.Path): testpath = str(mock_gnupghome) @@ -351,39 +152,41 @@ def test_push_and_fetch_keys(mock_gnupghome, tmp_path: pathlib.Path): assert len(keys) == 1 fpr = keys[0] - bindist._url_push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True) + spack.binary_distribution._url_push_keys( + mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True + ) # dir 2: import the key from the mirror, and confirm that its fingerprint # matches the one created above with spack.util.gpg.gnupghome_override(gpg_dir2): assert len(spack.util.gpg.public_keys()) == 0 - bindist.get_keys(mirrors=mirrors, install=True, trust=True, force=True) + spack.binary_distribution.get_keys(mirrors=mirrors, install=True, trust=True, force=True) new_keys = spack.util.gpg.public_keys() assert len(new_keys) == 1 assert new_keys[0] == fpr -@pytest.mark.requires_executables(*required_executables) @pytest.mark.maybeslow -@pytest.mark.nomockstage -@pytest.mark.usefixtures( - "default_config", "cache_directory", "install_dir_non_default_layout", "temporary_mirror" -) -def test_built_spec_cache(temporary_mirror_dir): +def test_built_spec_cache(install_mockery, tmp_path: pathlib.Path): """Because the buildcache list command fetches the buildcache index and uses it to populate the binary_distribution built spec cache, when this test calls get_mirrors_for_spec, it is testing the popluation of that cache from a buildcache index.""" + + install_cmd("--fake", "--no-cache", "corge") + buildcache_cmd("push", "--unsigned", "--update-index", str(tmp_path), "corge") + mirror_cmd("add", "--type", "binary", "--unsigned", "test-mirror", str(tmp_path)) buildcache_cmd("list", "-a", "-l") gspec = spack.concretize.concretize_one("garply") cspec = spack.concretize.concretize_one("corge") for s in [gspec, cspec]: - results = bindist.get_mirrors_for_spec(s) - assert any([r.spec == s for r in results]) + results = spack.binary_distribution.get_mirrors_for_spec(s) + assert len(results) == 1 + assert results[0].url == url_util.path_to_file_url(str(tmp_path)) def fake_dag_hash(spec, length=None): @@ -410,14 +213,14 @@ def test_spec_needs_rebuild(monkeypatch, tmp_path: pathlib.Path): # Put installed package in the buildcache buildcache_cmd("push", "-u", str(mirror_dir), s.name) - rebuild = bindist.needs_rebuild(s, mirror_url) + rebuild = 
spack.binary_distribution.needs_rebuild(s, mirror_url) assert not rebuild # Now monkey patch Spec to change the hash on the package monkeypatch.setattr(spack.spec.Spec, "dag_hash", fake_dag_hash) - rebuild = bindist.needs_rebuild(s, mirror_url) + rebuild = spack.binary_distribution.needs_rebuild(s, mirror_url) assert rebuild @@ -448,7 +251,9 @@ def test_generate_index_missing(monkeypatch, tmp_path: pathlib.Path, mutable_con # Remove dependency from cache libelf_files = glob.glob( os.path.join( - str(mirror_dir / bindist.buildcache_relative_specs_path()), "libelf", "*libelf*" + str(mirror_dir / spack.binary_distribution.buildcache_relative_specs_path()), + "libelf", + "*libelf*", ) ) os.remove(*libelf_files) @@ -468,7 +273,9 @@ def test_use_bin_index(monkeypatch, tmp_path: pathlib.Path, mutable_config): """Check use of binary cache index: perform an operation that instantiates it, and a second operation that reconstructs it. """ - monkeypatch.setattr(bindist, "BINARY_INDEX", bindist.BinaryCacheIndex()) + monkeypatch.setattr( + spack.binary_distribution, "BINARY_INDEX", spack.binary_distribution.BinaryCacheIndex() + ) # Create a mirror, configure us to point at it, install a spec, and # put it in the mirror @@ -482,7 +289,72 @@ def test_use_bin_index(monkeypatch, tmp_path: pathlib.Path, mutable_config): # Now the test buildcache_cmd("list", "-al") - bindist.BINARY_INDEX = bindist.BinaryCacheIndex() + spack.binary_distribution.BINARY_INDEX = spack.binary_distribution.BinaryCacheIndex() + cache_list = buildcache_cmd("list", "-al") + assert "libdwarf" in cache_list + + +@pytest.mark.usefixtures("install_mockery", "mock_packages", "mock_fetch") +def test_use_bin_index_active_env_with_view( + monkeypatch, tmp_path: pathlib.Path, mutable_config, mutable_mock_env_path +): + """Check use of binary cache index: perform an operation that + instantiates it, and a second operation that reconstructs it. + """ + monkeypatch.setattr( + spack.binary_distribution, "BINARY_INDEX", spack.binary_distribution.BinaryCacheIndex() + ) + + # Create a mirror, configure us to point at it, install a spec, and + # put it in the mirror + mirror_dir = tmp_path / "mirror_dir" + mirror_url = url_util.path_to_file_url(str(mirror_dir)) + spack.config.set("mirrors", {"test": {"url": mirror_url, "view": "test"}}) + s = spack.concretize.concretize_one("libdwarf") + + # Create an environment and install specs for the view + ev.create("testenv") + with ev.read("testenv"): + install_cmd("--add", "--fake", "--no-cache", s.name) + buildcache_cmd("push", "-u", "test", s.name) + buildcache_cmd("update-index", "test") + + # Now the test + buildcache_cmd("list", "-al") + spack.binary_distribution.BINARY_INDEX = spack.binary_distribution.BinaryCacheIndex() + cache_list = buildcache_cmd("list", "-al") + assert "libdwarf" in cache_list + + +@pytest.mark.usefixtures("install_mockery", "mock_packages", "mock_fetch") +def test_use_bin_index_with_view( + monkeypatch, tmp_path: pathlib.Path, mutable_config, mutable_mock_env_path +): + """Check use of binary cache index: perform an operation that + instantiates it, and a second operation that reconstructs it. 
+ """ + monkeypatch.setattr( + spack.binary_distribution, "BINARY_INDEX", spack.binary_distribution.BinaryCacheIndex() + ) + + # Create a mirror, configure us to point at it, install a spec, and + # put it in the mirror + mirror_dir = tmp_path / "mirror_dir" + mirror_url = url_util.path_to_file_url(str(mirror_dir)) + spack.config.set("mirrors", {"test": {"url": mirror_url, "view": "test"}}) + s = spack.concretize.concretize_one("libdwarf") + + # Create an environment and install specs for the view + ev.create("testenv") + with ev.read("testenv"): + install_cmd("--add", "--fake", "--no-cache", s.name) + buildcache_cmd("push", "-u", "test", s.name) + + buildcache_cmd("update-index", "test", "testenv") + + # Now the test + buildcache_cmd("list", "-al") + spack.binary_distribution.BINARY_INDEX = spack.binary_distribution.BinaryCacheIndex() cache_list = buildcache_cmd("list", "-al") assert "libdwarf" in cache_list @@ -500,10 +372,14 @@ def push_to_url(*args, **kwargs): monkeypatch.setattr(web_util, "push_to_url", push_to_url) with pytest.raises(CannotListKeys, match="Encountered problem listing keys"): - bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path)) + spack.binary_distribution.generate_key_index( + "s3://non-existent/fails-listing", str(tmp_path) + ) with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"): - bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path)) + spack.binary_distribution.generate_key_index( + "s3://non-existent/fails-uploading", str(tmp_path) + ) def test_generate_package_index_failure(monkeypatch, tmp_path: pathlib.Path, capfd): @@ -515,7 +391,7 @@ def mock_list_url(url, recursive=False): test_url = "file:///fake/keys/dir" with pytest.raises(GenerateIndexError, match="Unable to generate package index"): - bindist._url_generate_package_index(test_url, str(tmp_path)) + spack.binary_distribution._url_generate_package_index(test_url, str(tmp_path)) assert ( "Warning: Encountered problem listing packages at " @@ -532,10 +408,10 @@ def mock_list_url(url, recursive=False): url = "file:///fake/keys/dir" with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"): - bindist.generate_key_index(url, str(tmp_path)) + spack.binary_distribution.generate_key_index(url, str(tmp_path)) with pytest.raises(GenerateIndexError, match="Unable to generate package index"): - bindist._url_generate_package_index(url, str(tmp_path)) + spack.binary_distribution._url_generate_package_index(url, str(tmp_path)) assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err @@ -563,7 +439,9 @@ def test_update_sbang(tmp_path: pathlib.Path, temporary_mirror, mock_fetch, inst new_prefix, new_sbang_shebang = s.prefix, sbang.sbang_shebang_line() assert old_prefix != new_prefix assert old_sbang_shebang != new_sbang_shebang - PackageInstaller([s.package], cache_only=True, unsigned=True).install() + PackageInstaller( + [s.package], root_policy="cache_only", dependencies_policy="cache_only", unsigned=True + ).install() # Check that the sbang line refers to the new install tree new_contents = f"""\ @@ -578,11 +456,11 @@ def test_update_sbang(tmp_path: pathlib.Path, temporary_mirror, mock_fetch, inst def test_FetchCacheError_only_accepts_lists_of_errors(): with pytest.raises(TypeError, match="list"): - bindist.FetchCacheError("error") + spack.binary_distribution.FetchCacheError("error") def test_FetchCacheError_pretty_printing_multiple(): - e = 
bindist.FetchCacheError([RuntimeError("Oops!"), TypeError("Trouble!")]) + e = spack.binary_distribution.FetchCacheError([RuntimeError("Oops!"), TypeError("Trouble!")]) str_e = str(e) assert "Multiple errors" in str_e assert "Error 1: RuntimeError: Oops!" in str_e @@ -591,7 +469,7 @@ def test_FetchCacheError_pretty_printing_multiple(): def test_FetchCacheError_pretty_printing_single(): - e = bindist.FetchCacheError([RuntimeError("Oops!")]) + e = spack.binary_distribution.FetchCacheError([RuntimeError("Oops!")]) str_e = str(e) assert "Multiple errors" not in str_e assert "RuntimeError: Oops!" in str_e @@ -604,7 +482,7 @@ def test_text_relocate_if_needed( install_cmd("needs-text-relocation") spec = temporary_store.db.query_one("needs-text-relocation") tgz_path = tmp_path / "relocatable.tar.gz" - bindist.create_tarball(spec, str(tgz_path)) + spack.binary_distribution.create_tarball(spec, str(tgz_path)) # extract the .spack/binary_distribution file with tarfile.open(tgz_path) as tar: @@ -635,7 +513,7 @@ def test_compression_writer(tmp_path: pathlib.Path): with open(compressed_output_path, "rb") as f: binary_content = f.read() - assert bindist.compute_hash(binary_content) == compressed_checksum + assert spack.binary_distribution.compute_hash(binary_content) == compressed_checksum assert os.stat(compressed_output_path).st_size == compressed_size assert binary_content[:2] == b"\x1f\x8b" decompressed_content = gzip.decompress(binary_content).decode("utf-8") @@ -656,7 +534,7 @@ def test_compression_writer(tmp_path: pathlib.Path): with open(uncompressed_output_path, "r", encoding="utf-8") as f: content = f.read() - assert bindist.compute_hash(content) == uncompressed_checksum + assert spack.binary_distribution.compute_hash(content) == uncompressed_checksum assert os.stat(uncompressed_output_path).st_size == uncompressed_size assert content == text @@ -683,14 +561,14 @@ def response_304(request: urllib.request.Request): ) assert False, "Should not fetch {}".format(url) - fetcher = bindist.EtagIndexFetcherV2( + fetcher = spack.binary_distribution.EtagIndexFetcherV2( url="https://www.example.com", etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_304, ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert result.fresh @@ -708,18 +586,18 @@ def response_200(request: urllib.request.Request): ) assert False, "Should not fetch {}".format(url) - fetcher = bindist.EtagIndexFetcherV2( + fetcher = spack.binary_distribution.EtagIndexFetcherV2( url="https://www.example.com", etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_200, ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert not result.fresh assert result.etag == "59bcc3ad6775562f845953cf01624225" assert result.data == "Result" # decoded utf-8. 
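    # (compute_hash re-derives the checksum from the decoded payload, so the
    # assertion below pins the exact bytes the mock response served)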
- assert result.hash == bindist.compute_hash("Result") + assert result.hash == spack.binary_distribution.compute_hash("Result") def test_v2_etag_fetching_404(): @@ -733,19 +611,19 @@ def response_404(request: urllib.request.Request): fp=None, ) - fetcher = bindist.EtagIndexFetcherV2( + fetcher = spack.binary_distribution.EtagIndexFetcherV2( url="https://www.example.com", etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_404, ) - with pytest.raises(bindist.FetchIndexError): + with pytest.raises(spack.binary_distribution.FetchIndexError): fetcher.conditional_fetch() def test_v2_default_index_fetch_200(): index_json = '{"Hello": "World"}' - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) def urlopen(request: urllib.request.Request): url = request.get_full_url() @@ -767,13 +645,13 @@ def urlopen(request: urllib.request.Request): assert False, "Unexpected request {}".format(url) - fetcher = bindist.DefaultIndexFetcherV2( + fetcher = spack.binary_distribution.DefaultIndexFetcherV2( url="https://www.example.com", local_hash="outdated", urlopen=urlopen ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert not result.fresh assert result.etag == "59bcc3ad6775562f845953cf01624225" assert result.data == index_json @@ -784,7 +662,7 @@ def test_v2_default_index_dont_fetch_index_json_hash_if_no_local_hash(): # When we don't have local hash, we should not be fetching the # remote index.json.hash file, but only index.json. index_json = '{"Hello": "World"}' - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) def urlopen(request: urllib.request.Request): url = request.get_full_url() @@ -798,13 +676,13 @@ def urlopen(request: urllib.request.Request): assert False, "Unexpected request {}".format(url) - fetcher = bindist.DefaultIndexFetcherV2( + fetcher = spack.binary_distribution.DefaultIndexFetcherV2( url="https://www.example.com", local_hash=None, urlopen=urlopen ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert result.data == index_json assert result.hash == index_json_hash assert result.etag == "59bcc3ad6775562f845953cf01624225" @@ -813,7 +691,7 @@ def urlopen(request: urllib.request.Request): def test_v2_default_index_not_modified(): index_json = '{"Hello": "World"}' - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) def urlopen(request: urllib.request.Request): url = request.get_full_url() @@ -828,7 +706,7 @@ def urlopen(request: urllib.request.Request): # No request to index.json should be made. 
assert False, "Unexpected request {}".format(url) - fetcher = bindist.DefaultIndexFetcherV2( + fetcher = spack.binary_distribution.DefaultIndexFetcherV2( url="https://www.example.com", local_hash=index_json_hash, urlopen=urlopen ) @@ -838,7 +716,7 @@ def urlopen(request: urllib.request.Request): @pytest.mark.parametrize("index_json", [b"\xa9", b"!#%^"]) def test_v2_default_index_invalid_hash_file(index_json): # Test invalid unicode / invalid hash type - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) def urlopen(request: urllib.request.Request): return urllib.response.addinfourl( @@ -848,7 +726,7 @@ def urlopen(request: urllib.request.Request): code=200, ) - fetcher = bindist.DefaultIndexFetcherV2( + fetcher = spack.binary_distribution.DefaultIndexFetcherV2( url="https://www.example.com", local_hash=index_json_hash, urlopen=urlopen ) @@ -858,7 +736,7 @@ def urlopen(request: urllib.request.Request): def test_v2_default_index_json_404(): # Test invalid unicode / invalid hash type index_json = '{"Hello": "World"}' - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) def urlopen(request: urllib.request.Request): url = request.get_full_url() @@ -881,11 +759,11 @@ def urlopen(request: urllib.request.Request): assert False, "Unexpected fetch {}".format(url) - fetcher = bindist.DefaultIndexFetcherV2( + fetcher = spack.binary_distribution.DefaultIndexFetcherV2( url="https://www.example.com", local_hash="invalid", urlopen=urlopen ) - with pytest.raises(bindist.FetchIndexError, match="Could not fetch index"): + with pytest.raises(spack.binary_distribution.FetchIndexError, match="Could not fetch index"): fetcher.conditional_fetch() @@ -907,7 +785,7 @@ def test_tarball_doesnt_include_buildinfo_twice(tmp_path: Path): # Now create a tarball, which should include a new binary_distribution file tarball = str(tmp_path / "prefix.tar.gz") - bindist._do_create_tarball( + spack.binary_distribution._do_create_tarball( tarfile_path=tarball, prefix=str(p), buildinfo={"metadata": "new"}, prefixes_to_relocate=[] ) @@ -945,13 +823,13 @@ def test_reproducible_tarball_is_reproducible(tmp_path: Path): # Create a tarball with a certain mtime of bin/app os.utime(app, times=(0, 0)) - bindist._do_create_tarball( + spack.binary_distribution._do_create_tarball( tarball_1, prefix=str(p), buildinfo=buildinfo, prefixes_to_relocate=[] ) # Do it another time with different mtime of bin/app os.utime(app, times=(10, 10)) - bindist._do_create_tarball( + spack.binary_distribution._do_create_tarball( tarball_2, prefix=str(p), buildinfo=buildinfo, prefixes_to_relocate=[] ) @@ -1000,7 +878,9 @@ def test_tarball_normalized_permissions(tmp_path: pathlib.Path): ) as f: f.write("hello world") - bindist._do_create_tarball(tarball, prefix=str(p), buildinfo={}, prefixes_to_relocate=[]) + spack.binary_distribution._do_create_tarball( + tarball, prefix=str(p), buildinfo={}, prefixes_to_relocate=[] + ) expected_prefix = str(p).lstrip("/") @@ -1030,19 +910,23 @@ def test_tarball_common_prefix(dummy_prefix, tmp_path: pathlib.Path): assert os.path.isabs(dummy_prefix) expected_prefix = PurePath(dummy_prefix).as_posix().lstrip("/") - with fs.working_dir(str(tmp_path)): + with working_dir(str(tmp_path)): # Create a tarball (using absolute path for prefix dir) with tarfile.open("example.tar", mode="w") as tar: tar.add(name=dummy_prefix) # Open, verify common prefix, and extract it. 
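    # (_ensure_common_prefix below checks that every member sits under a single
    # top-level directory, and _tar_strip_component drops that directory during
    # extraction; the identity extraction_filter opts out of the PEP 706 tar
    # filtering that newer Python versions enable by default, so extraction
    # behaves the same on every supported interpreter)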
with tarfile.open("example.tar", mode="r") as tar: - common_prefix = bindist._ensure_common_prefix(tar) + common_prefix = spack.binary_distribution._ensure_common_prefix(tar) assert common_prefix == expected_prefix + # For consistent behavior across all supported Python versions + tar.extraction_filter = lambda member, path: member + # Extract into prefix2 tar.extractall( - path="prefix2", members=bindist._tar_strip_component(tar, common_prefix) + path="prefix2", + members=spack.binary_distribution._tar_strip_component(tar, common_prefix), ) # Verify files are all there at the correct level. @@ -1066,7 +950,7 @@ def test_tarball_common_prefix(dummy_prefix, tmp_path: pathlib.Path): def test_tarfile_missing_binary_distribution_file(tmp_path: pathlib.Path): """A tarfile that does not contain a .spack/binary_distribution file cannot be used to install.""" - with fs.working_dir(str(tmp_path)): + with working_dir(str(tmp_path)): # An empty .spack dir. with tarfile.open("empty.tar", mode="w") as tar: tarinfo = tarfile.TarInfo(name="example/.spack") @@ -1074,13 +958,13 @@ def test_tarfile_missing_binary_distribution_file(tmp_path: pathlib.Path): tar.addfile(tarinfo) with pytest.raises(ValueError, match="missing binary_distribution file"): - bindist._ensure_common_prefix(tarfile.open("empty.tar", mode="r")) + spack.binary_distribution._ensure_common_prefix(tarfile.open("empty.tar", mode="r")) def test_tarfile_without_common_directory_prefix_fails(tmp_path: pathlib.Path): """A tarfile that only contains files without a common package directory should fail to extract, as we won't know where to put the files.""" - with fs.working_dir(str(tmp_path)): + with working_dir(str(tmp_path)): # Create a broken tarball with just a file, no directories. with tarfile.open("empty.tar", mode="w") as tar: tar.addfile( @@ -1089,12 +973,12 @@ def test_tarfile_without_common_directory_prefix_fails(tmp_path: pathlib.Path): ) with pytest.raises(ValueError, match="Tarball does not contain a common prefix"): - bindist._ensure_common_prefix(tarfile.open("empty.tar", mode="r")) + spack.binary_distribution._ensure_common_prefix(tarfile.open("empty.tar", mode="r")) def test_tarfile_with_files_outside_common_prefix(tmp_path: pathlib.Path, dummy_prefix): """If a file is outside of the common prefix, we should fail.""" - with fs.working_dir(str(tmp_path)): + with working_dir(str(tmp_path)): with tarfile.open("broken.tar", mode="w") as tar: tar.add(name=dummy_prefix) tar.addfile(tarfile.TarInfo(name="/etc/config_file"), fileobj=io.BytesIO(b"hello")) @@ -1102,7 +986,7 @@ def test_tarfile_with_files_outside_common_prefix(tmp_path: pathlib.Path, dummy_ with pytest.raises( ValueError, match="Tarball contains file /etc/config_file outside of prefix" ): - bindist._ensure_common_prefix(tarfile.open("broken.tar", mode="r")) + spack.binary_distribution._ensure_common_prefix(tarfile.open("broken.tar", mode="r")) def test_tarfile_of_spec_prefix(tmp_path: pathlib.Path): @@ -1127,7 +1011,7 @@ def test_tarfile_of_spec_prefix(tmp_path: pathlib.Path): file = tmp_path / "example.tar" with tarfile.open(str(file), mode="w") as tar: - bindist.tarfile_of_spec_prefix(tar, str(prefix), prefixes_to_relocate=[]) + spack.binary_distribution.tarfile_of_spec_prefix(tar, str(prefix), prefixes_to_relocate=[]) expected_prefix = str(prefix).lstrip("/") @@ -1180,19 +1064,19 @@ def test_get_valid_spec_file(tmp_path: pathlib.Path, layout, expect_success): assert expect_success assert spec_dict_disk == spec_dict assert layout_disk == effective_layout - except 
bindist.InvalidMetadataFile: + except spack.binary_distribution.InvalidMetadataFile: assert not expect_success def test_get_valid_spec_file_doesnt_exist(tmp_path: pathlib.Path): - with pytest.raises(bindist.InvalidMetadataFile, match="No such file"): + with pytest.raises(spack.binary_distribution.InvalidMetadataFile, match="No such file"): get_valid_spec_file(str(tmp_path / "no-such-file"), max_supported_layout=1) @pytest.mark.parametrize("filename", ["spec.json", "spec.json.sig"]) def test_get_valid_spec_file_no_json(tmp_path: pathlib.Path, filename): tmp_path.joinpath(filename).write_text("not json") - with pytest.raises(bindist.InvalidMetadataFile): + with pytest.raises(spack.binary_distribution.InvalidMetadataFile): get_valid_spec_file(str(tmp_path / filename), max_supported_layout=1) @@ -1212,7 +1096,9 @@ def test_url_buildcache_entry_v3(monkeypatch, tmp_path: pathlib.Path): # Push libdwarf to buildcache buildcache_cmd("push", "-u", str(mirror_dir), s.name) - cache_class = get_url_buildcache_class(bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION) + cache_class = get_url_buildcache_class( + spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION + ) build_cache = cache_class(mirror_url, s, allow_unsigned=True) manifest = build_cache.read_manifest() @@ -1261,7 +1147,9 @@ def test_relative_path_components(): ) def test_default_tag(spec: str): """Make sure that computed image tags are valid.""" - assert re.fullmatch(spack.oci.image.tag, bindist._oci_default_tag(spack.spec.Spec(spec))) + assert re.fullmatch( + spack.oci.image.tag, spack.binary_distribution._oci_default_tag(spack.spec.Spec(spec)) + ) class IndexInformation(NamedTuple): @@ -1278,11 +1166,11 @@ class IndexInformation(NamedTuple): def mock_index(tmp_path: pathlib.Path, monkeypatch) -> IndexInformation: mirror_root = tmp_path / "mymirror" index_json = '{"Hello": "World"}' - index_json_hash = bindist.compute_hash(index_json) + index_json_hash = spack.binary_distribution.compute_hash(index_json) fetched = False cache_class = get_url_buildcache_class( - layout_version=bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) index_blob_path = os.path.join( @@ -1297,7 +1185,7 @@ def mock_index(tmp_path: pathlib.Path, monkeypatch) -> IndexInformation: with open(index_blob_path, "w", encoding="utf-8") as fd: fd.write(index_json) - index_blob_record = bindist.BlobRecord( + index_blob_record = spack.binary_distribution.BlobRecord( os.stat(index_blob_path).st_size, cache_class.BUILDCACHE_INDEX_MEDIATYPE, "none", @@ -1356,16 +1244,16 @@ def response_304(request: urllib.request.Request): ) assert False, "Unexpected request {}".format(url) - fetcher = bindist.EtagIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + fetcher = spack.binary_distribution.EtagIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_304, ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert result.fresh @@ -1383,16 +1271,16 @@ def response_200(request: urllib.request.Request): ) assert False, "Unexpected request {}".format(url) - fetcher = bindist.EtagIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + 
fetcher = spack.binary_distribution.EtagIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_200, ) result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert not result.fresh assert mock_index.fetched_blob() assert result.etag == mock_index.manifest_etag @@ -1411,15 +1299,15 @@ def response_404(request: urllib.request.Request): fp=None, ) - fetcher = bindist.EtagIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + fetcher = spack.binary_distribution.EtagIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), etag="112a8bbc1b3f7f185621c1ee335f0502", urlopen=response_404, ) - with pytest.raises(bindist.FetchIndexError): + with pytest.raises(spack.binary_distribution.FetchIndexError): fetcher.conditional_fetch() @@ -1437,9 +1325,9 @@ def urlopen(request: urllib.request.Request): assert False, "Unexpected request {}".format(url) - fetcher = bindist.DefaultIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + fetcher = spack.binary_distribution.DefaultIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), local_hash="outdated", urlopen=urlopen, @@ -1447,7 +1335,7 @@ def urlopen(request: urllib.request.Request): result = fetcher.conditional_fetch() - assert isinstance(result, bindist.FetchIndexResult) + assert isinstance(result, spack.binary_distribution.FetchIndexResult) assert not result.fresh assert mock_index.fetched_blob() assert result.etag == mock_index.manifest_etag @@ -1466,15 +1354,15 @@ def urlopen(request: urllib.request.Request): fp=None, ) - fetcher = bindist.DefaultIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + fetcher = spack.binary_distribution.DefaultIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), local_hash=None, urlopen=urlopen, ) - with pytest.raises(bindist.FetchIndexError): + with pytest.raises(spack.binary_distribution.FetchIndexError): fetcher.conditional_fetch() @@ -1493,9 +1381,9 @@ def urlopen(request: urllib.request.Request): # No other request should be made. 
assert False, "Unexpected request {}".format(url) - fetcher = bindist.DefaultIndexFetcher( - bindist.MirrorURLAndVersion( - "https://www.example.com", bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION + fetcher = spack.binary_distribution.DefaultIndexFetcher( + spack.binary_distribution.MirrorMetadata( + "https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ), local_hash=mock_index.index_hash, urlopen=urlopen, @@ -1503,3 +1391,77 @@ def urlopen(request: urllib.request.Request): assert fetcher.conditional_fetch().fresh assert not mock_index.fetched_blob() + + +@pytest.mark.usefixtures("install_mockery", "mock_packages") +def test_get_entries_from_cache_nested_mirrors(monkeypatch, tmp_path: pathlib.Path): + """Make sure URLBuildcacheEntry behaves as expected""" + + # Create a temp mirror directory for buildcache usage + mirror_dir = tmp_path / "mirror_dir" + mirror_url = url_util.path_to_file_url(str(mirror_dir)) + + # Install and push libdwarf to the root mirror + s = spack.concretize.concretize_one("libdwarf") + install_cmd("--fake", s.name) + buildcache_cmd("push", "-u", str(mirror_dir), s.name) + + # Install and push libzlib to the nested mirror + s = spack.concretize.concretize_one("zlib") + install_cmd("--fake", s.name) + buildcache_cmd("push", "-u", str(mirror_dir / "nested"), s.name) + + spec_manifests, _ = get_entries_from_cache( + str(mirror_url), str(tmp_path / "stage"), BuildcacheComponent.SPEC + ) + + nested_mirror_url = url_util.path_to_file_url(str(mirror_dir / "nested")) + spec_manifests_nested, _ = get_entries_from_cache( + str(nested_mirror_url), str(tmp_path / "stage"), BuildcacheComponent.SPEC + ) + + # Expected specs in root mirror + # - gcc-runtime + # - compiler-wrapper + # - libelf + # - libdwarf + assert len(spec_manifests) == 4 + # Expected specs in nested mirror + # - zlib + assert len(spec_manifests_nested) == 1 + + +def test_mirror_metadata(): + mirror_metadata = spack.binary_distribution.MirrorMetadata("https://dummy.io/__v3", 3) + as_str = str(mirror_metadata) + from_str = spack.binary_distribution.MirrorMetadata.from_string(as_str) + + # Verify values + assert mirror_metadata.url == "https://dummy.io/__v3" + assert mirror_metadata.version == 3 + assert mirror_metadata.view is None + + # Verify round trip + assert mirror_metadata == from_str + assert as_str == str(from_str) + + with pytest.raises(spack.url_buildcache.MirrorMetadataError, match="Malformed string"): + spack.binary_distribution.MirrorMetadata.from_string("https://dummy.io/__v3@@4") + + +def test_mirror_metadata_with_view(): + mirror_metadata = spack.binary_distribution.MirrorMetadata( + "https://dummy.io/__v3__@aview", 3, "aview" + ) + as_str = str(mirror_metadata) + from_str = spack.binary_distribution.MirrorMetadata.from_string(as_str) + + # Verify round trip + assert mirror_metadata.url == "https://dummy.io/__v3__@aview" + assert mirror_metadata.version == 3 + assert mirror_metadata.view == "aview" + assert mirror_metadata == from_str + assert as_str == str(from_str) + + with pytest.raises(spack.url_buildcache.MirrorMetadataError, match="Malformed string"): + spack.binary_distribution.MirrorMetadata.from_string("https://dummy.io/__v3%asdf__@aview") diff --git a/lib/spack/spack/test/bootstrap.py b/lib/spack/spack/test/bootstrap.py index b02a9fb11a36ac..2a9744a397e0dd 100644 --- a/lib/spack/spack/test/bootstrap.py +++ b/lib/spack/spack/test/bootstrap.py @@ -7,6 +7,7 @@ import pytest import spack.bootstrap +import spack.bootstrap.clingo import spack.bootstrap.config 
import spack.bootstrap.core import spack.compilers.config @@ -136,6 +137,7 @@ def test_bootstrap_disables_modulefile_generation(mutable_config): def test_bootstrap_search_for_compilers_with_no_environment(no_packages_yaml, mock_packages): assert not spack.compilers.config.all_compilers(init_config=False) with spack.bootstrap.ensure_bootstrap_configuration(): + spack.bootstrap.clingo._add_compilers_if_missing() assert spack.compilers.config.all_compilers(init_config=False) assert not spack.compilers.config.all_compilers(init_config=False) @@ -147,6 +149,7 @@ def test_bootstrap_search_for_compilers_with_environment_active( ): assert not spack.compilers.config.all_compilers(init_config=False) with spack.bootstrap.ensure_bootstrap_configuration(): + spack.bootstrap.clingo._add_compilers_if_missing() assert spack.compilers.config.all_compilers(init_config=False) assert not spack.compilers.config.all_compilers(init_config=False) diff --git a/lib/spack/spack/test/build_environment.py b/lib/spack/spack/test/build_environment.py index 2f6c8c01aee895..1ccd2d4be3e718 100644 --- a/lib/spack/spack/test/build_environment.py +++ b/lib/spack/spack/test/build_environment.py @@ -761,11 +761,13 @@ def test_optimization_flags_are_using_node_target(default_mock_concretization, m """\ gcc: externals: - - spec: gcc@14.2.0 languages=c + - spec: gcc@14.2.0 languages:=c,c++,fortran prefix: /fake/path1 extra_attributes: compilers: c: /fake/path1 + cxx: /fake/path1 + fortran: /fake/path1 extra_rpaths: - /extra/rpaths1 - /extra/rpaths2 @@ -776,11 +778,13 @@ def test_optimization_flags_are_using_node_target(default_mock_concretization, m """\ gcc: externals: - - spec: gcc@14.2.0 languages=c + - spec: gcc@14.2.0 languages=c,c++,fortran prefix: /fake/path1 extra_attributes: compilers: c: /fake/path1 + cxx: /fake/path1 + fortran: /fake/path1 """, None, ), diff --git a/lib/spack/spack/test/buildrequest.py b/lib/spack/spack/test/buildrequest.py index bea3badd45c871..da27cdbc29d37a 100644 --- a/lib/spack/spack/test/buildrequest.py +++ b/lib/spack/spack/test/buildrequest.py @@ -56,29 +56,21 @@ def test_build_request_strings(install_mockery): @pytest.mark.parametrize( - "package_cache_only,dependencies_cache_only,package_deptypes,dependencies_deptypes", + "root_policy,dependencies_policy,package_deptypes,dependencies_deptypes", [ - (False, False, dt.BUILD | dt.LINK | dt.RUN, dt.BUILD | dt.LINK | dt.RUN), - (True, False, dt.LINK | dt.RUN, dt.BUILD | dt.LINK | dt.RUN), - (False, True, dt.BUILD | dt.LINK | dt.RUN, dt.LINK | dt.RUN), - (True, True, dt.LINK | dt.RUN, dt.LINK | dt.RUN), + ("auto", "auto", dt.BUILD | dt.LINK | dt.RUN, dt.BUILD | dt.LINK | dt.RUN), + ("cache_only", "auto", dt.LINK | dt.RUN, dt.BUILD | dt.LINK | dt.RUN), + ("auto", "cache_only", dt.BUILD | dt.LINK | dt.RUN, dt.LINK | dt.RUN), + ("cache_only", "cache_only", dt.LINK | dt.RUN, dt.LINK | dt.RUN), ], ) def test_build_request_deptypes( - install_mockery, - package_cache_only, - dependencies_cache_only, - package_deptypes, - dependencies_deptypes, + install_mockery, root_policy, dependencies_policy, package_deptypes, dependencies_deptypes ): s = spack.concretize.concretize_one("dependent-install") build_request = inst.BuildRequest( - s.package, - { - "package_cache_only": package_cache_only, - "dependencies_cache_only": dependencies_cache_only, - }, + s.package, {"root_policy": root_policy, "dependencies_policy": dependencies_policy} ) actual_package_deptypes = build_request.get_depflags(s.package) diff --git a/lib/spack/spack/test/ci.py 
b/lib/spack/spack/test/ci.py index be55433cdf9940..72916555c1a3c8 100644 --- a/lib/spack/spack/test/ci.py +++ b/lib/spack/spack/test/ci.py @@ -14,7 +14,7 @@ import spack.environment as ev import spack.error import spack.llnl.util.filesystem as fs -import spack.paths as spack_paths +import spack.paths import spack.repo as repo import spack.util.git from spack.spec import Spec @@ -32,7 +32,7 @@ def repro_dir(tmp_path: pathlib.Path): yield result -def test_get_added_versions_new_checksum(mock_git_package_changes): +def test_filter_added_checksums_new_checksum(mock_git_package_changes): repo, filename, commits = mock_git_package_changes checksum_versions = { @@ -43,14 +43,12 @@ def test_get_added_versions_new_checksum(mock_git_package_changes): } with fs.working_dir(repo.packages_path): - added_versions = ci.get_added_versions( - checksum_versions, filename, from_ref=commits[-1], to_ref=commits[-2] - ) - assert len(added_versions) == 1 - assert added_versions[0] == Version("2.1.5") + assert ci.filter_added_checksums( + checksum_versions.keys(), filename, from_ref=commits[-1], to_ref=commits[-2] + ) == ["3f6576971397b379d4205ae5451ff5a68edf6c103b2f03c4188ed7075fbb5f04"] -def test_get_added_versions_new_commit(mock_git_package_changes): +def test_filter_added_checksums_new_commit(mock_git_package_changes): repo, filename, commits = mock_git_package_changes checksum_versions = { @@ -62,11 +60,9 @@ def test_get_added_versions_new_commit(mock_git_package_changes): } with fs.working_dir(repo.packages_path): - added_versions = ci.get_added_versions( + assert ci.filter_added_checksums( checksum_versions, filename, from_ref=commits[-2], to_ref=commits[-3] - ) - assert len(added_versions) == 1 - assert added_versions[0] == Version("2.1.6") + ) == ["74253725f884e2424a0dd8ae3f69896d5377f325"] def test_pipeline_dag(config, repo_builder: RepoBuilder): @@ -194,7 +190,7 @@ def test_pipeline_dag(config, repo_builder: RepoBuilder): @pytest.mark.not_on_windows("Not supported on Windows (yet)") def test_import_signing_key(mock_gnupghome): - signing_key_dir = spack_paths.mock_gpg_keys_path + signing_key_dir = spack.paths.mock_gpg_keys_path signing_key_path = os.path.join(signing_key_dir, "package-signing-key") with open(signing_key_path, encoding="utf-8") as fd: signing_key = fd.read() @@ -209,7 +205,7 @@ def test_download_and_extract_artifacts(tmp_path: pathlib.Path, monkeypatch): url = "https://www.nosuchurlexists.itsfake/artifacts.zip" working_dir = tmp_path / "repro" test_artifacts_path = os.path.join( - spack_paths.test_path, "data", "ci", "gitlab", "artifacts.zip" + spack.paths.test_path, "data", "ci", "gitlab", "artifacts.zip" ) def _urlopen_OK(*args, **kwargs): @@ -220,7 +216,7 @@ def _urlopen_OK(*args, **kwargs): monkeypatch.setattr(ci, "urlopen", _urlopen_OK) - ci.download_and_extract_artifacts(url, working_dir) + ci.download_and_extract_artifacts(url, str(working_dir)) found_zip = fs.find(working_dir, "artifacts.zip") assert len(found_zip) == 0 @@ -234,7 +230,7 @@ def _urlopen_500(*args, **kwargs): monkeypatch.setattr(ci, "urlopen", _urlopen_500) with pytest.raises(spack.error.SpackError): - ci.download_and_extract_artifacts(url, working_dir) + ci.download_and_extract_artifacts(url, str(working_dir)) def test_ci_copy_stage_logs_to_artifacts_fail( diff --git a/lib/spack/spack/test/cmd/audit.py b/lib/spack/spack/test/cmd/audit.py index 4464899f67ed5d..2bea59f9f46928 100644 --- a/lib/spack/spack/test/cmd/audit.py +++ b/lib/spack/spack/test/cmd/audit.py @@ -3,7 +3,9 @@ # SPDX-License-Identifier: (Apache-2.0 
OR MIT) import pytest +import spack.audit from spack.main import SpackCommand +from spack.test.conftest import MockHTTPResponse audit = SpackCommand("audit") @@ -33,7 +35,10 @@ def test_audit_configs(mutable_config, mock_packages): assert audit.returncode == 1 -def test_audit_packages_https(mutable_config, mock_packages): +def test_audit_packages_https(mutable_config, mock_packages, monkeypatch): + """Test audit packages-https with mocked network calls.""" + monkeypatch.setattr(spack.audit, "urlopen", lambda url: MockHTTPResponse(200, "OK")) + # Without providing --all should fail audit("packages-https", fail_on_error=False) # The mock configuration has duplicate definitions of some compilers diff --git a/lib/spack/spack/test/cmd/blame.py b/lib/spack/spack/test/cmd/blame.py index f96bb6e1fe89c7..f9b8d6345a2d5e 100644 --- a/lib/spack/spack/test/cmd/blame.py +++ b/lib/spack/spack/test/cmd/blame.py @@ -104,10 +104,9 @@ def test_blame_json(mock_packages): @pytest.mark.not_on_windows("git hangs") -def test_blame_by_git(mock_packages, capfd): +def test_blame_by_git(mock_packages): """Sanity check the blame command to make sure it works.""" - with capfd.disabled(): - out = blame("--git", "mpich") + out = blame("--git", "mpich") assert "class Mpich" in out assert ' homepage = "http://www.mpich.org"' in out @@ -194,7 +193,7 @@ def _git(*args, **kwargs): ensure_full_history(repo_path, filename) -def test_ensure_full_history_shallow_fails(mock_git_version_info, monkeypatch, capsys): +def test_ensure_full_history_shallow_fails(mock_git_version_info, monkeypatch, capfd): """Ensure a git that supports '--unshallow' but fails generates useful error.""" error_msg = "Mock git cannot fetch." @@ -214,11 +213,11 @@ def _git(*args, **kwargs): with pytest.raises(SystemExit): ensure_full_history(repo_path, filename) - out = capsys.readouterr() + out = capfd.readouterr() assert error_msg in out[1] -def test_ensure_full_history_shallow_old_git(mock_git_version_info, monkeypatch, capsys): +def test_ensure_full_history_shallow_old_git(mock_git_version_info, monkeypatch, capfd): """Ensure a git that doesn't support '--unshallow' fails.""" def _git(*args, **kwargs): @@ -234,5 +233,5 @@ def _git(*args, **kwargs): with pytest.raises(SystemExit): ensure_full_history(repo_path, filename) - out = capsys.readouterr() + out = capfd.readouterr() assert "Use a newer" in out[1] diff --git a/lib/spack/spack/test/cmd/bootstrap.py b/lib/spack/spack/test/cmd/bootstrap.py index aff82a257778a0..dbb77f2843d028 100644 --- a/lib/spack/spack/test/cmd/bootstrap.py +++ b/lib/spack/spack/test/cmd/bootstrap.py @@ -3,19 +3,17 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os import pathlib -import sys import pytest import spack.bootstrap import spack.bootstrap.core +import spack.cmd.mirror import spack.concretize import spack.config import spack.environment as ev import spack.main -import spack.mirrors.utils import spack.spec -from spack.llnl.path import convert_to_posix_path _bootstrap = spack.main.SpackCommand("bootstrap") @@ -34,15 +32,13 @@ def test_enable_and_disable(mutable_config, scope): @pytest.mark.parametrize("scope", [None, "site", "system", "user"]) -def test_root_get_and_set(mutable_config, scope): - scope_args, path = [], "/scratch/spack/bootstrap" +def test_root_get_and_set(mutable_config, tmp_path, scope): + scope_args, path = [], str(tmp_path) if scope: scope_args = ["--scope={0}".format(scope)] _bootstrap("root", path, *scope_args) - out = _bootstrap("root", *scope_args, output=str) - if sys.platform == "win32": - 
out = convert_to_posix_path(out) + out = _bootstrap("root", *scope_args) assert out.strip() == path @@ -101,15 +97,13 @@ def test_reset_in_file_scopes_overwrites_backup_files(mutable_config): assert os.path.exists(backup_file) -def test_list_sources(config, capsys): +def test_list_sources(config): # Get the merged list and ensure we get our defaults - with capsys.disabled(): - output = _bootstrap("list") + output = _bootstrap("list") assert "github-actions" in output # Ask for a specific scope and check that the list of sources is empty - with capsys.disabled(): - output = _bootstrap("list", "--scope", "user") + output = _bootstrap("list", "--scope", "user") assert "No method available" in output @@ -172,7 +166,7 @@ def test_remove_and_add_a_source(mutable_config): assert not sources # Add it back and check we restored the initial state - _bootstrap("add", "github-actions", "$spack/share/spack/bootstrap/github-actions-v0.6") + _bootstrap("add", "github-actions", "$spack/share/spack/bootstrap/github-actions-v2") sources = spack.bootstrap.core.bootstrapping_sources() assert len(sources) == 1 @@ -184,8 +178,8 @@ def test_bootstrap_mirror_metadata(mutable_config, linux_os, monkeypatch, tmp_pa `spack bootstrap add`. Here we don't download data, since that would be an expensive operation for a unit test. """ - old_create = spack.mirrors.utils.create - monkeypatch.setattr(spack.mirrors.utils, "create", lambda p, s: old_create(p, [])) + old_create = spack.cmd.mirror.create + monkeypatch.setattr(spack.cmd.mirror, "create", lambda p, s: old_create(p, [])) monkeypatch.setattr(spack.concretize, "concretize_one", lambda p: spack.spec.Spec(p)) # Create the mirror in a temporary folder diff --git a/lib/spack/spack/test/cmd/buildcache.py b/lib/spack/spack/test/cmd/buildcache.py index 6c82a0a4b26f77..4090d0e4037750 100644 --- a/lib/spack/spack/test/cmd/buildcache.py +++ b/lib/spack/spack/test/cmd/buildcache.py @@ -7,12 +7,15 @@ import os import pathlib import shutil +import urllib.parse +from datetime import datetime, timedelta from typing import Dict, List import pytest import spack.binary_distribution import spack.buildcache_migrate as migrate +import spack.buildcache_prune import spack.cmd.buildcache import spack.concretize import spack.environment as ev @@ -23,7 +26,8 @@ import spack.util.url as url_util import spack.util.web as web_util from spack.installer import PackageInstaller -from spack.llnl.util.filesystem import copy_tree, find +from spack.llnl.util.filesystem import copy_tree, find, getuid +from spack.llnl.util.lang import nullcontext from spack.paths import test_path from spack.url_buildcache import ( BuildcacheComponent, @@ -65,34 +69,29 @@ def mock_get_specs_multiarch(database, monkeypatch): @pytest.mark.db @pytest.mark.regression("13757") -def test_buildcache_list_duplicates(mock_get_specs, capsys): - with capsys.disabled(): - output = buildcache("list", "mpileaks", "@2.3") +def test_buildcache_list_duplicates(mock_get_specs): + output = buildcache("list", "mpileaks", "@2.3") assert output.count("mpileaks") == 3 @pytest.mark.db @pytest.mark.regression("17827") -def test_buildcache_list_allarch(database, mock_get_specs_multiarch, capsys): - with capsys.disabled(): - output = buildcache("list", "--allarch") - +def test_buildcache_list_allarch(database, mock_get_specs_multiarch): + output = buildcache("list", "--allarch") assert output.count("mpileaks") == 3 - with capsys.disabled(): - output = buildcache("list") - + output = buildcache("list") assert output.count("mpileaks") == 2 def 
test_buildcache_create_env( - install_mockery, mock_fetch, monkeypatch, tmp_path: pathlib.Path, mutable_mock_env_path + install_mockery, mock_fetch, tmp_path: pathlib.Path, mutable_mock_env_path ): """Ensure that buildcache create creates output files from env""" pkg = "trivial-install-test-package" - env("create", "test") + env("create", "--without-view", "test") with ev.read("test"): add(pkg) install() @@ -118,8 +117,9 @@ def test_buildcache_create_fails_on_noargs(tmp_path: pathlib.Path): buildcache("push", "--unsigned", str(tmp_path)) +@pytest.mark.skipif(getuid() == 0, reason="user is root") def test_buildcache_create_fail_on_perm_denied( - install_mockery, mock_fetch, monkeypatch, tmp_path: pathlib.Path + install_mockery, mock_fetch, tmp_path: pathlib.Path ): """Ensure that buildcache create fails on permission denied error.""" install("trivial-install-test-package") @@ -188,7 +188,6 @@ def test_buildcache_autopush(tmp_path: pathlib.Path, install_mockery, mock_fetch # Install and generate build cache index PackageInstaller([s.package], fake=True, explicit=True).install() - assert s.name is not None manifest_file = URLBuildcacheEntry.get_manifest_filename(s) specs_dirs = os.path.join( *URLBuildcacheEntry.get_relative_path_components(BuildcacheComponent.SPEC), s.name @@ -243,7 +242,7 @@ def verify_mirror_contents(): install("--fake", s.name) buildcache("push", "-u", "-f", src_mirror_url, s.name) - env("create", "test") + env("create", "--without-view", "test") with ev.read("test"): add(in_env_pkg) install() @@ -321,7 +320,6 @@ def test_buildcache_create_install( mock_packages, mock_fetch, mock_stage, - monkeypatch, tmp_path: pathlib.Path, ): """Ensure that buildcache create creates output files""" @@ -337,7 +335,6 @@ def test_buildcache_create_install( layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION ) cache_entry = cache_class(mirror_url, spec, allow_unsigned=True) - assert spec.name is not None manifest_path = os.path.join( str(tmp_path), *cache_class.get_relative_path_components(BuildcacheComponent.SPEC), @@ -471,7 +468,11 @@ def test_push_and_install_with_mirror_marked_unsigned_does_not_require_extra_fla spec.package.do_uninstall(force=True) PackageInstaller( - [spec.package], explicit=True, cache_only=True, unsigned=True if signed else None + [spec.package], + explicit=True, + root_policy="cache_only", + dependencies_policy="cache_only", + unsigned=True if signed else None, ).install() @@ -546,18 +547,16 @@ def _layout(signedness: str = "signed"): return _layout -def test_check_mirror_for_layout(v2_buildcache_layout, mutable_config, capsys): +def test_check_mirror_for_layout(v2_buildcache_layout, mutable_config, capfd): """Check printed warning in the presence of v2 layout binary mirrors""" test_mirror_path = v2_buildcache_layout("unsigned") check_mirror_for_layout(spack.mirrors.mirror.Mirror.from_local_path(str(test_mirror_path))) - err = str(capsys.readouterr()[1]) + err = str(capfd.readouterr()[1]) assert all([word in err for word in ["Warning", "missing", "layout"]]) -def test_url_buildcache_entry_v2_exists( - capsys, v2_buildcache_layout, mock_packages, mutable_config -): +def test_url_buildcache_entry_v2_exists(v2_buildcache_layout, mock_packages, mutable_config): """Test existence check for v2 buildcache entries""" test_mirror_path = v2_buildcache_layout("unsigned") mirror_url = pathlib.Path(test_mirror_path).as_uri() @@ -598,14 +597,12 @@ @pytest.mark.parametrize("signing", ["unsigned", "signed"]) def
test_install_v2_layout( signing, - capsys, v2_buildcache_layout, mock_packages, mutable_config, mutable_mock_env_path, install_mockery, mock_gnupghome, - monkeypatch, ): """Ensure we can still install from signed and unsigned v2 buildcache""" test_mirror_path = v2_buildcache_layout(signing) @@ -614,8 +611,7 @@ def test_install_v2_layout( # Trust original signing key (no-op if this is the unsigned pass) buildcache("keys", "--install", "--trust") - with capsys.disabled(): - output = install("--fake", "--no-check-signature", "libdwarf") + output = install("--fake", "--no-check-signature", "libdwarf") assert "Extracting libelf" in output assert "libelf: Successfully installed" in output @@ -626,7 +622,7 @@ def test_install_v2_layout( assert "deprecated" in output -def test_basic_migrate_unsigned(capsys, v2_buildcache_layout, mutable_config): +def test_basic_migrate_unsigned(v2_buildcache_layout, mutable_config): """Make sure first unsigned migration results in usable buildcache, leaving the previous layout in place. Also test that a subsequent one doesn't need to migrate anything, and that using --delete-existing @@ -635,8 +631,7 @@ def test_basic_migrate_unsigned(capsys, v2_buildcache_layout, mutable_config): test_mirror_path = v2_buildcache_layout("unsigned") mirror("add", "my-mirror", str(test_mirror_path)) - with capsys.disabled(): - output = buildcache("migrate", "--unsigned", "my-mirror") + output = buildcache("migrate", "--unsigned", "my-mirror") # The output indicates both specs were migrated assert output.count("Successfully migrated") == 6 @@ -649,15 +644,11 @@ def test_basic_migrate_unsigned(capsys, v2_buildcache_layout, mutable_config): assert os.path.isdir(build_cache_path) # Now list the specs available under the new layout - with capsys.disabled(): - output = buildcache("list", "--allarch") + output = buildcache("list", "--allarch") assert "libdwarf" in output and "libelf" in output - with capsys.disabled(): - output = buildcache( - "migrate", "--unsigned", "--delete-existing", "--yes-to-all", "my-mirror" - ) + output = buildcache("migrate", "--unsigned", "--delete-existing", "--yes-to-all", "my-mirror") # A second migration of the same mirror indicates neither spec # needs to be migrated @@ -668,9 +659,7 @@ def test_basic_migrate_unsigned(capsys, v2_buildcache_layout, mutable_config): assert not os.path.exists(build_cache_path) -def test_basic_migrate_signed( - capsys, v2_buildcache_layout, monkeypatch, mock_gnupghome, mutable_config -): +def test_basic_migrate_signed(v2_buildcache_layout, mock_gnupghome, mutable_config): """Test a signed migration requires a signing key, requires the public key originally used to sign the pkgs, fails and prints reasonable messages if those requirements are unmet, and eventually succeeds when they are met.""" @@ -686,8 +675,7 @@ def test_basic_migrate_signed( # Create a signing key and trust the key used to sign the pkgs originally gpg("create", "New Test Signing Key", "noone@nowhere.org") - with capsys.disabled(): - output = buildcache("migrate", "my-mirror") + output = buildcache("migrate", "my-mirror") # Without trusting the original signing key, spack fails with an explanation assert "Failed to verify signature of libelf" in output @@ -696,38 +684,31 @@ def test_basic_migrate_signed( # Trust original signing key (since it's in the original layout location, # this is where the monkeypatched attribute is used) - with capsys.disabled(): - output = buildcache("keys", "--install", "--trust") + output = buildcache("keys", "--install", "--trust") 
- with capsys.disabled(): - output = buildcache("migrate", "my-mirror") + output = buildcache("migrate", "my-mirror") # Once we have the proper keys, migration should succeed assert "Successfully migrated libdwarf" in output assert "Successfully migrated libelf" in output # Now list the specs available under the new layout - with capsys.disabled(): - output = buildcache("list", "--allarch") + output = buildcache("list", "--allarch") assert "libdwarf" in output and "libelf" in output -def test_unsigned_migrate_of_signed_mirror(capsys, v2_buildcache_layout, mutable_config): +def test_unsigned_migrate_of_signed_mirror(v2_buildcache_layout, mutable_config): """Test spack can do an unsigned migration of a signed buildcache by ignoring signatures and skipping re-signing.""" test_mirror_path = v2_buildcache_layout("signed") mirror("add", "my-mirror", str(test_mirror_path)) - with capsys.disabled(): - output = buildcache( - "migrate", "--unsigned", "--delete-existing", "--yes-to-all", "my-mirror" - ) + output = buildcache("migrate", "--unsigned", "--delete-existing", "--yes-to-all", "my-mirror") # Now list the specs available under the new layout - with capsys.disabled(): - output = buildcache("list", "--allarch") + output = buildcache("list", "--allarch") assert "libdwarf" in output and "libelf" in output @@ -743,7 +724,7 @@ def test_unsigned_migrate_of_signed_mirror(capsys, v2_buildcache_layout, mutable assert json.load(fd) -def test_migrate_requires_index(capsys, v2_buildcache_layout, mutable_config): +def test_migrate_requires_index(v2_buildcache_layout, mutable_config): """Test spack fails with a reasonable error message when mirror does not have an index""" @@ -850,3 +831,490 @@ def test_buildcache_prune_orphaned_manifest(tmp_path, mutable_database, mock_gnu assert "Found 1 manifest(s) that are missing blobs" in output cache_entry.destroy() + + +@pytest.mark.parametrize("dry_run", [False, True]) +def test_buildcache_prune_direct_with_keeplist( + tmp_path: pathlib.Path, mutable_database, mock_gnupghome, dry_run +): + """Test direct pruning functionality with a keeplist file""" + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Install and push multiple packages + specs = mutable_database.query_local("libelf", installed=True) + spec1 = specs[0] + + cache_entry = URLBuildcacheEntry( + mirror_url=f"file://{mirror_directory}", spec=spec1, allow_unsigned=True + ) + manifest_url = cache_entry.get_manifest_url(spec1, f"file://{mirror_directory}") + + # Push the first spec (package only, no dependencies) + buildcache("push", "--only", "package", "--update-index", "my-mirror", f"/{spec1.dag_hash()}") + + # Create a keeplist file that includes only spec1 + keeplist_file = tmp_path / "keeplist.txt" + keeplist_file.write_text(f"{spec1.dag_hash()}\n") + + # Run direct pruning + cmd_args = ["prune", "my-mirror", "--keeplist", str(keeplist_file)] + if dry_run: + cmd_args.append("--dry-run") + output = buildcache(*cmd_args) + + # Since all packages are in the keeplist, nothing should be pruned + assert web_util.url_exists(manifest_url) + assert "No specs to prune - all specs are in the keeplist" in output + + +@pytest.mark.parametrize("dry_run", [False, True]) +def test_buildcache_prune_direct_removes_unlisted( + tmp_path: pathlib.Path, mutable_database, mock_gnupghome, dry_run +): + """Test that direct pruning removes specs not in the keeplist""" + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Install and
push a package (package only, no dependencies) + specs = mutable_database.query_local("libelf", installed=True) + spec1 = specs[0] + buildcache("push", "--only", "package", "--update-index", "my-mirror", f"/{spec1.dag_hash()}") + + # Create a keeplist file that excludes the pushed package + keeplist_file = tmp_path / "keeplist.txt" + keeplist_file.write_text("0" * 32) + + cache_entry = URLBuildcacheEntry( + mirror_url=f"file://{mirror_directory}", spec=spec1, allow_unsigned=True + ) + manifest_url = cache_entry.get_manifest_url(spec1, f"file://{mirror_directory}") + + assert web_util.url_exists(manifest_url) + + # Run direct pruning + cmd_args = ["prune", "my-mirror", "--keeplist", str(keeplist_file)] + if dry_run: + cmd_args.append("--dry-run") + buildcache(*cmd_args) + + assert web_util.url_exists(manifest_url) == dry_run + + +def test_buildcache_prune_direct_empty_keeplist_fails( + tmp_path: pathlib.Path, mutable_database, mock_gnupghome +): + """Test that direct pruning fails with an empty keeplist file""" + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Create empty keeplist file + keeplist_file = tmp_path / "empty_keeplist.txt" + keeplist_file.write_text("") + + # Should fail with empty keeplist + with pytest.raises(spack.buildcache_prune.BuildcachePruningException): + buildcache("prune", "my-mirror", "--keeplist", str(keeplist_file)) + + +@pytest.mark.parametrize("dry_run", [False, True]) +def test_buildcache_prune_with_invalid_keep_hash( + tmp_path: pathlib.Path, mutable_database, mock_gnupghome, dry_run: bool +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Create a keeplist file that includes an invalid hash + keeplist_file = tmp_path / "keeplist.txt" + keeplist_file.write_text("this_is_an_invalid_hash") + + cmd_args = ["prune", "my-mirror", "--keeplist", str(keeplist_file)] + if dry_run: + cmd_args.append("--dry-run") + + with pytest.raises(spack.buildcache_prune.BuildcachePruningException): + buildcache(*cmd_args) + + +def test_buildcache_prune_new_specs_race_condition( + tmp_path: pathlib.Path, mutable_database, mock_gnupghome, monkeypatch: pytest.MonkeyPatch +): + """Test that specs uploaded after pruning begins are protected""" + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + spec = mutable_database.query_local("libelf", installed=True)[0] + + buildcache("push", "--only", "package", "--update-index", "my-mirror", f"/{spec.dag_hash()}") + + cache_entry = URLBuildcacheEntry( + mirror_url=f"file://{mirror_directory}", spec=spec, allow_unsigned=True + ) + manifest_url = cache_entry.get_manifest_url(spec, f"file://{mirror_directory}") + + def mock_stat_url(url: str): + """ + Mock the stat_url function for testing. + + For the specific spec created in this test, fake its mtime so that it appears to + have been created after the pruning started. 
+ """ + if url == manifest_url: + return 1, datetime.now().timestamp() + timedelta(minutes=10).total_seconds() + parsed_url = urllib.parse.urlparse(url) + stat_result = pathlib.Path(parsed_url.path).stat() + return stat_result.st_size, stat_result.st_mtime + + monkeypatch.setattr(web_util, "stat_url", mock_stat_url) + + keeplist_file = tmp_path / "keeplist.txt" + keeplist_file.write_text("0" * 32) + + # Run end-to-end buildcache prune - this should not delete `libelf`, despite it + # not being in the keeplist, because its mtime is after the pruning started + assert web_util.url_exists(manifest_url) + buildcache("prune", "my-mirror", "--keeplist", str(keeplist_file)) + assert web_util.url_exists(manifest_url) + + +def create_env_from_concrete_spec(spec: spack.spec.Spec): + """Build cache index view source is current active environment""" + # Create a unique environment for this spec only + env_name = f"specenv-{spec.dag_hash()}" + if not ev.exists(env_name): + env("create", "--without-view", env_name) + + e = ev.environment_from_name_or_dir(env_name) + with e: + add(f"{spec.name}/{spec.dag_hash()}") + # This should handle updating the environment to mark all packges as installed + install() + return e + + +def args_for_active_env(spec: spack.spec.Spec): + """Build cache index view source is an active environment""" + env = create_env_from_concrete_spec(spec) + return [env, []] + + +def args_for_env_by_path(spec: spack.spec.Spec): + """Build cache index view source is an environment path""" + env = create_env_from_concrete_spec(spec) + return [nullcontext(), [env.path]] + + +def args_for_env_by_name(spec: spack.spec.Spec): + """Build cache index view source is a managed environment name""" + env = create_env_from_concrete_spec(spec) + return [nullcontext(), [env.name]] + + +def read_specs_in_index(mirror_directory, view): + """Read specs hashes from a build cache index (ie. a database file) + + This assumes the layout of the database holds the spec hashes under: + database -> installs -> hashes... 
+ """ + mirror_metadata = spack.binary_distribution.MirrorMetadata( + f"file://{mirror_directory}", spack.mirrors.mirror.SUPPORTED_LAYOUT_VERSIONS[0], view + ) + fetcher = spack.binary_distribution.DefaultIndexFetcher(mirror_metadata, None) + result = fetcher.conditional_fetch() + db_dict = json.loads(result.data) + return set([h for h in db_dict["database"]["installs"]]) + + +def test_buildcache_create_view_failure(tmp_path, mutable_config, mutable_mock_env_path): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # No spec sources should raise an exception + with pytest.raises(spack.main.SpackCommandError): + command_args = ["update-index", "--name", "test_view", "my-mirror"] + buildcache(*command_args) + + # spec sources should raise an exception + expect = "no such environment 'DEADBEEF'" + with pytest.raises(spack.error.SpackError, match=expect): + command_args = ["update-index", "--name", "test_view", "my-mirror", "DEADBEEF"] + buildcache(*command_args) + + +def test_buildcache_create_view_empty( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Push a spec to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + buildcache("push", "my-mirror", mpileaks_specs[0].format("{/hash}")) + + # Make sure the view doesn't exist yet + with pytest.raises(spack.binary_distribution.FetchIndexError): + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + + # Write a minimal lockfile (this is not validated/read by an enviornment) + empty_manifest = tmp_path / "emptylock" / "spack.yaml" + empty_manifest.parent.mkdir(exist_ok=False) + empty_manifest.write_text("spack: {}", encoding="utf-8") + empty_lockfile = tmp_path / "emptylock" / "spack.lock" + empty_lockfile.write_text( + '{"_meta": {"lockfile-version": 1}, "roots": [], "concrete_specs": {}}', encoding="utf-8" + ) + # Create a view with no specs + command_args = [ + "update-index", + "--force", + "--name", + "test_view", + "my-mirror", + str(empty_lockfile.parent), + ] + out = buildcache(*command_args) + assert "No specs found for view, creating an empty index" in out + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert there are no hashes in the view + assert not hashes_in_view + + +@pytest.mark.parametrize( + "source_args", (args_for_active_env, args_for_env_by_path, args_for_env_by_name) +) +def test_buildcache_create_view( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path, source_args +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Push all of the mpileaks packages to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + for s in mpileaks_specs: + buildcache("push", "my-mirror", s.format("{/hash}")) + + # Grab all of the hashes for each mpileaks spec + mpileaks_0_hashes = set([s.dag_hash() for s in mpileaks_specs[0].traverse()]) + + # Make sure the view doesn't exist yet + with pytest.raises(spack.binary_distribution.FetchIndexError): + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + + # Create a view using a parametrized source that contains one of the mpileaks + context, extra_args = source_args(mpileaks_specs[0]) + with context: + command_args = ["update-index", "--name", "test_view", "my-mirror"] + extra_args + buildcache(*command_args) + + hashes_in_view = 
read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_0_hashes + + # Test fail to overwrite without force + expect = "Index already exists. To overwrite or update pass --force or --append respectively" + with pytest.raises(spack.error.SpackError, match=expect): + command_args = [ + "update-index", + "--name", + "test_view", + "my-mirror", + mpileaks_specs[2].format("{/hash}"), + ] + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_0_hashes + + +@pytest.mark.parametrize( + "source_args", (args_for_active_env, args_for_env_by_path, args_for_env_by_name) +) +def test_buildcache_create_view_append( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path, source_args +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Push all of the mpileaks packages to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + for s in mpileaks_specs: + buildcache("push", "my-mirror", s.format("{/hash}")) + + # Grab all of the hashes for each mpileaks spec + mpileaks_0_hashes = set([s.dag_hash() for s in mpileaks_specs[0].traverse()]) + mpileaks_1_hashes = set([s.dag_hash() for s in mpileaks_specs[1].traverse()]) + + # Make sure the view doesn't exist yet + with pytest.raises(spack.binary_distribution.FetchIndexError): + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + + # Test append to empty index view + context, extra_args = source_args(mpileaks_specs[0]) + with context: + command_args = [ + "update-index", + "-y", + "--append", + "--name", + "test_view", + "my-mirror", + ] + extra_args + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_0_hashes + + # Test append to existing index view + context, extra_args = source_args(mpileaks_specs[1]) + with context: + command_args = [ + "update-index", + "-y", + "--append", + "--name", + "test_view", + "my-mirror", + ] + extra_args + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes and mpileaks_1_hashes exist in the view, + # and no other hashes + assert hashes_in_view == (mpileaks_0_hashes | mpileaks_1_hashes) + + +@pytest.mark.parametrize( + "source_args", (args_for_active_env, args_for_env_by_path, args_for_env_by_name) +) +def test_buildcache_create_view_overwrite( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path, source_args +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Push all of the mpileaks packages to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + for s in mpileaks_specs: + buildcache("push", "my-mirror", s.format("{/hash}")) + + # Grab all of the hashes for each mpileaks spec + mpileaks_0_hashes = set([s.dag_hash() for s in mpileaks_specs[0].traverse()]) + mpileaks_1_hashes = set([s.dag_hash() for s in mpileaks_specs[1].traverse()]) + + # Make sure the view doesn't exist yet + with pytest.raises(spack.binary_distribution.FetchIndexError): + hashes_in_view = read_specs_in_index(mirror_directory, 
"test_view") + + # Create a view using a parametrized source that contains one of the mpileaks + context, extra_args = source_args(mpileaks_specs[0]) + with context: + command_args = ["update-index", "--name", "test_view", "my-mirror"] + extra_args + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_0_hashes + + # Override a view with force + context, extra_args = source_args(mpileaks_specs[1]) + with context: + command_args = ["update-index", "--force", "--name", "test_view", "my-mirror"] + extra_args + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_1_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_1_hashes + + +def test_buildcache_create_view_non_active_env( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path +): + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", "my-mirror", mirror_directory) + + # Push all of the mpileaks packages to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + for s in mpileaks_specs: + buildcache("push", "my-mirror", s.format("{/hash}")) + + # Grab all of the hashes for each mpileaks spec + mpileaks_0_hashes = set([s.dag_hash() for s in mpileaks_specs[0].traverse()]) + + # Make sure the view doesn't exist yet + with pytest.raises(spack.binary_distribution.FetchIndexError): + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + + # Create a view from an environment name that is different from the current active environment + _, extra_args = args_for_env_by_name( + mpileaks_specs[0] + ) # Get args for env by name using mpileaks[0] + context, _ = args_for_active_env( + mpileaks_specs[1] + ) # Get the context for an active env using mpileaks[1] + with context: + command_args = ["update-index", "--name", "test_view", "my-mirror"] + extra_args + buildcache(*command_args) + + hashes_in_view = read_specs_in_index(mirror_directory, "test_view") + # Assert all of the hashes for mpileaks_0_hashes exist in the view, and no other hashes + assert hashes_in_view == mpileaks_0_hashes + + +@pytest.mark.parametrize("view", (None, "test_view")) +@pytest.mark.disable_clean_stage_check +def test_buildcache_check_index_full( + tmp_path, mutable_config, mutable_database, mutable_mock_env_path, view +): + view_args = ["--name", view] if view is not None else [] + mirror_directory = str(tmp_path) + mirror("add", "--unsigned", *view_args, "my-mirror", mirror_directory) + + # Push all of the mpileaks packages to the cache + mpileaks_specs = mutable_database.query_local("mpileaks") + for s in mpileaks_specs: + buildcache("push", "my-mirror", s.format("{/hash}")) + + # Update index using and active environment containing mpileaks[0] + context, extra_args = args_for_active_env(mpileaks_specs[0]) + with context: + buildcache("update-index", "my-mirror", *extra_args) + + out = buildcache("check-index", "--verify", "exists", "manifests", "blobs", "--", "my-mirror") + # Everything thing be good here + assert "Index exists in mirror: my-mirror" + assert "Missing specs: 0" + assert "Missing blobs: 0" + if view: + assert "Unindexed specs: n/a" in out + else: + assert "Unindexed specs: 0" in out + + # Remove the index blob + # This creates index not for both view and non-view indices + # For non-view indices this is also a missing blob 
diff --git a/lib/spack/spack/test/cmd/checksum.py b/lib/spack/spack/test/cmd/checksum.py index b98a13edbf7cea..fa676aab11023b 100644 --- a/lib/spack/spack/test/cmd/checksum.py +++ b/lib/spack/spack/test/cmd/checksum.py @@ -309,14 +309,14 @@ def test_checksum_url(mock_packages, config): spack_checksum(f"{pkg_cls.url}") -def test_checksum_verification_fails(default_mock_concretization, capsys, can_fetch_versions): +def test_checksum_verification_fails(default_mock_concretization, capfd, can_fetch_versions): spec = spack.concretize.concretize_one("zlib") pkg = spec.package versions = list(pkg.versions.keys()) version_hashes = {versions[0]: "abadhash", Version("0.1"): "123456789"} with pytest.raises(SystemExit): spack.cmd.checksum.print_checksum_status(pkg, version_hashes) - out = str(capsys.readouterr()) + out = str(capfd.readouterr()) assert out.count("Correct") == 0 assert "No previous checksum" in out assert "Invalid checksum" in out diff --git a/lib/spack/spack/test/cmd/ci.py b/lib/spack/spack/test/cmd/ci.py index 7b67950d1c012e..5255aa838d72de 100644 --- a/lib/spack/spack/test/cmd/ci.py +++ b/lib/spack/spack/test/cmd/ci.py @@ -11,7 +11,6 @@ import spack.vendor.jsonschema -import spack import spack.binary_distribution import spack.ci as ci import spack.cmd @@ -20,11 +19,12 @@ import spack.environment as ev import spack.hash_types as ht import spack.main -import spack.paths as spack_paths +import spack.paths import spack.repo import spack.spec import spack.stage import spack.util.spack_yaml as syaml +import spack.util.web import spack.version from spack.ci import gitlab as gitlab_generator from spack.ci.common import PipelineDag, PipelineOptions, SpackCIConfig @@ -122,17 +122,11 @@ def ci_generate_test( def _func(spack_yaml_content, *args, fail_on_error=True): spack_yaml = tmp_path / "spack.yaml" spack_yaml.write_text(spack_yaml_content) - - env_cmd("create", "test", str(spack_yaml)) + ev.create("test", init_file=spack_yaml, with_view=False) outputfile = tmp_path / ".gitlab-ci.yml" with ev.read("test"): output = ci_cmd( - "generate", - "--output-file", - str(outputfile), - *args, - output=str, - fail_on_error=fail_on_error, + "generate", "--output-file", str(outputfile), *args, fail_on_error=fail_on_error ) return spack_yaml, outputfile, output @@ -224,7 +218,7 @@ def test_ci_generate_with_env_missing_section( specs: - archive-files mirrors: - buildcache-destination: {tmp_path / 'ci-mirror'} + buildcache-destination: {tmp_path / "ci-mirror"} """ expect = "Environment does not have a `ci` configuration" with pytest.raises(ci.SpackCIError, match=expect): @@ -361,7 +355,7 @@ def test_ci_generate_pkg_with_deps(ci_generate_test, tmp_path: pathlib.Path, ci_ specs: -
dependent-install mirrors: - buildcache-destination: {tmp_path / 'ci-mirror'} + buildcache-destination: {tmp_path / "ci-mirror"} ci: pipeline-gen: - submapping: @@ -404,7 +398,7 @@ def test_ci_generate_for_pr_pipeline(ci_generate_test, tmp_path: pathlib.Path, m specs: - dependent-install mirrors: - buildcache-destination: {tmp_path / 'ci-mirror'} + buildcache-destination: {tmp_path / "ci-mirror"} ci: pipeline-gen: - submapping: @@ -480,7 +474,7 @@ def test_ci_rebuild_missing_config(tmp_path: pathlib.Path, working_env, mutable_ def _signing_key(): - signing_key_path = pathlib.Path(spack_paths.mock_gpg_keys_path) / "package-signing-key" + signing_key_path = pathlib.Path(spack.paths.mock_gpg_keys_path) / "package-signing-key" return signing_key_path.read_text() @@ -659,9 +653,9 @@ def mock_push_or_raise(*args, **kwargs): with working_dir(rebuild_env.env_dir): activate_rebuild_env(tmp_path, pkg_name, rebuild_env) - expect = f"Command exited with code {FAILED_CREATE_BUILDCACHE_CODE}" - with pytest.raises(spack.main.SpackCommandError, match=expect): - ci_cmd("rebuild", fail_on_error=True) + with pytest.raises(spack.main.SpackCommandError) as e: + ci_cmd("rebuild") + assert e.value.code == FAILED_CREATE_BUILDCACHE_CODE def test_ci_require_signing( @@ -694,12 +688,12 @@ def test_ci_require_signing( env_cmd("activate", "--without-view", "--sh", "-d", str(spack_yaml.parent)) # Run without the variable to make sure we don't accidentally require signing - output = ci_cmd("rebuild", output=str, fail_on_error=False) + output = ci_cmd("rebuild", fail_on_error=False) assert "spack must have exactly one signing key" not in output # Now run with the variable to make sure it works monkeypatch.setenv("SPACK_REQUIRE_SIGNING", "True") - output = ci_cmd("rebuild", output=str, fail_on_error=False) + output = ci_cmd("rebuild", fail_on_error=False) assert "spack must have exactly one signing key" in output env_cmd("deactivate") @@ -762,7 +756,7 @@ def test_ci_nothing_to_rebuild( } ) - ci_out = ci_cmd("rebuild", output=str) + ci_out = ci_cmd("rebuild") assert "No need to rebuild archive-files" in ci_out @@ -881,15 +875,13 @@ def test_push_to_build_cache( # Validate resulting buildcache (database) index layout_version = spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION - url_and_version = spack.binary_distribution.MirrorURLAndVersion( - mirror_url, layout_version - ) - index_fetcher = spack.binary_distribution.DefaultIndexFetcher(url_and_version, None) + mirror_metadata = spack.binary_distribution.MirrorMetadata(mirror_url, layout_version) + index_fetcher = spack.binary_distribution.DefaultIndexFetcher(mirror_metadata, None) result = index_fetcher.conditional_fetch() spack.vendor.jsonschema.validate(json.loads(result.data), db_idx_schema) # Now that index is regenerated, validate "buildcache list" output - assert "patchelf" in buildcache_cmd("list", output=str) + assert "patchelf" in buildcache_cmd("list") logs_dir = scratch / "logs_dir" logs_dir.mkdir() @@ -897,7 +889,7 @@ def test_push_to_build_cache( assert "spack-build-out.txt.gz" in os.listdir(logs_dir) -def test_push_to_build_cache_exceptions(monkeypatch, tmp_path: pathlib.Path, capsys): +def test_push_to_build_cache_exceptions(monkeypatch, tmp_path: pathlib.Path, capfd): def push_or_raise(*args, **kwargs): raise spack.binary_distribution.PushToBuildCacheError("Error: Access Denied") @@ -906,7 +898,7 @@ def push_or_raise(*args, **kwargs): # Input doesn't matter, as we are faking exceptional output url = tmp_path.as_uri() 
ci.push_to_build_cache(spack.spec.Spec(), url, False) - assert f"Problem writing to {url}: Error: Access Denied" in capsys.readouterr().err + assert f"Problem writing to {url}: Error: Access Denied" in capfd.readouterr().err @@ -1055,7 +1047,7 @@ def test_ci_generate_override_runner_attrs( def test_ci_rebuild_index( - tmp_path: pathlib.Path, working_env, mutable_mock_env_path, install_mockery, mock_fetch, capsys + tmp_path: pathlib.Path, working_env, mutable_mock_env_path, install_mockery, mock_fetch ): scratch = tmp_path / "working_dir" mirror_dir = scratch / "mirror" @@ -1092,9 +1084,8 @@ def test_ci_rebuild_index( buildcache_cmd("push", "-u", "-f", mirror_url, "callpath") ci_cmd("rebuild-index") - with capsys.disabled(): - output = buildcache_cmd("list", "-L", "--allarch") - assert concrete_spec.dag_hash() + " callpath" in output + output = buildcache_cmd("list", "-L", "--allarch") + assert concrete_spec.dag_hash() + " callpath" in output def test_ci_get_stack_changed(mock_git_repo, monkeypatch): @@ -1145,7 +1136,7 @@ def fake_change_revisions(env_path): - pkg-a - pkg-d mirrors: - buildcache-destination: {tmp_path / 'ci-mirror'} + buildcache-destination: {tmp_path / "ci-mirror"} ci: pipeline-gen: - build-job: @@ -1223,7 +1214,7 @@ def test_ci_subcommands_without_mirror( ci_cmd("generate", "--output-file", str(tmp_path / ".gitlab-ci.yml")) # Also check the 'rebuild-index' subcommand - output = ci_cmd("rebuild-index", output=str, fail_on_error=False) + output = ci_cmd("rebuild-index", fail_on_error=False) assert "spack ci rebuild-index requires an env containing a mirror" in output @@ -1282,7 +1273,7 @@ def test_ci_generate_read_broken_specs_url( env_cmd("create", "test", "./spack.yaml") with ev.read("test"): # Check output of the 'generate' subcommand - output = ci_cmd("generate", output=str, fail_on_error=False) + output = ci_cmd("generate", fail_on_error=False) assert "known to be broken" in output expected = ( @@ -1432,7 +1423,6 @@ def fake_download_and_extract_artifacts(url, work_dir, merge_commit_test=True): "https://example.com/api/v1/projects/1/jobs/2/artifacts", "--working-dir", str(repro_dir), - output=str, ) # Make sure the script was generated assert (repro_dir / "start.sh").exists() @@ -1451,7 +1441,6 @@ def fake_download_and_extract_artifacts(url, work_dir, merge_commit_test=True): "https://example.com/api/v1/projects/1/jobs/2/artifacts", "--working-dir", str(repro_dir), - output=str, ) # Cleanup between tests @@ -1464,7 +1453,6 @@ def fake_download_and_extract_artifacts(url, work_dir, merge_commit_test=True): "--use-local-head", "--working-dir", str(repro_dir), - output=str, ) # Make sure we are checking out the HEAD commit without a merge commit @@ -1486,7 +1474,6 @@ def fake_download_and_extract_artifacts(url, work_dir, merge_commit_test=True): "https://example.com/api/v1/projects/1/jobs/2/artifacts", "--working-dir", str(repro_dir), - output=str, ) # Make sure the script was generated assert (repro_dir / "start.sh").exists() @@ -1526,49 +1513,74 @@ def test_reproduce_build_url_validation(url_in, url_out): def test_reproduce_build_url_validation_fails(): """Wrong URLs should cause an exception""" - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): ci_cmd("reproduce-build", "example.com/spack/spack/-/jobs/123456/artifacts/download") - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): ci_cmd("reproduce-build",
"https://example.com/spack/spack/-/issues") - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): ci_cmd("reproduce-build", "https://example.com/spack/spack/-") @pytest.mark.parametrize( "subcmd", [(""), ("generate"), ("rebuild-index"), ("rebuild"), ("reproduce-build")] ) -def test_ci_help(subcmd, capsys): +def test_ci_help(subcmd): """Make sure `spack ci` --help describes the (sub)command help.""" - out = spack.main.SpackCommand("ci", subprocess=True)(subcmd, "--help") + out = spack.main.SpackCommand("ci")(subcmd, "--help", fail_on_error=False) - usage = "usage: spack ci {0}{1}[".format(subcmd, " " if subcmd else "") + usage = " ci {0}{1}[".format(subcmd, " " if subcmd else "") assert usage in out -def test_cmd_first_line(): - """Explicitly test first_line since not picked up in test_ci_help.""" - first = "This is a test." - doc = """{0} +def test_docstring_utils(): + def example_function(): + """\ + this is the first line - Is there more to be said?""".format( - first - ) + this is not the first line + """ + pass - assert spack.cmd.first_line(doc) == first + assert spack.cmd.doc_first_line(example_function) == "this is the first line" + assert spack.cmd.doc_dedented(example_function) == ( + "this is the first line\n\nthis is not the first line\n" + ) -@pytest.mark.skip(reason="Gitlab CI was removed from Spack") def test_gitlab_config_scopes(install_mockery, ci_generate_test, tmp_path: pathlib.Path): - """Test pipeline generation with real configs included""" - configs_path = os.path.join(spack_paths.share_path, "gitlab", "cloud_pipelines", "configs") - _, outputfile, _ = ci_generate_test( + """Test pipeline generation with included configs""" + # Create an included config scope + configs_path = tmp_path / "gitlab" / "configs" + configs_path.mkdir(parents=True, exist_ok=True) + with open(configs_path / "ci.yaml", "w", encoding="utf-8") as fd: + fd.write( + """ +ci: + pipeline-gen: + - reindex-job: + variables: + CI_JOB_SIZE: small + KUBERNETES_CPU_REQUEST: 10 + KUBERNETES_MEMORY_REQUEST: 100 + tags: ["spack", "service"] +""" + ) + + rel_configs_path = configs_path.relative_to(tmp_path) + manifest, outputfile, _ = ci_generate_test( f"""\ spack: config: - install_tree: {tmp_path / "opt"} - include: [{configs_path}] + install_tree: + root: {tmp_path / "opt"} + include: + - {rel_configs_path} + - path: {rel_configs_path} + - {configs_path} + - when: 'False' + path: https://dummy.io view: false specs: - dependent-install @@ -1596,6 +1608,34 @@ def test_gitlab_config_scopes(install_mockery, ci_generate_test, tmp_path: pathl expected_vars = ["CI_JOB_SIZE", "KUBERNETES_CPU_REQUEST", "KUBERNETES_MEMORY_REQUEST"] assert all([v in rebuild_vars for v in expected_vars]) + # Read the concrete environment and ensure the relative path was updated + conc_env_path = tmp_path / "jobs_scratch_dir" / "concrete_environment" + conc_env_manifest = conc_env_path / "spack.yaml" + + env_manifest = syaml.load(conc_env_manifest.read_text()) + assert "include" in env_manifest["spack"] + + # Ensure relative path include correctly updated + # Ensure the relocated concrete env includes point to the same location + rel_conc_path = env_manifest["spack"]["include"][0] + abs_conc_path = (conc_env_path / rel_conc_path).absolute().resolve() + assert str(abs_conc_path) == os.path.join(ev.as_env_dir("test"), "gitlab", "configs") + + # Ensure relative path include with "path" correctly updated + # Ensure the relocated concrete env includes point to the same location + rel_conc_path = 
env_manifest["spack"]["include"][1]["path"] + abs_conc_path = (conc_env_path / rel_conc_path).absolute().resolve() + assert str(abs_conc_path) == os.path.join(ev.as_env_dir("test"), "gitlab", "configs") + + # Ensure absolute path is unchanged + # Ensure the relocated concrete env includes point to the same location + abs_config_path = env_manifest["spack"]["include"][2] + assert str(abs_config_path) == str(configs_path) + + # Ensure URL path is unchanged + url_config_path = env_manifest["spack"]["include"][3]["path"] + assert str(url_config_path) == "https://dummy.io" + def test_ci_generate_mirror_config( tmp_path: pathlib.Path, @@ -1900,6 +1940,57 @@ def test_ci_generate_alternate_target( assert "externaltest" in pipeline_doc +def test_ci_generate_forward_variables( + ci_generate_test, + tmp_path: pathlib.Path, + mutable_mock_env_path, + install_mockery, + mock_packages, + ci_base_environment, +): + """Ensure the above pipeline generator was correctly registered and + is used to generate a pipeline for the stack/config defined here.""" + bin_mirror_url = tmp_path / "ci-bin-mirror" + + spack_yaml_contents = f""" +spack: + specs: + - archive-files + - externaltest + mirrors: + buildcache-destination: {bin_mirror_url} + ci: + target: gitlab + pipeline-gen: + - submapping: + - match: + - archive-files + build-job: + tags: + - donotcare + image: donotcare +""" + noforward_vars = ["NO_FORWARD_VAR"] + forward_vars = ["TEST_VAR", "ANOTHER_TEST_VAR"] + for v in forward_vars + noforward_vars: + os.environ[v] = f"{v}_BEEF" + + fwd_arg = " --forward-variable " + _, output_file, _ = ci_generate_test( + spack_yaml_contents, fwd_arg.strip(), *fwd_arg.join(forward_vars).split() + ) + + with open(output_file, encoding="utf-8") as fd: + pipeline_yaml = syaml.load(fd.read()) + + for v in forward_vars: + assert v in pipeline_yaml["variables"] + assert pipeline_yaml["variables"][v] == f"{v}_BEEF" + + for v in noforward_vars: + assert v not in pipeline_yaml["variables"] + + @pytest.fixture def fetch_versions_match(monkeypatch): """Fake successful checksums returned from downloaded tarballs.""" @@ -1909,6 +2000,7 @@ def get_checksums_for_versions(url_by_version, package_name, **kwargs): return {v: pkg_cls.versions[v]["sha256"] for v in url_by_version} monkeypatch.setattr(spack.stage, "get_checksums_for_versions", get_checksums_for_versions) + monkeypatch.setattr(spack.util.web, "url_exists", lambda url: True) @pytest.fixture @@ -1922,6 +2014,7 @@ def get_checksums_for_versions(url_by_version, package_name, **kwargs): } monkeypatch.setattr(spack.stage, "get_checksums_for_versions", get_checksums_for_versions) + monkeypatch.setattr(spack.util.web, "url_exists", lambda url: True) @pytest.mark.parametrize("versions", [["2.1.4"], ["2.1.4", "2.1.5"]]) @@ -2065,6 +2158,19 @@ def validate_standard_versions(pkg, versions): monkeypatch.setattr(spack.cmd.ci, "validate_standard_versions", validate_standard_versions) +@pytest.fixture +def verify_standard_versions_invalid_duplicates(monkeypatch): + def validate_standard_versions(pkg, versions): + for version in versions: + if str(version) == "2.1.7": + print(f"Validated {pkg.name}@{version}") + else: + print(f"Invalid checksum found {pkg.name}@{version}") + return False + + monkeypatch.setattr(spack.cmd.ci, "validate_standard_versions", validate_standard_versions) + + @pytest.fixture def verify_git_versions_invalid(monkeypatch): def validate_git_versions(pkg, versions): @@ -2091,7 +2197,7 @@ def test_ci_verify_versions_valid( assert "Validated diff-test@2.1.6" in out -def 
-def test_ci_verify_versions_standard_invalid( +def test_ci_verify_versions_invalid( monkeypatch, mock_packages, mock_git_package_changes, @@ -2107,6 +2213,22 @@ assert "Invalid commit for diff-test@2.1.6" in out +def test_ci_verify_versions_standard_duplicates( + monkeypatch, + mock_packages, + mock_git_package_changes, + verify_standard_versions_invalid_duplicates, +): + repo, _, commits = mock_git_package_changes + with spack.repo.use_repositories(repo): + monkeypatch.setattr(spack.repo, "builtin_repo", lambda: repo) + + out = ci_cmd("verify-versions", commits[-3], commits[-4], fail_on_error=False) + assert "Validated diff-test@2.1.7" in out + assert "Invalid checksum found diff-test@2.1.8" in out + + def test_ci_verify_versions_manual_package(monkeypatch, mock_packages, mock_git_package_changes): repo, _, commits = mock_git_package_changes with spack.repo.use_repositories(repo): diff --git a/lib/spack/spack/test/cmd/clean.py b/lib/spack/spack/test/cmd/clean.py index 3966b111c86492..c3ecb0ae8a53af 100644 --- a/lib/spack/spack/test/cmd/clean.py +++ b/lib/spack/spack/test/cmd/clean.py @@ -36,14 +36,15 @@ def __call__(self, *args, **kwargs): monkeypatch.setattr(spack.caches.MISC_CACHE, "destroy", Counter("caches")) monkeypatch.setattr(spack.store.STORE.failure_tracker, "clear_all", Counter("failures")) monkeypatch.setattr(spack.cmd.clean, "remove_python_cache", Counter("python_cache")) + monkeypatch.setattr(fs, "remove_directory_contents", Counter("bootstrap")) yield counts -all_effects = ["stages", "downloads", "caches", "failures", "python_cache"] +all_effects = ["stages", "downloads", "caches", "failures", "python_cache", "bootstrap"] -@pytest.mark.usefixtures("mock_packages", "config") +@pytest.mark.usefixtures("mock_packages") @pytest.mark.parametrize( "command_line,effects", [ @@ -57,7 +59,9 @@ def __call__(self, *args, **kwargs): ("", []), ], ) -def test_function_calls(command_line, effects, mock_calls_for_clean): +def test_function_calls(command_line, effects, mock_calls_for_clean, mutable_config): + mutable_config.set("bootstrap", {"root": "fake"}) + + # Call the command with the supplied command line clean(command_line) diff --git a/lib/spack/spack/test/cmd/commands.py b/lib/spack/spack/test/cmd/commands.py index caa617003a2a5b..ffd5547795ed82 100644 --- a/lib/spack/spack/test/cmd/commands.py +++ b/lib/spack/spack/test/cmd/commands.py @@ -6,33 +6,38 @@ import os import pathlib import shutil +import sys import textwrap import pytest import spack.cmd import spack.cmd.commands +import spack.config import spack.main import spack.paths from spack.cmd.commands import _dest_to_fish_complete, _positional_to_subroutine +from spack.util.executable import Executable -commands = spack.main.SpackCommand("commands", subprocess=True) -parser = spack.main.make_argument_parser() -spack.main.add_all_commands(parser) +def commands(*args: str) -> str: + """Run `spack commands args...` and return output as a string.
It's a separate process so that + we run through the main Spack command logic and avoid caching issues.""" + python = Executable(sys.executable) + return python(spack.paths.spack_script, "commands", *args, output=str) def test_names(): """Test default output of spack commands.""" - out1 = commands().strip().split("\n") + out1 = commands().strip().splitlines() assert out1 == spack.cmd.all_commands() assert "rm" not in out1 - out2 = commands("--aliases").strip().split("\n") + out2 = commands("--aliases").strip().splitlines() assert out1 != out2 assert "rm" in out2 - out3 = commands("--format=names").strip().split("\n") + out3 = commands("--format=names").strip().splitlines() assert out1 == out3 @@ -59,22 +64,29 @@ def test_subcommands(): assert "spack compiler add" in out2 -@pytest.mark.not_on_windows("subprocess not supported on Windows") -def test_override_alias(): +def test_alias_overrides_builtin(mutable_config: spack.config.Configuration, capfd): """Test that spack commands cannot be overridden by aliases.""" - - install = spack.main.SpackCommand("install", subprocess=True) - instal = spack.main.SpackCommand("instal", subprocess=True) - - out = install(fail_on_error=False, global_args=["-c", "config:aliases:install:find"]) - assert "install requires a package argument or active environment" in out + mutable_config.set("config:aliases", {"install": "find"}) + cmd, args = spack.main.resolve_alias("install", ["install", "-v"]) + assert cmd == "install" and args == ["install", "-v"] + out = capfd.readouterr().err assert "Alias 'install' (mapping to 'find') attempts to override built-in command" in out - out = install(fail_on_error=False, global_args=["-c", "config:aliases:foo bar:find"]) + +def test_alias_with_space(mutable_config: spack.config.Configuration, capfd): + """Test that spack aliases with spaces are rejected.""" + mutable_config.set("config:aliases", {"foo bar": "find"}) + cmd, args = spack.main.resolve_alias("install", ["install", "-v"]) + assert cmd == "install" and args == ["install", "-v"] + out = capfd.readouterr().err assert "Alias 'foo bar' (mapping to 'find') contains a space, which is not supported" in out - out = instal(fail_on_error=False, global_args=["-c", "config:aliases:instal:find"]) - assert "install requires a package argument or active environment" not in out + +def test_alias_resolves_properly(mutable_config: spack.config.Configuration): + """Test that spack aliases resolve properly.""" + mutable_config.set("config:aliases", {"my_find": "find"}) + cmd, args = spack.main.resolve_alias("my_find", ["my_find", "-v"]) + assert cmd == "find" and args == ["find", "-v"] def test_rst(): diff --git a/lib/spack/spack/test/cmd/common/arguments.py b/lib/spack/spack/test/cmd/common/arguments.py index 220697785c91f3..d688713f4eb852 100644 --- a/lib/spack/spack/test/cmd/common/arguments.py +++ b/lib/spack/spack/test/cmd/common/arguments.py @@ -153,3 +153,32 @@ def test_use_buildcache_type(): with pytest.raises(argparse.ArgumentTypeError): assert arguments.use_buildcache("sometimes") + + +def test_missing_config_scopes_are_valid_scope_arguments(mock_missing_dir_include_scopes): + """Test that if an included scope does not have a directory or file, + we can still specify it as a scope as an argument""" + a = argparse.ArgumentParser() + a.add_argument( + "--scope", + action=arguments.ConfigScope, + default=lambda: spack.config.default_modify_scope(), + help="configuration scope to modify", + ) + namespace = a.parse_args(["--scope", "sub_base"]) + assert namespace.scope == "sub_base"
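# Illustrative sketch (editor's addition, not part of the patch): the argparse
# pattern the two scope tests around here exercise. arguments.ConfigScope is
# assumed to be a custom argparse.Action that validates the scope name at parse
# time; this stand-in shows the shape of such an action with a made-up validator
# and placeholder scope names.
import argparse

KNOWN_SCOPES = {"sub_base", "user", "site"}  # placeholder scope names

class ScopeAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # Reject unknown scopes at parse time, like a readable-scope validator would
        if values not in KNOWN_SCOPES:
            parser.error(f"unknown scope: {values}")
        setattr(namespace, self.dest, values)

p = argparse.ArgumentParser()
p.add_argument("--scope", action=ScopeAction)
assert p.parse_args(["--scope", "sub_base"]).scope == "sub_base"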
"sub_base" + + +def test_missing_config_scopes_not_valid_read_scope(mock_missing_dir_include_scopes): + """Ensures that if a missing include scope is the subject of a read + operation, we fail at the argparse level""" + a = argparse.ArgumentParser() + a.add_argument( + "--scope", + action=arguments.ConfigScope, + type=arguments.config_scope_readable_validator, + default=lambda: spack.config.default_modify_scope(), + help="configuration scope to modify", + ) + with pytest.raises(SystemExit): + a.parse_args(["--scope", "sub_base"]) diff --git a/lib/spack/spack/test/cmd/config.py b/lib/spack/spack/test/cmd/config.py index cbcf2ef97d2204..406d1bbeaf1dce 100644 --- a/lib/spack/spack/test/cmd/config.py +++ b/lib/spack/spack/test/cmd/config.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import functools +import json import os import pathlib import re @@ -80,22 +81,107 @@ def test_config_scopes(path, types, mutable_mock_env_path): assert all(os.sep in x for x in paths) -def test_config_scopes_include(): - scopes_cmd = ["scopes", "-t", "include"] - output = config(*scopes_cmd).split() - assert not output or all(":" in x for x in output) +@pytest.mark.parametrize("type", ["path", "include", "internal", "env"]) +def test_config_scopes_include(type): + """Ensure that `spack config scopes -vt TYPE outputs only scopes of that type.""" + scopes_cmd = ["scopes", "-vt", type] + output = config(*scopes_cmd).strip() + lines = output.split("\n") + assert not output or all([type in line for line in lines[1:]]) + + +def test_config_scopes_section(mutable_config): + scopes_cmd = ["scopes", "-v", "packages"] + output = config(*scopes_cmd).strip() + lines = output.split("\n") + + lines_by_scope_name = {line.split()[0]: line for line in lines} + assert "absent" in lines_by_scope_name["command_line"] + assert "absent" in lines_by_scope_name["_builtin"] + assert "active" in lines_by_scope_name["site"] + + +def test_include_overrides(mutable_config): + output = config("scopes").strip() + lines = output.split("\n") + assert "user" in lines + assert "system" in lines + assert "site" in lines + assert "_builtin" in lines + + mutable_config.push_scope(spack.config.InternalConfigScope("override", {"include:": []})) + + # overridden scopes are not shown wtihout `-v` + output = config("scopes").strip() + lines = output.split("\n") + assert "user" not in lines + assert "system" not in lines + assert "site" not in lines + + # scopes with ConfigScopePriority.DEFAULTS remain + assert "_builtin" in lines + + # overridden scopes are shown wtih `-v` and marked 'override' + output = config("scopes", "-v").strip() + lines = output.split("\n") + assert "override" in next(line for line in lines if line.startswith("user")) + assert "override" in next(line for line in lines if line.startswith("system")) + assert "override" in next(line for line in lines if line.startswith("site")) + +def test_blame_override(mutable_config): + # includes are present when section is specified + output = config("blame", "include").strip() + include_path = re.escape(os.path.join(mutable_config.scopes["site"].path, "include.yaml")) + assert re.search(rf"include:\n{include_path}:\d+\s+\- path: base", output) -def test_config_scopes_path_section(): - output = config("scopes", "-t", "include", "-p", "modules") - assert "_builtin" not in output - assert "site" not in output + # includes are also present when section is NOT specified + output = config("blame").strip() + assert re.search(rf"include:\n{include_path}:\d+\s+\- path: base", output) 
+ + mutable_config.push_scope(spack.config.InternalConfigScope("override", {"include:": []})) + + # site includes are not present when overridden + output = config("blame", "include").strip() + assert not re.search(rf"include:\n{include_path}:\d+\s+\- path: base", output) + assert "include: []" in output + + output = config("blame").strip() + assert not re.search(rf"include:\n{include_path}:\d+\s+\- path: base", output) + assert "include: []" in output + + +def test_config_scopes_path(mutable_config): + scopes_cmd = ["scopes", "-p"] + output = config(*scopes_cmd).strip() + lines = output.split("\n") + + lines_by_scope_name = {line.split()[0]: line for line in lines} + assert f"{os.sep}user{os.sep}" in lines_by_scope_name["user"] + assert f"{os.sep}system{os.sep}" in lines_by_scope_name["system"] + assert f"{os.sep}site{os.sep}" in lines_by_scope_name["site"] def test_get_config_scope(mock_low_high_config): assert config("get", "compilers").strip() == "compilers: {}" +def test_get_config_roundtrip(mutable_config): + """Test that ``spack config get [--json]
`` roundtrips correctly.""" + json_roundtrip = json.loads(config("get", "--json", "config")) + yaml_roundtrip = syaml.load(config("get", "config")) + assert json_roundtrip["config"] == yaml_roundtrip["config"] == mutable_config.get("config") + + +def test_get_all_config_roundtrip(mutable_config): + """Test that ``spack config get [--json]`` roundtrips correctly.""" + json_roundtrip = json.loads(config("get", "--json")) + yaml_roundtrip = syaml.load(config("get")) + assert json_roundtrip == yaml_roundtrip + for section in spack.config.SECTION_SCHEMAS: + assert json_roundtrip["spack"][section] == mutable_config.get(section) + + def test_get_config_scope_merged(mock_low_high_config): low_path = mock_low_high_config.scopes["low"].path high_path = mock_low_high_config.scopes["high"].path @@ -273,7 +359,7 @@ def test_config_with_c_argument(mutable_empty_config): assert config_file in args.config_vars # Add the path to the config - config("add", args.config_vars[0], scope="command_line") + config("add", args.config_vars[0]) output = config("get", "config") assert "config:\n install_tree:\n root: /path/to/config.yaml" in output @@ -558,15 +644,6 @@ def test_config_remove_from_env(mutable_empty_config, mutable_mock_env_path): assert "dirty: true" not in output -def test_config_update_config(config_yaml_v015): - config_yaml_v015() - config("update", "-y", "config") - - # Check the entires have been transformed - data = spack.config.get("config") - check_config_updated(data) - - def test_config_update_not_needed(mutable_config): data_before = spack.config.get("repos") config("update", "-y", "repos") @@ -574,62 +651,6 @@ def test_config_update_not_needed(mutable_config): assert data_before == data_after -@pytest.mark.regression("18031") -def test_config_update_can_handle_comments(mutable_config): - # Create an outdated config file with comments - scope = spack.config.default_modify_scope() - cfg_file = spack.config.CONFIG.get_config_filename(scope, "config") - with open(cfg_file, mode="w", encoding="utf-8") as f: - f.write( - """ -config: - # system cmake in /usr - install_tree: './foo' - # Another comment after the outdated section - install_hash_length: 7 -""" - ) - - # Try to update it, it should not raise errors - config("update", "-y", "config") - - # Check data - data = spack.config.get("config", scope=scope) - assert "root" in data["install_tree"] - - # Check the comment is there - with open(cfg_file, encoding="utf-8") as f: - text = "".join(f.readlines()) - - assert "# system cmake in /usr" in text - assert "# Another comment after the outdated section" in text - - -@pytest.mark.regression("18050") -def test_config_update_works_for_empty_paths(mutable_config): - scope = spack.config.default_modify_scope() - cfg_file = spack.config.CONFIG.get_config_filename(scope, "config") - with open(cfg_file, mode="w", encoding="utf-8") as f: - f.write( - """ -config: - install_tree: '' -""" - ) - - # Try to update it, it should not raise errors - output = config("update", "-y", "config") - - # This ensures that we updated the configuration - assert "[backup=" in output - - -def check_config_updated(data): - assert isinstance(data["install_tree"], dict) - assert data["install_tree"]["root"] == "/fake/path" - assert data["install_tree"]["projections"] == {"all": "{name}-{version}"} - - def test_config_update_shared_linking(mutable_config): # Old syntax: config:shared_linking:rpath/runpath # New syntax: config:shared_linking:{type:rpath/runpath,bind:True/False} diff --git a/lib/spack/spack/test/cmd/create.py 
b/lib/spack/spack/test/cmd/create.py index 029897e443e2c1..d521d47e6ce9cd 100644 --- a/lib/spack/spack/test/cmd/create.py +++ b/lib/spack/spack/test/cmd/create.py @@ -189,7 +189,7 @@ def test_get_name_urls(url, expected): assert name == expected -def test_get_name_error(monkeypatch, capsys): +def test_get_name_error(monkeypatch, capfd): """Test get_name UndetectableNameError exception path.""" def _parse_name_offset(path, v): @@ -201,7 +201,7 @@ def _parse_name_offset(path, v): with pytest.raises(SystemExit): spack.cmd.create.get_name(None, url) - captured = capsys.readouterr() + captured = capfd.readouterr() assert "Couldn't guess a name" in str(captured) diff --git a/lib/spack/spack/test/cmd/dependents.py b/lib/spack/spack/test/cmd/dependents.py index ceb804deaa475f..2d13c2e0740d3c 100644 --- a/lib/spack/spack/test/cmd/dependents.py +++ b/lib/spack/spack/test/cmd/dependents.py @@ -31,20 +31,20 @@ def test_immediate_dependents(mock_packages): def test_transitive_dependents(mock_packages): out = dependents("--transitive", "libelf") actual = set(re.split(r"\s+", out.strip())) - assert actual == set( - [ - "callpath", - "dyninst", - "libdwarf", - "mpileaks", - "multivalue-variant", - "singlevalue-variant-dependent", - "patch-a-dependency", - "patch-several-dependencies", - "quantum-espresso", - "conditionally-patch-dependency", - ] - ) + assert actual == { + "callpath", + "dyninst", + "libdwarf", + "mixing-parent", + "mpileaks", + "multivalue-variant", + "singlevalue-variant-dependent", + "trilinos", + "patch-a-dependency", + "patch-several-dependencies", + "quantum-espresso", + "conditionally-patch-dependency", + } @pytest.mark.db @@ -70,7 +70,7 @@ def test_transitive_installed_dependents(mock_packages, database): with color_when(False): out = dependents("--installed", "--transitive", "fake") - lines = [li for li in out.strip().split("\n") if not li.startswith("--")] + lines = [li for li in out.strip().split("\n") if li and not li.startswith("--")] hashes = set([re.split(r"\s+", li)[0] for li in lines]) expected = set( diff --git a/lib/spack/spack/test/cmd/dev_build.py b/lib/spack/spack/test/cmd/dev_build.py index 847cdec306544f..dba112b2691f06 100644 --- a/lib/spack/spack/test/cmd/dev_build.py +++ b/lib/spack/spack/test/cmd/dev_build.py @@ -11,6 +11,7 @@ import spack.environment as ev import spack.error import spack.llnl.util.filesystem as fs +import spack.main import spack.repo import spack.spec import spack.store @@ -110,7 +111,7 @@ def test_dev_build_before_until(tmp_path: pathlib.Path, install_mockery): with open(spec.package.filename, "w", encoding="utf-8") as f: f.write(spec.package.original_string) - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): dev_build("-u", "edit", "-b", "edit", "dev-build-test-install@0.0.0") bad_phase = "phase_that_does_not_exist" diff --git a/lib/spack/spack/test/cmd/develop.py b/lib/spack/spack/test/cmd/develop.py index 1bb3ba41abd37b..b029ca38b1abd3 100644 --- a/lib/spack/spack/test/cmd/develop.py +++ b/lib/spack/spack/test/cmd/develop.py @@ -17,6 +17,7 @@ import spack.util.git import spack.util.path from spack.error import SpackError +from spack.fetch_strategy import URLFetchStrategy from spack.main import SpackCommand add = SpackCommand("add") @@ -125,9 +126,90 @@ def test_develop_update_spec(self): self.check_develop(e, spack.spec.Spec("mpich@=2.0")) assert len(e.dev_specs) == 1 + def test_develop_applies_changes(self, monkeypatch): + env("create", "test") + with ev.read("test") as e: + e.add("mpich@1.0") + 
e.concretize() + e.write() + + monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None) + develop("mpich@1.0") + + # Check modifications actually worked + spec = next(e.roots()) + assert spec.satisfies("dev_path=*") + + def test_develop_applies_changes_parents(self, monkeypatch): + env("create", "test") + with ev.read("test") as e: + e.add("hdf5^mpich@1.0") + e.concretize() + e.write() + + orig_hash = next(e.roots()).dag_hash() + + monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None) + develop("mpich@1.0") + + # Check modifications actually worked + new_hdf5 = next(e.roots()) + assert new_hdf5.dag_hash() != orig_hash + assert new_hdf5["mpi"].satisfies("dev_path=*") + + def test_develop_applies_changes_spec_conflict(self, monkeypatch): + env("create", "test") + with ev.read("test") as e: + e.add("mpich@1.0") + e.concretize() + e.write() + + monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None) + with pytest.raises(ev.SpackEnvironmentDevelopError, match="conflicts with concrete"): + develop("mpich@1.1") + + def test_develop_applies_changes_path(self, monkeypatch): + env("create", "test") + with ev.read("test") as e: + e.add("mpich@1.0") + e.concretize() + e.write() + + # canonicalize paths relative to env + testpath1 = spack.util.path.canonicalize_path("test/path1", e.path) + testpath2 = spack.util.path.canonicalize_path("test/path2", e.path) + + monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None) + # Testing that second call to develop successfully changes both config and specs + for path in (testpath1, testpath2): + develop("--path", path, "mpich@1.0") + + # Check modifications actually worked + spec = next(e.roots()) + assert spec.satisfies(f"dev_path={path}") + assert spack.config.get("develop:mpich:path") == path + + def test_develop_no_modify(self, monkeypatch): + env("create", "test") + with ev.read("test") as e: + e.add("mpich@1.0") + e.concretize() + e.write() + + monkeypatch.setattr(spack.stage.Stage, "steal_source", lambda x, y: None) + develop("--no-modify-concrete-specs", "mpich@1.0") + + # Check modifications were not applied + spec = next(e.roots()) + assert not spec.satisfies("dev_path=*") + def test_develop_canonicalize_path(self, monkeypatch): env("create", "test") with ev.read("test") as e: + e.add("mpich@1.0") + e.concretize() + e.write() + path = "../$user" abspath = spack.util.path.canonicalize_path(path, e.path) @@ -140,12 +222,16 @@ def check_path(stage, dest): self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path) # Check modifications actually worked - result = spack.concretize.concretize_one("mpich@1.0") - assert result.satisfies("dev_path=%s" % abspath) + spec = next(e.roots()) + assert spec.satisfies("dev_path=%s" % abspath) def test_develop_canonicalize_path_no_args(self, monkeypatch): env("create", "test") with ev.read("test") as e: + e.add("mpich@1.0") + e.concretize() + e.write() + path = "$user" abspath = spack.util.path.canonicalize_path(path, e.path) @@ -169,8 +255,8 @@ def check_path(stage, dest): self.check_develop(e, spack.spec.Spec("mpich@=1.0"), path) # Check modifications actually worked - result = spack.concretize.concretize_one("mpich@1.0") - assert result.satisfies("dev_path=%s" % abspath) + spec = next(e.roots()) + assert spec.satisfies("dev_path=%s" % abspath) def _git_commit_list(git_repo_dir): @@ -209,10 +295,13 @@ def test_develop_full_git_repo( # more than just one commit). 
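+ # (Like the other develop tests updated here, the environment is concretized and + # written before calling develop, since `spack develop` is now assumed to update + # the already-concrete specs in place instead of waiting for a reconcretization.)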
env("create", "test") with ev.read("test") as e: - add("git-test-commit") + add("git-test-commit@1.2") + e.concretize() + e.write() + develop("git-test-commit@1.2") + e.write() - e.concretize() spec = e.all_specs()[0] develop_dir = spec.variants["dev_path"].value commits = _git_commit_list(develop_dir) @@ -225,6 +314,7 @@ def test_recursive(mutable_mock_env_path, install_mockery, mock_fetch): with ev.read("test") as e: add("indirect-mpich@1.0") e.concretize() + e.write() specs = e.all_specs() assert len(specs) > 1 @@ -234,6 +324,10 @@ def test_recursive(mutable_mock_env_path, install_mockery, mock_fetch): for spec in expected_dev_specs: assert spec in e.dev_specs + spec = next(e.roots()) + for dep in spec.traverse(): + assert dep.satisfies("dev_path=*") == (dep.name in expected_dev_specs) + def test_develop_fails_with_multiple_concrete_versions( mutable_mock_env_path, install_mockery, mock_fetch @@ -264,8 +358,9 @@ def test_concretize_dev_path_with_at_symbol_in_env( with ev.read("test_at_sym") as e: add(spec_like) - develop(f"--path={develop_dir}", spec_like) e.concretize() + e.write() + develop(f"--path={develop_dir}", spec_like) result = e.concrete_roots() assert len(result) == 1 @@ -273,3 +368,51 @@ def test_concretize_dev_path_with_at_symbol_in_env( assert cspec.satisfies(spec_like), cspec assert cspec.is_develop, cspec assert str(develop_dir) in cspec.variants["dev_path"], cspec + + +def _failing_fn(*args, **kwargs): + # This stands in for a function that should never be called as + # part of a test. + assert False + + +@pytest.mark.parametrize("_devpath_should_exist", [True, False]) +@pytest.mark.disable_clean_stage_check +def test_develop_with_devpath_staging( + monkeypatch, + mutable_mock_env_path, + mock_packages, + tmp_path: pathlib.Path, + mock_archive, + install_mockery, + mock_fetch, + mock_resource_fetch, + mock_stage, + _devpath_should_exist, +): + # If the specified develop path exists, a resource should not be + # downloaded at all at install time. Otherwise, it should be. + + env("create", "test") + + develop_dir = tmp_path / "build@location" + if _devpath_should_exist: + develop_dir.mkdir() + monkeypatch.setattr(URLFetchStrategy, "fetch", _failing_fn) + + spec_like = "simple-resource@1.0" + + with ev.read("test") as e: + e.add(spec_like) + e.concretize() + e.write() + develop(f"--path={develop_dir}", spec_like) + + e.install_all() + + expected_resource_path = develop_dir / "resource.tgz" + if _devpath_should_exist: + # If we made it here, we didn't try to download anything. 
+ pass + else: + assert os.path.exists(expected_resource_path) diff --git a/lib/spack/spack/test/cmd/edit.py b/lib/spack/spack/test/cmd/edit.py index 6b791da7eb1ff0..47f52fc6d66f87 100644 --- a/lib/spack/spack/test/cmd/edit.py +++ b/lib/spack/spack/test/cmd/edit.py @@ -3,7 +3,9 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os +import pathlib +import spack.paths import spack.repo import spack.util.editor from spack.main import SpackCommand @@ -43,3 +45,23 @@ def editor(*args: str, **kwargs): monkeypatch.setattr(spack.util.editor, "editor", editor) edit("--build-system", "autotools", "cmake") assert called + + +def test_edit_non_default_build_system(monkeypatch, mock_packages, mutable_config): + called = False + + def editor(*args: str, **kwargs): + nonlocal called + called = True + from spack_repo.builtin_mock.build_systems import autotools, cmake # type: ignore + + assert os.path.samefile(args[0], autotools.__file__) + assert os.path.samefile(args[1], cmake.__file__) + + monkeypatch.setattr(spack.util.editor, "editor", editor) + + # set up an additional repo + extra_repo_dir = pathlib.Path(spack.paths.test_repos_path) / "spack_repo" / "requirements_test" + with spack.repo.use_repositories(str(extra_repo_dir), override=False): + edit("--build-system", "builtin_mock.autotools", "builtin_mock.cmake") + assert called diff --git a/lib/spack/spack/test/cmd/env.py b/lib/spack/spack/test/cmd/env.py index 729ccc45c59c88..8ea37d3997d7b4 100644 --- a/lib/spack/spack/test/cmd/env.py +++ b/lib/spack/spack/test/cmd/env.py @@ -31,10 +31,8 @@ import spack.paths import spack.repo import spack.solver.asp -import spack.spec import spack.stage import spack.store -import spack.test.conftest import spack.util.environment import spack.util.spack_json as sjson import spack.util.spack_yaml @@ -54,7 +52,7 @@ pytestmark = [ pytest.mark.usefixtures("mutable_config", "mutable_mock_env_path", "mutable_mock_repo"), pytest.mark.maybeslow, - pytest.mark.not_on_windows("Envs unsupported on Window"), + pytest.mark.not_on_windows("Envs unsupported on Windows"), ] env = SpackCommand("env") @@ -119,22 +117,20 @@ def check_viewdir_removal(viewdir: pathlib.Path): ] -def test_env_track_nonexistant_path_fails(capfd): +def test_env_track_nonexistent_path_fails(): with pytest.raises(spack.main.SpackCommandError): env("track", "path/does/not/exist") - out, _ = capfd.readouterr() - assert "doesn't contain an environment" in out + assert "doesn't contain an environment" in env.output -def test_env_track_existing_env_fails(capfd): +def test_env_track_existing_env_fails(): env("create", "track_test") with pytest.raises(spack.main.SpackCommandError): env("track", "--name", "track_test", ev.environment_dir_from_name("track_test")) - out, _ = capfd.readouterr() - assert "environment named track_test already exists" in out + assert "environment named track_test already exists" in env.output def test_env_track_valid(tmp_path: pathlib.Path): @@ -160,21 +156,21 @@ def test_env_untrack_valid(tmp_path: pathlib.Path): env("track", "--name", "test_untrack", ".") env("untrack", "--yes-to-all", "test_untrack") - # check that environment was sucessfully untracked + # check that environment was successfully untracked out = env("ls") assert "test_untrack" not in out def test_env_untrack_invalid_name(): # test untracking an environment that doesn't exist - env_name = "invalid_enviornment_untrack" + env_name = "invalid_environment_untrack" out = env("untrack", env_name) assert f"Environment '{env_name}' does not exist" in out -def 
test_env_untrack_when_active(tmp_path: pathlib.Path, capfd): +def test_env_untrack_when_active(tmp_path: pathlib.Path): env_name = "test_untrack_active" with fs.working_dir(str(tmp_path)): @@ -186,19 +182,18 @@ def test_env_untrack_when_active(tmp_path: pathlib.Path, capfd): active_env = ev.read(env_name) with active_env: - with pytest.raises(spack.main.SpackCommandError): - env("untrack", "--yes-to-all", env_name) + output = env("untrack", "--yes-to-all", env_name, fail_on_error=False) + assert env.error is not None # check that environment could not be untracked while active - out, _ = capfd.readouterr() - assert f"'{env_name}' can't be untracked while activated" in out + assert f"'{env_name}' can't be untracked while activated" in output env("untrack", "-f", env_name) out = env("ls") assert env_name not in out -def test_env_untrack_managed(capfd): +def test_env_untrack_managed(): env_name = "test_untrack_managed" # create a managed environment @@ -208,8 +203,7 @@ def test_env_untrack_managed(capfd): env("untrack", env_name) # check that a managed environment cannot be untracked - out, _ = capfd.readouterr() - assert f"'{env_name}' is not a tracked env" in out + assert f"'{env_name}' is not a tracked env" in env.output @pytest.fixture() @@ -223,11 +217,13 @@ def _installed_environment(content): spack_yaml.write_text(content) with fs.working_dir(tmp_path): env("create", "test", "./spack.yaml") - with ev.read("test"): - install("--fake") + with ev.read("test") as current_environment: + current_environment.concretize() + current_environment.install_all(fake=True) + current_environment.write(regenerate=True) - test = ev.read("test") - yield test + with ev.read("test") as current_environment: + yield current_environment return _installed_environment @@ -306,7 +302,7 @@ def test_env_add_virtual(): assert spec.intersects("mpi") -def test_env_add_nonexistant_fails(): +def test_env_add_nonexistent_fails(): env("create", "test") e = ev.read("test") @@ -335,7 +331,7 @@ def test_env_list(mutable_mock_env_path): assert ".DS_Store" not in out -def test_env_remove(capfd): +def test_env_remove(): env("create", "foo") env("create", "bar") @@ -346,8 +342,7 @@ def test_env_remove(capfd): foo = ev.read("foo") with foo: with pytest.raises(SpackCommandError): - with capfd.disabled(): - env("remove", "-y", "foo") + env("remove", "-y", "foo") assert "foo" in env("list") env("remove", "-y", "foo") @@ -361,14 +356,11 @@ def test_env_remove(capfd): assert "bar" not in out -def test_env_rename_managed(capfd): +def test_env_rename_managed(): # Need real environment with pytest.raises(spack.main.SpackCommandError): env("rename", "foo", "bar") - assert ( - "The specified name does not correspond to a managed spack environment" - in capfd.readouterr()[0] - ) + assert "The specified name does not correspond to a managed spack environment" in env.output env("create", "foo") @@ -387,14 +379,14 @@ def test_env_rename_managed(capfd): # Cannot rename active environment with pytest.raises(spack.main.SpackCommandError): env("rename", "bar", "baz") - assert "Cannot rename active environment" in capfd.readouterr()[0] + assert "Cannot rename active environment" in env.output env("create", "qux") # Cannot rename to an active environment (even with force flag) with pytest.raises(spack.main.SpackCommandError): env("rename", "-f", "qux", "bar") - assert "bar is an active environment" in capfd.readouterr()[0] + assert "bar is an active environment" in env.output # Can rename inactive environment when another's active out =
env("rename", "qux", "quux") @@ -413,7 +405,7 @@ def test_env_rename_managed(capfd): "The new name corresponds to an existing environment;" " specify the --force flag to overwrite it." ) - assert errmsg in capfd.readouterr()[0] + assert errmsg in env.output env("rename", "-f", "bar", "baz") out = env("list") @@ -421,14 +413,11 @@ def test_env_rename_managed(capfd): assert "baz" in out -def test_env_rename_independent(capfd, tmp_path: pathlib.Path): +def test_env_rename_independent(tmp_path: pathlib.Path): # Need real environment with pytest.raises(spack.main.SpackCommandError): env("rename", "-d", "./non-existing", "./also-non-existing") - assert ( - "The specified path does not correspond to a valid spack environment" - in capfd.readouterr()[0] - ) + assert "The specified path does not correspond to a valid spack environment" in env.output anon_foo = str(tmp_path / "foo") env("create", "-d", anon_foo) @@ -444,7 +433,7 @@ def test_env_rename_independent(capfd, tmp_path: pathlib.Path): env("activate", "--sh", "-d", anon_bar) with pytest.raises(spack.main.SpackCommandError): env("rename", "-d", anon_bar, anon_baz) - assert "Cannot rename active environment" in capfd.readouterr()[0] + assert "Cannot rename active environment" in env.output env("deactivate", "--sh") assert ev.is_env_dir(anon_bar) @@ -458,7 +447,7 @@ def test_env_rename_independent(capfd, tmp_path: pathlib.Path): "The new path corresponds to an existing environment;" " specify the --force flag to overwrite it." ) - assert errmsg in capfd.readouterr()[0] + assert errmsg in env.output assert ev.is_env_dir(anon_bar) assert ev.is_env_dir(anon_baz) @@ -475,7 +464,7 @@ def test_env_rename_independent(capfd, tmp_path: pathlib.Path): with pytest.raises(spack.main.SpackCommandError): env("rename", "-d", anon_baz, anon_qux) errmsg = "The new path already exists; specify the --force flag to overwrite it." - assert errmsg in capfd.readouterr()[0] + assert errmsg in env.output env("rename", "-f", "-d", anon_baz, anon_qux) assert not ev.is_env_dir(anon_baz) @@ -578,7 +567,7 @@ def test_env_install_include_concrete_env(unify, install_mockery, mock_fetch, mu def test_env_roots_marked_explicit(install_mockery, mock_fetch): install = SpackCommand("install") - install("dependent-install") + install("--fake", "dependent-install") # Check one explicit, one implicit install dependent = spack.store.STORE.db.query(explicit=True) @@ -611,7 +600,7 @@ def setup_error(pkg, env): pkg = spack.repo.PATH.get_pkg_class("cmake-client") monkeypatch.setattr(pkg, "setup_run_environment", setup_error) - spack.environment.shell.activate(e) + ev.shell.activate(e) _, err = capfd.readouterr() assert "cmake-client had issues!" 
in err @@ -624,7 +613,7 @@ def test_activate_adds_transitive_run_deps_to_path(install_mockery, mock_fetch, e = ev.read("test") with e: - install("--add", "depends-on-run-env") + install("--add", "--fake", "depends-on-run-env") env_variables = {} spack.environment.shell.activate(e).apply_modifications(env_variables) @@ -651,7 +640,7 @@ def test_env_definition_symlink(install_mockery, mock_fetch, tmp_path: pathlib.P def test_env_install_two_specs_same_dep( - install_mockery, mock_fetch, tmp_path: pathlib.Path, capsys, monkeypatch + install_mockery, mock_fetch, tmp_path: pathlib.Path, monkeypatch ): """Test installation of two packages that share a dependency with no connection and the second specifying the dependency as a 'build' @@ -673,8 +662,7 @@ def test_env_install_two_specs_same_dep( env("create", "test", "spack.yaml") with ev.read("test"): - with capsys.disabled(): - out = install("--fake") + out = install("--fake") # Ensure both packages reach install phase processing and are installed out = str(out) @@ -835,25 +823,21 @@ def test_force_remove_included_env(): assert "test" not in list_output -def test_environment_status(capsys, tmp_path: pathlib.Path, monkeypatch): +def test_environment_status(tmp_path: pathlib.Path, monkeypatch): with fs.working_dir(str(tmp_path)): - with capsys.disabled(): - assert "No active environment" in env("status") + assert "No active environment" in env("status") with ev.create("test"): - with capsys.disabled(): - assert "In environment test" in env("status") + assert "In environment test" in env("status") with ev.create_in_dir("local_dir"): - with capsys.disabled(): - assert os.path.join(os.getcwd(), "local_dir") in env("status") + assert os.path.join(os.getcwd(), "local_dir") in env("status") e = ev.create_in_dir("myproject") e.write() with fs.working_dir(str(tmp_path / "myproject")): with e: - with capsys.disabled(): - assert "in current directory" in env("status") + assert "in current directory" in env("status") def test_env_status_broken_view( @@ -865,7 +849,7 @@ def test_env_status_broken_view( tmp_path: pathlib.Path, ): with ev.create_in_dir(tmp_path): - install("--add", "trivial-install-test-package") + install("--add", "--fake", "trivial-install-test-package") # switch to a new repo that doesn't include the installed package # test that Spack detects the missing package and warns the user @@ -884,7 +868,7 @@ def test_env_activate_broken_view( mutable_mock_env_path, mock_archive, mock_fetch, mock_custom_repository, install_mockery ): with ev.create("test"): - install("--add", "trivial-install-test-package") + install("--add", "--fake", "trivial-install-test-package") # switch to a new repo that doesn't include the installed package # test that Spack detects the missing package and fails gracefully @@ -1043,6 +1027,54 @@ def test_init_from_yaml(environment_from_manifest): assert not e2.specs_by_hash +@pytest.mark.parametrize("use_name", (True, False)) +def test_init_from_env(use_name, environment_from_manifest): + """Test that an environment can be instantiated from an environment dir""" + e1 = environment_from_manifest( + """ +spack: + specs: + - mpileaks + - hypre + - libelf +""" + ) + + with e1: + # Test that relative paths in the env are not rewritten + # Test that relative paths outside the env are + dev_config = { + "libelf": {"spec": "libelf", "path": "./libelf"}, + "mpileaks": {"spec": "mpileaks", "path": "../mpileaks"}, + } + spack.config.set("develop", dev_config) + fs.touch(os.path.join(e1.path, "libelf")) + + e1.concretize() + e1.write() 
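+ + # Expected dev-path handling, per the asserts below: "./libelf" lives inside e1 and + # should be copied into e2 verbatim, while "../mpileaks" points outside e1 and should + # be re-anchored to e1's parent, i.e. os.path.join(os.path.dirname(e1.path), "mpileaks").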
+ + e2 = _env_create("test2", init_file="test" if use_name else e1.path) + + for s1, s2 in zip(e1.user_specs, e2.user_specs): + assert s1 == s2 + + assert e2.concretized_order == e1.concretized_order + assert e2.concretized_user_specs == e1.concretized_user_specs + assert e2.specs_by_hash == e1.specs_by_hash + + assert os.path.exists(os.path.join(e2.path, "libelf")) + with e2: + assert e2.dev_specs["libelf"]["path"] == "./libelf" + assert e2.dev_specs["mpileaks"]["path"] == os.path.join( + os.path.dirname(e1.path), "mpileaks" + ) + + +def test_init_from_env_no_spackfile(tmp_path): + with pytest.raises(ev.SpackEnvironmentError, match="not a valid environment"): + _env_create("test", init_file=str(tmp_path)) + + def test_init_from_yaml_relative_includes(tmp_path: pathlib.Path): files = [ "relative_copied/packages.yaml", @@ -1767,9 +1799,9 @@ def check_stage(spec): def test_env_commands_die_with_no_env_arg(): # these fail in argparse when given no arg - with pytest.raises(SystemExit): + with pytest.raises(SpackCommandError): env("create") - with pytest.raises(SystemExit): + with pytest.raises(SpackCommandError): env("remove") # these have an optional env arg and raise errors via tty.die @@ -1798,7 +1830,7 @@ def test_roots_display_with_variants(): add("boost+shared") with ev.read("test"): - out = find(output=str) + out = find() assert "boost+shared" in out @@ -2036,7 +2068,7 @@ def test_env_include_concrete_old_env(format): def test_env_bad_include_concrete_env(): with pytest.raises(ev.SpackEnvironmentError): - env("create", "--include-concrete", "nonexistant_env", "combined_env") + env("create", "--include-concrete", "nonexistent_env", "combined_env") def test_env_not_concrete_include_concrete_env(): @@ -2241,7 +2273,7 @@ def test_env_include_concrete_reuse(do_not_check_runtimes_on_reuse, reuse_mode): @pytest.mark.parametrize("unify", [True, False, "when_possible"]) def test_env_include_concrete_env_reconcretized(unify): """Double check to make sure that concrete_specs for the local specs is empty - after recocnretizing. + after reconcretizing. 
""" _, _, combined = setup_combined_multiple_env() @@ -3034,7 +3066,7 @@ def test_view_link_run( ) with ev.Environment(envdir): - install() + install("--fake") # make sure transitive run type deps are in the view for pkg in ("dtrun1", "dtrun3"): @@ -3579,7 +3611,7 @@ def test_modules_relative_to_views(environment_from_manifest, install_mockery, m ) with ev.read("test") as e: - install() + install("--fake") spec = e.specs_by_hash[e.concretized_order[0]] view_prefix = e.default_view.get_projection_for_spec(spec) @@ -3706,14 +3738,14 @@ def test_query_develop_specs(tmp_path: pathlib.Path): @pytest.mark.parametrize( "env,no_env,env_dir", [("b", False, None), (None, True, None), (None, False, "path/")] ) -def test_activation_and_deactiviation_ambiguities(method, env, no_env, env_dir, capsys): +def test_activation_and_deactivation_ambiguities(method, env, no_env, env_dir, capfd): """spack [-e x | -E | -D x/] env [activate | deactivate] y are ambiguous""" args = Namespace( shell="sh", env_name="a", env=env, no_env=no_env, env_dir=env_dir, keep_relative=False ) with pytest.raises(SystemExit): method(args) - _, err = capsys.readouterr() + _, err = capfd.readouterr() assert "is ambiguous" in err @@ -3735,7 +3767,7 @@ def test_custom_store_in_environment(mutable_config, tmp_path: pathlib.Path): ) current_store_root = str(spack.store.STORE.root) assert str(current_store_root) != str(install_root) - with spack.environment.Environment(str(tmp_path)): + with ev.Environment(str(tmp_path)): assert str(spack.store.STORE.root) == str(install_root) assert str(spack.store.STORE.root) == current_store_root @@ -4342,7 +4374,7 @@ def test_env_include_packages_url( """Test inclusion of a (GitHub) URL.""" develop_url = "https://github.com/fake/fake/blob/develop/" default_packages = develop_url + "etc/fake/defaults/packages.yaml" - sha256 = "6a1b26c857ca7e5bcd7342092e2f218da43d64b78bd72771f603027ea3c8b4af" + sha256 = "8d428c600b215e3b4a207a08236659dfc2c9ae2782c35943a00ee4204a135702" spack_yaml = tmp_path / "spack.yaml" with open(spack_yaml, "w", encoding="utf-8") as f: f.write( diff --git a/lib/spack/spack/test/cmd/extensions.py b/lib/spack/spack/test/cmd/extensions.py index be80ef9828cc85..361ba4784344e8 100644 --- a/lib/spack/spack/test/cmd/extensions.py +++ b/lib/spack/spack/test/cmd/extensions.py @@ -23,21 +23,20 @@ def python_database(mock_packages, mutable_database): @pytest.mark.not_on_windows("All Fetchers Failed") @pytest.mark.db -def test_extensions(mock_packages, python_database, capsys): +def test_extensions(mock_packages, python_database): ext2 = spack.concretize.concretize_one("py-extension2") def check_output(ni): - with capsys.disabled(): - output = extensions("python") - packages = extensions("-s", "packages", "python") - installed = extensions("-s", "installed", "python") + output = extensions("python") + packages = extensions("-s", "packages", "python") + installed = extensions("-s", "installed", "python") assert "==> python@2.7.11" in output - assert "==> 3 extensions" in output + assert "==> 4 extensions" in output assert "py-extension1" in output assert "py-extension2" in output assert "python-venv" in output - assert "==> 3 extensions" in packages + assert "==> 4 extensions" in packages assert "py-extension1" in packages assert "py-extension2" in packages assert "python-venv" in packages diff --git a/lib/spack/spack/test/cmd/external.py b/lib/spack/spack/test/cmd/external.py index 202e9068298afc..8c0d06d3bc192a 100644 --- a/lib/spack/spack/test/cmd/external.py +++ 
b/lib/spack/spack/test/cmd/external.py @@ -7,7 +7,6 @@ import pytest -import spack import spack.cmd.external import spack.config import spack.cray_manifest diff --git a/lib/spack/spack/test/cmd/find.py b/lib/spack/spack/test/cmd/find.py index a72a57b86f8cfa..aa5ac1ee676e91 100644 --- a/lib/spack/spack/test/cmd/find.py +++ b/lib/spack/spack/test/cmd/find.py @@ -6,12 +6,10 @@ import json import os import pathlib -import sys -from textwrap import dedent import pytest -import spack.cmd as cmd +import spack.cmd import spack.cmd.find import spack.concretize import spack.environment as ev @@ -137,28 +135,12 @@ def test_namespaces_shown_correctly(args, with_namespace, database): @pytest.mark.db def test_find_cli_output_format(database, mock_tty_stdout): - # Currently logging on Windows detaches stdout - # from the terminal so we miss some output during tests - # TODO: (johnwparent): Once logging is amended on Windows, - # restore this test - out = find("zmpi") - if not sys.platform == "win32": - assert out.endswith( - dedent( - """\ - zmpi@1.0 - ==> 1 installed package - """ - ) - ) - else: - assert out.endswith( - dedent( - """\ - zmpi@1.0 - """ - ) - ) + assert find("zmpi").endswith( + """\ +zmpi@1.0 +==> 1 installed package +""" + ) def _check_json_output(spec_list): @@ -202,34 +184,34 @@ def test_find_json_deps(database): @pytest.mark.db -def test_display_json(database, capsys): +def test_display_json(database, capfd): specs = [ spack.concretize.concretize_one(s) for s in ["mpileaks ^zmpi", "mpileaks ^mpich", "mpileaks ^mpich2"] ] - cmd.display_specs_as_json(specs) - spec_list = json.loads(capsys.readouterr()[0]) + spack.cmd.display_specs_as_json(specs) + spec_list = json.loads(capfd.readouterr()[0]) _check_json_output(spec_list) - cmd.display_specs_as_json(specs + specs + specs) - spec_list = json.loads(capsys.readouterr()[0]) + spack.cmd.display_specs_as_json(specs + specs + specs) + spec_list = json.loads(capfd.readouterr()[0]) _check_json_output(spec_list) @pytest.mark.db -def test_display_json_deps(database, capsys): +def test_display_json_deps(database, capfd): specs = [ spack.concretize.concretize_one(s) for s in ["mpileaks ^zmpi", "mpileaks ^mpich", "mpileaks ^mpich2"] ] - cmd.display_specs_as_json(specs, deps=True) - spec_list = json.loads(capsys.readouterr()[0]) + spack.cmd.display_specs_as_json(specs, deps=True) + spec_list = json.loads(capfd.readouterr()[0]) _check_json_output_deps(spec_list) - cmd.display_specs_as_json(specs + specs + specs, deps=True) - spec_list = json.loads(capsys.readouterr()[0]) + spack.cmd.display_specs_as_json(specs + specs + specs, deps=True) + spec_list = json.loads(capfd.readouterr()[0]) _check_json_output_deps(spec_list) @@ -322,9 +304,8 @@ def test_find_very_long(database, config): @pytest.mark.db -def test_find_not_found(database, config, capsys): - with capsys.disabled(): - output = find("foobarbaz", fail_on_error=False) +def test_find_not_found(database, config): + output = find("foobarbaz", fail_on_error=False) assert "No package matches the query: foobarbaz" in output assert find.returncode == 1 diff --git a/lib/spack/spack/test/cmd/gpg.py b/lib/spack/spack/test/cmd/gpg.py index eb6934639df1cd..db95110699fd41 100644 --- a/lib/spack/spack/test/cmd/gpg.py +++ b/lib/spack/spack/test/cmd/gpg.py @@ -7,9 +7,8 @@ import pytest -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.llnl.util.filesystem as fs -import spack.util.executable import spack.util.gpg from spack.main import SpackCommand from spack.paths import 
mock_gpg_data_path, mock_gpg_keys_path @@ -173,7 +172,7 @@ def test_gpg(tmp_path: pathlib.Path, mutable_config, mock_gnupghome): # Verification should now succeed again. gpg("verify", str(test_path)) - relative_keys_path = bindist.buildcache_relative_keys_path() + relative_keys_path = spack.binary_distribution.buildcache_relative_keys_path() # Publish the keys using a directory path test_path = tmp_path / "dir_cache" diff --git a/lib/spack/spack/test/cmd/help.py b/lib/spack/spack/test/cmd/help.py index 30761f3486deae..468c4f85ff69f5 100644 --- a/lib/spack/spack/test/cmd/help.py +++ b/lib/spack/spack/test/cmd/help.py @@ -6,23 +6,25 @@ def test_reuse_after_help(): """Test `spack help` can be called twice with the same SpackCommand.""" - help_cmd = SpackCommand("help", subprocess=True) + help_cmd = SpackCommand("help") help_cmd() help_cmd() def test_help(): """Sanity check the help command to make sure it works.""" - help_cmd = SpackCommand("help", subprocess=True) + help_cmd = SpackCommand("help") out = help_cmd() - assert "These are common spack commands:" in out + assert "Common spack commands:" in out + assert "Options:" in out def test_help_all(): """Test the spack help --all flag""" - help_cmd = SpackCommand("help", subprocess=True) + help_cmd = SpackCommand("help") out = help_cmd("--all") - assert "Complete list of spack commands:" in out + assert "Commands:" in out + assert "Options:" in out def test_help_spec(): diff --git a/lib/spack/spack/test/cmd/info.py b/lib/spack/spack/test/cmd/info.py index 45b8629e127a8a..812a6630d17e70 100644 --- a/lib/spack/spack/test/cmd/info.py +++ b/lib/spack/spack/test/cmd/info.py @@ -2,77 +2,51 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import argparse +import re import pytest -import spack.cmd.info -from spack.main import SpackCommand +from spack.main import SpackCommand, SpackCommandError pytestmark = [pytest.mark.usefixtures("mock_packages")] info = SpackCommand("info") -@pytest.fixture(scope="module") -def parser(): - """Returns the parser for the module command""" - prs = argparse.ArgumentParser() - spack.cmd.info.setup_parser(prs) - return prs +def test_deprecated_option_warns(): + info("--variants-by-name", "vtk-m") + assert "--variants-by-name is deprecated" in info.output -@pytest.fixture() -def print_buffer(monkeypatch): - buffer = [] +# no specs, more than one spec +@pytest.mark.parametrize("args", [[], ["vtk-m", "zmpi"]]) +def test_info_failures(args): + with pytest.raises(SpackCommandError): + info(*args) - def _print(*args, **kwargs): - buffer.extend(args) - monkeypatch.setattr(spack.cmd.info.color, "cprint", _print, raising=False) - return buffer - - -@pytest.mark.parametrize("extra_args", [[], ["--variants-by-name"]]) -def test_it_just_runs(extra_args): - info("vtk-m", *extra_args) - - -def test_info_noversion(print_buffer): +def test_info_noversion(): - """Check that a mock package with no versions outputs None.""" + """Check that a mock package with no versions does not print placeholder 'None' entries.""" - info("noversion") - - line_iter = iter(print_buffer) - for line in line_iter: - if "version" in line: - has = [desc in line for desc in ["Preferred", "Safe", "Deprecated"]] - if not any(has): - continue - else: - continue + output = info("noversion") - assert "None" in next(line_iter).strip() + assert "Preferred\n None" not in output + assert "Safe\n None" not in output + assert "Deprecated\n None" not in output @pytest.mark.parametrize( "pkg_query,expected", [("zlib", "False"), ("find-externals1", "True (version)")] ) -def test_is_externally_detectable(pkg_query, expected, parser, print_buffer): - args =
parser.parse_args(["--detectable", pkg_query]) - spack.cmd.info.info(parser, args) - - line_iter = iter(print_buffer) - for line in line_iter: - if "Externally Detectable" in line: - is_externally_detectable = next(line_iter).strip() - assert is_externally_detectable == expected +def test_is_externally_detectable(pkg_query, expected): + output = info("--detectable", pkg_query) + assert f"Externally Detectable:\n {expected}" in output @pytest.mark.parametrize( "pkg_query", ["vtk-m", "gcc"] # This should ensure --test's c_names processing loop covered ) -@pytest.mark.parametrize("extra_args", [[], ["--variants-by-name"]]) -def test_info_fields(pkg_query, extra_args, parser, print_buffer): +@pytest.mark.parametrize("extra_args", [[], ["--by-name"]]) +def test_info_fields(pkg_query, extra_args): expected_fields = ( "Description:", "Homepage:", @@ -85,8 +59,94 @@ def test_info_fields(pkg_query, extra_args, parser, print_buffer): "Licenses:", ) - args = parser.parse_args(["--all", pkg_query] + extra_args) - spack.cmd.info.info(parser, args) + output = info("--all", *extra_args, pkg_query) + assert all(field in output for field in expected_fields) + - for text in expected_fields: - assert any(x for x in print_buffer if text in x) +@pytest.mark.parametrize( + "args,in_output,not_in_output", + [ + # no variants + (["package-base-extendee"], [r"Variants:\n\s*None"], []), + # test that long lines wrap around + ( + ["long-boost-dependency+longdep"], + [ + r"boost\+atomic\+chrono\+date_time\+filesystem\+graph\+iostreams\+locale\n" + r"\s*build, link" + ], + [], + ), + ( + ["long-boost-dependency~longdep"], + [], + [ + r"boost\+atomic\+chrono\+date_time\+filesystem\+graph\+iostreams\+locale\n" + r"\s*build, link" + ], + ), + # conditional licenses change output + (["licenses-1 +foo"], ["MIT"], ["Apache-2.0"]), + (["licenses-1 ~foo"], ["Apache-2.0"], ["MIT"]), + # filtering bowtie versions + (["bowtie"], ["1.4.0", "1.3.0", "1.2.2", "1.2.0"], []), + (["bowtie@1.2:"], ["1.4.0", "1.3.0", "1.2.2", "1.2.0"], []), + (["bowtie@1.3:"], ["1.4.0", "1.3.0"], ["1.2.2", "1.2.0"]), + (["bowtie@1.2"], ["1.2.2", "1.2.0"], ["1.3.0"]), # 1.4.0 still shown as preferred + # many dependencies with suggestion to filter + ( + ["many-conditional-deps"], + ["consider this for a simpler view:\n spack info many-conditional-deps~cuda~rocm"], + [], + ), + ( + ["many-conditional-deps ~cuda"], + ["consider this for a simpler view:\n spack info many-conditional-deps~cuda~rocm"], + [], + ), + ( + ["many-conditional-deps ~rocm"], + ["consider this for a simpler view:\n spack info many-conditional-deps~cuda~rocm"], + [], + ), + (["many-conditional-deps ~cuda ~rocm"], [], ["consider this for a simpler view:"]), + # Ensure spack info knows that build_system is a single value variant + ( + ["dual-cmake-autotools"], + [r"when\s*build_system=cmake", r"when\s*build_system=autotools"], + [], + ), + ( + ["dual-cmake-autotools build_system=cmake"], + [r"when\s*build_system=cmake"], + [r"when\s*build_system=autotools"], + ), + # Ensure that generator=make implies build_system=cmake and therefore no autotools + ( + ["dual-cmake-autotools generator=make"], + [r"when\s*build_system=cmake"], + [r"when\s*build_system=autotools"], + ), + ( + ["optional-dep-test"], + [ + r"when \^pkg-g", + r"when \%intel", + r"when \%intel\@64\.1", + r"when \%clang@34\:40", + r"when \^pkg\-f", + ], + [], + ), + ], +) +@pytest.mark.parametrize("by_name", [True, False]) +def test_info_output(by_name, args, in_output, not_in_output, monkeypatch): +
monkeypatch.setenv("COLUMNS", "80") + by_name_arg = ["--by-name"] if by_name else ["--by-when"] + output = info(*(by_name_arg + args)) + + for io in in_output: + assert re.search(io, output) + for nio in not_in_output: + assert not re.search(nio, output) diff --git a/lib/spack/spack/test/cmd/install.py b/lib/spack/spack/test/cmd/install.py index 0ebf10014e9a05..c3a6307fb80950 100644 --- a/lib/spack/spack/test/cmd/install.py +++ b/lib/spack/spack/test/cmd/install.py @@ -132,7 +132,7 @@ def test_install_dirty_flag(arguments, expected): assert args.dirty == expected -def test_package_output(capsys, install_mockery, mock_fetch): +def test_package_output(install_mockery, mock_fetch): """ Ensure output printed from pkgs is captured by output redirection. """ @@ -164,17 +164,14 @@ def test_package_output(capsys, install_mockery, mock_fetch): @pytest.mark.disable_clean_stage_check -def test_install_output_on_build_error( - mock_packages, mock_archive, mock_fetch, install_mockery, capfd -): +def test_install_output_on_build_error(mock_packages, mock_archive, mock_fetch, install_mockery): """ This test used to assume receiving full output, but since we've updated spack to generate logs on the level of phases, it will only return the last phase, install. """ - # capfd interferes with Spack's capturing - with capfd.disabled(): - out = install("-v", "build-error", fail_on_error=False) + out = install("-v", "build-error", fail_on_error=False) assert "Installing build-error" in out @@ -204,12 +201,11 @@ def test_install_env_variables(mock_packages, mock_archive, mock_fetch, install_ @pytest.mark.disable_clean_stage_check -def test_show_log_on_error(mock_packages, mock_archive, mock_fetch, install_mockery, capfd): +def test_show_log_on_error(mock_packages, mock_archive, mock_fetch, install_mockery): """ Make sure --show-log-on-error works.
""" - with capfd.disabled(): - out = install("--show-log-on-error", "build-error", fail_on_error=False) + out = install("--show-log-on-error", "build-error", fail_on_error=False) assert isinstance(install.error, spack.build_environment.ChildError) assert install.error.pkg.name == "build-error" @@ -504,50 +500,45 @@ def test_extra_files_are_archived(mock_packages, mock_archive, mock_fetch, insta @pytest.mark.disable_clean_stage_check def test_cdash_report_concretization_error( - tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd, conflict_spec + tmp_path: pathlib.Path, mock_fetch, install_mockery, conflict_spec ): - # capfd interferes with Spack's capturing - with capfd.disabled(): - with fs.working_dir(str(tmp_path)): - with pytest.raises(SpackError): - install("--log-format=cdash", "--log-file=cdash_reports", conflict_spec) - report_dir = tmp_path / "cdash_reports" - assert report_dir in list(tmp_path.iterdir()) - report_file = report_dir / "Update.xml" - assert report_file in list(report_dir.iterdir()) - content = report_file.read_text() - assert "" in content - # The message is different based on using the - # new or the old concretizer - expected_messages = ("Conflicts in concretized spec", "conflicts with") - assert any(x in content for x in expected_messages) + with fs.working_dir(str(tmp_path)): + with pytest.raises(SpackError): + install("--log-format=cdash", "--log-file=cdash_reports", conflict_spec) + report_dir = tmp_path / "cdash_reports" + assert report_dir in list(tmp_path.iterdir()) + report_file = report_dir / "Update.xml" + assert report_file in list(report_dir.iterdir()) + content = report_file.read_text() + assert "" in content + # The message is different based on using the + # new or the old concretizer + expected_messages = ("Conflicts in concretized spec", "conflicts with") + assert any(x in content for x in expected_messages) @pytest.mark.not_on_windows("Windows log_output logs phase header out of order") @pytest.mark.disable_clean_stage_check -def test_cdash_upload_build_error(tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd): - # capfd interferes with Spack's capturing - with capfd.disabled(): - with fs.working_dir(str(tmp_path)): - with pytest.raises(SpackError): - install( - "--log-format=cdash", - "--log-file=cdash_reports", - "--cdash-upload-url=http://localhost/fakeurl/submit.php?project=Spack", - "build-error", - ) - report_dir = tmp_path / "cdash_reports" - assert report_dir in list(tmp_path.iterdir()) - report_file = report_dir / "Build.xml" - assert report_file in list(report_dir.iterdir()) - content = report_file.read_text() - assert "configure: error: in /path/to/some/file:" in content +def test_cdash_upload_build_error(capfd, tmp_path: pathlib.Path, mock_fetch, install_mockery): + with fs.working_dir(str(tmp_path)): + with pytest.raises(SpackError): + install( + "--log-format=cdash", + "--log-file=cdash_reports", + "--cdash-upload-url=http://localhost/fakeurl/submit.php?project=Spack", + "build-error", + ) + report_dir = tmp_path / "cdash_reports" + assert report_dir in list(tmp_path.iterdir()) + report_file = report_dir / "Build.xml" + assert report_file in list(report_dir.iterdir()) + content = report_file.read_text() + assert "configure: error: in /path/to/some/file:" in content @pytest.mark.disable_clean_stage_check -def test_cdash_upload_clean_build(tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd): - # capfd interferes with Spack's capturing of e.g., Build.xml output - with capfd.disabled(), 
fs.working_dir(str(tmp_path)): +def test_cdash_upload_clean_build(tmp_path: pathlib.Path, mock_fetch, install_mockery): + with fs.working_dir(str(tmp_path)): install("--log-file=cdash_reports", "--log-format=cdash", "pkg-c") report_dir = tmp_path / "cdash_reports" assert report_dir in list(tmp_path.iterdir()) @@ -559,9 +550,8 @@ def test_cdash_upload_clean_build(tmp_path: pathlib.Path, mock_fetch, install_mo @pytest.mark.disable_clean_stage_check -def test_cdash_upload_extra_params(tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd): - # capfd interferes with Spack's capture of e.g., Build.xml output - with capfd.disabled(), fs.working_dir(str(tmp_path)): +def test_cdash_upload_extra_params(tmp_path: pathlib.Path, mock_fetch, install_mockery): + with fs.working_dir(str(tmp_path)): install( "--log-file=cdash_reports", "--log-format=cdash", @@ -581,9 +571,8 @@ def test_cdash_upload_extra_params(tmp_path: pathlib.Path, mock_fetch, install_m @pytest.mark.disable_clean_stage_check -def test_cdash_buildstamp_param(tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd): - # capfd interferes with Spack's capture of e.g., Build.xml output - with capfd.disabled(), fs.working_dir(str(tmp_path)): +def test_cdash_buildstamp_param(tmp_path: pathlib.Path, mock_fetch, install_mockery): + with fs.working_dir(str(tmp_path)): cdash_track = "some_mocked_track" buildstamp_format = f"%Y%m%d-%H%M-{cdash_track}" buildstamp = time.strftime(buildstamp_format, time.localtime(int(time.time()))) @@ -603,10 +592,9 @@ def test_cdash_buildstamp_param(tmp_path: pathlib.Path, mock_fetch, install_mock @pytest.mark.disable_clean_stage_check def test_cdash_install_from_spec_json( - tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd, mock_packages, mock_archive + tmp_path: pathlib.Path, mock_fetch, install_mockery, mock_packages, mock_archive ): - # capfd interferes with Spack's capturing - with capfd.disabled(), fs.working_dir(str(tmp_path)): + with fs.working_dir(str(tmp_path)): spec_json_path = str(tmp_path / "spec.json") pkg_spec = spack.concretize.concretize_one("pkg-c") @@ -637,38 +625,29 @@ def test_cdash_install_from_spec_json( @pytest.mark.disable_clean_stage_check -def test_build_error_output(mock_fetch, install_mockery, capfd): - with capfd.disabled(): - msg = "" - try: - install("build-error") - assert False, "no exception was raised!" - except spack.build_environment.ChildError as e: - msg = e.long_message - - assert "configure: error: in /path/to/some/file:" in msg - assert "configure: error: cannot run C compiled programs." in msg +def test_build_error_output(capfd, mock_fetch, install_mockery): + with pytest.raises(spack.build_environment.ChildError) as e: + install("build-error") + assert "configure: error: in /path/to/some/file:" in install.output + assert "configure: error: in /path/to/some/file:" in e.value.long_message + assert "configure: error: cannot run C compiled programs." in install.output + assert "configure: error: cannot run C compiled programs." in e.value.long_message @pytest.mark.disable_clean_stage_check -def test_build_warning_output(mock_fetch, install_mockery, capfd): - with capfd.disabled(): - msg = "" - try: - install("build-warnings") - assert False, "no exception was raised!" - except spack.build_environment.ChildError as e: - msg = e.long_message +def test_build_warning_output(mock_fetch, install_mockery): + with pytest.raises(spack.build_environment.ChildError) as e: + install("build-warnings") + assert "WARNING: ALL CAPITAL WARNING!" 
in install.output + assert "WARNING: ALL CAPITAL WARNING!" in e.value.long_message + assert "foo.c:89: warning: some weird warning!" in install.output + assert "foo.c:89: warning: some weird warning!" in e.value.long_message - assert "WARNING: ALL CAPITAL WARNING!" in msg - assert "foo.c:89: warning: some weird warning!" in msg - -def test_cache_only_fails(mock_fetch, install_mockery, capfd): +def test_cache_only_fails(mock_fetch, install_mockery): # libelf from cache fails to install, which automatically removes the # libdwarf build task - with capfd.disabled(): - out = install("--cache-only", "libdwarf", fail_on_error=False) + out = install("--cache-only", "libdwarf", fail_on_error=False) assert "Failed to install gcc-runtime" in out assert "Skipping build of libdwarf" in out @@ -692,13 +671,12 @@ def test_install_only_dependencies(mock_fetch, install_mockery): assert not os.path.exists(root.prefix) -def test_install_only_package(mock_fetch, install_mockery, capfd): +def test_install_only_package(mock_fetch, install_mockery): msg = "" - with capfd.disabled(): - try: - install("--only", "package", "dependent-install") - except spack.error.InstallError as e: - msg = str(e) + try: + install("--only", "package", "dependent-install") + except spack.error.InstallError as e: + msg = str(e) assert "Cannot proceed with dependent-install" in msg assert "1 uninstalled dependency" in msg @@ -807,12 +785,12 @@ def test_install_no_add_in_env( # Activate the environment with e: # Assert using --no-add with a spec not in the env fails - inst_out = install("--fake", "--no-add", "boost", fail_on_error=False, output=str) + inst_out = install("--fake", "--no-add", "boost", fail_on_error=False) - assert "You can add specs to the environment with 'spack add " in inst_out + assert "Specs can be added to the environment with 'spack add " in inst_out # Without --add, ensure that two packages "a" get installed - inst_out = install("--fake", "pkg-a", output=str) + inst_out = install("--fake", "pkg-a") assert len([x for x in e.all_specs() if x.installed and x.name == "pkg-a"]) == 2 # Install an unambiguous dependency spec (that already exists as a dep # but is not added to the environment.
install("dyninst") - find_output = find("-l", output=str) + find_output = find("-l") assert "dyninst" in find_output assert "libdwarf" in find_output assert "libelf" in find_output @@ -838,7 +816,7 @@ def test_install_no_add_in_env( install(str(mpi_spec_json_path)) assert mpi_spec not in e.roots() - find_output = find("-l", output=str) + find_output = find("-l") assert mpi_spec.name in find_output # Install an unambiguous depependency spec (that already exists as a @@ -859,14 +837,11 @@ def test_install_no_add_in_env( assert not any([s.name == "bowtie" for s in e.uninstalled_specs()]) -def test_install_help_does_not_show_cdash_options(capsys): +def test_install_help_does_not_show_cdash_options(): """ Make sure `spack install --help` does not describe CDash arguments """ - with pytest.raises(SystemExit): - install("--help") - captured = capsys.readouterr() - assert "CDash URL" not in captured.out + assert "CDash URL" not in install("--help") def test_install_help_cdash(): @@ -877,9 +852,8 @@ def test_install_help_cdash(): @pytest.mark.disable_clean_stage_check -def test_cdash_auth_token(tmp_path: pathlib.Path, mock_fetch, install_mockery, monkeypatch, capfd): - # capfd interferes with Spack's capturing - with fs.working_dir(str(tmp_path)), capfd.disabled(): +def test_cdash_auth_token(tmp_path: pathlib.Path, mock_fetch, install_mockery, monkeypatch): + with fs.working_dir(str(tmp_path)): monkeypatch.setenv("SPACK_CDASH_AUTH_TOKEN", "asdf") out = install("--fake", "-v", "--log-file=cdash_reports", "--log-format=cdash", "pkg-a") assert "Using CDash auth token from environment" in out @@ -887,9 +861,8 @@ def test_cdash_auth_token(tmp_path: pathlib.Path, mock_fetch, install_mockery, m @pytest.mark.not_on_windows("Windows log_output logs phase header out of order") @pytest.mark.disable_clean_stage_check -def test_cdash_configure_warning(tmp_path: pathlib.Path, mock_fetch, install_mockery, capfd): - # capfd interferes with Spack's capturing of e.g., Build.xml output - with capfd.disabled(), fs.working_dir(str(tmp_path)): +def test_cdash_configure_warning(tmp_path: pathlib.Path, mock_fetch, install_mockery): + with fs.working_dir(str(tmp_path)): # Test would fail if install raised an error. 
# Ensure that even on non-x86_64 architectures, there are no @@ -999,7 +972,6 @@ def test_installation_fail_tests(install_mockery, mock_fetch, name, method): # Unit tests should not be affected by the user's managed environments @pytest.mark.not_on_windows("Buildcache not supported on windows") def test_install_use_buildcache( - capsys, mutable_mock_env_path, mock_packages, mock_fetch, @@ -1059,21 +1031,20 @@ def install_use_buildcache(opt): # Configure the mirror where we put that buildcache w/ the compiler mirror("add", "test-mirror", mirror_url) - with capsys.disabled(): - # Install using the matrix of possible combinations with --use-buildcache - for pkg, deps in itertools.product(["auto", "only", "never"], repeat=2): - tty.debug( - "Testing `spack install --use-buildcache package:{0},dependencies:{1}`".format( - pkg, deps - ) + # Install using the matrix of possible combinations with --use-buildcache + for pkg, deps in itertools.product(["auto", "only", "never"], repeat=2): + tty.debug( + "Testing `spack install --use-buildcache package:{0},dependencies:{1}`".format( + pkg, deps ) - install_use_buildcache("package:{0},dependencies:{1}".format(pkg, deps)) - install_use_buildcache("dependencies:{0},package:{1}".format(deps, pkg)) + ) + install_use_buildcache("package:{0},dependencies:{1}".format(pkg, deps)) + install_use_buildcache("dependencies:{0},package:{1}".format(deps, pkg)) - # Install using a default override option - # Alternative to --cache-only (always) or --no-cache (never) - for opt in ["auto", "only", "never"]: - install_use_buildcache(opt) + # Install using a default override option + # Alternative to --cache-only (always) or --no-cache (never) + for opt in ["auto", "only", "never"]: + install_use_buildcache(opt) @pytest.mark.not_on_windows("Windows logger I/O operation on closed file when install fails") @@ -1084,7 +1055,7 @@ def test_padded_install_runtests_root(install_mockery, mock_fetch): output = install( "--verbose", "--test=root", "--no-cache", "test-build-callbacks", fail_on_error=False ) - assert output.count("method not implemented") == 1 + assert "method not implemented [undefined-build-test]" in output @pytest.mark.regression("35337") @@ -1111,7 +1082,7 @@ def test_invalid_concurrent_packages_flag(mutable_config): """Test that an invalid value for --concurrent-packages CLI flag raises a ValueError""" install = SpackCommand("install") with pytest.raises(ValueError, match="expected a positive integer"): - install("--concurrent-packages", "-2", fail_on_error=False) + install("--concurrent-packages", "-2") @pytest.mark.skipif(sys.platform == "win32", reason="Feature disabled on windows due to locking") diff --git a/lib/spack/spack/test/cmd/is_git_repo.py b/lib/spack/spack/test/cmd/is_git_repo.py index ffacd8d3f74081..3169f6ead3920b 100644 --- a/lib/spack/spack/test/cmd/is_git_repo.py +++ b/lib/spack/spack/test/cmd/is_git_repo.py @@ -8,7 +8,6 @@ import pytest -import spack import spack.cmd import spack.fetch_strategy from spack.llnl.util.filesystem import mkdirp, working_dir diff --git a/lib/spack/spack/test/cmd/list.py b/lib/spack/spack/test/cmd/list.py index 9a1b602cda1ede..ee45f163a91e5d 100644 --- a/lib/spack/spack/test/cmd/list.py +++ b/lib/spack/spack/test/cmd/list.py @@ -4,8 +4,6 @@ import os import pathlib -import sys -from textwrap import dedent import pytest @@ -25,25 +23,13 @@ def test_list(): def test_list_cli_output_format(mock_tty_stdout): - out = list("mpileaks") - # Currently logging on Windows detaches stdout - # from the terminal so we miss some 
output during tests - # TODO: (johnwparent): Once logging is amended on Windows, - # restore this test - if not sys.platform == "win32": - out_str = dedent( - """\ - mpileaks - ==> 1 packages - """ - ) - else: - out_str = dedent( - """\ - mpileaks - """ - ) - assert out == out_str + assert ( + list("mpileaks") + == """\ +mpileaks +==> 1 packages +""" + ) def test_list_filter(): diff --git a/lib/spack/spack/test/cmd/location.py b/lib/spack/spack/test/cmd/location.py index cb3426212fc6bd..4a9f5830a39c16 100644 --- a/lib/spack/spack/test/cmd/location.py +++ b/lib/spack/spack/test/cmd/location.py @@ -9,11 +9,12 @@ import spack.concretize import spack.environment as ev +import spack.main import spack.paths import spack.repo import spack.stage from spack.llnl.util.filesystem import mkdirp -from spack.main import SpackCommand, SpackCommandError +from spack.main import SpackCommand # Everything here uses (or can use) the mock config and database. pytestmark = [pytest.mark.usefixtures("mutable_config", "mutable_database")] @@ -75,8 +76,9 @@ def test_location_source_dir_missing(): ) def test_location_cmd_error(options): """Ensure the proper error is raised with problematic location options.""" - with pytest.raises(SpackCommandError, match="Command exited with code 1"): + with pytest.raises(spack.main.SpackCommandError) as e: location(*options) + assert e.value.code == 1 def test_location_env_exists(mutable_mock_env_path): @@ -94,27 +96,6 @@ def test_location_with_active_env(mutable_mock_env_path): assert location("--env").strip() == e.path -def test_location_env_flag_interference(mutable_mock_env_path): - """ - Tests that specifying an active environment using `spack -e x location ...` - does not interfere with the location command flags. - """ - - # create two environments - env("create", "first_env") - env("create", "second_env") - - global_args = ["-e", "first_env"] - - # `spack -e first_env location -e second_env` should print the env - # path of second_env - assert "first_env" not in location("-e", "second_env", global_args=global_args) - - # `spack -e first_env location --packages` should not print - # the environment path of first_env. 
- assert "first_env" not in location("--packages", global_args=global_args) - - def test_location_env_missing(): """Tests spack location --env.""" missing_env_name = "missing-env" diff --git a/lib/spack/spack/test/cmd/logs.py b/lib/spack/spack/test/cmd/logs.py index 0f4d48955f2f6e..1004306c24e715 100644 --- a/lib/spack/spack/test/cmd/logs.py +++ b/lib/spack/spack/test/cmd/logs.py @@ -11,10 +11,9 @@ import pytest -import spack import spack.cmd.logs import spack.concretize -import spack.main +import spack.error import spack.spec from spack.main import SpackCommand @@ -47,25 +46,19 @@ def _rewind_collect_and_decode(rw_stream): return rw_stream.read().decode("utf-8") -@pytest.fixture -def disable_capture(capfd): - with capfd.disabled(): - yield - - def test_logs_cmd_errors(install_mockery, mock_fetch, mock_archive, mock_packages): spec = spack.concretize.concretize_one("pkg-c") assert not spec.installed - with pytest.raises(spack.main.SpackCommandError, match="is not installed or staged"): + with pytest.raises(spack.error.SpackError, match="is not installed or staged"): logs("pkg-c") - with pytest.raises(spack.main.SpackCommandError, match="Too many specs"): + with pytest.raises(spack.error.SpackError, match="Too many specs"): logs("pkg-c mpi") install("pkg-c") os.remove(spec.package.install_log_path) - with pytest.raises(spack.main.SpackCommandError, match="No logs are available"): + with pytest.raises(spack.error.SpackError, match="No logs are available"): logs("pkg-c") @@ -75,7 +68,7 @@ def _write_string_to_path(string, path): f.write(string.encode("utf-8")) -def test_dump_logs(install_mockery, mock_fetch, mock_archive, mock_packages, disable_capture): +def test_dump_logs(install_mockery, mock_fetch, mock_archive, mock_packages): """Test that ``spack log`` can find (and print) the logs for partial builds and completed installs. 
diff --git a/lib/spack/spack/test/cmd/maintainers.py b/lib/spack/spack/test/cmd/maintainers.py index eea680f977954b..6c040016280ce5 100644 --- a/lib/spack/spack/test/cmd/maintainers.py +++ b/lib/spack/spack/test/cmd/maintainers.py @@ -38,9 +38,8 @@ def test_unmaintained(): assert out == sorted(set(spack.repo.all_package_names()) - set(MAINTAINED_PACKAGES)) -def test_all(capfd): - with capfd.disabled(): - out = split(maintainers("--all")) +def test_all(): + out = split(maintainers("--all")) assert out == [ "gcc-runtime:", "haampie", @@ -60,14 +59,12 @@ def test_all(capfd): "user2", ] - with capfd.disabled(): - out = split(maintainers("--all", "maintainers-1")) + out = split(maintainers("--all", "maintainers-1")) assert out == ["maintainers-1:", "user1,", "user2"] -def test_all_by_user(capfd): - with capfd.disabled(): - out = split(maintainers("--all", "--by-user")) +def test_all_by_user(): + out = split(maintainers("--all", "--by-user")) assert out == [ "haampie:", "gcc-runtime", @@ -87,8 +84,7 @@ def test_all_by_user(capfd): "maintainers-3", ] - with capfd.disabled(): - out = split(maintainers("--all", "--by-user", "user1", "user2")) + out = split(maintainers("--all", "--by-user", "user1", "user2")) assert out == [ "user1:", "maintainers-1,", @@ -113,43 +109,36 @@ def test_no_args_by_user(): def test_mutex_args_fail(): - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): maintainers("--maintained", "--unmaintained") -def test_maintainers_list_packages(capfd): - with capfd.disabled(): - out = split(maintainers("maintainers-1")) +def test_maintainers_list_packages(): + out = split(maintainers("maintainers-1")) assert out == ["user1", "user2"] - with capfd.disabled(): - out = split(maintainers("maintainers-1", "maintainers-2")) + out = split(maintainers("maintainers-1", "maintainers-2")) assert out == ["user1", "user2", "user3"] - with capfd.disabled(): - out = split(maintainers("maintainers-2")) + out = split(maintainers("maintainers-2")) assert out == ["user2", "user3"] -def test_maintainers_list_fails(capfd): +def test_maintainers_list_fails(): out = maintainers("pkg-a", fail_on_error=False) assert not out assert maintainers.returncode == 1 -def test_maintainers_list_by_user(capfd): - with capfd.disabled(): - out = split(maintainers("--by-user", "user1")) +def test_maintainers_list_by_user(): + out = split(maintainers("--by-user", "user1")) assert out == ["maintainers-1", "maintainers-3", "py-extension1"] - with capfd.disabled(): - out = split(maintainers("--by-user", "user1", "user2")) + out = split(maintainers("--by-user", "user1", "user2")) assert out == ["maintainers-1", "maintainers-2", "maintainers-3", "py-extension1"] - with capfd.disabled(): - out = split(maintainers("--by-user", "user2")) + out = split(maintainers("--by-user", "user2")) assert out == ["maintainers-1", "maintainers-2", "maintainers-3", "py-extension1"] - with capfd.disabled(): - out = split(maintainers("--by-user", "user3")) + out = split(maintainers("--by-user", "user3")) assert out == ["maintainers-2", "maintainers-3"] diff --git a/lib/spack/spack/test/cmd/mark.py b/lib/spack/spack/test/cmd/mark.py index e8aa7ba653be2d..c47b2544f28ee6 100644 --- a/lib/spack/spack/test/cmd/mark.py +++ b/lib/spack/spack/test/cmd/mark.py @@ -18,7 +18,7 @@ @pytest.mark.db def test_mark_mode_required(mutable_database): - with pytest.raises(SystemExit): + with pytest.raises(SpackCommandError): mark("-a") diff --git a/lib/spack/spack/test/cmd/mirror.py b/lib/spack/spack/test/cmd/mirror.py index 
bf5ac2a3aba9cf..a6dd5605cb34d6 100644 --- a/lib/spack/spack/test/cmd/mirror.py +++ b/lib/spack/spack/test/cmd/mirror.py @@ -7,19 +7,19 @@ import pytest -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.cmd.mirror import spack.concretize import spack.config import spack.environment as ev import spack.error -import spack.mirrors.utils import spack.package_base import spack.spec import spack.util.git import spack.util.url as url_util import spack.version from spack.main import SpackCommand, SpackCommandError +from spack.mirrors.utils import MirrorStatsForAllSpecs, MirrorStatsForOneSpec config = SpackCommand("config") mirror = SpackCommand("mirror") @@ -35,9 +35,8 @@ @pytest.mark.disable_clean_stage_check @pytest.mark.regression("8083") -def test_regression_8083(tmp_path: pathlib.Path, capfd, mock_packages, mock_fetch, config): - with capfd.disabled(): - output = mirror("create", "-d", str(tmp_path), "externaltool") +def test_regression_8083(tmp_path: pathlib.Path, mock_packages, mock_fetch, config): + output = mirror("create", "-d", str(tmp_path), "externaltool") assert "Skipping" in output assert "as it is an external spec" in output @@ -64,6 +63,107 @@ def test_mirror_from_env(mutable_mock_env_path, tmp_path: pathlib.Path, mock_pac assert mirror_res == expected +def test_mirror_cli_parallel_args( + tmp_path, mock_packages, mock_fetch, mutable_mock_env_path, monkeypatch +): + """Test that the -j CLI flag is forwarded to create_mirror_for_all_specs""" + mirror_dir = str(tmp_path / "mirror") + env_name = "test-parallel" + + def mock_create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions, workers): + assert path == mirror_dir + assert workers == 2 + + monkeypatch.setattr( + spack.cmd.mirror, "create_mirror_for_all_specs", mock_create_mirror_for_all_specs + ) + + env("create", env_name) + with ev.read(env_name): + add("trivial-install-test-package") + add("git-test") + concretize() + with spack.config.override("config:checksum", False): + mirror("create", "-d", mirror_dir, "--all", "-j", "2") + + +def test_mirror_from_env_parallel(tmp_path, mock_packages, mock_fetch, mutable_mock_env_path): + """Directly test create_mirror_for_all_specs with parallel option""" + mirror_dir = str(tmp_path / "mirror") + env_name = "test-parallel" + + env("create", env_name) + with ev.read(env_name): + add("trivial-install-test-package") + add("git-test") + concretize() + + e = ev.read(env_name) + specs = list(e.specs_by_hash.values()) + + with spack.config.override("config:checksum", False): + mirror_stats = spack.cmd.mirror.create_mirror_for_all_specs( + specs, mirror_dir, False, workers=2 + ) + + assert len(mirror_stats.errors) == 0 + assert set(os.listdir(mirror_dir)) == set([s.name for s in e.user_specs]) + for spec in e.specs_by_hash.values(): + mirror_res = os.listdir(os.path.join(mirror_dir, spec.name)) + expected = ["%s.tar.gz" % spec.format("{name}-{version}")] + assert mirror_res == expected + + +def test_mirror_stats_merge(): + """Test MirrorStats merge functionality""" + spec1 = "package@1.0" + spec2 = "package@2.0" + spec3 = "package@3.0" + + s1 = MirrorStatsForOneSpec(spec1) + s1.added("/test/path/1") + s1.added("/test/path/2") + s1.finalize() + + s2 = MirrorStatsForOneSpec(spec2) + s2.already_existed("/test/path/3") + s2.finalize() + + all_stats = MirrorStatsForAllSpecs() + + # Before any merge, the aggregate stats should be empty + present, mirrored, errors = all_stats.stats() + assert len(present) == 0 + assert len(mirrored) == 0 + assert len(errors) == 0 + + # Merge package 1 and 2 + 
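# merge() folds each per-spec result into the aggregate stats: added() paths are reported as "mirrored", already_existed() paths as "present" +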
all_stats.merge(s1) + all_stats.merge(s2) + + # Check after merge + present, mirrored, errors = all_stats.stats() + assert present.count(spec2) == 1 + assert mirrored.count(spec1) == 1 + assert len(present) == 1 + assert len(mirrored) == 1 + assert len(errors) == 0 + + # Merge package 3 + s3 = MirrorStatsForOneSpec(spec3) + s3.already_existed("/test/path/4") + s3.added("/test/path/5") + s3.finalize() + all_stats.merge(s3) + + present, mirrored, errors = all_stats.stats() + assert present.count(spec3) == 1 + assert mirrored.count(spec3) == 1 + assert len(present) == 2 + assert len(mirrored) == 2 + assert len(errors) == 0 + + # Test for command line-specified spec in concretized environment def test_mirror_spec_from_env( mutable_mock_env_path, tmp_path: pathlib.Path, mock_packages, mock_fetch @@ -104,7 +204,7 @@ def test_mirror_skip_unstable( specs = [ spack.concretize.concretize_one(x) for x in ["git-test", "trivial-pkg-with-valid-hash"] ] - spack.mirrors.utils.create(mirror_dir, specs, skip_unstable_versions=True) + spack.cmd.mirror.create(mirror_dir, specs, skip_unstable_versions=True) assert set(os.listdir(mirror_dir)) - set(["_source-cache"]) == set( ["trivial-pkg-with-valid-hash"] @@ -140,7 +240,7 @@ def test_exclude_specs(mock_packages, config): specs=["mpich"], versions_per_spec="all", exclude_specs="mpich@3.0.1:3.0.2 mpich@1.0" ) - mirror_specs, _ = spack.cmd.mirror._specs_and_action(args) + mirror_specs = spack.cmd.mirror._specs_to_mirror(args) expected_include = set( spack.concretize.concretize_one(x) for x in ["mpich@3.0.3", "mpich@3.0.4", "mpich@3.0"] ) @@ -157,7 +257,7 @@ def test_exclude_specs_public_mirror(mock_packages, config): private=False, ) - mirror_specs, _ = spack.cmd.mirror._specs_and_action(args) + mirror_specs = spack.cmd.mirror._specs_to_mirror(args) assert not any(s.name == "no-redistribute" for s in mirror_specs) assert any(s.name == "no-redistribute-dependent" for s in mirror_specs) @@ -174,7 +274,7 @@ def test_exclude_file(mock_packages, tmp_path: pathlib.Path, config): args = MockMirrorArgs(specs=["mpich"], versions_per_spec="all", exclude_file=str(exclude_path)) - mirror_specs, _ = spack.cmd.mirror._specs_and_action(args) + mirror_specs = spack.cmd.mirror._specs_to_mirror(args) expected_include = set( spack.concretize.concretize_one(x) for x in ["mpich@3.0.3", "mpich@3.0.4", "mpich@3.0"] ) @@ -183,176 +283,179 @@ def test_exclude_file(mock_packages, tmp_path: pathlib.Path, config): assert not any(spec.satisfies(y) for spec in mirror_specs for y in expected_exclude) -def test_mirror_crud(mutable_config, capsys): - with capsys.disabled(): - mirror("add", "mirror", "http://spack.io") +def test_mirror_remove_by_scope(mutable_config, tmp_path: pathlib.Path): + # add a new mirror to two scopes + mirror("add", "--scope=site", "mock", str(tmp_path / "mock_mirror")) + mirror("add", "--scope=system", "mock", str(tmp_path / "mock_mirror")) - output = mirror("remove", "mirror") - assert "Removed mirror" in output + # Confirm that it is not removed when the scope is incorrect + with pytest.raises(SpackCommandError): + mirror("remove", "--scope=user", "mock") + output = mirror("list") + assert "mock" in output - mirror("add", "mirror", "http://spack.io") + # Confirm that when the scope is specified, it is only removed from that scope + mirror("remove", "--scope=site", "mock") + site_output = mirror("list", "--scope=site") + system_output = mirror("list", "--scope=system") + assert "mock" not in site_output + assert "mock" in system_output - # no-op - output = 
mirror("set-url", "mirror", "http://spack.io") - assert "No changes made" in output + # Confirm that when the scope is not specified, it is removed from top scope + mirror("add", "--scope=site", "mock", str(tmp_path / "mockrepo")) + mirror("remove", "mock") + site_output = mirror("list", "--scope=site") + system_output = mirror("list", "--scope=system") + assert "mock" not in site_output + assert "mock" in system_output - output = mirror("set-url", "--push", "mirror", "s3://spack-public") - assert not output + # Check that the `--all-scopes` option works + mirror("add", "--scope=site", "mock", str(tmp_path / "mockrepo")) + mirror("remove", "--all-scopes", "mock") + output = mirror("list") + assert "mock" not in output - # no-op - output = mirror("set-url", "--push", "mirror", "s3://spack-public") - assert "No changes made" in output - output = mirror("remove", "mirror") - assert "Removed mirror" in output +def test_mirror_crud(mutable_config): + mirror("add", "mirror", "http://spack.io") - # Test S3 connection info token - mirror("add", "--s3-access-token", "aaaaaazzzzz", "mirror", "s3://spack-public") + output = mirror("remove", "mirror") + assert "Removed mirror" in output - output = mirror("remove", "mirror") - assert "Removed mirror" in output + mirror("add", "mirror", "http://spack.io") - # Test S3 connection info token as variable - mirror("add", "--s3-access-token-variable", "aaaaaazzzzz", "mirror", "s3://spack-public") + # no-op + output = mirror("set-url", "mirror", "http://spack.io") + assert "No changes made" in output - output = mirror("remove", "mirror") - assert "Removed mirror" in output + output = mirror("set-url", "--push", "mirror", "s3://spack-public") + assert not output - def do_add_set_seturl_access_pair( - id_arg, secret_arg, mirror_name="mirror", mirror_url="s3://spack-public" - ): - # Test S3 connection info id/key - output = mirror("add", id_arg, "foo", secret_arg, "bar", mirror_name, mirror_url) - if "variable" not in secret_arg: - assert ( - f"Configuring mirror secrets as plain text with {secret_arg} is deprecated. 
" - in output - ) + # no-op + output = mirror("set-url", "--push", "mirror", "s3://spack-public") + assert "No changes made" in output - output = config("blame", "mirrors") - assert all([x in output for x in ("foo", "bar", mirror_name, mirror_url)]) - # Mirror access_pair deprecation warning should not be in blame output - assert "support for plain text secrets" not in output + output = mirror("remove", "mirror") + assert "Removed mirror" in output - output = mirror("set", id_arg, "foo_set", secret_arg, "bar_set", mirror_name) - if "variable" not in secret_arg: - assert "support for plain text secrets" in output - output = config("blame", "mirrors") - assert all([x in output for x in ("foo_set", "bar_set", mirror_name, mirror_url)]) - if "variable" not in secret_arg: - output = mirror( - "set", id_arg, "foo_set", secret_arg + "-variable", "bar_set_var", mirror_name - ) - assert "support for plain text secrets" not in output - output = config("blame", "mirrors") - assert all( - [x in output for x in ("foo_set", "bar_set_var", mirror_name, mirror_url)] - ) + # Test S3 connection info token as variable + mirror("add", "--s3-access-token-variable", "aaaaaazzzzz", "mirror", "s3://spack-public") - output = mirror( - "set-url", - id_arg, - "foo_set_url", - secret_arg, - "bar_set_url", - "--push", - mirror_name, - mirror_url + "-push", - ) - output = config("blame", "mirrors") - assert all( - [ - x in output - for x in ("foo_set_url", "bar_set_url", mirror_name, mirror_url + "-push") - ] - ) + output = mirror("remove", "mirror") + assert "Removed mirror" in output - output = mirror("set", id_arg, "a", mirror_name) - assert "No changes made to mirror" not in output + def do_add_set_seturl_access_pair( + id_arg, secret_arg, mirror_name="mirror", mirror_url="s3://spack-public" + ): + # Test connection info id/key + output = mirror("add", id_arg, "foo", secret_arg, "bar", mirror_name, mirror_url) - output = mirror("set", secret_arg, "b", mirror_name) - assert "No changes made to mirror" not in output + output = config("blame", "mirrors") + assert all([x in output for x in ("foo", "bar", mirror_name, mirror_url)]) - output = mirror("set-url", id_arg, "c", mirror_name, mirror_url) - assert "No changes made to mirror" not in output + output = mirror("set", id_arg, "foo_set", secret_arg, "bar_set", mirror_name) + output = config("blame", "mirrors") + assert all([x in output for x in ("foo_set", "bar_set", mirror_name, mirror_url)]) + if "variable" not in secret_arg: + output = mirror( + "set", id_arg, "foo_set", secret_arg + "-variable", "bar_set_var", mirror_name + ) + assert "support for plain text secrets" not in output + output = config("blame", "mirrors") + assert all([x in output for x in ("foo_set", "bar_set_var", mirror_name, mirror_url)]) + + output = mirror( + "set-url", + id_arg, + "foo_set_url", + secret_arg, + "bar_set_url", + "--push", + mirror_name, + mirror_url + "-push", + ) + output = config("blame", "mirrors") + assert all( + [ + x in output + for x in ("foo_set_url", "bar_set_url", mirror_name, mirror_url + "-push") + ] + ) - output = mirror("set-url", secret_arg, "d", mirror_name, mirror_url) - assert "No changes made to mirror" not in output + output = mirror("set", id_arg, "a", mirror_name) + assert "No changes made to mirror" not in output - output = mirror("remove", mirror_name) - assert "Removed mirror" in output + output = mirror("set", secret_arg, "b", mirror_name) + assert "No changes made to mirror" not in output - output = mirror("add", id_arg, "foo", mirror_name, 
mirror_url) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("set-url", id_arg, "c", mirror_name, mirror_url) + assert "No changes made to mirror" not in output - output = mirror("set-url", id_arg, "bar", mirror_name, mirror_url) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("set-url", secret_arg, "d", mirror_name, mirror_url) + assert "No changes made to mirror" not in output - output = mirror("set", id_arg, "bar", mirror_name) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("remove", mirror_name) + assert "Removed mirror" in output - output = mirror("remove", mirror_name) - assert "Removed mirror" in output + output = mirror("add", id_arg, "foo", mirror_name, mirror_url) + assert "Expected both parts of the access pair to be specified. " in output - output = mirror("add", secret_arg, "bar", mirror_name, mirror_url) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("set-url", id_arg, "bar", mirror_name, mirror_url) + assert "Expected both parts of the access pair to be specified. " in output - output = mirror("set-url", secret_arg, "bar", mirror_name, mirror_url) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("set", id_arg, "bar", mirror_name) + assert "Expected both parts of the access pair to be specified. " in output - output = mirror("set", secret_arg, "bar", mirror_name) - assert "Expected both parts of the access pair to be specified. " in output + output = mirror("remove", mirror_name) + assert "Removed mirror" in output - output = mirror("remove", mirror_name) - assert "Removed mirror" in output + output = mirror("add", secret_arg, "bar", mirror_name, mirror_url) + assert "Expected both parts of the access pair to be specified. " in output - output = mirror("list") - assert "No mirrors configured" in output + output = mirror("set-url", secret_arg, "bar", mirror_name, mirror_url) + assert "Expected both parts of the access pair to be specified. " in output - do_add_set_seturl_access_pair("--s3-access-key-id", "--s3-access-key-secret") - do_add_set_seturl_access_pair("--s3-access-key-id", "--s3-access-key-secret-variable") - do_add_set_seturl_access_pair( - "--s3-access-key-id-variable", "--s3-access-key-secret-variable" - ) - with pytest.raises( - spack.error.SpackError, match="Cannot add mirror with a variable id and text secret" - ): - do_add_set_seturl_access_pair("--s3-access-key-id-variable", "--s3-access-key-secret") - - # Test OCI connection info user/password - do_add_set_seturl_access_pair("--oci-username", "--oci-password") - do_add_set_seturl_access_pair("--oci-username", "--oci-password-variable") - do_add_set_seturl_access_pair("--oci-username-variable", "--oci-password-variable") - with pytest.raises( - spack.error.SpackError, match="Cannot add mirror with a variable id and text secret" - ): - do_add_set_seturl_access_pair("--s3-access-key-id-variable", "--s3-access-key-secret") - - # Test S3 connection info with endpoint URL - mirror( - "add", - "--s3-access-token", - "aaaaaazzzzz", - "--s3-endpoint-url", - "http://localhost/", - "mirror", - "s3://spack-public", - ) + output = mirror("set", secret_arg, "bar", mirror_name) + assert "Expected both parts of the access pair to be specified. 
" in output - output = mirror("remove", "mirror") + output = mirror("remove", mirror_name) assert "Removed mirror" in output output = mirror("list") assert "No mirrors configured" in output - # Test GCS Mirror - mirror("add", "mirror", "gs://spack-test") + do_add_set_seturl_access_pair("--s3-access-key-id", "--s3-access-key-secret-variable") + do_add_set_seturl_access_pair("--s3-access-key-id-variable", "--s3-access-key-secret-variable") - output = mirror("remove", "mirror") - assert "Removed mirror" in output + # Test OCI connection info user/password + do_add_set_seturl_access_pair("--oci-username", "--oci-password-variable") + do_add_set_seturl_access_pair("--oci-username-variable", "--oci-password-variable") - output = mirror("list") - assert "No mirrors configured" in output + # Test S3 connection info with endpoint URL + mirror( + "add", + "--s3-access-token-variable", + "aaaaaazzzzz", + "--s3-endpoint-url", + "http://localhost/", + "mirror", + "s3://spack-public", + ) + + output = mirror("remove", "mirror") + assert "Removed mirror" in output + + output = mirror("list") + assert "No mirrors configured" in output + + # Test GCS Mirror + mirror("add", "mirror", "gs://spack-test") + + output = mirror("remove", "mirror") + assert "Removed mirror" in output + + output = mirror("list") + assert "No mirrors configured" in output def test_mirror_nonexisting(mutable_config): @@ -392,7 +495,7 @@ def test_mirror_destroy( install("--fake", "--no-cache", spec_name) buildcache("push", "-u", "-f", str(mirror_dir), spec_name) - blobs_path = bindist.buildcache_relative_blobs_path() + blobs_path = spack.binary_distribution.buildcache_relative_blobs_path() contents = os.listdir(str(mirror_dir)) assert blobs_path in contents @@ -411,7 +514,7 @@ class TestMirrorCreate: @pytest.mark.regression("31736", "31985") def test_all_specs_with_all_versions_dont_concretize(self): args = MockMirrorArgs(all=True, exclude_file=None, exclude_specs=None) - mirror_specs, _ = spack.cmd.mirror._specs_and_action(args) + mirror_specs = spack.cmd.mirror._specs_to_mirror(args) assert all(not s.concrete for s in mirror_specs) @pytest.mark.parametrize( @@ -469,7 +572,7 @@ def test_error_conditions(self, cli_args, error_str): ], ) def test_exclude_specs_from_user(self, cli_args, not_expected, config): - mirror_specs, _ = spack.cmd.mirror._specs_and_action(MockMirrorArgs(**cli_args)) + mirror_specs = spack.cmd.mirror._specs_to_mirror(MockMirrorArgs(**cli_args)) assert not any(s.satisfies(y) for s in mirror_specs for y in not_expected) @pytest.mark.parametrize("abstract_specs", [("bowtie", "callpath")]) @@ -537,13 +640,16 @@ def test_mirror_set_2(mutable_config): "http://example2.com", "--s3-access-key-id", "username", - "--s3-access-key-secret", + "--s3-access-key-secret-variable", "password", ) assert spack.config.get("mirrors:example") == { "url": "http://example.com", - "push": {"url": "http://example2.com", "access_pair": ["username", "password"]}, + "push": { + "url": "http://example2.com", + "access_pair": {"id": "username", "secret_variable": "password"}, + }, } @@ -583,7 +689,7 @@ def test_mirror_add_set_autopush(mutable_config): @pytest.mark.require_provenance @pytest.mark.disable_clean_stage_check @pytest.mark.parametrize("mirror_knows_commit", (True, False)) -def test_binary_provenance_url_fails_mirror_resolves_commit( +def test_git_provenance_url_fails_mirror_resolves_commit( git, mock_git_repository, mock_packages, @@ -609,14 +715,15 @@ def test_binary_provenance_url_fails_mirror_resolves_commit( mirror("add", 
"--type", "source", "test-mirror", mirror_path) spec = spack.concretize.concretize_one("git-test-commit@main") - assert spec.package.stage.archive_file + + assert spec.package.fetcher.source_id() == gold_commit assert "commit" in spec.variants assert spec.variants["commit"].value == gold_commit @pytest.mark.require_provenance @pytest.mark.disable_clean_stage_check -def test_binary_provenance_relative_to_mirror( +def test_git_provenance_relative_to_mirror( git, mock_git_version_info, mock_packages, monkeypatch, tmp_path: pathlib.Path, mutable_config ): """Integration test to evaluate how commit resolution should behave with a mirror diff --git a/lib/spack/spack/test/cmd/pkg.py b/lib/spack/spack/test/cmd/pkg.py index cafb6597abbd2c..6a1f6dfd1244c1 100644 --- a/lib/spack/spack/test/cmd/pkg.py +++ b/lib/spack/spack/test/cmd/pkg.py @@ -344,11 +344,10 @@ def test_group_arguments( @pytest.mark.skipif(not spack.cmd.pkg.get_grep(), reason="grep is not installed") -def test_pkg_grep(mock_packages, capfd): +def test_pkg_grep(mock_packages): # only splice-* mock packages have the string "splice" in them pkg("grep", "-l", "splice") - output, _ = capfd.readouterr() - assert output.strip() == "\n".join( + assert pkg.output.strip() == "\n".join( spack.repo.PATH.get_pkg_class(name).module.__file__ for name in [ "depends-on-manyvariants", @@ -370,8 +369,7 @@ def test_pkg_grep(mock_packages, capfd): with pytest.raises(spack.main.SpackCommandError): pkg("grep", "abcdefghijklmnopqrstuvwxyz") assert pkg.returncode == 1 - output, _ = capfd.readouterr() - assert output.strip() == "" + assert pkg.output.strip() == "" # ensure that we return > 1 for an error with pytest.raises(spack.main.SpackCommandError): diff --git a/lib/spack/spack/test/cmd/print_shell_vars.py b/lib/spack/spack/test/cmd/print_shell_vars.py index f8630b0c7dcafb..866d4a3da9e46c 100644 --- a/lib/spack/spack/test/cmd/print_shell_vars.py +++ b/lib/spack/spack/test/cmd/print_shell_vars.py @@ -5,9 +5,9 @@ from spack.main import print_setup_info -def test_print_shell_vars_sh(capsys): +def test_print_shell_vars_sh(capfd): print_setup_info("sh") - out, _ = capsys.readouterr() + out, _ = capfd.readouterr() assert "_sp_sys_type=" in out assert "_sp_tcl_roots=" in out @@ -15,9 +15,9 @@ def test_print_shell_vars_sh(capsys): assert "_sp_module_prefix" not in out -def test_print_shell_vars_csh(capsys): +def test_print_shell_vars_csh(capfd): print_setup_info("csh") - out, _ = capsys.readouterr() + out, _ = capfd.readouterr() assert "set _sp_sys_type = " in out assert "set _sp_tcl_roots = " in out @@ -25,9 +25,9 @@ def test_print_shell_vars_csh(capsys): assert "set _sp_module_prefix = " not in out -def test_print_shell_vars_sh_modules(capsys): +def test_print_shell_vars_sh_modules(capfd): print_setup_info("sh", "modules") - out, _ = capsys.readouterr() + out, _ = capfd.readouterr() assert "_sp_sys_type=" in out assert "_sp_tcl_roots=" in out @@ -35,9 +35,9 @@ def test_print_shell_vars_sh_modules(capsys): assert "_sp_module_prefix=" in out -def test_print_shell_vars_csh_modules(capsys): +def test_print_shell_vars_csh_modules(capfd): print_setup_info("csh", "modules") - out, _ = capsys.readouterr() + out, _ = capfd.readouterr() assert "set _sp_sys_type = " in out assert "set _sp_tcl_roots = " in out diff --git a/lib/spack/spack/test/cmd/reindex.py b/lib/spack/spack/test/cmd/reindex.py index e0a3401eb41242..5e854ecade16b1 100644 --- a/lib/spack/spack/test/cmd/reindex.py +++ b/lib/spack/spack/test/cmd/reindex.py @@ -69,6 +69,7 @@ def 
test_reindex_with_deprecated_packages( new_libelf = db.query_local_by_spec_hash( db.query_local("libelf@0.8.13", installed=True)[0].dag_hash() ) + assert old_libelf is not None and new_libelf is not None assert old_libelf.deprecated_for == new_libelf.spec.dag_hash() assert new_libelf.deprecated_for is None assert new_libelf.ref_count == 1 diff --git a/lib/spack/spack/test/cmd/repo.py b/lib/spack/spack/test/cmd/repo.py index 4242d17b5bd3cc..1aa874dca66ec6 100644 --- a/lib/spack/spack/test/cmd/repo.py +++ b/lib/spack/spack/test/cmd/repo.py @@ -17,18 +17,16 @@ import spack.repo_migrate from spack.error import SpackError from spack.llnl.util.filesystem import working_dir -from spack.main import SpackCommand from spack.util.executable import Executable repo = spack.main.SpackCommand("repo") -env = SpackCommand("env") +env = spack.main.SpackCommand("env") def test_help_option(): # Test 'spack repo --help' to check basic import works # and the command exits successfully - with pytest.raises(SystemExit): - repo("--help") + repo("--help") assert repo.returncode in (None, 0) @@ -40,12 +38,46 @@ def test_create_add_list_remove(mutable_config, tmp_path: pathlib.Path): # Add the new repository and check it appears in the list output repo("add", "--scope=site", str(tmp_path / "spack_repo" / "mockrepo")) - output = repo("list", "--scope=site", output=str) + output = repo("list", "--scope=site") assert "mockrepo" in output # Then remove it and check it's not there repo("remove", "--scope=site", str(tmp_path / "spack_repo" / "mockrepo")) - output = repo("list", "--scope=site", output=str) + output = repo("list", "--scope=site") + assert "mockrepo" not in output + + +def test_repo_remove_by_scope(mutable_config, tmp_path: pathlib.Path): + # Create and add a new repo + repo("create", str(tmp_path), "mockrepo") + repo("add", "--scope=site", str(tmp_path / "spack_repo" / "mockrepo")) + repo("add", "--scope=system", str(tmp_path / "spack_repo" / "mockrepo")) + + # Confirm that it is not removed when the scope is incorrect + with pytest.raises(spack.main.SpackCommandError): + repo("remove", "--scope=user", "mockrepo") + output = repo("list") + assert "mockrepo" in output + + # Confirm that when the scope is specified, it is only removed from that scope + repo("remove", "--scope=site", "mockrepo") + site_output = repo("list", "--scope=site") + system_output = repo("list", "--scope=system") + assert "mockrepo" not in site_output + assert "mockrepo" in system_output + + # Confirm that when the scope is not specified, it is removed from the topmost scope in which it is present + repo("add", "--scope=site", str(tmp_path / "spack_repo" / "mockrepo")) + repo("remove", "mockrepo") + site_output = repo("list", "--scope=site") + system_output = repo("list", "--scope=system") + assert "mockrepo" not in site_output + assert "mockrepo" in system_output + + # Check that the `--all-scopes` option removes from all scopes + repo("add", "--scope=site", str(tmp_path / "spack_repo" / "mockrepo")) + repo("remove", "--all-scopes", "mockrepo") + output = repo("list") assert "mockrepo" not in output @@ -605,7 +637,7 @@ def mock_parse_config_descriptor(name, entry, lock): assert repos_config["auto_name_repo"] == str(tmp_path) -def test_add_repo_partial_repo_construction_warning(monkeypatch, capsys): +def test_add_repo_partial_repo_construction_warning(monkeypatch, capfd): """Test that _add_repo issues warnings for repos that can't be constructed but succeeds if at least one can be. """ @@ -631,7 +663,7 @@ def mock_parse_config_descriptor(name, entry, 
lock): assert key == "test_mixed_repo" # Check that a warning was issued for the failed repo - captured = capsys.readouterr() + captured = capfd.readouterr() assert "Skipping package repository" in captured.err @@ -765,18 +797,18 @@ def test_repo_list_format_flags( ) # Test default table format, which shows one line per package repository - table_output = repo("list", output=str) + table_output = repo("list") assert "[+] repo_one" in table_output assert "[+] repo_two" in table_output assert " - uninitialized" in table_output assert "[-] misconfigured" in table_output # Test --namespaces flag - namespaces_output = repo("list", "--namespaces", output=str) + namespaces_output = repo("list", "--namespaces") assert namespaces_output.strip().split("\n") == ["repo_one", "repo_two"] # Test --names flag - config_names_output = repo("list", "--names", output=str) + config_names_output = repo("list", "--names") config_names_lines = config_names_output.strip().split("\n") assert config_names_lines == ["monorepo", "uninitialized", "misconfigured"] diff --git a/lib/spack/spack/test/cmd/resource.py b/lib/spack/spack/test/cmd/resource.py index e9e9ec85f07667..e14be3efc6403a 100644 --- a/lib/spack/spack/test/cmd/resource.py +++ b/lib/spack/spack/test/cmd/resource.py @@ -35,9 +35,8 @@ ) -def test_resource_list(mock_packages, capfd): - with capfd.disabled(): - out = resource("list") +def test_resource_list(mock_packages): + out = resource("list") for h in mock_hashes: assert h in out @@ -57,22 +56,20 @@ def test_resource_list(mock_packages, capfd): assert "patched by: builtin_mock.patch-a-dependency" in out -def test_resource_list_only_hashes(mock_packages, capfd): - with capfd.disabled(): - out = resource("list", "--only-hashes") +def test_resource_list_only_hashes(mock_packages): + out = resource("list", "--only-hashes") for h in mock_hashes: assert h in out -def test_resource_show(mock_packages, capfd): +def test_resource_show(mock_packages): test_hash = ( "c45c1564f70def3fc1a6e22139f62cb21cd190cc3a7dbe6f4120fa59ce33dcb8" if sys.platform != "win32" else "3c5b65abcd6a3b2c714dbf7c31ff65fe3748a1adc371f030c283007ca5534f11" ) - with capfd.disabled(): - out = resource("show", test_hash) + out = resource("show", test_hash) assert out.startswith(test_hash) assert ( diff --git a/lib/spack/spack/test/cmd/spec.py b/lib/spack/spack/test/cmd/spec.py index bd053e05541db9..a8cb2d88de9ef3 100644 --- a/lib/spack/spack/test/cmd/spec.py +++ b/lib/spack/spack/test/cmd/spec.py @@ -175,6 +175,7 @@ def test_env_aware_spec(mutable_mock_env_path): ("develop-branch-version", "git.foo=0.2.15", None), ], ) +@pytest.mark.use_package_hash def test_spec_version_assigned_git_ref_as_version(name, version, error): if error: with pytest.raises(error): diff --git a/lib/spack/spack/test/cmd/style.py b/lib/spack/spack/test/cmd/style.py index 8973406cffd3f5..fb983bf3fc657e 100644 --- a/lib/spack/spack/test/cmd/style.py +++ b/lib/spack/spack/test/cmd/style.py @@ -80,7 +80,7 @@ def flake8_package_with_errors(scope="function"): yield tmp -def test_changed_files_from_git_rev_base(git, tmp_path: pathlib.Path, capfd): +def test_changed_files_from_git_rev_base(git, tmp_path: pathlib.Path): """Test arbitrary git ref as base.""" with working_dir(str(tmp_path)): git("init") @@ -229,7 +229,7 @@ def test_fix_style(external_style_root): @pytest.mark.skipif(not ISORT, reason="isort is not installed.") @pytest.mark.skipif(not MYPY, reason="mypy is not installed.") @pytest.mark.skipif(not BLACK, reason="black is not installed.") -def 
test_external_root(external_style_root, capfd): +def test_external_root(external_style_root): """Ensure we can run in a separate root directory w/o configuration files.""" tmp_path, py_file = external_style_root @@ -326,6 +326,12 @@ def foo(config: "spack.error.SpackError"): spack.util.executable.Executable("example") print(spack.__version__) print(spack.repo_utils.__file__) + +import spack.enums +from spack.enums import ConfigScopePriority + +import spack.util.url as url_util +def something(y: spack.util.url.Url): ... ''' file.write_text(contents) root = str(tmp_path) @@ -343,8 +349,10 @@ def foo(config: "spack.error.SpackError"): assert "issues.py: redundant import: spack.cmd" in output assert "issues.py: redundant import: spack.repo" in output assert "issues.py: redundant import: spack.config" not in output # comment prevents removal + assert "issues.py: redundant import: spack.enums" in output # imported via from-import assert "issues.py: missing import: spack" in output # used by spack.__version__ assert "issues.py: missing import: spack.util.executable" in output + assert "issues.py: missing import: spack.util.url" in output # used in type hint assert "issues.py: missing import: spack.error" not in output # not directly used assert exit_code == 1 assert file.read_text() == contents # fix=False should not change the file @@ -362,8 +370,10 @@ def foo(config: "spack.error.SpackError"): output = output_buf.getvalue() assert exit_code == 1 assert "issues.py: redundant import: spack.cmd" in output + assert "issues.py: redundant import: spack.enums" in output assert "issues.py: missing import: spack" in output assert "issues.py: missing import: spack.util.executable" in output + assert "issues.py: missing import: spack.util.url" in output # after fix a second fix is idempotent output_buf = io.StringIO() @@ -382,8 +392,10 @@ def foo(config: "spack.error.SpackError"): # check that the file was fixed new_contents = file.read_text() assert "import spack.cmd" not in new_contents + assert "import spack.enums" not in new_contents assert "import spack\n" in new_contents assert "import spack.util.executable\n" in new_contents + assert "import spack.util.url\n" in new_contents @pytest.mark.skipif(sys.version_info < (3, 9), reason="requires Python 3.9+") diff --git a/lib/spack/spack/test/cmd/tags.py b/lib/spack/spack/test/cmd/tags.py index dd435f90316892..150f9112d4239f 100644 --- a/lib/spack/spack/test/cmd/tags.py +++ b/lib/spack/spack/test/cmd/tags.py @@ -6,6 +6,7 @@ import spack.main import spack.repo from spack.installer import PackageInstaller +from spack.tag import TagIndex tags = spack.main.SpackCommand("tags") @@ -38,10 +39,7 @@ def test_tags_all_mock_tag_packages(mock_packages): def test_tags_no_tags(monkeypatch): - class tag_path: - tag_index = dict() - - monkeypatch.setattr(spack.repo, "PATH", tag_path) + monkeypatch.setattr(spack.repo.PATH, "tag_index", TagIndex()) out = tags() assert "No tagged" in out diff --git a/lib/spack/spack/test/cmd/test.py b/lib/spack/spack/test/cmd/test.py index 036808c4516e52..609d1f79e718cc 100644 --- a/lib/spack/spack/test/cmd/test.py +++ b/lib/spack/spack/test/cmd/test.py @@ -47,9 +47,7 @@ def test_test_dirty_flag(arguments, expected): assert args.dirty == expected -def test_test_dup_alias( - mock_test_stage, mock_packages, mock_archive, mock_fetch, install_mockery, capfd -): +def test_test_dup_alias(mock_test_stage, mock_packages, mock_archive, mock_fetch, install_mockery): """Ensure re-using an alias fails with suggestion to change.""" install("--fake", 
"libdwarf") @@ -57,8 +55,7 @@ def test_test_dup_alias( spack_test("run", "--alias", "libdwarf", "libdwarf") # Try again with the alias but don't let it fail on the error - with capfd.disabled(): - out = spack_test("run", "--alias", "libdwarf", "libdwarf", fail_on_error=False) + out = spack_test("run", "--alias", "libdwarf", "libdwarf", fail_on_error=False) assert "already exists" in out and "Try another alias" in out @@ -143,7 +140,6 @@ def test_cdash_output_test_error( mock_packages, mock_archive, mock_test_stage, - capfd, ): """Confirm stand-alone test error expected outputs in CDash reporting.""" install("test-error") @@ -184,12 +180,10 @@ def test_cdash_upload_clean_test( assert "" not in content -def test_test_help_does_not_show_cdash_options(mock_test_stage, capsys): +def test_test_help_does_not_show_cdash_options(mock_test_stage): """Make sure `spack test --help` does not describe CDash arguments""" - with pytest.raises(SystemExit): - spack_test("run", "--help") - captured = capsys.readouterr() - assert "CDash URL" not in captured.out + spack_test("run", "--help") + assert "CDash URL" not in spack_test.output def test_test_help_cdash(mock_test_stage): @@ -201,23 +195,22 @@ def test_test_help_cdash(mock_test_stage): def test_test_list_all(mock_packages): """Confirm `spack test list --all` returns all packages with test methods""" pkgs = spack_test("list", "--all").strip().split() - assert set(pkgs) == set( - [ - "fail-test-audit", - "fail-test-audit-deprecated", - "fail-test-audit-docstring", - "fail-test-audit-impl", - "mpich", - "perl-extension", - "printing-package", - "py-extension1", - "py-extension2", - "py-test-callback", - "simple-standalone-test", - "test-error", - "test-fail", - ] - ) + assert set(pkgs) == { + "py-numpy", + "fail-test-audit", + "fail-test-audit-deprecated", + "fail-test-audit-docstring", + "fail-test-audit-impl", + "mpich", + "perl-extension", + "printing-package", + "py-extension1", + "py-extension2", + "py-test-callback", + "simple-standalone-test", + "test-error", + "test-fail", + } def test_test_list(mock_packages, mock_archive, mock_fetch, install_mockery): diff --git a/lib/spack/spack/test/cmd/unit_test.py b/lib/spack/spack/test/cmd/unit_test.py index cc48c6330c968d..8b5df315c110d2 100644 --- a/lib/spack/spack/test/cmd/unit_test.py +++ b/lib/spack/spack/test/cmd/unit_test.py @@ -4,8 +4,12 @@ import os +import pytest + from spack.main import SpackCommand +pytest.skip("Recursive pytest is brittle", allow_module_level=True) + spack_test = SpackCommand("unit-test") cmd_test_py = os.path.join("lib", "spack", "spack", "test", "cmd", "unit_test.py") @@ -30,9 +34,8 @@ def test_list_with_keywords(): assert cmd_test_py in output.strip() -def test_list_long(capsys): - with capsys.disabled(): - output = spack_test("--list-long") +def test_list_long(): + output = spack_test("--list-long") assert "unit_test.py::\n" in output assert "test_list" in output assert "test_list_with_pytest_arg" in output @@ -47,9 +50,8 @@ def test_list_long(capsys): assert "test_test_deptype" in output -def test_list_long_with_pytest_arg(capsys): - with capsys.disabled(): - output = spack_test("--list-long", cmd_test_py) +def test_list_long_with_pytest_arg(): + output = spack_test("--list-long", cmd_test_py) assert "unit_test.py::\n" in output assert "test_list" in output diff --git a/lib/spack/spack/test/cmd/url.py b/lib/spack/spack/test/cmd/url.py index eb05e01373de13..c4dc6f45e1d553 100644 --- a/lib/spack/spack/test/cmd/url.py +++ b/lib/spack/spack/test/cmd/url.py @@ -115,30 +115,29 @@ 
def test_url_summary(mock_packages): assert out_correct_versions == correct_versions -def test_url_stats(capfd, mock_packages): - with capfd.disabled(): - output = url("stats") - npkgs = "%d packages" % len(spack.repo.all_package_names()) - assert npkgs in output - assert "url" in output - assert "git" in output - assert "schemes" in output - assert "versions" in output - assert "resources" in output - - output = url("stats", "--show-issues") - npkgs = "%d packages" % len(spack.repo.all_package_names()) - assert npkgs in output - assert "url" in output - assert "git" in output - assert "schemes" in output - assert "versions" in output - assert "resources" in output - - assert "Package URLs with md5 hashes" in output - assert "needs-relocation" in output - assert "https://cmake.org/files/v3.4/cmake-0.0.0.tar.gz" in output - - assert "Package URLs with http urls" in output - assert "zmpi" in output - assert "http://www.spack-fake-zmpi.org/downloads/zmpi-1.0.tar.gz" in output +def test_url_stats(mock_packages): + output = url("stats") + npkgs = "%d packages" % len(spack.repo.all_package_names()) + assert npkgs in output + assert "url" in output + assert "git" in output + assert "schemes" in output + assert "versions" in output + assert "resources" in output + + output = url("stats", "--show-issues") + npkgs = "%d packages" % len(spack.repo.all_package_names()) + assert npkgs in output + assert "url" in output + assert "git" in output + assert "schemes" in output + assert "versions" in output + assert "resources" in output + + assert "Package URLs with md5 hashes" in output + assert "needs-relocation" in output + assert "https://cmake.org/files/v3.4/cmake-0.0.0.tar.gz" in output + + assert "Package URLs with http urls" in output + assert "zmpi" in output + assert "http://www.spack-fake-zmpi.org/downloads/zmpi-1.0.tar.gz" in output diff --git a/lib/spack/spack/test/cmd/verify.py b/lib/spack/spack/test/cmd/verify.py index 343e3016800b3e..9ba39e1ff48593 100644 --- a/lib/spack/spack/test/cmd/verify.py +++ b/lib/spack/spack/test/cmd/verify.py @@ -18,6 +18,7 @@ import spack.util.spack_json as sjson import spack.verify from spack.main import SpackCommand, SpackCommandError +from spack.spec import Spec verify = SpackCommand("verify") install = SpackCommand("install") @@ -144,3 +145,23 @@ def test_libraries(tmp_path: pathlib.Path, install_mockery, mock_fetch): # And check that we can make it pass by ignoring it. 
assert spack.cmd.verify._verify_libraries(s, ["libf.so"]) is None + + +def test_verify_versions(mock_packages): + missing = "thisisnotapackage" + unknown = "deprecated-versions@=thisisnotaversion" + deprecated = "deprecated-versions@=1.1.0" + good = "deprecated-versions@=1.0.0" + + strs = (missing, unknown, deprecated, good) + + specs = [Spec(c) for c in strs] + [Spec(f"deprecated-client@=1.1.0^{c}") for c in strs] + for spec in specs: + spec._mark_concrete() + + msg_lines = spack.cmd.verify._verify_version(specs) + assert "3 installed packages have unknown/deprecated" in msg_lines[0] + assert "thisisnotapackage" in msg_lines[1] + assert "Cannot load package" in msg_lines[1] + assert "version thisisnotaversion unknown to Spack" in msg_lines[2] + assert "deprecated version 1.1.0" in msg_lines[3] diff --git a/lib/spack/spack/test/cmd/versions.py b/lib/spack/spack/test/cmd/versions.py index 00214f23f5a796..4680bd8c35b838 100644 --- a/lib/spack/spack/test/cmd/versions.py +++ b/lib/spack/spack/test/cmd/versions.py @@ -4,6 +4,7 @@ import pytest +import spack.url from spack.main import SpackCommand from spack.version import Version @@ -13,27 +14,31 @@ pytestmark = [pytest.mark.usefixtures("mock_packages")] +def _mock_find_versions_of_archive(*args, **kwargs): + return { + Version("1.3.1"): "https://zlib.net/zlib-1.3.1.tar.gz", + Version("1.3"): "https://zlib.net/zlib-1.3.tar.gz", + Version("1.2.13"): "https://zlib.net/zlib-1.2.13.tar.gz", + } + + def test_safe_versions(): """Only test the safe versions of a package.""" + assert versions("--safe", "zlib") == " 1.2.11\n 1.2.8\n 1.2.3\n" - versions("--safe", "zlib") - -@pytest.mark.maybeslow -def test_remote_versions(): +def test_remote_versions(monkeypatch): """Test a package for which remote versions should be available.""" - - versions("zlib") + monkeypatch.setattr(spack.url, "find_versions_of_archive", _mock_find_versions_of_archive) + assert versions("zlib") == " 1.2.11\n 1.2.8\n 1.2.3\n 1.3.1\n 1.3\n 1.2.13\n" -@pytest.mark.maybeslow -def test_remote_versions_only(): +def test_remote_versions_only(monkeypatch): """Test a package for which remote versions should be available.""" - - versions("--remote", "zlib") + monkeypatch.setattr(spack.url, "find_versions_of_archive", _mock_find_versions_of_archive) + assert versions("--remote", "zlib") == " 1.3.1\n 1.3\n 1.2.13\n" -@pytest.mark.usefixtures("mock_packages") def test_new_versions_only(monkeypatch): """Test a package for which new versions should be available.""" from spack_repo.builtin_mock.packages.brillig.package import Brillig # type: ignore[import] @@ -62,22 +67,27 @@ def mock_fetch_remote_versions(*args, **kwargs): assert v.strip(" \n\t") == "99.99.99\n 3.2.1" -@pytest.mark.maybeslow -def test_no_unchecksummed_versions(): +def test_no_unchecksummed_versions(monkeypatch): """Test a package for which no unchecksummed versions are available.""" + def mock_find_versions_of_archive(*args, **kwargs): + """Mock find_versions_of_archive to avoid network calls.""" + # Return some fake versions for bzip2 + return { + Version("1.0.8"): "https://sourceware.org/pub/bzip2/bzip2-1.0.8.tar.gz", + Version("1.0.7"): "https://sourceware.org/pub/bzip2/bzip2-1.0.7.tar.gz", + } + + monkeypatch.setattr(spack.url, "find_versions_of_archive", mock_find_versions_of_archive) + versions("bzip2") -@pytest.mark.maybeslow def test_versions_no_url(): """Test a package with versions but without a ``url`` attribute.""" + assert versions("attributes-foo-app") == " 1.0\n" - versions("attributes-foo-app") - 
-@pytest.mark.maybeslow def test_no_versions_no_url(): """Test a package without versions or a ``url`` attribute.""" - - versions("no-url-or-version") + assert versions("no-url-or-version") == "" diff --git a/lib/spack/spack/test/cmd/view.py b/lib/spack/spack/test/cmd/view.py index 8e8b7238e61dd8..6717abb97db6dc 100644 --- a/lib/spack/spack/test/cmd/view.py +++ b/lib/spack/spack/test/cmd/view.py @@ -9,6 +9,7 @@ import pytest import spack.concretize +import spack.main import spack.util.spack_yaml as s_yaml from spack.installer import PackageInstaller from spack.llnl.util.filesystem import _windows_can_symlink @@ -200,7 +201,7 @@ def test_view_fails_with_missing_projections_file(tmp_path: pathlib.Path): viewpath = str(tmp_path / "view") (tmp_path / "view").mkdir() projection_file = str(tmp_path / "nonexistent") - with pytest.raises(SystemExit): + with pytest.raises(spack.main.SpackCommandError): view("symlink", "--projection-file", projection_file, viewpath, "foo") diff --git a/lib/spack/spack/test/cmd_extensions.py b/lib/spack/spack/test/cmd_extensions.py index 32208ab8bc4549..93dcf527535d90 100644 --- a/lib/spack/spack/test/cmd_extensions.py +++ b/lib/spack/spack/test/cmd_extensions.py @@ -180,7 +180,7 @@ def test_multi_extension_search(hello_world_extension, extension_creator): assert ("Hello world") in spack.main.SpackCommand("hello-world")() -def test_duplicate_module_load(hello_world_cmd, capsys): +def test_duplicate_module_load(hello_world_cmd, capfd): """Ensure duplicate module load attempts are successful. The command module will already have been loaded once by the @@ -190,7 +190,7 @@ def test_duplicate_module_load(hello_world_cmd, capsys): args = [] hw_cmd = spack.cmd.get_command(hello_world_cmd.command_name) hw_cmd(parser, args) - captured = capsys.readouterr() + captured = capfd.readouterr() assert captured == ("Hello world!\n", "") @@ -245,7 +245,7 @@ def test_extension_naming(tmp_path: pathlib.Path, extension_path, expected_excep spack.cmd.get_module("no-such-command") -def test_missing_command_function(extension_creator, capsys): +def test_missing_command_function(extension_creator, capfd): """Ensure we die as expected if a command module does not have the expected command function defined. """ @@ -253,7 +253,7 @@ def test_missing_command_function(extension_creator, capsys): extension.add_command("bad-cmd", """\ndescription = "Empty command implementation"\n""") with pytest.raises(SystemExit): spack.cmd.get_module("bad-cmd") - capture = capsys.readouterr() + capture = capfd.readouterr() assert "must define function 'bad_cmd'." 
in capture[1] diff --git a/lib/spack/spack/test/concretization/compiler_runtimes.py b/lib/spack/spack/test/concretization/compiler_runtimes.py index 62c457369855a4..8343494ea18b14 100644 --- a/lib/spack/spack/test/concretization/compiler_runtimes.py +++ b/lib/spack/spack/test/concretization/compiler_runtimes.py @@ -16,14 +16,26 @@ import spack.solver.asp import spack.spec from spack.environment.environment import ViewDescriptor +from spack.solver.reuse import SpecFilter, create_external_parser +from spack.solver.runtimes import external_config_with_implicit_externals from spack.version import Version -def _concretize_with_reuse(*, root_str, reused_str): +def _concretize_with_reuse(*, root_str, reused_str, config): reused_spec = spack.concretize.concretize_one(reused_str) + packages_with_externals = external_config_with_implicit_externals(config) + completion_mode = config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() setup = spack.solver.asp.SpackSolverSetup(tests=False) driver = spack.solver.asp.PyclingoDriver() - result, _, _ = driver.solve(setup, [spack.spec.Spec(f"{root_str}")], reuse=[reused_spec]) + result, _, _ = driver.solve( + setup, [spack.spec.Spec(f"{root_str}")], reuse=[reused_spec] + external_specs + ) root = result.specs[0] return root, reused_spec @@ -107,13 +119,17 @@ def test_external_nodes_do_not_have_runtimes(runtime_repo, mutable_config, tmp_p ], ) @pytest.mark.regression("44444") -def test_reusing_specs_with_gcc_runtime(root_str, reused_str, expected, nruntime, runtime_repo): +def test_reusing_specs_with_gcc_runtime( + root_str, reused_str, expected, nruntime, runtime_repo, mutable_config +): """Tests that we can reuse specs with a "gcc-runtime" leaf node. In particular, checks that the semantic for gcc-runtimes versions accounts for reused packages too. Reusable runtime versions should be lower, or equal, to that of parent nodes. """ - root, reused_spec = _concretize_with_reuse(root_str=root_str, reused_str=reused_str) + root, reused_spec = _concretize_with_reuse( + root_str=root_str, reused_str=reused_str, config=mutable_config + ) runtime_a = root.dependencies("gcc-runtime")[0] assert runtime_a.satisfies(expected["pkg-a"]), runtime_a.tree() @@ -133,12 +149,21 @@ def test_reusing_specs_with_gcc_runtime(root_str, reused_str, expected, nruntime ], ) def test_views_can_handle_duplicate_runtime_nodes( - root_str, reused_str, expected, not_expected, runtime_repo, tmp_path: pathlib.Path, monkeypatch + root_str, + reused_str, + expected, + not_expected, + runtime_repo, + tmp_path: pathlib.Path, + monkeypatch, + mutable_config, ): """Tests that an environment is able to select the latest version of a runtime node to be linked in a view, in case more than one compatible version is in the DAG. 
""" - root, reused_spec = _concretize_with_reuse(root_str=root_str, reused_str=reused_str) + root, reused_spec = _concretize_with_reuse( + root_str=root_str, reused_str=reused_str, config=mutable_config + ) # Mock the installation status to allow selecting nodes for the view monkeypatch.setattr(spack.spec.Spec, "installed", True) @@ -164,11 +189,13 @@ def test_runtimes_can_be_concretized_as_standalone(runtime_repo): assert gcc_runtime.version == gcc.version -def test_runtimes_are_not_reused_if_compiler_not_used(runtime_repo): +def test_runtimes_are_not_reused_if_compiler_not_used(runtime_repo, mutable_config): """Tests that, if we can reuse specs with a more recent runtime version than the compiler we asked for, we will not end-up with a DAG using the recent runtime, and the old compiler. """ - root, reused = _concretize_with_reuse(root_str="pkg-a %gcc@9", reused_str="pkg-a %gcc@10") + root, reused = _concretize_with_reuse( + root_str="pkg-a %gcc@9", reused_str="pkg-a %gcc@10", config=mutable_config + ) assert "gcc-runtime" in root gcc_runtime, gcc = root["gcc-runtime"], root["gcc"] diff --git a/lib/spack/spack/test/concretization/core.py b/lib/spack/spack/test/concretization/core.py index bf80317da71e7a..548a774ddd23e8 100644 --- a/lib/spack/spack/test/concretization/core.py +++ b/lib/spack/spack/test/concretization/core.py @@ -1,9 +1,12 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) +import difflib +import json import os import pathlib import platform +import re import sys from typing import Any, Dict @@ -12,13 +15,15 @@ import spack.vendor.archspec.cpu import spack.vendor.jinja2 +import spack.archspec import spack.binary_distribution import spack.cmd import spack.compilers.config +import spack.compilers.libraries import spack.concretize import spack.config import spack.deptypes as dt -import spack.detection +import spack.environment as ev import spack.error import spack.hash_types as ht import spack.llnl.util.lang @@ -31,14 +36,16 @@ import spack.solver.core import spack.solver.reuse import spack.solver.runtimes -import spack.solver.versions import spack.spec -import spack.store -import spack.test.conftest import spack.util.file_cache +import spack.util.hash import spack.util.spack_yaml as syaml import spack.variant as vt +from spack.externals import ExternalDependencyError from spack.installer import PackageInstaller +from spack.solver.asp import Result +from spack.solver.reuse import SpecFilter, create_external_parser +from spack.solver.runtimes import external_config_with_implicit_externals from spack.spec import Spec from spack.test.conftest import RepoBuilder from spack.version import Version, VersionList, ver @@ -79,6 +86,10 @@ def check_concretize(abstract_spec): return concrete +def _true(): + return True + + @pytest.fixture(scope="function", autouse=True) def binary_compatibility(monkeypatch, request): """Selects whether we use OS compatibility for binaries, or libc compatibility.""" @@ -93,9 +104,9 @@ def binary_compatibility(monkeypatch, request): # Databases have been created without glibc support return - monkeypatch.setattr(spack.solver.core, "using_libc_compatibility", lambda: True) - monkeypatch.setattr(spack.solver.runtimes, "using_libc_compatibility", lambda: True) - monkeypatch.setattr(spack.solver.asp, "using_libc_compatibility", lambda: True) + monkeypatch.setattr(spack.solver.core, "using_libc_compatibility", _true) + monkeypatch.setattr(spack.solver.runtimes, "using_libc_compatibility", 
_true) + monkeypatch.setattr(spack.solver.asp, "using_libc_compatibility", _true) @pytest.fixture( @@ -155,6 +166,9 @@ def current_host(request, monkeypatch): cpu, _, is_preference = request.param.partition("-") monkeypatch.setattr(spack.platforms.Test, "default", cpu) + monkeypatch.setattr( + spack.archspec, "HOST_TARGET_FAMILY", spack.vendor.archspec.cpu.TARGETS["x86_64"] + ) if not is_preference: target = spack.vendor.archspec.cpu.TARGETS[cpu] monkeypatch.setattr(spack.vendor.archspec.cpu, "host", lambda: target) @@ -288,18 +302,28 @@ def change(self, changes=None): @pytest.fixture() def clang12_with_flags(compiler_factory): - c = compiler_factory(spec="llvm@12.2.0 os=redhat6") + c = compiler_factory(spec="llvm@12.2.0+clang os=redhat6") c["extra_attributes"]["flags"] = {"cflags": "-O3", "cxxflags": "-O3"} return c @pytest.fixture() def gcc11_with_flags(compiler_factory): - c = compiler_factory(spec="gcc@11.1.0 os=redhat6") + c = compiler_factory(spec="gcc@11.1.0 languages:=c,c++,fortran os=redhat6") c["extra_attributes"]["flags"] = {"cflags": "-O0 -g", "cxxflags": "-O0 -g", "fflags": "-O0 -g"} return c +def weights_from_result(result: Result, *, name: str) -> Dict[str, int]: + weights = {} + for x in result.criteria: + if x.name == name and x.kind == spack.solver.asp.OptimizationKind.CONCRETE: + weights["reused"] = x.value + elif x.name == name and x.kind == spack.solver.asp.OptimizationKind.BUILD: + weights["built"] = x.value + return weights + + # This must use the mutable_config fixture because the test # adjusting_default_target_based_on_compiler uses the current_host fixture, # which changes the config. @@ -468,6 +492,104 @@ def test_mixing_compilers_only_affects_subdag(self): assert x.satisfies("%clang") is not expected_gcc assert x.satisfies("%gcc") is expected_gcc + def test_disable_mixing_prevents_mixing(self): + with spack.config.override("concretizer", {"compiler_mixing": False}): + with pytest.raises(spack.error.UnsatisfiableSpecError): + spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-bottom%gcc") + + def test_disable_mixing_override_by_package(self): + with spack.config.override("concretizer", {"compiler_mixing": ["dt-diamond-bottom"]}): + root = spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-bottom%gcc") + assert root.satisfies("%clang") + assert root["dt-diamond-bottom"].satisfies("%gcc") + assert root["dt-diamond-left"].satisfies("%clang") + + with pytest.raises(spack.error.UnsatisfiableSpecError): + spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-left%gcc") + + def test_disable_mixing_reuse(self, fake_db_install): + # Install a spec + left = spack.concretize.concretize_one("dt-diamond-left %gcc") + fake_db_install(left) + assert left.satisfies("%c=gcc") + lefthash = left.dag_hash()[:7] + + # Check if mixing works when it's allowed + spack.concretize.concretize_one(f"dt-diamond%clang ^/{lefthash}") + + # Now try to use it with compiler mixing disabled + with spack.config.override("concretizer", {"compiler_mixing": False}): + with pytest.raises(spack.error.UnsatisfiableSpecError): + spack.concretize.concretize_one(f"dt-diamond%clang ^/{lefthash}") + + # Should be able to reuse if the compilers match + spack.concretize.concretize_one(f"dt-diamond%gcc ^/{lefthash}") + + def test_disable_mixing_reuse_and_built(self, fake_db_install): + r"""In this case we have + + x + |\ + y z + + Where y is a link dependency and z is a build dependency. 
+ We install y with a compiler c1, and we make sure we cannot + ask for `x%c2 ^z%c1 ^/y` + + This looks similar to `test_disable_mixing_reuse`. But the + compiler nodes are handled differently in this case: this + is the only test that explicitly exercises compiler unmixing + rule #2. + """ + dep1 = spack.concretize.concretize_one("libdwarf %gcc") + fake_db_install(dep1) + assert dep1.satisfies("%c=gcc") + dep1hash = dep1.dag_hash()[:7] + + spack.concretize.concretize_one(f"mixing-parent%clang ^cmake%gcc ^/{dep1hash}") + + with spack.config.override("concretizer", {"compiler_mixing": False}): + with pytest.raises(spack.error.UnsatisfiableSpecError, match="mixing is disabled"): + spack.concretize.concretize_one(f"mixing-parent%clang ^cmake%gcc ^/{dep1hash}") + + def test_disable_mixing_allow_compiler_link(self): + """Check if we can use a compiler when mixing is disabled, and + still depend on a separate compiler package (in the latter case + not using it as a compiler but rather for some utility it + provides). + """ + with spack.config.override("concretizer", {"compiler_mixing": False}): + x = spack.concretize.concretize_one("llvm-client%gcc") + assert x.satisfies("%cxx=gcc") + assert x.satisfies("%c=gcc") + assert "llvm" in x + + def test_disable_mixing_env( + self, mutable_mock_env_path, tmp_path: pathlib.Path, mock_packages, mutable_config + ): + spack_yaml = tmp_path / ev.manifest_name + spack_yaml.write_text( + """\ +spack: + specs: + - dt-diamond%gcc + - dt-diamond%clang + concretizer: + compiler_mixing: false + unify: when_possible +""" + ) + + with ev.Environment(tmp_path) as e: + e.concretize() + for root in e.roots(): + if root.satisfies("%gcc"): + assert root["dt-diamond-left"].satisfies("%gcc") + assert root["dt-diamond-bottom"].satisfies("%gcc") + else: + assert root["dt-diamond-left"].satisfies("%llvm") + assert root["dt-diamond-bottom"].satisfies("%llvm") + def test_compiler_inherited_upwards(self): spec = spack.concretize.concretize_one("dt-diamond ^dt-diamond-bottom%clang") for x in spec.traverse(deptype=("link", "run")): @@ -480,11 +602,15 @@ def test_architecture_deep_inheritance(self, mock_targets, compiler_factory): information from the root even when partial architecture information is provided by an intermediate dependency.
""" - cnl_compiler = compiler_factory(spec="gcc@4.5.0 os=CNL target=nocona") + cnl_compiler = compiler_factory( + spec="gcc@4.5.0 os=CNL languages:=c,c++,fortran target=nocona" + ) with spack.config.override("packages", {"gcc": {"externals": [cnl_compiler]}}): spec_str = "mpileaks os=CNL target=nocona %gcc@4.5.0 ^dyninst os=CNL ^callpath os=CNL" spec = spack.concretize.concretize_one(spec_str) for s in spec.traverse(root=False, deptype=("link", "run")): + if s.external: + continue assert s.architecture.target == spec.architecture.target def test_compiler_flags_from_user_are_grouped(self): @@ -717,7 +843,7 @@ def test_concretize_propagate_variant_second_level_dep_not_in_source(self): def test_no_matching_compiler_specs(self): s = Spec("pkg-a %gcc@0.0.0") - with pytest.raises(spack.solver.asp.UnsatisfiableSpecError): + with pytest.raises(spack.solver.asp.InvalidVersionError): spack.concretize.concretize_one(s) def test_no_compilers_for_arch(self): @@ -1599,7 +1725,7 @@ def test_sticky_variant_in_package(self): [ ("sticky-variant@1.0+allow-gcc", True), ("sticky-variant@1.0~allow-gcc", False), - ("sticky-variant@1.0", False), + # FIXME (externals as concrete) ("sticky-variant@1.0", False), ], ) def test_sticky_variant_in_external(self, spec, allow_gcc): @@ -1871,37 +1997,226 @@ def test_misleading_error_message_on_version(self, mutable_database): solver.driver.solve(setup, [root_spec], reuse=reusable_specs) @pytest.mark.regression("31148") - def test_version_weight_and_provenance(self): + def test_version_weight_and_provenance(self, mutable_config): """Test package preferences during concretization.""" reusable_specs = [ spack.concretize.concretize_one(spec_str) for spec_str in ("pkg-b@0.9", "pkg-b@1.0") ] root_spec = Spec("pkg-a foobar=bar") + packages_with_externals = external_config_with_implicit_externals(mutable_config) + completion_mode = mutable_config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() with spack.config.override("concretizer:reuse", True): solver = spack.solver.asp.Solver() setup = spack.solver.asp.SpackSolverSetup() - result, _, _ = solver.driver.solve(setup, [root_spec], reuse=reusable_specs) + result, _, _ = solver.driver.solve( + setup, [root_spec], reuse=reusable_specs + external_specs + ) # Version badness should be > 0 only for reused specs. For instance, for pkg-b # the version provenance is: # - # version_declared("pkg-b","1.0",0,"package_py"). - # version_declared("pkg-b","0.9",1,"package_py"). - # version_declared("pkg-b","1.0",2,"installed"). - # version_declared("pkg-b","0.9",3,"installed"). - weights = {} - for x in [x for x in result.criteria if x.name == "version badness (non roots)"]: - if x.kind == spack.solver.asp.OptimizationKind.CONCRETE: - weights["reused"] = x.value - else: - weights["built"] = x.value + # pkg_fact("pkg-b", version_declared("1.0", 0)). + # pkg_fact("pkg-b", version_origin("1.0", "installed")). + # pkg_fact("pkg-b", version_origin("1.0", "package_py")). + # pkg_fact("pkg-b", version_declared("0.9", 1)). + # pkg_fact("pkg-b", version_origin("0.9", "installed")). + # pkg_fact("pkg-b", version_origin("0.9", "package_py")). 
- assert weights["reused"] > 2 and weights["built"] == 0 + weights = weights_from_result(result, name="version badness (non roots)") + assert weights["reused"] == 3 and weights["built"] == 0 result_spec = result.specs[0] assert result_spec.satisfies("^pkg-b@1.0") assert result_spec["pkg-b"].dag_hash() == reusable_specs[1].dag_hash() + @pytest.mark.regression("51112") + def test_variant_penalty(self, mutable_config): + """Tests variant penalties during concretization.""" + packages_with_externals = external_config_with_implicit_externals(mutable_config) + completion_mode = mutable_config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() + + # The variant definition is similar to + # + # % Variant cxxstd in package trilinos + # pkg_fact("trilinos",variant_definition("cxxstd",195)). + # variant_type(195,"single"). + # pkg_fact("trilinos",variant_default_value_from_package_py(195,"14")). + # pkg_fact("trilinos",variant_penalty(195,"14",1)). + # pkg_fact("trilinos",variant_penalty(195,"17",2)). + # pkg_fact("trilinos",variant_penalty(195,"20",3)). + # pkg_fact("trilinos",variant_possible_value(195,"14")). + # pkg_fact("trilinos",variant_possible_value(195,"17")). + # pkg_fact("trilinos",variant_possible_value(195,"20")). + + solver = spack.solver.asp.Solver() + setup = spack.solver.asp.SpackSolverSetup() + + # Ensure that since the default value of 14 cannot be taken, we select "17" + result, _, _ = solver.driver.solve(setup, [Spec("trilinos")], reuse=external_specs) + + weights = weights_from_result(result, name="variant penalty (roots)") + assert weights["reused"] == 0 and weights["built"] == 2 + + trilinos = result.specs[0] + assert trilinos.satisfies("cxxstd=17") + + # If we disable "17", then "20" is next, and the penalty is higher + result, _, _ = solver.driver.solve( + setup, [Spec("trilinos+disable17")], reuse=external_specs + ) + + weights = weights_from_result(result, name="variant penalty (roots)") + assert weights["reused"] == 0 and weights["built"] == 3 + + trilinos = result.specs[0] + assert trilinos.satisfies("cxxstd=20") + + # Test a disjoint set of values to ensure declared package order is respected + result, _, _ = solver.driver.solve(setup, [Spec("mvapich2")], reuse=external_specs) + + weights = weights_from_result(result, name="variant penalty (roots)") + assert weights["reused"] == 0 and weights["built"] == 0 + mvapich2 = result.specs[0] + assert mvapich2.satisfies("file_systems=auto") + + result, _, _ = solver.driver.solve(setup, [Spec("mvapich2+noauto")], reuse=external_specs) + + weights = weights_from_result(result, name="variant penalty (roots)") + assert weights["reused"] == 0 and weights["built"] == 2 + mvapich2 = result.specs[0] + assert mvapich2.satisfies("file_systems=lustre") + + @pytest.mark.regression("51267") + @pytest.mark.parametrize( + "packages_config,expected", + [ + # Two preferences on different virtuals + ( + """ + packages: + c: + prefer: + - clang + mpi: + prefer: + - mpich2 + """, + [ + 'provider_weight_from_config("mpi","mpich2",0).', + 'provider_weight_from_config("c","clang",0).', + ], + ), + # A requirement and a preference on the same virtual + ( + """ + packages: + c: + require: + - gcc + prefer: + - clang + """, + [ + 'provider_weight_from_config("c","gcc",0).', + 'provider_weight_from_config("c","clang",1).', + ], + ),
+ ( + """ + packages: + c: + require: + - clang + prefer: + - gcc + """, + [ + 'provider_weight_from_config("c","gcc",1).', + 'provider_weight_from_config("c","clang",0).', + ], + ), + # Multiple requirements with priorities + ( + """ + packages: + all: + providers: + mpi: [low-priority-mpi] + mpi: + require: + - any_of: [mpich2, zmpi] + prefer: + - mpich + """, + [ + 'provider_weight_from_config("mpi","mpich2",0).', + 'provider_weight_from_config("mpi","zmpi",1).', + 'provider_weight_from_config("mpi","mpich",2).', + 'provider_weight_from_config("mpi","low-priority-mpi",3).', + ], + ), + # Configuration with conflicts + ( + """ + packages: + all: + providers: + mpi: [mpich, low-priority-mpi] + mpi: + require: + - mpich2 + conflict: + - mpich + """, + [ + 'provider_weight_from_config("mpi","mpich2",0).', + 'provider_weight_from_config("mpi","low-priority-mpi",1).', + ], + ), + ( + """ + packages: + all: + providers: + mpi: [mpich, low-priority-mpi] + mpi: + require: + - mpich2 + conflict: + - mpich@1 + """, + [ + 'provider_weight_from_config("mpi","mpich2",0).', + 'provider_weight_from_config("mpi","mpich",1).', + 'provider_weight_from_config("mpi","low-priority-mpi",2).', + ], + ), + ], + ) + def test_requirements_and_weights(self, packages_config, expected, mutable_config): + """Checks that requirements and strong preferences on virtual packages influence the + weights for providers, even if "package preferences" are not set consistently. + """ + packages_yaml = syaml.load_config(packages_config) + mutable_config.set("packages", packages_yaml["packages"]) + + setup = spack.solver.asp.SpackSolverSetup() + asp_problem = setup.setup([Spec("mpileaks")], reuse=[], allow_deprecated=False).asp_problem + + assert all(x in asp_problem for x in expected) + def test_reuse_succeeds_with_config_compatible_os(self): root_spec = Spec("pkg-b") s = spack.concretize.concretize_one(root_spec) @@ -2012,7 +2327,7 @@ def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch """ # Add a conflict to "mpich" that match an already installed "mpich~debug" pkg_cls = spack.repo.PATH.get_pkg_class("mpich") - monkeypatch.setitem(pkg_cls.conflicts, Spec(), [("~debug", None)]) + monkeypatch.setitem(pkg_cls.conflicts, Spec(), [(Spec("~debug"), None)]) # If we concretize with --fresh the conflict is taken into account with spack.config.override("concretizer:reuse", False): @@ -2041,130 +2356,29 @@ def test_require_targets_are_allowed(self, mutable_config, mutable_database): for s in spec.traverse(deptype=("link", "run")): assert s.satisfies(f"target={required_target}") - def test_external_python_extensions_have_dependency(self): - """Test that python extensions have access to a python dependency - - when python is otherwise in the DAG""" - external_conf = { - "py-extension1": { - "buildable": False, - "externals": [{"spec": "py-extension1@2.0", "prefix": "/fake"}], - } - } - spack.config.set("packages", external_conf) - - spec = spack.concretize.concretize_one("py-extension2") - - assert "python" in spec["py-extension1"] - assert spec["python"] == spec["py-extension1"]["python"] - target = spack.platforms.test.Test.default - @pytest.mark.parametrize( - "python_spec", - [ - "python@configured", - "python@configured platform=test", - "python@configured os=debian", - "python@configured target=%s" % target, - ], - ) - def test_external_python_extension_find_dependency_from_config(self, python_spec): - fake_path = os.path.sep + "fake" - - external_conf = { - "py-extension1": { - "buildable": False, - 
"externals": [{"spec": "py-extension1@2.0", "prefix": fake_path}], - }, - "python": {"externals": [{"spec": python_spec, "prefix": fake_path}]}, - } - spack.config.set("packages", external_conf) - - spec = spack.concretize.concretize_one("py-extension1") - - assert "python" in spec["py-extension1"] - assert spec["python"].prefix == fake_path - # The spec is not equal to Spec("python@configured") because it gets a - # namespace and an external prefix before marking concrete - assert spec["python"].satisfies(python_spec) - - def test_external_python_extension_find_dependency_from_installed(self, monkeypatch): - fake_path = os.path.sep + "fake" - - external_conf = { - "py-extension1": { - "buildable": False, - "externals": [{"spec": "py-extension1@2.0", "prefix": fake_path}], - }, - "python": { - "buildable": False, - "externals": [{"spec": "python@installed", "prefix": fake_path}], - }, - } - spack.config.set("packages", external_conf) - - # install python external - python = spack.concretize.concretize_one("python") - - def query(*args, **kwargs): - return [python] - - monkeypatch.setattr(spack.store.STORE.db, "query", query) - - # ensure that we can't be faking this by getting it from config - external_conf.pop("python") - spack.config.set("packages", external_conf) - - spec = spack.concretize.concretize_one("py-extension1") - - assert "python" in spec["py-extension1"] - assert spec["python"].prefix == fake_path - # The spec is not equal to Spec("python@configured") because it gets a - # namespace and an external prefix before marking concrete - assert spec["python"].satisfies(python) - - def test_external_python_extension_find_dependency_from_detection(self, monkeypatch): - """Test that python extensions have access to a python dependency - - when python isn't otherwise in the DAG""" - prefix = os.path.sep + "fake" - python_spec = Spec.from_detection("python@=detected", external_path=prefix) - - def find_fake_python(classes, path_hints, **kwargs): - return { - "python": [Spec.from_detection("python@=detected", external_path=path_hints[0])] - } - - monkeypatch.setattr(spack.detection, "by_path", find_fake_python) - external_conf = { - "py-extension1": { - "buildable": False, - "externals": [{"spec": "py-extension1@2.0", "prefix": "%s" % prefix}], - } - } - spack.config.set("packages", external_conf) - - spec = spack.concretize.concretize_one("py-extension1") - - assert "python" in spec["py-extension1"] - assert spec["python"].prefix == prefix - assert spec["python"].external - assert spec["python"].satisfies(python_spec) - - def test_external_python_extension_find_unified_python(self): - """Test that python extensions use the same python as other specs in unified env""" - external_conf = { - "py-extension1": { - "buildable": False, - "externals": [{"spec": "py-extension1@2.0", "prefix": os.path.sep + "fake"}], - } - } - spack.config.set("packages", external_conf) + def test_external_python_extension_find_dependency_from_config(self, mutable_config, tmp_path): + """Tests that an external Python extension gets a dependency on Python.""" + packages_yaml = f""" +packages: + py-extension1: + buildable: false + externals: + - spec: py-extension1@2.0 + prefix: {tmp_path / "py-extension1"} + python: + externals: + - spec: python@3.8.13 + prefix: {tmp_path / "python"} +""" + configuration = syaml.load_config(packages_yaml) + mutable_config.set("packages", configuration["packages"]) + py_extension = spack.concretize.concretize_one("py-extension1") - abstract_specs = [Spec(s) for s in 
["py-extension1", "python"]] - specs = spack.concretize._concretize_specs_together(abstract_specs) - assert specs[0]["python"] == specs[1]["python"] + assert py_extension.external + assert py_extension["python"].external + assert py_extension["python"].prefix == str(tmp_path / "python") @pytest.mark.regression("36190") @pytest.mark.parametrize( @@ -2175,15 +2389,22 @@ def test_external_python_extension_find_unified_python(self): ["v1-consumer ^conditional-provider@1:1 +disable-v1"], ], ) - def test_result_specs_is_not_empty(self, specs): + def test_result_specs_is_not_empty(self, mutable_config, specs): """Check that the implementation of "result.specs" is correct in cases where we know a concretization exists. """ specs = [Spec(s) for s in specs] + packages_with_externals = external_config_with_implicit_externals(mutable_config) + completion_mode = mutable_config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() solver = spack.solver.asp.Solver() setup = spack.solver.asp.SpackSolverSetup() - result, _, _ = solver.driver.solve(setup, specs, reuse=[]) - + result, _, _ = solver.driver.solve(setup, specs, reuse=external_specs) assert result.specs @pytest.mark.regression("38664") @@ -2451,7 +2672,8 @@ def test_exclude_specs_from_reuse(self, monkeypatch): # Exclude dyninst from reuse, so we expect that the old version is not taken into account with spack.config.override( - "concretizer:reuse", {"from": [{"type": "buildcache", "exclude": ["dyninst"]}]} + "concretizer:reuse", + {"from": [{"type": "buildcache", "exclude": ["dyninst"]}, {"type": "external"}]}, ): result = spack.concretize.concretize_one(request_str) @@ -2984,16 +3206,16 @@ def test_concretization_version_order(): result = [ v for v, _ in sorted( - versions, key=spack.solver.versions.concretization_version_order, reverse=True + versions, key=spack.package_base.concretization_version_order, reverse=True ) ] assert result == [ Version("0.9"), # preferred + Version("2.0"), # deprecation is accounted for separately Version("1.1"), # latest non-deprecated final version Version("1.0"), # latest non-deprecated final version Version("1.1alpha1"), # prereleases Version("develop"), # likely development version - Version("2.0"), # deprecated ] @@ -3005,14 +3227,16 @@ def test_concretization_version_order(): {"roots": True, "include": ["^mpich"]}, ["^mpich"], ["^mpich2", "^zmpi"], - 2, + # Reused from store + externals + 2 + 15, ), ( ["mpileaks"], {"roots": True, "include": ["externaltest"]}, ["externaltest"], ["^mpich", "^mpich2", "^zmpi"], - 1, + # Reused from store + externals + 1 + 15, ), ], ) @@ -3024,22 +3248,41 @@ def test_filtering_reused_specs( """Tests that we can select which specs are to be reused, using constraints as filters""" # Assume all specs have a runtime dependency mutable_config.set("concretizer:reuse", reuse_yaml) - selector = spack.solver.asp.ReusableSpecsSelector(mutable_config) + packages_with_externals = spack.solver.runtimes.external_config_with_implicit_externals( + mutable_config + ) + completion_mode = mutable_config.get("concretizer:externals:completion") + selector = spack.solver.asp.ReusableSpecsSelector( + mutable_config, + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + ) specs = 
selector.reusable_specs(roots) assert len(specs) == expected_length for constraint in expected: - assert all(x.satisfies(constraint) for x in specs) + assert all(x.satisfies(constraint) for x in specs if not x.external) for constraint in not_expected: - assert all(not x.satisfies(constraint) for x in specs) + assert all(not x.satisfies(constraint) for x in specs if not x.external) @pytest.mark.usefixtures("mutable_database", "mock_store") @pytest.mark.parametrize( "reuse_yaml,expected_length", - [({"from": [{"type": "local"}]}, 19), ({"from": [{"type": "buildcache"}]}, 0)], + [ + ( + {"from": [{"type": "local"}]}, + # Local store + externals + 19 + 15, + ), + ( + {"from": [{"type": "buildcache"}]}, + # Local store + externals + 0 + 15, + ), + ], ) @pytest.mark.not_on_windows("Expected length is different on Windows") def test_selecting_reused_sources( @@ -3048,7 +3291,15 @@ def test_selecting_reused_sources( """Tests that we can turn on/off sources of reusable specs""" # Assume all specs have a runtime dependency mutable_config.set("concretizer:reuse", reuse_yaml) - selector = spack.solver.asp.ReusableSpecsSelector(mutable_config) + packages_with_externals = spack.solver.runtimes.external_config_with_implicit_externals( + mutable_config + ) + completion_mode = mutable_config.get("concretizer:externals:completion") + selector = spack.solver.asp.ReusableSpecsSelector( + mutable_config, + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + ) specs = selector.reusable_specs(["mpileaks"]) assert len(specs) == expected_length @@ -3068,7 +3319,7 @@ def test_selecting_reused_sources( def test_spec_filters(specs, include, exclude, expected): specs = [Spec(x) for x in specs] expected = [Spec(x) for x in expected] - f = spack.solver.asp.SpecFilter( + f = spack.solver.reuse.SpecFilter( factory=lambda: specs, is_usable=lambda x: True, include=include, exclude=exclude ) assert f.selected_specs() == expected @@ -3135,6 +3386,15 @@ def test_spec_unification(unify, mutable_config, mock_packages): _ = spack.cmd.parse_specs([a_restricted, b], concretize=True) +@pytest.mark.not_on_windows("parallelism unsupported on Windows") +@pytest.mark.enable_parallelism +def test_parallel_concretization(mutable_config, mock_packages): + """Test whether parallel unify-false style concretization works.""" + specs = [(Spec("pkg-a"), None), (Spec("pkg-b"), None)] + result = spack.concretize.concretize_separately(specs) + assert {s.name for s, _ in result} == {"pkg-a", "pkg-b"} + + @pytest.mark.usefixtures("mutable_config", "mock_packages", "do_not_check_runtimes_on_reuse") @pytest.mark.parametrize( "spec_str, error_type", @@ -3229,59 +3489,6 @@ def test_commit_variant_can_be_reused(installed_commit, incoming_commit, reusabl assert (spec1.dag_hash() == spec2.dag_hash()) == reusable -def test_concretization_cache_roundtrip( - mock_packages, use_concretization_cache, monkeypatch, mutable_config -): - """Tests whether we can write the results of a clingo solve to the cache - and load the same spec request from the cache to produce identical specs""" - # Force determinism: - # Solver setup is normally non-deterministic due to non-determinism in - # asp solver setup logic generation. The only other inputs to the cache keys are - # the .lp files, which are invariant over the course of this test. 
- # This method forces the same setup to be produced for the same specs - # which gives us a guarantee of cache hits, as it removes the only - # element of non deterministic solver setup for the same spec - # Basically just a quick and dirty memoization - solver_setup = spack.solver.asp.SpackSolverSetup.setup - - def _setup(self, specs, *, reuse=None, allow_deprecated=False): - if not getattr(_setup, "cache_setup", None): - cache_setup = solver_setup(self, specs, reuse=reuse, allow_deprecated=allow_deprecated) - setattr(_setup, "cache_setup", cache_setup) - return getattr(_setup, "cache_setup") - - # monkeypatch our forced determinism setup method into solver setup - monkeypatch.setattr(spack.solver.asp.SpackSolverSetup, "setup", _setup) - - assert spack.config.get("config:concretization_cache:enable") - - # run one standard concretization to populate the cache and the setup method - # memoization - h = spack.concretize.concretize_one("hdf5") - - # due to our forced determinism above, we should not be observing - # cache misses, assert that we're not storing any new cache entries - def _ensure_no_store(self, problem: str, result, statistics, test=False): - # always throw, we never want to reach this code path - assert False, "Concretization cache hit expected" - - # Assert that we're actually hitting the cache - cache_fetch = spack.solver.asp.ConcretizationCache.fetch - - def _ensure_cache_hits(self, problem: str): - result, statistics = cache_fetch(self, problem) - assert result, "Expected successful concretization cache hit" - assert statistics, "Expected statistics to be non null on cache hit" - return result, statistics - - monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "store", _ensure_no_store) - monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "fetch", _ensure_cache_hits) - # ensure subsequent concretizations of the same spec produce the same spec - # object - for _ in range(5): - assert h == spack.concretize.concretize_one("hdf5") - - @pytest.mark.regression("42679") @pytest.mark.parametrize("compiler_str", ["gcc@=9.4.0", "gcc@=9.4.0-foo"]) def test_selecting_compiler_with_suffix(mutable_config, mock_packages, compiler_str): @@ -3351,16 +3558,17 @@ def test_compiler_can_depend_on_themselves_to_build( def test_compiler_attribute_is_tolerated_in_externals( mutable_config, mock_packages, tmp_path: pathlib.Path ): - """Tests that we don't error out if an external specifies a compiler, even though externals - don't have dependencies. + """Tests that we don't error out if an external specifies a compiler in the old way, + provided that a suitable external compiler exists. 
""" packages_yaml = syaml.load_config( f""" packages: cmake: externals: - - spec: "cmake@3.27.4 %gcc@14.1.0" + - spec: "cmake@3.27.4 %gcc@10" prefix: {tmp_path} + buildable: false """ ) mutable_config.set("packages", packages_yaml["packages"]) @@ -3397,7 +3605,7 @@ def test_compiler_match_for_externals_is_taken_into_account( packages: libelf: externals: - - spec: "libelf@0.8.12 %gcc" + - spec: "libelf@0.8.12 %gcc@10" prefix: {tmp_path / 'gcc'} - spec: "libelf@0.8.13 %clang" prefix: {tmp_path / 'clang'} @@ -3433,7 +3641,7 @@ def test_compiler_match_for_externals_with_versions( externals: - spec: "libelf@0.8.12 %gcc@10" prefix: {tmp_path / 'libelf-gcc10'} - - spec: "libelf@0.8.13 %gcc@9" + - spec: "libelf@0.8.13 %gcc@9.4.0" prefix: {tmp_path / 'libelf-gcc9'} """ ) @@ -3519,9 +3727,10 @@ def test_input_analysis_and_conditional_requirements(default_mock_concretization @pytest.mark.parametrize( "compiler_str,expected,not_expected", [ - # Compiler queries are as specific as the constraint on the external + # Compilers are matched to some other external, so the compiler that picked is concrete ("gcc@10", ["%gcc", "%gcc@10"], ["%clang", "%gcc@9"]), - ("gcc", ["%gcc"], ["%clang", "%gcc@9", "%gcc@10"]), + ("gcc@9.4.0", ["%gcc", "%gcc@9"], ["%clang", "%gcc@10"]), + ("clang", ["%clang", "%llvm+clang"], ["%gcc", "%gcc@9", "%gcc@10"]), ], ) @pytest.mark.regression("49841") @@ -3736,7 +3945,7 @@ def test_spec_parts_on_fresh_compilers( llvm:: buildable: false externals: - - spec: "llvm+clang@20 {constraint_in_yaml}" + - spec: "llvm@20 +clang {constraint_in_yaml}" prefix: {tmp_path / 'llvm-20'} """ ) @@ -3968,7 +4177,7 @@ def test_selecting_externals_with_compilers_as_root(mutable_config, mock_package packages_yaml = syaml.load_config( """ packages: - gcc: + gcc:: externals: - spec: "gcc@9.4.0 languages='c,c++'" prefix: /path @@ -3976,7 +4185,7 @@ def test_selecting_externals_with_compilers_as_root(mutable_config, mock_package compilers: c: /path/bin/gcc cxx: /path/bin/g++ - llvm: + llvm:: buildable: false externals: - spec: "llvm@20 +clang" @@ -4018,18 +4227,10 @@ def test_selecting_externals_with_compilers_as_root(mutable_config, mock_package @pytest.mark.not_on_windows("Tests use linux paths") @pytest.mark.regression("51001") @pytest.mark.parametrize( - "external_compiler,spec_str,expected_raising", - [ - # Overspecify the compiler in the input spec. This should raise, because we - # don't know if we can satisfy the constraint - ("gcc", "mpich %gcc@9", True), - pytest.param("gcc@9", "mpich %gcc@9.4", True, marks=pytest.mark.xfail), - # This is ok - ("gcc@9.4.0", "mpich %gcc@9", False), - ], + "external_compiler,spec_str", [("gcc@8", "mpich %gcc@8.4"), ("gcc@8.4.0", "mpich %gcc@8")] ) def test_selecting_externals_with_compilers_and_versions( - external_compiler, spec_str, expected_raising, mutable_config, mock_packages + external_compiler, spec_str, mutable_config, mock_packages ): """Tests different scenarios of having a compiler specified with a version constraint, either in the input spec or in the external spec. 
@@ -4039,7 +4240,7 @@ def test_selecting_externals_with_compilers_and_versions( packages: gcc: externals: - - spec: "gcc@9.4.0 languages='c,c++'" + - spec: "gcc@8.4.0 languages='c,c++'" prefix: /path extra_attributes: compilers: @@ -4055,16 +4256,41 @@ def test_selecting_externals_with_compilers_and_versions( """ ) mutable_config.set("packages", packages_yaml["packages"]) + s = spack.concretize.concretize_one(spec_str) + assert s.external + assert s.prefix == "/path/mpich/gcc" - if expected_raising: - with pytest.raises( - spack.solver.asp.UnsatisfiableSpecError, match="Omit version requirement" - ): - _ = spack.concretize.concretize_one(spec_str) - else: - s = spack.concretize.concretize_one(spec_str) - assert s.external - assert s.prefix == "/path/mpich/gcc" + +@pytest.mark.regression("51001") +@pytest.mark.parametrize( + "external_compiler,spec_str,error_match", + [ + # Compiler is underspecified + ("gcc", "mpich %gcc", "there are multiple external specs"), + ("gcc@9", "mpich %gcc", "there are multiple external specs"), + # Compiler does not exist + ("%oneapi", "mpich %gcc@8", "there is no"), + ], +) +def test_errors_when_specifying_externals_with_compilers( + external_compiler, spec_str, error_match, mutable_config, mock_packages +): + """Tests different errors that can occur in an external spec with a compiler specified.""" + packages_yaml = syaml.load_config( + f""" +packages: + mpich: + buildable: false + externals: + - spec: "mpich@3.4.3 %{external_compiler}" + prefix: /path/mpich/gcc + - spec: "mpich@3.4.3 %clang" + prefix: /path/mpich/clang +""" + ) + mutable_config.set("packages", packages_yaml["packages"]) + with pytest.raises(ExternalDependencyError, match=error_match): + _ = spack.concretize.concretize_one(spec_str) @pytest.mark.regression("51146,51067") @@ -4078,7 +4304,6 @@ def test_caret_in_input_cannot_set_transitive_build_dependencies(default_mock_co @pytest.mark.regression("51167") @pytest.mark.require_provenance -@pytest.mark.xfail(reason="This is a bug in the solver, related to the 'commit=' variant") def test_commit_variant_enters_the_hash(mutable_config, mock_packages, monkeypatch): """Tests that an implicit commit variant, obtained from resolving the commit sha of a branch, enters the hash of the spec. 
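# Further below, test_concretization_cache_asp_canonicalization asserts that stripping
# an ASP program for cache keying removes only blank lines and full-line "%" comments.
# A standalone sketch of such a filter (the function name is hypothetical; the real
# spack.solver.asp.strip_asp_problem may do more):
from typing import List


def strip_asp_sketch(lines: List[str]) -> List[str]:
    """Drop blank lines and full-line '%' comments; keep everything else verbatim."""
    kept = []
    for line in lines:
        stripped = line.strip()
        if not stripped or stripped.startswith("%"):
            continue  # cosmetic-only lines must not change the cache key
        kept.append(line)
    return kept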
@@ -4086,18 +4311,14 @@ def test_commit_variant_enters_the_hash(mutable_config, mock_packages, monkeypat first_call = True - def _mock_resolve(pkg_self) -> None: + def _mock_resolve(spec) -> None: if first_call: - pkg_self.spec.variants["commit"] = spack.variant.SingleValuedVariant( - "commit", f"{'b' * 40}" - ) + spec.variants["commit"] = vt.SingleValuedVariant("commit", f"{'b' * 40}") return - pkg_self.spec.variants["commit"] = spack.variant.SingleValuedVariant( - "commit", f"{'a' * 40}" - ) + spec.variants["commit"] = vt.SingleValuedVariant("commit", f"{'a' * 40}") - monkeypatch.setattr(spack.package_base.PackageBase, "resolve_binary_provenance", _mock_resolve) + monkeypatch.setattr(spack.package_base.PackageBase, "_resolve_git_provenance", _mock_resolve) before = spack.concretize.concretize_one("git-ref-package@develop") first_call = False @@ -4164,3 +4385,464 @@ def test_when_possible_above_all(mutable_config, mock_packages): for result in solver.solve_in_rounds(specs): criteria = sorted(result.criteria, reverse=True) assert criteria[0].name == "number of input specs not concretized" + + +def test_concretization_cache_roundtrip( + mock_packages, use_concretization_cache, monkeypatch, mutable_config +): + """Tests whether we can write the results of a clingo solve to the cache + and load the same spec request from the cache to produce identical specs""" + + assert spack.config.get("concretizer:concretization_cache:enable") + + # run one standard concretization to populate the cache and the setup method + # memoization + h = spack.concretize.concretize_one("hdf5") + + # ASP output should be stable, concretizing the same spec + # should have the same problem output + # assert that we're not storing any new cache entries + def _ensure_no_store(self, problem: str, result, statistics, test=False): + # always throw, we never want to reach this code path + assert False, "Concretization cache hit expected" + + # Assert that we're actually hitting the cache + cache_fetch = spack.solver.asp.ConcretizationCache.fetch + + def _ensure_cache_hits(self, problem: str): + result, statistics = cache_fetch(self, problem) + assert result, "Expected successful concretization cache hit" + assert statistics, "Expected statistics to be non null on cache hit" + return result, statistics + + monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "store", _ensure_no_store) + monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "fetch", _ensure_cache_hits) + # ensure subsequent concretizations of the same spec produce the same spec + # object + for _ in range(5): + assert h == spack.concretize.concretize_one("hdf5") + + +def test_concretization_cache_roundtrip_result(use_concretization_cache): + """Ensure the concretization cache doesn't change Solver Result objects.""" + specs = [Spec("hdf5")] + solver = spack.solver.asp.Solver() + + result1 = solver.solve(specs) + result2 = solver.solve(specs) + + assert result1 == result2 + + +def test_concretization_cache_count_cleanup(use_concretization_cache, mutable_config): + """Tests that we clean the cache when we should, with respect to the + number of entries allowed in the cache""" + conc_cache_dir = use_concretization_cache + + spack.config.set("concretizer:concretization_cache:entry_limit", 1000) + + def names(): + return set( + x.name + for x in conc_cache_dir.iterdir() + if (not x.is_dir() and not x.name.startswith(".")) + ) + + assert len(names()) == 0 + + for i in range(1000): + name = spack.util.hash.b32_hash(f"mock_cache_file_{i}") +
mock_cache_file = conc_cache_dir / name + mock_cache_file.touch() + + before = names() + assert len(before) == 1000 + + # cleanup should be run after the 1,001st execution + spack.concretize.concretize_one("hdf5") + + # ensure that half the elements were removed and that one more was created + after = names() + assert len(after) == 501 + assert len(after - before) == 1 # one additional hash added by 1001st concretization + + +def test_concretization_cache_uncompressed_entry(use_concretization_cache, monkeypatch): + def _store(self, problem, result, statistics): + cache_path = self._cache_path_from_problem(problem) + with self.write_transaction(cache_path) as exists: + if exists: + return + try: + with open(cache_path, "x", encoding="utf-8") as cache_entry: + cache_dict = {"results": result.to_dict(), "statistics": statistics} + cache_entry.write(json.dumps(cache_dict)) + except FileExistsError: + pass + + monkeypatch.setattr(spack.solver.asp.ConcretizationCache, "store", _store) + # Store the results in plaintext + spack.concretize.concretize_one("zlib") + # Ensure fetch can handle the plaintext cache entry + spack.concretize.concretize_one("zlib") + + +@pytest.mark.parametrize( + "asp_file", + [ + "concretize.lp", + "heuristic.lp", + "display.lp", + "direct_dependency.lp", + "when_possible.lp", + "libc_compatibility.lp", + "os_compatibility.lp", + "splices.lp", + ], +) +def test_concretization_cache_asp_canonicalization(asp_file): + path = os.path.join(os.path.dirname(spack.solver.asp.__file__), asp_file) + + with open(path, "r", encoding="utf-8") as f: + original = [line.strip() for line in f.readlines()] + stripped = spack.solver.asp.strip_asp_problem(original) + + diff = list(difflib.unified_diff(original, stripped)) + + assert all( + [ + line == "-" or line.startswith("-%") + for line in diff + if line.startswith("-") and not line.startswith("---") + ] + ) + + +@pytest.mark.parametrize( + "node_completion,expected,not_expected", + [ + ("architecture_only", ["+clang", "~flang", "platform=test"], ["lld=*"]), + ( + "default_variants", + ["+clang", "~flang", "+lld", "platform=test"], + ["~clang", "+flang", "~lld"], + ), + ], +) +def test_external_node_completion_from_config( + node_completion, expected, not_expected, mutable_config, mock_packages +): + """Tests the different options for external node completion in the configuration file.""" + mutable_config.set("concretizer:externals:completion", node_completion) + + s = spack.concretize.concretize_one("llvm") + + assert s.external + assert all(s.satisfies(c) for c in expected) + assert all(not s.satisfies(c) for c in not_expected) + + +@pytest.mark.parametrize( + "spec_str,packages_yaml,expected", + [ + ( + "mpileaks", + """ +packages: + mpileaks: + externals: + - spec: "mpileaks@2.3~debug+opt" + prefix: /user/path + dependencies: + - id: callpath_id + deptypes: link + - id: mpich_id + deptypes: + - "build" + - "link" + virtuals: "mpi" + callpath: + externals: + - spec: "callpath@1.0" + prefix: /user/path + id: callpath_id + dependencies: + - id: mpich_id + deptypes: + - "build" + - "link" + virtuals: "mpi" + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path + id: mpich_id +""", + [ + "%mpi=mpich@3.0.4", + "^callpath %mpi=mpich@3.0.4", + "%[deptypes=link] callpath", + "%[deptypes=build,link] mpich", + ], + ), + # Same, but using `spec:` instead of `id:` for dependencies + ( + "mpileaks", + """ +packages: + mpileaks: + externals: + - spec: "mpileaks@2.3~debug+opt" + prefix: /user/path + dependencies: + - spec: callpath + 
deptypes: link + - spec: mpich + virtuals: "mpi" + callpath: + externals: + - spec: "callpath@1.0" + prefix: /user/path + dependencies: + - spec: mpich + virtuals: "mpi" + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path +""", + [ + "%mpi=mpich@3.0.4", + "^callpath %mpi=mpich@3.0.4", + "%[deptypes=link] callpath", + "%[deptypes=build,link] mpich", + ], + ), + ], +) +def test_external_specs_with_dependencies( + spec_str, packages_yaml, expected, mutable_config, mock_packages +): + """Tests that we can reconstruct external specs with dependencies.""" + configuration = syaml.load_config(packages_yaml) + mutable_config.set("packages", configuration["packages"]) + s = spack.concretize.concretize_one(spec_str) + assert all(node.external for node in s.traverse()) + assert all(s.satisfies(c) for c in expected) + + +@pytest.mark.parametrize( + "default_target,expected", + [ + # Specific target requested + ("x86_64_v3", ["callpath target=x86_64_v3", "^mpich target=x86_64_v3"]), + # With ranges, be conservative by default + (":x86_64_v3", ["callpath target=x86_64", "^mpich target=x86_64"]), + ("x86_64:x86_64_v3", ["callpath target=x86_64", "^mpich target=x86_64"]), + ("x86_64:", ["callpath target=x86_64", "^mpich target=x86_64"]), + ], +) +@pytest.mark.skipif( + spack.vendor.archspec.cpu.host().family != "x86_64", reason="test data for x86_64" +) +def test_target_requirements(default_target, expected, mutable_config, mock_packages): + """Tests different scenarios where targets might be constrained by configuration and are not + specified in external specs + """ + configuration = syaml.load_config( + f""" +packages: + all: + require: + - "target={default_target}" + callpath: + buildable: false + externals: + - spec: "callpath@1.0" + prefix: /user/path + id: callpath_id + dependencies: + - id: mpich_id + deptypes: + - "build" + - "link" + virtuals: "mpi" + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path + id: mpich_id +""" + ) + mutable_config.set("packages", configuration["packages"]) + s = spack.concretize.concretize_one("callpath") + assert s.external + assert all(s.satisfies(x) for x in expected), s.tree() + + +@pytest.mark.parametrize( + "spec_str,inline,yaml", + [ + ( + "cmake-client", + """ +packages: + cmake-client: + externals: + - spec: cmake-client@1.0 %cmake + prefix: /mock + cmake: + externals: + - spec: cmake@3.23.0 + prefix: /mock +""", + """ +packages: + cmake-client: + externals: + - spec: cmake-client@1.0 + prefix: /mock + dependencies: + - spec: cmake + cmake: + externals: + - spec: cmake@3.23.0 + prefix: /mock +""", + ), + ( + "mpileaks", + """ +packages: + mpileaks: + externals: + - spec: "mpileaks@2.3~debug+opt %mpi=mpich %[deptypes=link] callpath" + prefix: /user/path + callpath: + externals: + - spec: "callpath@1.0 %mpi=mpich" + prefix: /user/path + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path +""", + """ +packages: + mpileaks: + externals: + - spec: "mpileaks@2.3~debug+opt" + prefix: /user/path + dependencies: + - spec: callpath + deptypes: link + - spec: mpich + virtuals: "mpi" + callpath: + externals: + - spec: "callpath@1.0" + prefix: /user/path + dependencies: + - spec: mpich + virtuals: "mpi" + mpich: + externals: + - spec: "mpich@3.0.4" + prefix: /user/path +""", + ), + ], +) +def test_external_inline_equivalent_to_yaml(spec_str, inline, yaml, mutable_config, mock_packages): + """Tests that the inline syntax for external specs is equivalent to the YAML syntax.""" + configuration = syaml.load_config(inline) + 
mutable_config.set("packages", configuration["packages"]) + inline_spec = spack.concretize.concretize_one(spec_str) + + configuration = syaml.load_config(yaml) + mutable_config.set("packages", configuration["packages"]) + yaml_spec = spack.concretize.concretize_one(spec_str) + + assert inline_spec == yaml_spec + + +@pytest.mark.regression("51556") +def test_reusing_gcc_same_version_different_libcs(monkeypatch, mutable_config, mock_packages): + """Tests that Spack can solve for specs when it reuses 2 GCCs at the same version, + that inject different libcs. + """ + packages_yaml = syaml.load_config( + """ +packages: + gcc: + externals: + - spec: "gcc@12.3.0 languages='c,c++,fortran' os=debian6" + prefix: /path + extra_attributes: + compilers: + c: /path/bin/gcc + cxx: /path/bin/g++ + fortran: /path/bin/gfortran + - spec: "gcc@12.3.0 languages='c,c++,fortran' os=redhat6" + prefix: /path + extra_attributes: + compilers: + c: /path/bin/gcc + cxx: /path/bin/g++ + fortran: /path/bin/gfortran +""" + ) + mutable_config.set("packages", packages_yaml["packages"]) + mutable_config.set("concretizer:reuse", True) + + def _mock_libc(self): + if self.spec.satisfies("os=debian6"): + return spack.spec.Spec("glibc@=2.31", external_path="/rocky9/path") + return spack.spec.Spec("glibc@=2.28", external_path="/rocky8/path") + + monkeypatch.setattr( + spack.compilers.libraries.CompilerPropertyDetector, "default_libc", _mock_libc + ) + + # This should not raise + mpileaks = spack.concretize.concretize_one("mpileaks %c=gcc@12") + + assert mpileaks.satisfies("%c=gcc@12") + + +@pytest.mark.regression("51683") +def test_activating_variant_for_conditional_language_dependency(default_mock_concretization): + """Tests that a dependency on a conditional language can be concretized, and that the solver + turns on the correct variant to enable the language dependency + """ + # To trigger the bug, we need at least one other node needing fortran, in this case mpich + s = default_mock_concretization("mpileaks %fortran=gcc %mpi=mpich") + assert s.satisfies("+fortran") + + # Try just asking for fortran, without the provider + s = default_mock_concretization("mpileaks %fortran %mpi=mpich") + assert s.satisfies("+fortran") + + +def test_imposed_spec_dependency_duplication(mock_packages: spack.repo.Repo): + """Tests that imposed dependencies triggered by identical conditions are grouped together, + and that imposed dependencies that differ on a deptype are not grouped together.""" + # The trigger-and-effect-deps pkg has 4 conditions, 2 triggers, and 4 effects in total: + # +x -> depends on pkg-a with deptype link + # +x -> depends on pkg-b with deptype link + # +y -> depends on pkg-a with deptype run + # +y -> depends on pkg-b with deptype run + pkg = mock_packages.get_pkg_class("trigger-and-effect-deps") + setup = spack.solver.asp.SpackSolverSetup() + setup.gen = spack.solver.asp.ProblemInstanceBuilder() + setup.package_dependencies_rules(pkg) + setup.trigger_rules() + setup.effect_rules() + asp = setup.gen.asp_problem + + # There should be 4 conditions total + assert len([line for line in asp if re.search(r"condition\(\d+\)", line)]) == 4 + # There should be 2 triggers total + assert len([line for line in asp if re.search(r"trigger_id\(\d+\)", line)]) == 2 + # There should be 4 effects total + assert len([line for line in asp if re.search(r"effect_id\(\d+\)", line)]) == 4 diff --git a/lib/spack/spack/test/concretization/errors.py b/lib/spack/spack/test/concretization/errors.py index 474ec45e4afe5c..017fd8e6e46954 100644 ---
a/lib/spack/spack/test/concretization/errors.py +++ b/lib/spack/spack/test/concretization/errors.py @@ -21,16 +21,8 @@ ] external_error_messages = [ - ( - "Attempted to build package quantum-espresso which is not buildable and does not have" - " a satisfying external" - ), - ( - " 'quantum-espresso~veritas' is an external constraint for quantum-espresso" - " which was not satisfied" - ), - " 'quantum-espresso+veritas' required", - " required because quantum-espresso+veritas requested explicitly", + "Cannot build quantum-espresso, since it is configured `buildable:false` and " + "no externals satisfy the request" ] variant_error_messages = [ @@ -64,7 +56,26 @@ def test_error_messages(error_messages, config_set, spec, mock_packages, mutable _ = spack.concretize.concretize_one(spec) for em in error_messages: - assert em in str(e.value) + assert em in str(e.value), str(e.value) + + +@pytest.mark.parametrize( + "spec", ["deprecated-versions@1.1.0", "deprecated-client ^deprecated-versions@1.1.0"] +) +def test_deprecated_version_error(spec, mock_packages, mutable_config): + with pytest.raises(spack.solver.asp.DeprecatedVersionError, match="deprecated-versions@1.1.0"): + _ = spack.concretize.concretize_one(spec) + + spack.config.set("config:deprecated", True) + spack.concretize.concretize_one(spec) + + +@pytest.mark.parametrize( + "spec", ["deprecated-versions@99.9", "deprecated-client ^deprecated-versions@99.9"] +) +def test_nonexistent_version_error(spec, mock_packages, mutable_config): + with pytest.raises(spack.solver.asp.InvalidVersionError, match="deprecated-versions@99.9"): + _ = spack.concretize.concretize_one(spec) def test_internal_error_handling_formatting(tmp_path: pathlib.Path): diff --git a/lib/spack/spack/test/concretization/flag_mixing.py b/lib/spack/spack/test/concretization/flag_mixing.py index 421e681640774d..59e1ee6e583957 100644 --- a/lib/spack/spack/test/concretization/flag_mixing.py +++ b/lib/spack/spack/test/concretization/flag_mixing.py @@ -78,7 +78,7 @@ def _compiler_cfg_one_entry_with_cflags(cflags): packages: gcc: externals: - - spec: gcc@12.100.100 + - spec: gcc@12.100.100 languages:=c,c++ prefix: /fake extra_attributes: compilers: @@ -279,3 +279,26 @@ def test_flag_injection_different_compilers(mock_packages, mutable_config): s = spack.concretize.concretize_one('mpileaks cflags=="-O2" %gcc ^callpath %llvm') assert s.satisfies('cflags="-O2"') and s["c"].name == "gcc" assert not s["callpath"].satisfies('cflags="-O2"') and s["callpath"]["c"].name == "llvm" + + +@pytest.mark.regression("51209") +@pytest.mark.parametrize( + "spec_str,expected,not_expected", + [ + # gcc using flags compiled with another gcc not using flags + ("gcc@14 cflags='-O3'", ["gcc@14 cflags='-O3'", "%gcc@10"], ["%gcc cflags='-O3'"]), + # Parent and child, imposing different flags on gmake + ( + "7zip-dependent %gmake cflags='-O2' ^7zip %gmake cflags='-g'", + ["%gmake cflags='-O2'", "^7zip %gmake cflags='-g'"], + ["%gmake cflags='-g'"], + ), + ], +) +def test_flags_and_duplicate_nodes(spec_str, expected, not_expected, default_mock_concretization): + """Tests that we can concretize a spec with flags on a node that is present with duplicates + in the DAG. For instance, a compiler built with a previous version of itself. 
+ """ + s = default_mock_concretization(spec_str) + assert all(s.satisfies(x) for x in expected) + assert all(not s.satisfies(x) for x in not_expected) diff --git a/lib/spack/spack/test/concretization/preferences.py b/lib/spack/spack/test/concretization/preferences.py index c5dbc3a5c049f0..42362366f9aa45 100644 --- a/lib/spack/spack/test/concretization/preferences.py +++ b/lib/spack/spack/test/concretization/preferences.py @@ -11,7 +11,6 @@ import spack.config import spack.package_prefs import spack.repo -import spack.spec import spack.util.module_cmd import spack.util.spack_yaml as syaml from spack.error import ConfigError @@ -504,3 +503,28 @@ def test_default_preference_variant_different_type_does_not_error(self): with spack.config.override("packages:all", {"variants": "+foo"}): s = spack.concretize.concretize_one("pkg-a") assert s.satisfies("foo=bar") + + def test_version_preference_cannot_generate_buildable_versions(self): + """Tests that a version preference not mentioned in package.py cannot be used in + a built spec. + """ + mpileaks_external = syaml.load_config( + """ + mpileaks: + # Version 0.9 is not mentioned in package.py + version: ["0.9"] + buildable: true + externals: + - spec: mpileaks@0.9 +debug + prefix: /path + """ + ) + + with spack.config.override("packages", mpileaks_external): + # Asking for mpileaks+debug results in the external being chosen + mpileaks = spack.concretize.concretize_one("mpileaks+debug") + assert mpileaks.external and mpileaks.satisfies("@0.9 +debug") + + # Asking for ~debug results in the highest known version being chosen + mpileaks = spack.concretize.concretize_one("mpileaks~debug") + assert not mpileaks.external and mpileaks.satisfies("@2.3 ~debug") diff --git a/lib/spack/spack/test/concretization/requirements.py b/lib/spack/spack/test/concretization/requirements.py index 733146f018ea19..76e4d5a242dff7 100644 --- a/lib/spack/spack/test/concretization/requirements.py +++ b/lib/spack/spack/test/concretization/requirements.py @@ -11,6 +11,7 @@ import spack.installer import spack.package_base import spack.paths +import spack.platforms import spack.repo import spack.solver.asp import spack.spec @@ -19,6 +20,8 @@ import spack.version from spack.installer import PackageInstaller from spack.solver.asp import InternalConcretizerError, UnsatisfiableSpecError +from spack.solver.reuse import SpecFilter, create_external_parser +from spack.solver.runtimes import external_config_with_implicit_externals from spack.spec import Spec from spack.util.url import path_to_file_url @@ -1302,7 +1305,12 @@ def test_virtual_requirement_respects_any_of(concretize_scope, mock_packages): ) @pytest.mark.regression("49847") def test_requirements_on_compilers_and_reuse( - concretize_scope, mock_packages, packages_yaml, expected_reuse, expected_contraints + concretize_scope, + mock_packages, + mutable_config, + packages_yaml, + expected_reuse, + expected_contraints, ): """Tests that we can require compilers with `%` in configuration files, and still get reuse of specs (even though reused specs have no build dependency in the ASP encoding). 
@@ -1313,11 +1321,19 @@ def test_requirements_on_compilers_and_reuse( reused_nodes = list(reused_spec.traverse()) update_packages_config(packages_yaml) root_specs = [Spec(input_spec)] + packages_with_externals = external_config_with_implicit_externals(mutable_config) + completion_mode = mutable_config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() with spack.config.override("concretizer:reuse", True): solver = spack.solver.asp.Solver() setup = spack.solver.asp.SpackSolverSetup() - result, _, _ = solver.driver.solve(setup, root_specs, reuse=reused_nodes) + result, _, _ = solver.driver.solve(setup, root_specs, reuse=reused_nodes + external_specs) pkga = result.specs[0] is_pkgb_reused = pkga["pkg-b"].dag_hash() == reused_spec.dag_hash() @@ -1384,7 +1400,7 @@ def test_multiple_externals_and_requirement( mpich: buildable: false externals: - - spec: "mpich@4.3.0 %gcc" + - spec: "mpich@4.3.0 %gcc@10" prefix: {tmp_path / "gcc"} - spec: "mpich@4.3.0 %clang" prefix: {tmp_path / "clang"} @@ -1397,3 +1413,237 @@ def test_multiple_externals_and_requirement( assert concrete.satisfies("%gcc") assert concrete["mpi"].satisfies("mpich@4.3.0") assert concrete["mpi"].prefix == str(tmp_path / "gcc") + + +@pytest.mark.regression("51262") +@pytest.mark.parametrize( + "input_constraint", + [ + # Override the compiler preference with a different version of gcc + "%c=gcc@10", + # Same, but without specifying the virtual + "%gcc@10", + # Override the mpi preference with a different version of mpich + "%mpi=mpich@3 ~debug", + # Override the mpi preference with a different provider + "%mpi=mpich2", + ], +) +def test_overriding_preference_with_provider_details( + input_constraint, concretize_scope, mock_packages, tmp_path: pathlib.Path +): + """Tests that if we have a preference with provider details, such as a version range, + or a variant, we can override it from the command line, while we can't do the same + when we have a requirement. 
+ """ + # A preference can be overridden + packages_yaml = """ +packages: + c: + prefer: + - gcc@9 + mpi: + prefer: + - mpich@3 +debug +""" + update_packages_config(packages_yaml) + concrete = spack.concretize.concretize_one(f"mpileaks {input_constraint}") + assert concrete.satisfies(input_constraint) + + # A requirement cannot + packages_yaml = """ + packages: + c: + require: + - gcc@9 + mpi: + require: + - mpich@3 +debug + """ + update_packages_config(packages_yaml) + with pytest.raises(UnsatisfiableSpecError): + spack.concretize.concretize_one(f"mpileaks {input_constraint}") + + +@pytest.mark.parametrize( + "initial_preference,current_preference", + [ + # Different provider + ("llvm", "gcc"), + ("gcc", "llvm"), + # Different version of the same provider + ("gcc@9", "gcc@10"), + ("gcc@10", "gcc@9"), + # Different configuration of the same provider + ("llvm+lld", "llvm~lld"), + ("llvm~lld", "llvm+lld"), + ], +) +@pytest.mark.parametrize("constraint_kind", ["require", "prefer"]) +def test_language_preferences_and_reuse( + initial_preference, + current_preference, + constraint_kind, + concretize_scope, + mutable_config, + mock_packages, +): + """Tests that language preferences are respected when reusing specs.""" + + # Install mpileaks with a non-default variant to avoid "accidental" reuse + packages_yaml = f""" +packages: + c: + {constraint_kind}: + - {initial_preference} + cxx: + {constraint_kind}: + - {initial_preference} + llvm: + externals: + - spec: "llvm@15.0.0 +clang~flang ~lld" + prefix: /path1 + extra_attributes: + compilers: + c: /path1/bin/clang + cxx: /path1/bin/clang++ +""" + update_packages_config(packages_yaml) + initial_mpileaks = spack.concretize.concretize_one("mpileaks+debug") + reused_nodes = list(initial_mpileaks.traverse()) + packages_with_externals = external_config_with_implicit_externals(mutable_config) + completion_mode = mutable_config.get("concretizer:externals:completion") + external_specs = SpecFilter.from_packages_yaml( + external_parser=create_external_parser(packages_with_externals, completion_mode), + packages_with_externals=packages_with_externals, + include=[], + exclude=[], + ).selected_specs() + + # Ask for just "mpileaks" and check the spec is reused + with spack.config.override("concretizer:reuse", True): + solver = spack.solver.asp.Solver() + setup = spack.solver.asp.SpackSolverSetup() + result, _, _ = solver.driver.solve( + setup, [Spec("mpileaks")], reuse=reused_nodes + external_specs + ) + reused_mpileaks = result.specs[0] + + assert reused_mpileaks.dag_hash() == initial_mpileaks.dag_hash() + + # Change the language preferences and verify reuse is not happening + packages_yaml = f""" +packages: + c: + {constraint_kind}: + - {current_preference} + cxx: + {constraint_kind}: + - {current_preference} + llvm: + externals: + - spec: "llvm@15.0.0 +clang~flang ~lld" + prefix: /path1 + extra_attributes: + compilers: + c: /path1/bin/clang + cxx: /path1/bin/clang++ +""" + update_packages_config(packages_yaml) + with spack.config.override("concretizer:reuse", True): + solver = spack.solver.asp.Solver() + setup = spack.solver.asp.SpackSolverSetup() + result, _, _ = solver.driver.solve( + setup, [Spec("mpileaks")], reuse=reused_nodes + external_specs + ) + mpileaks = result.specs[0] + + assert initial_mpileaks.dag_hash() != mpileaks.dag_hash() + for node in mpileaks.traverse(): + assert node.satisfies(f"%[when=%c]c={current_preference}") + assert node.satisfies(f"%[when=%cxx]cxx={current_preference}") + + +def 
test_external_spec_completion_with_targets_required( + concretize_scope, mock_packages, tmp_path: pathlib.Path +): + """Tests that we can concretize a spec needing externals, when we require a specific target, + without extra configuration. + """ + current_platform = spack.platforms.host() + packages_yaml = f""" + packages: + all: + require: + - target={current_platform.default} + mpich: + buildable: false + externals: + - spec: "mpich@4.3.0" + prefix: {tmp_path / "mpich"} + """ + update_packages_config(packages_yaml) + + s = spack.spec.Spec("mpileaks") + concrete = spack.concretize.concretize_one(s) + + assert concrete.satisfies(f"target={current_platform.default}") + + +def test_penalties_for_language_preferences(concretize_scope, mock_packages): + """Tests the default behavior when we use more than one compiler package in a DAG, + under different scenarios. + """ + # This test uses gcc compilers providing c,cxx and fortran, and clang providing only c and cxx + dependency_names = ["mpi", "callpath", "libdwarf", "libelf"] + + # If we don't express requirements, Spack tries to use a single compiler package if possible + s = spack.concretize.concretize_one("mpileaks %c=gcc@10") + assert s.satisfies("%c=gcc@10") + assert all(s[name].satisfies("%c=gcc@10") for name in dependency_names) + + # Same with clang, if nothing else requires fortran + s = spack.concretize.concretize_one("mpileaks %c=clang ^mpi=mpich2") + assert s.satisfies("%c=clang") + assert all(s[name].satisfies("%c=clang") for name in dependency_names) + + # If something brings in fortran that node is compiled entirely with gcc, + # because currently we prefer to use a single toolchain for any node + s = spack.concretize.concretize_one("mpileaks %c=clang ^mpi=mpich") + assert s.satisfies("%c=clang") + assert s["mpich"].satisfies("%c,cxx,fortran=gcc@10") + + # If we prefer compilers in configuration, that has a higher priority + update_packages_config( + """ + packages: + c: + prefer: [gcc] + cxx: + prefer: [gcc] + fortran: + prefer: [gcc] +""" + ) + + s = spack.concretize.concretize_one("mpileaks %c=clang ^mpi=mpich2") + assert s.satisfies("%c=clang") + assert all(s[name].satisfies("%c=gcc@10") for name in dependency_names) + + # Mixed compilers in the preferences + update_packages_config( + """ + packages: + c: + prefer: [llvm] + cxx: + prefer: [llvm] + fortran: + prefer: [gcc] +""" + ) + + s = spack.concretize.concretize_one("mpileaks %c=gcc ^mpi=mpich") + assert s.satisfies("%c=gcc@10") + assert all(s[name].satisfies("%c=clang") for name in dependency_names) + assert s["mpi"].satisfies("%c,cxx=clang %fortran=gcc@10") diff --git a/lib/spack/spack/test/concretization/splicing.py b/lib/spack/spack/test/concretization/splicing.py index 5dbd5303c4b8e3..0b331d89011fcb 100644 --- a/lib/spack/spack/test/concretization/splicing.py +++ b/lib/spack/spack/test/concretization/splicing.py @@ -159,7 +159,7 @@ def test_virtual_multi_splices_in(original_spec, goal_spec, install_specs, mutab original = install_specs(original_spec)[0] mutable_config.set("packages", _make_specs_non_buildable(["depends-on-virtual-with-abi"])) - with pytest.raises(SolverError): + with pytest.raises(UnsatisfiableSpecError): spack.concretize.concretize_one(goal_spec) _enable_splicing() diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py index 04150948b9933c..68fd5d9fb20e9c 100644 --- a/lib/spack/spack/test/config.py +++ b/lib/spack/spack/test/config.py @@ -3,11 +3,12 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import collections -import 
getpass import io import os import pathlib +import sys import tempfile +import textwrap from datetime import date import pytest @@ -30,10 +31,13 @@ import spack.schema.repos import spack.spec import spack.store +import spack.util.executable +import spack.util.git import spack.util.path as spack_path import spack.util.spack_yaml as syaml from spack.enums import ConfigScopePriority -from spack.llnl.util.filesystem import join_path, touch +from spack.llnl.util.filesystem import getuid, join_path, touch +from spack.util.spack_yaml import DictWithLineInfo # sample config data config_low = { @@ -77,13 +81,6 @@ def env_yaml(tmp_path: pathlib.Path): return env_yaml -def cross_plat_join(*pths): - """os.path.join does not prepend paths to other paths - beginning with a Windows drive label i.e. D:\\ - """ - return os.sep.join([pth for pth in pths]) - - def check_compiler_config(comps, *compiler_names): """Check that named compilers in comps match Spack's config.""" config = spack.config.get("compilers") @@ -335,43 +332,54 @@ def __init__(self, path): self.path = path -def test_substitute_config_variables(mock_low_high_config, monkeypatch): - prefix = spack.paths.prefix.lstrip("/") - assert cross_plat_join( - os.sep + os.path.join("foo", "bar", "baz"), prefix - ) == spack_path.canonicalize_path("/foo/bar/baz/$spack") +def test_substitute_config_variables(mock_low_high_config, monkeypatch, tmp_path: pathlib.Path): + # Test $spack substitution at the start (valid on all platforms) + assert os.path.join(spack.paths.prefix, "foo", "bar", "baz") == spack_path.canonicalize_path( + "$spack/foo/bar/baz/" + ) - assert cross_plat_join( - spack.paths.prefix, os.path.join("foo", "bar", "baz") - ) == spack_path.canonicalize_path("$spack/foo/bar/baz/") + assert os.path.join(spack.paths.prefix, "foo", "bar", "baz") == spack_path.canonicalize_path( + "${spack}/foo/bar/baz/" + ) - assert cross_plat_join( - os.sep + os.path.join("foo", "bar", "baz"), prefix, os.path.join("foo", "bar", "baz") - ) == spack_path.canonicalize_path("/foo/bar/baz/$spack/foo/bar/baz/") + # Test $spack substitution in the middle. This only makes sense when using posix paths. 
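+ # For example (illustrative values, an editor's note rather than part of the original test): with a Spack prefix of "/opt/spack", canonicalize_path("/tmp/base/$spack") would give "/tmp/base/opt/spack" -- the prefix is spliced in with its leading separator stripped, which has no analogue for a Windows drive-rooted prefix like "C:\spack".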
+ if sys.platform != "win32": + prefix = spack.paths.prefix.lstrip(os.sep) + base = str(tmp_path) - assert cross_plat_join( - os.sep + os.path.join("foo", "bar", "baz"), prefix - ) == spack_path.canonicalize_path("/foo/bar/baz/${spack}") + assert os.path.join(base, "foo", "bar", "baz", prefix) == spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "baz", "$spack") + ) - assert cross_plat_join( - spack.paths.prefix, os.path.join("foo", "bar", "baz") - ) == spack_path.canonicalize_path("${spack}/foo/bar/baz/") + assert os.path.join( + base, "foo", "bar", "baz", prefix, "foo", "bar", "baz" + ) == spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "baz", "$spack", "foo", "bar", "baz") + ) - assert cross_plat_join( - os.sep + os.path.join("foo", "bar", "baz"), prefix, os.path.join("foo", "bar", "baz") - ) == spack_path.canonicalize_path("/foo/bar/baz/${spack}/foo/bar/baz/") + assert os.path.join(base, "foo", "bar", "baz", prefix) == spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "baz", "${spack}") + ) - assert cross_plat_join( - os.sep + os.path.join("foo", "bar", "baz"), prefix, os.path.join("foo", "bar", "baz") - ) != spack_path.canonicalize_path("/foo/bar/baz/${spack/foo/bar/baz/") + assert os.path.join( + base, "foo", "bar", "baz", prefix, "foo", "bar", "baz" + ) == spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "baz", "${spack}", "foo", "bar", "baz") + ) + + assert os.path.join( + base, "foo", "bar", "baz", prefix, "foo", "bar", "baz" + ) != spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "baz", "${spack", "foo", "bar", "baz") + ) # $env replacement is a no-op when no environment is active assert spack_path.canonicalize_path( - os.sep + os.path.join("foo", "bar", "baz", "$env") - ) == os.sep + os.path.join("foo", "bar", "baz", "$env") + os.path.join(str(tmp_path), "foo", "bar", "baz", "$env") + ) == os.path.join(str(tmp_path), "foo", "bar", "baz", "$env") # Fake an active environment and $env is replaced properly - fake_env_path = os.sep + os.path.join("quux", "quuux") + fake_env_path = str(tmp_path / "quux" / "quuux") monkeypatch.setattr(ev, "active_environment", lambda: MockEnv(fake_env_path)) assert spack_path.canonicalize_path("$env/foo/bar/baz") == os.path.join( fake_env_path, os.path.join("foo", "bar", "baz") @@ -432,18 +440,17 @@ def test_merge_with_defaults(mock_low_high_config, write_config_file): assert cfg["baz"]["version"] == ["c"] -def test_substitute_user(mock_low_high_config): - user = getpass.getuser() - assert os.sep + os.path.join( - "foo", "bar" - ) + os.sep + user + os.sep + "baz" == spack_path.canonicalize_path( - os.sep + os.path.join("foo", "bar", "$user", "baz") +def test_substitute_user(mock_low_high_config, tmp_path: pathlib.Path): + user = spack_path.get_user() + base = str(tmp_path) + assert os.path.join(base, "foo", "bar", user, "baz") == spack_path.canonicalize_path( + os.path.join(base, "foo", "bar", "$user", "baz") ) def test_substitute_user_cache(mock_low_high_config): user_cache_path = spack.paths.user_cache_path - assert user_cache_path + os.sep + "baz" == spack_path.canonicalize_path( + assert os.path.join(user_cache_path, "baz") == spack_path.canonicalize_path( os.path.join("$user_cache_path", "baz") ) @@ -451,7 +458,7 @@ def test_substitute_user_cache(mock_low_high_config): def test_substitute_tempdir(mock_low_high_config): tempdir = tempfile.gettempdir() assert tempdir == spack_path.canonicalize_path("$tempdir") - assert tempdir + os.sep + os.path.join("foo", "bar", "baz") 
== spack_path.canonicalize_path( + assert os.path.join(tempdir, "foo", "bar", "baz") == spack_path.canonicalize_path( os.path.join("$tempdir", "foo", "bar", "baz") ) @@ -478,19 +485,23 @@ def test_substitute_spack_version(): @pytest.mark.parametrize( - "config_settings,expected", + "config_settings_fn,expected_fn", [ - ([], [None, None, None]), - ([["config:install_tree:root", os.sep + "path"]], [os.sep + "path", None, None]), - ([["config:install_tree", os.sep + "path"]], [os.sep + "path", None, None]), + (lambda p: [], lambda p: [None, None, None]), ( - [["config:install_tree:projections", {"all": "{name}"}]], - [None, None, {"all": "{name}"}], + lambda p: [["config:install_tree:root", os.path.join(str(p), "path")]], + lambda p: [os.path.join(str(p), "path"), None, None], + ), + ( + lambda p: [["config:install_tree:projections", {"all": "{name}"}]], + lambda p: [None, None, {"all": "{name}"}], ), - ([["config:install_path_scheme", "{name}"]], [None, None, {"all": "{name}"}]), ], ) -def test_parse_install_tree(config_settings, expected, mutable_config): +def test_parse_install_tree(config_settings_fn, expected_fn, mutable_config, tmp_path): + config_settings = config_settings_fn(tmp_path) + expected = expected_fn(tmp_path) + expected_root = expected[0] or mutable_config.get("config:install_tree:root") expected_unpadded_root = expected[1] or expected_root expected_proj = expected[2] or spack.directory_layout.default_projections @@ -551,10 +562,6 @@ def change_fn(self, section): [["config:install_tree:root", "/path/$padding:11"]], [os.path.join(os.sep + "path", PAD_STRING[:5]), os.sep + "path", None], ), - ( - [["config:install_tree", "/path/${padding:11}"]], - [os.path.join(os.sep + "path", PAD_STRING[:5]), os.sep + "path", None], - ), ([["config:install_tree:padded_length", False]], [None, None, None]), ( [ @@ -563,14 +570,6 @@ def change_fn(self, section): ], [full_padded_string, os.sep + "path", None], ), - ( - [["config:install_tree:", os.sep + "path$padding"]], - [full_padded_string, os.sep + "path", None], - ), - ( - [["config:install_tree:", os.sep + "path" + os.sep + "${padding}"]], - [full_padded_string, os.sep + "path", None], - ), ], ) def test_parse_install_tree_padded(config_settings, expected, mutable_config): @@ -1157,18 +1156,18 @@ def test_bad_path_double_override(config): pass -def test_license_dir_config(mutable_config, mock_packages): +def test_license_dir_config(mutable_config, mock_packages, tmp_path): """Ensure license directory is customizable""" expected_dir = spack.paths.default_license_dir assert spack.config.get("config:license_dir") == expected_dir assert spack.package_base.PackageBase.global_license_dir == expected_dir assert spack.repo.PATH.get_pkg_class("pkg-a").global_license_dir == expected_dir - rel_path = os.path.join(os.path.sep, "foo", "bar", "baz") - spack.config.set("config:license_dir", rel_path) - assert spack.config.get("config:license_dir") == rel_path - assert spack.package_base.PackageBase.global_license_dir == rel_path - assert spack.repo.PATH.get_pkg_class("pkg-a").global_license_dir == rel_path + abs_path = str(tmp_path / "foo" / "bar" / "baz") + spack.config.set("config:license_dir", abs_path) + assert spack.config.get("config:license_dir") == abs_path + assert spack.package_base.PackageBase.global_license_dir == abs_path + assert spack.repo.PATH.get_pkg_class("pkg-a").global_license_dir == abs_path @pytest.mark.regression("22547") @@ -1234,27 +1233,225 @@ def test_default_install_tree(monkeypatch, default_config): assert 
s.format(projections["all"]) == "foo-baz/nonexistent-x.y.z-abc123" -def test_local_config_can_be_disabled(working_env): +@pytest.fixture +def mock_include_scope(tmp_path): + for subdir in ["defaults", "test1", "test2", "test3"]: + path = tmp_path / subdir + path.mkdir() + + include = tmp_path / "include.yaml" + with include.open("w", encoding="utf-8") as f: + f.write( + textwrap.dedent( + """\ + include:: + - name: "test1" + path: "test1" + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' + + - name: "test2" + path: "test2" + + - name: "test3" + path: "test3" + when: '"SPACK_DISABLE_LOCAL_CONFIG" not in env' + """ + ) + ) + + yield tmp_path + + +@pytest.fixture +def include_config_factory(mock_include_scope): + def make_config(): + cfg = spack.config.create() + cfg.push_scope( + spack.config.DirectoryConfigScope("defaults", str(mock_include_scope / "defaults")), + priority=ConfigScopePriority.DEFAULTS, + ) + cfg.push_scope( + spack.config.DirectoryConfigScope("tmp_path", str(mock_include_scope)), + priority=ConfigScopePriority.CONFIG_FILES, + ) + return cfg + + yield make_config + + +def test_modify_scope_precedence(working_env, include_config_factory, tmp_path): + """Test how spack selects the scope to modify when commands write config.""" + + cfg = include_config_factory() + + # ensure highest precedence writable scope is selected by default + assert cfg.highest_precedence_scope().name == "tmp_path" + + include_yaml = tmp_path / "include.yaml" + subdir = tmp_path / "subdir" + subdir2 = tmp_path / "subdir2" + subdir.mkdir() + subdir2.mkdir() + + with include_yaml.open("w", encoding="utf-8") as f: + f.write( + textwrap.dedent( + """\ + include:: + - name: "subdir" + path: "subdir" + """ + ) + ) + + cfg.push_scope( + spack.config.DirectoryConfigScope("override", str(tmp_path)), + priority=ConfigScopePriority.CONFIG_FILES, + ) + + # ensure override scope is selected when it is on top + assert cfg.highest_precedence_scope().name == "override" + + cfg.remove_scope("override") + + with include_yaml.open("w", encoding="utf-8") as f: + f.write( + textwrap.dedent( + """\ + include:: + - name: "subdir" + path: "subdir" + prefer_modify: true + """ + ) + ) + + cfg.push_scope( + spack.config.DirectoryConfigScope("override", str(tmp_path)), + priority=ConfigScopePriority.CONFIG_FILES, + ) + + # if the top scope prefers another, ensure it is selected + assert cfg.highest_precedence_scope().name == "subdir" + + cfg.remove_scope("override") + + with include_yaml.open("w", encoding="utf-8") as f: + f.write( + textwrap.dedent( + """\ + include:: + - name: "subdir" + path: "subdir" + - name: "subdir2" + path: "subdir2" + prefer_modify: true + """ + ) + ) + + cfg.push_scope( + spack.config.DirectoryConfigScope("override", str(tmp_path)), + priority=ConfigScopePriority.CONFIG_FILES, + ) + + # if there are multiple scopes and one is preferred, make sure it's that one + assert cfg.highest_precedence_scope().name == "subdir2" + + +def test_local_config_can_be_disabled(working_env, include_config_factory): + """Ensure that SPACK_DISABLE_LOCAL_CONFIG disables configurations with `when:`.""" os.environ["SPACK_DISABLE_LOCAL_CONFIG"] = "true" - cfg = spack.config.create() + cfg = include_config_factory() assert "defaults" in cfg.scopes - assert "system" not in cfg.scopes - assert "site" in cfg.scopes - assert "user" not in cfg.scopes + assert "test1" not in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" not in cfg.scopes os.environ["SPACK_DISABLE_LOCAL_CONFIG"] = "" - cfg = spack.config.create() + cfg = 
include_config_factory() assert "defaults" in cfg.scopes - assert "system" not in cfg.scopes - assert "site" in cfg.scopes - assert "user" not in cfg.scopes + assert "test1" not in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" not in cfg.scopes del os.environ["SPACK_DISABLE_LOCAL_CONFIG"] - cfg = spack.config.create() + cfg = include_config_factory() assert "defaults" in cfg.scopes - assert "system" in cfg.scopes - assert "site" in cfg.scopes - assert "user" in cfg.scopes + assert "test1" in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" in cfg.scopes + + +def test_override_included_config(working_env, tmp_path, include_config_factory): + override_scope = tmp_path / "override" + override_scope.mkdir() + + include_yaml = override_scope / "include.yaml" + subdir = override_scope / "subdir" + subdir.mkdir() + + with include_yaml.open("w", encoding="utf-8") as f: + f.write( + textwrap.dedent( + """\ + include:: + - name: "subdir" + path: "subdir" + """ + ) + ) + + # check the mock config is correct + cfg = include_config_factory() + + assert "defaults" in cfg.scopes + assert "test1" in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" in cfg.scopes + + active_names = [s.name for s in cfg.active_scopes] + assert "defaults" in active_names + assert "test1" in active_names + assert "test2" in active_names + assert "test3" in active_names + + # push a scope that overrides everything under it but includes a subdir. + # its included subdir should be active, but scopes *not* included by the overriding + # scope should not. + cfg.push_scope( + spack.config.DirectoryConfigScope("override", str(override_scope)), + priority=ConfigScopePriority.CONFIG_FILES, + ) + + assert "defaults" in cfg.scopes + assert "test1" in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" in cfg.scopes + assert "override" in cfg.scopes + assert "subdir" in cfg.scopes + + active_names = [s.name for s in cfg.active_scopes] + assert "defaults" in active_names + assert "test1" not in active_names + assert "test2" not in active_names + assert "test3" not in active_names + assert "override" in active_names + assert "subdir" in active_names + + # remove the override and ensure everything is back to normal + cfg.remove_scope("override") + + assert "defaults" in cfg.scopes + assert "test1" in cfg.scopes + assert "test2" in cfg.scopes + assert "test3" in cfg.scopes + + active_names = [s.name for s in cfg.active_scopes] + assert "defaults" in active_names + assert "test1" in active_names + assert "test2" in active_names + assert "test3" in active_names def test_user_cache_path_is_overridable(working_env): @@ -1274,6 +1471,7 @@ def test_config_file_dir_failure(tmp_path: pathlib.Path, mutable_empty_config): @pytest.mark.not_on_windows("chmod not supported on Windows") +@pytest.mark.skipif(getuid() == 0, reason="user is root") def test_config_file_read_perms_failure(tmp_path: pathlib.Path, mutable_empty_config): """Test reading a configuration file without permissions to ensure ConfigFileError is raised.""" @@ -1392,3 +1590,373 @@ def highest_priority_scopes(config, *, nscopes): ) assert highest_priority_scopes(spack.config.CONFIG, nscopes=2) == expected_scopes_without_env + + +@pytest.mark.regression("51059") +def test_config_include_similar_name(tmp_path: pathlib.Path): + config_a = tmp_path / "a" / "config" + config_b = tmp_path / "b" / "config" + + os.makedirs(config_a) + with open(config_a / "config.yaml", "w", encoding="utf-8") as fd: + syaml.dump_config({"config": {"install_tree": {"root": 
str(tmp_path)}}}, fd) + + os.makedirs(config_b) + with open(config_b / "config.yaml", "w", encoding="utf-8") as fd: + syaml.dump_config({"config": {"install_tree": {"padded_length": 64}}}, fd) + + with open(tmp_path / "include.yaml", "w", encoding="utf-8") as fd: + syaml.dump_config({"include": [str(config_a), str(config_b)]}, fd) + + config = spack.config.create_from(spack.config.DirectoryConfigScope("test", str(tmp_path))) + + # Ensure all of the scopes are found + assert len(config.matching_scopes("^test$")) == 1 + assert len(config.matching_scopes("^test:a/config$")) == 1 + assert len(config.matching_scopes("^test:b/config$")) == 1 + + +def test_deepcopy_as_builtin(env_yaml): + cfg = spack.config.create_from( + spack.config.SingleFileScope("env", env_yaml, spack.schema.env.schema, yaml_path=["spack"]) + ) + config_copy = cfg.deepcopy_as_builtin("config") + assert config_copy == cfg.get_config("config") + assert type(config_copy) is DictWithLineInfo + assert type(config_copy["verify_ssl"]) is bool + + packages_copy = cfg.deepcopy_as_builtin("packages") + assert type(packages_copy) is DictWithLineInfo + assert type(packages_copy["all"]) is DictWithLineInfo + assert type(packages_copy["all"]["compiler"]) is list + assert type(packages_copy["all"]["compiler"][0]) is str + + +def test_included_optional_include_scopes(): + with pytest.raises(NotImplementedError): + spack.config.OptionalInclude({}).scopes(spack.config.ConfigScope("fail")) + + +def test_included_path_string( + tmp_path: pathlib.Path, mock_low_high_config, ensure_debug, monkeypatch, capfd +): + path = tmp_path / "local" / "config.yaml" + path.parent.mkdir() + include = spack.config.included_path(path) + assert isinstance(include, spack.config.IncludePath) + assert include.path == str(path) + assert not include.optional + assert include.evaluate_condition() + + parent_scope = mock_low_high_config.scopes["low"] + + # Trigger failure when required path does not exist + with pytest.raises(ValueError, match="does not exist"): + include.scopes(parent_scope) + + # First successful pass builds the scope + path.touch() + scopes = include.scopes(parent_scope) + assert scopes and len(scopes) == 1 + assert isinstance(scopes[0], spack.config.SingleFileScope) + + # Second pass uses the scopes previously built + assert include._scopes is not None + scopes = include.scopes(parent_scope) + captured = capfd.readouterr()[1] + assert "Using existing scopes" in captured + + +def test_included_path_string_no_parent_path( + tmp_path: pathlib.Path, config, ensure_debug, monkeypatch +): + """Use a relative include path and no parent scope path so destination + will be rooted in the current working directory (usually SPACK_ROOT).""" + entry = {"path": "config.yaml", "optional": True} + include = spack.config.included_path(entry) + parent_scope = spack.config.InternalConfigScope("parent-scope") + included_scopes = include.scopes(parent_scope) + # ensure scope is returned even if there is no parent path + assert len(included_scopes) == 1 + # ensure scope for include is singlefile as it ends in .yaml + assert isinstance(included_scopes[0], spack.config.SingleFileScope) + destination = include.destination + curr_dir = os.getcwd() + assert curr_dir == os.path.commonprefix([curr_dir, destination]) # type: ignore[list-item] + + +def test_included_path_conditional_bad_when( + tmp_path: pathlib.Path, mock_low_high_config, ensure_debug, capfd +): + path = tmp_path / "local" + path.mkdir() + entry = {"path": str(path), "when": 'platform == "nosuchplatform"', 
"optional": True} + include = spack.config.included_path(entry) + assert isinstance(include, spack.config.IncludePath) + assert include.path == entry["path"] + assert include.when == entry["when"] + assert include.optional + assert not include.evaluate_condition() + + scopes = include.scopes(mock_low_high_config.scopes["low"]) + captured = capfd.readouterr()[1] + assert "condition is not satisfied" in captured + assert not scopes + + +def test_included_path_conditional_success(tmp_path: pathlib.Path, mock_low_high_config): + path = tmp_path / "local" + path.mkdir() + entry = {"path": str(path), "when": 'platform == "test"', "optional": True} + include = spack.config.included_path(entry) + assert isinstance(include, spack.config.IncludePath) + assert include.path == entry["path"] + assert include.when == entry["when"] + assert include.optional + assert include.evaluate_condition() + + scopes = include.scopes(mock_low_high_config.scopes["low"]) + assert scopes and len(scopes) == 1 + assert isinstance(scopes[0], spack.config.DirectoryConfigScope) + + +def test_included_path_git_missing_args(): + # must have one or more of: branch, tag and commit so fail if missing any + entry = {"git": "https://example.com/windows/configs.git", "paths": ["config.yaml"]} + with pytest.raises(spack.error.ConfigError, match="specify one or more"): + spack.config.included_path(entry) + + # must have one or more paths + entry["tag"] = "v1.0" + entry["paths"] = [] + with pytest.raises(spack.error.ConfigError, match="must include one or more"): + spack.config.included_path(entry) + + +def test_included_path_git_unsat( + tmp_path: pathlib.Path, mock_low_high_config, ensure_debug, monkeypatch, capfd +): + paths = ["config.yaml", "packages.yaml"] + entry = { + "git": "https://example.com/windows/configs.git", + "tag": "v1.0", + "paths": paths, + "when": 'platform == "nosuchplatform"', + } + include = spack.config.included_path(entry) + assert isinstance(include, spack.config.GitIncludePaths) + assert include.repo == entry["git"] + assert include.tag == entry["tag"] + assert include.paths == entry["paths"] + assert include.when == entry["when"] + assert not include.optional and not include.evaluate_condition() + + scopes = include.scopes(mock_low_high_config.scopes["low"]) + captured = capfd.readouterr()[1] + assert "condition is not satisfied" in captured + assert not scopes + + +@pytest.mark.parametrize( + "key,value", [("branch", "main"), ("commit", "abcdef123456"), ("tag", "v1.0")] +) +def test_included_path_git( + tmp_path: pathlib.Path, mock_low_high_config, ensure_debug, monkeypatch, key, value, capfd +): + monkeypatch.setattr(spack.paths, "user_cache_path", str(tmp_path)) + + class MockIncludeGit(spack.util.executable.Executable): + def __init__(self, required: bool): + pass + + def __call__(self, *args, **kwargs) -> str: # type: ignore + action = args[0] + + if action == "config": + return "origin" + + return "" + + paths = ["config.yaml", "packages.yaml"] + entry = { + "git": "https://example.com/windows/configs.git", + key: value, + "paths": paths, + "when": 'platform == "test"', + } + include = spack.config.included_path(entry) + assert isinstance(include, spack.config.GitIncludePaths) + assert not include.optional and include.evaluate_condition() + + destination = include._destination() + assert not os.path.exists(destination) + + # set up minimal git and repository operations + monkeypatch.setattr(spack.util.git, "git", MockIncludeGit) + + def _init_repo(*args, **kwargs): + 
fs.mkdirp(fs.join_path(destination, ".git")) + + def _checkout(*args, **kwargs): + # Make sure the files exist at the clone destination + with fs.working_dir(destination): + for p in paths: + fs.touch(p) + + monkeypatch.setattr(spack.util.git, "init_git_repo", _init_repo) + monkeypatch.setattr(spack.util.git, f"pull_checkout_{key}", _checkout) + + # First successful pass builds the scope + parent_scope = mock_low_high_config.scopes["low"] + scopes = include.scopes(parent_scope) + assert scopes and len(scopes) == len(paths) + for scope in scopes: + assert isinstance(scope, spack.config.SingleFileScope) + assert os.path.basename(scope.path) in paths # type: ignore[union-attr] + + # Second pass uses the scopes previously built. + # Only need to do this for one of the parameters. + if key == "branch": + assert include._scopes is not None + scopes = include.scopes(parent_scope) + captured = capfd.readouterr()[1] + assert "Using existing scopes" in captured + + # A direct clone now returns already cloned destination and debug message. + # Again only need to run this test once. + if key == "tag": + assert include._clone() == include.destination + captured = capfd.readouterr()[1] + assert "already cloned" in captured + + +def test_included_path_git_errs(tmp_path: pathlib.Path, mock_low_high_config, monkeypatch): + monkeypatch.setattr(spack.paths, "user_cache_path", str(tmp_path)) + + paths = ["concretizer.yaml"] + entry = { + "git": "https://example.com/linux/configs.git", + "branch": "develop", + "paths": paths, + "when": 'platform == "test"', + } + include = spack.config.included_path(entry) + parent_scope = mock_low_high_config.scopes["low"] + + # fail to initialize the repository + def _failing_init(*args, **kwargs): + raise spack.util.executable.ProcessError("mock init repo failure") + + monkeypatch.setattr(spack.util.git, "init_git_repo", _failing_init) + + with pytest.raises(spack.error.ConfigError, match="Unable to initialize"): + include.scopes(parent_scope) + + # fail in git config (so use default remote) *and* git checkout + def _init_repo(*args, **kwargs): + fs.mkdirp(fs.join_path(include.destination, ".git")) + + class MockIncludeGit(spack.util.executable.Executable): + def __init__(self, required: bool): + pass + + def __call__(self, *args, **kwargs) -> str: # type: ignore + raise spack.util.executable.ProcessError("mock git failure") + + monkeypatch.setattr(spack.util.git, "init_git_repo", _init_repo) + monkeypatch.setattr(spack.util.git, "git", MockIncludeGit) + + with pytest.raises(spack.error.ConfigError, match="Unable to check out"): + include.scopes(parent_scope) + + # set up invalid option failure + include.branch = "" # type: ignore[union-attr] + with pytest.raises(spack.error.ConfigError, match="Missing or unsupported options"): + include.scopes(parent_scope) + + +def test_missing_include_scope_list(mock_missing_dir_include_scopes): + """Tests that an included scope with a non existent file/directory + is still listed as a scope under spack.config.CONFIG.scopes""" + assert "sub_base" in list( + spack.config.CONFIG.scopes + ), "Missing Optional Scope Missing from Config Scopes" + + +def test_missing_include_scope_writable_list(mock_missing_dir_include_scopes): + """Tests that missing include scopes are included in writeable config lists""" + assert [x for x in spack.config.CONFIG.writable_scopes if x.name == "sub_base"] + + +def test_missing_include_scope_not_readable_list(mock_missing_dir_include_scopes): + """Tests that missing include scopes are not included in existing 
config lists""" + existing_scopes = [x for x in spack.config.CONFIG.existing_scopes if x.name != "sub_base"] + assert len(existing_scopes) == 1 + assert existing_scopes[0].name != "sub_base" + + +def test_missing_include_scope_default_created_as_dir_scope(mock_missing_dir_include_scopes): + """Tests that an optional include with no existing file/directory and no yaml extension + is created as a DirectoryConfigScope object""" + missing_inc_scope = spack.config.CONFIG.scopes["sub_base"] + assert isinstance(missing_inc_scope, spack.config.DirectoryConfigScope) + + +def test_missing_include_scope_yaml_ext_is_file_scope(mock_missing_file_include_scopes): + """Tests that an optional include scope with no existing file/directory and a + yaml extension is created as a file scope""" + missing_inc_scope = spack.config.CONFIG.scopes["sub_base"] + assert isinstance(missing_inc_scope, spack.config.SingleFileScope) + + +def test_missing_include_scope_writeable_not_readable(mock_missing_dir_include_scopes): + """Tests that an included scope with a non existent file/directory + can be written to (and created)""" + assert spack.config.CONFIG.scopes[ + "sub_base" + ].writable, "Missing Optional Scope should be writable" + assert not spack.config.CONFIG.scopes[ + "sub_base" + ].exists, "Missing Optional Scope should not exist" + + +def test_missing_include_scope_empty_read(mock_missing_dir_include_scopes): + """Tests that an included scope with a non existent file/directory + returns an empty dict on read and has "exists" set to false""" + assert ( + spack.config.CONFIG.get("config", scope="sub_base") == {} + ), "Missing optional include scope does not return an empty value." + assert not spack.config.CONFIG.scopes[ + "sub_base" + ].exists, "Missing optional include should not be created on read" + + +def test_missing_include_scope_file_empty_read(mock_missing_file_include_scopes): + """Tests that an include scope with a non existent file returns an empty + dict and has "exists" set to false""" + assert ( + spack.config.CONFIG.get("config", scope="sub_base") == {} + ), "Missing optional include scope does not return an empty value."
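+ # as above, reading through the missing scope must not create the file on disk as a side effect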
+ assert not spack.config.CONFIG.scopes[ + "sub_base" + ].exists, "Missing optional include should not be created on read" + + +def test_missing_include_scope_write_directory(mock_missing_dir_include_scopes): + """Tests that an include scope with a non existent directory + creates said directory and the appropriate section file on write""" + install_tree = syaml.syaml_dict({"install_tree": {"root": "$spack/tmp/spack"}}) + spack.config.CONFIG.set("config", install_tree, scope="sub_base") + assert os.path.exists(spack.config.CONFIG.scopes["sub_base"].path) + install_root = spack.config.CONFIG.get("config:install_tree:root", scope="sub_base") + assert install_root == "$spack/tmp/spack" + + +def test_missing_include_scope_write_file(mock_missing_file_include_scopes): + """Tests that an include scope with a non existent file creates said file + with the appropriate section entry""" + install_tree = syaml.syaml_dict({"install_tree": {"root": "$spack/tmp/spack"}}) + spack.config.CONFIG.set("config", install_tree, scope="sub_base") + assert os.path.exists(spack.config.CONFIG.scopes["sub_base"].path) + install_root = spack.config.CONFIG.get("config:install_tree:root", scope="sub_base") + assert install_root == "$spack/tmp/spack" diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py index 1f1794a4f044d3..bf7d7dd1a62ec0 100644 --- a/lib/spack/spack/test/conftest.py +++ b/lib/spack/spack/test/conftest.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) +import base64 import collections import datetime import email.message @@ -19,7 +20,7 @@ import tempfile import xml.etree.ElementTree from pathlib import Path -from typing import List, Optional, Tuple +from typing import Callable, List, Optional, Tuple import pytest @@ -28,7 +29,7 @@ import spack.vendor.archspec.cpu.schema import spack.binary_distribution -import spack.bootstrap.core +import spack.bootstrap import spack.caches import spack.compilers.config import spack.compilers.libraries @@ -37,15 +38,19 @@ import spack.directives_meta import spack.environment as ev import spack.error +import spack.extensions +import spack.hash_types import spack.llnl.util.lang import spack.llnl.util.lock import spack.llnl.util.tty as tty +import spack.llnl.util.tty.color import spack.modules.common import spack.package_base import spack.paths import spack.platforms import spack.repo import spack.solver.asp +import spack.solver.reuse import spack.spec import spack.stage import spack.store @@ -267,9 +272,11 @@ def mock_git_package_changes(git, tmp_path: Path, override_git_repos_cache_path, The structure of commits in this repo is as follows:: - o diff-test: add v1.2 (from a git ref) + o diff-test: add v2.1.7 and v2.1.8 (invalid duplicated checksum) | - o diff-test: add v1.1 (from source tarball) + o diff-test: add v2.1.6 (from a git ref) + | + o diff-test: add v2.1.5 (from source tarball) | o diff-test: new package (testing multiple added versions) @@ -312,24 +319,30 @@ def latest_commit(): os.makedirs(os.path.dirname(filename)) - # add pkg-a as a new package to the repository + # add diff-test as a new package to the repository shutil.copy2(f"{spack.paths.test_path}/data/conftest/diff-test/package-0.txt", filename) git("add", filename) commit("diff-test: new package") commits.append(latest_commit()) - # add v2.1.5 to pkg-a + # add v2.1.5 to diff-test shutil.copy2(f"{spack.paths.test_path}/data/conftest/diff-test/package-1.txt", filename) git("add", filename) commit("diff-test: add v2.1.5") commits.append(latest_commit()) - # 
add v2.1.6 to pkg-a + # add v2.1.6 to diff-test shutil.copy2(f"{spack.paths.test_path}/data/conftest/diff-test/package-2.txt", filename) git("add", filename) commit("diff-test: add v2.1.6") commits.append(latest_commit()) + # add v2.1.7 and v2.1.8 to diff-test + shutil.copy2(f"{spack.paths.test_path}/data/conftest/diff-test/package-3.txt", filename) + git("add", filename) + commit("diff-test: add v2.1.7 and v2.1.8") + commits.append(latest_commit()) + # The commits are ordered with the last commit first in the list commits = list(reversed(commits)) @@ -340,7 +353,7 @@ def latest_commit(): @pytest.fixture(autouse=True) def clear_recorded_monkeypatches(): yield - spack.subprocess_context.clear_patches() + spack.subprocess_context.MONKEYPATCHES.clear() @pytest.fixture(scope="session", autouse=True) @@ -350,7 +363,7 @@ def record_monkeypatch_setattr(): saved_setattr = _pytest.monkeypatch.MonkeyPatch.setattr def record_setattr(cls, target, name, value, *args, **kwargs): - spack.subprocess_context.append_patch((target, name, value)) + spack.subprocess_context.MONKEYPATCHES.append((target, name)) saved_setattr(cls, target, name, value, *args, **kwargs) _pytest.monkeypatch.MonkeyPatch.setattr = record_setattr @@ -429,15 +442,16 @@ def pytest_collection_modifyitems(config, items): @pytest.fixture(scope="function") -def use_concretization_cache(mutable_config, tmp_path: Path): +def use_concretization_cache(mock_packages, mutable_config, tmp_path: Path): """Enables the use of the concretization cache""" - spack.config.set("config:concretization_cache:enable", True) - # ensure we have an isolated concretization cache conc_cache_dir = tmp_path / "concretization" conc_cache_dir.mkdir() - new_conc_cache_loc = str(conc_cache_dir) - spack.config.set("config:concretization_cache:path", new_conc_cache_loc) - yield + + # ensure we have an isolated concretization cache while using fixture + with spack.config.override( + "concretizer:concretization_cache", {"enable": True, "url": str(conc_cache_dir)} + ): + yield conc_cache_dir # @@ -499,6 +513,25 @@ def mock_stage(tmp_path_factory: pytest.TempPathFactory, monkeypatch, request): shutil.rmtree(new_stage, onerror=onerror) +@pytest.fixture(scope="session") +def mock_stage_for_database(tmp_path_factory: pytest.TempPathFactory, monkeypatch_session): + """A session-scoped analog of mock_stage, so that the mock_store + fixture uses its own stage vs. the global stage root for spack. + """ + new_stage = tmp_path_factory.mktemp("mock-stage") + + source_path = new_stage / spack.stage._source_path_subdir + source_path.mkdir(parents=True, exist_ok=True) + + monkeypatch_session.setattr(spack.stage, "_stage_root", str(new_stage)) + + yield str(new_stage) + + # Clean up the test stage directory + if new_stage.is_dir(): + shutil.rmtree(new_stage, onerror=onerror) + + @pytest.fixture(scope="session") def ignore_stage_files(): """Session-scoped helper for check_for_leftover_stage_files. @@ -678,6 +711,28 @@ def mock_pkg_install(monkeypatch): monkeypatch.setattr(spack.package_base.PackageBase, "install", _pkg_install_fn, raising=False) +@pytest.fixture(scope="function") +def fake_db_install(tmp_path): + """This fakes "enough" of the installation process to make Spack + think of a spec as being installed as far as the concretizer + and parser are concerned. It does not run any build phase defined + in the package, simply acting as though the installation had + completed successfully. 
+ + It allows doing things like + + ``spack.concretize.concretize_one(f"x ^/hash-of-y")`` + + after doing something like ``fake_db_install(y)`` + """ + with spack.store.use_store(str(tmp_path)) as the_store: + + def _install(a_spec): + the_store.db.add(a_spec) + + yield _install + + @pytest.fixture(scope="function") def mock_packages(mock_packages_repo, mock_pkg_install, request): """Use the 'builtin_mock' repository instead of 'builtin'""" @@ -705,6 +760,7 @@ def __init__(self, root_directory: str) -> None: namespace = f"test_namespace_{RepoBuilder._counter}" repo_root = os.path.join(root_directory, namespace) os.makedirs(repo_root, exist_ok=True) + self.template_dirs = (os.path.join(spack.paths.share_path, "templates"),) self.root, self.namespace = spack.repo.create_repo(repo_root, namespace) self.build_system_name = f"test_build_system_{self.namespace}" self._add_build_system() @@ -727,7 +783,9 @@ def add_package( "cls_name": spack.util.naming.pkg_name_to_class_name(name), "dependencies": dependencies, } - template = spack.tengine.make_environment().get_template("mock-repository/package.pyt") + template = spack.tengine.make_environment_from_dirs(self.template_dirs).get_template( + "mock-repository/package.pyt" + ) package_py = self._recipe_filename(name) os.makedirs(os.path.dirname(package_py), exist_ok=True) with open(package_py, "w", encoding="utf-8") as f: @@ -740,7 +798,7 @@ def remove(self, name: str) -> None: def _add_build_system(self) -> None: """Add spack_repo..build_systems.test_build_system with build_system=test_build_system_.""" - template = spack.tengine.make_environment().get_template( + template = spack.tengine.make_environment_from_dirs(self.template_dirs).get_template( "mock-repository/build_system.pyt" ) text = template.render({"build_system_name": self.build_system_name}) @@ -885,7 +943,7 @@ def _create_mock_configuration_scopes(configuration_dir): """Create the configuration scopes used in `config` and `mutable_config`.""" return [ ( - ConfigScopePriority.BUILTIN, + ConfigScopePriority.DEFAULTS, spack.config.InternalConfigScope("_builtin", spack.config.CONFIG_DEFAULTS), ), ( @@ -951,16 +1009,10 @@ def monkeypatch_session(): yield monkeypatch -@pytest.fixture(scope="session", autouse=True) -def mock_wsdk_externals(monkeypatch_session): - """Skip check for required external packages on Windows during testing - Note: In general this should cover this behavior for all tests, - however any session scoped fixture involving concretization should - include this fixture - """ - monkeypatch_session.setattr( - spack.bootstrap.core, "ensure_winsdk_external_or_raise", _return_none - ) +@pytest.fixture(autouse=True) +def mock_wsdk_externals(monkeypatch): + """Skip check for required external packages on Windows during testing.""" + monkeypatch.setattr(spack.bootstrap, "ensure_winsdk_external_or_raise", _return_none) @pytest.fixture(scope="function") @@ -1000,6 +1052,49 @@ def mock_low_high_config(tmp_path: Path): yield config +def create_config_scope(path: Path, name: str) -> spack.config.DirectoryConfigScope: + """helper for creating config scopes with included file/directory scopes + that do not have existing representation on the filesystem""" + base_scope_dir = path / "base" + config_data = syaml.syaml_dict( + { + "include": [ + { + "name": "sub_base", + "path": str(path / name), + "optional": True, + "prefer_modify": True, + } + ] + } + ) + base_scope_dir.mkdir() + with open(str(base_scope_dir / "include.yaml"), "w+", encoding="utf-8") as f: + syaml.dump_config(config_data, 
stream=f, default_flow_style=False) + scope = spack.config.DirectoryConfigScope("base", str(base_scope_dir)) + return scope + + +@pytest.fixture() +def mock_missing_dir_include_scopes(tmp_path: Path): + """Mocks a config scope containing optional directory scope + includes that do not have representation on the filesystem""" + scope = create_config_scope(tmp_path, "sub") + + with spack.config.use_configuration(scope) as config: + yield config + + +@pytest.fixture +def mock_missing_file_include_scopes(tmp_path: Path): + """Mocks a config scope containing optional file scope + includes that do not have representation on the filesystem""" + scope = create_config_scope(tmp_path, "sub.yaml") + + with spack.config.use_configuration(scope) as config: + yield config + + def _populate(mock_db): r"""Populate a mock database with packages. @@ -1046,10 +1141,10 @@ def _store_dir_and_cache(tmp_path_factory: pytest.TempPathFactory): @pytest.fixture(scope="session") def mock_store( tmp_path_factory: pytest.TempPathFactory, - mock_wsdk_externals, mock_packages_repo, mock_configuration_scopes, _store_dir_and_cache: Tuple[Path, Path], + mock_stage_for_database, ): """Creates a read-only mock database with some packages installed note that the ref count for dyninst here will be 3, as it's recycled @@ -1060,6 +1155,7 @@ """ store_path, store_cache = _store_dir_and_cache + _mock_wsdk_externals = spack.bootstrap.ensure_winsdk_external_or_raise # Make the DB filesystem read-only to ensure constructors don't modify anything in it. # We want Spack to be able to point to a DB on a read-only filesystem easily. @@ -1072,7 +1168,11 @@ with spack.repo.use_repositories(mock_packages_repo): # make the DB filesystem writable only while we populate it _recursive_chmod(store_path, 0o755) - _populate(store.db) + try: + spack.bootstrap.ensure_winsdk_external_or_raise = _return_none + _populate(store.db) + finally: + spack.bootstrap.ensure_winsdk_external_or_raise = _mock_wsdk_externals _recursive_chmod(store_path, 0o555) _recursive_chmod(store_cache, 0o755) @@ -1165,32 +1265,11 @@ def install_mockery(temporary_store: spack.store.Store, mutable_config, mock_pac temporary_store.failure_tracker.clear_all() -@pytest.fixture(scope="module") -def temporary_mirror_dir(tmp_path_factory: pytest.TempPathFactory): - dir = tmp_path_factory.mktemp("mirror") - yield str(dir) - - -@pytest.fixture(scope="function") -def temporary_mirror(temporary_mirror_dir): - mirror_url = url_util.path_to_file_url(temporary_mirror_dir) - mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url) - yield temporary_mirror_dir - mirror_cmd("rm", "--scope=site", "test-mirror-func") - - @pytest.fixture(scope="function") -def mutable_temporary_mirror_dir(tmp_path_factory: pytest.TempPathFactory): - dir = tmp_path_factory.mktemp("mirror") - yield str(dir) - - -@pytest.fixture(scope="function") -def mutable_temporary_mirror(mutable_temporary_mirror_dir): - mirror_url = url_util.path_to_file_url(mutable_temporary_mirror_dir) - mirror_cmd("add", "--scope", "site", "test-mirror-func", mirror_url) - yield mutable_temporary_mirror_dir - mirror_cmd("rm", "--scope=site", "test-mirror-func") +def temporary_mirror(mutable_config, tmp_path_factory): + mirror_dir = tmp_path_factory.mktemp("mirror") + mirror_cmd("add", "test-mirror-func", mirror_dir.as_uri()) + yield str(mirror_dir) @pytest.fixture(scope="function") @@ -1212,6 +1291,21 @@ def mock_fetch(mock_archive, monkeypatch): ) +class MockResourceFetcherGenerator: + def 
__init__(self, url): + self.url = url + + def _generate_fetchers(self, *args, **kwargs): + return [URLFetchStrategy(url=self.url)] + + +@pytest.fixture() +def mock_resource_fetch(mock_archive, monkeypatch): + """Fake fetcher generator that works with resource stages to redirect to a file.""" + mfg = MockResourceFetcherGenerator(mock_archive.url) + monkeypatch.setattr(spack.stage.ResourceStage, "_generate_fetchers", mfg._generate_fetchers) + + class MockLayout: def __init__(self, root): self.root = root @@ -1621,6 +1715,10 @@ def mock_git_repository(git, tmp_path_factory: pytest.TempPathFactory): rev_hash = lambda x: git("rev-parse", x, output=str).strip() r2 = rev_hash(default_branch) + # annotated tag + a_tag = "annotated-tag" + git("tag", "-a", a_tag, "-m", "annotated tag") + # Record the commit hash of the (only) commit from test-branch and # the file added by that commit r1 = rev_hash(branch) @@ -1659,6 +1757,7 @@ def mock_git_repository(git, tmp_path_factory: pytest.TempPathFactory): ), "tag": Bunch(revision=tag, file=tag_file, args={"git": url, "tag": tag}), "commit": Bunch(revision=r1, file=r1_file, args={"git": url, "commit": r1}), + "annotated-tag": Bunch(revision=a_tag, file=r2_file, args={"git": url, "tag": a_tag}), # In this case, the version() args do not include a 'git' key: # this is the norm for packages, so this tests how the fetching logic # would most-commonly assemble a Git fetcher @@ -1972,6 +2071,32 @@ def brand_new_binary_cache(): ) +def _trivial_package_hash(spec: spack.spec.Spec) -> str: + """Return a trivial package hash for tests to avoid expensive AST parsing.""" + # Pad package name to consistent length and cap at 32 chars for realistic hash length + return base64.b32encode(f"{spec.name:<32}".encode()[:32]).decode().lower() + + +@pytest.fixture(autouse=True) +def mock_package_hash_for_tests(request, monkeypatch): + """Replace expensive package hash computation with trivial one for tests. + Tests can force the real package hash by using the @pytest.mark.use_package_hash marker.""" + if "use_package_hash" in request.keywords: + yield + return + pkg_hash = spack.hash_types.package_hash + idx = spack.hash_types.HASHES.index(pkg_hash) + mock_pkg_hash = spack.hash_types.SpecHashDescriptor( + depflag=0, package_hash=True, name="package_hash", override=_trivial_package_hash + ) + monkeypatch.setattr(spack.hash_types, "package_hash", mock_pkg_hash) + try: + spack.hash_types.HASHES[idx] = mock_pkg_hash + yield + finally: + spack.hash_types.HASHES[idx] = pkg_hash + + @pytest.fixture() def noncyclical_dir_structure(tmp_path: Path): """ @@ -2073,7 +2198,10 @@ def _fetch_text_file(url, dest_dir): @pytest.fixture(scope="function") def mock_tty_stdout(monkeypatch): + """Make sys.stdout.isatty() return True, while forcing no color output.""" monkeypatch.setattr(sys.stdout, "isatty", lambda: True) + with spack.llnl.util.tty.color.color_when("never"): + yield @pytest.fixture @@ -2131,7 +2259,9 @@ def concretized_specs_cache(): @pytest.fixture -def default_mock_concretization(config, mock_packages, concretized_specs_cache): +def default_mock_concretization( + config, mock_packages, concretized_specs_cache +) -> Callable[[str], spack.spec.Spec]: """Return the default mock concretization of a spec literal, obtained using the mock repository and the mock configuration. 
@@ -2190,14 +2320,12 @@ def pytest_runtest_setup(item): pytest.skip(*only_windows_marker.args) -def _sequential_executor(*args, **kwargs): - return spack.util.parallel.SequentialExecutor() - - @pytest.fixture(autouse=True) -def disable_parallel_buildcache_push(monkeypatch): - """Disable process pools in tests.""" - monkeypatch.setattr(spack.util.parallel, "make_concurrent_executor", _sequential_executor) +def disable_parallelism(monkeypatch, request): + """Disable process pools in tests. Enabled by default to avoid oversubscription when running + under pytest-xdist. Can be overridden with `@pytest.mark.enable_parallelism`.""" + if "enable_parallelism" not in request.keywords: + monkeypatch.setattr(spack.util.parallel, "ENABLE_PARALLELISM", False) def _root_path(x, y, *, path): @@ -2236,12 +2364,12 @@ def _true(x): def _libc_from_python(self): - return spack.spec.Spec("glibc@=2.28") + return spack.spec.Spec("glibc@=2.28", external_path="/some/path") @pytest.fixture() def do_not_check_runtimes_on_reuse(monkeypatch): - monkeypatch.setattr(spack.solver.asp, "_has_runtime_dependencies", _true) + monkeypatch.setattr(spack.solver.reuse, "_has_runtime_dependencies", _true) @pytest.fixture(autouse=True, scope="session") @@ -2285,6 +2413,9 @@ def with_json(cls, status, reason, headers=None, body=None): body = io.BytesIO(json.dumps(body).encode("utf-8")) return cls(status, reason, headers, body) + def readable(self): + return True + def read(self, *args, **kwargs): return self._body.read(*args, **kwargs) @@ -2363,7 +2494,7 @@ def skip_provenance_check(monkeypatch, request): @pytest.mark.require_provenance decorator """ if "require_provenance" not in request.keywords: - monkeypatch.setattr(spack.package_base.PackageBase, "resolve_binary_provenance", _noop) + monkeypatch.setattr(spack.package_base.PackageBase, "_resolve_git_provenance", _noop) @pytest.fixture(scope="function") @@ -2387,3 +2518,38 @@ def config_two_gccs(mutable_config): }, ], ) + + +@pytest.fixture(scope="function") +def mock_util_executable(monkeypatch): + logger = [] + should_fail = [] + registered_responses = {} + + def mock_call(self, *args, **kwargs): + cmd = self.exe + list(args) + str_cmd = " ".join(map(str, cmd)) + logger.append(str_cmd) + for failure_key in should_fail: + if failure_key in str_cmd: + self.returncode = 1 + if kwargs.get("fail_on_error", True): + raise spack.util.executable.ProcessError(f"Failed: {str_cmd}") + return + for key, value in registered_responses.items(): + if key in str_cmd: + return value + self.returncode = 0 + + monkeypatch.setattr(spack.util.executable.Executable, "__call__", mock_call) + yield logger, should_fail, registered_responses + + +@pytest.fixture() +def reset_extension_paths(): + """Clears the cache used for entry points, both in setup and tear-down.
+ Needed if a test stresses parts related to computing paths for Spack extensions + """ + spack.extensions.extension_paths_from_entry_points.cache_clear() + yield + spack.extensions.extension_paths_from_entry_points.cache_clear() diff --git a/lib/spack/spack/test/container/cli.py b/lib/spack/spack/test/container/cli.py index 73221a2e0bfe3e..3b7f970908a0e3 100644 --- a/lib/spack/spack/test/container/cli.py +++ b/lib/spack/spack/test/container/cli.py @@ -10,10 +10,9 @@ containerize = spack.main.SpackCommand("containerize") -def test_command(default_config, container_config_dir, capsys): - with capsys.disabled(): - with fs.working_dir(container_config_dir): - output = containerize() +def test_command(default_config, container_config_dir): + with fs.working_dir(container_config_dir): + output = containerize() assert "FROM spack/ubuntu-jammy" in output @@ -26,16 +25,15 @@ def test_listing_possible_os(): @pytest.mark.maybeslow @pytest.mark.requires_executables("git") -def test_bootstrap_phase(minimal_configuration, config_dumper, capsys): +def test_bootstrap_phase(minimal_configuration, config_dumper): minimal_configuration["spack"]["container"]["images"] = { "os": "amazonlinux:2", "spack": {"resolve_sha": False}, } spack_yaml_dir = config_dumper(minimal_configuration) - with capsys.disabled(): - with fs.working_dir(spack_yaml_dir): - output = containerize() + with fs.working_dir(spack_yaml_dir): + output = containerize() # Check for the presence of the Git commands assert "git init" in output diff --git a/lib/spack/spack/test/container/images.py b/lib/spack/spack/test/container/images.py index 0a2836c17b86d0..7feaef2dd5c8d2 100644 --- a/lib/spack/spack/test/container/images.py +++ b/lib/spack/spack/test/container/images.py @@ -36,7 +36,7 @@ def test_package_info(image): ({"modules": {"enable": ["tcl"]}}, 'the subsection "modules" in'), ({"concretizer": {"unify": False}}, '"concretizer:unify" is not set to "true"'), ( - {"config": {"install_tree": "/some/dir"}}, + {"config": {"install_tree": {"root": "/some/dir"}}}, 'the "config:install_tree" attribute has been set', ), ({"view": "/some/dir"}, 'the "view" attribute has been set'), diff --git a/lib/spack/spack/test/cray_manifest.py b/lib/spack/spack/test/cray_manifest.py index 30fa776ccb3fbc..61ec79fbcd6bbc 100644 --- a/lib/spack/spack/test/cray_manifest.py +++ b/lib/spack/spack/test/cray_manifest.py @@ -15,15 +15,14 @@ import spack.vendor.archspec.cpu -import spack import spack.cmd import spack.cmd.external import spack.compilers.config import spack.concretize -import spack.cray_manifest as cray_manifest +import spack.cray_manifest import spack.platforms import spack.platforms.test -import spack.solver.asp +import spack.solver.reuse import spack.spec import spack.store from spack.cray_manifest import compiler_from_entry, entries_to_specs @@ -283,7 +282,7 @@ def the_host_is_linux(): [("nvidia", "nvhpc"), ("rocm", "llvm-amdgpu"), ("clang", "llvm")], ) def test_translated_compiler_name(name_in_manifest, expected_name): - assert cray_manifest.translated_compiler_name(name_in_manifest) == expected_name + assert spack.cray_manifest.translated_compiler_name(name_in_manifest) == expected_name def test_failed_translate_compiler_name(_common_arch): @@ -325,7 +324,7 @@ def test_read_cray_manifest(temporary_store, manifest_file): """Check that (a) we can read the cray manifest and add it to the Spack Database and (b) we can concretize specs based on that. 
""" - cray_manifest.read(str(manifest_file), True) + spack.cray_manifest.read(str(manifest_file), True) query_specs = temporary_store.db.query("openmpi") assert any(x.dag_hash() == "openmpifakehasha" for x in query_specs) @@ -342,9 +341,9 @@ def _mock(entry, *, manifest_path): raise RuntimeError("cannot determine the compiler") return spack.spec.Spec(f"{entry['name']}@{entry['version']}") - monkeypatch.setattr(cray_manifest, "compiler_from_entry", _mock) + monkeypatch.setattr(spack.cray_manifest, "compiler_from_entry", _mock) - cray_manifest.read(str(manifest_file), True) + spack.cray_manifest.read(str(manifest_file), True) query_specs = spack.store.STORE.db.query("openmpi") assert any(x.dag_hash() == "openmpifakehasha" for x in query_specs) @@ -355,11 +354,11 @@ def test_read_cray_manifest_twice_no_duplicates( def _mock(entry, *, manifest_path): return spack.spec.Spec(f"{entry['name']}@{entry['version']}", external_path=str(tmp_path)) - monkeypatch.setattr(cray_manifest, "compiler_from_entry", _mock) + monkeypatch.setattr(spack.cray_manifest, "compiler_from_entry", _mock) # Read the manifest twice - cray_manifest.read(str(manifest_file), True) - cray_manifest.read(str(manifest_file), True) + spack.cray_manifest.read(str(manifest_file), True) + spack.cray_manifest.read(str(manifest_file), True) config_data = mutable_config.get("packages")["gcc"] assert "externals" in config_data @@ -385,7 +384,7 @@ def test_read_old_manifest_v1_2(tmp_path: pathlib.Path, temporary_store): } """ ) - cray_manifest.read(str(manifest), True) + spack.cray_manifest.read(str(manifest), True) def test_convert_validation_error( @@ -401,8 +400,8 @@ def test_convert_validation_error( { """ ) - with pytest.raises(cray_manifest.ManifestValidationError) as e: - cray_manifest.read(invalid_json_path, True) + with pytest.raises(spack.cray_manifest.ManifestValidationError) as e: + spack.cray_manifest.read(invalid_json_path, True) str(e) # Valid JSON, but does not conform to schema (schema-version is not a string @@ -421,8 +420,8 @@ def test_convert_validation_error( } """ ) - with pytest.raises(cray_manifest.ManifestValidationError) as e: - cray_manifest.read(invalid_schema_path, True) + with pytest.raises(spack.cray_manifest.ManifestValidationError) as e: + spack.cray_manifest.read(invalid_schema_path, True) @pytest.fixture @@ -450,13 +449,13 @@ def test_find_external_nonempty_default_manifest_dir( def test_reusable_externals_cray_manifest(temporary_store, manifest_file): """The concretizer should be able to reuse specs imported from a manifest without a externals config entry in packages.yaml""" - cray_manifest.read(path=str(manifest_file), apply_updates=True) + spack.cray_manifest.read(path=str(manifest_file), apply_updates=True) # Get any imported spec spec = temporary_store.db.query_local()[0] # Reusable if imported locally - assert spack.solver.asp._is_reusable(spec, packages={}, local=True) + assert spack.solver.reuse._is_reusable(spec, packages_with_externals={}, local=True) # If cray manifest entries end up in a build cache somehow, they are not reusable - assert not spack.solver.asp._is_reusable(spec, packages={}, local=False) + assert not spack.solver.reuse._is_reusable(spec, packages_with_externals={}, local=False) diff --git a/lib/spack/spack/test/data/config/base/config.yaml b/lib/spack/spack/test/data/config/base/config.yaml new file mode 100644 index 00000000000000..044bdf10c4ca9d --- /dev/null +++ b/lib/spack/spack/test/data/config/base/config.yaml @@ -0,0 +1,3 @@ +# This file is here strictly so that the 
base include directory will work +config: + dirty: false diff --git a/lib/spack/spack/test/data/config/bootstrap.yaml b/lib/spack/spack/test/data/config/bootstrap.yaml index 43c4405350acf8..c9c5e1d5949c18 100644 --- a/lib/spack/spack/test/data/config/bootstrap.yaml +++ b/lib/spack/spack/test/data/config/bootstrap.yaml @@ -1,5 +1,5 @@ bootstrap: sources: - name: 'github-actions' - metadata: $spack/share/spack/bootstrap/github-actions-v0.6 + metadata: $spack/share/spack/bootstrap/github-actions-v2 trusted: {} diff --git a/lib/spack/spack/test/data/config/concretizer.yaml b/lib/spack/spack/test/data/config/concretizer.yaml index a89a42a90660d0..a1a30ff0280bde 100644 --- a/lib/spack/spack/test/data/config/concretizer.yaml +++ b/lib/spack/spack/test/data/config/concretizer.yaml @@ -5,3 +5,5 @@ concretizer: host_compatible: false duplicates: strategy: minimal + concretization_cache: + enable: false diff --git a/lib/spack/spack/test/data/config/config.yaml b/lib/spack/spack/test/data/config/config.yaml index fc50b4b7c02f0b..e6867adb3db9b2 100644 --- a/lib/spack/spack/test/data/config/config.yaml +++ b/lib/spack/spack/test/data/config/config.yaml @@ -14,5 +14,3 @@ config: checksum: true dirty: false locks: {1} - concretization_cache: - enable: false diff --git a/lib/spack/spack/test/data/config/include.yaml b/lib/spack/spack/test/data/config/include.yaml new file mode 100644 index 00000000000000..8c794d698f1b35 --- /dev/null +++ b/lib/spack/spack/test/data/config/include.yaml @@ -0,0 +1,2 @@ +include: + - path: base diff --git a/lib/spack/spack/test/data/config/packages.yaml b/lib/spack/spack/test/data/config/packages.yaml index ecec68eb0433e5..989425e07f551f 100644 --- a/lib/spack/spack/test/data/config/packages.yaml +++ b/lib/spack/spack/test/data/config/packages.yaml @@ -60,19 +60,19 @@ packages: # Compilers gcc: externals: - - spec: "gcc@9.4.0 languages='c,c++' os={linux_os.name}{linux_os.version} target={target}" + - spec: "gcc@9.4.0 languages='c,c++' os={linux_os.name}{linux_os.version}" prefix: /path extra_attributes: compilers: c: /path/bin/gcc cxx: /path/bin/g++ - - spec: "gcc@9.4.0 languages='c,c++' os=redhat6 target={target}" + - spec: "gcc@9.4.1 languages='c,c++' os=redhat6" prefix: /path extra_attributes: compilers: c: /path/bin/gcc cxx: /path/bin/g++ - - spec: "gcc@10.2.1 languages='c,c++,fortran' os={linux_os.name}{linux_os.version} target={target}" + - spec: "gcc@10.2.1 languages='c,c++,fortran' os={linux_os.name}{linux_os.version}" prefix: /path extra_attributes: compilers: @@ -81,9 +81,11 @@ packages: fortran: /path/bin/gfortran-10 llvm: externals: - - spec: "llvm@15.0.0 +clang~flang os={linux_os.name}{linux_os.version} target={target}" + - spec: "llvm@15.0.0 +clang~flang os={linux_os.name}{linux_os.version}" prefix: /path extra_attributes: compilers: c: /path/bin/clang cxx: /path/bin/clang++ + glibc: + buildable: false diff --git a/lib/spack/spack/test/data/conftest/diff-test/package-3.txt b/lib/spack/spack/test/data/conftest/diff-test/package-3.txt new file mode 100644 index 00000000000000..ba0b16d09e8721 --- /dev/null +++ b/lib/spack/spack/test/data/conftest/diff-test/package-3.txt @@ -0,0 +1,24 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack.package_base import PackageBase +from spack.package import * + + +class DiffTest(PackageBase): + """zlib replacement with optimizations for next generation systems.""" + + homepage = "https://github.com/zlib-ng/zlib-ng" + url = "https://github.com/zlib-ng/zlib-ng/archive/2.0.0.tar.gz" + git = "https://github.com/zlib-ng/zlib-ng.git" + + license("Zlib") + + version("2.1.8", sha256="59e68f67cbb16999842daeb517cdd86fc25b177b4affd335cd72b76ddc2a46d8") + version("2.1.7", sha256="59e68f67cbb16999842daeb517cdd86fc25b177b4affd335cd72b76ddc2a46d8") + version("2.1.6", tag="2.1.6", commit="74253725f884e2424a0dd8ae3f69896d5377f325") + version("2.1.5", sha256="3f6576971397b379d4205ae5451ff5a68edf6c103b2f03c4188ed7075fbb5f04") + version("2.1.4", sha256="a0293475e6a44a3f6c045229fe50f69dc0eebc62a42405a51f19d46a5541e77a") + version("2.0.7", sha256="6c0853bb27738b811f2b4d4af095323c3d5ce36ceed6b50e5f773204fb8f7200") + version("2.0.0", sha256="86993903527d9b12fc543335c19c1d33a93797b3d4d37648b5addae83679ecd8") diff --git a/lib/spack/spack/test/data/jsonschema_meta.json b/lib/spack/spack/test/data/jsonschema_meta.json deleted file mode 100644 index bcbb84743e3838..00000000000000 --- a/lib/spack/spack/test/data/jsonschema_meta.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "$schema": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - 
"additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/lib/spack/spack/test/data/sourceme_first.bat b/lib/spack/spack/test/data/sourceme_first.bat new file mode 100644 index 00000000000000..bf7f6d80767ed6 --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_first.bat @@ -0,0 +1,10 @@ +@echo off +rem C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. +rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + +set NEW_VAR=new +set UNSET_ME=overridden diff --git a/lib/spack/spack/test/data/sourceme_modules.bat b/lib/spack/spack/test/data/sourceme_modules.bat new file mode 100644 index 00000000000000..ad87d909d714a1 --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_modules.bat @@ -0,0 +1,27 @@ +@echo off +setlocal + +rem C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. +rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + +:_module_raw val_1 + +exit /b 0 + +:module +exit /b 0 + +:ml +exit /b 0 + +set "_module_raw=call :_module_raw" +set "mod=call :mod" +set "ml=call :ml" + +set MODULES_AUTO_HANDLING=1 +set __MODULES_LMCONFLICT=bar^&foo +set NEW_VAR=new diff --git a/lib/spack/spack/test/data/sourceme_parameters.bat b/lib/spack/spack/test/data/sourceme_parameters.bat new file mode 100644 index 00000000000000..515814667ee612 --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_parameters.bat @@ -0,0 +1,13 @@ +@echo off +rem "C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. +rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + +if "%1" == "intel64" ( + set FOO=intel64 +) else ( + set FOO=default +) diff --git a/lib/spack/spack/test/data/sourceme_second.bat b/lib/spack/spack/test/data/sourceme_second.bat new file mode 100644 index 00000000000000..013ccd34d659ac --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_second.bat @@ -0,0 +1,10 @@ +@echo off +rem "C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. 
+rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + +set PATH_LIST=C:\path\first;C:\path\second;C:\path\fourth +set EMPTY_PATH_LIST= diff --git a/lib/spack/spack/test/data/sourceme_unicode.bat b/lib/spack/spack/test/data/sourceme_unicode.bat new file mode 100644 index 00000000000000..1189bbea87eaa0 --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_unicode.bat @@ -0,0 +1,19 @@ +@echo off +rem "C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. +rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + + + +rem Set an environment variable with some unicode in it to ensure that +rem Spack can decode it. +rem +rem This has caused squashed commits on develop to break, as some +rem committers use unicode in their messages, and Travis sets the +rem current commit message in an environment variable. +chcp 65001 > nul +set UNICODE_VAR=don\xe2\x80\x99t +chcp 437 > nul diff --git a/lib/spack/spack/test/data/sourceme_unset.bat b/lib/spack/spack/test/data/sourceme_unset.bat new file mode 100644 index 00000000000000..56965c16064614 --- /dev/null +++ b/lib/spack/spack/test/data/sourceme_unset.bat @@ -0,0 +1,9 @@ +@echo off +rem C:\lib\spack\spack\test\data +rem +rem Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +rem Spack Project Developers. See the top-level COPYRIGHT file for details. +rem +rem SPDX-License-Identifier: (Apache-2.0 OR MIT) + +set UNSET_ME= diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index 3227d9e1a7b1aa..7e208f6fb736b0 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -243,7 +243,7 @@ def fail_for_z(spec): def test_removed_upstream_dep( - upstream_and_downstream_db, capsys, config, repo_builder: RepoBuilder + upstream_and_downstream_db, capfd, config, repo_builder: RepoBuilder ): upstream_db, downstream_db = upstream_and_downstream_db @@ -269,7 +269,7 @@ def test_removed_upstream_dep( downstream_db._read_from_file(downstream_db._index_path) assert ( f"Missing dependency not in database: y/{y.dag_hash(7)} needs z" - in capsys.readouterr().err + in capfd.readouterr().err ) @@ -838,13 +838,13 @@ def test_regression_issue_8036(mutable_database, usr_folder_exists): @pytest.mark.regression("11118") -def test_old_external_entries_prefix(mutable_database): +def test_old_external_entries_prefix(mutable_database: spack.database.Database): with open(spack.store.STORE.db._index_path, "r", encoding="utf-8") as f: db_obj = json.loads(f.read()) spack.vendor.jsonschema.validate(db_obj, schema) - s = spack.concretize.concretize_one("externaltool") + s, *_ = mutable_database.query("externaltool") db_obj["database"]["installs"][s.dag_hash()]["path"] = "None" @@ -855,7 +855,7 @@ def test_old_external_entries_prefix(mutable_database): f.write(str(uuid.uuid4())) record = spack.store.STORE.db.get_record(s) - + assert record is not None assert record.path is None assert record.spec._prefix is None assert record.spec.prefix == record.spec.external_path @@ -989,7 +989,7 @@ def _is(self, spec): @pytest.mark.db -def test_mark_failed(mutable_database, monkeypatch, tmp_path: pathlib.Path, capsys): +def test_mark_failed(mutable_database, monkeypatch, tmp_path: pathlib.Path, capfd): """Add coverage to mark_failed.""" def _raise_exc(lock): @@ -1002,7 +1002,7 @@ def _raise_exc(lock): monkeypatch.setattr(lk.Lock, "acquire_write", _raise_exc) 
spack.store.STORE.failure_tracker.mark(s) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Unable to mark pkg-a as failed" in out spack.store.STORE.failure_tracker.clear_all() diff --git a/lib/spack/spack/test/directives.py b/lib/spack/spack/test/directives.py index 8cb686c26f51e6..8a129140ad95e6 100644 --- a/lib/spack/spack/test/directives.py +++ b/lib/spack/spack/test/directives.py @@ -84,7 +84,7 @@ def test_conditionally_extends_direct_dep(config, mock_packages): def test_error_on_anonymous_dependency(config, mock_packages): pkg = spack.repo.PATH.get_pkg_class("pkg-a") with pytest.raises(spack.directives.DependencyError): - spack.directives._depends_on(pkg, spack.spec.Spec("@4.5")) + spack.directives._execute_depends_on(pkg, spack.spec.Spec("@4.5")) @pytest.mark.regression("34879") @@ -151,11 +151,11 @@ def test_version_type_validation(): # Pass a float with pytest.raises(spack.version.VersionError, match=msg): - spack.directives._execute_version(package(name="python"), 3.10) + spack.directives._execute_version(package(name="python"), ver=3.10, kwargs={}) # Try passing a bogus type; it's just that we want a nice error message with pytest.raises(spack.version.VersionError, match=msg): - spack.directives._execute_version(package(name="python"), {}) + spack.directives._execute_version(package(name="python"), ver={}, kwargs={}) @pytest.mark.parametrize( @@ -196,3 +196,19 @@ class MockPackage: spack.directives._execute_redistribute(cls, source=None, binary=False, when="@1.0") assert cls.disable_redistribute[spec_key].binary assert cls.disable_redistribute[spec_key].source + + +@pytest.mark.regression("51248") +def test_direct_dependencies_from_when_context_are_retained(mock_packages): + """Tests that direct dependencies from the "when" context manager don't lose the "direct" + attribute when turned into directives on the package class. 
+ """ + pkg_cls = spack.repo.PATH.get_pkg_class("with-constraint-met") + # Direct dependency in a "when" single context manager + assert spack.spec.Spec("%pkg-b") in pkg_cls.dependencies + # Direct dependency in a "when" nested context manager + assert spack.spec.Spec("@2 %c=gcc %pkg-c %pkg-b@:4.0") in pkg_cls.dependencies + # Nested ^foo followed by %foo + assert spack.spec.Spec("%pkg-c") in pkg_cls.dependencies + # Nested ^foo followed by ^foo %gcc + assert spack.spec.Spec("^pkg-c %gcc") in pkg_cls.dependencies diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py index 63e60500c4c526..c74827f081dff1 100644 --- a/lib/spack/spack/test/directory_layout.py +++ b/lib/spack/spack/test/directory_layout.py @@ -15,7 +15,6 @@ import spack.hash_types import spack.paths import spack.repo -import spack.spec import spack.util.file_cache from spack.directory_layout import DirectoryLayout, InvalidDirectoryLayoutParametersError from spack.llnl.path import path_to_os_path diff --git a/lib/spack/spack/test/entry_points.py b/lib/spack/spack/test/entry_points.py index 1482782740dff3..b70b72c3c3f5e3 100644 --- a/lib/spack/spack/test/entry_points.py +++ b/lib/spack/spack/test/entry_points.py @@ -65,7 +65,7 @@ def entry_points(group=None): @pytest.fixture() -def mock_get_entry_points(tmp_path: pathlib.Path, monkeypatch): +def mock_get_entry_points(tmp_path: pathlib.Path, reset_extension_paths, monkeypatch): entry_points = entry_points_factory(tmp_path) monkeypatch.setattr(spack.llnl.util.lang, "get_entry_points", entry_points) diff --git a/lib/spack/spack/test/env.py b/lib/spack/spack/test/env.py index f94c69ae3a809a..1aed072fc4fd5d 100644 --- a/lib/spack/spack/test/env.py +++ b/lib/spack/spack/test/env.py @@ -37,6 +37,14 @@ def test_environment_dir_from_name(self, mutable_mock_env_path): with pytest.raises(ev.SpackEnvironmentError, match="environment already exists"): ev.environment_dir_from_name("test", exists_ok=False) + def test_environment_dir_from_nested_name(self, mutable_mock_env_path): + """Test the function mapping a nested managed environment name to its folder.""" + env = ev.create("group/test") + environment_dir = ev.environment_dir_from_name("group/test") + assert env.path == environment_dir + with pytest.raises(ev.SpackEnvironmentError, match="environment already exists"): + ev.environment_dir_from_name("group/test", exists_ok=False) + def test_hash_change_no_rehash_concrete(tmp_path: pathlib.Path, config): # create an environment @@ -132,7 +140,7 @@ def test_env_change_spec_in_matrix_raises_error(tmp_path: pathlib.Path, mutable_ e.concretize() e.write() - with pytest.raises(spack.environment.SpackEnvironmentError) as error: + with pytest.raises(ev.SpackEnvironmentError) as error: e.change_existing_spec(spack.spec.Spec("mpileaks@2.2")) assert "Cannot directly change specs in matrices" in str(error) @@ -924,6 +932,16 @@ def test_environment_from_name_or_dir(mutable_mock_env_path): assert dir_env.name == test_env.name assert dir_env.path == test_env.path + nested_test_env = ev.create("group/test") + + nested_name_env = ev.environment_from_name_or_dir(nested_test_env.name) + assert nested_name_env.name == nested_test_env.name + assert nested_name_env.path == nested_test_env.path + + nested_dir_env = ev.environment_from_name_or_dir(nested_test_env.path) + assert nested_dir_env.name == nested_test_env.name + assert nested_dir_env.path == nested_test_env.path + with pytest.raises(ev.SpackEnvironmentError, match="no such environment"): _ = 
ev.environment_from_name_or_dir("fake-env") @@ -1252,3 +1270,363 @@ def test_mixing_toolchains_in_an_input_spec(unify, tmp_path: pathlib.Path, mutab libelf = mpileaks["libelf"] assert libelf.satisfies("%[virtuals=c] gcc") # libelf only depends on c + + +def test_reuse_environment_dependencies(tmp_path: pathlib.Path, mutable_config): + """Tests reusing specs from a separate, and concrete, environment.""" + base = tmp_path / "base" + base.mkdir() + + # Concretize the first environment asking for a non-default spec. In this way we'll know + # that reuse from the derived environment is not accidental. + manifest_base = base / "spack.yaml" + manifest_base.write_text( + """ +spack: + specs: + - pkg-a@1.0 + packages: + pkg-b: + require: + - "@0.9" +""" + ) + with ev.Environment(base) as e: + e.concretize() + # We need the spack.lock for reuse in the derived environment + e.write(regenerate=False) + base_pkga = e.concrete_roots()[0] + + # Create a second environment, reuse from the previous one and check pkg-a is the same + derived = tmp_path / "derived" + derived.mkdir() + manifest_derived = derived / "spack.yaml" + manifest_derived.write_text( + f""" +spack: + specs: + - pkg-a + concretizer: + reuse: + from: + - type: environment + path: {base} +""" + ) + with ev.Environment(derived) as e: + e.concretize() + derived_pkga = e.concrete_roots()[0] + + assert base_pkga.dag_hash() == derived_pkga.dag_hash() + + +@pytest.mark.parametrize( + "spack_yaml", + [ + # Use a plain requirement for callpath + """ +spack: + specs: + - mpileaks %%c,cxx=gcc + - mpileaks %%c,cxx=llvm + packages: + callpath: + require: + - "%c=gcc" + concretizer: + unify: false +""", + # Propagate a toolchain + """ +spack: + specs: + - mpileaks %%c,cxx=gcc + - mpileaks %%llvm_toolchain + toolchains: + llvm_toolchain: + - spec: "%c=llvm" + when: "%c" + - spec: "%cxx=llvm" + when: "%cxx" + packages: + callpath: + require: + - "%c=gcc" + concretizer: + unify: false +""", + # Override callpath from input spec + """ +spack: + specs: + - mpileaks %%c,cxx=gcc ^callpath %c=gcc + - mpileaks %%llvm_toolchain ^callpath %c=gcc + toolchains: + llvm_toolchain: + - spec: "%c=llvm" + when: "%c" + - spec: "%cxx=llvm" + when: "%cxx" + concretizer: + unify: false +""", + ], +) +def test_dependency_propagation_in_environments(spack_yaml, tmp_path, mutable_config): + """Tests that we can enforce compiler preferences using %% in environments.""" + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path) as e: + e.concretize() + roots = e.concrete_roots() + + mpileaks_gcc = [s for s in roots if s.satisfies("mpileaks %c=gcc")][0] + for c in ("%[when=%c]c=gcc", "%[when=%cxx]cxx=gcc"): + assert all(x.satisfies(c) for x in mpileaks_gcc.traverse() if x.name != "callpath") + + mpileaks_llvm = [s for s in roots if s.satisfies("mpileaks %c=llvm")][0] + for c in ("%[when=%c]c=llvm", "%[when=%cxx]cxx=llvm"): + assert all(x.satisfies(c) for x in mpileaks_llvm.traverse() if x.name != "callpath") + + assert mpileaks_gcc["callpath"].satisfies("%c=gcc") + assert mpileaks_llvm["callpath"].satisfies("%c=gcc") + + +@pytest.mark.parametrize( + "spack_yaml,exception_nodes", + [ + # trilinos and its link/run subdag are compiled with clang, all other nodes use gcc + ( + """ +spack: + specs: + - trilinos %%c,cxx=clang + packages: + c: + prefer: + - gcc + cxx: + prefer: + - gcc +""", + set(), + ), + # callpath and its link/run subdag are compiled with clang, all other nodes use gcc + ( + """ +spack: + specs: + - trilinos ^callpath 
%%c,cxx=clang + packages: + c: + prefer: + - gcc + cxx: + prefer: + - gcc +""", + {"trilinos", "mpich", "py-numpy"}, + ), + # trilinos and its link/run subdag, with the exception of mpich, are compiled with clang. + # All other nodes use gcc. + ( + """ +spack: + specs: + - trilinos %%c,cxx=clang ^mpich %c=gcc + packages: + c: + prefer: + - gcc + cxx: + prefer: + - gcc +""", + {"mpich"}, + ), + ( + """ +spack: + specs: + - trilinos %%c,cxx=clang + packages: + c: + prefer: + - gcc + cxx: + prefer: + - gcc + mpich: + require: + - "%c=gcc" +""", + {"mpich"}, + ), + ], +) +def test_double_percent_semantics(spack_yaml, exception_nodes, tmp_path, mutable_config): + """Tests semantics of %% in environments, when combined with other features. + + The test assumes clang is the propagated compiler, and gcc is the preferred compiler. + """ + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path) as e: + e.concretize() + trilinos = e.concrete_roots()[0] + + runtime_nodes = [ + x for x in trilinos.traverse(deptype=("link", "run")) if x.name not in exception_nodes + ] + remaining_nodes = [x for x in trilinos.traverse() if x not in runtime_nodes] + + for x in runtime_nodes: + error_msg = f"\n{x.tree()} does not use clang while expected to" + assert x.satisfies("%[when=%c]c=clang %[when=%cxx]cxx=clang"), error_msg + + for x in remaining_nodes: + error_msg = f"\n{x.tree()} does not use gcc while expected to" + assert x.satisfies("%[when=%c]c=gcc %[when=%cxx]cxx=gcc"), error_msg + + +def test_cannot_use_double_percent_with_require(tmp_path, mutable_config): + """Tests that %% cannot be used with a requirement on languages, since they'll conflict.""" + # trilinos wants to use clang, but we require gcc, so Spack will error + spack_yaml = """ +spack: + specs: + - trilinos %%c,cxx=clang + packages: + c: + require: + - gcc + cxx: + require: + - gcc +""" + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path) as e: + with pytest.raises(spack.solver.asp.UnsatisfiableSpecError, match="failed to concretize"): + e.concretize() + + +@pytest.mark.parametrize( + "spack_yaml", + [ + # Specs with reuse on + """ +spack: + specs: + - trilinos + - mpileaks + concretizer: + reuse: true +""", + # Package with conditional dependency + """ +spack: + specs: + - ascent+adios2 + - fftw+mpi +""", + """ +spack: + specs: + - ascent~adios2 + - fftw~mpi +""", + """ +spack: + specs: + - ascent+adios2 + - fftw~mpi +""", + ], +) +def test_static_analysis_in_environments(spack_yaml, tmp_path, mutable_config): + """Tests that concretizations with and without static analysis produce the same results.""" + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path) as e: + e.concretize() + no_static_analysis = {x.dag_hash() for x in e.concrete_roots()} + + mutable_config.set("concretizer:static_analysis", True) + with ev.Environment(tmp_path) as e: + e.concretize() + static_analysis = {x.dag_hash() for x in e.concrete_roots()} + + assert no_static_analysis == static_analysis + + +@pytest.mark.regression("51606") +def test_ids_when_using_toolchain_twice_in_a_spec(tmp_path, mutable_config): + """Tests that using the same toolchain twice in a spec constructs different objects""" + spack_yaml = """ +spack: + toolchains: + llvmtc: + - spec: "%c=llvm" + when: "%c" + - spec: "%cxx=llvm" + when: "%cxx" + gnu: + - spec: "%c=gcc@10" + when: "%c" + - spec: "%cxx=gcc@10" + when: "%cxx" + # This is missing the conditional 
when= on purpose + - spec: "%fortran=gcc@10" +""" + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path): + # We rely on this behavior when emitting facts for the solver + s = spack.spec.Spec("mpileaks %gnu ^callpath %gnu") + assert id(s["gcc"]) != id(s["callpath"]["gcc"]) + + +def test_installed_specs_disregards_deprecation(tmp_path, mutable_config): + """Tests that installed specs disregard deprecation. This is to avoid weird ordering issues, + where an old version that _is not_ declared in package.py is considered as _not_ deprecated, + and is preferred to a newer version that is explicitly marked as deprecated. + """ + spack_yaml = """ +spack: + specs: + - mpileaks + packages: + c: + require: + - gcc + cxx: + require: + - gcc + gcc:: + externals: + - spec: gcc@7.3.1 languages:='c,c++,fortran' + prefix: /path + extra_attributes: + compilers: + c: /path/bin/gcc + cxx: /path/bin/g++ + fortran: /path/bin/gfortran + - spec: gcc@=12.4.0 languages:='c,c++,fortran' + prefix: /usr + extra_attributes: + compilers: + c: /usr/bin/gcc + cxx: /usr/bin/g++ + fortran: /usr/bin/gfortran +""" + manifest = tmp_path / "spack.yaml" + manifest.write_text(spack_yaml) + with ev.Environment(tmp_path) as e: + e.concretize() + mpileaks = e.concrete_roots()[0] + + for node in mpileaks.traverse(): + if node.satisfies("%c"): + assert node.satisfies("%c=gcc@12"), node.tree() + assert not node.satisfies("%c=gcc@7"), node.tree() diff --git a/lib/spack/spack/test/environment/mutate.py b/lib/spack/spack/test/environment/mutate.py new file mode 100644 index 00000000000000..bc4677ab635b44 --- /dev/null +++ b/lib/spack/spack/test/environment/mutate.py @@ -0,0 +1,156 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +import platform + +import pytest + +import spack.concretize +import spack.config +import spack.environment as ev +import spack.spec +from spack.main import SpackCommand + +pytestmark = [ + pytest.mark.usefixtures("mutable_config", "mutable_mock_env_path", "mutable_mock_repo"), + pytest.mark.not_on_windows("Envs unsupported on Windows"), +] + +# See lib/spack/spack/platforms/test.py for how targets are defined on the Test platform +test_targets = ("m1", "aarch64") if platform.machine() == "arm64" else ("core2", "x86_64") + +change = SpackCommand("change") + + +@pytest.mark.parametrize("dep", [True, False]) +@pytest.mark.parametrize( + "orig_constraint,mutated_constraint", + [ + ("@3.23.1", "@3.4.3"), + ("cflags=-O3", "cflags='-O0 -g'"), + ("os=debian6", "os=redhat6"), + (f"target={test_targets[0]}", f"target={test_targets[1]}"), + ("build_system=generic", "build_system=foo"), + ( + f"@3.4.3 cflags=-g os=debian6 target={test_targets[1]} build_system=generic", + f"@3.23.1 cflags=-O3 os=redhat6 target={test_targets[0]} build_system=foo", + ), + ], +) +def test_mutate_internals(dep, orig_constraint, mutated_constraint): + """ + Check that Environment.mutate and Spec.mutate work for several different constraint types. + + Includes check that environment.mutate rehashing gets the same answer as spec.mutate rehashing. 
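In rough sketch form, the core call pattern exercised below is (values here are illustrative; the parametrization above drives the real ones):

    selector = spack.spec.Spec("cmake")    # which nodes to change
    mutator = spack.spec.Spec("@3.4.3")    # the constraint to apply
    env.mutate(selector=selector, mutator=mutator)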
+ """ + ev.create("test") + env = ev.read("test") + + spack.config.set("packages:cmake", {"require": orig_constraint}) + + root_name = "cmake-client" if dep else "cmake" + env.add(root_name) + env.concretize() + + root_spec = next(env.roots()).copy() + cmake_spec = root_spec["cmake"] if dep else root_spec + orig_cmake_spec = cmake_spec.copy() + orig_hash = root_spec.dag_hash() + + for spec in env.all_specs_generator(): + if spec.name == "cmake": + assert spec.satisfies(orig_constraint) + + selector = spack.spec.Spec("cmake") + mutator = spack.spec.Spec(mutated_constraint) + env.mutate(selector=selector, mutator=mutator) + cmake_spec.mutate(mutator) + + for spec in env.all_specs_generator(): + if spec.name == "cmake": + assert spec.satisfies(mutated_constraint) + assert cmake_spec.satisfies(mutated_constraint) + + # Make sure that we're not changing variant types single/multi + for name, variant in cmake_spec.variants.items(): + assert variant.type == orig_cmake_spec.variants[name].type + + new_hash = next(env.roots()).dag_hash() + assert new_hash != orig_hash + assert root_spec.dag_hash() != orig_hash + assert root_spec.dag_hash() == new_hash + + +@pytest.mark.parametrize("constraint", ["foo", "foo.bar", "foo%cmake@1.0", "foo@1.1:", "foo/abc"]) +def test_mutate_spec_invalid(constraint): + spec = spack.concretize.concretize_one("cmake-client") + with pytest.raises(spack.spec.SpecMutationError): + spec.mutate(spack.spec.Spec(constraint)) + + +def _test_mutate_from_cli(args, create=True): + if create: + ev.create("test") + + env = ev.read("test") + + if create: + env.add("cmake-client%cmake@3.4.3") + env.add("cmake-client%cmake@3.23.1") + env.concretize() + env.write() + + with env: + change(*args) + + return list(env.roots()) + + +def test_mutate_from_cli(): + match_spec = "%cmake@3.4.3" + constraint = "@3.0" + args = ["--concrete", f"--match-spec={match_spec}", constraint] + roots = _test_mutate_from_cli(args) + + assert any(r.satisfies(match_spec) for r in roots) + for root in roots: + if root.satisfies("match_spec"): + assert root.satisfies(constraint) + + +def test_mutate_from_cli_multiple(): + match_spec = "%cmake@3.4.3" + constraint1 = "@3.0" + constraint2 = "build_system=foo" + args = ["--concrete", f"--match-spec={match_spec}", constraint1, constraint2] + roots = _test_mutate_from_cli(args) + + assert any(r.satisfies(match_spec) for r in roots) + for root in roots: + if root.satisfies("match_spec"): + assert root.satisfies(constraint1) + assert root.satisfies(constraint2) + + +def test_mutate_from_cli_no_abstract(): + match_spec = "cmake" + constraint = "@3.0" + args = ["--concrete", f"--match-spec={match_spec}", constraint] + + with pytest.raises(ValueError, match="Cannot change abstract spec"): + _ = _test_mutate_from_cli(args) + + args = ["--concrete-only"] + args[1:] + roots = _test_mutate_from_cli(args, create=False) + + for root in roots: + assert root[match_spec].satisfies(constraint) + + +def test_mutate_from_cli_all_no_match_spec(): + constraint = "cmake-client@3.0" + args = ["--concrete", "--all", constraint] + roots = _test_mutate_from_cli(args) + + for root in roots: + assert root.satisfies(constraint) diff --git a/lib/spack/spack/test/environment_modifications.py b/lib/spack/spack/test/environment_modifications.py index 8034cadba07333..23e23f9d553988 100644 --- a/lib/spack/spack/test/environment_modifications.py +++ b/lib/spack/spack/test/environment_modifications.py @@ -4,6 +4,7 @@ import os import pathlib +import sys import pytest @@ -22,6 +23,45 @@ datadir = 
os.path.join(spack_root, "lib", "spack", "spack", "test", "data") + +shell_extension = ".bat" if sys.platform == "win32" else ".sh" + +# A list of miscellaneous paths, including system ones +miscellaneous_unix_sys_paths = [ + "/usr/include", + "/usr/local/lib", + "/usr/local", + "/usr/local/include", + "/usr/local/lib64", + "/usr/local/../bin", + "/lib", + "/", + "/usr", + "/usr/", + "/usr/bin", + "/bin64", + "/lib64", + "/include", + "/include/", +] + +miscellaneous_win_sys_paths = [ + "C:\\Users", + "C:\\", + "C:\\Program Files", + "C:\\Program Files (x86)", + "C:\\ProgramData", +] + +miscellaneous_win_paths = ["C:\\dev\\spack_window"] + +miscellaneous_unix_paths = [ + "/usr/local/Cellar/gcc/5.3.0/lib", + "/usr/local/opt/some-package/lib", + "/usr/opt/lib", + "/opt/some-package/include", + "/opt/some-package/local/..", +] + def test_inspect_path(tmp_path: pathlib.Path): inspections = { @@ -62,16 +102,54 @@ def test_exclude_paths_from_inspection(): assert len(env) == 0 +def make_path(*path): + """Joins given components a,b,c... to form a valid absolute + path for the current host OS being tested. + Created path does not necessarily exist on system. + """ + abs_path_prefix = "C:\\" if sys.platform == "win32" else "/" + return os.path.join(abs_path_prefix, *path) + + +def make_pathlist(paths): + """Makes a fake list of platform-specific paths""" + return os.pathsep.join( + [make_path(*path) if isinstance(path, list) else make_path(path) for path in paths] + ) + + +@pytest.fixture +def system_paths_for_os(): + return miscellaneous_win_sys_paths if sys.platform == "win32" else miscellaneous_unix_sys_paths + + +@pytest.fixture +def non_system_paths_for_os(): + return miscellaneous_win_paths if sys.platform == "win32" else miscellaneous_unix_paths + + @pytest.fixture() -def prepare_environment_for_tests(working_env): +def prepare_environment_for_tests(working_env, system_paths_for_os): """Sets a few dummy variables in the current environment, that will be useful for the tests below.
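The path-list values are assembled with make_pathlist above; for reference, make_pathlist([["path", "second"], ["path", "third"]]) yields "/path/second:/path/third" on Unix and "C:\\path\\second;C:\\path\\third" on Windows.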
""" os.environ["UNSET_ME"] = "foo" os.environ["EMPTY_PATH_LIST"] = "" - os.environ["PATH_LIST"] = "/path/second:/path/third" - os.environ["REMOVE_PATH_LIST"] = "/a/b:/duplicate:/a/c:/remove/this:/a/d:/duplicate/:/f/g" - os.environ["PATH_LIST_WITH_SYSTEM_PATHS"] = "/usr/include:" + os.environ["REMOVE_PATH_LIST"] + os.environ["PATH_LIST"] = make_pathlist([["path", "second"], ["path", "third"]]) + os.environ["REMOVE_PATH_LIST"] = make_pathlist( + [ + ["a", "b"], + ["duplicate"], + ["a", "c"], + ["remove", "this"], + ["a", "d"], + ["duplicate"], + ["f", "g"], + ] + ) + # grab arbitrary system path + sys_path = system_paths_for_os[0] + os.pathsep + os.environ["PATH_LIST_WITH_SYSTEM_PATHS"] = sys_path + os.environ["REMOVE_PATH_LIST"] os.environ["PATH_LIST_WITH_DUPLICATES"] = os.environ["REMOVE_PATH_LIST"] @@ -81,40 +159,14 @@ def env(prepare_environment_for_tests): return EnvironmentModifications() -@pytest.fixture -def miscellaneous_paths(): - """Returns a list of paths, including system ones.""" - return [ - "/usr/local/Cellar/gcc/5.3.0/lib", - "/usr/local/lib", - "/usr/local", - "/usr/local/include", - "/usr/local/lib64", - "/usr/local/opt/some-package/lib", - "/usr/opt/lib", - "/usr/local/../bin", - "/lib", - "/", - "/usr", - "/usr/", - "/usr/bin", - "/bin64", - "/lib64", - "/include", - "/include/", - "/opt/some-package/include", - "/opt/some-package/local/..", - ] - - @pytest.fixture def files_to_be_sourced(): """Returns a list of files to be sourced""" return [ - os.path.join(datadir, "sourceme_first.sh"), - os.path.join(datadir, "sourceme_second.sh"), - os.path.join(datadir, "sourceme_parameters.sh"), - os.path.join(datadir, "sourceme_unicode.sh"), + os.path.join(datadir, "sourceme_first" + shell_extension), + os.path.join(datadir, "sourceme_second" + shell_extension), + os.path.join(datadir, "sourceme_parameters" + shell_extension), + os.path.join(datadir, "sourceme_unicode" + shell_extension), ] @@ -158,75 +210,75 @@ def test_unset(env): os.environ["UNSET_ME"] -@pytest.mark.not_on_windows("Not supported on Windows (yet)") -def test_filter_system_paths(miscellaneous_paths): +def test_filter_system_paths(system_paths_for_os, non_system_paths_for_os): """Tests that the filtering of system paths works as expected.""" - filtered = filter_system_paths(miscellaneous_paths) - expected = [ - "/usr/local/Cellar/gcc/5.3.0/lib", - "/usr/local/opt/some-package/lib", - "/usr/opt/lib", - "/opt/some-package/include", - "/opt/some-package/local/..", - ] - assert filtered == expected + filtered = filter_system_paths(system_paths_for_os + non_system_paths_for_os) + assert filtered == non_system_paths_for_os -# TODO 27021 -@pytest.mark.not_on_windows("Not supported on Windows (yet)") def test_set_path(env): """Tests setting paths in an environment variable.""" - - # Check setting paths with the default separator - env.set_path("A", ["foo", "bar", "baz"]) + name = "A" + elements = ["foo", "bar", "baz"] + # Check setting paths with a specific separator + env.set_path(name, elements, separator=os.pathsep) env.apply_modifications() - assert "foo:bar:baz" == os.environ["A"] - - env.set_path("B", ["foo", "bar", "baz"], separator=";") - env.apply_modifications() - - assert "foo;bar;baz" == os.environ["B"] + expected = os.pathsep.join(elements) + assert expected == os.environ[name] -@pytest.mark.not_on_windows("Not supported on Windows (yet)") def test_path_manipulation(env): """Tests manipulating list of paths in the environment.""" + env.prepend_path("PATH_LIST", make_path("path", "first")) + 
env.append_path("PATH_LIST", make_path("path", "fourth")) + env.append_path("PATH_LIST", make_path("path", "last")) - env.append_path("PATH_LIST", "/path/last") - env.prepend_path("PATH_LIST", "/path/first") - - env.append_path("EMPTY_PATH_LIST", "/path/middle") - env.append_path("EMPTY_PATH_LIST", "/path/last") - env.prepend_path("EMPTY_PATH_LIST", "/path/first") - - env.append_path("NEWLY_CREATED_PATH_LIST", "/path/middle") - env.append_path("NEWLY_CREATED_PATH_LIST", "/path/last") - env.prepend_path("NEWLY_CREATED_PATH_LIST", "/path/first") + env.remove_path("REMOVE_PATH_LIST", make_path("remove", "this")) + env.remove_path("REMOVE_PATH_LIST", make_path("duplicate") + os.sep) - env.remove_path("REMOVE_PATH_LIST", "/remove/this") - env.remove_path("REMOVE_PATH_LIST", "/duplicate/") - - env.deprioritize_system_paths("PATH_LIST_WITH_SYSTEM_PATHS") env.prune_duplicate_paths("PATH_LIST_WITH_DUPLICATES") env.apply_modifications() - expected = "/path/first:/path/second:/path/third:/path/last" - assert os.environ["PATH_LIST"] == expected - - expected = "/path/first:/path/middle:/path/last" - assert os.environ["EMPTY_PATH_LIST"] == expected + assert os.environ["PATH_LIST"] == make_pathlist( + [ + ["path", "first"], + ["path", "second"], + ["path", "third"], + ["path", "fourth"], + ["path", "last"], + ] + ) + assert os.environ["REMOVE_PATH_LIST"] == make_pathlist( + [["a", "b"], ["a", "c"], ["a", "d"], ["f", "g"]] + ) + + assert os.environ["PATH_LIST_WITH_DUPLICATES"].count(make_path("duplicate")) == 1 + + +@pytest.mark.not_on_windows("Skip Unix path tests on Windows") +def test_unix_system_path_manipulation(env): + """Tests manipulating paths that have special meaning as system paths on Unix""" + env.deprioritize_system_paths("PATH_LIST_WITH_SYSTEM_PATHS") + env.apply_modifications() - expected = "/path/first:/path/middle:/path/last" - assert os.environ["NEWLY_CREATED_PATH_LIST"] == expected + assert not os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].startswith( + make_pathlist([["usr", "include" + os.pathsep]]) + ) + assert os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].endswith(make_pathlist([["usr", "include"]])) - assert os.environ["REMOVE_PATH_LIST"] == "/a/b:/a/c:/a/d:/f/g" - assert not os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].startswith("/usr/include:") - assert os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].endswith(":/usr/include") +@pytest.mark.skipif(sys.platform != "win32", reason="Skip Windows path tests on non-Windows platforms") +def test_windows_system_path_manipulation(env): + """Tests manipulating paths that have special meaning as system paths on Windows""" + env.deprioritize_system_paths("PATH_LIST_WITH_SYSTEM_PATHS") + env.apply_modifications() - assert os.environ["PATH_LIST_WITH_DUPLICATES"].count("/duplicate") == 1 + assert not os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].startswith( + make_pathlist([["C:", "Users" + os.pathsep]]) + ) + assert os.environ["PATH_LIST_WITH_SYSTEM_PATHS"].endswith(make_pathlist([["C:", "Users"]])) def test_extend(env): @@ -243,7 +295,6 @@ def test_extend(env): assert x is y -@pytest.mark.not_on_windows("Not supported on Windows (yet)") @pytest.mark.usefixtures("prepare_environment_for_tests") def test_source_files(files_to_be_sourced): """Tests the construction of a list of environment modifications that are @@ -251,7 +302,7 @@ def test_source_files(files_to_be_sourced): """ env = EnvironmentModifications() for filename in files_to_be_sourced: - if filename.endswith("sourceme_parameters.sh"): + if filename.endswith("sourceme_parameters" + shell_extension):
env.extend(EnvironmentModifications.from_sourcing_file(filename, "intel64")) else: env.extend(EnvironmentModifications.from_sourcing_file(filename)) @@ -284,11 +335,11 @@ def test_source_files(files_to_be_sourced): assert len(modifications["PATH_LIST"]) == 3 assert isinstance(modifications["PATH_LIST"][0], RemovePath) - assert modifications["PATH_LIST"][0].value == "/path/third" + assert modifications["PATH_LIST"][0].value == make_path("path", "third") assert isinstance(modifications["PATH_LIST"][1], AppendPath) - assert modifications["PATH_LIST"][1].value == "/path/fourth" + assert modifications["PATH_LIST"][1].value == make_path("path", "fourth") assert isinstance(modifications["PATH_LIST"][2], PrependPath) - assert modifications["PATH_LIST"][2].value == "/path/first" + assert modifications["PATH_LIST"][2].value == make_path("path", "first") @pytest.mark.regression("8345") @@ -307,42 +358,53 @@ def test_preserve_environment(prepare_environment_for_tests): assert "NOT_SET" not in os.environ assert os.environ["UNSET_ME"] == "foo" - assert os.environ["PATH_LIST"] == "/path/second:/path/third" + assert os.environ["PATH_LIST"] == make_pathlist([["path", "second"], ["path", "third"]]) -@pytest.mark.not_on_windows("Not supported on Windows (yet)") @pytest.mark.parametrize( "files,expected,deleted", [ # Sets two variables ( - (os.path.join(datadir, "sourceme_first.sh"),), + (os.path.join(datadir, "sourceme_first" + shell_extension),), {"NEW_VAR": "new", "UNSET_ME": "overridden"}, [], ), # Check if we can set a variable to different values depending # on command line parameters - ((os.path.join(datadir, "sourceme_parameters.sh"),), {"FOO": "default"}, []), - (([os.path.join(datadir, "sourceme_parameters.sh"), "intel64"],), {"FOO": "intel64"}, []), + ( + (os.path.join(datadir, "sourceme_parameters" + shell_extension),), + {"FOO": "default"}, + [], + ), + ( + ([os.path.join(datadir, "sourceme_parameters" + shell_extension), "intel64"],), + {"FOO": "intel64"}, + [], + ), # Check unsetting variables ( - (os.path.join(datadir, "sourceme_second.sh"),), - {"PATH_LIST": "/path/first:/path/second:/path/fourth"}, + (os.path.join(datadir, "sourceme_second" + shell_extension),), + { + "PATH_LIST": make_pathlist( + [["path", "first"], ["path", "second"], ["path", "fourth"]] + ) + }, ["EMPTY_PATH_LIST"], ), # Check that order of sourcing matters ( ( - os.path.join(datadir, "sourceme_unset.sh"), - os.path.join(datadir, "sourceme_first.sh"), + os.path.join(datadir, "sourceme_unset" + shell_extension), + os.path.join(datadir, "sourceme_first" + shell_extension), ), {"NEW_VAR": "new", "UNSET_ME": "overridden"}, [], ), ( ( - os.path.join(datadir, "sourceme_first.sh"), - os.path.join(datadir, "sourceme_unset.sh"), + os.path.join(datadir, "sourceme_first" + shell_extension), + os.path.join(datadir, "sourceme_unset" + shell_extension), ), {"NEW_VAR": "new"}, ["UNSET_ME"], @@ -440,34 +502,34 @@ def test_sanitize_regex(env, exclude, include, expected, deleted): ({"FOO": "foo"}, {}, [environment.UnsetEnv("FOO")]), # Append paths to an environment variable ( - {"FOO_PATH": "/a/path"}, - {"FOO_PATH": "/a/path:/b/path"}, - [environment.AppendPath("FOO_PATH", "/b/path")], + {"FOO_PATH": make_pathlist([["a", "path"]])}, + {"FOO_PATH": make_pathlist([["a", "path"], ["b", "path"]])}, + [environment.AppendPath("FOO_PATH", make_pathlist([["b", "path"]]))], ), ( {}, - {"FOO_PATH": "/a/path" + os.sep + "/b/path"}, - [environment.AppendPath("FOO_PATH", "/a/path" + os.sep + "/b/path")], + {"FOO_PATH": make_pathlist([["a", "path"], 
["b", "path"]])}, + [environment.AppendPath("FOO_PATH", make_pathlist([["a", "path"], ["b", "path"]]))], ), ( - {"FOO_PATH": "/a/path:/b/path"}, - {"FOO_PATH": "/b/path"}, - [environment.RemovePath("FOO_PATH", "/a/path")], + {"FOO_PATH": make_pathlist([["a", "path"], ["b", "path"]])}, + {"FOO_PATH": make_pathlist([["b", "path"]])}, + [environment.RemovePath("FOO_PATH", make_pathlist([["a", "path"]]))], ), ( - {"FOO_PATH": "/a/path:/b/path"}, - {"FOO_PATH": "/a/path:/c/path"}, + {"FOO_PATH": make_pathlist([["a", "path"], ["b", "path"]])}, + {"FOO_PATH": make_pathlist([["a", "path"], ["c", "path"]])}, [ - environment.RemovePath("FOO_PATH", "/b/path"), - environment.AppendPath("FOO_PATH", "/c/path"), + environment.RemovePath("FOO_PATH", make_pathlist([["b", "path"]])), + environment.AppendPath("FOO_PATH", make_pathlist([["c", "path"]])), ], ), ( - {"FOO_PATH": "/a/path:/b/path"}, - {"FOO_PATH": "/c/path:/a/path"}, + {"FOO_PATH": make_pathlist([["a", "path"], ["b", "path"]])}, + {"FOO_PATH": make_pathlist([["c", "path"], ["a", "path"]])}, [ - environment.RemovePath("FOO_PATH", "/b/path"), - environment.PrependPath("FOO_PATH", "/c/path"), + environment.RemovePath("FOO_PATH", make_pathlist([["b", "path"]])), + environment.PrependPath("FOO_PATH", make_pathlist([["c", "path"]])), ], ), # Modify two variables in the same environment @@ -497,11 +559,10 @@ def test_exclude_lmod_variables(): assert not any(x.startswith("LMOD_") for x in modifications) -@pytest.mark.not_on_windows("Not supported on Windows (yet)") @pytest.mark.regression("13504") def test_exclude_modules_variables(): # Construct the list of environment modifications - file = os.path.join(datadir, "sourceme_modules.sh") + file = os.path.join(datadir, "sourceme_modules" + shell_extension) env = EnvironmentModifications.from_sourcing_file(file) # Check that variables related to modules are not in there diff --git a/lib/spack/spack/test/error_messages.py b/lib/spack/spack/test/error_messages.py new file mode 100644 index 00000000000000..795145303d6888 --- /dev/null +++ b/lib/spack/spack/test/error_messages.py @@ -0,0 +1,542 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import os +import os.path +import re +from contextlib import contextmanager +from typing import Iterable, Optional + +import pytest + +import spack.config +import spack.error +import spack.repo +import spack.util.file_cache +import spack.util.spack_yaml as syaml +from spack.concretize import concretize_one +from spack.main import SpackCommand + +solve = SpackCommand("solve") + + +def update_packages_config(conf_str): + conf = syaml.load_config(conf_str) + spack.config.set("packages", conf["packages"], scope="concretize") + + +_pkgx1 = ( + "x1", + """\ +class X1(Package): + version("1.2") + version("1.1") + + depends_on("x2") + depends_on("x3") +""", +) + + +_pkgx2 = ( + "x2", + """\ +class X2(Package): + version("2.1") + version("2.0") + + depends_on("x4@4.1") +""", +) + + +_pkgx3 = ( + "x3", + """\ +class X3(Package): + version("3.5") + version("3.4") + + depends_on("x4@4.0") +""", +) + + +_pkgx4 = ( + "x4", + """\ +class X4(Package): + version("4.1") + version("4.0") +""", +) + + +_pkgy1 = ( + "y1", + """\ +class Y1(Package): + version("1.2") + version("1.1") + + depends_on("y2+v1") + depends_on("y3") +""", +) + + +_pkgy2 = ( + "y2", + """\ +class Y2(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + depends_on("y4@4.1", when="+v1") + depends_on("y4") +""", +) + + +_pkgy3 = ( + "y3", + """\ +class Y3(Package): + version("3.5") + version("3.4") + + depends_on("y4@4.0") +""", +) + + +_pkgy4 = ( + "y4", + """\ +class Y4(Package): + version("4.1") + version("4.0") +""", +) + + +_pkgz1 = ( + "z1", + """\ +class Z1(Package): + version("1.2") + version("1.1") + + variant("v1", default=True) + + depends_on("z2") + + depends_on("z3") + depends_on("z3+v2", when="~v1") + + conflicts("+v1", when="@:1.1") +""", +) + + +_pkgz2 = ( + "z2", + """\ +class Z2(Package): + version("3.1") + version("3.0") + + depends_on("z3@:2.0") +""", +) + + +_pkgz3 = ( + "z3", + """\ +class Z3(Package): + version("2.1") + version("2.0") + + variant("v2", default=True, when="@2.1:") +""", +) + + +# Cluster of packages that includes requirements - goal is to "chain" +# the requirements like other constraints. 
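# (Editor's sketch, not part of this diff: the "chain" the W cluster below is
# meant to exercise, spelled out for the spec used in test_errmsg_requirements_1.
#     w4@2.0 forces w3+v1, via depends_on("w3+v1", when="@2.0")
#     w3@2.1 forces w3~v1, via requires("~v1", when="@2.1")
# so "w4@:2.0 ^w3@2.1" cannot concretize, and a useful error should surface
# both links of the chain instead of a single generic failure.)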
+_pkgw4 = ( + "w4", + """\ +class W4(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + depends_on("w2") + depends_on("w2@:2.0", when="@:2.0") + + depends_on("w3") + depends_on("w3+v1", when="@2.0") +""", +) + + +_pkgw3 = ( + "w3", + """\ +class W3(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + requires("~v1", when="@2.1") + + depends_on("w1") +""", +) + + +_pkgw2 = ( + "w2", + """\ +class W2(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + depends_on("w1") +""", +) + + +_pkgw1 = ( + "w1", + """\ +class W1(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) +""", +) + + +# Like the W* packages, but encodes the config-based requirement constraints +# into the packages to see if that improves the error from +# test_errmsg_requirements_cfg +_pkgt4 = ( + "t4", + """\ +class T4(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + depends_on("t2") + depends_on("t2@:2.0", when="@:2.0") + + depends_on("t3") + depends_on("t3~v1", when="@2.0") +""", +) + + +_pkgt3 = ( + "t3", + """\ +class T3(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + requires("+v1", when="@2.1") + + depends_on("t1") +""", +) + + +_pkgt2 = ( + "t2", + """\ +class T2(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) + + requires("~v1", when="@:2.0") + + depends_on("t1") +""", +) + + +_pkgt1 = ( + "t1", + """\ +class T1(Package): + version("2.1") + version("2.0") + + variant("v1", default=True) +""", +) + + +all_pkgs = [ + _pkgx1, + _pkgx2, + _pkgx3, + _pkgx4, + _pkgy1, + _pkgy2, + _pkgy3, + _pkgy4, + _pkgz1, + _pkgz2, + _pkgz3, + _pkgw1, + _pkgw2, + _pkgw3, + _pkgw4, + _pkgt1, + _pkgt2, + _pkgt3, + _pkgt4, +] + + +def _add_import(pkg_def): + return ( + """\ +from spack.package import * +from spack.package import Package +""" + + pkg_def + ) + + +all_pkgs = list((x, _add_import(y)) for (x, y) in all_pkgs) + + +_repo_name_id = 0 + + +def create_test_repo(tmp_path, pkg_name_content_tuples): + global _repo_name_id + + repo_name = f"testrepo{str(_repo_name_id)}" + repo_path = tmp_path / "spack_repo" / repo_name + os.makedirs(repo_path) + with open(repo_path / "__init__.py", "w", encoding="utf-8"): + pass + repo_yaml = os.path.join(repo_path, "repo.yaml") + with open(str(repo_yaml), "w", encoding="utf-8") as f: + f.write( + f"""\ +repo: + namespace: {repo_name} + api: v2.1 +""" + ) + + _repo_name_id += 1 + + packages_dir = repo_path / "packages" + os.mkdir(packages_dir) + with open(packages_dir / "__init__.py", "w", encoding="utf-8"): + pass + for pkg_name, pkg_str in pkg_name_content_tuples: + pkg_dir = packages_dir / pkg_name + os.mkdir(pkg_dir) + pkg_file = pkg_dir / "package.py" + with open(str(pkg_file), "w", encoding="utf-8") as f: + f.write(pkg_str) + + repo_cache = spack.util.file_cache.FileCache(str(tmp_path / "cache")) + return spack.repo.Repo(str(repo_path), cache=repo_cache) + + +@pytest.fixture +def _create_test_repo(tmp_path, mutable_config): + yield create_test_repo(tmp_path, all_pkgs) + + +@pytest.fixture +def test_repo(_create_test_repo, monkeypatch, mock_stage): + with spack.repo.use_repositories(_create_test_repo) as mock_repo_path: + yield mock_repo_path + + +@contextmanager +def expect_failure_and_print(should_mention=None): + got_an_error_as_expected = False + err_msg = None + try: + yield + except spack.error.UnsatisfiableSpecError as e: + got_an_error_as_expected = True + err_msg = str(e) + if not 
got_an_error_as_expected: + raise ValueError("A failure was supposed to occur in this context manager") + elif not err_msg: + raise ValueError("No error message for failed concretization") + print(err_msg) + check_error(err_msg, should_mention) + + +def check_error(msg, should_mention: Optional[Iterable] = None): + excludes = [ + "failed to concretize .* for the following reasons:", + "Cannot satisfy .*", + "required because .* requested explicitly", + "cannot satisfy a requirement for package .*", + ] + lines = msg.split("\n") + should_mention = set(should_mention) if should_mention else set() + should_mention_hits = set() + remaining = [] + for line in lines: + for p in should_mention: + if re.search(p, line): + should_mention_hits.add(p) + if any(re.search(p, line) for p in excludes): + continue + remaining.append(line) + if not remaining: + raise ValueError("The error message contains only generic statements") + should_mention_misses = should_mention - should_mention_hits + if should_mention_misses: + raise ValueError(f"The error message did not contain: {sorted(should_mention_misses)}") + + +def test_diamond_with_pkg_conflict1(concretize_scope, test_repo): + concretize_one("x2") + concretize_one("x3") + concretize_one("x4") + + important_points = ["x2 depends on x4@4.1", "x3 depends on x4@4.0"] + + with expect_failure_and_print(should_mention=important_points): + concretize_one("x1") + + +def test_diamond_with_pkg_conflict2(concretize_scope, test_repo): + important_points = [ + r"y2 depends on y4@4.1 when \+v1", + r"y1 depends on y2\+v1", + r"y3 depends on y4@4.0", + ] + + with expect_failure_and_print(should_mention=important_points): + concretize_one("y1") + + +@pytest.mark.xfail(reason="Not addressed yet") +def test_version_range_null(concretize_scope, test_repo): + with expect_failure_and_print(): + concretize_one("x2@3:4") + + +# This error message is hard to follow: neither z2 nor z3 +# is mentioned, so if this hierarchy had 10 other "OK" +# packages, a user would be conducting a tedious manual +# search. +@pytest.mark.xfail(reason="Not addressed yet") +def test_null_variant_for_requested_version(concretize_scope, test_repo): + r""" + Z1_ (@:1.1 -> !v1) + | \ + Z2 | + \ | + \| + Z3 (z1~v1 -> z3+v2) + (z2 ^z3:2.0) + (v2 only exists for @2.1:) + """ + concretize_one("z1") + + with expect_failure_and_print(should_mention=["z2"]): + concretize_one("z1@1.1") + + +def test_errmsg_requirements_1(concretize_scope, test_repo): + # w4 has: depends_on("w3+v1", when="@2.0") + # w3 has: requires("~v1", when="@2.1") + + important_points = [ + r"w4 depends on w3\+v1 when @2.0", + r"w4@:2.0 \^w3@2.1 requested explicitly", + r"~v1 is a requirement for package w3 when @2.1", + ] + + with expect_failure_and_print(should_mention=important_points): + concretize_one("w4@:2.0 ^w3@2.1") + + +def test_errmsg_requirements_cfg(concretize_scope, test_repo): + conf_str = """\ +packages: + w2: + require: + - one_of: ["~v1"] + when: "@2.0" +""" + update_packages_config(conf_str) + + important_points = [ + r"~v1 is a requirement for package w2 when @2.0", + r"w4 depends on w2@:2.0 when @:2.0", + r"w4@2.0 \^w2\+v1 requested explicitly", + ] + + # w4 has: depends_on("w2@:2.0", when="@:2.0") + with expect_failure_and_print(should_mention=important_points): + concretize_one("w4@2.0 ^w2+v1") + + +# This re-encodes the prior test test_errmsg_requirements_cfg +# in terms of the package-level `requires` directive. +def test_errmsg_requirements_directives(concretize_scope, test_repo): + # t4 has: depends_on("t2@:2.0", when="@:2.0") + # t2 has: 
requires("~v1", when="@:2.0") + + important_points = [ + r"~v1 is a requirement for package t2 when @:2.0", + r"t4 depends on t2@:2.0 when @:2.0", + r"t4@:2.0 \^t2\+v1 requested explicitly", + ] + + with expect_failure_and_print(should_mention=important_points): + concretize_one("t4@:2.0 ^t2+v1") + + +# Simulates a user error: package is specified as external with a version, +# but a different version was required in config. +def test_errmsg_requirements_external_mismatch(concretize_scope, test_repo): + conf_str = """\ +packages: + t1: + buildable: false + externals: + - spec: "t1@2.1" + prefix: /a/path/that/doesnt/need/to/exist/ + require: + - spec: "t1@2.0" +""" + update_packages_config(conf_str) + + important_points = ["no externals satisfy the request"] + + with expect_failure_and_print(should_mention=important_points): + concretize_one("t1") diff --git a/lib/spack/spack/test/externals.py b/lib/spack/spack/test/externals.py new file mode 100644 index 00000000000000..a57a70297405b4 --- /dev/null +++ b/lib/spack/spack/test/externals.py @@ -0,0 +1,342 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from typing import List + +import pytest + +from spack.vendor.archspec.cpu import TARGETS + +import spack.archspec +import spack.traverse +from spack.externals import ( + DuplicateExternalError, + ExternalDict, + ExternalSpecError, + ExternalSpecsParser, + complete_architecture, + complete_variants_and_architecture, +) + +pytestmark = pytest.mark.usefixtures("config", "mock_packages") + + +@pytest.mark.parametrize( + "externals_dict,expected_length,expected_queries", + [ + # Empty dictionary case + ([], 0, {"gmake": 0}), + # Single spec case + ( + [{"spec": "gmake@1.0", "prefix": "/path/to/gmake"}], + 1, + {"gmake": 1, "gmake@1.0": 1, "gmake@2.0": 0}, + ), + # Multiple specs case + ( + [ + {"spec": "gmake@1.0", "prefix": "/path/to/gmake1"}, + {"spec": "gmake@2.0", "prefix": "/path/to/gmake2"}, + {"spec": "gcc@1.0", "prefix": "/path/to/gcc"}, + ], + 3, + {"gmake": 2, "gmake@2": 1, "gcc": 1, "baz": 0}, + ), + # Case with modules and extra attributes + ( + [ + { + "spec": "gmake@1.0", + "prefix": "/path/to/gmake", + "modules": ["module1", "module2"], + "extra_attributes": {"attr1": "value1"}, + } + ], + 1, + {"gmake": 1}, + ), + ], +) +def test_basic_parsing(externals_dict, expected_length, expected_queries): + """Tests parsing external specs, in some basic cases""" + parser = ExternalSpecsParser(externals_dict) + + assert len(parser.all_specs()) == expected_length + assert len(parser.specs_by_external_id) == expected_length + for node in parser.all_specs(): + assert node.concrete + + for query, expected in expected_queries.items(): + assert len(parser.query(query)) == expected + + +@pytest.mark.parametrize( + "externals_dict,expected_triplet", + [ + ([{"spec": "gmake@1.0", "prefix": "/path/to/gmake1"}], ("test", "debian6", "aarch64")), + ( + [{"spec": "gmake@1.0 target=icelake", "prefix": "/path/to/gmake1"}], + ("test", "debian6", "icelake"), + ), + ( + [{"spec": "gmake@1.0 platform=linux target=icelake", "prefix": "/path/to/gmake1"}], + ("linux", "debian6", "icelake"), + ), + ( + [{"spec": "gmake@1.0 os=rhel8", "prefix": "/path/to/gmake1"}], + ("test", "rhel8", "aarch64"), + ), + ], +) +def test_external_specs_architecture_completion( + externals_dict: List[ExternalDict], expected_triplet, monkeypatch +): + """Tests the completion of external specs architectures when using the default behavior""" + 
monkeypatch.setattr(spack.archspec, "HOST_TARGET_FAMILY", TARGETS["aarch64"]) + parser = ExternalSpecsParser(externals_dict) + + expected_platform, expected_os, expected_target = expected_triplet + + for node in parser.all_specs(): + assert node.architecture is not None + assert node.architecture.platform == expected_platform + assert node.architecture.os == expected_os + assert node.target == expected_target + + +def test_external_specs_parser_with_missing_packages(): + """Tests the parsing of external specs when some packages are missing""" + externals_dict: List[ExternalDict] = [ + {"spec": "gmake@1.0", "prefix": "/path/to/gmake1"}, + {"spec": "gmake@2.0", "prefix": "/path/to/gmake2"}, + {"spec": "gcc@1.0", "prefix": "/path/to/gcc"}, + # This package does not exist in the builtin_mock repository + {"spec": "baz@1.0", "prefix": "/path/to/baz"}, + ] + + external_specs = ExternalSpecsParser(externals_dict, allow_nonexisting=True).all_specs() + assert len(external_specs) == 3 + assert len([x for x in external_specs if x.satisfies("gmake")]) == 2 + assert len([x for x in external_specs if x.satisfies("gcc")]) == 1 + + with pytest.raises(ExternalSpecError, match="Package 'baz' does not exist"): + ExternalSpecsParser(externals_dict, allow_nonexisting=False) + + +def test_externals_with_duplicate_id(): + """Tests the parsing of external specs when some specs have the same id""" + externals_dict: List[ExternalDict] = [ + {"spec": "gmake@1.0", "prefix": "/path/to/gmake1", "id": "gmake"}, + {"spec": "gmake@2.0", "prefix": "/path/to/gmake2", "id": "gmake"}, + {"spec": "gcc@1.0", "prefix": "/path/to/gcc", "id": "gcc"}, + ] + + with pytest.raises(DuplicateExternalError, match="cannot have the same external id"): + ExternalSpecsParser(externals_dict) + + +@pytest.mark.parametrize( + "externals_dicts,expected,not_expected", + [ + # o ascent@0.9.2 + # o adios2@2.7.1 + # o bzip2@1.0.8 + ( + [ + { + "spec": "ascent@0.9.2+adios2+shared", + "prefix": "/user/path", + "id": "ascent", + "dependencies": [{"id": "adios2", "deptypes": ["build", "link"]}], + }, + { + "spec": "adios2@2.7.1+shared", + "prefix": "/user/path", + "id": "adios2", + "dependencies": [{"id": "bzip2", "deptypes": ["build", "link"]}], + }, + {"spec": "bzip2@1.0.8+shared", "prefix": "/user/path", "id": "bzip2"}, + ], + { + "ascent": ["%[deptypes=build,link] adios2@2.7.1"], + "adios2": ["%[deptypes=build,link] bzip2@1.0.8"], + }, + {}, + ), + # o ascent@0.9.2 + # |\ + # | o adios2@2.7.1 + # |/ + # o bzip2@1.0.8 + ( + [ + { + "spec": "ascent@0.9.2+adios2+shared", + "prefix": "/user/path", + "id": "ascent", + "dependencies": [ + {"id": "adios2", "deptypes": "link"}, + {"id": "bzip2", "deptypes": "run"}, + ], + }, + { + "spec": "adios2@2.7.1+shared", + "prefix": "/user/path", + "id": "adios2", + "dependencies": [{"id": "bzip2", "deptypes": ["build", "link"]}], + }, + {"spec": "bzip2@1.0.8+shared", "prefix": "/user/path", "id": "bzip2"}, + ], + { + "ascent": ["%[deptypes=link] adios2@2.7.1", "%[deptypes=run] bzip2@1.0.8"], + "adios2": ["%[deptypes=build,link] bzip2@1.0.8"], + }, + { + "ascent": [ + "%[deptypes=build] adios2@2.7.1", + "%[deptypes=run] adios2@2.7.1", + "%[deptypes=build] bzip2@1.0.8", + "%[deptypes=link] bzip2@1.0.8", + ] + }, + ), + # Same, but specifying dependencies by spec: instead of id: + ( + [ + { + "spec": "ascent@0.9.2+adios2+shared", + "prefix": "/user/path", + "dependencies": [ + {"spec": "adios2", "deptypes": "link"}, + {"spec": "bzip2", "deptypes": "run"}, + ], + }, + { + "spec": "adios2@2.7.1+shared", + "prefix": 
"/user/path", + "dependencies": [{"spec": "bzip2", "deptypes": ["build", "link"]}], + }, + {"spec": "bzip2@1.0.8+shared", "prefix": "/user/path"}, + ], + { + "ascent": ["%[deptypes=link] adios2@2.7.1", "%[deptypes=run] bzip2@1.0.8"], + "adios2": ["%[deptypes=build,link] bzip2@1.0.8"], + }, + { + "ascent": [ + "%[deptypes=build] adios2@2.7.1", + "%[deptypes=run] adios2@2.7.1", + "%[deptypes=build] bzip2@1.0.8", + "%[deptypes=link] bzip2@1.0.8", + ] + }, + ), + # Inline specification for + # o mpileaks@2.2 + # | \ + # | o callpath@1.0 + # | / + # o gcc@15.0.1 + ( + [ + [ + {"spec": "mpileaks@2.2 %gcc %callpath", "prefix": "/user/path"}, + {"spec": "callpath@1.0", "prefix": "/user/path"}, + {"spec": "gcc@15.0.1 languages=c,c++", "prefix": "/user/path"}, + ], + {"mpileaks": ["%[deptypes=build] gcc@15", "%[deptypes=build,link] callpath@1.0"]}, + {"mpileaks": ["%[deptypes=link] gcc@15"]}, + ] + ), + # CMake dependency should be inferred of `deptypes=build` + # o cmake-client + # | + # o cmake@3.23.1 + ( + [ + [ + {"spec": "cmake-client@1.0 %cmake", "prefix": "/user/path"}, + {"spec": "cmake@3.23.1", "prefix": "/user/path"}, + ], + {"cmake-client": ["%[deptypes=build] cmake"]}, + {"cmake-client": ["%[deptypes=link] cmake", "%[deptypes=run] cmake"]}, + ] + ), + ], +) +def test_externals_with_dependencies(externals_dicts: List[ExternalDict], expected, not_expected): + """Tests constructing externals with dependencies""" + parser = ExternalSpecsParser(externals_dicts) + + for query_spec, expected_list in expected.items(): + result = parser.query(query_spec) + assert len(result) == 1 + assert all(result[0].satisfies(c) for c in expected_list) + + for query_spec, not_expected_list in not_expected.items(): + result = parser.query(query_spec) + assert len(result) == 1 + assert all(not result[0].satisfies(c) for c in not_expected_list) + + # Assert all nodes have the namespace set + for node in spack.traverse.traverse_nodes(parser.all_specs()): + assert node.namespace is not None + + +@pytest.mark.parametrize( + "externals_dicts,expected_length,not_expected", + [ + ([{"spec": "mpileaks", "prefix": "/user/path", "id": "mpileaks"}], 0, ["mpileaks"]), + ([{"spec": "mpileaks@2:", "prefix": "/user/path", "id": "mpileaks"}], 0, ["mpileaks"]), + ], +) +def test_externals_without_concrete_version( + externals_dicts: List[ExternalDict], expected_length, not_expected +): + """Tests parsing externals, when some dicts are malformed and don't have a concrete version""" + parser = ExternalSpecsParser(externals_dicts) + result = parser.all_specs() + + assert len(result) == expected_length + for c in not_expected: + assert all(not s.satisfies(c) for s in result) + + +@pytest.mark.parametrize( + "externals_dict,completion_fn,expected,not_expected", + [ + ( + [{"spec": "mpileaks@2.3", "prefix": "/user/path"}], + complete_architecture, + {"mpileaks": ["platform=test"]}, + {"mpileaks": ["debug=*", "opt=*", "shared=*", "static=*"]}, + ), + ( + [{"spec": "mpileaks@2.3", "prefix": "/user/path"}], + complete_variants_and_architecture, + {"mpileaks": ["platform=test", "~debug", "~opt", "+shared", "+static"]}, + {"mpileaks": ["+debug", "+opt", "~shared", "~static"]}, + ), + ], +) +def test_external_node_completion( + externals_dict: List[ExternalDict], completion_fn, expected, not_expected +): + """Tests the completion of external specs with different node completion""" + parser = ExternalSpecsParser(externals_dict, complete_node=completion_fn) + + for query_spec, expected_list in expected.items(): + result = 
parser.query(query_spec) + assert len(result) == 1 + for expected in expected_list: + assert result[0].satisfies(expected) + + for query_spec, expected_list in not_expected.items(): + result = parser.query(query_spec) + assert len(result) == 1 + for expected in expected_list: + assert not result[0].satisfies(expected) + + # Assert all nodes have the namespace set + for node in spack.traverse.traverse_nodes(parser.all_specs()): + assert node.namespace is not None diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py index 18a26f8d261040..717bb0a1ead6e8 100644 --- a/lib/spack/spack/test/git_fetch.py +++ b/lib/spack/spack/test/git_fetch.py @@ -16,6 +16,7 @@ import spack.package_base import spack.platforms import spack.repo +import spack.util.git from spack.fetch_strategy import GitFetchStrategy from spack.llnl.util.filesystem import mkdirp, touch, working_dir from spack.package_base import PackageBase @@ -25,6 +26,7 @@ from spack.version import Version _mock_transport_error = "Mock HTTP transport error" +min_opt_string = ".".join(map(str, spack.util.git.MIN_OPT_VERSION)) @pytest.fixture(params=[None, "1.8.5.2", "1.8.5.1", "1.7.10", "1.7.1", "1.7.0"]) @@ -36,7 +38,7 @@ def git_version(git, request, monkeypatch): paths for old versions still work, we fake it out here and make it use the backward-compatibility code paths with newer git versions. """ - real_git_version = spack.fetch_strategy.GitFetchStrategy.version_from_git(git) + real_git_version = Version(spack.util.git.extract_git_version_str(git)) if request.param is None: # Don't patch; run with the real git_version method. @@ -49,26 +51,20 @@ def git_version(git, request, monkeypatch): # Patch the fetch strategy to think it's using a lower git version. # we use this to test what we'd need to do with older git versions # using a newer git installation. - monkeypatch.setattr(GitFetchStrategy, "git_version", test_git_version) + monkeypatch.setattr(spack.util.git, "extract_git_version_str", lambda _: request.param) yield test_git_version @pytest.fixture -def mock_bad_git(monkeypatch): +def mock_bad_git(mock_util_executable): """ Test GitFetchStrategy behavior with a bad git command for git >= 1.7.1 to trigger a SpackError. """ - def bad_git(*args, **kwargs): - """Raise a SpackError with the transport message.""" - raise spack.error.SpackError(_mock_transport_error) - - # Patch the fetch strategy to think it's using a git version that - # will error out when git is called. 
- monkeypatch.setattr(GitFetchStrategy, "git", bad_git) - monkeypatch.setattr(GitFetchStrategy, "git_version", Version("1.7.1")) - yield + _, should_fail, registered_responses = mock_util_executable + should_fail.extend(["clone", "fetch"]) + registered_responses["--version"] = "1.7.1" def test_bad_git(tmp_path: pathlib.Path, mock_bad_git): @@ -181,13 +177,18 @@ def test_fetch_pkg_attr_submodule_init( ) @pytest.mark.disable_clean_stage_check def test_adhoc_version_submodules( - mock_git_repository, config, mutable_mock_repo, monkeypatch, mock_stage + mock_git_repository, + config, + mutable_mock_repo, + monkeypatch, + mock_stage, + override_git_repos_cache_path, ): t = mock_git_repository.checks["tag"] # Construct the package under test pkg_class = spack.repo.PATH.get_pkg_class("git-test") monkeypatch.setitem(pkg_class.versions, Version("git"), t.args) - monkeypatch.setattr(pkg_class, "git", "file://%s" % mock_git_repository.path, raising=False) + monkeypatch.setattr(pkg_class, "git", mock_git_repository.url, raising=False) spec = spack.concretize.concretize_one( Spec("git-test@{0}".format(mock_git_repository.unversioned_commit)) @@ -250,7 +251,7 @@ def test_get_full_repo( ): """Ensure that we can clone a full repository.""" - if git_version < Version("1.7.1"): + if git_version < Version(min_opt_string): pytest.skip("Not testing get_full_repo for older git {0}".format(git_version)) secure = True @@ -438,7 +439,7 @@ def test_git_sparse_paths_partial_clone( @pytest.mark.regression("50699") def test_git_sparse_path_have_unique_mirror_projections( - git, mock_git_repository, mutable_mock_repo, monkeypatch + git, mock_git_repository, mutable_mock_repo, monkeypatch, mutable_config ): """ Confirm two packages with different sparse paths but the same git commit diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py index 36807edf948e5c..b2e100b65493d6 100644 --- a/lib/spack/spack/test/install.py +++ b/lib/spack/spack/test/install.py @@ -10,6 +10,7 @@ import pytest import spack.build_environment +import spack.builder import spack.concretize import spack.config import spack.database @@ -128,6 +129,7 @@ def remove_prefix(self): def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch, working_env): s = spack.concretize.concretize_one("canfail") + s.package.succeed = False instance_rm_prefix = s.package.remove_prefix @@ -141,7 +143,9 @@ def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch, wo # must clear failure markings for the package before re-installing it spack.store.STORE.failure_tracker.clear(s, True) - s.package.set_install_succeed() + s.package.succeed = True + spack.builder._BUILDERS.clear() # the builder is cached with a copy of the pkg's __dict__. + PackageInstaller([s.package], explicit=True, restage=True).install() assert rm_prefix_checker.removed assert s.package.spec.installed @@ -158,11 +162,12 @@ def test_failing_overwrite_install_should_keep_previous_installation( """ # Do a successful install s = spack.concretize.concretize_one("canfail") - s.package.set_install_succeed() + s.package.succeed = True # Do a failing overwrite install PackageInstaller([s.package], explicit=True).install() - s.package.set_install_fail() + s.package.succeed = False + spack.builder._BUILDERS.clear() # the builder is cached with a copy of the pkg's __dict__. 
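    # (Editor's note, not part of this diff: per the comment above, the builder
    # created during the first install captured a copy of the package's
    # __dict__, including the old value of `succeed`; clearing
    # spack.builder._BUILDERS makes the next install construct a fresh builder
    # that sees the flipped flag.)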
kwargs = {"overwrite": [s.dag_hash()]} with pytest.raises(Exception): @@ -283,6 +288,7 @@ def test_installed_upstream(install_upstream, mock_fetch): @pytest.mark.disable_clean_stage_check def test_partial_install_keep_prefix(install_mockery, mock_fetch, monkeypatch, working_env): s = spack.concretize.concretize_one("canfail") + s.package.succeed = False # If remove_prefix is called at any point in this test, that is an error monkeypatch.setattr(spack.package_base.PackageBase, "remove_prefix", mock_remove_prefix) @@ -293,7 +299,8 @@ def test_partial_install_keep_prefix(install_mockery, mock_fetch, monkeypatch, w # must clear failure markings for the package before re-installing it spack.store.STORE.failure_tracker.clear(s, True) - s.package.set_install_succeed() + s.package.succeed = True + spack.builder._BUILDERS.clear() # the builder is cached with a copy of the pkg's __dict__. PackageInstaller([s.package], explicit=True, keep_prefix=True).install() assert s.package.spec.installed @@ -302,12 +309,12 @@ def test_second_install_no_overwrite_first(install_mockery, mock_fetch, monkeypa s = spack.concretize.concretize_one("canfail") monkeypatch.setattr(spack.package_base.PackageBase, "remove_prefix", mock_remove_prefix) - s.package.set_install_succeed() + s.package.succeed = True PackageInstaller([s.package], explicit=True).install() assert s.package.spec.installed # If Package.install is called after this point, it will fail - s.package.set_install_fail() + s.package.succeed = False PackageInstaller([s.package], explicit=True).install() @@ -334,7 +341,7 @@ def test_store(install_mockery, mock_fetch): @pytest.mark.disable_clean_stage_check -def test_failing_build(install_mockery, mock_fetch, capfd): +def test_failing_build(install_mockery, mock_fetch): spec = spack.concretize.concretize_one("failing-build") pkg = spec.package @@ -361,6 +368,7 @@ def test_uninstall_by_spec_errors(mutable_database): @pytest.mark.disable_clean_stage_check +@pytest.mark.use_package_hash def test_nosource_pkg_install(install_mockery, mock_fetch, mock_packages, capfd, ensure_debug): """Test install phases with the nosource package.""" spec = spack.concretize.concretize_one("nosource") @@ -594,8 +602,8 @@ def test_install_from_binary_with_missing_patch_succeeds( PackageInstaller( [s.package], explicit=True, - package_cache_only=True, - dependencies_cache_only=True, + root_policy="cache_only", + dependencies_policy="cache_only", unsigned=True, ).install() diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py index 35936f591e300b..4a5a1955ca5f9a 100644 --- a/lib/spack/spack/test/installer.py +++ b/lib/spack/spack/test/installer.py @@ -25,11 +25,10 @@ import spack.package_base import spack.package_prefs as prefs import spack.repo +import spack.report import spack.spec import spack.store -import spack.test.conftest import spack.util.lock as lk -from spack.installer import PackageInstaller from spack.main import SpackCommand from spack.test.conftest import RepoBuilder @@ -142,8 +141,8 @@ def test_install_from_cache_errors(install_mockery): with pytest.raises( spack.error.InstallError, match="No binary found when cache-only was specified" ): - PackageInstaller( - [spec.package], package_cache_only=True, dependencies_cache_only=True + inst.PackageInstaller( + [spec.package], root_policy="cache_only", dependencies_policy="cache_only" ).install() assert not spec.package.installed_from_binary_cache @@ -321,7 +320,7 @@ def test_installer_ensure_ready_errors(install_mockery, monkeypatch): 
installer._ensure_install_ready(spec.package) -def test_ensure_locked_err(install_mockery, monkeypatch, tmp_path: pathlib.Path, capsys): +def test_ensure_locked_err(install_mockery, monkeypatch, tmp_path: pathlib.Path, capfd): """Test _ensure_locked when a non-lock exception is raised.""" mock_err_msg = "Mock exception error" @@ -336,12 +335,12 @@ def _raise(lock, timeout=None): with pytest.raises(RuntimeError): installer._ensure_locked("read", spec.package) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Failed to acquire a read lock" in out assert mock_err_msg in out -def test_ensure_locked_have(install_mockery, tmp_path: pathlib.Path, capsys): +def test_ensure_locked_have(install_mockery, tmp_path: pathlib.Path, capfd): """Test _ensure_locked when already have lock.""" installer = create_installer(["trivial-install-test-package"], {}) spec = installer.build_requests[0].pkg.spec @@ -361,7 +360,7 @@ def test_ensure_locked_have(install_mockery, tmp_path: pathlib.Path, capsys): with pytest.raises(ulk.LockUpgradeError, match=err): installer._ensure_locked(lock_type, spec.package) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Failed to upgrade to a write lock" in out assert "exception when releasing read lock" in out @@ -388,7 +387,7 @@ def test_ensure_locked_new_lock(install_mockery, tmp_path: pathlib.Path, lock_ty assert lock._writes == writes -def test_ensure_locked_new_warn(install_mockery, monkeypatch, capsys): +def test_ensure_locked_new_warn(install_mockery, monkeypatch, capfd): orig_pl = spack.database.SpecLocker.lock def _pl(db, spec, timeout): @@ -406,7 +405,7 @@ def _pl(db, spec, timeout): assert ltype == lock_type assert lock is not None - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Expected prefix lock timeout" in out @@ -443,7 +442,7 @@ def test_dump_packages_deps_ok(install_mockery, tmp_path: pathlib.Path, mock_pac assert os.path.isfile(dest_pkg) -def test_dump_packages_deps_errs(install_mockery, tmp_path: pathlib.Path, monkeypatch, capsys): +def test_dump_packages_deps_errs(install_mockery, tmp_path: pathlib.Path, monkeypatch, capfd): """Test error paths for dump_packages with dependencies.""" orig_bpp = spack.store.STORE.layout.build_packages_path orig_dirname = spack.repo.Repo.dirname_for_package_name @@ -480,7 +479,7 @@ def _repoerr(repo, name): with pytest.raises(spack.repo.RepoError, match=repo_err_msg): inst.dump_packages(spec, path) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Couldn't copy in provenance for cmake" in out @@ -511,7 +510,8 @@ def test_clear_failures_success(tmp_path: pathlib.Path): @pytest.mark.not_on_windows("chmod does not prevent removal on Win") -def test_clear_failures_errs(tmp_path: pathlib.Path, capsys): +@pytest.mark.skipif(fs.getuid() == 0, reason="user is root") +def test_clear_failures_errs(tmp_path: pathlib.Path, capfd): """Test the clear_failures exception paths.""" failures = spack.database.FailureTracker(str(tmp_path), default_timeout=0.1) spec = spack.spec.Spec("pkg-a") @@ -525,7 +525,7 @@ def test_clear_failures_errs(tmp_path: pathlib.Path, capsys): failures.clear_all() # Ensure expected warning generated - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "Unable to remove failure" in out failures.dir.chmod(0o750) @@ -646,8 +646,24 @@ def test_installer_init_requests(install_mockery): assert request.pkg.name == spec_name +def false(*args, **kwargs): + return False + + +def 
test_rewire_task_no_tarball(monkeypatch, mock_packages): + spec = spack.concretize.concretize_one("splice-t") + dep = spack.concretize.concretize_one("splice-h+foo") + out = spec.splice(dep) + + rewire_task = inst.RewireTask(out.package, inst.BuildRequest(out.package, {})) + monkeypatch.setattr(inst, "_process_binary_cache_tarball", false) + monkeypatch.setattr(spack.report.InstallRecord, "succeed", lambda x: None) + + assert rewire_task.complete() == inst.ExecuteResult.MISSING_BUILD_SPEC + + @pytest.mark.parametrize("transitive", [True, False]) -def test_install_spliced(install_mockery, mock_fetch, monkeypatch, capsys, transitive): +def test_install_spliced(install_mockery, mock_fetch, monkeypatch, transitive): """Test installing a spliced spec""" spec = spack.concretize.concretize_one("splice-t") dep = spack.concretize.concretize_one("splice-h+foo") @@ -662,14 +678,14 @@ def test_install_spliced(install_mockery, mock_fetch, monkeypatch, capsys, trans @pytest.mark.parametrize("transitive", [True, False]) -def test_install_spliced_build_spec_installed(install_mockery, capfd, mock_fetch, transitive): +def test_install_spliced_build_spec_installed(install_mockery, mock_fetch, transitive): """Test installing a spliced spec with the build spec already installed""" spec = spack.concretize.concretize_one("splice-t") dep = spack.concretize.concretize_one("splice-h+foo") # Do the splice. out = spec.splice(dep, transitive) - PackageInstaller([out.build_spec.package]).install() + inst.PackageInstaller([out.build_spec.package]).install() installer = create_installer([out], {"verbose": True, "fail_fast": True}) installer._init_queue() @@ -688,19 +704,14 @@ def test_install_spliced_build_spec_installed(install_mockery, capfd, mock_fetch "root_str", ["splice-t^splice-h~foo", "splice-h~foo", "splice-vt^splice-a"] ) def test_install_splice_root_from_binary( - mutable_mock_env_path, - install_mockery, - mock_fetch, - mutable_temporary_mirror, - transitive, - root_str, + mutable_mock_env_path, install_mockery, mock_fetch, temporary_mirror, transitive, root_str ): """Test installing a spliced spec with the root available in binary cache""" # Test splicing and rewiring a spec with the same name, different hash. 
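    # (Editor's note, not part of this diff: spec.splice(other, transitive)
    # returns a copy of the concrete DAG with `other` substituted for the
    # like-named node, so `out` keeps the original name but gets a new
    # dag_hash, while `out.build_spec` still identifies the unspliced spec
    # that was actually built and pushed to the mirror.)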
original_spec = spack.concretize.concretize_one(root_str) spec_to_splice = spack.concretize.concretize_one("splice-h+foo") - PackageInstaller([original_spec.package, spec_to_splice.package]).install() + inst.PackageInstaller([original_spec.package, spec_to_splice.package]).install() out = original_spec.splice(spec_to_splice, transitive) @@ -709,7 +720,7 @@ def test_install_splice_root_from_binary( "push", "--unsigned", "--update-index", - mutable_temporary_mirror, + temporary_mirror, str(original_spec), str(spec_to_splice), ) @@ -717,7 +728,7 @@ def test_install_splice_root_from_binary( uninstall = SpackCommand("uninstall") uninstall("-ay") - PackageInstaller([out.package], unsigned=True).install() + inst.PackageInstaller([out.package], unsigned=True).install() assert len(spack.store.STORE.db.query()) == len(list(out.traverse())) @@ -754,7 +765,7 @@ def test_installing_task_use_cache(install_mockery, monkeypatch): assert request.pkg_id in installer.installed -def test_install_task_requeue_build_specs(install_mockery, monkeypatch, capfd): +def test_install_task_requeue_build_specs(install_mockery, monkeypatch): """Check that a missing build_spec spec is added by _complete_task.""" # This test also ensures coverage of most of the new @@ -780,7 +791,7 @@ def _missing(*args, **kwargs): assert inst.package_id(popped_task.pkg.spec) in installer.build_tasks -def test_release_lock_write_n_exception(install_mockery, tmp_path: pathlib.Path, capsys): +def test_release_lock_write_n_exception(install_mockery, tmp_path: pathlib.Path, capfd): """Test _release_lock for supposed write lock with exception.""" installer = create_installer(["trivial-install-test-package"], {}) @@ -791,7 +802,7 @@ def test_release_lock_write_n_exception(install_mockery, tmp_path: pathlib.Path, assert lock._writes == 0 installer._release_lock(pkg_id) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) msg = "exception when releasing write lock for {0}".format(pkg_id) assert msg in out @@ -896,7 +907,7 @@ def _chgrp(path, group, follow_symlinks=True): assert expected_msg in out -def test_cleanup_failed_err(install_mockery, tmp_path: pathlib.Path, monkeypatch, capsys): +def test_cleanup_failed_err(install_mockery, tmp_path: pathlib.Path, monkeypatch, capfd): """Test _cleanup_failed exception path.""" msg = "Fake release_write exception" @@ -912,7 +923,7 @@ def _raise_except(lock): installer.failed[pkg_id] = lock installer._cleanup_failed(pkg_id) - out = str(capsys.readouterr()[1]) + out = str(capfd.readouterr()[1]) assert "exception when removing failure tracking" in out assert msg in out @@ -928,7 +939,7 @@ def test_update_failed_no_dependent_task(install_mockery): assert installer.failed[task.pkg_id] is None -def test_install_uninstalled_deps(install_mockery, monkeypatch, capsys): +def test_install_uninstalled_deps(install_mockery, monkeypatch, capfd): """Test install with uninstalled dependencies.""" installer = create_installer(["parallel-package-a"], {}) @@ -943,11 +954,11 @@ def test_install_uninstalled_deps(install_mockery, monkeypatch, capsys): with pytest.raises(spack.error.InstallError, match=msg): installer.install() - out = str(capsys.readouterr()) + out = str(capfd.readouterr()) assert "Detected uninstalled dependencies for" in out -def test_install_failed(install_mockery, monkeypatch, capsys): +def test_install_failed(install_mockery, monkeypatch, capfd): """Test install with failed install.""" installer = create_installer(["parallel-package-a"], {}) @@ -957,12 +968,12 @@ def 
test_install_failed(install_mockery, monkeypatch, capsys): with pytest.raises(spack.error.InstallError, match="request failed"): installer.install() - out = str(capsys.readouterr()) + out = str(capfd.readouterr()) assert installer.build_requests[0].pkg_id in out assert "failed to install" in out -def test_install_failed_not_fast(install_mockery, monkeypatch, capsys): +def test_install_failed_not_fast(install_mockery, monkeypatch, capfd): """Test install with failed install.""" installer = create_installer(["parallel-package-a"], {"fail_fast": False}) @@ -972,7 +983,7 @@ def test_install_failed_not_fast(install_mockery, monkeypatch, capsys): with pytest.raises(spack.error.InstallError, match="request failed"): installer.install() - out = str(capsys.readouterr()) + out = str(capfd.readouterr()) assert "failed to install" in out assert "Skipping build of parallel-package-a" in out @@ -1052,7 +1063,7 @@ def test_install_fail_multi(install_mockery, mock_fetch, monkeypatch): assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed) -def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capsys): +def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capfd): """Test fail_fast install when an install failure is detected.""" a = spack.concretize.concretize_one("parallel-package-a") @@ -1077,7 +1088,7 @@ def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capsys): ), "Package a cannot install due to its dependencies failing" # check that b's active process got killed when c failed - assert f"{b_id} failed to install" in capsys.readouterr().err + assert f"{b_id} failed to install" in capfd.readouterr().err def _test_install_fail_fast_on_except_patch(installer, **kwargs): @@ -1088,7 +1099,7 @@ def _test_install_fail_fast_on_except_patch(installer, **kwargs): @pytest.mark.disable_clean_stage_check -def test_install_fail_fast_on_except(install_mockery, monkeypatch, capsys): +def test_install_fail_fast_on_except(install_mockery, monkeypatch, capfd): """Test fail_fast install when an install failure results from an error.""" installer = create_installer(["pkg-a"], {"fail_fast": True}) @@ -1103,7 +1114,7 @@ def test_install_fail_fast_on_except(install_mockery, monkeypatch, capsys): with pytest.raises(spack.error.InstallError, match="mock patch failure"): installer.install() - out = str(capsys.readouterr()) + out = str(capfd.readouterr()) assert "Skipping build of pkg-a" in out @@ -1352,7 +1363,7 @@ def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, r pkg = s.package pkg.run_tests = run_tests - spack.installer.print_install_test_log(pkg) + inst.print_install_test_log(pkg) out = capfd.readouterr()[0] assert out == "" @@ -1369,12 +1380,12 @@ def test_print_install_test_log_failures( pkg.run_tests = True pkg.tester.test_log_file = str(tmp_path / "test-log.txt") pkg.tester.add_failure(AssertionError("test"), "test-failure") - spack.installer.print_install_test_log(pkg) + inst.print_install_test_log(pkg) err = capfd.readouterr()[1] assert "no test log file" in err # Having test log results in path being output fs.touch(pkg.tester.test_log_file) - spack.installer.print_install_test_log(pkg) + inst.print_install_test_log(pkg) out = capfd.readouterr()[0] assert "See test results at" in out diff --git a/lib/spack/spack/test/installer_build_graph.py b/lib/spack/spack/test/installer_build_graph.py new file mode 100644 index 00000000000000..0aa5ca30069920 --- /dev/null +++ b/lib/spack/spack/test/installer_build_graph.py @@ -0,0 
+1,586 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +"""Tests for BuildGraph class in new_installer""" +import sys +from typing import Dict, List, Tuple, Union + +import pytest + +if sys.platform == "win32": + pytest.skip("Skipping new installer tests on Windows", allow_module_level=True) + +import spack.deptypes as dt +import spack.error +import spack.traverse +from spack.new_installer import BuildGraph +from spack.spec import Spec +from spack.store import Store + + +def create_dag( + nodes: List[str], edges: List[Tuple[str, str, Union[dt.DepType, Tuple[dt.DepType, ...]]]] +) -> Dict[str, Spec]: + """ + Create a DAG of concrete specs, as a mapping from package name to Spec. + + Arguments: + nodes: list of unique package names + edges: list of tuples (parent, child, deptype) + """ + specs = {name: Spec(name) for name in nodes} + for parent, child, deptypes in edges: + depflag = deptypes if isinstance(deptypes, dt.DepFlag) else dt.canonicalize(deptypes) + specs[parent].add_dependency_edge(specs[child], depflag=depflag, virtuals=()) + + # Mark all specs as concrete + for spec in specs.values(): + spec._mark_concrete() + + return specs + + +def install_spec_in_db(spec: Spec, store: Store): + """Helper to install a spec in the database for testing.""" + prefix = store.layout.path_for_spec(spec) + spec.set_prefix(prefix) + # Use the layout to create a proper installation directory structure + store.layout.create_install_directory(spec) + store.db.add(spec, explicit=False) + + +@pytest.fixture +def mock_specs(): + """Create a set of mock specs for testing. + + DAG structure: + root -> dep1 -> dep2 + root -> dep3 + """ + return create_dag( + nodes=["root", "dep1", "dep2", "dep3"], + edges=[ + ("root", "dep1", ("build", "link")), + ("root", "dep3", ("build", "link")), + ("dep1", "dep2", ("build", "link")), + ], + ) + + +@pytest.fixture +def diamond_dag(): + """Create a diamond-shaped DAG to test shared dependencies. + + DAG structure: + root -> dep1 -> shared + root -> dep2 -> shared + """ + return create_dag( + nodes=["root", "dep1", "dep2", "shared"], + edges=[ + ("root", "dep1", ("build", "link")), + ("root", "dep2", ("build", "link")), + ("dep1", "shared", ("build", "link")), + ("dep2", "shared", ("build", "link")), + ], + ) + + +@pytest.fixture +def specs_with_build_deps(): + """Create specs with different dependency types for testing build dep filtering. + + DAG structure: + root -> link_dep (link only) + root -> build_dep (build only) + root -> all_dep (build, link, run) + """ + return create_dag( + nodes=["root", "link_dep", "build_dep", "all_dep"], + edges=[ + ("root", "link_dep", "link"), + ("root", "build_dep", "build"), + ("root", "all_dep", ("build", "link", "run")), + ], + ) + + +@pytest.fixture +def complex_pruning_dag(): + """Create a complex DAG for testing re-parenting logic. + + DAG structure: + parent1 -> middle -> child1 + parent2 -> middle -> child2 + + When 'middle' is installed and pruned, both parent1 and parent2 should + become direct parents of both child1 and child2 (full Cartesian product). 
+ """ + return create_dag( + nodes=["parent1", "parent2", "middle", "child1", "child2"], + edges=[ + ("parent1", "middle", ("build", "link")), + ("parent2", "middle", ("build", "link")), + ("middle", "child1", ("build", "link")), + ("middle", "child2", ("build", "link")), + ], + ) + + +class TestBuildGraph: + """Tests for the BuildGraph class.""" + + def test_basic_graph_construction(self, mock_specs: Dict[str, Spec], temporary_store: Store): + """Test basic graph construction with all specs to be installed.""" + graph = BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # Root should be in roots set + assert mock_specs["root"].dag_hash() in graph.roots + # All uninstalled specs should be in nodes + assert len(graph.nodes) == 4 # root, dep1, dep2, dep3 + # Root should have 2 children (dep1, dep3) + assert len(graph.parent_to_child[mock_specs["root"].dag_hash()]) == 2 + + def test_install_package_only_mode(self, mock_specs: Dict[str, Spec], temporary_store: Store): + """Test that install_package=False removes root specs from graph.""" + graph = BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=False, # Only install dependencies + install_deps=True, + database=temporary_store.db, + ) + + # Root should NOT be in nodes when install_package=False + assert mock_specs["root"].dag_hash() not in graph.nodes + # But its dependencies should be + assert mock_specs["dep1"].dag_hash() in graph.nodes + + def test_install_deps_false_with_uninstalled_deps( + self, mock_specs: Dict[str, Spec], temporary_store: Store + ): + """Test that install_deps=False raises error when dependencies are not installed.""" + # Should raise error because dependencies are not installed + with pytest.raises( + spack.error.InstallError, match="package only mode.*dependency.*not installed" + ): + BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=False, # Don't install dependencies + database=temporary_store.db, + ) + + def test_multiple_roots(self, mock_specs: Dict[str, Spec], temporary_store: Store): + """Test graph construction with multiple root specs.""" + graph = BuildGraph( + specs=[mock_specs["root"], mock_specs["dep1"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # Both should be in roots + assert mock_specs["root"].dag_hash() in graph.roots + assert mock_specs["dep1"].dag_hash() in graph.roots + + def test_parent_child_mappings(self, mock_specs: Dict[str, Spec], temporary_store: Store): + """Test that parent-child mappings are correctly constructed.""" + spec_root = mock_specs["root"] + graph = BuildGraph( + specs=[spec_root], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # Verify parent_to_child and child_to_parent are inverse mappings + for parent, children in graph.parent_to_child.items(): + for child in children: + assert child in graph.child_to_parent + assert parent in graph.child_to_parent[child] + + def test_diamond_dag_with_shared_dependency( + self, diamond_dag: Dict[str, Spec], temporary_store: Store + ): + """Test graph 
construction with a diamond DAG where a dependency has multiple parents.""" + graph = BuildGraph( + specs=[diamond_dag["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # Shared dependency should have two parents + shared_hash = diamond_dag["shared"].dag_hash() + assert len(graph.child_to_parent[shared_hash]) == 2 + # Both dep1 and dep2 should be parents of shared + assert diamond_dag["dep1"].dag_hash() in graph.child_to_parent[shared_hash] + assert diamond_dag["dep2"].dag_hash() in graph.child_to_parent[shared_hash] + + def test_pruning_installed_specs(self, mock_specs: Dict[str, Spec], temporary_store: Store): + """Test that installed specs are correctly pruned from the graph.""" + # Install dep2 in the database + dep2 = mock_specs["dep2"] + install_spec_in_db(dep2, temporary_store) + + graph = BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # dep2 should be pruned since it's installed + assert dep2.dag_hash() not in graph.nodes + # But dep1 (its parent) should still be in the graph + assert mock_specs["dep1"].dag_hash() in graph.nodes + # And dep1 should have no children (since dep2 was pruned) + assert len(graph.parent_to_child[mock_specs["dep1"].dag_hash()]) == 0 + + def test_pruning_with_shared_dependency_partially_installed( + self, diamond_dag: Dict[str, Spec], temporary_store: Store + ): + """Test that pruning a shared dependency correctly updates all parents.""" + # Install the shared dependency + shared = diamond_dag["shared"] + install_spec_in_db(shared, temporary_store) + graph = BuildGraph( + specs=[diamond_dag["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # Shared should be pruned + assert shared.dag_hash() not in graph.nodes + # Both dep1 and dep2 should have no children + assert len(graph.parent_to_child[diamond_dag["dep1"].dag_hash()]) == 0 + assert len(graph.parent_to_child[diamond_dag["dep2"].dag_hash()]) == 0 + + def test_overwrite_set_prevents_pruning( + self, mock_specs: Dict[str, Spec], temporary_store: Store + ): + """Test that specs in overwrite_set are not pruned even if installed.""" + # Install dep2 in the database + dep2 = mock_specs["dep2"] + install_spec_in_db(dep2, temporary_store) + + # Create graph with dep2 in the overwrite set + graph = BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=True, + database=temporary_store.db, + overwrite_set={dep2.dag_hash()}, + ) + + # dep2 should NOT be pruned since it's in overwrite_set + assert dep2.dag_hash() in graph.nodes + # dep1 should still have dep2 as a child + assert dep2.dag_hash() in graph.parent_to_child[mock_specs["dep1"].dag_hash()] + # dep2 should have dep1 as a parent + assert mock_specs["dep1"].dag_hash() in graph.child_to_parent[dep2.dag_hash()] + + def test_installed_root_excludes_build_deps_even_when_requested( + self, specs_with_build_deps: Dict[str, Spec], temporary_store: Store + ): + """Test that installed root specs never include build deps, even with + include_build_deps=True.""" + root = specs_with_build_deps["root"] + install_spec_in_db(root, temporary_store) + + graph = 
BuildGraph( + specs=[root], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=True, # Should be ignored for installed root + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # build_dep should NOT be in the graph (installed root never needs build deps) + assert specs_with_build_deps["build_dep"].dag_hash() not in graph.nodes + # link_dep and all_dep should be in the graph (link/run deps) + assert specs_with_build_deps["link_dep"].dag_hash() in graph.nodes + assert specs_with_build_deps["all_dep"].dag_hash() in graph.nodes + + def test_cache_only_excludes_build_deps( + self, specs_with_build_deps: Dict[str, Spec], temporary_store: Store + ): + """Test that cache_only policy excludes build deps when include_build_deps=False.""" + specs = [specs_with_build_deps["root"]] + graph = BuildGraph( + specs=specs, + root_policy="cache_only", + dependencies_policy="auto", + include_build_deps=False, # exclude build deps when possible + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + assert specs_with_build_deps["build_dep"].dag_hash() not in graph.nodes + assert specs_with_build_deps["link_dep"].dag_hash() in graph.nodes + assert specs_with_build_deps["all_dep"].dag_hash() in graph.nodes + + # Verify that the entire graph has a prefix assigned, so that the subprocess does not have + # to obtain a read lock on the database. + for s in spack.traverse.traverse_nodes(specs): + assert s._prefix is not None + + def test_cache_only_includes_build_deps_when_requested( + self, specs_with_build_deps: Dict[str, Spec], temporary_store: Store + ): + """Test that cache_only policy includes build deps when include_build_deps=True.""" + graph = BuildGraph( + specs=[specs_with_build_deps["root"]], + root_policy="cache_only", + dependencies_policy="cache_only", + include_build_deps=True, + install_package=True, + install_deps=True, + database=temporary_store.db, + ) + + # All dependencies should be in the graph, including build_dep + assert specs_with_build_deps["build_dep"].dag_hash() in graph.nodes + assert specs_with_build_deps["link_dep"].dag_hash() in graph.nodes + assert specs_with_build_deps["all_dep"].dag_hash() in graph.nodes + + def test_install_deps_false_with_all_deps_installed( + self, mock_specs: Dict[str, Spec], temporary_store: Store + ): + """Test successful package-only install when all dependencies are already installed.""" + # Install all dependencies + for dep_name in ["dep1", "dep2", "dep3"]: + install_spec_in_db(mock_specs[dep_name], temporary_store) + + # Should succeed since all dependencies are installed + graph = BuildGraph( + specs=[mock_specs["root"]], + root_policy="auto", + dependencies_policy="auto", + include_build_deps=False, + install_package=True, + install_deps=False, + database=temporary_store.db, + ) + + # Only the root should be in the graph + assert len(graph.nodes) == 1 + assert mock_specs["root"].dag_hash() in graph.nodes + # Root should have no children (all deps pruned) + assert len(graph.parent_to_child.get(mock_specs["root"].dag_hash(), [])) == 0 + + def test_pruning_creates_cartesian_product_of_connections( + self, complex_pruning_dag: Dict[str, Spec], temporary_store: Store + ): + """Test that pruning creates full Cartesian product of parent-child connections. + + When a node with multiple parents and multiple children is pruned, + all parents should be connected to all children (parents x children). 
+
+        DAG structure:
+            parent1 -> middle -> child1
+            parent2 -> middle -> child2
+
+        After pruning 'middle':
+            parent1 -> child1
+            parent1 -> child2
+            parent2 -> child1
+            parent2 -> child2
+        """
+        # Install the middle node
+        middle = complex_pruning_dag["middle"]
+        install_spec_in_db(middle, temporary_store)
+
+        # Use both parents as roots to build the graph
+        graph = BuildGraph(
+            specs=[complex_pruning_dag["parent1"], complex_pruning_dag["parent2"]],
+            root_policy="auto",
+            dependencies_policy="auto",
+            include_build_deps=False,
+            install_package=True,
+            install_deps=True,
+            database=temporary_store.db,
+        )
+
+        parent1_hash = complex_pruning_dag["parent1"].dag_hash()
+        parent2_hash = complex_pruning_dag["parent2"].dag_hash()
+        middle_hash = middle.dag_hash()
+        child1_hash = complex_pruning_dag["child1"].dag_hash()
+        child2_hash = complex_pruning_dag["child2"].dag_hash()
+
+        # middle should be pruned since it's installed
+        assert middle_hash not in graph.nodes
+
+        # All other nodes should be in the graph
+        assert parent1_hash in graph.nodes
+        assert parent2_hash in graph.nodes
+        assert child1_hash in graph.nodes
+        assert child2_hash in graph.nodes
+
+        # Verify full Cartesian product: each parent should be connected to each child
+        # parent1 -> child1, child2
+        assert child1_hash in graph.parent_to_child[parent1_hash]
+        assert child2_hash in graph.parent_to_child[parent1_hash]
+
+        # parent2 -> child1, child2
+        assert child1_hash in graph.parent_to_child[parent2_hash]
+        assert child2_hash in graph.parent_to_child[parent2_hash]
+
+        # Verify reverse mapping: each child should have both parents
+        # child1 <- parent1, parent2
+        assert parent1_hash in graph.child_to_parent[child1_hash]
+        assert parent2_hash in graph.child_to_parent[child1_hash]
+
+        # child2 <- parent1, parent2
+        assert parent1_hash in graph.child_to_parent[child2_hash]
+        assert parent2_hash in graph.child_to_parent[child2_hash]
+
+        # middle should not appear in any parent-child relationships
+        assert middle_hash not in graph.parent_to_child
+        assert middle_hash not in graph.child_to_parent
+
+    def test_empty_graph_all_specs_installed(
+        self, mock_specs: Dict[str, Spec], temporary_store: Store
+    ):
+        """Test that the graph is empty when all specs are already installed."""
+        # Install all specs in the DAG
+        for spec_name in ["root", "dep1", "dep2", "dep3"]:
+            install_spec_in_db(mock_specs[spec_name], temporary_store)
+
+        graph = BuildGraph(
+            specs=[mock_specs["root"]],
+            root_policy="auto",
+            dependencies_policy="auto",
+            include_build_deps=False,
+            install_package=True,
+            install_deps=True,
+            database=temporary_store.db,
+        )
+
+        # All nodes should be pruned, resulting in an empty graph
+        assert len(graph.nodes) == 0
+        assert len(graph.parent_to_child) == 0
+        assert len(graph.child_to_parent) == 0
+
+    def test_empty_graph_install_package_false_all_deps_installed(
+        self, mock_specs: Dict[str, Spec], temporary_store: Store
+    ):
+        """Test empty graph when install_package=False and all dependencies are installed."""
+        # Install all dependencies (but not the root)
+        for dep_name in ["dep1", "dep2", "dep3"]:
+            install_spec_in_db(mock_specs[dep_name], temporary_store)
+
+        graph = BuildGraph(
+            specs=[mock_specs["root"]],
+            root_policy="auto",
+            dependencies_policy="auto",
+            include_build_deps=False,
+            install_package=False,  # Don't install the root
+            install_deps=True,
+            database=temporary_store.db,
+        )
+
+        # Root is pruned because install_package=False
+        # Dependencies are pruned because they're installed
+        # Result: empty graph
+        assert len(graph.nodes) == 0
+        assert len(graph.parent_to_child) == 0
+        assert len(graph.child_to_parent) == 0
+
+    def test_pruning_leaf_node(self, mock_specs: Dict[str, Spec], temporary_store: Store):
+        """Test that pruning a leaf node (no children) works correctly.
+
+        This ensures the pruning logic handles the boundary condition where
+        a node has no children to re-wire.
+        """
+        # Install dep2, which is a leaf node (no children)
+        dep2 = mock_specs["dep2"]
+        install_spec_in_db(dep2, temporary_store)
+
+        graph = BuildGraph(
+            specs=[mock_specs["root"]],
+            root_policy="auto",
+            dependencies_policy="auto",
+            include_build_deps=False,
+            install_package=True,
+            install_deps=True,
+            database=temporary_store.db,
+        )
+
+        dep2_hash = dep2.dag_hash()
+        dep1_hash = mock_specs["dep1"].dag_hash()
+
+        # dep2 should be pruned
+        assert dep2_hash not in graph.nodes
+        # dep1 (parent of dep2) should have no children now
+        assert len(graph.parent_to_child[dep1_hash]) == 0
+        # dep2 should not appear in any mappings
+        assert dep2_hash not in graph.parent_to_child
+        assert dep2_hash not in graph.child_to_parent
+
+    def test_pruning_root_node_with_install_package_false(
+        self, mock_specs: Dict[str, Spec], temporary_store: Store
+    ):
+        """Test that pruning a root node (no parents in the context) works correctly.
+
+        When install_package=False, root nodes are marked for pruning. This ensures
+        the pruning logic handles the boundary condition where a node has no parents.
+        """
+        graph = BuildGraph(
+            specs=[mock_specs["dep1"]],
+            root_policy="auto",
+            dependencies_policy="auto",
+            include_build_deps=False,
+            install_package=False,  # Prune the root
+            install_deps=True,
+            database=temporary_store.db,
+        )
+
+        dep1_hash = mock_specs["dep1"].dag_hash()
+        dep2_hash = mock_specs["dep2"].dag_hash()
+
+        # dep1 should be pruned (it's the root and install_package=False)
+        assert dep1_hash not in graph.nodes
+        # dep2 (child of dep1) should still be in the graph
+        assert dep2_hash in graph.nodes
+        # dep2 should have no parents now (its only parent was pruned)
+        assert not graph.child_to_parent.get(dep2_hash)
+        # dep1 should not appear in any mappings
+        assert dep1_hash not in graph.parent_to_child
+        assert dep1_hash not in graph.child_to_parent
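Aside: the re-wiring contract these tests pin down (every parent of a pruned node is joined to every one of its children, in both directions) can be sketched with plain dict-of-set maps. This is an illustrative sketch only; BuildGraph's internal representation is assumed, not quoted:

from typing import Dict, Set

def prune(node: str, p2c: Dict[str, Set[str]], c2p: Dict[str, Set[str]]) -> None:
    """Drop `node`, wiring each of its parents to each of its children."""
    parents = c2p.pop(node, set())
    children = p2c.pop(node, set())
    for parent in parents:
        p2c[parent].discard(node)
        p2c[parent].update(children)  # parent -> every former child of node
    for child in children:
        c2p[child].discard(node)
        c2p[child].update(parents)  # child <- every former parent of node

# The DAG from the docstring above: parent1/parent2 -> middle -> child1/child2
p2c = {"parent1": {"middle"}, "parent2": {"middle"}, "middle": {"child1", "child2"}}
c2p = {"middle": {"parent1", "parent2"}, "child1": {"middle"}, "child2": {"middle"}}
prune("middle", p2c, c2p)
assert p2c["parent1"] == p2c["parent2"] == {"child1", "child2"}
assert c2p["child1"] == c2p["child2"] == {"parent1", "parent2"}

Pruning a leaf or a root degenerates to one empty side of that product, which is exactly what the two boundary-condition tests above check.

diff --git a/lib/spack/spack/test/installer_tui.py b/lib/spack/spack/test/installer_tui.py
new file mode 100644
index 00000000000000..55f39eb3ddac3d
--- /dev/null
+++ b/lib/spack/spack/test/installer_tui.py
@@ -0,0 +1,1082 @@
+# Copyright Spack Project Developers. See COPYRIGHT file for details.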
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +"""Tests for the BuildStatus terminal UI in new_installer.py""" + +import io +import os +import sys +from typing import List, Optional, Tuple + +import pytest + +if sys.platform == "win32": + pytest.skip("No Windows support", allow_module_level=True) + +import spack.new_installer as inst +from spack.new_installer import BuildStatus + + +class MockConnection: + """Mock multiprocessing.Connection for testing""" + + def fileno(self): + return -1 + + +class MockSpec: + """Minimal mock for spack.spec.Spec""" + + def __init__( + self, name: str, version: str = "1.0", external: bool = False, prefix: Optional[str] = None + ) -> None: + self.name = name + self.version = version + self.external = external + self.prefix = prefix or f"/fake/prefix/{name}" + self._hash = name # Simple hash based on name + + def dag_hash(self, length: Optional[int] = None) -> str: + if length: + return self._hash[:length] + return self._hash + + +class SimpleTextIOWrapper(io.TextIOWrapper): + """TextIOWrapper around a BytesIO buffer for testing of stdout behavior""" + + def __init__(self, tty: bool) -> None: + self._buffer = io.BytesIO() + self._tty = tty + super().__init__(self._buffer, encoding="utf-8", line_buffering=True) + + def isatty(self) -> bool: + return self._tty + + def getvalue(self) -> str: + self.flush() + return self._buffer.getvalue().decode("utf-8") + + def clear(self): + self.flush() + self._buffer.truncate(0) + self._buffer.seek(0) + + +def create_build_status( + is_tty: bool = True, terminal_cols: int = 80, terminal_rows: int = 24, total: int = 0 +) -> Tuple[BuildStatus, List[float], SimpleTextIOWrapper]: + """Helper function to create BuildStatus with mocked dependencies""" + fake_stdout = SimpleTextIOWrapper(tty=is_tty) + # Easy way to set the current time in tests before running UI updates + time_values = [0.0] + + def mock_get_time(): + return time_values[-1] + + def mock_get_terminal_size(): + return os.terminal_size((terminal_cols, terminal_rows)) + + status = BuildStatus( + total=total, + stdout=fake_stdout, + get_terminal_size=mock_get_terminal_size, + get_time=mock_get_time, + is_tty=is_tty, + ) + + return status, time_values, fake_stdout + + +def add_mock_builds(status: BuildStatus, count: int) -> List[MockSpec]: + """Helper function to add builds to a BuildStatus instance""" + specs = [MockSpec(f"pkg{i}", f"{i}.0") for i in range(count)] + for spec in specs: + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) # type: ignore + return specs + + +class TestBasicStateManagement: + """Test basic state management operations""" + + def test_add_build(self): + """Test that add_build adds builds correctly""" + status, _, _ = create_build_status(total=2) + spec1 = MockSpec("pkg1", "1.0") + spec2 = MockSpec("pkg2", "2.0") + + status.add_build(spec1, explicit=True, control_w_conn=MockConnection()) + assert len(status.builds) == 1 + assert spec1.dag_hash() in status.builds + assert status.builds[spec1.dag_hash()].name == "pkg1" + assert status.builds[spec1.dag_hash()].explicit is True + assert status.dirty is True + + status.add_build(spec2, explicit=False, control_w_conn=MockConnection()) + assert len(status.builds) == 2 + assert spec2.dag_hash() in status.builds + assert status.builds[spec2.dag_hash()].explicit is False + + def test_update_state_transitions(self): + """Test that update_state transitions states properly""" + status, fake_time, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = 
spec.dag_hash() + + # Update to 'building' state + status.update_state(build_id, "building") + assert status.builds[build_id].state == "building" + assert status.builds[build_id].progress_percent is None + assert status.completed == 0 + + # Update to 'finished' state + status.update_state(build_id, "finished") + assert status.builds[build_id].state == "finished" + assert status.completed == 1 + assert status.builds[build_id].finished_time == fake_time[0] + inst.CLEANUP_TIMEOUT + + def test_update_state_failed(self): + """Test that failed state increments completed counter""" + status, fake_time, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + status.update_state(build_id, "failed") + assert status.builds[build_id].state == "failed" + assert status.completed == 1 + assert status.builds[build_id].finished_time == fake_time[0] + inst.CLEANUP_TIMEOUT + + def test_update_progress(self): + """Test that update_progress updates percentages""" + status, _, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Update progress + status.update_progress(build_id, 50, 100) + assert status.builds[build_id].progress_percent == 50 + assert status.dirty is True + + # Same percentage shouldn't mark dirty again + status.dirty = False + status.update_progress(build_id, 50, 100) + assert status.dirty is False + + # Different percentage should mark dirty + status.update_progress(build_id, 75, 100) + assert status.builds[build_id].progress_percent == 75 + assert status.dirty is True + + def test_completion_counter(self): + """Test that completion counter increments correctly""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + assert status.completed == 0 + + status.update_state(specs[0].dag_hash(), "finished") + assert status.completed == 1 + + status.update_state(specs[1].dag_hash(), "failed") + assert status.completed == 2 + + status.update_state(specs[2].dag_hash(), "finished") + assert status.completed == 3 + + +class TestOutputRendering: + """Test output rendering for TTY and non-TTY modes""" + + def test_non_tty_output(self): + """Test that non-TTY mode prints simple state changes""" + status, _, fake_stdout = create_build_status(is_tty=False) + spec = MockSpec("mypackage", "1.0") + + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + build_id = spec.dag_hash() + + status.update_state(build_id, "finished") + + output = fake_stdout.getvalue() + assert "mypackage" in output + assert "1.0" in output + assert "finished" in output + # Non-TTY output should not contain ANSI escape codes + assert "\033[" not in output + + def test_tty_output_contains_ansi(self): + """Test that TTY mode produces ANSI codes""" + status, _, fake_stdout = create_build_status() + add_mock_builds(status, 1) + + # Call update to render + status.update() + + output = fake_stdout.getvalue() + # Should contain ANSI escape sequences + assert "\033[" in output + # Should contain progress header + assert "Progress:" in output + + def test_no_output_when_not_dirty(self): + """Test that update() skips rendering when not dirty""" + status, _, fake_stdout = create_build_status() + add_mock_builds(status, 1) + status.update() + + # Clear stdout and mark not dirty + fake_stdout.clear() + status.dirty = False + + # Update should not produce output + status.update() + assert fake_stdout.getvalue() == "" + + def test_update_throttling(self): + """Test that update() throttles redraws""" + status, 
fake_time, fake_stdout = create_build_status() + add_mock_builds(status, 1) + + # First update at time 0 + fake_time[0] = 0.0 + status.update() + first_output = fake_stdout.getvalue() + assert first_output != "" + + # Mark dirty and try to update immediately + fake_stdout.clear() + status.dirty = True + fake_time[0] = 0.01 # Very small time advance + + # Should be throttled (next_update not reached) + status.update() + assert fake_stdout.getvalue() == "" + + # Advance time past throttle and try again + fake_time[0] = 1.0 + status.update() + assert fake_stdout.getvalue() != "" + + def test_cursor_movement_vs_newlines(self): + """Test that finished builds get newlines, active builds get cursor movements""" + status, fake_time, fake_stdout = create_build_status(total=5) + specs = add_mock_builds(status, 3) + + # First update renders 3 active builds + fake_time[0] = 0.0 + status.update() + output1 = fake_stdout.getvalue() + + # Count newlines (\n) and cursor movements (\033[1E = move down 1 line) + newlines1 = output1.count("\n") + cursor_moves1 = output1.count("\033[1E") + + # Initially all lines should be newlines (nothing in history yet) + assert newlines1 > 0 + assert cursor_moves1 == 0 + + # Now finish 2 builds and add 2 more + fake_stdout.clear() + fake_time[0] = inst.CLEANUP_TIMEOUT + 0.1 + status.update_state(specs[0].dag_hash(), "finished") + status.update_state(specs[1].dag_hash(), "finished") + + spec4 = MockSpec("pkg3", "3.0") + spec5 = MockSpec("pkg4", "4.0") + status.add_build(spec4, explicit=True, control_w_conn=MockConnection()) + status.add_build(spec5, explicit=True, control_w_conn=MockConnection()) + + # Second update: finished builds persist (newlines), active area updates (cursor moves) + status.update() + output2 = fake_stdout.getvalue() + + newlines2 = output2.count("\n") + cursor_moves2 = output2.count("\033[1E") + + # Should have newlines for the 2 finished builds persisted to history + # and cursor movements for the active area (header + 3 active builds) + assert newlines2 > 0, "Should have newlines for finished builds" + assert cursor_moves2 > 0, "Should have cursor movements for active area" + + # Finished builds should be printed with newlines + assert "pkg0" in output2 + assert "pkg1" in output2 + + +class TestTimeBasedBehavior: + """Test time-based behaviors like spinner and cleanup""" + + def test_spinner_updates(self): + """Test that spinner advances over time""" + status, fake_time, _ = create_build_status() + add_mock_builds(status, 1) + + # Initial spinner index + initial_index = status.spinner_index + + # Advance time past spinner interval + fake_time[0] = inst.SPINNER_INTERVAL + 0.01 + status.update() + + # Spinner should have advanced + assert status.spinner_index == (initial_index + 1) % len(status.spinner_chars) + + def test_finished_package_cleanup(self): + """Test that finished packages are cleaned up after timeout""" + status, fake_time, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Mark as finished + fake_time[0] = 0.0 + status.update_state(build_id, "finished") + + # Build should still be in active builds + assert build_id in status.builds + assert len(status.finished_builds) == 0 + + # Advance time past cleanup timeout + fake_time[0] = inst.CLEANUP_TIMEOUT + 0.01 + status.update() + + # Build should now be moved to finished_builds and removed from active + assert build_id not in status.builds + # Note: finished_builds is cleared after rendering, so check it happened via side effects + assert 
status.dirty or build_id not in status.builds + + def test_failed_packages_not_cleaned_up(self): + """Test that failed packages stay in active builds""" + status, fake_time, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Mark as failed + fake_time[0] = 0.0 + status.update_state(build_id, "failed") + + # Advance time past cleanup timeout + fake_time[0] = inst.CLEANUP_TIMEOUT + 0.01 + status.update() + + # Failed build should remain in active builds + assert build_id in status.builds + + +class TestSearchAndFilter: + """Test search mode and filtering""" + + def test_enter_search_mode(self): + """Test that enter_search enables search mode""" + status, _, _ = create_build_status() + assert status.search_mode is False + + status.enter_search() + assert status.search_mode is True + assert status.dirty is True + + def test_search_input_printable(self): + """Test that printable characters are added to search term""" + status, _, _ = create_build_status() + status.enter_search() + + status.search_input("a") + assert status.search_term == "a" + + status.search_input("b") + assert status.search_term == "ab" + + status.search_input("c") + assert status.search_term == "abc" + + def test_search_input_backspace(self): + """Test that backspace removes characters""" + status, _, _ = create_build_status() + status.enter_search() + + status.search_input("a") + status.search_input("b") + status.search_input("c") + assert status.search_term == "abc" + + status.search_input("\x7f") # Backspace + assert status.search_term == "ab" + + status.search_input("\b") # Alternative backspace + assert status.search_term == "a" + + def test_search_input_escape(self): + """Test that escape exits search mode""" + status, _, _ = create_build_status() + status.enter_search() + status.search_input("test") + + status.search_input("\x1b") # Escape + assert status.search_mode is False + assert status.search_term == "" + + def test_is_displayed_filters_by_name(self): + """Test that _is_displayed filters by package name""" + status, _, _ = create_build_status(total=3) + + spec1 = MockSpec("package-foo", "1.0") + spec2 = MockSpec("package-bar", "1.0") + spec3 = MockSpec("other", "1.0") + + status.add_build(spec1, explicit=True, control_w_conn=MockConnection()) + status.add_build(spec2, explicit=True, control_w_conn=MockConnection()) + status.add_build(spec3, explicit=True, control_w_conn=MockConnection()) + + build1 = status.builds[spec1.dag_hash()] + build2 = status.builds[spec2.dag_hash()] + build3 = status.builds[spec3.dag_hash()] + + # No search term: all displayed + status.search_term = "" + assert status._is_displayed(build1) + assert status._is_displayed(build2) + assert status._is_displayed(build3) + + # Search for "package" + status.search_term = "package" + assert status._is_displayed(build1) + assert status._is_displayed(build2) + assert not status._is_displayed(build3) + + # Search for "foo" + status.search_term = "foo" + assert status._is_displayed(build1) + assert not status._is_displayed(build2) + assert not status._is_displayed(build3) + + def test_is_displayed_filters_by_hash(self): + """Test that _is_displayed filters by hash prefix""" + status, _, _ = create_build_status(total=2) + + spec1 = MockSpec("pkg1", "1.0") + spec1._hash = "abc123" + spec2 = MockSpec("pkg2", "1.0") + spec2._hash = "def456" + + status.add_build(spec1, explicit=True, control_w_conn=MockConnection()) + status.add_build(spec2, explicit=True, control_w_conn=MockConnection()) + + build1 = 
status.builds[spec1.dag_hash()] + build2 = status.builds[spec2.dag_hash()] + + # Search by hash prefix + status.search_term = "abc" + assert status._is_displayed(build1) + assert not status._is_displayed(build2) + + status.search_term = "def" + assert not status._is_displayed(build1) + assert status._is_displayed(build2) + + +class TestNavigation: + """Test navigation between builds""" + + def test_get_next_basic(self): + """Test basic next/previous navigation""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Get first build + first_id = status._get_next(1) + assert first_id == specs[0].dag_hash() + + # Set tracked and get next + status.tracked_build_id = first_id + next_id = status._get_next(1) + assert next_id == specs[1].dag_hash() + + # Get next again + status.tracked_build_id = next_id + next_id = status._get_next(1) + assert next_id == specs[2].dag_hash() + + # Wrap around + status.tracked_build_id = next_id + next_id = status._get_next(1) + assert next_id == specs[0].dag_hash() + + def test_get_next_previous(self): + """Test backward navigation""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Start at second build + status.tracked_build_id = specs[1].dag_hash() + + # Go backward + prev_id = status._get_next(-1) + assert prev_id == specs[0].dag_hash() + + # Go backward again (wrap around) + status.tracked_build_id = prev_id + prev_id = status._get_next(-1) + assert prev_id == specs[2].dag_hash() + + def test_get_next_with_filter(self): + """Test navigation respects search filter""" + status, _, _ = create_build_status(total=4) + + specs = [ + MockSpec("package-a", "1.0"), + MockSpec("package-b", "1.0"), + MockSpec("other-c", "1.0"), + MockSpec("package-d", "1.0"), + ] + for spec in specs: + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + + # Filter to only "package-*" + status.search_term = "package" + + # Should only navigate through matching builds + first_id = status._get_next(1) + assert first_id and first_id == specs[0].dag_hash() + + status.tracked_build_id = first_id + next_id = status._get_next(1) + assert next_id and next_id == specs[1].dag_hash() + + status.tracked_build_id = next_id + next_id = status._get_next(1) + # Should skip "other-c" and go to "package-d" + assert next_id and next_id == specs[3].dag_hash() + + def test_get_next_skips_finished(self): + """Test that navigation skips finished builds""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Mark middle build as finished + status.update_state(specs[1].dag_hash(), "finished") + + # Navigate from first + status.tracked_build_id = specs[0].dag_hash() + next_id = status._get_next(1) + # Should skip finished build and go to third + assert next_id == specs[2].dag_hash() + + def test_get_next_no_matching(self): + """Test that _get_next returns None when no builds match""" + status, _, _ = create_build_status(total=2) + specs = add_mock_builds(status, 2) + + # Mark both as finished + for spec in specs: + status.update_state(spec.dag_hash(), "finished") + + # Should return None since no unfinished builds + result = status._get_next(1) + assert result is None + + def test_get_next_fallback_when_tracked_filtered_out(self): + """Test that _get_next falls back correctly when tracked build no longer matches filter""" + status, _, _ = create_build_status(total=3) + + specs = [ + MockSpec("package-a", "1.0"), + MockSpec("package-b", "1.0"), + MockSpec("other-c", "1.0"), + ] + for 
spec in specs: + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + + # Start tracking "other-c" + status.tracked_build_id = specs[2].dag_hash() + + # Now apply a filter that excludes the tracked build + status.search_term = "package" + + # _get_next should fall back to first matching build (forward) + next_id = status._get_next(1) + assert next_id == specs[0].dag_hash() + + # Test backward direction, should fall back to last matching build + status.tracked_build_id = specs[2].dag_hash() # Reset to filtered-out build + prev_id = status._get_next(-1) + assert prev_id == specs[1].dag_hash() + + +class TestTerminalSizes: + """Test behavior with different terminal sizes""" + + def test_small_terminal_truncation(self): + """Test that output is truncated for small terminals""" + status, _, fake_stdout = create_build_status(total=10, terminal_cols=80, terminal_rows=10) + + # Add more builds than can fit on screen + add_mock_builds(status, 10) + + status.update() + output = fake_stdout.getvalue() + + # Should contain "more..." message indicating truncation + assert "more..." in output + + def test_large_terminal_no_truncation(self): + """Test that all builds shown on large terminal""" + status, _, fake_stdout = create_build_status(total=3, terminal_cols=120) + add_mock_builds(status, 3) + + status.update() + output = fake_stdout.getvalue() + + # Should not contain truncation message + assert "more..." not in output + # Should contain all package names + for i in range(3): + assert f"pkg{i}" in output + + def test_narrow_terminal_short_header(self): + """Test that narrow terminals get shortened header""" + status, _, fake_stdout = create_build_status(total=1, terminal_cols=40) + add_mock_builds(status, 1) + + status.update() + output = fake_stdout.getvalue() + + # Should not contain the full header with hints + assert "filter" not in output + # But should contain progress + assert "Progress:" in output + + +class TestBuildInfo: + """Test the BuildInfo dataclass""" + + def test_build_info_creation(self): + """Test that BuildInfo is created correctly""" + spec = MockSpec("mypackage", "1.0") + + build_info = inst.BuildInfo(spec, explicit=True, control_w_conn=MockConnection()) + + assert build_info.name == "mypackage" + assert build_info.version == "1.0" + assert build_info.explicit is True + assert build_info.external is False + assert build_info.state == "starting" + assert build_info.finished_time is None + assert build_info.progress_percent is None + + def test_build_info_external_package(self): + """Test BuildInfo for external package""" + spec = MockSpec("external-pkg", "1.0", external=True) + + build_info = inst.BuildInfo(spec, explicit=False, control_w_conn=MockConnection()) + + assert build_info.external is True + + +class TestLogFollowing: + """Test log following and print_logs functionality""" + + def test_print_logs_when_following(self): + """Test that logs are printed when following a specific build""" + status, _, fake_stdout = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Switch to log-following mode + status.overview_mode = False + status.tracked_build_id = build_id + + # Send some log data + log_data = b"Building package...\nRunning tests...\n" + status.print_logs(build_id, log_data) + + # Check that logs were echoed to stdout + assert fake_stdout._buffer.getvalue() == log_data + + def test_print_logs_discarded_when_in_overview_mode(self): + """Test that logs are discarded when in overview mode""" + status, _, 
fake_stdout = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Stay in overview mode + assert status.overview_mode is True + + # Try to print logs + log_data = b"Should not be printed\n" + status.print_logs(build_id, log_data) + + # Nothing should be printed + assert fake_stdout.getvalue() == "" + + def test_print_logs_discarded_when_not_tracked(self): + """Test that logs from non-tracked builds are discarded""" + status, _, fake_stdout = create_build_status(total=2) + spec1, spec2 = add_mock_builds(status, 2) + + # Switch to log-following mode for spec1 + status.overview_mode = False + status.tracked_build_id = spec1.dag_hash() + + # Try to print logs from spec2 (not tracked) + log_data = b"Logs from pkg2\n" + status.print_logs(spec2.dag_hash(), log_data) + + # Nothing should be printed since we're tracking pkg1, not pkg2 + assert fake_stdout.getvalue() == "" + + def test_cannot_follow_failed_build(self): + """Test that navigation skips failed builds""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Mark the middle build as failed + status.update_state(specs[1].dag_hash(), "failed") + + # The failed build should have finished_time set + assert status.builds[specs[1].dag_hash()].finished_time is not None + + # Try to get next build, should skip the failed one + status.tracked_build_id = specs[0].dag_hash() + next_id = status._get_next(1) + + # Should skip pkg1 (failed) and return pkg2 + assert next_id == specs[2].dag_hash() + + +class TestNavigationIntegration: + """Test the next() method and navigation between builds""" + + def test_next_switches_from_overview_to_logs(self): + """Test that next() switches from overview mode to log-following mode""" + status, _, fake_stdout = create_build_status(total=2) + specs = add_mock_builds(status, 2) + + # Start in overview mode + assert status.overview_mode is True + assert status.tracked_build_id == "" + + # Call next() to start following first build + status.next() + + # Should have switched to log-following mode + assert status.overview_mode is False + assert status.tracked_build_id == specs[0].dag_hash() + + # Should have printed "Following logs" message + output = fake_stdout.getvalue() + assert "Following logs of" in output + assert "pkg0" in output + + def test_next_cycles_through_builds(self): + """Test that next() cycles through multiple builds""" + status, _, fake_stdout = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Start following first build + status.next() + assert status.tracked_build_id == specs[0].dag_hash() + + fake_stdout.clear() + + # Navigate to next + status.next(1) + assert status.tracked_build_id == specs[1].dag_hash() + assert "pkg1" in fake_stdout.getvalue() + + fake_stdout.clear() + + # Navigate to next (third build) + status.next(1) + assert status.tracked_build_id == specs[2].dag_hash() + assert "pkg2" in fake_stdout.getvalue() + + fake_stdout.clear() + + # Navigate to next (should wrap to first) + status.next(1) + assert status.tracked_build_id == specs[0].dag_hash() + assert "pkg0" in fake_stdout.getvalue() + + def test_next_backward_navigation(self): + """Test that next(-1) navigates backward""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Start at first build + status.next() + assert status.tracked_build_id == specs[0].dag_hash() + + # Go backward (should wrap to last) + status.next(-1) + assert status.tracked_build_id == specs[2].dag_hash() + + # Go backward 
again + status.next(-1) + assert status.tracked_build_id == specs[1].dag_hash() + + def test_next_does_nothing_when_no_builds(self): + """Test that next() does nothing when no unfinished builds exist""" + status, _, _ = create_build_status(total=1) + (spec,) = add_mock_builds(status, 1) + + # Mark as finished + status.update_state(spec.dag_hash(), "finished") + + # Try to navigate + initial_mode = status.overview_mode + initial_tracked = status.tracked_build_id + + status.next() + + # Nothing should change + assert status.overview_mode == initial_mode + assert status.tracked_build_id == initial_tracked + + def test_next_does_nothing_when_same_build(self): + """Test that next() doesn't re-print when already on the same build""" + status, _, fake_stdout = create_build_status(total=1) + (spec,) = add_mock_builds(status, 1) + + # Start following + status.next() + assert status.tracked_build_id == spec.dag_hash() + + # Clear output + fake_stdout.clear() + + # Try to navigate to "next" (which is the same build) + status.next() + + # Should not print anything + assert fake_stdout.getvalue() == "" + + +class TestToggle: + """Test toggle() method for switching between overview and log-following modes""" + + def test_toggle_from_overview_calls_next(self): + """Test that toggle() from overview mode calls next()""" + status, _, fake_stdout = create_build_status(total=2) + add_mock_builds(status, 2) + + # Start in overview mode + assert status.overview_mode is True + + # Toggle should call next() + status.toggle() + + # Should now be following logs + assert status.overview_mode is False + assert status.tracked_build_id != "" + assert "Following logs of" in fake_stdout.getvalue() + + def test_toggle_from_logs_returns_to_overview(self): + """Test that toggle() from log-following mode returns to overview""" + status, _, _ = create_build_status(total=2) + add_mock_builds(status, 2) + + # Switch to log-following mode first + status.next() + assert status.overview_mode is False + tracked_id = status.tracked_build_id + assert tracked_id != "" + + # Set some search state to verify cleanup + status.search_term = "test" + status.search_mode = True + status.active_area_rows = 5 + + # Toggle back to overview + status.toggle() + + # Should be back in overview mode with cleaned state + assert status.overview_mode is True + assert status.tracked_build_id == "" + assert status.search_term == "" + assert status.search_mode is False + assert status.active_area_rows == 0 + assert status.dirty is True + + def test_update_state_finished_triggers_toggle_when_tracking(self): + """Test that finishing a tracked build triggers toggle back to overview""" + status, _, _ = create_build_status(total=2) + specs = add_mock_builds(status, 2) + + # Start tracking first build + status.next() + assert status.overview_mode is False + assert status.tracked_build_id == specs[0].dag_hash() + + # Mark the tracked build as finished + status.update_state(specs[0].dag_hash(), "finished") + + # Should have toggled back to overview mode + assert status.overview_mode is True + assert status.tracked_build_id == "" + + +class TestSearchFilteringIntegration: + """Test search mode with display filtering""" + + def test_search_mode_filters_displayed_builds(self): + """Test that search mode actually filters what's displayed""" + status, _, fake_stdout = create_build_status(total=4) + + specs = [ + MockSpec("package-foo", "1.0"), + MockSpec("package-bar", "2.0"), + MockSpec("other-thing", "3.0"), + MockSpec("package-baz", "4.0"), + ] + for spec in specs: 
+ status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + + # Enter search mode and search for "package" + status.enter_search() + assert status.search_mode is True + + for character in "package": + status.search_input(character) + + assert status.search_term == "package" + + # Update to render + status.update() + output = fake_stdout.getvalue() + + # Should contain filtered builds + assert "package-foo" in output + assert "package-bar" in output + assert "package-baz" in output + # Should not contain the filtered-out build + assert "other-thing" not in output + + # Should show filter prompt + assert "filter>" in output + assert status.search_term in output + + def test_search_mode_with_navigation(self): + """Test that navigation respects search filter""" + status, _, _ = create_build_status(total=4) + + specs = [ + MockSpec("package-a", "1.0"), + MockSpec("other-b", "2.0"), + MockSpec("package-c", "3.0"), + MockSpec("other-d", "4.0"), + ] + for spec in specs: + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + + # Set search term to filter for "package" + status.search_term = "package" + + # Start navigating, should only go through "package-a" and "package-c" + status.next() + assert status.tracked_build_id == specs[0].dag_hash() # package-a + + status.next(1) + # Should skip other-b and go to package-c + assert status.tracked_build_id == specs[2].dag_hash() # package-c + + status.next(1) + # Should wrap around to package-a + assert status.tracked_build_id == specs[0].dag_hash() # package-a + + def test_search_input_enter_navigates_to_next(self): + """Test that pressing enter in search mode navigates to next match""" + status, _, _ = create_build_status(total=3) + specs = add_mock_builds(status, 3) + + # Enter search mode + status.enter_search() + for character in "pkg": + status.search_input(character) + + # Press enter (should navigate to first match) + status.search_input("\r") + + # Should have started following first matching build + assert status.overview_mode is False + assert status.tracked_build_id == specs[0].dag_hash() + + def test_clearing_search_shows_all_builds(self): + """Test that clearing search term shows all builds again""" + status, _, fake_stdout = create_build_status(total=3) + + specs = [ + MockSpec("package-a", "1.0"), + MockSpec("other-b", "2.0"), + MockSpec("package-c", "3.0"), + ] + for spec in specs: + status.add_build(spec, explicit=True, control_w_conn=MockConnection()) + + # Enter search and type something + status.enter_search() + status.search_input("p") + status.search_input("a") + status.search_input("c") + assert status.search_term == "pac" + + # Clear it with backspace + status.search_input("\x7f") # backspace + status.search_input("\x7f") # backspace + status.search_input("\x7f") # backspace + assert status.search_term == "" + + # Update to render + status.update() + output = fake_stdout.getvalue() + + # All builds should be visible now + assert "package-a" in output + assert "other-b" in output + assert "package-c" in output + + +class TestEdgeCases: + """Test edge cases and error conditions""" + + def test_empty_build_list(self): + """Test update with no builds""" + status, _, fake_stdout = create_build_status(total=0) + + status.update() + output = fake_stdout.getvalue() + + # Should render header but no builds + assert "Progress:" in output + assert "0/0" in output + + def test_all_builds_finished(self): + """Test when all builds are finished""" + status, fake_time, _ = create_build_status(total=2) + specs = 
add_mock_builds(status, 2) + + # Mark all as finished + for spec in specs: + status.update_state(spec.dag_hash(), "finished") + + # Advance time and update + fake_time[0] = inst.CLEANUP_TIMEOUT + 0.01 + status.update() + + # All should be cleaned up + assert len(status.builds) == 0 + assert status.completed == 2 + + def test_update_progress_rounds_correctly(self): + """Test that progress percentage rounding works""" + status, _, _ = create_build_status() + (spec,) = add_mock_builds(status, 1) + build_id = spec.dag_hash() + + # Test rounding + status.update_progress(build_id, 1, 3) + assert status.builds[build_id].progress_percent == 33 # int(100/3) + + status.update_progress(build_id, 2, 3) + assert status.builds[build_id].progress_percent == 66 # int(200/3) + + status.update_progress(build_id, 3, 3) + assert status.builds[build_id].progress_percent == 100 diff --git a/lib/spack/spack/test/jobserver.py b/lib/spack/spack/test/jobserver.py new file mode 100644 index 00000000000000..9dff1066ec998c --- /dev/null +++ b/lib/spack/spack/test/jobserver.py @@ -0,0 +1,277 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import sys + +import pytest + +if sys.platform == "win32": + pytest.skip("Jobserver tests are not supported on Windows", allow_module_level=True) + +import fcntl +import os +import pathlib +import stat + +from spack.new_installer import ( + JobServer, + create_jobserver_fifo, + get_jobserver_config, + open_existing_jobserver_fifo, +) +from spack.spec import Spec + + +class TestGetJobserverConfig: + """Test parsing of MAKEFLAGS for jobserver configuration.""" + + def test_empty_makeflags(self): + """Empty MAKEFLAGS should return None.""" + assert get_jobserver_config("") is None + + def test_no_jobserver_flag(self): + """MAKEFLAGS without jobserver flag should return None.""" + assert get_jobserver_config(" -j4 --silent") is None + + def test_fifo_format_new(self): + """Parse new FIFO format""" + assert get_jobserver_config(" -j4 --jobserver-auth=fifo:/tmp/my_fifo") == "/tmp/my_fifo" + + def test_pipe_format_new(self): + """Parse new pipe format""" + assert get_jobserver_config(" -j4 --jobserver-auth=3,4") == (3, 4) + + def test_pipe_format_old(self): + """Parse old pipe format (on old versions of gmake this was not publicized)""" + assert get_jobserver_config(" -j4 --jobserver-fds=5,6") == (5, 6) + + def test_multiple_flags_last_wins(self): + """When multiple jobserver flags exist, last one wins.""" + makeflags = " --jobserver-fds=3,4 --jobserver-auth=fifo:/tmp/fifo --jobserver-auth=7,8" + assert get_jobserver_config(makeflags) == (7, 8) + + def test_invalid_format(self): + assert get_jobserver_config(" --jobserver-auth=3") is None + assert get_jobserver_config(" --jobserver-auth=a,b") is None + assert get_jobserver_config(" --jobserver-auth=3,b") is None + assert get_jobserver_config(" --jobserver-auth=3,4,5") is None + assert get_jobserver_config(" --jobserver-auth=") is None + + +class TestCreateJobserverFifo: + """Test FIFO creation for jobserver.""" + + def test_creates_fifo(self): + """Should create a FIFO with correct properties.""" + r, w, path = create_jobserver_fifo(4) + try: + assert os.path.exists(path) + assert stat.S_ISFIFO(os.stat(path).st_mode) + assert (os.stat(path).st_mode & 0o777) == 0o600 + assert fcntl.fcntl(r, fcntl.F_GETFD) != -1 + assert fcntl.fcntl(w, fcntl.F_GETFD) != -1 + assert fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK + finally: + os.close(r) + os.close(w) + 
os.unlink(path) + os.rmdir(os.path.dirname(path)) + + def test_writes_correct_tokens(self): + """Should write num_jobs - 1 tokens.""" + r, w, path = create_jobserver_fifo(5) + try: + assert os.read(r, 10) == b"++++" # 4 tokens for 5 jobs + finally: + os.close(r) + os.close(w) + os.unlink(path) + os.rmdir(os.path.dirname(path)) + + def test_single_job_no_tokens(self): + """Single job should write 0 tokens.""" + r, w, path = create_jobserver_fifo(1) + try: + with pytest.raises(BlockingIOError): + os.read(r, 10) # No tokens for 1 job + finally: + os.close(r) + os.close(w) + os.unlink(path) + os.rmdir(os.path.dirname(path)) + + +class TestOpenExistingJobserverFifo: + """Test opening existing jobserver FIFOs.""" + + def test_opens_existing_fifo(self, tmp_path: pathlib.Path): + """Should successfully open an existing FIFO.""" + fifo_path = str(tmp_path / "test_fifo") + os.mkfifo(fifo_path, 0o600) + + result = open_existing_jobserver_fifo(fifo_path) + assert result is not None + + r, w = result + assert fcntl.fcntl(r, fcntl.F_GETFD) != -1 + assert fcntl.fcntl(w, fcntl.F_GETFD) != -1 + assert fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK + + os.close(r) + os.close(w) + + def test_returns_none_for_missing_fifo(self, tmp_path: pathlib.Path): + """Should return None if FIFO doesn't exist.""" + result = open_existing_jobserver_fifo(str(tmp_path / "nonexistent_fifo")) + assert result is None + + +class TestJobServer: + """Test JobServer class functionality.""" + + def test_creates_new_jobserver(self): + """Should create a new FIFO-based jobserver when none exists.""" + js = JobServer(4) + + try: + assert js.created is True + assert js.fifo_path is not None + assert os.path.exists(js.fifo_path) + assert js.tokens_acquired == 0 + assert fcntl.fcntl(js.r, fcntl.F_GETFD) != -1 + assert fcntl.fcntl(js.w, fcntl.F_GETFD) != -1 + finally: + js.close() + + def test_attaches_to_existing_fifo(self): + """Should attach to existing FIFO jobserver from environment.""" + js1 = JobServer(4) + assert js1.fifo_path + + try: + fifo_config = get_jobserver_config(f" -j4 --jobserver-auth=fifo:{js1.fifo_path}") + assert fifo_config == js1.fifo_path + + result = open_existing_jobserver_fifo(js1.fifo_path) + assert result is not None + + r, w = result + os.close(r) + os.close(w) + + finally: + js1.close() + + def test_acquire_tokens(self): + """Should acquire tokens from jobserver.""" + js = JobServer(5) + + try: + assert js.acquire(2) == 2 + assert js.tokens_acquired == 2 + + assert js.acquire(2) == 2 + assert js.tokens_acquired == 4 + + assert js.acquire(2) == 0 + assert js.tokens_acquired == 4 + + finally: + js.close() + + def test_release_tokens(self): + """Should release tokens back to jobserver.""" + js = JobServer(5) + + try: + assert js.acquire(2) == 2 + assert js.tokens_acquired == 2 + + js.release() + assert js.tokens_acquired == 1 + + assert js.acquire(1) == 1 + assert js.tokens_acquired == 2 + + finally: + js.close() + + def test_release_without_tokens_is_noop(self): + """Releasing without acquired tokens should be a no-op.""" + js = JobServer(4) + + try: + assert js.tokens_acquired == 0 + js.release() + assert js.tokens_acquired == 0 + finally: + js.close() + + def test_makeflags_fifo_gmake_44(self): + """Should return FIFO format for gmake >= 4.4.""" + js = JobServer(8) + + try: + flags = js.makeflags(Spec("gmake@=4.4")) + assert flags == f" -j8 --jobserver-auth=fifo:{js.fifo_path}" + finally: + js.close() + + def test_makeflags_pipe_gmake_40(self): + """Should return pipe format for gmake 4.0-4.3.""" + js = 
JobServer(8)
+
+        try:
+            flags = js.makeflags(Spec("gmake@=4.0"))
+            assert flags == f" -j8 --jobserver-auth={js.r},{js.w}"
+        finally:
+            js.close()
+
+    def test_makeflags_old_format_gmake_3(self):
+        """Should return old --jobserver-fds format for gmake < 4.0."""
+        js = JobServer(8)
+
+        try:
+            flags = js.makeflags(Spec("gmake@=3.9"))
+            assert flags == f" -j8 --jobserver-fds={js.r},{js.w}"
+        finally:
+            js.close()
+
+    def test_makeflags_no_gmake(self):
+        """Should return FIFO format when no gmake (modern default)."""
+        js = JobServer(6)
+
+        try:
+            flags = js.makeflags(None)
+            assert flags == f" -j6 --jobserver-auth=fifo:{js.fifo_path}"
+        finally:
+            js.close()
+
+    def test_close_removes_created_fifo(self):
+        """Should remove FIFO and directory if created by this instance."""
+        js = JobServer(4)
+        fifo_path = js.fifo_path
+        assert fifo_path and os.path.exists(fifo_path)
+        js.close()
+        assert not os.path.exists(os.path.dirname(fifo_path))
+
+    def test_file_descriptors_are_inheritable(self):
+        """Should set file descriptors as inheritable for child processes."""
+        js = JobServer(4)
+
+        try:
+            assert os.get_inheritable(js.r)
+            assert os.get_inheritable(js.w)
+        finally:
+            js.close()
+
+    def test_connection_objects_exist(self):
+        """Should create Connection objects for fd inheritance."""
+        js = JobServer(4)
+
+        try:
+            assert js.r_conn is not None and js.r_conn.fileno() == js.r
+            assert js.w_conn is not None and js.w_conn.fileno() == js.w
+        finally:
+            js.close()
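Aside: the protocol exercised above is GNU make's jobserver handshake. MAKEFLAGS advertises either an inherited pipe ("--jobserver-fds=R,W", later "--jobserver-auth=R,W") or, since gmake 4.4, a FIFO ("--jobserver-auth=fifo:PATH"); the pool holds num_jobs - 1 '+' tokens and every client implicitly owns one job slot. A minimal standalone client sketch, with illustrative helper names that are not Spack's API:

import os
import re
from typing import Tuple, Union

def parse_makeflags(makeflags: str) -> Union[str, Tuple[int, int], None]:
    """Return a FIFO path, an (r, w) fd pair, or None; the last flag wins."""
    found = re.findall(r"--jobserver-(?:auth|fds)=(\S+)", makeflags)
    if not found:
        return None
    value = found[-1]
    if value.startswith("fifo:"):
        return value[len("fifo:"):]
    try:
        r, w = value.split(",")
        return int(r), int(w)
    except ValueError:  # wrong arity or non-integer fds
        return None

def try_acquire(r_fd: int) -> bool:
    """Take one extra job slot; tokens are single '+' bytes."""
    try:
        return os.read(r_fd, 1) == b"+"
    except BlockingIOError:
        return False  # pool exhausted right now (fd is non-blocking)

def release(w_fd: int) -> None:
    os.write(w_fd, b"+")  # hand the token back so sibling jobs can run

The tests above pin the same behaviors: '+' tokens, num_jobs - 1 of them, last-flag-wins parsing, and a per-gmake-version choice of MAKEFLAGS format.

diff --git a/lib/spack/spack/test/llnl/util/lang.py b/lib/spack/spack/test/llnl/util/lang.py
index c7967c21cb6a12..7effe85ebd0b83 100644
--- a/lib/spack/spack/test/llnl/util/lang.py
+++ b/lib/spack/spack/test/llnl/util/lang.py
@@ -11,7 +11,7 @@
 import pytest
 
 import spack.llnl.util.lang
-from spack.llnl.util.lang import dedupe, match_predicate, memoized, pretty_date, stable_args
+from spack.llnl.util.lang import dedupe, match_predicate, memoized, pretty_date
 
 
 @pytest.fixture()
@@ -223,28 +223,6 @@ def _cmp_key(self):
     assert hash(b) == hash(b2)
 
 
-@pytest.mark.parametrize(
-    "args1,kwargs1,args2,kwargs2",
-    [
-        # Ensure tuples passed in args are disambiguated from equivalent kwarg items.
-        (("a", 3), {}, (), {"a": 3})
-    ],
-)
-def test_unequal_args(args1, kwargs1, args2, kwargs2):
-    assert stable_args(*args1, **kwargs1) != stable_args(*args2, **kwargs2)
-
-
-@pytest.mark.parametrize(
-    "args1,kwargs1,args2,kwargs2",
-    [
-        # Ensure that kwargs are stably sorted.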
-        ((), {"a": 3, "b": 4}, (), {"b": 4, "a": 3})
-    ],
-)
-def test_equal_args(args1, kwargs1, args2, kwargs2):
-    assert stable_args(*args1, **kwargs1) == stable_args(*args2, **kwargs2)
-
-
 @pytest.mark.parametrize("args, kwargs", [((1,), {}), ((), {"a": 3}), ((1,), {"a": 3})])
 def test_memoized(args, kwargs):
     @memoized
@@ -252,9 +230,8 @@ def f(*args, **kwargs):
         return "return-value"
 
     assert f(*args, **kwargs) == "return-value"
-    key = stable_args(*args, **kwargs)
-    assert list(f.cache.keys()) == [key]
-    assert f.cache[key] == "return-value"
+    assert f(*args, **kwargs) == "return-value"
+    assert f.cache_info().hits == 1
 
 
 @pytest.mark.parametrize("args, kwargs", [(([1],), {}), ((), {"a": [1]})])
@@ -265,12 +242,8 @@ def test_memoized_unhashable(args, kwargs):
     def f(*args, **kwargs):
         return None
 
-    with pytest.raises(spack.llnl.util.lang.UnhashableArguments) as exc_info:
+    with pytest.raises(TypeError, match="unhashable type:"):
         f(*args, **kwargs)
-    exc_msg = str(exc_info.value)
-    key = stable_args(*args, **kwargs)
-    assert str(key) in exc_msg
-    assert "function 'f'" in exc_msg
 
 
 def test_dedupe():
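Aside: the rewritten memoization tests above assume functools.lru_cache semantics, namely a cache_info() hit counter and a plain TypeError for unhashable arguments, instead of the removed stable_args keys. The stdlib decorator exhibits the same behavior; whether memoized now delegates to lru_cache is an assumption, so the illustration below uses functools directly:

import functools

@functools.lru_cache(maxsize=None)
def f(*args):
    return "return-value"

f(1)                           # miss: computed and stored
assert f(1) == "return-value"  # hit: served from the cache
assert f.cache_info().hits == 1
assert f.cache_info().misses == 1

try:
    f([1])  # lists cannot be hashed into a cache key
except TypeError as e:
    assert "unhashable type" in str(e)

diff --git a/lib/spack/spack/test/llnl/util/lock.py b/lib/spack/spack/test/llnl/util/lock.py
index 21adef5c35ddcf..26262132100e26 100644
--- a/lib/spack/spack/test/llnl/util/lock.py
+++ b/lib/spack/spack/test/llnl/util/lock.py
@@ -663,6 +663,7 @@ def test_upgrade_read_to_write(private_lock_path):
     assert lock._file is None
 
 
+@pytest.mark.skipif(getuid() == 0, reason="user is root")
 def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
     """Test that read-only file can be read-locked but not write-locked."""
     # ensure lock file exists the first time
diff --git a/lib/spack/spack/test/llnl/util/tty/colify.py b/lib/spack/spack/test/llnl/util/tty/colify.py
new file mode 100644
index 00000000000000..3dcbe0d5262594
--- /dev/null
+++ b/lib/spack/spack/test/llnl/util/tty/colify.py
@@ -0,0 +1,86 @@
+# Copyright Spack Project Developers. See COPYRIGHT file for details.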
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+import re
+import sys
+
+import pytest
+
+from spack.llnl.util.tty.colify import colify, colify_table
+
+# table as 3 rows x 6 columns
+lorem_table = [
+    ["There", "are", "many", "variations", "of", "passages"],
+    ["of", "Lorem", "Ipsum", "available", "but", "many"],
+    ["have", "suffered", "alteration", "in", "some", "form"],
+]
+
+# starting offset of each column in the table above
+lorem_table_col_starts = [0, 7, 17, 29, 41, 47]
+
+# table in a single list
+lorem_words = lorem_table[0] + lorem_table[1] + lorem_table[2]
+
+
+@pytest.mark.parametrize("console_cols", [10, 20, 40, 60, 80, 100, 120])
+def test_fixed_column_table(console_cols, capfd):
+    "ensure output is a fixed table regardless of size"
+    colify_table(lorem_table, output=sys.stdout, console_cols=console_cols)
+    output, _ = capfd.readouterr()
+
+    # 3 rows
+    assert output.strip().count("\n") == 2
+
+    # right spacing
+    lines = output.strip().split("\n")
+    for line in lines:
+        assert [line[w - 1] for w in lorem_table_col_starts[1:]] == [" "] * 5
+
+    # same data
+    stripped_lines = [re.sub(r"\s+", " ", line.strip()) for line in lines]
+    assert stripped_lines == [" ".join(row) for row in lorem_table]
+
+
+@pytest.mark.parametrize(
+    "console_cols,expected_rows,expected_cols",
+    [
+        (10, 18, 1),
+        (20, 18, 1),
+        (40, 5, 4),
+        (60, 3, 6),
+        (80, 2, 9),
+        (100, 2, 9),
+        (120, 2, 9),
+        (140, 1, 18),
+    ],
+)
+def test_variable_width_columns(console_cols, expected_rows, expected_cols, capfd):
+    colify(lorem_words, tty=True, output=sys.stdout, console_cols=console_cols)
+    output, _ = capfd.readouterr()
+
+    print(output)
+    # expected rows
+    assert output.strip().count("\n") == expected_rows - 1
+
+    # right cols
+    lines = output.strip().split("\n")
+    assert all(len(re.split(r"\s+", line)) <= expected_cols for line in lines)
+
+    # padding between columns
+    rows = [re.split(r"\s+", line) for line in lines]
+    cols = list(zip(*rows))
+
+    max_col_widths = [max(len(s) for s in col) for col in cols]
+    col_start = 0
+    for w in max_col_widths:
+        col_start += w + 2  # plus padding
+
+        # verify that every column boundary is at max width + padding
+        assert all(
+            [
+                line[col_start - 1] == " " and line[col_start] != " "
+                for line in lines
+                if col_start < len(line)
+            ]
+        )
diff --git a/lib/spack/spack/test/llnl/util/tty/color.py b/lib/spack/spack/test/llnl/util/tty/color.py
new file mode 100644
index 00000000000000..14bef046a3edaf
--- /dev/null
+++ b/lib/spack/spack/test/llnl/util/tty/color.py
@@ -0,0 +1,51 @@
+# Copyright Spack Project Developers. See COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+import re
+import textwrap
+
+import pytest
+
+import spack.llnl.util.tty.color as color
+
+test_text = [
+    "@r{The quick brown fox jumps over the lazy yellow dog.",
+    "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt "
+    "ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco " + "laboris nisi ut aliquip ex ea commodo consequat.}", + "@c{none, gfx1010, gfx1011, gfx1012, gfx1013, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034}", + "none, @c{gfx1010}, gfx1011, @r{gfx1012}, gfx1013, @b{gfx1030}, gfx1031, gfx1032, gfx1033", + "@c{none, 10, 100, 100a, 100f, 101, 101a, 101f, 103, 103a, 103f, 11, 12, 120, 120a, 120f}", + "@c{none, 10, 100, 100a, 100f, 101, 101a, 101f, 103, 103a, 103f, 11, 12, 120}", + "none, @c{10}, @b{100}, 100a, @r{100f}, 101, @g{101a}, 101f, @c{103}, 103a, 103f" + "@g{build}, @c{link}, @r{run}", +] + + +@pytest.mark.parametrize("cols", list(range(30, 101, 10))) +@pytest.mark.parametrize("text", test_text) +@pytest.mark.parametrize("indent", [0, 4, 8]) +def test_color_wrap(cols, text, indent): + colorized = color.colorize(text, color=True) # True to force color + plain = color.csub(colorized) + + spaces = indent * " " + + color_wrapped = " ".join( + color.cwrap(colorized, width=cols, initial_indent=spaces, subsequent_indent=spaces) + ) + plain_cwrapped = " ".join( + color.cwrap(plain, width=cols, initial_indent=spaces, subsequent_indent=spaces) + ) + wrapped = " ".join( + textwrap.wrap(plain, width=cols, initial_indent=spaces, subsequent_indent=spaces) + ) + + # make sure the concatenated, non-indented wrapped version is the same as the + # original, modulo any spaces consumed while wrapping. + assert re.sub(r"\s+", " ", color_wrapped).lstrip() == re.sub(r"\s+", " ", colorized) + + # make sure we wrap the same as textwrap + assert color.csub(color_wrapped) == wrapped + assert plain_cwrapped == wrapped diff --git a/lib/spack/spack/test/llnl/util/tty/log.py b/lib/spack/spack/test/llnl/util/tty/log.py index 5e79ce2c5f6c32..19872dd812553a 100644 --- a/lib/spack/spack/test/llnl/util/tty/log.py +++ b/lib/spack/spack/test/llnl/util/tty/log.py @@ -12,7 +12,7 @@ import spack.llnl.util.tty.log as log from spack.llnl.util.filesystem import working_dir -from spack.util.executable import which +from spack.util.executable import Executable termios: Optional[ModuleType] = None try: @@ -150,35 +150,19 @@ def test_log_output_with_filter_and_append(capfd, tmp_path: pathlib.Path): assert f.read() == "foo blah\nblah foo\nfoo foo\nmore foo more blah\n" -@pytest.mark.skipif(not which("echo"), reason="needs echo command") -def test_log_subproc_and_echo_output_no_capfd(capfd, tmp_path: pathlib.Path): - echo = which("echo", required=True) +def test_log_subproc_and_echo_output(capfd, tmp_path: pathlib.Path): + python = Executable(sys.executable) - # this is split into two tests because capfd interferes with the - # output logged to file when using a subprocess. We test the file - # here, and echoing in test_log_subproc_and_echo_output_capfd below. - with capfd.disabled(): - with working_dir(str(tmp_path)): - with log.log_output("foo.txt") as logger: - with logger.force_echo(): - echo("echo") - print("logged") - - with open("foo.txt", encoding="utf-8") as f: - assert f.read() == "echo\nlogged\n" - - -@pytest.mark.skipif(not which("echo"), reason="needs echo command") -def test_log_subproc_and_echo_output_capfd(capfd, tmp_path: pathlib.Path): - echo = which("echo", required=True) - - # This tests *only* what is echoed when using a subprocess, as capfd - # interferes with the logged data. See - # test_log_subproc_and_echo_output_no_capfd for tests on the logfile. 
with working_dir(str(tmp_path)): with log.log_output("foo.txt") as logger: with logger.force_echo(): - echo("echo") + python("-c", "print('echo')") print("logged") + # Check log file content + with open("foo.txt", encoding="utf-8") as f: + assert f.read() == "echo\nlogged\n" + + # Check captured output (echoed content) + # Note: 'logged' is not echoed because force_echo() scope ended assert capfd.readouterr()[0] == "echo\n" diff --git a/lib/spack/spack/test/main.py b/lib/spack/spack/test/main.py index 6baa9e077eb9a0..d31a8a442b2cae 100644 --- a/lib/spack/spack/test/main.py +++ b/lib/spack/spack/test/main.py @@ -83,13 +83,13 @@ def test_get_version_no_git(working_env, monkeypatch): assert spack.spack_version == spack.get_version() -def test_main_calls_get_version(capsys, working_env, monkeypatch): +def test_main_calls_get_version(capfd, working_env, monkeypatch): # act like git is not found in the PATH monkeypatch.setattr(spack.util.git, "git", lambda: None) # make sure we get a bare version (without commit) when this happens spack.main.main(["-V"]) - out, err = capsys.readouterr() + out, err = capfd.readouterr() assert spack.spack_version == out.strip() @@ -107,11 +107,6 @@ def test_get_version_bad_git(tmp_path: pathlib.Path, working_env, monkeypatch): assert spack.spack_version == spack.get_version() -def fail_if_add_env(env): - """Pass to add_command_line_scopes. Will raise if called""" - assert False, "Should not add env from scope test." - - def test_bad_command_line_scopes(tmp_path: pathlib.Path, config): cfg = spack.config.Configuration() file_path = tmp_path / "file_instead_of_dir" @@ -120,10 +115,10 @@ def test_bad_command_line_scopes(tmp_path: pathlib.Path, config): file_path.write_text("") with pytest.raises(spack.error.ConfigError): - spack.main.add_command_line_scopes(cfg, [str(file_path)], fail_if_add_env) + spack.main.add_command_line_scopes(cfg, [str(file_path)]) with pytest.raises(spack.error.ConfigError): - spack.main.add_command_line_scopes(cfg, [str(non_existing_path)], fail_if_add_env) + spack.main.add_command_line_scopes(cfg, [str(non_existing_path)]) def test_add_command_line_scopes(tmp_path: pathlib.Path, mutable_config): @@ -137,7 +132,7 @@ def test_add_command_line_scopes(tmp_path: pathlib.Path, mutable_config): """ ) - spack.main.add_command_line_scopes(mutable_config, [str(tmp_path)], fail_if_add_env) + spack.main.add_command_line_scopes(mutable_config, [str(tmp_path)]) assert mutable_config.get("config:verify_ssl") is False assert mutable_config.get("config:dirty") is False @@ -167,12 +162,12 @@ def test_add_command_line_scope_env(tmp_path: pathlib.Path, mutable_mock_env_pat ) config = spack.config.Configuration() - spack.main.add_command_line_scopes(config, ["example", str(tmp_path)], fail_if_add_env) + spack.main.add_command_line_scopes(config, ["example", str(tmp_path)]) assert len(config.scopes) == 2 assert config.get("config:install_tree:root") == "/tmp/second" config = spack.config.Configuration() - spack.main.add_command_line_scopes(config, [str(tmp_path), "example"], fail_if_add_env) + spack.main.add_command_line_scopes(config, [str(tmp_path), "example"]) assert len(config.scopes) == 2 assert config.get("config:install_tree:root") == "/tmp/first" @@ -240,9 +235,7 @@ def write_python_cfg(_spec, _cfg_name): assert not spack.config.get("config:dirty") - spack.main.add_command_line_scopes( - mock_low_high_config, [os.path.dirname(filename)], fail_if_add_env - ) + spack.main.add_command_line_scopes(mock_low_high_config, [os.path.dirname(filename)]) assert 
spack.config.get("config:dirty") python_reqs = spack.config.get("packages")["python"]["require"] @@ -269,15 +262,11 @@ def write_configs(include_path, debug_data): system_config = {"config": {"debug": False}} write_configs(system_filename, system_config) - spack.main.add_command_line_scopes( - mutable_config, [os.path.dirname(system_filename)], fail_if_add_env - ) + spack.main.add_command_line_scopes(mutable_config, [os.path.dirname(system_filename)]) site_config = {"config": {"debug": True}} write_configs(site_filename, site_config) - spack.main.add_command_line_scopes( - mutable_config, [os.path.dirname(site_filename)], fail_if_add_env - ) + spack.main.add_command_line_scopes(mutable_config, [os.path.dirname(site_filename)]) # Ensure takes the last value of the option pushed onto the stack assert mutable_config.get("config:debug") == site_config["config"]["debug"] @@ -293,9 +282,7 @@ def test_include_recurse_limit(tmp_path: pathlib.Path, mutable_config): syaml.dump_config(include_list, f) with pytest.raises(spack.config.RecursiveIncludeError, match="recursion exceeded"): - spack.main.add_command_line_scopes( - mutable_config, [os.path.dirname(include_path)], fail_if_add_env - ) + spack.main.add_command_line_scopes(mutable_config, [os.path.dirname(include_path)]) # TODO: Fix this once recursive includes are processed in the expected order. @@ -339,7 +326,7 @@ def include_contents(paths): write(b_yaml, include_contents([debug_yaml, d_yaml] if child == "b" else [d_yaml])) write(c_yaml, include_contents([debug_yaml, d_yaml] if child == "c" else [d_yaml])) - spack.main.add_command_line_scopes(mutable_config, [str(tmp_path)], fail_if_add_env) + spack.main.add_command_line_scopes(mutable_config, [str(tmp_path)]) try: assert mutable_config.get("config:debug") is expected diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py index 2b20439faa2f5f..bf4219aeadfccf 100644 --- a/lib/spack/spack/test/mirror.py +++ b/lib/spack/spack/test/mirror.py @@ -9,6 +9,7 @@ import pytest import spack.caches +import spack.cmd.mirror import spack.concretize import spack.config import spack.fetch_strategy @@ -17,7 +18,6 @@ import spack.mirrors.utils import spack.patch import spack.stage -import spack.util.executable import spack.util.spack_json as sjson import spack.util.url as url_util from spack.cmd.common.arguments import mirror_name_or_url @@ -61,7 +61,7 @@ def check_mirror(): with spack.config.override("mirrors", mirrors): with spack.config.override("config:checksum", False): specs = [spack.concretize.concretize_one(x) for x in repos] - spack.mirrors.utils.create(mirror_root, specs) + spack.cmd.mirror.create(mirror_root, specs) # Stage directory exists assert os.path.isdir(mirror_root) @@ -254,7 +254,7 @@ def successful_make_alias(*args, **kwargs): ) with spack.config.override("config:checksum", False): - spack.mirrors.utils.create(mirror_root, list(spec.traverse())) + spack.cmd.mirror.create(mirror_root, list(spec.traverse())) assert { "abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234", diff --git a/lib/spack/spack/test/module_parsing.py b/lib/spack/spack/test/module_parsing.py index fac03dd702cedf..419255527ac7f1 100644 --- a/lib/spack/spack/test/module_parsing.py +++ b/lib/spack/spack/test/module_parsing.py @@ -7,7 +7,6 @@ import pytest -import spack import spack.util.module_cmd from spack.util.module_cmd import ( get_path_args_from_module_line, diff --git a/lib/spack/spack/test/modules/lmod.py b/lib/spack/spack/test/modules/lmod.py index 33c6babaf7d617..7bb1a8fd0a6051 
100644 --- a/lib/spack/spack/test/modules/lmod.py +++ b/lib/spack/spack/test/modules/lmod.py @@ -91,11 +91,14 @@ def test_file_layout(self, compiler, provider, factory, module_configuration): # hash has been disallowed in the configuration file path_parts = layout.available_path_parts service_part = spec_string.replace("@", "/") - service_part = "-".join([service_part, layout.spec.dag_hash(length=7)]) + # JCSDA fork only - no hashes in service_part + # service_part = "-".join([service_part, layout.spec.dag_hash(length=7)]) if "mpileaks" in spec_string: # It's a user, not a provider, so create the provider string - service_part = layout.spec["mpi"].format("{name}/{version}-{hash:7}") + # JCSDA fork only - no hashes in service_part + # service_part = layout.spec["mpi"].format("{name}/{version}-{hash:7}") + service_part = layout.spec["mpi"].format("{name}/{version}") else: # Only relevant for providers, not users, of virtuals assert service_part in path_parts @@ -113,7 +116,7 @@ def test_compilers_provided_different_name( self, factory, module_configuration, compiler_factory ): with spack.config.override( - "packages", {"llvm": {"externals": [compiler_factory(spec="llvm@3.3")]}} + "packages", {"llvm": {"externals": [compiler_factory(spec="llvm@3.3 +clang")]}} ): module_configuration("complex_hierarchy") module, spec = factory("intel-oneapi-compilers%clang@3.3") @@ -280,9 +283,11 @@ def test_no_hash(self, factory, module_configuration): path = module.layout.filename mpi_spec = spec["mpi"] - mpi_element = "{0}/{1}-{2}/".format( - mpi_spec.name, mpi_spec.version, mpi_spec.dag_hash(length=7) - ) + # JCSDA fork only + mpi_element = "{0}/{1}/".format(mpi_spec.name, mpi_spec.version) + # mpi_element = "{0}/{1}-{2}/".format( + # mpi_spec.name, mpi_spec.version, mpi_spec.dag_hash(length=7) + # ) assert mpi_element in path diff --git a/lib/spack/spack/test/new_installer.py b/lib/spack/spack/test/new_installer.py new file mode 100644 index 00000000000000..fa649b00e5d1c4 --- /dev/null +++ b/lib/spack/spack/test/new_installer.py @@ -0,0 +1,201 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +"""Tests for the new_installer.py module""" + +import pathlib as pathlb +import sys + +import pytest + +if sys.platform == "win32": + pytest.skip("No Windows support", allow_module_level=True) + +import spack.error +from spack.new_installer import OVERWRITE_GARBAGE_SUFFIX, PrefixPivoter + + +@pytest.fixture +def existing_prefix(tmp_path: pathlb.Path) -> pathlb.Path: + """Creates a standard existing prefix with content.""" + prefix = tmp_path / "existing_prefix" + prefix.mkdir() + (prefix / "old_file").write_text("old content") + return prefix + + +class TestPrefixPivoter: + """Tests for the PrefixPivoter class.""" + + def test_no_existing_prefix(self, tmp_path: pathlb.Path): + """Test installation when prefix doesn't exist yet.""" + prefix = tmp_path / "new_prefix" + + with PrefixPivoter(str(prefix), overwrite=False): + prefix.mkdir() + (prefix / "installed_file").write_text("content") + + assert prefix.exists() + assert (prefix / "installed_file").read_text() == "content" + + def test_existing_prefix_no_overwrite_raises(self, existing_prefix: pathlb.Path): + """Test that existing prefix raises error when overwrite=False.""" + with pytest.raises(spack.error.InstallError, match="already exists"): + with PrefixPivoter(str(existing_prefix), overwrite=False): + pass + + def test_overwrite_success_cleans_up_old_prefix( + self, tmp_path: pathlb.Path, existing_prefix: pathlb.Path + ): + """Test that overwrite=True moves old prefix and cleans it up on success.""" + with PrefixPivoter(str(existing_prefix), overwrite=True): + assert not existing_prefix.exists() + existing_prefix.mkdir() + (existing_prefix / "new_file").write_text("new content") + + assert existing_prefix.exists() + assert (existing_prefix / "new_file").exists() + assert not (existing_prefix / "old_file").exists() + # Only the existing_prefix directory should remain + assert len(list(tmp_path.iterdir())) == 1 + + def test_overwrite_failure_restores_original_prefix( + self, tmp_path: pathlb.Path, existing_prefix: pathlb.Path + ): + """Test that original prefix is restored when installation fails. 
+ + Note: keep_prefix=True is passed but should be ignored since overwrite=True + takes precedence.""" + with pytest.raises(RuntimeError, match="simulated failure"): + with PrefixPivoter(str(existing_prefix), overwrite=True, keep_prefix=True): + existing_prefix.mkdir() + (existing_prefix / "partial_file").write_text("partial") + raise RuntimeError("simulated failure") + + assert existing_prefix.exists() + assert (existing_prefix / "old_file").read_text() == "old content" + assert not (existing_prefix / "partial_file").exists() + # Only the existing_prefix directory should remain + assert len(list(tmp_path.iterdir())) == 1 + + def test_overwrite_failure_no_partial_prefix_created(self, existing_prefix: pathlb.Path): + """Test restoration when failure occurs before any prefix is created.""" + with pytest.raises(RuntimeError, match="early failure"): + with PrefixPivoter(str(existing_prefix), overwrite=True): + raise RuntimeError("early failure") + + assert existing_prefix.exists() + assert (existing_prefix / "old_file").read_text() == "old content" + + def test_overwrite_true_no_existing_prefix(self, tmp_path: pathlb.Path): + """Test that overwrite=True works fine when prefix doesn't exist.""" + prefix = tmp_path / "new_prefix" + with PrefixPivoter(str(prefix), overwrite=True): + prefix.mkdir() + (prefix / "installed_file").write_text("content") + + assert prefix.exists() + # Only the new_prefix directory should remain + assert len(list(tmp_path.iterdir())) == 1 + + def test_keep_prefix_true_leaves_failed_install(self, tmp_path: pathlb.Path): + """Test that keep_prefix=True preserves the failed installation.""" + prefix = tmp_path / "new_prefix" + + with pytest.raises(RuntimeError, match="simulated failure"): + with PrefixPivoter(str(prefix), overwrite=False, keep_prefix=True): + prefix.mkdir() + (prefix / "partial_file").write_text("partial content") + raise RuntimeError("simulated failure") + + # Failed prefix should still exist + assert prefix.exists() + assert (prefix / "partial_file").exists() + assert (prefix / "partial_file").read_text() == "partial content" + # Only the failed prefix should remain + assert len(list(tmp_path.iterdir())) == 1 + + def test_keep_prefix_false_removes_failed_install(self, tmp_path: pathlb.Path): + """Test that keep_prefix=False removes the failed installation.""" + prefix = tmp_path / "new_prefix" + + with pytest.raises(RuntimeError, match="simulated failure"): + with PrefixPivoter(str(prefix), overwrite=False, keep_prefix=False): + prefix.mkdir() + (prefix / "partial_file").write_text("partial content") + raise RuntimeError("simulated failure") + + # Failed prefix should be removed + assert not prefix.exists() + # Nothing should remain + assert len(list(tmp_path.iterdir())) == 0 + + +class FailingPrefixPivoter(PrefixPivoter): + """Test subclass that can simulate filesystem failures.""" + + def __init__( + self, + prefix: str, + overwrite: bool, + keep_prefix: bool = False, + fail_on_restore: bool = False, + fail_on_move_garbage: bool = False, + ): + super().__init__(prefix, overwrite, keep_prefix) + self.fail_on_restore = fail_on_restore + self.fail_on_move_garbage = fail_on_move_garbage + self.restore_rename_count = 0 + + def _rename(self, src: str, dst: str) -> None: + if ( + self.fail_on_restore + and self.tmp_prefix + and src == self.tmp_prefix + and dst == self.prefix + ): + self.restore_rename_count += 1 + raise OSError("Simulated rename failure during restore") + + if self.fail_on_move_garbage and dst.endswith(OVERWRITE_GARBAGE_SUFFIX): + raise 
OSError("Simulated rename failure moving to garbage") + + super()._rename(src, dst) + + +class TestPrefixPivoterFailureRecovery: + """Tests for edge cases and failure recovery in PrefixPivoter.""" + + def test_restore_failure_leaves_backup( + self, tmp_path: pathlb.Path, existing_prefix: pathlb.Path + ): + """Test that if restoration fails, the backup is not deleted.""" + pivoter = FailingPrefixPivoter(str(existing_prefix), overwrite=True, fail_on_restore=True) + + with pytest.raises(OSError, match="Simulated rename failure during restore"): + with pivoter: + existing_prefix.mkdir() + (existing_prefix / "partial_file").write_text("partial") + raise RuntimeError("simulated failure") + + assert pivoter.restore_rename_count > 0 + # Backup directory should still exist (plus the failed prefix) + assert len(list(tmp_path.iterdir())) == 2 + + def test_garbage_move_failure_leaves_backup( + self, tmp_path: pathlb.Path, existing_prefix: pathlb.Path + ): + """Test that if moving the failed install to garbage fails, the backup is preserved.""" + pivoter = FailingPrefixPivoter( + str(existing_prefix), overwrite=True, fail_on_move_garbage=True + ) + + with pytest.raises(OSError, match="Simulated rename failure moving to garbage"): + with pivoter: + existing_prefix.mkdir() + (existing_prefix / "partial_file").write_text("partial") + raise RuntimeError("simulated failure") + + assert (existing_prefix / "partial_file").exists() + # Backup directory, failed prefix, and empty garbage directory should exist + assert len(list(tmp_path.iterdir())) == 3 diff --git a/lib/spack/spack/test/oci/image.py b/lib/spack/spack/test/oci/image.py index 3fa3610ff78a80..bf724e20a402e4 100644 --- a/lib/spack/spack/test/oci/image.py +++ b/lib/spack/spack/test/oci/image.py @@ -83,3 +83,17 @@ def test_digest(): # Missing algorithm with pytest.raises(ValueError): Digest.from_string(valid_digest) + + +def test_url_with_scheme(): + """Test that scheme=http translates to http:// URLs""" + http = ImageReference.from_string("localhost:1234/myimage:abc", scheme="http") + https = ImageReference.from_string("localhost:1234/myimage:abc", scheme="https") + default = ImageReference.from_string("localhost:1234/myimage:abc") + + assert http != https + assert https == default + + assert http.manifest_url() == "http://localhost:1234/v2/myimage/manifests/abc" + assert https.manifest_url() == "https://localhost:1234/v2/myimage/manifests/abc" + assert default.manifest_url() == "https://localhost:1234/v2/myimage/manifests/abc" diff --git a/lib/spack/spack/test/oci/integration_test.py b/lib/spack/spack/test/oci/integration_test.py index 4179bb4067520f..a6e74ee4d87078 100644 --- a/lib/spack/spack/test/oci/integration_test.py +++ b/lib/spack/spack/test/oci/integration_test.py @@ -15,7 +15,6 @@ import pytest -import spack import spack.binary_distribution import spack.database import spack.deptypes as dt @@ -231,7 +230,7 @@ def test_uploading_with_base_image_in_docker_image_manifest_v2_format( (rootfs / "bin").mkdir(parents=True) (rootfs / "bin" / "sh").write_text("hello world") tarball = tmp_path / "base.tar.gz" - with gzip_compressed_tarfile(tarball) as (tar, tar_gz_checksum, tar_checksum): + with gzip_compressed_tarfile(str(tarball)) as (tar, tar_gz_checksum, tar_checksum): tar.add(rootfs, arcname=".") tar_gz_digest = Digest.from_sha256(tar_gz_checksum.hexdigest()) tar_digest = Digest.from_sha256(tar_checksum.hexdigest()) diff --git a/lib/spack/spack/test/oci/mock_registry.py b/lib/spack/spack/test/oci/mock_registry.py index 
b35bb4f96951b3..aa98ab07771e9b 100644 --- a/lib/spack/spack/test/oci/mock_registry.py +++ b/lib/spack/spack/test/oci/mock_registry.py @@ -291,8 +291,8 @@ def https_open(self, req: Request): return self.servers[domain].handle(req) -class InMemoryOCIRegistryWithAuth(InMemoryOCIRegistry): - """This is another in-memory OCI registry, but it requires authentication.""" +class InMemoryOCIRegistryWithBearerAuth(InMemoryOCIRegistry): + """This is another in-memory OCI registry requiring bearer token authentication.""" def __init__( self, domain, token: Optional[str], realm: str, allow_single_post: bool = True @@ -330,6 +330,41 @@ def unauthorized(self): ) +class InMemoryOCIRegistryWithBasicAuth(InMemoryOCIRegistry): + """This is another in-memory OCI registry requiring basic authentication.""" + + def __init__( + self, domain, username: str, password: str, realm: str, allow_single_post: bool = True + ) -> None: + super().__init__(domain, allow_single_post) + self.username = username + self.password = password + self.realm = realm + self.router.add_middleware(self.authenticate) + + def authenticate(self, req: Request): + # Any request needs an Authorization header + authorization = req.get_header("Authorization") + + if authorization is None: + raise MiddlewareError(self.unauthorized()) + + # Ensure that the username and password are correct + assert authorization.startswith("Basic ") + auth = base64.b64decode(authorization[6:]).decode("utf-8") + username, password = auth.split(":", 1) + + if username != self.username or password != self.password: + raise MiddlewareError(self.unauthorized()) + + return req + + def unauthorized(self): + return MockHTTPResponse( + 401, "Unauthorized", {"www-authenticate": f'Basic realm="{self.realm}"'} + ) + + class MockBearerTokenServer(DummyServer): """Simulates a basic server that hands out bearer tokens at the /login endpoint for the following services: @@ -356,6 +391,8 @@ def login(self, req: Request): return self.public_auth(req) elif service == "private.example.com": return self.private_auth(req) + elif service == "oauth.example.com": + return self.oauth_auth(req) return MockHTTPResponse(404, "Not found") @@ -364,6 +401,10 @@ def public_auth(self, req: Request): assert req.get_header("Authorization") is None return MockHTTPResponse.with_json(200, "OK", body={"token": "public_token"}) + def oauth_auth(self, req: Request): + assert req.get_header("Authorization") is None + return MockHTTPResponse.with_json(200, "OK", body={"access_token": "oauth_token"}) + def private_auth(self, req: Request): # For the private registry we need to login with username and password auth_value = req.get_header("Authorization") diff --git a/lib/spack/spack/test/oci/urlopen.py b/lib/spack/spack/test/oci/urlopen.py index 2d283036d5d8aa..9c340a78bc148d 100644 --- a/lib/spack/spack/test/oci/urlopen.py +++ b/lib/spack/spack/test/oci/urlopen.py @@ -28,9 +28,10 @@ Challenge, RealmServiceScope, UsernamePassword, + _get_basic_challenge, + _get_bearer_challenge, credentials_from_mirrors, default_retry, - get_bearer_challenge, parse_www_authenticate, ) from spack.test.conftest import MockHTTPResponse @@ -38,7 +39,8 @@ DummyServer, DummyServerUrllibHandler, InMemoryOCIRegistry, - InMemoryOCIRegistryWithAuth, + InMemoryOCIRegistryWithBasicAuth, + InMemoryOCIRegistryWithBearerAuth, MiddlewareError, MockBearerTokenServer, create_opener, @@ -52,7 +54,7 @@ def test_parse_www_authenticate(): www_authenticate = 'Bearer 
realm="https://spack.io/authenticate",service="spack-registry",scope="repository:spack-registry:pull,push"' assert parse_www_authenticate(www_authenticate) == [ Challenge( - "Bearer", + "bearer", [ ("realm", "https://spack.io/authenticate"), ("service", "spack-registry"), @@ -61,18 +63,18 @@ def test_parse_www_authenticate(): ) ] - assert parse_www_authenticate("Bearer") == [Challenge("Bearer")] + assert parse_www_authenticate("Bearer") == [Challenge("bearer")] assert parse_www_authenticate("MethodA, MethodB,MethodC") == [ - Challenge("MethodA"), - Challenge("MethodB"), - Challenge("MethodC"), + Challenge("methoda"), + Challenge("methodb"), + Challenge("methodc"), ] assert parse_www_authenticate( 'Digest realm="Digest Realm", nonce="1234567890", algorithm=MD5, qop="auth"' ) == [ Challenge( - "Digest", + "digest", [ ("realm", "Digest Realm"), ("nonce", "1234567890"), @@ -85,8 +87,12 @@ def test_parse_www_authenticate(): assert parse_www_authenticate( r'Newauth realm="apps", type=1, title="Login to \"apps\"", Basic realm="simple"' ) == [ - Challenge("Newauth", [("realm", "apps"), ("type", "1"), ("title", 'Login to "apps"')]), - Challenge("Basic", [("realm", "simple")]), + Challenge("newauth", [("realm", "apps"), ("type", "1"), ("title", 'Login to "apps"')]), + Challenge("basic", [("realm", "simple")]), + ] + + assert parse_www_authenticate(r'BASIC REALM="simple"') == [ + Challenge("basic", [("realm", "simple")]) ] @@ -114,15 +120,71 @@ def test_invalid_www_authenticate(invalid_str): parse_www_authenticate(invalid_str) +def test_get_basic_challenge(): + """Test extracting Basic challenge from a list of challenges""" + + # No basic challenge + assert ( + _get_basic_challenge( + [ + Challenge( + "bearer", + [ + ("realm", "https://spack.io/authenticate"), + ("service", "spack-registry"), + ("scope", "repository:spack-registry:pull,push"), + ], + ), + Challenge( + "digest", + [ + ("realm", "Digest Realm"), + ("nonce", "1234567890"), + ("algorithm", "MD5"), + ("qop", "auth"), + ], + ), + ] + ) + is None + ) + + # Multiple challenges, should pick the basic one and return its realm. + assert ( + _get_basic_challenge( + [ + Challenge( + "dummy", + [ + ("realm", "https://example.com/"), + ("service", "service"), + ("scope", "scope"), + ], + ), + Challenge("basic", [("realm", "simple")]), + Challenge( + "bearer", + [ + ("realm", "https://spack.io/authenticate"), + ("service", "spack-registry"), + ("scope", "repository:spack-registry:pull,push"), + ], + ), + ] + ) + == "simple" + ) + + def test_get_bearer_challenge(): """Test extracting Bearer challenge from a list of challenges""" # Only an incomplete bearer challenge, missing service and scope, not usable. assert ( - get_bearer_challenge( + _get_bearer_challenge( [ - Challenge("Bearer", [("realm", "https://spack.io/authenticate")]), - Challenge("Basic", [("realm", "simple")]), + Challenge("bearer", [("realm", "https://spack.io/authenticate")]), + Challenge("basic", [("realm", "simple")]), Challenge( "Digest", [ @@ -138,14 +200,14 @@ def test_get_bearer_challenge(): ) # Multiple challenges, should pick the bearer one. 
- assert get_bearer_challenge( + assert _get_bearer_challenge( [ Challenge( - "Dummy", + "dummy", [("realm", "https://example.com/"), ("service", "service"), ("scope", "scope")], ), Challenge( - "Bearer", + "bearer", [ ("realm", "https://spack.io/authenticate"), ("service", "spack-registry"), @@ -163,16 +225,17 @@ def test_get_bearer_challenge(): [ ("public.example.com/spack-registry:latest", "public_token"), ("private.example.com/spack-registry:latest", "private_token"), + ("oauth.example.com/spack-registry:latest", "oauth_token"), ], ) -def test_automatic_oci_authentication(image_ref, token): +def test_automatic_oci_bearer_authentication(image_ref: str, token: str): image = ImageReference.from_string(image_ref) def credentials_provider(domain: str): return UsernamePassword("user", "pass") if domain == "private.example.com" else None opener = create_opener( - InMemoryOCIRegistryWithAuth( + InMemoryOCIRegistryWithBearerAuth( image.domain, token=token, realm="https://auth.example.com/login" ), MockBearerTokenServer("auth.example.com"), @@ -184,13 +247,34 @@ def credentials_provider(domain: str): assert opener.open(image.endpoint()).status == 200 +def test_automatic_oci_basic_authentication(): + image = ImageReference.from_string("private.example.com/image") + server = InMemoryOCIRegistryWithBasicAuth( + image.domain, username="user", password="pass", realm="example.com" + ) + + # With correct credentials we should get a 200 + opener_with_correct_auth = create_opener( + server, credentials_provider=lambda domain: UsernamePassword("user", "pass") + ) + assert opener_with_correct_auth.open(image.endpoint()).status == 200 + + # With wrong credentials we should get a 401 + opener_with_wrong_auth = create_opener( + server, credentials_provider=lambda domain: UsernamePassword("wrong", "wrong") + ) + with pytest.raises(urllib.error.HTTPError) as e: + opener_with_wrong_auth.open(image.endpoint()) + assert e.value.getcode() == 401 + + def test_wrong_credentials(): """Test that when wrong credentials are rejected by the auth server, we get a 401 error.""" credentials_provider = lambda domain: UsernamePassword("wrong", "wrong") image = ImageReference.from_string("private.example.com/image") opener = create_opener( - InMemoryOCIRegistryWithAuth( + InMemoryOCIRegistryWithBearerAuth( image.domain, token="something", realm="https://auth.example.com/login" ), MockBearerTokenServer("auth.example.com"), @@ -210,7 +294,7 @@ def test_wrong_bearer_token_returned_by_auth_server(): registry, etc.""" image = ImageReference.from_string("private.example.com/image") opener = create_opener( - InMemoryOCIRegistryWithAuth( + InMemoryOCIRegistryWithBearerAuth( image.domain, token="other_token_than_token_server_provides", realm="https://auth.example.com/login", @@ -250,7 +334,7 @@ def test_registry_with_short_lived_bearer_tokens(): credentials_provider = lambda domain: UsernamePassword("user", "pass") auth_server = TrivialAuthServer("auth.example.com", token="token") - registry_server = InMemoryOCIRegistryWithAuth( + registry_server = InMemoryOCIRegistryWithBearerAuth( image.domain, token="token", realm="https://auth.example.com/login" ) urlopen = create_opener( @@ -307,8 +391,8 @@ def unsupported_auth_method(self, req: Request): [ # missing service and scope ('Bearer realm="https://auth.example.com/login"', "unsupported authentication scheme"), - # we don't do basic auth - ('Basic realm="https://auth.example.com/login"', "unsupported authentication scheme"), + # missing realm + ("Basic", "unsupported authentication 
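# The Authorization header that InMemoryOCIRegistryWithBasicAuth decodes above
# is plain base64 over "username:password". A small self-contained check of
# that encoding (the helper name is made up for this sketch):
import base64

def basic_auth_header(username: str, password: str) -> str:
    creds = base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")
    return f"Basic {creds}"

assert basic_auth_header("user", "pass") == "Basic dXNlcjpwYXNz"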
scheme"), # multiple unsupported challenges ( "CustomChallenge method=unsupported, OtherChallenge method=x,param=y", @@ -476,6 +560,13 @@ def test_image_from_mirror(): assert image_from_mirror(mirror) == ImageReference.from_string("example.com/image") +def test_image_from_mirror_with_http_scheme(): + image = image_from_mirror(spack.mirrors.mirror.Mirror({"url": "oci+http://example.com/image"})) + assert image.scheme == "http" + assert image.with_tag("latest").scheme == "http" + assert image.with_digest(f"sha256:{1234:064x}").scheme == "http" + + def test_image_reference_str(): """Test that with_digest() works with Digest and str.""" digest_str = f"sha256:{1234:064x}" diff --git a/lib/spack/spack/test/package_class.py b/lib/spack/spack/test/package_class.py index 5eb94a3fac3e16..ccc3f7df047596 100644 --- a/lib/spack/spack/test/package_class.py +++ b/lib/spack/spack/test/package_class.py @@ -9,6 +9,7 @@ """ import os +import pathlib import shutil import pytest @@ -26,6 +27,7 @@ from spack.error import InstallError from spack.package_base import PackageBase from spack.solver.input_analysis import NoStaticAnalysis, StaticAnalysis +from spack.version import Version @pytest.fixture(scope="module") @@ -225,14 +227,14 @@ def test_cache_extra_sources(install_mockery, spec, sources, extras, expect): shutil.rmtree(os.path.dirname(source_path)) -def test_cache_extra_sources_fails(install_mockery): +def test_cache_extra_sources_fails(install_mockery, tmp_path: pathlib.Path): s = spack.concretize.concretize_one("pkg-a") with pytest.raises(InstallError) as exc_info: - spack.install_test.cache_extra_test_sources(s.package, ["/a/b", "no-such-file"]) + spack.install_test.cache_extra_test_sources(s.package, [str(tmp_path), "no-such-file"]) errors = str(exc_info.value) - assert "'/a/b') must be relative" in errors + assert f"'{tmp_path}') must be relative" in errors assert "'no-such-file') for the copy does not exist" in errors @@ -273,7 +275,7 @@ def test_package_license(): class BaseTestPackage(PackageBase): - extendees = None # currently a required attribute for is_extension() + extendees = {} # currently a required attribute for is_extension() def test_package_version_fails(): @@ -331,16 +333,16 @@ def test_deserialize_preserves_package_attribute(default_mock_concretization): @pytest.mark.require_provenance -def test_binary_provenance_commit_version(mock_packages): - spec = spack.concretize.concretize_one("git-ref-package@stable") +def test_git_provenance_commit_version(default_mock_concretization): + spec = default_mock_concretization("git-ref-package@stable") assert spec.satisfies(f"commit={'c' * 40}") -@pytest.mark.parametrize("version", ("main", "tag")) +@pytest.mark.parametrize("version", ("main", "tag", "annotated-tag")) @pytest.mark.parametrize("pre_stage", (True, False)) @pytest.mark.require_provenance @pytest.mark.disable_clean_stage_check -def test_binary_provenance_find_commit_ls_remote( +def test_git_provenance_find_commit_ls_remote( git, mock_git_repository, mock_packages, config, monkeypatch, version, pre_stage ): repo_path = mock_git_repository.path @@ -366,16 +368,41 @@ def test_binary_provenance_find_commit_ls_remote( vattrs = spec.package.versions[spec.version] git_ref = vattrs.get("tag") or vattrs.get("branch") - actual_commit = git("-C", repo_path, "rev-parse", git_ref, output=str, error=str).strip() + # add the ^{} suffix to the ref so it redirects to the first parent git object + # for branches and lightweight tags the suffix makes no difference since it is + # always a commit SHA, 
but for annotated tags the SHA shifts from the tag SHA + # back to the commit SHA, which is what we want + actual_commit = git( + "-C", repo_path, "rev-parse", f"{git_ref}^{{}}", output=str, error=str + ).strip() assert spec.variants["commit"].value == actual_commit @pytest.mark.require_provenance @pytest.mark.disable_clean_stage_check -def test_binary_provenance_cant_resolve_commit(mock_packages, monkeypatch, config, capsys): +def test_git_provenance_cant_resolve_commit(mock_packages, monkeypatch, config, capfd, tmp_path): """Fail all attempts to resolve git commits""" + repo_path = str(tmp_path / "non_existent") + monkeypatch.setattr(spack.package_base.PackageBase, "git", repo_path, raising=False) monkeypatch.setattr(spack.package_base.PackageBase, "do_fetch", lambda *args, **kwargs: None) spec = spack.concretize.concretize_one("git-ref-package@develop") - captured = capsys.readouterr() + captured = capfd.readouterr() assert "commit" not in spec.variants assert "Warning: Unable to resolve the git commit" in captured.err + + +@pytest.mark.parametrize( + "pkg_name,preferred_version", + [ + # This package has a deprecated v1.1.0 which should not be the preferred + ("deprecated_versions", "1.0.0"), + # Python has v2.7.11 marked as preferred and newer v3 versions + ("python", "2.7.11"), + # This package has various versions, some deprecated, plus "main" and "develop" + ("git-ref-package", "3.0.1"), + ], +) +def test_package_preferred_version(mock_packages, config, pkg_name, preferred_version): + """Tests retrieving the preferred version of a package.""" + pkg_cls = mock_packages.get_pkg_class(pkg_name) + assert spack.package_base.preferred_version(pkg_cls) == Version(preferred_version) diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py index 62980e5c253e5c..75378a0e974473 100644 --- a/lib/spack/spack/test/packages.py +++ b/lib/spack/spack/test/packages.py @@ -13,6 +13,7 @@ import spack.directives import spack.error import spack.fetch_strategy +import spack.package import spack.package_base import spack.repo from spack.paths import mock_packages_path @@ -364,6 +365,21 @@ def test_package_can_have_sparse_checkout_properties_with_gitversion( assert fetcher.git_sparse_paths == pkg_cls.git_sparse_paths +def test_package_version_can_have_sparse_checkout_properties( + mock_packages, mock_fetch, mock_stage +): + spec = Spec("git-sparsepaths-version") + pkg_cls = spack.repo.PATH.get_pkg_class(spec.name) + + fetcher = spack.fetch_strategy.for_package_version(pkg_cls(spec), version="1.0") + assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy) + assert fetcher.git_sparse_paths == ["foo", "bar"] + + fetcher = spack.fetch_strategy.for_package_version(pkg_cls(spec), version="0.9") + assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy) + assert fetcher.git_sparse_paths is None + + def test_package_can_depend_on_commit_of_dependency(mock_packages, config): spec = spack.concretize.concretize_one(Spec("git-ref-commit-dep@1.0.0")) assert spec.satisfies(f"^git-ref-package commit={'a' * 40}") @@ -399,3 +415,10 @@ def test_pkg_name_can_only_be_derived_when_package_module(): with pytest.raises(ValueError, match="Package ExamplePackage is not a known Spack package"): ExamplePackage.name + + +def test_spack_package_api_versioning(): + """Test that the symbols in spack.package.api match the public API.""" + assert spack.package.__all__ == [ + symbol for symbols in spack.package.api.values() for symbol in symbols + ] diff --git a/lib/spack/spack/test/packaging.py 
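# An illustration of the "^{}" peeling used in the ls-remote test above; the
# helper name is made up, and it shells out to git the same way the test does.
# For an annotated tag, rev-parse on the bare ref yields the tag object's SHA,
# while "<ref>^{}" dereferences tag objects recursively until a non-tag object
# (the tagged commit) is reached; branches and lightweight tags are unaffected.
import subprocess

def peeled_commit(repo_path: str, ref: str) -> str:
    result = subprocess.run(
        ["git", "-C", repo_path, "rev-parse", f"{ref}^{{}}"],
        check=True,
        capture_output=True,
        text=True,
    )
    return result.stdout.strip()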
b/lib/spack/spack/test/packaging.py index c219f64ac8d83c..d38dda9ea80106 100644 --- a/lib/spack/spack/test/packaging.py +++ b/lib/spack/spack/test/packaging.py @@ -15,13 +15,13 @@ import pytest -import spack.binary_distribution as bindist +import spack.binary_distribution import spack.cmd.buildcache as buildcache +import spack.cmd.mirror import spack.concretize import spack.config import spack.error import spack.fetch_strategy -import spack.mirrors.utils import spack.package_base import spack.stage import spack.util.gpg @@ -54,7 +54,7 @@ def test_buildcache(mock_archive, tmp_path: pathlib.Path, monkeypatch, mutable_c # Create the build cache and put it directly into the mirror mirror_path = str(tmp_path / "test-mirror") - spack.mirrors.utils.create(mirror_path, specs=[]) + spack.cmd.mirror.create(mirror_path, specs=[]) # register mirror with spack config mirrors = {"spack-mirror-test": url_util.path_to_file_url(mirror_path)} @@ -92,7 +92,7 @@ def test_buildcache(mock_archive, tmp_path: pathlib.Path, monkeypatch, mutable_c assert "dummy.txt" in files # Validate the relocation information - buildinfo = bindist.read_buildinfo_file(spec.prefix) + buildinfo = spack.binary_distribution.read_buildinfo_file(spec.prefix) assert buildinfo["relocate_textfiles"] == ["dummy.txt"] assert buildinfo["relocate_links"] == ["link_to_dummy.txt"] diff --git a/lib/spack/spack/test/patch.py b/lib/spack/spack/test/patch.py index e56eaff9082095..3a8b61b29b43a7 100644 --- a/lib/spack/spack/test/patch.py +++ b/lib/spack/spack/test/patch.py @@ -12,6 +12,7 @@ import pytest import spack.concretize +import spack.deptypes as dt import spack.error import spack.fetch_strategy import spack.patch @@ -127,7 +128,11 @@ def test_url_patch(mock_packages, mock_patch_stage, filename, sha256, archive_sh patch_stage.fetch() patch_stage.expand_archive() spack.patch.apply_patch( - stage, patch_stage.single_file, patch.level, patch.working_dir, patch.reverse + stage.source_path, + patch_stage.single_file, + patch.level, + patch.working_dir, + patch.reverse, ) with working_dir(stage.source_path): @@ -143,7 +148,11 @@ def test_url_patch(mock_packages, mock_patch_stage, filename, sha256, archive_sh patch_stage.fetch() patch_stage.expand_archive() spack.patch.apply_patch( - stage, patch_stage.single_file, patch.level, patch.working_dir, patch.reverse + stage.source_path, + patch_stage.single_file, + patch.level, + patch.working_dir, + patch.reverse, ) with working_dir(stage.source_path): @@ -212,25 +221,42 @@ def test_nested_directives(mock_packages): """Ensure pkg data structures are set up properly by nested directives.""" # this ensures that the patch() directive results were removed # properly from the DirectiveMeta._directives_to_be_executed list - patcher = spack.repo.PATH.get_pkg_class("patch-several-dependencies") - assert len(patcher.patches) == 0 + package = spack.repo.PATH.get_pkg_class("patch-several-dependencies") + assert len(package.patches) == 0 # this ensures that results of dependency patches were properly added # to Dependency objects. 
- deps_by_name = patcher.dependencies_by_name() - libelf_dep = deps_by_name["libelf"][0] - assert len(libelf_dep.patches) == 1 - assert len(libelf_dep.patches[Spec()]) == 1 - - libdwarf_dep = deps_by_name["libdwarf"][0] - assert len(libdwarf_dep.patches) == 2 - assert len(libdwarf_dep.patches[Spec()]) == 1 - assert len(libdwarf_dep.patches[Spec("@20111030")]) == 1 - - fake_dep = deps_by_name["fake"][0] - assert len(fake_dep.patches) == 1 - assert len(fake_dep.patches[Spec()]) == 2 + # package.dependencies is keyed by three when clauses + assert package.dependencies.keys() == {Spec(), Spec("+foo"), Spec("@1.0")} + + # fake and libelf are unconditional dependencies + when_unconditional = package.dependencies[Spec()] + assert when_unconditional.keys() == {"fake", "libelf"} + # fake has two unconditional URL patches + assert when_unconditional["fake"].patches.keys() == {Spec()} + assert len(when_unconditional["fake"].patches[Spec()]) == 2 + # libelf has one unconditional patch + assert when_unconditional["libelf"].patches.keys() == {Spec()} + assert len(when_unconditional["libelf"].patches[Spec()]) == 1 + + # there are multiple depends_on directives for libelf under the +foo when clause; these must be + # reduced to a single Dependency object. + when_foo = package.dependencies[Spec("+foo")] + assert when_foo.keys() == {"libelf"} + assert when_foo["libelf"].spec == Spec("libelf@0.8.10") + assert when_foo["libelf"].depflag == dt.BUILD | dt.LINK + # there is one unconditional patch for libelf under the +foo when clause + assert len(when_foo["libelf"].patches) == 1 + assert len(when_foo["libelf"].patches[Spec()]) == 1 + + # libdwarf is a dependency when @1.0 with two patches applied from a single depends_on + # statement, one conditional on the libdwarf version + when_1_0 = package.dependencies[Spec("@1.0")] + assert when_1_0.keys() == {"libdwarf"} + assert when_1_0["libdwarf"].patches.keys() == {Spec(), Spec("@20111030")} + assert len(when_1_0["libdwarf"].patches[Spec()]) == 1 + assert len(when_1_0["libdwarf"].patches[Spec("@20111030")]) == 1 @pytest.mark.not_on_windows("Test requires Autotools") @@ -437,7 +463,7 @@ def test_patch_no_file(): patch = spack.patch.Patch(fp, "nonexistent_file", 0, "") patch.path = "test" with pytest.raises(spack.error.NoSuchPatchError, match="No such patch:"): - spack.patch.apply_patch(Stage("https://example.com/foo.patch"), patch.path) + spack.patch.apply_patch(Stage("https://example.com/foo.patch").source_path, patch.path) def test_patch_no_sha256(): @@ -482,11 +508,11 @@ def test_sha256_setter(mock_packages, mock_patch_stage, config): def test_invalid_from_dict(mock_packages, config): dictionary = {} with pytest.raises(ValueError, match="Invalid patch dictionary:"): - spack.patch.from_dict(dictionary) + spack.patch.from_dict(dictionary, mock_packages) dictionary = {"owner": "patch"} with pytest.raises(ValueError, match="Invalid patch dictionary:"): - spack.patch.from_dict(dictionary) + spack.patch.from_dict(dictionary, mock_packages) dictionary = { "owner": "patch", @@ -497,4 +523,4 @@ def test_invalid_from_dict(mock_packages, config): "sha256": bar_sha256, } with pytest.raises(spack.fetch_strategy.ChecksumError, match="sha256 checksum failed for"): - spack.patch.from_dict(dictionary) + spack.patch.from_dict(dictionary, mock_packages) diff --git a/lib/spack/spack/test/pattern.py b/lib/spack/spack/test/pattern.py deleted file mode 100644 index 6667a38f887d50..00000000000000 --- a/lib/spack/spack/test/pattern.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright Spack 
Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - - -import pytest - -import spack.util.pattern as pattern - - -@pytest.fixture() -def interface(): - """Returns the interface class for the composite.""" - - class Base: - counter = 0 - - def add(self): - raise NotImplementedError("add not implemented") - - def subtract(self): - raise NotImplementedError("subtract not implemented") - - return Base - - -@pytest.fixture() -def implementation(interface): - """Returns an implementation of the interface""" - - class Implementation(interface): - def __init__(self, value): - self.value = value - - def add(self): - interface.counter += self.value - - def subtract(self): - interface.counter -= self.value - - return Implementation - - -@pytest.fixture(params=["interface", "method_list"]) -def composite(interface, implementation, request): - """Returns a composite that contains an instance of `implementation(1)` - and one of `implementation(2)`. - """ - if request.param == "interface": - - @pattern.composite(interface=interface) - class Composite: - pass - - else: - - @pattern.composite(method_list=["add", "subtract"]) - class Composite: - pass - - c = Composite() - c.append(implementation(1)) - c.append(implementation(2)) - - return c - - -def test_composite_interface_calls(interface, composite): - composite.add() - assert interface.counter == 3 - - composite.pop() - composite.subtract() - assert interface.counter == 2 - - -def test_composite_wrong_container(interface): - with pytest.raises(TypeError): - - @pattern.composite(interface=interface, container=2) - class CompositeFromInterface: - pass - - -def test_composite_no_methods(): - with pytest.raises(TypeError): - - @pattern.composite() - class CompositeFromInterface: - pass diff --git a/lib/spack/spack/test/repo.py b/lib/spack/spack/test/repo.py index d684b052f1987c..06a90559af9a3f 100644 --- a/lib/spack/spack/test/repo.py +++ b/lib/spack/spack/test/repo.py @@ -6,7 +6,6 @@ import pytest -import spack import spack.environment import spack.package_base import spack.paths @@ -154,18 +153,18 @@ def test_repo_path_handles_package_removal(mock_packages, repo_builder: RepoBuil def test_repo_dump_virtuals( - tmp_path: pathlib.Path, mutable_mock_repo, mock_packages, ensure_debug, capsys + tmp_path: pathlib.Path, mutable_mock_repo, mock_packages, ensure_debug, capfd ): # Start with a package-less virtual vspec = spack.spec.Spec("something") mutable_mock_repo.dump_provenance(vspec, str(tmp_path)) - captured = capsys.readouterr()[1] + captured = capfd.readouterr()[1] assert "does not have a package" in captured # Now with a virtual with a package vspec = spack.spec.Spec("externalvirtual") mutable_mock_repo.dump_provenance(vspec, str(tmp_path)) - captured = capsys.readouterr()[1] + captured = capfd.readouterr()[1] assert "Installing" in captured assert "package.py" in os.listdir(str(tmp_path)), "Expected the virtual's package to be copied" @@ -425,7 +424,7 @@ def test_mod_to_pkg_name_and_reverse(): assert spack.util.naming.pkg_name_to_pkg_dir("none", package_api=(2, 0)) == "none" -def test_repo_v2_invalid_module_name(tmp_path: pathlib.Path, capsys): +def test_repo_v2_invalid_module_name(tmp_path: pathlib.Path, capfd): # Create a repo with a v2 structure root, _ = spack.repo.create_repo(str(tmp_path), namespace="repo_1", package_api=(2, 0)) repo_dir = pathlib.Path(root) @@ -453,12 +452,12 @@ class Uppercase(PackageBase): with spack.repo.use_repositories(str(repo_dir)) as repo: assert 
len(repo.all_package_names()) == 0 - stderr = capsys.readouterr().err + stderr = capfd.readouterr().err assert "cannot be used because `zlib-ng` is not a valid Spack package module name" in stderr assert "cannot be used because `UPPERCASE` is not a valid Spack package module name" in stderr -def test_repo_v2_module_and_class_to_package_name(tmp_path: pathlib.Path, capsys): +def test_repo_v2_module_and_class_to_package_name(tmp_path: pathlib.Path): # Create a repo with a v2 structure root, _ = spack.repo.create_repo(str(tmp_path), namespace="repo_2", package_api=(2, 0)) repo_dir = pathlib.Path(root) @@ -716,7 +715,11 @@ def __call__(self, *args, **kwargs) -> str: # type: ignore action = args[0] if action == "ls-remote": - return "refs/heads/develop" + return """\ +a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD +165c479984b94051c982a6be1bd850f8bae02858 refs/heads/feature-branch +a8eff4da7aab59bbf5996ac1720954bf82443247 refs/heads/develop +3bd0276ab0491552247fa055921a23d2ffd9443c refs/heads/releases/v0.20""" elif action == "rev-parse": return "develop" @@ -750,6 +753,8 @@ def __call__(self, *args, **kwargs) -> str: # type: ignore assert len(errors_1) == 1 assert all("No repo.yaml" in str(err) for err in errors_1.values()), errors_1 assert descriptors_1["foo"].relative_paths == ["spack_repo/foo"] + # Verify that the default branch was detected from ls-remote + assert descriptors_1["foo"].branch == "develop" # Do the same test with another instance: it should *not* clone a second time. repo_path_2, errors_2 = repos_2.construct(cache=cache, find_git=MockGit) @@ -804,7 +809,11 @@ def __call__(self, *args, **kwargs) -> str: # type: ignore action = args[0] if action == "ls-remote": - return "refs/heads/develop" + return """\ +a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD +165c479984b94051c982a6be1bd850f8bae02858 refs/heads/feature-branch +a8eff4da7aab59bbf5996ac1720954bf82443247 refs/heads/develop +3bd0276ab0491552247fa055921a23d2ffd9443c refs/heads/releases/v0.20""" elif action == "rev-parse": return "develop" @@ -892,7 +901,10 @@ def __call__(self, *args, **kwargs) -> str: # type: ignore action = args[0] if action == "ls-remote": - return "bad string" + # HEAD ref exists, but no default branch (i.e. 
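# The MockGit fixtures above encode the expected default-branch detection:
# the SHA advertised for HEAD is matched against the refs/heads/* entries.
# A hedged sketch of that matching logic (not the actual descriptor code):
from typing import Dict, Optional

def guess_default_branch(ls_remote_output: str) -> Optional[str]:
    head_sha: Optional[str] = None
    heads: Dict[str, str] = {}
    for line in ls_remote_output.splitlines():
        parts = line.split()
        if len(parts) != 2:
            continue
        sha, ref = parts
        if ref == "HEAD":
            head_sha = sha
        elif ref.startswith("refs/heads/"):
            heads[ref[len("refs/heads/"):]] = sha
    # First branch whose tip matches HEAD; None when only HEAD is advertised
    return next((name for name, sha in heads.items() if sha == head_sha), None)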
no refs/heads/*) + return "a8eff4da7aab59bbf5996ac1720954bf82443247 HEAD" + + return "" class MockGitFailed(spack.util.executable.Executable): def __init__(self): diff --git a/lib/spack/spack/test/schema.py b/lib/spack/spack/test/schema.py index fb82f788376055..52675874125c7f 100644 --- a/lib/spack/spack/test/schema.py +++ b/lib/spack/spack/test/schema.py @@ -2,16 +2,134 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import json +import importlib import os import pytest from spack.vendor import jsonschema -import spack.paths import spack.schema import spack.util.spack_yaml as syaml +from spack.llnl.util.lang import list_modules + +_draft_07_with_spack_extensions = { + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": {"type": "array", "minItems": 1, "items": {"$ref": "#"}}, + "nonNegativeInteger": {"type": "integer", "minimum": 0}, + "nonNegativeIntegerDefault0": { + "allOf": [{"$ref": "#/definitions/nonNegativeInteger"}, {"default": 0}] + }, + "simpleTypes": { + "enum": ["array", "boolean", "integer", "null", "number", "object", "string"] + }, + "stringArray": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "default": [], + }, + }, + "type": ["object", "boolean"], + "properties": { + "$id": {"type": "string", "format": "uri-reference"}, + "$schema": {"type": "string", "format": "uri"}, + "$ref": {"type": "string", "format": "uri-reference"}, + "$comment": {"type": "string"}, + "title": {"type": "string"}, + "description": {"type": "string"}, + "default": True, + "readOnly": {"type": "boolean", "default": False}, + "writeOnly": {"type": "boolean", "default": False}, + "examples": {"type": "array", "items": True}, + "multipleOf": {"type": "number", "exclusiveMinimum": 0}, + "maximum": {"type": "number"}, + "exclusiveMaximum": {"type": "number"}, + "minimum": {"type": "number"}, + "exclusiveMinimum": {"type": "number"}, + "maxLength": {"$ref": "#/definitions/nonNegativeInteger"}, + "minLength": {"$ref": "#/definitions/nonNegativeIntegerDefault0"}, + "pattern": {"type": "string", "format": "regex"}, + "additionalItems": {"$ref": "#"}, + "items": { + "anyOf": [{"$ref": "#"}, {"$ref": "#/definitions/schemaArray"}], + "default": True, + }, + "maxItems": {"$ref": "#/definitions/nonNegativeInteger"}, + "minItems": {"$ref": "#/definitions/nonNegativeIntegerDefault0"}, + "uniqueItems": {"type": "boolean", "default": False}, + "contains": {"$ref": "#"}, + "maxProperties": {"$ref": "#/definitions/nonNegativeInteger"}, + "minProperties": {"$ref": "#/definitions/nonNegativeIntegerDefault0"}, + "required": {"$ref": "#/definitions/stringArray"}, + "additionalProperties": {"$ref": "#"}, + "definitions": {"type": "object", "additionalProperties": {"$ref": "#"}, "default": {}}, + "properties": {"type": "object", "additionalProperties": {"$ref": "#"}, "default": {}}, + "patternProperties": { + "type": "object", + "additionalProperties": {"$ref": "#"}, + "propertyNames": {"format": "regex"}, + "default": {}, + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [{"$ref": "#"}, {"$ref": "#/definitions/stringArray"}] + }, + }, + "propertyNames": {"$ref": "#"}, + "const": True, + "enum": {"type": "array", "items": True, "minItems": 1, "uniqueItems": True}, + "type": { + "anyOf": [ + {"$ref": "#/definitions/simpleTypes"}, + { + "type": "array", + "items": {"$ref": "#/definitions/simpleTypes"}, + "minItems": 1, + 
"uniqueItems": True, + }, + ] + }, + "format": {"type": "string"}, + "contentMediaType": {"type": "string"}, + "contentEncoding": {"type": "string"}, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": {"$ref": "#"}, + "allOf": {"$ref": "#/definitions/schemaArray"}, + "anyOf": {"$ref": "#/definitions/schemaArray"}, + "oneOf": {"$ref": "#/definitions/schemaArray"}, + "not": {"$ref": "#"}, + # What follows is two Spack extensions to JSON Schema Draft 7: + # deprecatedProperties and additionalKeysAreSpecs + "deprecatedProperties": { + "type": "array", + "items": { + "type": "object", + "properties": { + "names": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, + "uniqueItems": True, + }, + "message": {"type": "string"}, + "error": {"type": "boolean"}, + }, + "required": ["names", "message"], + "additionalProperties": False, + }, + }, + "additionalKeysAreSpecs": {"type": "boolean"}, + }, + "default": True, + # note: not in draft-07, this is for catching typos + "additionalProperties": False, +} @pytest.fixture() @@ -46,15 +164,6 @@ def module_suffixes_schema(): } -@pytest.fixture(scope="module") -def meta_schema(): - """Meta schema for JSON schema validation (Draft 4)""" - meta_schema_file = os.path.join(spack.paths.test_path, "data", "jsonschema_meta.json") - with open(meta_schema_file, encoding="utf-8") as f: - ms = json.load(f) - return ms - - @pytest.mark.regression("9857") def test_validate_spec(validate_spec_schema): v = spack.schema.Validator(validate_spec_schema) @@ -78,33 +187,6 @@ def test_module_suffixes(module_suffixes_schema): v.validate(data) -@pytest.mark.regression("10246") -@pytest.mark.parametrize( - "config_name", - [ - "compilers", - "config", - "definitions", - "include", - "env", - "merged", - "mirrors", - "modules", - "packages", - "repos", - ], -) -def test_schema_validation(meta_schema, config_name): - import importlib - - module_name = "spack.schema.{0}".format(config_name) - module = importlib.import_module(module_name) - schema = getattr(module, "schema") - - # If this validation throws the test won't pass - jsonschema.validate(schema, meta_schema) - - def test_deprecated_properties(module_suffixes_schema): # Test that an error is reported when 'error: True' msg_fmt = r"{name} is deprecated" @@ -151,3 +233,22 @@ def test_list_merge_order(): result = spack.schema.merge_yaml(dest, source) assert ["a", "b", "c", "d", "e", "f"] == result + + +def test_spack_schemas_are_valid(): + """Test that the Spack schemas in spack.schema.*.schema are valid under JSON Schema Draft 7 + with Spack extensions *only*.""" + # Collect schema submodules, and verify we have at least a few known ones + schema_submodules = ( + importlib.import_module(f"spack.schema.{name}") + for name in list_modules(os.path.dirname(spack.schema.__file__)) + ) + schemas = {m.__name__: m.schema for m in schema_submodules if hasattr(m, "schema")} + assert set(schemas) >= {"spack.schema.config", "spack.schema.packages", "spack.schema.modules"} + + # Validate them using the meta-schema + for module_name, module_schema in schemas.items(): + try: + jsonschema.validate(module_schema, _draft_07_with_spack_extensions) + except jsonschema.ValidationError as e: + raise RuntimeError(f"Invalid JSON schema in {module_name}: {e.message}") from e diff --git a/lib/spack/spack/test/spack_yaml.py b/lib/spack/spack/test/spack_yaml.py index 73712b86c500d0..ae04e0396127a3 100644 --- a/lib/spack/spack/test/spack_yaml.py +++ b/lib/spack/spack/test/spack_yaml.py @@ -8,6 +8,7 @@ import pytest import 
spack.util.spack_yaml as syaml +from spack.util.spack_yaml import DictWithLineInfo @pytest.fixture() @@ -154,3 +155,36 @@ def test_sorted_dict(): "y": [{"w": [2, 1, 0], "x": 0}, 0], "z": 0, } + + +def test_deepcopy_to_native(): + yaml = """\ +a: + b: 1 + c: 1.0 + d: + - e: false + - f: null + - "string" + 2.0: "float key" + 1: "int key" +""" + allowed_types = {str, int, float, bool, type(None), DictWithLineInfo, list} + original = syaml.load(yaml) + copied = syaml.deepcopy_as_builtin(original) + assert original == copied + assert type(copied["a"]["b"]) is int + assert type(copied["a"]["c"]) is float + assert type(copied["a"]["d"][0]["e"]) is bool # edge case: bool is subclass of int + assert type(copied["a"]["d"][1]["f"]) is type(None) + assert type(copied["a"]["d"][2]) is str + + stack = [copied] + while stack: + obj = stack.pop() + assert type(obj) in allowed_types + if type(obj) is DictWithLineInfo: + stack.extend(obj.keys()) + stack.extend(obj.values()) + elif type(obj) is list: + stack.extend(obj) diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py index 6fa025d21c812a..4a283c55be029a 100644 --- a/lib/spack/spack/test/spec_dag.py +++ b/lib/spack/spack/test/spec_dag.py @@ -11,7 +11,7 @@ import spack.error import spack.installer import spack.repo -import spack.test.conftest +import spack.solver.asp import spack.util.hash as hashutil import spack.version from spack.dependency import Dependency @@ -457,9 +457,9 @@ def test_dependents_and_dependencies_are_correct(self): @pytest.mark.parametrize( "constraint_str,spec_str", [ - ("mpich@1.0", "mpileaks ^mpich@2.0"), + ("mpich@1.0", "mpileaks ^mpich@3.0"), ("mpich%gcc", "mpileaks ^mpich%intel"), - ("mpich%gcc@4.6", "mpileaks ^mpich%gcc@4.5"), + ("mpich%gcc@2.0", "mpileaks ^mpich%gcc@3.0"), ], ) def test_unsatisfiable_cases(self, set_dependency, constraint_str, spec_str): @@ -475,7 +475,7 @@ def test_unsatisfiable_cases(self, set_dependency, constraint_str, spec_str): ) def test_invalid_dep(self, spec_str): spec = Spec(spec_str) - with pytest.raises(spack.error.SpecError): + with pytest.raises(spack.solver.asp.InvalidDependencyError): spack.concretize.concretize_one(spec) def test_equal(self): diff --git a/lib/spack/spack/test/spec_list.py b/lib/spack/spack/test/spec_list.py index 1a5c51f8df9a99..776392f4317ab0 100644 --- a/lib/spack/spack/test/spec_list.py +++ b/lib/spack/spack/test/spec_list.py @@ -205,3 +205,18 @@ def test_spec_list_exclude_with_abstract_hashes(self, install_mockery): # Ensure that only mpich~debug is selected, and that the assembled spec remains abstract. 
assert len(result.specs) == 1 assert result.specs[0] == Spec(f"mpileaks ^callpath ^mpich/{mpich_2.dag_hash(5)}") + + @pytest.mark.regression("51703") + def test_exclusion_with_conditional_dependencies(self): + """Tests that we can exclude some spec using conditional dependencies in the exclusion.""" + parser = SpecListParser() + result = parser.parse_user_specs( + name="specs", + yaml_list=[ + { + "matrix": [["libunwind"], ["%[when=%c]c=gcc", "%[when=%c]c=llvm"]], + "exclude": ["libunwind %[when=%c]c=gcc"], + } + ], + ) + assert len(result.specs) == 1 diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py index 7c5d73b4afd2a8..7046f1298db4cc 100644 --- a/lib/spack/spack/test/spec_semantics.py +++ b/lib/spack/spack/test/spec_semantics.py @@ -9,8 +9,8 @@ import spack.concretize import spack.deptypes as dt import spack.directives -import spack.error import spack.llnl.util.lang +import spack.package_base import spack.paths import spack.solver.asp import spack.spec @@ -18,7 +18,9 @@ import spack.store import spack.variant import spack.version as vn +from spack.enums import PropagationPolicy from spack.error import SpecError, UnsatisfiableSpecError +from spack.llnl.util.tty.color import colorize from spack.spec import ArchSpec, DependencySpec, Spec, SpecFormatSigilError, SpecFormatStringError from spack.variant import ( InvalidVariantValueError, @@ -1707,25 +1709,6 @@ def test_spec_dict_hashless_dep(): ) -@pytest.mark.parametrize( - "specs,expected", - [ - # Anonymous specs without dependencies - (["+baz", "+bar"], "+baz+bar"), - (["@2.0:", "@:5.1", "+bar"], "@2.0:5.1 +bar"), - # Anonymous specs with dependencies - (["^mpich@3.2", "^mpich@:4.0+foo"], "^mpich@3.2 +foo"), - # Mix a real package with a virtual one. This test - # should fail if we start using the repository - (["^mpich@3.2", "^mpi+foo"], "^mpich@3.2 ^mpi+foo"), - ], -) -def test_merge_abstract_anonymous_specs(specs, expected): - specs = [Spec(x) for x in specs] - result = spack.spec.merge_abstract_anonymous_specs(*specs) - assert result == Spec(expected) - - @pytest.mark.parametrize( "anonymous,named,expected", [ @@ -2339,3 +2322,175 @@ def test_edge_equality_accounts_for_when_condition(): edge1 = DependencySpec(parent, child, depflag=0, virtuals=(), when=Spec("%c")) edge2 = DependencySpec(parent, child, depflag=0, virtuals=()) assert edge1 != edge2 + + +def test_long_spec(): + """Test that long_spec preserves dependency types and has correct ordering.""" + assert Spec("foo %m %l ^k %n %j").long_spec == "foo %l %m ^k %j %n" + + +@pytest.mark.parametrize( + "constraints,expected", + [ + # Anonymous specs without dependencies + (["+baz", "+bar"], "+baz+bar"), + (["@2.0:", "@:5.1", "+bar"], "@2.0:5.1 +bar"), + # Anonymous specs with dependencies + (["^mpich@3.2", "^mpich@:4.0+foo"], "^mpich@3.2 +foo"), + # Mix a real package with a virtual one. 
This test + # should fail if we start using the repository (["^mpich@3.2", "^mpi+foo"], "^mpich@3.2 ^mpi+foo"), + # Non direct dependencies + direct dependencies + (["^mpich", "%mpich"], "%mpich"), + (["^foo", "^bar %foo"], "^foo ^bar%foo"), + (["^foo", "%bar %foo"], "%bar%foo"), + ], +) +def test_constrain_symbolically(constraints, expected): + """Tests the semantics of constraining a spec when we don't resolve virtuals.""" + merged = Spec() + for c in constraints: + merged._constrain_symbolically(c) + assert merged == Spec(expected) + + reverse_order = Spec() + for c in reversed(constraints): + reverse_order._constrain_symbolically(c) + assert reverse_order == Spec(expected) + + +@pytest.mark.parametrize( + "parent_str,child_str,kwargs,expected_str,expected_repr", + [ + ( + "mpileaks", + "callpath", + {"virtuals": ()}, + "mpileaks ^callpath", + "DependencySpec('mpileaks', 'callpath', depflag=0, virtuals=())", + ), + ( + "mpileaks", + "callpath", + {"virtuals": ("mpi", "lapack")}, + "mpileaks ^[virtuals=lapack,mpi] callpath", + "DependencySpec('mpileaks', 'callpath', depflag=0, virtuals=('lapack', 'mpi'))", + ), + ( + "", + "callpath", + {"virtuals": ("mpi", "lapack"), "direct": True}, + " %[virtuals=lapack,mpi] callpath", + "DependencySpec('', 'callpath', depflag=0, virtuals=('lapack', 'mpi'), direct=True)", + ), + ( + "", + "callpath", + { + "virtuals": ("mpi", "lapack"), + "direct": True, + "propagation": PropagationPolicy.PREFERENCE, + }, + " %%[virtuals=lapack,mpi] callpath", + "DependencySpec('', 'callpath', depflag=0, virtuals=('lapack', 'mpi'), direct=True," + " propagation=PropagationPolicy.PREFERENCE)", + ), + ( + "", + "callpath", + {"virtuals": (), "direct": True, "propagation": PropagationPolicy.PREFERENCE}, + " %%callpath", + "DependencySpec('', 'callpath', depflag=0, virtuals=(), direct=True," + " propagation=PropagationPolicy.PREFERENCE)", + ), + ( + "mpileaks+foo", + "callpath+bar", + {"virtuals": (), "direct": True, "propagation": PropagationPolicy.PREFERENCE}, + "mpileaks+foo %%callpath+bar", + "DependencySpec('mpileaks+foo', 'callpath+bar', depflag=0, virtuals=(), direct=True," + " propagation=PropagationPolicy.PREFERENCE)", + ), + ], +) +def test_edge_representation(parent_str, child_str, kwargs, expected_str, expected_repr): + """Tests the string representations of edges.""" + parent = Spec(parent_str) or Spec() + child = Spec(child_str) or Spec() + edge = DependencySpec(parent, child, depflag=0, **kwargs) + assert str(edge) == expected_str + assert repr(edge) == expected_repr + + +@pytest.mark.parametrize( + "spec_str,assertions", + [ + # Check =* semantics for a "regular" variant + ("mpileaks foo=abc", [("foo=*", True), ("bar=*", False)]), + # Check the semantics for architecture related key value pairs + ( + "mpileaks", + [ + ("target=*", False), + ("os=*", False), + ("platform=*", False), + ("target=* platform=*", False), + ], + ), + ( + "mpileaks target=x86_64", + [ + ("target=*", True), + ("os=*", False), + ("platform=*", False), + ("target=* platform=*", False), + ], + ), + ("mpileaks os=debian6", [("target=*", False), ("os=*", True), ("platform=*", False)]), + ("mpileaks platform=linux", [("target=*", False), ("os=*", False), ("platform=*", True)]), + ( + "mpileaks platform=linux target=x86_64", + [ + ("target=*", True), + ("os=*", False), + ("platform=*", True), + ("target=* platform=*", True), + ], + ), + ], +) +def test_attribute_existence_in_satisfies(spec_str,
assertions, mock_packages, config): + """Tests the semantics of =* when used in Spec.satisfies""" + s = Spec(spec_str) + for test, expected in assertions: + assert s.satisfies(test) is expected + + +@pytest.mark.regression("51768") +@pytest.mark.parametrize("spec_str", ["mpi", "%mpi", "^mpi", "%foo", "%c=gcc", "%[when=%c]c=gcc"]) +def test_specs_semantics_on_self(spec_str, mock_packages, config): + """Tests that an abstract spec satisfies and intersects with itself.""" + s = Spec(spec_str) + assert s.satisfies(s) + assert s.intersects(s) + + +@pytest.mark.parametrize( + "spec_str,expected_fmt", + [ + ("mpileaks@2.2", "mpileaks@_R{@=2.2}"), + ("mpileaks@2.3", "mpileaks@c{@=2.3}"), + ("mpileaks+debug", "@_R{+debug}"), + ], +) +def test_highlighting_spec_parts(spec_str, expected_fmt, default_mock_concretization): + """Tests correct highlighting of non-default versions and variants""" + s = default_mock_concretization(spec_str) + expected = colorize(expected_fmt, color=True) + colorized_str = s.format( + color=True, + highlight_version_fn=spack.package_base.non_preferred_version, + highlight_variant_fn=spack.package_base.non_default_variant, + ) + assert expected in colorized_str diff --git a/lib/spack/spack/test/spec_syntax.py b/lib/spack/spack/test/spec_syntax.py index 8402e073ad3bd1..f1e43742d2ea41 100644 --- a/lib/spack/spack/test/spec_syntax.py +++ b/lib/spack/spack/test/spec_syntax.py @@ -13,6 +13,7 @@ import spack.cmd import spack.concretize import spack.config +import spack.error import spack.llnl.util.filesystem as fs import spack.platforms.test import spack.repo @@ -92,7 +93,7 @@ def _specfile_for(spec_str, filename): ( "platform=test", [Token(SpecTokens.KEY_VALUE_PAIR, value="platform=test")], - "arch=test-None-None", + "platform=test", ), # Multiple tokens anonymous specs ( @@ -392,27 +393,27 @@ def _specfile_for(spec_str, filename): ( r"os=fe", # Various translations associated with the architecture [Token(SpecTokens.KEY_VALUE_PAIR, value="os=fe")], - "arch=test-debian6-None", + "platform=test os=debian6", ), ( r"os=default_os", [Token(SpecTokens.KEY_VALUE_PAIR, value="os=default_os")], - "arch=test-debian6-None", + "platform=test os=debian6", ), ( r"target=be", [Token(SpecTokens.KEY_VALUE_PAIR, value="target=be")], - f"arch=test-None-{spack.platforms.test.Test.default}", + f"platform=test target={spack.platforms.test.Test.default}", ), ( r"target=default_target", [Token(SpecTokens.KEY_VALUE_PAIR, value="target=default_target")], - f"arch=test-None-{spack.platforms.test.Test.default}", + f"platform=test target={spack.platforms.test.Test.default}", ), ( r"platform=linux", [Token(SpecTokens.KEY_VALUE_PAIR, value="platform=linux")], - r"arch=linux-None-None", + r"platform=linux", ), # Version hash pair ( @@ -495,7 +496,7 @@ def _specfile_for(spec_str, filename): ( r"target=:broadwell,icelake", [Token(SpecTokens.KEY_VALUE_PAIR, value="target=:broadwell,icelake")], - r"arch=None-None-:broadwell,icelake", + r"target=:broadwell,icelake", ), # Hash pair version followed by a variant ( @@ -635,7 +636,7 @@ def _specfile_for(spec_str, filename): Token(SpecTokens.VERSION, value="@10.4.0:10,11.3.0:"), Token(SpecTokens.KEY_VALUE_PAIR, value="target=aarch64:"), ], - "@10.4.0:10,11.3.0: arch=None-None-aarch64:", + "@10.4.0:10,11.3.0: target=aarch64:", ), ( "@:0.4 % nvhpc", @@ -883,7 +884,7 @@ def _specfile_for(spec_str, filename): Token(SpecTokens.KEY_VALUE_PAIR, "languages:=c,c++"), Token(SpecTokens.KEY_VALUE_PAIR, "target=x86_64"), ], - "mvapich %gcc languages:='c,c++' arch=None-None-x86_64", + 
"mvapich %gcc languages:='c,c++' target=x86_64", ), # Test conditional dependencies ( @@ -920,6 +921,46 @@ def _specfile_for(spec_str, filename): ], "foo ^[when='%c'] c=gcc", ), + # Test dependency propagation + ( + "foo %%gcc", + [ + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "foo"), + Token(SpecTokens.DEPENDENCY, "%%"), + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "gcc"), + ], + "foo %%gcc", + ), + ( + "foo %%c,cxx=gcc", + [ + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "foo"), + Token(SpecTokens.DEPENDENCY, "%%c,cxx=gcc", virtuals="c,cxx", substitute="gcc"), + ], + "foo %%c,cxx=gcc", + ), + ( + "foo %%[when='%c'] c=gcc", + [ + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "foo"), + Token(SpecTokens.START_EDGE_PROPERTIES, "%%["), + Token(SpecTokens.KEY_VALUE_PAIR, "when='%c'"), + Token(SpecTokens.END_EDGE_PROPERTIES, "] c=gcc", virtuals="c", substitute="gcc"), + ], + "foo %%[when='%c'] c=gcc", + ), + ( + "foo %%[when='%c' virtuals=c] gcc", + [ + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "foo"), + Token(SpecTokens.START_EDGE_PROPERTIES, "%%["), + Token(SpecTokens.KEY_VALUE_PAIR, "when='%c'"), + Token(SpecTokens.KEY_VALUE_PAIR, "virtuals=c"), + Token(SpecTokens.END_EDGE_PROPERTIES, "]"), + Token(SpecTokens.UNQUALIFIED_PACKAGE_NAME, "gcc"), + ], + "foo %%[when='%c'] c=gcc", + ), ], ) def test_parse_single_spec(spec_str, tokens, expected_roundtrip, mock_git_test_package): @@ -1470,55 +1511,57 @@ def test_error_conditions(text, match_string): [ # Specfile related errors pytest.param( - "/bogus/path/libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS + "/bogus/path/libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS + ), + pytest.param( + "../../libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS ), - pytest.param("../../libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS), - pytest.param("./libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS), + pytest.param("./libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS), pytest.param( "libfoo ^/bogus/path/libdwarf.yaml", - spack.spec.NoSuchSpecFileError, + spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS, ), pytest.param( - "libfoo ^../../libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS + "libfoo ^../../libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS ), pytest.param( - "libfoo ^./libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS + "libfoo ^./libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS ), pytest.param( "/bogus/path/libdwarf.yamlfoobar", - spack.spec.NoSuchSpecFileError, + spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS, ), pytest.param( "libdwarf^/bogus/path/libelf.yamlfoobar ^/path/to/bogus.yaml", - spack.spec.NoSuchSpecFileError, + spack.error.NoSuchSpecFileError, marks=SKIP_ON_WINDOWS, ), pytest.param( - "c:\\bogus\\path\\libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_UNIX + "c:\\bogus\\path\\libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX ), - pytest.param("..\\..\\libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_UNIX), - pytest.param(".\\libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_UNIX), + pytest.param("..\\..\\libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX), + pytest.param(".\\libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX), pytest.param( "libfoo ^c:\\bogus\\path\\libdwarf.yaml", - spack.spec.NoSuchSpecFileError, + 
spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX, ), pytest.param( - "libfoo ^..\\..\\libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_UNIX + "libfoo ^..\\..\\libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX ), pytest.param( - "libfoo ^.\\libdwarf.yaml", spack.spec.NoSuchSpecFileError, marks=SKIP_ON_UNIX + "libfoo ^.\\libdwarf.yaml", spack.error.NoSuchSpecFileError, marks=SKIP_ON_UNIX ), pytest.param( "c:\\bogus\\path\\libdwarf.yamlfoobar", - spack.spec.SpecFilenameError, + spack.error.SpecFilenameError, marks=SKIP_ON_UNIX, ), pytest.param( "libdwarf^c:\\bogus\\path\\libelf.yamlfoobar ^c:\\path\\to\\bogus.yaml", - spack.spec.SpecFilenameError, + spack.error.SpecFilenameError, marks=SKIP_ON_UNIX, ), ], diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py index 4a195b67d1a257..ade35eca9bea72 100644 --- a/lib/spack/spack/test/spec_yaml.py +++ b/lib/spack/spack/test/spec_yaml.py @@ -26,7 +26,6 @@ import spack.paths import spack.repo import spack.spec -import spack.test.conftest import spack.util.spack_json as sjson import spack.util.spack_yaml as syaml from spack.spec import Spec, save_dependency_specfiles @@ -547,3 +546,25 @@ def test_direct_edges_and_round_tripping_to_dict(spec_str, default_mock_concreti continue for dependency_data in node["dependencies"]: assert "direct" not in dependency_data["parameters"] + + +def test_pickle_preserves_identity_and_prefix(default_mock_concretization): + """When pickling multiple specs that share dependencies, the identity of those dependencies + should be preserved when unpickling.""" + mpileaks_before: Spec = default_mock_concretization("mpileaks") + callpath_before = mpileaks_before.dependencies("callpath")[0] + callpath_before.set_prefix("/fake/prefix/callpath") + specs_before = [mpileaks_before, callpath_before] + specs_after = pickle.loads(pickle.dumps(specs_before)) + mpileaks_after, callpath_after = specs_after + + # Test whether the mpileaks<->callpath link is preserved and corresponds to the same object + assert mpileaks_after is callpath_after.dependents("mpileaks")[0] + assert callpath_after is mpileaks_after.dependencies("callpath")[0] + + # Test that we have the exact same number of unique Spec objects before and after pickling + num_unique_specs = lambda specs: len({id(s) for r in specs for s in r.traverse()}) + assert num_unique_specs(specs_before) == num_unique_specs(specs_after) + + # Test that the specs are the same as dicts + assert mpileaks_before.to_dict() == mpileaks_after.to_dict() diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py index 3a8fd8abf9b7c1..aa817b4b53a097 100644 --- a/lib/spack/spack/test/stage.py +++ b/lib/spack/spack/test/stage.py @@ -19,6 +19,7 @@ import spack.fetch_strategy import spack.stage import spack.util.executable +import spack.util.path import spack.util.url as url_util from spack.llnl.util.filesystem import getuid, mkdirp, partition_path, readlink, touch, working_dir from spack.resource import Resource @@ -707,39 +708,45 @@ def test_create_stage_root(self, tmp_path: pathlib.Path, no_path_access): except OSError: pass - def test_resolve_paths(self): + def test_resolve_paths(self, monkeypatch): """Test _resolve_paths.""" assert spack.stage._resolve_paths([]) == [] - # resolved path without user appends user - paths = [os.path.join(os.path.sep, "a", "b", "c")] - can_paths = [paths[0]] - user = getpass.getuser() + user = "testuser" + monkeypatch.setattr(spack.util.path, "get_user", lambda: user) - if sys.platform != 
"win32": - can_paths = [os.path.join(paths[0], user)] - assert spack.stage._resolve_paths(paths) == can_paths + # Test that user is appended to path if not present (except on Windows) + if sys.platform == "win32": + path = r"C:\spack-test\a\b\c" + expected = path + else: + path = "/spack-test/a/b/c" + expected = os.path.join(path, user) - # resolved path with node including user does not append user - paths = [os.path.join(os.path.sep, "spack-{0}".format(user), "stage")] - assert spack.stage._resolve_paths(paths) == paths + assert spack.stage._resolve_paths([path]) == [expected] - tempdir = "$tempdir" - can_tempdir = canonicalize_path(tempdir) - user = getpass.getuser() - temp_has_user = user in can_tempdir.split(os.sep) + # Test that user is NOT appended if already present + if sys.platform == "win32": + path_with_user = rf"C:\spack-test\spack-{user}\stage" + else: + path_with_user = f"/spack-test/spack-{user}/stage" + + assert spack.stage._resolve_paths([path_with_user]) == [path_with_user] + + canonicalized_tempdir = canonicalize_path("$tempdir") + temp_has_user = user in canonicalized_tempdir.split(os.sep) paths = [ - os.path.join(tempdir, "stage"), - os.path.join(tempdir, "$user"), - os.path.join(tempdir, "$user", "$user"), - os.path.join(tempdir, "$user", "stage", "$user"), + os.path.join("$tempdir", "stage"), + os.path.join("$tempdir", "$user"), + os.path.join("$tempdir", "$user", "$user"), + os.path.join("$tempdir", "$user", "stage", "$user"), ] res_paths = [canonicalize_path(p) for p in paths] if temp_has_user: - res_paths[1] = can_tempdir - res_paths[2] = os.path.join(can_tempdir, user) - res_paths[3] = os.path.join(can_tempdir, "stage", user) + res_paths[1] = canonicalized_tempdir + res_paths[2] = os.path.join(canonicalized_tempdir, user) + res_paths[3] = os.path.join(canonicalized_tempdir, "stage", user) elif sys.platform != "win32": res_paths[0] = os.path.join(res_paths[0], user) @@ -766,16 +773,14 @@ def test_get_stage_root_bad_path(self, clear_stage_root): ) def test_stage_purge(self, tmp_path: pathlib.Path, clear_stage_root, path, purged): """Test purging of stage directories.""" - stage_dir = tmp_path / "stage" - stage_path = str(stage_dir) - - test_dir = stage_dir / path - test_dir.mkdir(parents=True) - test_path = str(test_dir) + stage_config_path = str(tmp_path / "stage") - with spack.config.override("config:build_stage", stage_path): + with spack.config.override("config:build_stage", stage_config_path): stage_root = spack.stage.get_stage_root() - assert stage_path == stage_root + + test_dir = pathlib.Path(stage_root) / path + test_dir.mkdir(parents=True) + test_path = str(test_dir) spack.stage.purge() @@ -883,13 +888,13 @@ def test_stage_create_replace_path(tmp_build_stage_dir): assert os.path.isdir(nondir) -def test_cannot_access(capsys): +def test_cannot_access(capfd): """Ensure can_access dies with the expected error.""" with pytest.raises(SystemExit): # It's far more portable to use a non-existent filename. 
spack.stage.ensure_access("/no/such/file") - captured = capsys.readouterr() + captured = capfd.readouterr() assert "Insufficient permissions" in str(captured) diff --git a/lib/spack/spack/test/tag.py b/lib/spack/spack/test/tag.py index 3a4e953dfbbb4a..8b9d565cf29189 100644 --- a/lib/spack/spack/test/tag.py +++ b/lib/spack/spack/test/tag.py @@ -6,6 +6,7 @@ import pytest +import spack.cmd.tags import spack.repo import spack.tag from spack.main import SpackCommand @@ -38,16 +39,9 @@ """ -def test_tag_copy(mock_packages): - index = spack.tag.TagIndex.from_json(io.StringIO(tags_json), repository=mock_packages) - new_index = index.copy() - - assert index.tags == new_index.tags - - def test_tag_get_all_available(mock_packages): for skip in [False, True]: - all_pkgs = spack.tag.packages_with_tags(None, False, skip) + all_pkgs = spack.cmd.tags.packages_with_tags(["tag1", "tag2", "tag3"], False, skip) assert sorted(all_pkgs["tag1"]) == ["mpich", "mpich2"] assert all_pkgs["tag2"] == ["mpich"] assert all_pkgs["tag3"] == ["mpich2"] @@ -73,11 +67,11 @@ def ensure_tags_results_equal(results, expected): ) def test_tag_get_available(tags, expected, mock_packages): # Ensure results for all tags - all_tag_pkgs = spack.tag.packages_with_tags(tags, False, False) + all_tag_pkgs = spack.cmd.tags.packages_with_tags(tags, False, False) ensure_tags_results_equal(all_tag_pkgs, expected) # Ensure results for tags expecting results since skipping otherwise - only_pkgs = spack.tag.packages_with_tags(tags, False, True) + only_pkgs = spack.cmd.tags.packages_with_tags(tags, False, True) if expected[tags[0]]: ensure_tags_results_equal(only_pkgs, expected) else: @@ -88,7 +82,7 @@ def test_tag_get_installed_packages(mock_packages, mock_archive, mock_fetch, ins install("--fake", "mpich") for skip in [False, True]: - all_pkgs = spack.tag.packages_with_tags(None, True, skip) + all_pkgs = spack.cmd.tags.packages_with_tags(["tag1", "tag2", "tag3"], True, skip) assert sorted(all_pkgs["tag1"]) == ["mpich"] assert all_pkgs["tag2"] == ["mpich"] assert skip or all_pkgs["tag3"] == [] @@ -103,23 +97,21 @@ def test_tag_index_round_trip(mock_packages): mock_index.to_json(ostream) istream = io.StringIO(ostream.getvalue()) - new_index = spack.tag.TagIndex.from_json(istream, repository=mock_packages) + new_index = spack.tag.TagIndex.from_json(istream) - assert mock_index == new_index + assert mock_index.tags == new_index.tags def test_tag_equal(mock_packages): - first_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json), repository=mock_packages) - second_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json), repository=mock_packages) + first_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json)) + second_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json)) - assert first_index == second_index + assert first_index.tags == second_index.tags def test_tag_merge(mock_packages): - first_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json), repository=mock_packages) - second_index = spack.tag.TagIndex.from_json( - io.StringIO(more_tags_json), repository=mock_packages - ) + first_index = spack.tag.TagIndex.from_json(io.StringIO(tags_json)) + second_index = spack.tag.TagIndex.from_json(io.StringIO(more_tags_json)) assert first_index != second_index @@ -140,21 +132,21 @@ def test_tag_merge(mock_packages): def test_tag_not_dict(mock_packages): list_json = "[]" with pytest.raises(spack.tag.TagIndexError) as e: - spack.tag.TagIndex.from_json(io.StringIO(list_json), repository=mock_packages) + 
spack.tag.TagIndex.from_json(io.StringIO(list_json)) assert "not a dict" in str(e) def test_tag_no_tags(mock_packages): pkg_json = '{"packages": []}' with pytest.raises(spack.tag.TagIndexError) as e: - spack.tag.TagIndex.from_json(io.StringIO(pkg_json), repository=mock_packages) + spack.tag.TagIndex.from_json(io.StringIO(pkg_json)) assert "does not start with" in str(e) def test_tag_update_package(mock_packages): mock_index = mock_packages.tag_index - index = spack.tag.TagIndex(repository=mock_packages) + index = spack.tag.TagIndex() for name in spack.repo.all_package_names(): - index.update_package(name) + index.update_package(name, repo=mock_packages) ensure_tags_results_equal(mock_index.tags, index.tags) diff --git a/lib/spack/spack/test/test_suite.py b/lib/spack/spack/test/test_suite.py index 689bf3f57b4481..678a5db2fb3e87 100644 --- a/lib/spack/spack/test/test_suite.py +++ b/lib/spack/spack/test/test_suite.py @@ -211,11 +211,11 @@ def test_test_functions_fails(): spack.install_test.test_functions(str) -def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, capsys): +def test_test_functions_pkgless(mock_packages, install_mockery, ensure_debug, capfd): """Confirm works for package providing a package-less virtual.""" spec = spack.concretize.concretize_one("simple-standalone-test") fns = spack.install_test.test_functions(spec.package, add_virtuals=True) - out = capsys.readouterr() + out = capfd.readouterr() assert len(fns) == 2, "Expected two test functions" for f in fns: assert f[1].__name__ in ["test_echo", "test_skip"] @@ -265,7 +265,7 @@ def test_package_copy_test_files_fails(mock_packages): assert "test suite is missing" in str(exc_info) -def test_package_copy_test_files_skips(mock_packages, ensure_debug, capsys): +def test_package_copy_test_files_skips(mock_packages, ensure_debug, capfd): """Confirm copy_test_files errors as expected if no package class found.""" # Try with a non-concrete spec and package with a test suite MockSuite = collections.namedtuple("TestSuite", ["specs"]) @@ -273,7 +273,7 @@ def test_package_copy_test_files_skips(mock_packages, ensure_debug, capsys): vspec = spack.spec.Spec("something") pkg = MyPackage("SomePackage", vspec, MockSuite([])) spack.install_test.copy_test_files(pkg, vspec) - out = capsys.readouterr()[1] + out = capfd.readouterr()[1] assert "skipping test data copy" in out assert "no package class found" in out diff --git a/lib/spack/spack/test/url_fetch.py b/lib/spack/spack/test/url_fetch.py index 80e53aa8e6a87f..16d3a16bddf59c 100644 --- a/lib/spack/spack/test/url_fetch.py +++ b/lib/spack/spack/test/url_fetch.py @@ -18,7 +18,6 @@ import spack.llnl.util.tty as tty import spack.url import spack.util.crypto as crypto -import spack.util.executable import spack.util.web as web_util import spack.version from spack.llnl.util.filesystem import is_exe, working_dir diff --git a/lib/spack/spack/test/util/archive.py b/lib/spack/spack/test/util/archive.py index 503f140426655b..1934eb41f89e24 100644 --- a/lib/spack/spack/test/util/archive.py +++ b/lib/spack/spack/test/util/archive.py @@ -196,15 +196,25 @@ def map_prefix(path: str) -> str: ] -@pytest.mark.parametrize("ref", ("test-branch", "test-tag")) +@pytest.mark.parametrize("ref", ("test-branch", "test-tag", "annotated-tag")) def test_get_commits_from_archive(mock_git_repository, tmp_path: Path, ref): + git_exe = mock_git_repository.git_exe with working_dir(str(tmp_path)): archive_file = str(tmp_path / "archive.tar.gz") path_to_name = lambda path: 
PurePath(path).relative_to(mock_git_repository.path).as_posix() + + # round trip the git repo, the desired ref will always be checked out + git_exe("-C", mock_git_repository.path, "checkout", ref) with gzip_compressed_tarfile(archive_file) as (tar, _, _): reproducible_tarfile_from_prefix( tar=tar, prefix=mock_git_repository.path, path_to_name=path_to_name ) + git_exe( + "-C", + mock_git_repository.path, + "checkout", + mock_git_repository.checks["default"].revision, + ) commit = retrieve_commit_from_archive(archive_file, ref) assert commit assert spack.version.is_git_commit_sha(commit) diff --git a/lib/spack/spack/test/util/executable.py b/lib/spack/spack/test/util/executable.py index 1b93da8cd10163..afd666bec27113 100644 --- a/lib/spack/spack/test/util/executable.py +++ b/lib/spack/spack/test/util/executable.py @@ -9,7 +9,6 @@ import pytest -import spack import spack.llnl.util.filesystem as fs import spack.main import spack.util.executable as ex diff --git a/lib/spack/spack/test/util/file_cache.py b/lib/spack/spack/test/util/file_cache.py index 73bb3daa521dda..47f4cd52961a12 100644 --- a/lib/spack/spack/test/util/file_cache.py +++ b/lib/spack/spack/test/util/file_cache.py @@ -83,6 +83,7 @@ def test_write_and_remove_cache_file(file_cache): @pytest.mark.not_on_windows("Not supported on Windows (yet)") +@pytest.mark.skipif(fs.getuid() == 0, reason="user is root") def test_cache_init_entry_fails(file_cache): """Test init_entry failures.""" relpath = fs.join_path("test-dir", "read-only-file.txt") @@ -106,6 +107,7 @@ def test_cache_init_entry_fails(file_cache): file_cache.init_entry(relpath) +@pytest.mark.skipif(fs.getuid() == 0, reason="user is root") def test_cache_write_readonly_cache_fails(file_cache): """Test writing a read-only cached file.""" filename = "read-only-file.txt" diff --git a/lib/spack/spack/test/util/git.py b/lib/spack/spack/test/util/git.py index 9b02d3d0176ffe..d2e9301166650f 100644 --- a/lib/spack/spack/test/util/git.py +++ b/lib/spack/spack/test/util/git.py @@ -33,19 +33,20 @@ def test_modified_files(mock_git_package_changes): assert files[0] == filename -def test_init_git_repo(git, tmp_path: pathlib.Path): - repo_url = "https://github.com/spack/spack.git" - destination = tmp_path / "test_git_init" +def test_init_git_repo(git, mock_git_version_info, tmp_path: pathlib.Path): + """Test that init_git_repo creates a new repo with remote but doesn't checkout.""" + repo, _, _ = mock_git_version_info - with working_dir(destination, create=True): - spack.util.git.init_git_repo(repo_url) + with working_dir(str(tmp_path / "test_git_init_repo"), create=True): + spack.util.git.init_git_repo(repo) + # Verify repo was initialized but no commits checked out yet assert "No commits yet" in git("status", output=str) -def test_pull_checkout_commit(git, tmp_path: pathlib.Path, mock_git_version_info): +def test_pull_checkout_commit_any_remote(git, tmp_path: pathlib.Path, mock_git_version_info): repo, _, commits = mock_git_version_info - destination = tmp_path / "test_git_checkout_commit" + destination = str(tmp_path / "test_git_checkout_commit") with working_dir(destination, create=True): spack.util.git.init_git_repo(repo) @@ -54,9 +55,21 @@ def test_pull_checkout_commit(git, tmp_path: pathlib.Path, mock_git_version_info assert commits[0] in git("rev-parse", "HEAD", output=str) +def test_pull_checkout_commit_specific_remote(git, tmp_path: pathlib.Path, mock_git_version_info): + """Test fetching a specific commit from a specific remote.""" + repo, _, commits = mock_git_version_info + 
destination = str(tmp_path / "test_git_checkout_commit_from_remote")
+
+    with working_dir(destination, create=True):
+        spack.util.git.init_git_repo(repo)
+        spack.util.git.pull_checkout_commit(commits[0], remote="origin", depth=1)
+
+    assert commits[0] in git("rev-parse", "HEAD", output=str)
+
+
 def test_pull_checkout_tag(git, tmp_path: pathlib.Path, mock_git_version_info):
     repo, _, _ = mock_git_version_info
-    destination = tmp_path / "test_git_checkout_tag"
+    destination = str(tmp_path / "test_git_checkout_tag")
 
     with working_dir(destination, create=True):
         spack.util.git.init_git_repo(repo)
@@ -67,7 +80,7 @@ def test_pull_checkout_branch(git, tmp_path: pathlib.Path, mock_git_version_info):
 def test_pull_checkout_branch(git, tmp_path: pathlib.Path, mock_git_version_info):
     repo, _, _ = mock_git_version_info
-    destination = tmp_path / "test_git_checkout_branch"
+    destination = str(tmp_path / "test_git_checkout_branch")
 
     with working_dir(destination, create=True):
         spack.util.git.init_git_repo(repo)
@@ -80,3 +93,61 @@ def test_pull_checkout_branch(git, tmp_path: pathlib.Path, mock_git_version_info
 
     with pytest.raises(exe.ProcessError):
         spack.util.git.pull_checkout_branch("main")
+
+
+@pytest.mark.parametrize(
+    "input,answer",
+    (
+        ["git version 1.7.1", (1, 7, 1)],
+        ["git version 2.34.1.windows.2", (2, 34, 1)],
+        ["git version 2.50.1 (Apple Git-155)", (2, 50, 1)],
+        ["git version 1.2.3.4.150.abcd10", (1, 2, 3, 4, 150)],
+    ),
+)
+def test_extract_git_version(mock_util_executable, input, answer):
+    _, _, registered_responses = mock_util_executable
+    registered_responses["--version"] = input
+    git = spack.util.git.GitExecutable()
+    assert git.version == answer
+
+
+def test_mock_git_exe(mock_util_executable):
+    log, should_fail, _ = mock_util_executable
+    should_fail.append("clone")
+    git = spack.util.git.GitExecutable()
+    with pytest.raises(exe.ProcessError):
+        git("clone")
+    assert git.returncode == 1
+    git("status")
+    assert git.returncode == 0
+    assert "clone" in "\n".join(log)
+    assert "status" in "\n".join(log)
+
+
+@pytest.mark.parametrize("git_version", ("1.5.0", "1.3.0"))
+def test_git_exe_conditional_option(mock_util_executable, git_version):
+    log, _, registered_responses = mock_util_executable
+    min_version = (1, 4, 1)
+    registered_responses["git --version"] = git_version
+    git = spack.util.git.GitExecutable("git")
+    mock_opt = spack.util.git.VersionConditionalOption("--maybe", min_version=min_version)
+    args = mock_opt(git.version)
+    if git.version >= min_version:
+        assert "--maybe" in args
+    else:
+        assert not args
+
+
+@pytest.mark.parametrize(
+    "git_version,omitted_opts",
+    (("2.18.0", ["--filter=blob:none"]), ("1.8.0", ["--filter=blob:none", "--depth"])),
+)
+def test_git_init_fetch_omissions(mock_util_executable, git_version, omitted_opts):
+    log, _, registered_responses = mock_util_executable
+    registered_responses["git --version"] = git_version
+    git = spack.util.git.GitExecutable("git")
+    url = "https://foo.git"
+    ref = "v1.2.3"
+    spack.util.git.git_init_fetch(url, ref, git_exe=git)
+    for opt in omitted_opts:
+        assert all(opt not in call for call in log)
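The two tests above pin down the version-gating contract: a VersionConditionalOption only yields its flag when the detected git version meets the minimum, which is how git_init_fetch ends up omitting --filter=blob:none and --depth on older clients. A rough standalone sketch of that pattern (the class name and the exact threshold below are illustrative assumptions, not the patch's API; the omission test only shows that 2.18 still lacks the filter):

from typing import List, Tuple


class VersionGatedFlag:
    """Emit a CLI flag only when the tool is new enough (illustrative)."""

    def __init__(self, flag: str, min_version: Tuple[int, ...]) -> None:
        self.flag = flag
        self.min_version = min_version

    def __call__(self, version: Tuple[int, ...]) -> List[str]:
        # Tuples compare element-wise, which matches version ordering here.
        return [self.flag] if version >= self.min_version else []


# Assumed threshold: the test above implies the filter appears after 2.18.
blob_filter = VersionGatedFlag("--filter=blob:none", (2, 19, 0))
assert blob_filter((2, 34, 1)) == ["--filter=blob:none"]
assert blob_filter((1, 8, 0)) == []

diff --git a/lib/spack/spack/test/util/ld_so_conf.py b/lib/spack/spack/test/util/ld_so_conf.py
index cb70f9eb9b87f0..5b886481234754 100644
--- a/lib/spack/spack/test/util/ld_so_conf.py
+++ b/lib/spack/spack/test/util/ld_so_conf.py
@@ -4,10 +4,14 @@
 import os
 import pathlib
+import sys
+
+import pytest
 
 import spack.util.ld_so_conf as ld_so_conf
 
 
+@pytest.mark.skipif(sys.platform == "win32", reason="Unix path")
 def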
test_ld_so_conf_parsing(tmp_path: pathlib.Path): cwd = os.getcwd() (tmp_path / "subdir").mkdir() diff --git a/lib/spack/spack/test/util/package_hash.py b/lib/spack/spack/test/util/package_hash.py index 72ad61e97195b8..58a6a3b0c85a5d 100644 --- a/lib/spack/spack/test/util/package_hash.py +++ b/lib/spack/spack/test/util/package_hash.py @@ -174,6 +174,9 @@ def method1(): def method2(): """ELEVEN""" return "TWELVE" + +def empty_func(): + """THIRTEEN""" ''' many_strings_no_docstrings = """\ @@ -189,6 +192,9 @@ def method1(): def method2(): return 'TWELVE' + +def empty_func(): + pass """ @@ -339,7 +345,7 @@ def test_remove_complex_package_logic_filtered(): # has @when("@4.1.0") and raw unicode literals ("mfem", "slf5qyyyhuj66mo5lpuhkrs35akh2zck"), ("mfem@4.0.0", "slf5qyyyhuj66mo5lpuhkrs35akh2zck"), - ("mfem@4.1.0", "yo3ymaulytctas67zjn663ixw5cfyh5u"), + ("mfem@4.1.0", "6tjbezoh2aquz6gmvoz7jf6j6lib65m2"), # has @when("@1.5.0:") ("py-torch", "m3ucsddqr7hjevtgx4cad34nrtqgyjfg"), ("py-torch@1.0", "m3ucsddqr7hjevtgx4cad34nrtqgyjfg"), diff --git a/lib/spack/spack/test/util/path.py b/lib/spack/spack/test/util/path.py index 209d08346fa4c0..8f4ad0c7122a69 100644 --- a/lib/spack/spack/test/util/path.py +++ b/lib/spack/spack/test/util/path.py @@ -135,16 +135,17 @@ def test_path_debug_padded_filter(debug, monkeypatch): assert expected == sup.debug_padded_filter(string) -@pytest.mark.parametrize( - "path,expected", - [ - ("/home/spack/path/to/file.txt", "/home/spack/path/to/file.txt"), - ("file:///home/another/config.yaml", "/home/another/config.yaml"), - ("path/to.txt", os.path.join(os.environ["SPACK_ROOT"], "path", "to.txt")), - (r"C:\Files (x86)\Windows\10", r"C:\Files (x86)\Windows\10"), - (r"E:/spack stage", "E:\\spack stage"), - ], -) -def test_canonicalize_file(path, expected): - """Confirm canonicalize path handles local files and file URLs.""" - assert sup.canonicalize_path(path) == os.path.normpath(expected) +@pytest.mark.not_on_windows("Unix path") +def test_canonicalize_file_unix(): + assert sup.canonicalize_path("/home/spack/path/to/file.txt") == "/home/spack/path/to/file.txt" + assert sup.canonicalize_path("file:///home/another/config.yaml") == "/home/another/config.yaml" + + +@pytest.mark.only_windows("Windows path") +def test_canonicalize_file_windows(): + assert sup.canonicalize_path(r"C:\Files (x86)\Windows\10") == r"C:\Files (x86)\Windows\10" + assert sup.canonicalize_path(r"E:/spack stage") == r"E:\spack stage" + + +def test_canonicalize_file_relative(): + assert sup.canonicalize_path("path/to.txt") == os.path.join(os.getcwd(), "path", "to.txt") diff --git a/lib/spack/spack/test/util/remote_file_cache.py b/lib/spack/spack/test/util/remote_file_cache.py index 1f1381a1f8b6e9..4c5e81332acae9 100644 --- a/lib/spack/spack/test/util/remote_file_cache.py +++ b/lib/spack/spack/test/util/remote_file_cache.py @@ -28,21 +28,25 @@ def test_rfc_local_path_bad_scheme(path, err): _ = rfc_util.local_path(path, "") -@pytest.mark.parametrize( - "path,expected", - [ - ("/a/b/c/d/e/config.py", "/a/b/c/d/e/config.py"), - ("file:///this/is/a/file/url/include.yaml", "/this/is/a/file/url/include.yaml"), - ( - "relative/packages.txt", - os.path.join(os.environ["SPACK_ROOT"], "relative", "packages.txt"), - ), - (r"C:\Files (x86)\Windows\10", r"C:\Files (x86)\Windows\10"), - (r"D:/spack stage", "D:\\spack stage"), - ], -) -def test_rfc_local_file(path, expected): - assert rfc_util.local_path(path, "") == os.path.normpath(expected) +@pytest.mark.not_on_windows("Unix path") +def test_rfc_local_file_unix(): + assert 
rfc_util.local_path("/a/b/c/d/e/config.py", "") == "/a/b/c/d/e/config.py" + assert ( + rfc_util.local_path("file:///this/is/a/file/url/include.yaml", "") + == "/this/is/a/file/url/include.yaml" + ) + + +@pytest.mark.only_windows("Windows path") +def test_rfc_local_file_windows(): + assert rfc_util.local_path(r"C:\Files (x86)\Windows\10", "") == r"C:\Files (x86)\Windows\10" + assert rfc_util.local_path(r"D:/spack stage", "") == r"D:\spack stage" + + +def test_rfc_local_file_relative(): + path = "relative/packages.txt" + expected = os.path.join(os.getcwd(), "relative", "packages.txt") + assert rfc_util.local_path(path, "") == expected def test_rfc_remote_local_path_no_dest(): @@ -52,9 +56,9 @@ def test_rfc_remote_local_path_no_dest(): packages_yaml_sha256 = ( - "6a1b26c857ca7e5bcd7342092e2f218da43d64b78bd72771f603027ea3c8b4af" + "8d428c600b215e3b4a207a08236659dfc2c9ae2782c35943a00ee4204a135702" if sys.platform != "win32" - else "ae3239d769f9e6dc137a998489b0d44c70b03e21de4ecd6a623a3463a1a5c3f4" + else "6c094ec3ee1eb5068860cdd97d8da965bf281be29e60ab9afc8f6e4d72d24f21" ) diff --git a/lib/spack/spack/test/util/spack_lock_wrapper.py b/lib/spack/spack/test/util/spack_lock_wrapper.py index 2cd5523d45b2f3..dc252103355738 100644 --- a/lib/spack/spack/test/util/spack_lock_wrapper.py +++ b/lib/spack/spack/test/util/spack_lock_wrapper.py @@ -33,6 +33,7 @@ def test_disable_locking(tmp_path: pathlib.Path): # "Disable" mock_stage fixture to avoid subdir permissions issues on cleanup. @pytest.mark.nomockstage +@pytest.mark.skipif(getuid() == 0, reason="user is root") def test_lock_checks_user(tmp_path: pathlib.Path): """Ensure lock checks work with a self-owned, self-group repo.""" uid = getuid() diff --git a/lib/spack/spack/test/util/unparse/unparse.py b/lib/spack/spack/test/util/unparse/unparse.py index f22d04fa57c02e..4d8a3b4f7005a3 100644 --- a/lib/spack/spack/test/util/unparse/unparse.py +++ b/lib/spack/spack/test/util/unparse/unparse.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: Python-2.0 import ast -import codecs import os import sys import tokenize @@ -20,7 +19,7 @@ def read_pyfile(filename): string), taking into account the file encoding.""" with open(filename, "rb") as pyfile: encoding = tokenize.detect_encoding(pyfile.readline)[0] - with codecs.open(filename, "r", encoding=encoding) as pyfile: + with open(filename, "r", encoding=encoding) as pyfile: source = pyfile.read() return source @@ -553,3 +552,37 @@ def test_async_with_as(): ) def test_match_literal(literal): check_ast_roundtrip(literal) + + +@pytest.mark.skipif(sys.version_info < (3, 14), reason="Not supported < 3.14") +def test_tstrings(): + check_ast_roundtrip("t'foo'") + check_ast_roundtrip("t'foo {bar}'") + check_ast_roundtrip("t'foo {bar!s:.2f}'") + check_ast_roundtrip("t'{a + b}'") + check_ast_roundtrip("t'{a + b:x}'") + check_ast_roundtrip("t'{a + b!s}'") + check_ast_roundtrip("t'{ {a}}'") + check_ast_roundtrip("t'{ {a}=}'") + check_ast_roundtrip("t'{{a}}'") + check_ast_roundtrip("t''") + + +def test_subscript_with_tuple(): + """Test change in visit_Subscript/visit_Index is_non_empty_tuple.""" + check_ast_roundtrip("a[()]") + check_ast_roundtrip("a[b]") + check_ast_roundtrip("a[(*b,)]") + check_ast_roundtrip("a[(1, 2)]") + check_ast_roundtrip("a[(1, *b)]") + + +@pytest.mark.skipif(sys.version_info < (3, 11), reason="Not supported < 3.11") +def test_subscript_without_tuple(): + """Test change in visit_Subscript/visit_Index is_non_empty_tuple.""" + check_ast_roundtrip("a[*b]") + check_ast_roundtrip("a[1, *b]") + + +def 
test_attribute_on_int(): + check_ast_roundtrip("1 .__abs__()") diff --git a/lib/spack/spack/test/util/util_url.py b/lib/spack/spack/test/util/util_url.py index eb4bdeb0b27982..af44ec8b28dbe9 100644 --- a/lib/spack/spack/test/util/util_url.py +++ b/lib/spack/spack/test/util/util_url.py @@ -20,6 +20,8 @@ def test_url_local_file_path(tmp_path: pathlib.Path): with open(path, "wb") as f: f.write(b"hello world") + assert url_util.path_to_file_url(path).startswith("file://") + # Go from path -> url -> path. roundtrip = url_util.local_file_path(url_util.path_to_file_url(path)) diff --git a/lib/spack/spack/test/utilities.py b/lib/spack/spack/test/utilities.py index 85be6eb3f40577..c5ddae6f4a827b 100644 --- a/lib/spack/spack/test/utilities.py +++ b/lib/spack/spack/test/utilities.py @@ -25,6 +25,5 @@ def __init__(self, command_name): def __call__(self, *argv, **kwargs): self.parser.add_command(self.command_name) - prepend = kwargs["global_args"] if "global_args" in kwargs else [] - args, unknown = self.parser.parse_known_args(prepend + [self.command_name] + list(argv)) + args, unknown = self.parser.parse_known_args([self.command_name] + list(argv)) return args diff --git a/lib/spack/spack/test/variant.py b/lib/spack/spack/test/variant.py index d32bb35fb336ff..a6de9fc91eb9aa 100644 --- a/lib/spack/spack/test/variant.py +++ b/lib/spack/spack/test/variant.py @@ -519,20 +519,20 @@ def test_disjoint_set_initialization(): assert d.default == "none" assert d.multi is True - assert set(x for x in d) == set(["none", "a", "b", "c", "e", "f"]) + assert list(d) == ["none", "a", "b", "c", "e", "f"] def test_disjoint_set_fluent_methods(): # Construct an object without the empty set d = disjoint_sets(("a",), ("b", "c"), ("e", "f")).prohibit_empty_set() - assert set(("none",)) not in d.sets + assert ("none",) not in d.sets # Call this 2 times to check that no matter whether # the empty set was allowed or not before, the state # returned is consistent. for _ in range(2): d = d.allow_empty_set() - assert set(("none",)) in d.sets + assert ("none",) in d.sets assert "none" in d assert "none" in [x for x in d] assert "none" in d.feature_values @@ -550,7 +550,7 @@ def test_disjoint_set_fluent_methods(): # returned is consistent. 
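The fluent calls above and below amount to toggling whether the sentinel empty set ("none",) belongs to the allowed combinations. Collected into one small runnable sketch (assuming, as this test module does, that disjoint_sets is importable from spack.variant):

from spack.variant import disjoint_sets

# Values may come from at most one of the listed groups; "none" models the
# empty choice and is allowed by default.
d = disjoint_sets(("a",), ("b", "c"), ("e", "f"))
assert d.default == "none" and d.multi

d = d.prohibit_empty_set()  # ("none",) is dropped from d.sets
d = d.allow_empty_set()  # ...and restored here, so "none" is a value again
assert ("none",) in d.sets and "none" in d.feature_values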
for _ in range(2): d = d.prohibit_empty_set() - assert set(("none",)) not in d.sets + assert ("none",) not in d.sets assert "none" not in d assert "none" not in [x for x in d] assert "none" not in d.feature_values diff --git a/lib/spack/spack/test/web.py b/lib/spack/spack/test/web.py index b71d86acbfc281..3a5d91b6f9a2ea 100644 --- a/lib/spack/spack/test/web.py +++ b/lib/spack/spack/test/web.py @@ -276,7 +276,7 @@ def head_object(self, Bucket=None, Key=None): raise self.ClientError -def test_gather_s3_information(monkeypatch, capfd): +def test_gather_s3_information(monkeypatch): mirror = spack.mirrors.mirror.Mirror( { "fetch": { @@ -332,7 +332,7 @@ def get_s3_session(url, method="fetch"): assert "Deleted keytwo" in err -def test_s3_url_exists(monkeypatch, capfd): +def test_s3_url_exists(monkeypatch): def get_s3_session(url, method="fetch"): return MockS3Client() diff --git a/lib/spack/spack/traverse.py b/lib/spack/spack/traverse.py index 5d1712c60a5670..f0f960d408a552 100644 --- a/lib/spack/spack/traverse.py +++ b/lib/spack/spack/traverse.py @@ -4,6 +4,7 @@ from collections import defaultdict, deque from typing import ( + TYPE_CHECKING, Any, Callable, Iterable, @@ -20,7 +21,9 @@ from spack.vendor.typing_extensions import Literal import spack.deptypes as dt -import spack.spec + +if TYPE_CHECKING: + import spack.spec # Export only the high-level API. __all__ = ["traverse_edges", "traverse_nodes", "traverse_tree"] @@ -227,10 +230,10 @@ def get_visitor_from_args( def with_artificial_edges(specs): """Initialize a deque of edges from an artificial root node to the root specs.""" + from spack.spec import DependencySpec + return deque( - EdgeAndDepth( - edge=spack.spec.DependencySpec(parent=None, spec=s, depflag=0, virtuals=()), depth=0 - ) + EdgeAndDepth(edge=DependencySpec(parent=None, spec=s, depflag=0, virtuals=()), depth=0) for s in specs ) diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py index 8b6a3c7d90cbac..0aabc7e40d5525 100644 --- a/lib/spack/spack/url.py +++ b/lib/spack/spack/url.py @@ -10,7 +10,9 @@ **Example:** when spack is given the following URL: - https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz +.. code-block:: + + https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz It can figure out that the package name is ``hdf``, and that it is at version ``4.2.12``. This is useful for making the creation of packages simple: a user @@ -18,7 +20,9 @@ Spack can also figure out that it can most likely download 4.2.6 at this URL: - https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz +.. code-block:: + + https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz This is useful if a user asks for a package at a particular version number; spack doesn't need anyone to tell it where to get the tarball even though @@ -28,7 +32,7 @@ import os import pathlib import re -from typing import Any, Dict, Optional, Sequence, Union +from typing import Any, Dict, Optional, Sequence, Tuple, Union import spack.error import spack.llnl.url @@ -44,7 +48,7 @@ # -def strip_name_suffixes(path, version): +def strip_name_suffixes(path: str, version: Union[str, spack.version.StandardVersion]) -> str: """Most tarballs contain a package name followed by a version number. 
However, some also contain extraneous information in-between the name and version: @@ -63,8 +67,8 @@ def strip_name_suffixes(path, version): * ``jpeg`` Args: - path (str): The filename or URL for the package - version (str): The version detected for this URL + path: The filename or URL for the package + version: The version detected for this URL Returns: str: The ``path`` with any extraneous suffixes removed @@ -116,19 +120,20 @@ def strip_name_suffixes(path, version): return path -def parse_version_offset(path): +def parse_version_offset(path: str) -> Tuple[str, int, int, int, str]: """Try to extract a version string from a filename or URL. Args: path (str): The filename or URL for the package Returns: - tuple: A tuple containing: - version of the package, - first index of version, - length of version string, - the index of the matching regex, - the matching regex + A tuple containing + + * version of the package + * first index of version + * length of version string + * the index of the matching regex + * the matching regex Raises: UndetectableVersionError: If the URL does not match any regexes @@ -300,20 +305,23 @@ def parse_version(path: str) -> spack.version.StandardVersion: return spack.version.StandardVersion.from_string(version) -def parse_name_offset(path, v=None): +def parse_name_offset( + path: str, v: Optional[Union[str, spack.version.StandardVersion]] = None +) -> Tuple[str, int, int, int, str]: """Try to determine the name of a package from its filename or URL. Args: - path (str): The filename or URL for the package - v (str): The version of the package + path: The filename or URL for the package + v: The version of the package Returns: - tuple: A tuple containing: - name of the package, - first index of name, - length of name, - the index of the matching regex, - the matching regex + A tuple containing + + * name of the package + * first index of name + * length of name + * the index of the matching regex + * the matching regex Raises: UndetectableNameError: If the URL does not match any regexes @@ -429,12 +437,12 @@ def parse_name(path, ver=None): return name -def parse_name_and_version(path): +def parse_name_and_version(path: str) -> Tuple[str, spack.version.StandardVersion]: """Try to determine the name of a package and extract its version from its filename or URL. Args: - path (str): The filename or URL for the package + path: The filename or URL for the package Returns: tuple: a tuple containing the package (name, version) @@ -512,17 +520,17 @@ def substitute_version(path: str, new_version) -> str: Simple example: - .. code-block:: python + .. code-block:: pycon - substitute_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '2.9.3') - >>> 'http://www.mr511.de/software/libelf-2.9.3.tar.gz' + >>> substitute_version("http://www.mr511.de/software/libelf-0.8.13.tar.gz", "2.9.3") + "http://www.mr511.de/software/libelf-2.9.3.tar.gz" Complex example: - .. code-block:: python + .. code-block:: pycon - substitute_version('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz', '2.3') - >>> 'https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz' + >>> substitute_version("https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz", "2.3") + "https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz" """ (name, ns, nl, noffs, ver, vs, vl, voffs) = substitution_offsets(path) @@ -541,11 +549,11 @@ def color_url(path, **kwargs): """Color the parts of the url according to Spack's parsing. 
Colors are: - | Cyan: The version found by :func:`parse_version_offset`. - | Red: The name found by :func:`parse_name_offset`. - | Green: Instances of version string from :func:`substitute_version`. - | Magenta: Instances of the name (protected from substitution). + * Cyan: The version found by :func:`parse_version_offset`. + * Red: The name found by :func:`parse_name_offset`. + * Green: Instances of version string from :func:`substitute_version`. + * Magenta: Instances of the name (protected from substitution). Args: path (str): The filename or URL for the package diff --git a/lib/spack/spack/url_buildcache.py b/lib/spack/spack/url_buildcache.py index e73a718d6b86df..24a3c0cca61822 100644 --- a/lib/spack/spack/url_buildcache.py +++ b/lib/spack/spack/url_buildcache.py @@ -2,7 +2,6 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import codecs import enum import fnmatch import gzip @@ -11,7 +10,10 @@ import os import re import shutil +import urllib.parse from contextlib import closing, contextmanager +from datetime import datetime +from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, Callable, Dict, List, Optional, Tuple, Type @@ -39,9 +41,6 @@ #: Version 3: Introduces content-addressable tarballs CURRENT_BUILD_CACHE_LAYOUT_VERSION = 3 -#: The layout version spack can current install -SUPPORTED_LAYOUT_VERSIONS = (3, 2) - #: The name of the default buildcache index manifest file INDEX_MANIFEST_FILE = "index.manifest.json" @@ -54,6 +53,8 @@ class BuildcacheComponent(enum.Enum): they're used to map buildcache objects to their respective media types. """ + # manifest files + MANIFEST = enum.auto() # metadata file for a binary package SPEC = enum.auto() # things that live in the blobs directory @@ -170,7 +171,7 @@ class URLBuildcacheEntry: To help with downloading, this class manages two spack.spec.Stage objects internally, which must be destroyed when finished. Specifically, if you call either of the following methods on an instance, you must eventually also - call destroy(): + call destroy():: fetch_metadata() fetch_archive() @@ -192,6 +193,7 @@ class URLBuildcacheEntry: PUBLIC_KEY_INDEX_MEDIATYPE = "application/vnd.spack.keyindex.v1+json" BUILDCACHE_INDEX_FILE = "index.manifest.json" COMPONENT_PATHS = { + BuildcacheComponent.MANIFEST: [f"v{LAYOUT_VERSION}", "manifests"], BuildcacheComponent.BLOB: ["blobs"], BuildcacheComponent.INDEX: [f"v{LAYOUT_VERSION}", "manifests", "index"], BuildcacheComponent.KEY: [f"v{LAYOUT_VERSION}", "manifests", "key"], @@ -245,7 +247,7 @@ def maybe_push_layout_json(cls, mirror_url: str) -> None: @classmethod def get_base_url(cls, manifest_url: str) -> str: - """Given any manifest url (i.e. one containing 'v3/manifests/') return the + """Given any manifest url (i.e. 
one containing ``v3/manifests/``) return the
        base part of the url"""
         rematch = cls.SPEC_URL_REGEX.match(manifest_url)
         if not rematch:
@@ -253,11 +255,11 @@ def get_base_url(cls, manifest_url: str) -> str:
         return rematch.group(1)
 
     @classmethod
-    def get_index_url(cls, mirror_url: str):
+    def get_index_url(cls, mirror_url: str, view: Optional[str] = None):
         return url_util.join(
             mirror_url,
             *cls.get_relative_path_components(BuildcacheComponent.INDEX),
-            cls.BUILDCACHE_INDEX_FILE,
+            url_util.join(view or "", cls.BUILDCACHE_INDEX_FILE),
         )
 
     @classmethod
@@ -284,12 +286,12 @@ def get_manifest_url(cls, spec: spack.spec.Spec, mirror_url: str) -> str:
 
     @classmethod
     def get_buildcache_component_include_pattern(
-        cls, buildcache_component: Optional[BuildcacheComponent] = None
+        cls, buildcache_component: BuildcacheComponent
     ) -> str:
         """Given a buildcache component, return the glob pattern that can be
         used to match it in a directory listing. If ``BuildcacheComponent.MANIFEST`` is
         provided, return a catch-all pattern that will match all buildcache manifests."""
-        if buildcache_component is None:
+        if buildcache_component is BuildcacheComponent.MANIFEST:
             return "*.manifest.json"
         elif buildcache_component == BuildcacheComponent.SPEC:
             return "*.spec.manifest.json"
@@ -424,7 +426,7 @@ def verify_and_extract_manifest(cls, manifest_contents: str, verify: bool = Fals
         magic_string = "-----BEGIN PGP SIGNED MESSAGE-----"
         if manifest_contents.startswith(magic_string):
             if verify:
-                # Rry to verify and raise if we fail
+                # Try to verify and raise if we fail
                 with TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
                     manifest_path = os.path.join(tmpdir, "manifest.json.sig")
                     with open(manifest_path, "w", encoding="utf-8") as fd:
@@ -433,10 +435,9 @@ def verify_and_extract_manifest(cls, manifest_contents: str, verify: bool = Fals
                     raise NoVerifyException("Signature could not be verified")
             return spack.spec.Spec.extract_json_from_clearsig(manifest_contents)
-        else:
-            if verify:
-                raise NoVerifyException("Required signature was not found on manifest")
-            return json.loads(manifest_contents)
+        elif verify:
+            raise NoVerifyException("Required signature was not found on manifest")
+        return json.loads(manifest_contents)
 
     def read_manifest(self, manifest_url: Optional[str] = None) -> BuildcacheManifest:
         """Read and process the buildcache entry manifest.
@@ -465,7 +466,7 @@ def read_manifest(self, manifest_url: Optional[str] = None) -> BuildcacheManifes
         try:
             _, _, manifest_file = web_util.read_from_url(manifest_url)
-            manifest_contents = codecs.getreader("utf-8")(manifest_file).read()
+            manifest_contents = io.TextIOWrapper(manifest_file, encoding="utf-8").read()
         except (web_util.SpackWebError, OSError) as e:
             raise BuildcacheEntryError(f"Error reading manifest at {manifest_url}") from e
@@ -566,6 +567,7 @@ def push_manifest(
         # write the manifest to a temporary location
         manifest_file_name = f"{manifest_name}.manifest.json"
         manifest_path = os.path.join(tmpdir, manifest_file_name)
+        os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
         with open(manifest_path, "w", encoding="utf-8") as f:
             json.dump(manifest.to_dict(), f, indent=0, separators=(",", ":"))
             # Note: when using gpg clear sign, we need to avoid long lines (19995
@@ -592,8 +594,8 @@ def push_local_file_as_blob(
         compression: str = "none",
     ) -> None:
         """Convenience method to push a local file to a mirror as a blob. Both manifest
-        and blob are pushed as a component of the given component_type.
If compression - is 'gzip' the blob will be compressed before pushing, otherwise it will be pushed + and blob are pushed as a component of the given component_type. If ``compression`` + is ``"gzip"`` the blob will be compressed before pushing, otherwise it will be pushed uncompressed.""" cache_class = get_url_buildcache_class() checksum_algo = "sha256" @@ -747,6 +749,7 @@ class URLBuildcacheEntryV2(URLBuildcacheEntry): LAYOUT_VERSION = 2 BUILDCACHE_INDEX_FILE = "index.json" COMPONENT_PATHS = { + BuildcacheComponent.MANIFEST: ["build_cache"], BuildcacheComponent.BLOB: ["build_cache"], BuildcacheComponent.INDEX: ["build_cache"], BuildcacheComponent.KEY: ["build_cache", "_pgp"], @@ -967,7 +970,7 @@ def get_manifest_url(cls, spec: spack.spec.Spec, mirror_url: str) -> str: @classmethod def get_buildcache_component_include_pattern( - cls, buildcache_component: Optional[BuildcacheComponent] = None + cls, buildcache_component: BuildcacheComponent ) -> str: raise BuildcacheEntryError("v2 buildcache entries do not have a manifest file") @@ -1076,9 +1079,7 @@ def check_mirror_for_layout(mirror: spack.mirrors.mirror.Mirror): tty.warn(msg) -def _entries_from_cache_aws_cli( - url: str, tmpspecsdir: str, component_type: Optional[BuildcacheComponent] = None -): +def _entries_from_cache_aws_cli(url: str, tmpspecsdir: str, component_type: BuildcacheComponent): """Use aws cli to sync all manifests into a local temporary directory. Args: @@ -1090,24 +1091,24 @@ def _entries_from_cache_aws_cli( A tuple where the first item is a list of local file paths pointing to the manifests that should be read from the mirror, and the second item is a function taking a url or file path and returning - a `URLBuildcacheEntry` for that manifest. + a :class:`URLBuildcacheEntry` for that manifest. """ read_fn = None file_list = None aws = which("aws") cache_class = get_url_buildcache_class(layout_version=CURRENT_BUILD_CACHE_LAYOUT_VERSION) - if not aws: tty.warn("Failed to use aws s3 sync to retrieve specs, falling back to parallel fetch") return file_list, read_fn def file_read_method(manifest_path: str) -> URLBuildcacheEntry: cache_entry = cache_class(mirror_url=url, allow_unsigned=True) - cache_entry.read_manifest(manifest_url=f"file://{manifest_path}") + cache_entry.read_manifest(manifest_url=manifest_path) return cache_entry include_pattern = cache_class.get_buildcache_component_include_pattern(component_type) + component_prefix = cache_class.get_relative_path_components(component_type) sync_command_args = [ "s3", @@ -1116,25 +1117,48 @@ def file_read_method(manifest_path: str) -> URLBuildcacheEntry: "*", "--include", include_pattern, - url, + url_util.join(url, *component_prefix), tmpspecsdir, ] + # Use aws s3 ls to get mtimes of manifests + ls_command_args = ["s3", "ls", "--recursive", url] + s3_ls_regex = re.compile(r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\s+\d+\s+(.+)$") + + filename_to_mtime: Dict[str, float] = {} + tty.debug(f"Using aws s3 sync to download manifests from {url} to {tmpspecsdir}") try: aws(*sync_command_args, output=os.devnull, error=os.devnull) file_list = fsys.find(tmpspecsdir, [include_pattern]) read_fn = file_read_method - except Exception: + + # Use `aws s3 ls` to get mtimes of manifests + for line in aws(*ls_command_args, output=str, error=os.devnull).splitlines(): + match = s3_ls_regex.match(line) + if match: + # Parse the url and use the S3 path of the file to derive the + # local path of the file (i.e. where `aws s3 sync` put it). 
+ parsed_url = urllib.parse.urlparse(url) + s3_path = parsed_url.path.lstrip("/") + filename = match.group(2) + if s3_path and filename.startswith(s3_path): + filename = filename[len(s3_path) :].lstrip("/") + local_path = url_util.join(tmpspecsdir, filename) + + if Path(local_path).exists(): + filename_to_mtime[url_util.path_to_file_url(local_path)] = datetime.strptime( + match.group(1), "%Y-%m-%d %H:%M:%S" + ).timestamp() + except Exception as e: tty.warn("Failed to use aws s3 sync to retrieve specs, falling back to parallel fetch") + raise e - return file_list, read_fn + return filename_to_mtime, read_fn -def _entries_from_cache_fallback( - url: str, tmpspecsdir: str, component_type: Optional[BuildcacheComponent] = None -): +def _entries_from_cache_fallback(url: str, tmpspecsdir: str, component_type: BuildcacheComponent): """Use spack.util.web module to get a list of all the manifests at the remote url. Args: @@ -1146,10 +1170,10 @@ def _entries_from_cache_fallback( A tuple where the first item is a list of absolute file paths or urls pointing to the manifests that should be read from the mirror, and the second item is a function taking a url or file path of a manifest and - returning a `URLBuildcacheEntry` for that manifest. + returning a :class:`URLBuildcacheEntry` for that manifest. """ read_fn = None - file_list = None + filename_to_mtime = None cache_class = get_url_buildcache_class(layout_version=CURRENT_BUILD_CACHE_LAYOUT_VERSION) @@ -1159,25 +1183,26 @@ def url_read_method(manifest_url: str) -> URLBuildcacheEntry: return cache_entry try: - file_list = [ - url_util.join(url, entry) - for entry in web_util.list_url(url, recursive=True) - if fnmatch.fnmatch( - entry, cache_class.get_buildcache_component_include_pattern(component_type) - ) - ] + filename_to_mtime = {} + component_path_parts = cache_class.get_relative_path_components(component_type) + component_prefix: str = url_util.join(url, *component_path_parts) + component_pattern = cache_class.get_buildcache_component_include_pattern(component_type) + for entry in web_util.list_url(component_prefix, recursive=True): + if fnmatch.fnmatch(entry, component_pattern): + entry_url = url_util.join(component_prefix, entry) + stat_result = web_util.stat_url(entry_url) + if stat_result is not None: + filename_to_mtime[entry_url] = stat_result[1] # mtime is second element read_fn = url_read_method except Exception as err: # If we got some kind of S3 (access denied or other connection error), the first non # boto-specific class in the exception is Exception. Just print a warning and return tty.warn(f"Encountered problem listing packages at {url}: {err}") - return file_list, read_fn + return filename_to_mtime, read_fn -def get_entries_from_cache( - url: str, tmpspecsdir: str, component_type: Optional[BuildcacheComponent] = None -): +def get_entries_from_cache(url: str, tmpspecsdir: str, component_type: BuildcacheComponent): """Get a list of all the manifests in the mirror and a function to read them. Args: @@ -1189,7 +1214,7 @@ def get_entries_from_cache( A tuple where the first item is a list of absolute file paths or urls pointing to the manifests that should be read from the mirror, and the second item is a function taking a url or file path and - returning a `URLBuildcacheEntry` for that manifest. + returning a :class:`URLBuildcacheEntry` for that manifest. 
""" callbacks: List[Callable] = [] if url.startswith("s3://"): @@ -1198,9 +1223,9 @@ def get_entries_from_cache( callbacks.append(_entries_from_cache_fallback) for specs_from_cache_fn in callbacks: - file_list, read_fn = specs_from_cache_fn(url, tmpspecsdir, component_type) - if file_list: - return file_list, read_fn + file_to_mtime_mapping, read_fn = specs_from_cache_fn(url, tmpspecsdir, component_type) + if file_to_mtime_mapping: + return file_to_mtime_mapping, read_fn raise ListMirrorSpecsError("Failed to get list of entries from {0}".format(url)) @@ -1228,12 +1253,15 @@ def _get_compressor(compression: str, writable: io.BufferedIOBase) -> io.Buffere @contextmanager def compression_writer(output_path: str, compression: str, checksum_algo: str): """Create and return a writer capable of writing compressed data. Available - options for compression are "gzip" or "none", checksum_algo is used to pick - the checksum algorithm used by the ChecksumWriter. + options for ``compression`` are ``"gzip"`` or ``"none"``, ``checksum_algo`` is used to pick + the checksum algorithm used by the :class:`~spack.util.archive.ChecksumWriter`. - Yields a tuple containing: - io.IOBase: writer that can compress (or not) as it writes - ChecksumWriter: provides checksum and length of written data + Yields: + A tuple containing + + * An :class:`io.BufferedIOBase` writer that can compress (or not) as it writes + * A :class:`~spack.util.archive.ChecksumWriter` that provides checksum and length of + written data """ with open(output_path, "wb") as writer, ChecksumWriter( fileobj=writer, algorithm=hash_fun_for_algo(checksum_algo) @@ -1322,78 +1350,70 @@ def try_verify(specfile_path): return True -class MirrorURLAndVersion: +class MirrorMetadata: """Simple class to hold a mirror url and a buildcache layout version This class is used by BinaryCacheIndex to produce a key used to keep track of downloaded/processed buildcache index files from remote mirrors in some layout version.""" - url: str - version: int + __slots__ = ("url", "version", "view") - def __init__(self, url: str, version: int): + def __init__(self, url: str, version: int, view: Optional[str] = None): self.url = url self.version = version + self.view = view def __str__(self): - return f"{self.url}__v{self.version}" + s = f"{self.url}__v{self.version}" + if self.view: + s += f"__{self.view}" + return s def __eq__(self, other): - if isinstance(other, MirrorURLAndVersion): - return self.url == other.url and self.version == other.version - return False + if not isinstance(other, MirrorMetadata): + return NotImplemented + return self.url == other.url and self.version == other.version and self.view == other.view def __hash__(self): - return hash((self.url, self.version)) + return hash((self.url, self.version, self.view)) @classmethod def from_string(cls, s: str): - parts = s.split("__v") - return cls(parts[0], int(parts[1])) - + m = re.match(r"^(.*)__v([0-9]+)(?:__(.*))?$", s) + if not m: + raise MirrorMetadataError(f"Malformed string {s}") -class MirrorForSpec: - """Simple holder for a mirror (represented by a url and a layout version) and - an associated concrete spec""" + url, version, view = m.groups() + return cls(url, int(version), view) - url_and_version: MirrorURLAndVersion - spec: spack.spec.Spec - - def __init__(self, url_and_version: MirrorURLAndVersion, spec: spack.spec.Spec): - self.url_and_version = url_and_version - self.spec = spec + def strip_view(self) -> "MirrorMetadata": + return MirrorMetadata(self.url, self.version) class 
class InvalidMetadataFile(spack.error.SpackError): """Raised when spack encounters a spec file it cannot understand or process""" - pass - class BuildcacheEntryError(spack.error.SpackError): """Raised for problems finding or accessing binary cache entry on mirror""" - pass - class NoSuchBlobException(spack.error.SpackError): """Raised when a manifest does not have a blob of the requested type""" - pass - class NoVerifyException(BuildcacheEntryError): """Raised if file fails signature verification""" - pass - class UnknownBuildcacheLayoutError(BuildcacheEntryError): """Raised when unrecognized buildcache layout version is encountered""" - pass - class ListMirrorSpecsError(spack.error.SpackError): """Raised when unable to retrieve list of specs from the mirror""" + + +class MirrorMetadataError(spack.error.SpackError): + """Raised when unable to interpret a MirrorMetadata string""" diff --git a/lib/spack/spack/user_environment.py b/lib/spack/spack/user_environment.py index 90bb379b8e957c..dca1e5437f1011 100644 --- a/lib/spack/spack/user_environment.py +++ b/lib/spack/spack/user_environment.py @@ -16,17 +16,16 @@ spack_loaded_hashes_var = "SPACK_LOADED_HASHES" -def prefix_inspections(platform): +def prefix_inspections(platform: str) -> dict: """Get list of prefix inspections for platform Arguments: - platform (str): the name of the platform to consider. The platform - determines what environment variables Spack will use for some - inspections. + platform: the name of the platform to consider. The platform determines what environment + variables Spack will use for some inspections. Returns: - A dictionary mapping subdirectory names to lists of environment - variables to modify with that directory if it exists. + A dictionary mapping subdirectory names to lists of environment variables to modify with + that directory if it exists. """ inspections = spack.config.get("modules:prefix_inspections") if isinstance(inspections, dict): @@ -117,4 +116,21 @@ def environment_modifications_for_specs( if view: project_env_mods(*topo_ordered, view=view, env=env) + # we don't want to set PYTHONPATH to the default search path in virtual environments + view_python_pattern = re.compile( + r"^" + re.escape(os.path.join(view.root, "lib")) + r"/python[^/]+/site-packages$" + ) + + mods = [ + mod.value + for mod in env.env_modifications + if ( + isinstance(mod, environment.PrependPath) + and mod.name == "PYTHONPATH" + and view_python_pattern.match(mod.value) + ) + ] + + for modif in mods: + env.remove_path("PYTHONPATH", modif) return env diff --git a/lib/spack/spack/util/archive.py b/lib/spack/spack/util/archive.py index c78566ad2aab51..99c155f1b21112 100644 --- a/lib/spack/spack/util/archive.py +++ b/lib/spack/spack/util/archive.py @@ -9,11 +9,11 @@ import tarfile from contextlib import closing, contextmanager from gzip import GzipFile -from typing import Callable, Dict, List, Tuple +from typing import Callable, Dict, Generator, List, Tuple from spack.llnl.util import tty from spack.llnl.util.filesystem import readlink -from spack.util.executable import ProcessError, which +from spack.util.git import is_git_commit_sha class ChecksumWriter(io.BufferedIOBase): @@ -97,15 +97,19 @@ def readline(self, size=-1): @contextmanager -def gzip_compressed_tarfile(path): +def gzip_compressed_tarfile( + path: str, +) -> Generator[Tuple[tarfile.TarFile, ChecksumWriter, ChecksumWriter], None, None]: """Create a reproducible, gzip compressed tarfile, and keep track of shasums of both the compressed and uncompressed tarfile.
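A usage sketch: ``with gzip_compressed_tarfile(path) as (tar, gzip_checksum, tarfile_checksum):``, then populate ``tar`` via :meth:`tarfile.TarFile.add`; once the block exits, the two checksum writers provide the digests and lengths of the written data.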
Reproducibility is achieved by normalizing the gzip header (no file name and zero mtime). - Yields a tuple of the following: - tarfile.TarFile: tarfile object - ChecksumWriter: checksum of the gzip compressed tarfile - ChecksumWriter: checksum of the uncompressed tarfile + Yields: + A tuple of three elements + + * :class:`tarfile.TarFile`: tarfile object + * :class:`ChecksumWriter`: checksum of the gzip compressed tarfile + * :class:`ChecksumWriter`: checksum of the uncompressed tarfile """ # Create gzip compressed tarball of the install prefix # 1) Use explicit empty filename and mtime 0 for gzip header reproducibility. @@ -242,35 +246,38 @@ def reproducible_tarfile_from_prefix( dir_stack.extend(reversed(new_dirs)) # we pop, so reverse to stay alphabetical -def _git_prefix(archive_path, tar): - # This is an annoying method, but since we always have a prefix and can't gaurantee what - # it is we need this. - paths = tar("-tf", archive_path, output=str, error=str, fail_on_error=False) - if paths: - paths = paths.strip().split() - for p in paths: - if p.endswith(".git/"): - return p[:-5] - return "" - - def retrieve_commit_from_archive(archive_path, ref): - """extract git data from an archive with out expanding it""" + """Extract git data from an archive without expanding it + + Opens the archive and searches for ``.git/HEAD``. Returns HEAD directly if it is a commit + (detached head or lightweight tag); otherwise attempts to read the ref that ``.git/HEAD`` + points to and returns the commit associated with it. + """ if not os.path.isfile(archive_path): raise FileNotFoundError(f"The file {archive_path} does not exist") - tar = which("tar", required=True) - prefix = _git_prefix(archive_path, tar) - # try branch, tags then detached states - for ref_path in [f"refs/heads/{ref}/", f"refs/tags/{ref}/", "HEAD"]: - try: - commit = tar( - "-Oxzf", archive_path, f"{prefix}.git/{ref_path}", output=str, error=str - ).strip() - if commit and len(commit) == 40: - return commit - except ProcessError: - pass - - tty.warn(f"Archive {archive_path} does not appear to contain git data") - return None + try: + with tarfile.open(archive_path, "r") as tar: + names = tar.getnames() + # since we always have a prefix and can't guarantee the value we need this lookup. + prefix = "" + for name in names: + if name.endswith(".git"): + prefix = name[:-4] + break + if f"{prefix}.git/HEAD" in names: + head = tar.extractfile(f"{prefix}.git/HEAD").read().decode("utf-8").strip() + if is_git_commit_sha(head): + # detached HEAD / lightweight tag + return head + else: + # refs in HEAD have the format "ref: <refname>" + ref = head.split()[1] + contents = ( + tar.extractfile(f"{prefix}.git/{ref}").read().decode("utf-8").strip() + ) + if is_git_commit_sha(contents): + return contents + except tarfile.ReadError: + tty.warn(f"Archive {archive_path} does not appear to contain git data") + return diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py index f45f3f84b5c1f9..dc7ba72ca95d1a 100644 --- a/lib/spack/spack/util/compression.py +++ b/lib/spack/spack/util/compression.py @@ -279,7 +279,7 @@ def _system_7zip(archive_file): def decompressor_for(path: str, extension: Optional[str] = None): """Returns appropriate decompression/extraction algorithm function pointer for provided extension.
If ``extension`` is ``None``, it is computed - from the `path` and the decompression function is derived + from the ``path`` and the decompression function is derived from that information.""" if not extension: extension = extension_from_magic_numbers(path, decompress=True) @@ -558,7 +558,7 @@ def _maybe_abbreviate_extension(path: str, extension: str) -> str: def extension_from_magic_numbers(path: str, decompress: bool = False) -> Optional[str]: """Return typical extension without leading ``.`` of a compressed file or archive at the given - path, based on its magic numbers, similar to the `file` utility. Notice that the extension + path, based on its magic numbers, similar to the ``file`` utility. Notice that the extension returned from this function may not coincide with the file's given extension. Args: diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py index 43e0a5b1b6b06b..9d514f6c514043 100644 --- a/lib/spack/spack/util/crypto.py +++ b/lib/spack/spack/util/crypto.py @@ -103,7 +103,7 @@ class Checker: with. e.g., if the digest is 32 hex characters long this will use md5. - Example: know your tarball should hash to 'abc123'. You want + Example: you know your tarball should hash to ``abc123``. You want to check files against this. You would use this class like so:: hexdigest = 'abc123' diff --git a/lib/spack/spack/util/ctest_log_parser.py b/lib/spack/spack/util/ctest_log_parser.py index ae63fc2c4a9170..f8cfe16f2ae14f 100644 --- a/lib/spack/spack/util/ctest_log_parser.py +++ b/lib/spack/spack/util/ctest_log_parser.py @@ -59,7 +59,9 @@ This is a python port of the regular expressions CTest uses to parse log files here: - https://github.com/Kitware/CMake/blob/master/Source/CTest/cmCTestBuildHandler.cxx +.. code-block:: + + https://github.com/Kitware/CMake/blob/master/Source/CTest/cmCTestBuildHandler.cxx This file takes the regexes verbatim from there and adds some parsing algorithms that duplicate the way CTest scrapes log files. To keep this @@ -74,7 +76,7 @@ import threading import time from contextlib import contextmanager -from typing import Optional, TextIO, Union +from typing import List, Optional, TextIO, Tuple, Union _error_matches = [ "^FAIL: ", @@ -383,7 +385,9 @@ def stringify(elt): print("%16.2f %s" % (self.timings[index][i] * 1e6, stringify(elt))) index += 1 - def parse(self, stream: Union[str, TextIO], context: int = 6, jobs: Optional[int] = None): + def parse( + self, stream: Union[str, TextIO], context: int = 6, jobs: Optional[int] = None + ) -> Tuple[List[BuildError], List[BuildWarning]]: """Parse a log file by searching each line for errors and warnings. Args: @@ -391,8 +395,7 @@ def parse(self, stream: Union[str, TextIO], context: int = 6, jobs: Optional[int context: lines of context to extract around each log event Returns: - (tuple): two lists containing ``BuildError`` and - ``BuildWarning`` objects. + two lists containing :class:`BuildError` and :class:`BuildWarning` objects. """ if isinstance(stream, str): with open(stream) as f: diff --git a/lib/spack/spack/util/editor.py b/lib/spack/spack/util/editor.py index 489c557e3d80c2..de65088b74bd7f 100644 --- a/lib/spack/spack/util/editor.py +++ b/lib/spack/spack/util/editor.py @@ -64,9 +64,9 @@ def editor(*args: str, exec_fn: Callable[[str, List[str]], int] = os.execv) -> b This will try to execute the following, in order: - 1. $VISUAL <args> # the "visual" editor (per POSIX) - 2. $EDITOR <args> # the regular editor (per POSIX) - 3. some default editor (see ``_default_editors``) with <args> + 1. 
``$VISUAL <args>``: the "visual" editor (per POSIX) + 2. ``$EDITOR <args>``: the regular editor (per POSIX) + 3. some default editor (see ``_default_editors``) with ``<args>`` If an environment variable isn't defined, it is skipped. If it points to something that can't be executed, we'll print a @@ -76,7 +76,6 @@ def editor(*args: str, exec_fn: Callable[[str, List[str]], int] = os.execv) -> b Arguments: args: args to pass to editor - Optional Arguments: exec_fn: invoke this function to run; use ``spack.util.editor.executable`` if you want something that returns, instead of the default ``os.execv()``. """ diff --git a/lib/spack/spack/util/elf.py b/lib/spack/spack/util/elf.py index 62b421ae066032..d46b50e47235c6 100644 --- a/lib/spack/spack/util/elf.py +++ b/lib/spack/spack/util/elf.py @@ -677,7 +677,7 @@ def substitute_rpath_and_pt_interp_in_place_or_raise( def pt_interp(path: str) -> Optional[str]: - """Retrieve the interpreter of an executable at `path`.""" + """Retrieve the interpreter of an executable at ``path``.""" try: with open(path, "rb") as f: elf = parse_elf(f, interpreter=True) diff --git a/lib/spack/spack/util/environment.py b/lib/spack/spack/util/environment.py index 3cc42ea2658e57..82140855cd360e 100644 --- a/lib/spack/spack/util/environment.py +++ b/lib/spack/spack/util/environment.py @@ -36,13 +36,15 @@ "C:\\ProgramData", ] SUFFIXES = [] + DEFAULT_SHELL = os.environ.get("SPACK_SHELL", "bat") else: SYSTEM_PATHS = ["/", "/usr", "/usr/local"] SUFFIXES = ["bin", "bin64", "include", "lib", "lib64"] + DEFAULT_SHELL = "sh" SYSTEM_DIRS = [os.path.join(p, s) for s in SUFFIXES for p in SYSTEM_PATHS] + SYSTEM_PATHS -#: used in the compiler wrapper's `/usr/lib|/usr/lib64|...)` case entry +#: used in the compiler wrapper's ``/usr/lib|/usr/lib64|...)`` case entry SYSTEM_DIR_CASE_ENTRY = "|".join(sorted(f'"{d}{suff}"' for d in SYSTEM_DIRS for suff in ("", "/"))) _SHELL_SET_STRINGS = { @@ -93,7 +95,7 @@ def prune_duplicate_paths(paths: List[Path]) -> List[Path]: def get_path(name: str) -> List[Path]: """Given the name of an environment variable containing multiple - paths separated by 'os.pathsep', returns a list of the paths. + paths separated by :data:`os.pathsep`, returns a list of the paths. """ path = os.environ.get(name, "").strip() if path: @@ -102,8 +104,8 @@ def env_flag(name: str) -> bool: - """Given the name of an environment variable, returns True if it is set to - 'true' or to '1', False otherwise. + """Given the name of an environment variable, returns True if its value, lowercased, is + ``true`` or ``1``, False otherwise.
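+ For example, a value of ``1`` or ``TRUE`` yields True, while ``yes`` or an unset variable yields False.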
""" if name in os.environ: value = os.environ[name].lower() @@ -112,7 +114,7 @@ def env_flag(name: str) -> bool: def path_set(var_name: str, directories: List[Path]): - """Sets the variable passed as input to the `os.pathsep` joined list of directories.""" + """Sets the variable passed as input to the :data:`os.pathsep` joined list of directories.""" path_str = os.pathsep.join(str(dir) for dir in directories) os.environ[var_name] = path_str @@ -290,7 +292,7 @@ class NamePathModifier(NameValueModifier): """Base class for modifiers that modify the value of an environment variable that is a path.""" - __slots__ = ("name", "value", "separator", "trace") + __slots__ = () def __init__( self, @@ -569,7 +571,7 @@ def append_flags(self, name: str, value: str, sep: str = " ") -> None: Args: name: name of the environment variable value: flags to be appended - sep: separator for the flags (default: " ") + sep: separator for the flags (default: ``" "``) """ value = _validate_value(name, value) item = AppendFlagsEnv(name, value, separator=sep, trace=self._trace()) @@ -590,7 +592,7 @@ def remove_flags(self, name: str, value: str, sep: str = " ") -> None: Args: name: name of the environment variable value: flags to be removed - sep: separator for the flags (default: " ") + sep: separator for the flags (default: ``" "``) """ value = _validate_value(name, value) item = RemoveFlagsEnv(name, value, separator=sep, trace=self._trace()) @@ -603,7 +605,7 @@ def set_path(self, name: str, elements: ListOfPaths, separator: str = os.pathsep Args: name: name of the environment variable elements: ordered list paths - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ elements = [_validate_path_value(name, x) for x in elements] item = SetPath(name, elements, separator=separator, trace=self._trace()) @@ -617,7 +619,7 @@ def append_path( Args: name: name of the environment variable path: path to be appended - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ path = _validate_path_value(name, path) item = AppendPath(name, path, separator=separator, trace=self._trace()) @@ -631,7 +633,7 @@ def prepend_path( Args: name: name of the environment variable path: path to be prepended - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ path = _validate_path_value(name, path) item = PrependPath(name, path, separator=separator, trace=self._trace()) @@ -645,7 +647,7 @@ def remove_first_path( Args: name: name of the environment variable path: path to be removed - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ path = _validate_path_value(name, path) item = RemoveFirstPath(name, path, separator=separator, trace=self._trace()) @@ -659,7 +661,7 @@ def remove_last_path( Args: name: name of the environment variable path: path to be removed - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ path = _validate_path_value(name, path) item = RemoveLastPath(name, path, separator=separator, trace=self._trace()) @@ -673,7 +675,7 @@ def remove_path( Args: name: name of the environment variable path: path to be removed - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ path = 
_validate_path_value(name, path) item = RemovePath(name, path, separator=separator, trace=self._trace()) @@ -685,7 +687,7 @@ def deprioritize_system_paths(self, name: str, separator: str = os.pathsep) -> N Args: name: name of the environment variable - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ item = DeprioritizeSystemPaths(name, separator=separator, trace=self._trace()) self.env_modifications.append(item) @@ -696,7 +698,7 @@ def prune_duplicate_paths(self, name: str, separator: str = os.pathsep) -> None: Args: name: name of the environment variable - separator: separator for the paths (default: os.pathsep) + separator: separator for the paths (default: :data:`os.pathsep`) """ item = PruneDuplicatePaths(name, separator=separator, trace=self._trace()) self.env_modifications.append(item) @@ -717,7 +719,8 @@ def drop(self, *name) -> bool: return len(old_mods) != len(new_mods) def is_unset(self, variable_name: str) -> bool: - """Returns True if the last modification to a variable is to unset it, False otherwise.""" + """Returns :data:`True` if the last modification to a variable is to unset it, + :data:`False` otherwise.""" modifications = self.group_by_name() if variable_name not in modifications: return False @@ -775,7 +778,7 @@ def apply_modifications(self, env: Optional[MutableMapping[str, str]] = None): def shell_modifications( self, - shell: str = "sh" if sys.platform != "win32" else os.environ.get("SPACK_SHELL", "bat"), + shell: str = DEFAULT_SHELL, explicit: bool = False, env: Optional[MutableMapping[str, str]] = None, ) -> str: @@ -874,9 +877,20 @@ def from_sourcing_file( ] ) + before_kwargs = {**kwargs} + if sys.platform == "win32": + # Windows cannot source os.devnull, but it can echo from it + # so we override the "source" action in the method that + # extracts the env (environment_after_sourcing_files) + if "source_command" not in kwargs: + before_kwargs["source_command"] = "echo" + # Compute the environments before and after sourcing + + # First look at the environment after doing nothing to + # establish baseline before = sanitize( - environment_after_sourcing_files(os.devnull, **kwargs), + environment_after_sourcing_files(os.devnull, **before_kwargs), exclude=exclude, include=include, ) @@ -916,7 +930,7 @@ def from_environment_diff( modified_variables.sort() def return_separator_if_any(*args): - separators = ":", ";" + separators = [os.pathsep] if sys.platform == "win32" else [":", ";"] for separator in separators: for arg in args: if separator in arg: @@ -1061,19 +1075,19 @@ def inspect_path( ``/usr/include`` and ``/usr/lib64``. If found we want to prepend ``/usr/include`` to ``CPATH`` and ``/usr/lib64`` to ``MY_LIB64_PATH``. - .. code-block:: python + .. 
code-block:: python - # Set up the dictionary containing the inspection - inspections = { - 'include': ['CPATH'], - 'lib64': ['MY_LIB64_PATH'] - } + # Set up the dictionary containing the inspection + inspections = { + "include": ["CPATH"], + "lib64": ["MY_LIB64_PATH"] + } - # Get back the list of command needed to modify the environment - env = inspect_path('/usr', inspections) + # Get back the list of commands needed to modify the environment + env = inspect_path("/usr", inspections) - # Eventually execute the commands - env.apply_modifications() + # Finally, execute the commands + env.apply_modifications() """ if exclude is None: exclude = lambda x: False @@ -1154,7 +1168,7 @@ def environment_after_sourcing_files( if sys.platform == "win32": shell_cmd = kwargs.get("shell", "cmd.exe") shell_options = kwargs.get("shell_options", "/C") - suppress_output = kwargs.get("suppress_output", "") + suppress_output = kwargs.get("suppress_output", "> nul") source_command = kwargs.get("source_command", "") else: shell_cmd = kwargs.get("shell", "/bin/bash") @@ -1178,11 +1192,13 @@ def _source_single_file(file_and_args, environment): [source_file, suppress_output, concatenate_on_success, dump_environment_cmd] ) + # Popen's argument processing can break command invocations + # on Windows; compose the command into a string to avoid that processing + cmd = [shell_cmd, *shell_options_list, source_file_arguments] + cmd = " ".join(cmd) if sys.platform == "win32" else cmd + with subprocess.Popen( - [shell_cmd, *shell_options_list, source_file_arguments], - env=environment, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + cmd, env=environment, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as shell: output, _ = shell.communicate() diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py index 657a89a1bab5c6..f42417275647dc 100644 --- a/lib/spack/spack/util/executable.py +++ b/lib/spack/spack/util/executable.py @@ -28,12 +28,12 @@ class Executable: Example usage: - .. code-block:: python + .. code-block:: python - ls = Executable("ls") - ls.add_default_arg("-l") - ls.add_default_env("LC_ALL", "C") - output = ls("-a", output=str) # Run 'ls -l -a' and capture output as string + ls = Executable("ls") + ls.add_default_arg("-l") + ls.add_default_env("LC_ALL", "C") + output = ls("-a", output=str) # Run 'ls -l -a' and capture output as string """ def __init__(self, name: Union[str, Path]) -> None: @@ -437,10 +437,10 @@ def which( Parameters: *args: one or more executables to search for path: the path to search. Defaults to ``PATH`` - required: if set to True, raise an error if executable not found + required: if set to :data:`True`, raise an error if executable not found Returns: - Executable: The first executable that is found in the path + The first executable that is found in the path or :data:`None` if not found. """ exe = which_string(*args, path=path, required=required) return Executable(exe) if exe is not None else None diff --git a/lib/spack/spack/util/file_cache.py b/lib/spack/spack/util/file_cache.py index c4cf403cda09e7..55626da053e5e5 100644 --- a/lib/spack/spack/util/file_cache.py +++ b/lib/spack/spack/util/file_cache.py @@ -148,7 +148,7 @@ def read_transaction(self, key: Union[str, pathlib.Path]): """Get a read transaction on a file cache item. Returns a ReadTransaction context manager and opens the cache file for - reading. 
You can use it like this:: with file_cache_object.read_transaction(key) as cache_file: cache_file.read() diff --git a/lib/spack/spack/util/filesystem.py b/lib/spack/spack/util/filesystem.py index aa5613b7221ff7..6946447aac87a2 100644 --- a/lib/spack/spack/util/filesystem.py +++ b/lib/spack/spack/util/filesystem.py @@ -19,13 +19,13 @@ def fix_darwin_install_name(path: str) -> None: There are two parts of this task: - 1. Use ``install_name("-id", ...)`` to change install name of a single lib - 2. Use ``install_name("-change", ...)`` to change the cross linking between libs. + 1. Use ``install_name_tool -id ...`` to change the install name of a single lib + 2. Use ``install_name_tool -change ...`` to change the cross linking between libs. The function assumes that all libraries are in one folder and currently won't follow subfolders. Parameters: - path: directory in which .dylib files are located + path: directory in which ``.dylib`` files are located """ libs = glob.glob(os.path.join(path, "*.dylib")) install_name_tool = Executable("install_name_tool") diff --git a/lib/spack/spack/util/format.py b/lib/spack/spack/util/format.py index 28b5d7f8360749..70236d7c870e0c 100644 --- a/lib/spack/spack/util/format.py +++ b/lib/spack/spack/util/format.py @@ -2,20 +2,16 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from typing import Optional - -def get_version_lines(version_hashes_dict: dict, url_dict: Optional[dict] = None) -> str: +def get_version_lines(version_hashes_dict: dict) -> str: """ Renders out a set of versions like those found in a package's package.py file for a given set of versions and hashes. Args: - version_hashes_dict (dict): A dictionary of the form: version -> checksum. - url_dict (dict): A dictionary of the form: version -> URL. + version_hashes_dict: A dictionary of the form: version -> checksum. - Returns: - (str): Rendered version lines. + Returns: Rendered version lines. """ version_lines = [] diff --git a/lib/spack/spack/util/gcs.py b/lib/spack/spack/util/gcs.py index c78778178b30cd..b1654a41cc00a8 100644 --- a/lib/spack/spack/util/gcs.py +++ b/lib/spack/spack/util/gcs.py @@ -11,6 +11,7 @@ import sys import urllib.parse import urllib.response +from typing import List from urllib.error import URLError from urllib.request import BaseHandler @@ -96,25 +97,23 @@ def blob(self, blob_path): return self.bucket.blob(blob_path) return None - def get_all_blobs(self, recursive=True, relative=True): + def get_all_blobs(self, recursive: bool = True, relative: bool = True) -> List[str]: """Get a list of all blobs + + Returns: a list of all blobs within this bucket. Args: - relative: If true (default), print blob paths - relative to 'build_cache' directory. - If false, print absolute blob paths (useful for - destruction of bucket) + relative: If true (default), return blob paths relative to 'build_cache' directory. + If false, return absolute blob paths (useful for destruction of bucket) """ tty.debug("Getting GCS blobs... 
Recurse {0} -- Rel: {1}".format(recursive, relative)) - converter = str - if relative: - converter = self._relative_blob_name + converter = self._relative_blob_name if relative else str + + blob_list: List[str] = [] if self.exists(): all_blobs = self.bucket.list_blobs(prefix=self.prefix) - blob_list = [] base_dirs = len(self.prefix.split("/")) + 1 @@ -126,7 +125,7 @@ def get_all_blobs(self, recursive=True, relative=True): else: blob_list.append(converter(blob.name)) - return blob_list + return blob_list def _relative_blob_name(self, blob_name): return os.path.relpath(blob_name, self.prefix) diff --git a/lib/spack/spack/util/git.py b/lib/spack/spack/util/git.py index 62d31dd3986b8b..23510670113b39 100644 --- a/lib/spack/spack/util/git.py +++ b/lib/spack/spack/util/git.py @@ -4,14 +4,27 @@ """Single util module where Spack should get a git executable.""" import os +import re +import shutil import sys from typing import List, Optional, overload from spack.vendor.typing_extensions import Literal +import spack.llnl.util.filesystem as fs import spack.llnl.util.lang import spack.util.executable as exe +# regex for a commit version +COMMIT_VERSION = re.compile(r"^[a-f0-9]{40}$") + +# regex for a git version to extract only the numeric parts +GIT_VERSION = re.compile(r"(\d+(?:\.\d+)*)") + + +def is_git_commit_sha(string: str) -> bool: + return len(string) == 40 and bool(COMMIT_VERSION.match(string)) + @spack.llnl.util.lang.memoized def _find_git() -> Optional[str]: @@ -19,16 +32,77 @@ return exe.which_string("git", required=False) +def extract_git_version_str(git_exe: exe.Executable) -> str: + match = re.search(GIT_VERSION, git_exe("--version", output=str)) + return match.group(1) if match else "" + + +class GitExecutable(exe.Executable): + """Specialized executable that encodes the git version for optimized option selection""" + + def __init__(self, name=None): + if not name: + name = _find_git() + super().__init__(name) + self._version = None + + @property + def version(self): + # lazy init git version + if not self._version: + v_string = extract_git_version_str(self) + self._version = tuple(int(i) for i in v_string.split(".")) + return self._version + + +class VersionConditionalOption: + def __init__(self, key, value=None, min_version=(0, 0, 0), max_version=(99, 99, 99)): + self.key = key + self.value = value + self.min_version = min_version + self.max_version = max_version + + def __call__(self, exe_version, value=None) -> List: + if (self.min_version <= exe_version) and (self.max_version >= exe_version): + option = [self.key] + if value: + option.append(value) + elif self.value: + option.append(self.value) + return option + else: + return []
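+ + # A usage sketch (hypothetical versions): + # VersionConditionalOption("--depth", min_version=(1, 8, 5))((2, 30, 0), "1") -> ["--depth", "1"] + # VersionConditionalOption("--depth", min_version=(1, 8, 5))((1, 7, 0), "1") -> []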
+ + +# The earliest git version where we start trying to optimize clones +# git@1.8.5 is when --branch could also accept a tag so we don't have to track ref types as closely +# This also corresponds to system git on RHEL7 +MIN_OPT_VERSION = (1, 8, 5, 2) + +# Technically the flags existed earlier but we are pruning our logic to 1.8.5 or greater +BRANCH = VersionConditionalOption("--branch", min_version=MIN_OPT_VERSION) +SINGLE_BRANCH = VersionConditionalOption("--single-branch", min_version=MIN_OPT_VERSION) +NO_SINGLE_BRANCH = VersionConditionalOption("--no-single-branch", min_version=MIN_OPT_VERSION) +# Depth was introduced in 1.7.11 but isn't worth much without the --branch options +DEPTH = VersionConditionalOption("--depth", 1, min_version=MIN_OPT_VERSION) + +FILTER_BLOB_NONE = VersionConditionalOption("--filter=blob:none", min_version=(2, 19, 0)) +NO_CHECKOUT = VersionConditionalOption("--no-checkout", min_version=(2, 34, 0)) +# technically sparse-checkout was added in 2.25, but we go forward since the model we use only +# works with the `--cone` option +SPARSE_CHECKOUT = VersionConditionalOption("sparse-checkout", "set", min_version=(2, 34, 0)) + + @overload -def git(required: Literal[True]) -> exe.Executable: ... +def git(required: Literal[True]) -> GitExecutable: ... @overload -def git(required: bool = ...) -> Optional[exe.Executable]: ... +def git(required: bool = ...) -> Optional[GitExecutable]: ... -def git(required: bool = False) -> Optional[exe.Executable]: - """Get a git executable. Raises CommandNotFoundError if `required` and git is not found.""" +def git(required: bool = False) -> Optional[GitExecutable]: + """Get a git executable. Raises CommandNotFoundError if ``required`` and git is not found.""" git_path = _find_git() if not git_path: @@ -36,7 +110,7 @@ def git(required: bool = False) -> Optional[exe.Executable]: raise exe.CommandNotFoundError("spack requires 'git'. Make sure it is in your path.") return None - git = exe.Executable(git_path) + git = GitExecutable(git_path) # If we're running under pytest, add this to ignore the fix for CVE-2022-39253 in # git 2.38.1+. Do this in one place; we need git to do this in all parts of Spack. @@ -59,12 +133,39 @@ def init_git_repo( git_exe("config", "feature.manyFiles", "true", ignore_errors=True) -def pull_checkout_commit(commit: str, git_exe: Optional[exe.Executable] = None): - """Fetch all remotes and checkout the specified commit.""" +def pull_checkout_commit( + commit: str, + remote: Optional[str] = None, + depth: Optional[int] = None, + git_exe: Optional[exe.Executable] = None, +): + """Checkout the specified commit (fetched if necessary).""" git_exe = git_exe or git(required=True) - git_exe("fetch", "--quiet", "--progress", "--all") - git_exe("checkout", commit) + # Do not do any fetching if the commit is already present. + try: + git_exe("checkout", "--quiet", commit, error=os.devnull) + return + except exe.ProcessError: + pass + + # First try to fetch the specific commit from a specific remote. This allows fixed depth, but + # the server needs to support it. + if remote is not None: + try: + flags = [] if depth is None else [f"--depth={depth}"] + git_exe("fetch", "--quiet", "--progress", *flags, remote, commit, error=os.devnull) + git_exe("checkout", "--quiet", commit) + return + except exe.ProcessError: + pass + + # Fall back to fetching all while unshallowing, to guarantee we get the commit. The depth flag + # is equivalent to --unshallow, and needed because git can pedantically error with + # "--unshallow on a complete repository does not make sense". 
+ remote_flag = "--all" if remote is None else remote + git_exe("fetch", "--quiet", "--progress", "--depth=2147483647", remote_flag) + git_exe("checkout", "--quiet", commit) def pull_checkout_tag( @@ -101,7 +202,7 @@ def pull_checkout_branch( raise ValueError("depth must be a positive integer") fetch_args.append(f"--depth={depth}") - git_exe("fetch", *fetch_args, remote, branch) + git_exe("fetch", *fetch_args, remote, f"{branch}:refs/remotes/{remote}/{branch}") git_exe("checkout", "--quiet", branch) try: @@ -114,10 +215,10 @@ def pull_checkout_branch( def get_modified_files( from_ref: str = "HEAD~1", to_ref: str = "HEAD", git_exe: Optional[exe.Executable] = None ) -> List[str]: - """Get a list of files modified between `from_ref` and `to_ref` + """Get a list of files modified between ``from_ref`` and ``to_ref`` Args: - from_ref (str): oldest git ref, defaults to `HEAD~1` - to_ref (str): newer git ref, defaults to `HEAD` + from_ref (str): oldest git ref, defaults to ``HEAD~1`` + to_ref (str): newer git ref, defaults to ``HEAD`` Returns: list of file paths """ git_exe = git_exe or git(required=True) @@ -130,8 +231,8 @@ def get_modified_files( def get_commit_sha(path: str, ref: str) -> Optional[str]: """Get a commit sha for an arbitrary ref using ls-remote""" - # search for matching branch, then tag - ref_list = [f"refs/heads/{ref}", f"refs/tags/{ref}"] + # search for matching branch, annotated tag's commit, then lightweight tag + ref_list = [f"refs/heads/{ref}", f"refs/tags/{ref}^{{}}", f"refs/tags/{ref}"] if os.path.isdir(path): # for the filesystem an unpacked mirror could be in a detached state from a depth 1 clone @@ -152,7 +253,151 @@ def get_commit_sha(path: str, ref: str) -> Optional[str]: if query: return query.strip().split()[0] - except spack.util.executable.ProcessError: + except exe.ProcessError: continue return None + + +def _exec_git_commands(git_exe, cmds, debug, dest=None): + dest_args = ["-C", dest] if dest else [] + error_stream = sys.stdout if debug else os.devnull # swallow extra output for non-debug + for cmd in cmds: + git_exe(*dest_args, *cmd, error=error_stream) + + +def _exec_git_commands_unique_dir(git_exe, cmds, debug, dest=None): + if dest: + # mimic creating a dir and clean up if there is a failure like git clone + assert not os.path.isdir(dest) + os.mkdir(dest) + try: + _exec_git_commands(git_exe, cmds, debug, dest) + except exe.ProcessError: + shutil.rmtree( + dest, ignore_errors=False, onerror=fs.readonly_file_handler(ignore_errors=True) + ) + raise + else: + _exec_git_commands(git_exe, cmds, debug, dest) + + +def protocol_supports_shallow_clone(url): + """Shallow clone operations (``--depth #``) are not supported by the basic + HTTP protocol or by no-protocol file specifications. + Use (e.g.) ``https://`` or ``file://`` instead.""" + return not (url.startswith("http://") or url.startswith("/")) + + +def git_init_fetch(url, ref, depth=None, debug=False, dest=None, git_exe=None): + """Utilize ``git init`` and then ``git fetch`` for a minimal clone of a single git ref + This method runs git init, repo add, fetch to get a minimal set of source data. + Profiling has shown this method can be 10-20% less storage than purely using sparse-checkout, + and is even smaller than git clone --depth 1. This makes it the preferred method for single + commit checkouts and source mirror population. + + There is a trade off since less git data means less flexibility with additional git operations. 
+ Technically adding the remote is not necessary, but we do it since there are test cases where + we may want to fetch additional data. + + Checkout is explicitly deferred to a second method so we can intercept and add sparse-checkout + options uniformly whether we use ``git clone`` or ``init fetch``. + """ + git_exe = git_exe or git(required=True) + version = git_exe.version + # minimum criteria for fetching a single commit, but this also requires the server to be + # configured; fall back to a process error so an old git version or a fetch failure from a + # nonsupporting server can be caught the same way. + if ref and is_git_commit_sha(ref) and version < (2, 5, 0): + raise exe.ProcessError("Git older than 2.5 detected, can't fetch commit directly") + init = ["init"] + remote = ["remote", "add", "origin", url] + fetch = ["fetch"] + + if not debug: + fetch.append("--quiet") + if depth and protocol_supports_shallow_clone(url): + fetch.extend(DEPTH(version, str(depth))) + + fetch.extend([*FILTER_BLOB_NONE(version), url, ref]) + cmds = [init, remote, fetch] + _exec_git_commands_unique_dir(git_exe, cmds, debug, dest) + + +def git_checkout( + ref: Optional[str] = None, + sparse_paths: List[str] = [], + debug: bool = False, + dest: Optional[str] = None, + git_exe: Optional[GitExecutable] = None, +): + """A generic method for running ``git checkout`` that integrates sparse-checkout. + Several methods in this module explicitly delay checkout so sparse-checkout can be called. + It is intended to be used with ``git clone --no-checkout`` or ``git init && git fetch``. + There is minimal impact to performance since the initial clone operation filters blobs and + has to download a minimal subset of git data. + """ + git_exe = git_exe or git(required=True) + checkout = ["checkout"] + sparse_checkout = SPARSE_CHECKOUT(git_exe.version) + + if not debug: + checkout.append("--quiet") + if ref: + checkout.append(ref) + + cmds = [] + if sparse_paths and sparse_checkout: + sparse_checkout.extend([*sparse_paths, "--cone"]) + cmds.append(sparse_checkout) + + cmds.append(checkout) + _exec_git_commands(git_exe, cmds, debug, dest) + + +def git_clone( + url: str, + ref: Optional[str] = None, + full_repo: bool = False, + depth: Optional[int] = None, + debug: bool = False, + dest: Optional[str] = None, + git_exe: Optional[GitExecutable] = None, +): + """A git clone that prefers deferring expensive blob fetching for modern git installations. + This is our fallback method for capturing more git data than the ``init && fetch`` model. + It is still optimized to capture a minimal set of ``./.git`` data and expects to be paired with + a call to ``git checkout`` to fully download the source code. 
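+ A pairing sketch (hypothetical ref): ``git_clone(url, ref="v2.1", dest=path)`` followed by + ``git_checkout(ref="v2.1", dest=path)``.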
+ """ + git_exe = git_exe or git(required=True) + version = git_exe.version + clone = ["clone"] + # only need fetch if it's a really old git so we don't fail a checkout + old = version < MIN_OPT_VERSION + fetch = ["fetch"] + + if not debug: + clone.append("--quiet") + fetch.append("--quiet") + + if not old and depth and not full_repo and protocol_supports_shallow_clone(url): + clone.extend(DEPTH(version, str(depth))) + + if full_repo: + if old: + fetch.extend(["--all"]) + else: + clone.extend(NO_SINGLE_BRANCH(version)) + elif ref and not is_git_commit_sha(ref): + if old: + fetch.extend(["origin", ref]) + else: + clone.extend([*SINGLE_BRANCH(version), *BRANCH(version, ref)]) + + clone.extend([*FILTER_BLOB_NONE(version), *NO_CHECKOUT(version), url]) + + if dest: + clone.append(dest) + _exec_git_commands(git_exe, [clone], debug) + if old: + _exec_git_commands(git_exe, [fetch], debug, dest) diff --git a/lib/spack/spack/util/gpg.py b/lib/spack/spack/util/gpg.py index 4ff6904d960f07..406c60b77fd695 100644 --- a/lib/spack/spack/util/gpg.py +++ b/lib/spack/spack/util/gpg.py @@ -36,7 +36,7 @@ def init(gnupghome=None, force=False): When calling any gpg executable, the GNUPGHOME environment variable is set to: - 1. The value of the `gnupghome` argument, if not None + 1. The value of the ``gnupghome`` argument, if not None 2. The value of the "SPACK_GNUPGHOME" environment variable, if set 3. The default gpg path for Spack otherwise diff --git a/lib/spack/spack/util/log_parse.py b/lib/spack/spack/util/log_parse.py index 2f177eadbc2b67..e27740ee741e21 100644 --- a/lib/spack/spack/util/log_parse.py +++ b/lib/spack/spack/util/log_parse.py @@ -3,38 +3,43 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) import io +import shutil import sys +from typing import Optional, TextIO, Union -import spack.llnl.util.tty as tty from spack.llnl.util.tty.color import cescape, colorize from spack.util.ctest_log_parser import BuildError, BuildWarning, CTestLogParser __all__ = ["parse_log_events", "make_log_context"] -def parse_log_events(stream, context=6, jobs=None, profile=False): +def parse_log_events( + stream: Union[str, TextIO], context: int = 6, jobs: Optional[int] = None, profile: bool = False +): """Extract interesting events from a log file as a list of LogEvent. Args: - stream (str or typing.IO): build log name or file object - context (int): lines of context to extract around each log event - jobs (int): number of jobs to parse with; default ncpus - profile (bool): print out profile information for parsing + stream: build log name or file object + context: lines of context to extract around each log event + jobs: number of jobs to parse with; default ncpus + profile: print out profile information for parsing Returns: - (tuple): two lists containig ``BuildError`` and - ``BuildWarning`` objects. + two lists containing :class:`~spack.util.ctest_log_parser.BuildError` and + :class:`~spack.util.ctest_log_parser.BuildWarning` objects. - This is a wrapper around ``ctest_log_parser.CTestLogParser`` that + This is a wrapper around :class:`~spack.util.ctest_log_parser.CTestLogParser` that lazily constructs a single ``CTestLogParser`` object. This ensures that all the regex compilation is only done once. 
""" - if parse_log_events.ctest_parser is None: - parse_log_events.ctest_parser = CTestLogParser(profile=profile) + parser = getattr(parse_log_events, "ctest_parser", None) + if parser is None: + parser = CTestLogParser(profile=profile) + setattr(parse_log_events, "ctest_parser", parser) - result = parse_log_events.ctest_parser.parse(stream, context, jobs) + result = parser.parse(stream, context, jobs) if profile: - parse_log_events.ctest_parser.print_timings() + parser.print_timings() return result @@ -64,7 +69,7 @@ def make_log_context(log_events, width=None): str: context from the build log with errors highlighted Parses the log file for lines containing errors, and prints them out - with line numbers and context. Errors are highlighted with '>>' and + with line numbers and context. Errors are highlighted with ``>>`` and with red highlighting (if color is enabled). Events are sorted by line number before they are displayed. @@ -77,7 +82,7 @@ def make_log_context(log_events, width=None): indent = " " * (5 + num_width) if width is None: - _, width = tty.terminal_size() + width = shutil.get_terminal_size().columns if width <= 0: width = sys.maxsize wrap_width = width - num_width - 6 diff --git a/lib/spack/spack/util/naming.py b/lib/spack/spack/util/naming.py index cd8aa9ecfab344..6868ff6c98434e 100644 --- a/lib/spack/spack/util/naming.py +++ b/lib/spack/spack/util/naming.py @@ -66,17 +66,17 @@ def pkg_name_to_class_name(pkg_name: str): """Convert a Spack package name to a class name, based on `PEP-8 `_: - * Module and package names use lowercase_with_underscores. - * Class names use the CapWords convention. + * Module and package names use lowercase_with_underscores. + * Class names use the CapWords convention. Not all package names are valid Python identifiers: - * They can contain '-', but cannot start with '-'. - * They can start with numbers, e.g. "3proxy". + * They can contain ``-``, but cannot start with ``-``. + * They can start with numbers, e.g. ``3proxy``. - This function converts from the package name to the class convention by removing _ and - and - converting surrounding lowercase text to CapWords. If package name starts with a number, the - class name returned will be prepended with '_' to make a valid Python identifier. + This function converts from the package name to the class convention by removing ``_`` and + ``-``, and converting surrounding lowercase text to CapWords. If package name starts with a + number, the class name returned will be prepended with ``_`` to make a valid Python identifier. """ class_name = re.sub(r"[-_]+", "-", pkg_name) class_name = string.capwords(class_name, "-") diff --git a/lib/spack/spack/util/package_hash.py b/lib/spack/spack/util/package_hash.py index f8f4d21bf6e943..563843ae9f1369 100644 --- a/lib/spack/spack/util/package_hash.py +++ b/lib/spack/spack/util/package_hash.py @@ -43,6 +43,8 @@ class RemoveDocstrings(ast.NodeTransformer): def remove_docstring(self, node): if node.body: node.body = [child for child in node.body if not unused_string(child)] + if not node.body: + node.body = [ast.Pass()] self.generic_visit(node) return node @@ -257,7 +259,7 @@ def foo(self): print("implementation 4") when we know that they will not affect package behavior. If we're at version 4.0, we know that implementation 1 will win, because some @when - for 2, 3, and 4 will be `False`. We should only include implementation 1. + for 2, 3, and 4 will be ``False``. We should only include implementation 1. 
If we're at version 1.0, we know that implementation 2 will win, because it overrides implementation 1. We should only include implementation 2. @@ -333,7 +335,7 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> Optional[ast.FunctionDef]: def canonical_source( - spec, filter_multimethods: bool = True, source: Optional[bytes] = None + spec: spack.spec.Spec, filter_multimethods: bool = True, source: Optional[bytes] = None ) -> str: """Get canonical source for a spec's package.py by unparsing its AST. @@ -345,7 +347,7 @@ def canonical_source( return unparse(package_ast(spec, filter_multimethods, source=source), py_ver_consistent=True) -def package_hash(spec, source: Optional[bytes] = None) -> str: +def package_hash(spec: spack.spec.Spec, source: Optional[bytes] = None) -> str: """Get a hash of a package's canonical source code. This function is used to determine whether a spec needs a rebuild when a @@ -359,7 +361,9 @@ def package_hash(spec, source: Optional[bytes] = None) -> str: return spack.util.hash.b32_hash(source) -def package_ast(spec, filter_multimethods: bool = True, source: Optional[bytes] = None) -> ast.AST: +def package_ast( + spec: spack.spec.Spec, filter_multimethods: bool = True, source: Optional[bytes] = None +) -> ast.AST: """Get the AST for the ``package.py`` file corresponding to ``spec``. Arguments: @@ -367,8 +371,6 @@ def package_ast(spec, filter_multimethods: bool = True, source: Optional[bytes] statically to be unused. Supply False to disable. source: Optionally provide a string to read python code from. """ - spec = spack.spec.Spec(spec) - if source is None: filename = spack.repo.PATH.filename_for_package_name(spec.name) with open(filename, "rb") as f: diff --git a/lib/spack/spack/util/parallel.py b/lib/spack/spack/util/parallel.py index 259143cad4dd12..ae05905f062f48 100644 --- a/lib/spack/spack/util/parallel.py +++ b/lib/spack/spack/util/parallel.py @@ -10,6 +10,9 @@ import spack.config +#: Used in tests to disable parallelism, as tests themselves are parallelized +ENABLE_PARALLELISM = sys.platform != "win32" + class ErrorFromWorker: """Wrapper class to report an error from a worker process""" @@ -73,12 +76,13 @@ def imap_unordered( Raises: RuntimeError: if any error occurred in the worker processes """ - from spack.subprocess_context import GlobalStateMarshaler - if sys.platform in ("darwin", "win32") or len(list_of_args) == 1: + if not ENABLE_PARALLELISM or len(list_of_args) <= 1: yield from map(f, list_of_args) return + from spack.subprocess_context import GlobalStateMarshaler + marshaler = GlobalStateMarshaler() with multiprocessing.Pool( processes, initializer=marshaler.restore, maxtasksperchild=maxtaskperchild @@ -109,13 +113,15 @@ def make_concurrent_executor( if the platform does not enable forking as the default start method. 
Effectively require_fork=True makes the executor sequential in the current process on Windows, macOS, and Linux from Python 3.14+ (which changes defaults)""" - from spack.subprocess_context import GlobalStateMarshaler - if require_fork and multiprocessing.get_start_method() != "fork": + if ( + not ENABLE_PARALLELISM + or (require_fork and multiprocessing.get_start_method() != "fork") + or sys.version_info[:2] == (3, 6) + ): return SequentialExecutor() - if sys.version_info[:2] == (3, 6): - return SequentialExecutor() + from spack.subprocess_context import GlobalStateMarshaler jobs = jobs or spack.config.determine_number_of_jobs(parallel=True) marshaler = GlobalStateMarshaler() diff --git a/lib/spack/spack/util/pattern.py b/lib/spack/spack/util/pattern.py index 84fd4483cca276..730cfb024240cf 100644 --- a/lib/spack/spack/util/pattern.py +++ b/lib/spack/spack/util/pattern.py @@ -1,122 +1,6 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import collections.abc -import functools -import inspect - - -class Delegate: - def __init__(self, name, container): - self.name = name - self.container = container - - def __call__(self, *args, **kwargs): - return [getattr(item, self.name)(*args, **kwargs) for item in self.container] - - -class Composite(list): - def __init__(self, fns_to_delegate): - self.fns_to_delegate = fns_to_delegate - - def __getattr__(self, name): - if name != "fns_to_delegate" and name in self.fns_to_delegate: - return Delegate(name, self) - else: - return self.__getattribute__(name) - - -def composite(interface=None, method_list=None, container=list): - """Decorator implementing the GoF composite pattern. - - Args: - interface (type): class exposing the interface to which the - composite object must conform. Only non-private and - non-special methods will be taken into account - method_list (list): names of methods that should be part - of the composite - container (collections.abc.MutableSequence): container for the composite object - (default = list). Must fulfill the MutableSequence - contract. The composite class will expose the container API - to manage object composition - - Returns: - a class decorator that patches a class adding all the methods - it needs to be a composite for a given interface. - - """ - # Check if container fulfills the MutableSequence contract and raise an - # exception if it doesn't. The patched class returned by the decorator will - # inherit from the container class to expose the interface needed to manage - # objects composition - if not issubclass(container, collections.abc.MutableSequence): - raise TypeError("Container must fulfill the MutableSequence contract") - - # Check if at least one of the 'interface' or the 'method_list' arguments - # are defined - if interface is None and method_list is None: - raise TypeError( - "Either 'interface' or 'method_list' must be defined on a call " "to composite" - ) - - def cls_decorator(cls): - # Retrieve the base class of the composite. Inspect its methods and - # decide which ones will be overridden - def no_special_no_private(x): - return callable(x) and not x.__name__.startswith("_") - - # Patch the behavior of each of the methods in the previous list. - # This is done associating an instance of the descriptor below to - # any method that needs to be patched. - class IterateOver: - """Decorator used to patch methods in a composite. 
- - It iterates over all the items in the instance containing the - associated attribute and calls for each of them an attribute - with the same name - """ - - def __init__(self, name, func=None): - self.name = name - self.func = func - - def __get__(self, instance, owner): - def getter(*args, **kwargs): - for item in instance: - getattr(item, self.name)(*args, **kwargs) - - # If we are using this descriptor to wrap a method from an - # interface, then we must conditionally use the - # `functools.wraps` decorator to set the appropriate fields - if self.func is not None: - getter = functools.wraps(self.func)(getter) - return getter - - dictionary_for_type_call = {} - - # Construct a dictionary with the methods explicitly passed as name - if method_list is not None: - dictionary_for_type_call.update((name, IterateOver(name)) for name in method_list) - - # Construct a dictionary with the methods inspected from the interface - if interface is not None: - dictionary_for_type_call.update( - (name, IterateOver(name, method)) - for name, method in inspect.getmembers(interface, predicate=no_special_no_private) - ) - - # Get the methods that are defined in the scope of the composite - # class and override any previous definition - dictionary_for_type_call.update( - (name, method) for name, method in inspect.getmembers(cls, predicate=inspect.ismethod) - ) - - # Generate the new class on the fly and return it - # FIXME : inherit from interface if we start to use ABC classes? - wrapper_class = type(cls.__name__, (cls, container), dictionary_for_type_call) - return wrapper_class - - return cls_decorator class Bunch: diff --git a/lib/spack/spack/util/prefix.py index a0042a768c9f91..09184080720791 100644 --- a/lib/spack/spack/util/prefix.py +++ b/lib/spack/spack/util/prefix.py @@ -28,8 +28,8 @@ class Prefix(str): >>> prefix.join("dashed-directory").bin64 /usr/dashed-directory/bin64 - Prefix objects behave identically to strings. In fact, they subclass ``str``, so operators like - ``+`` are legal:: + Prefix objects behave identically to strings. In fact, they subclass :class:`str`, so operators + like ``+`` are legal:: print("foobar " + prefix) diff --git a/lib/spack/spack/util/remote_file_cache.py b/lib/spack/spack/util/remote_file_cache.py index ef1ff9cf903c0a..0897e33ca20778 100644 --- a/lib/spack/spack/util/remote_file_cache.py +++ b/lib/spack/spack/util/remote_file_cache.py @@ -66,7 +66,7 @@ def local_path(raw_path: str, sha256: str, make_dest: Optional[Callable[[], str] Args: raw_path: raw path with possible variables needing substitution sha256: the expected sha256 for the file - make_dest: function to create a stage for remote files, if needed (e.g., `mkdtemp`) + make_dest: function to create a stage for remote files, if needed (e.g., ``mkdtemp``) Returns: resolved, normalized local path diff --git a/lib/spack/spack/util/socket.py b/lib/spack/spack/util/socket.py index f70e280af559d8..50f68d9bc682eb 100644 --- a/lib/spack/spack/util/socket.py +++ b/lib/spack/spack/util/socket.py @@ -9,11 +9,11 @@ @spack.llnl.util.lang.memoized -def _getfqdn(): - """Memoized version of `getfqdn()`. +def _gethostname(): + """Memoized version of `gethostname()`. - If we call `getfqdn()` too many times, DNS can be very slow. We only need - to call it one time per process, so we cache it here. + We only need to call it one time per process, so we cache it here. 
""" - return socket.getfqdn() + return socket.gethostname() diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py index c937af322f2f08..f1aa1dead2a779 100644 --- a/lib/spack/spack/util/spack_yaml.py +++ b/lib/spack/spack/util/spack_yaml.py @@ -63,6 +63,53 @@ def syaml_type(obj): return obj +class DictWithLineInfo(dict): + """A dictionary that preserves YAML line information.""" + + __slots__ = ("line_info",) + + def __init__(self, *args, line_info: str = "", **kwargs): + super().__init__(*args, **kwargs) + self.line_info = line_info + + +def _represent_dict_with_line_info(dumper, data): + return dumper.represent_dict(data) + + +def deepcopy_as_builtin(obj: Any, *, line_info: bool = False) -> Any: + """Deep copies a YAML object as built-in types (dict, list, str, int, ...). + + Args: + obj: object to be copied + line_info: if ``True``, add line information to the copied object + """ + if isinstance(obj, str): + return str(obj) + elif isinstance(obj, dict): + result = DictWithLineInfo() + result.update( + { + deepcopy_as_builtin(k): deepcopy_as_builtin(v, line_info=line_info) + for k, v in obj.items() + } + ) + if line_info: + result.line_info = _line_info(obj) + return result + elif isinstance(obj, list): + return [deepcopy_as_builtin(x, line_info=line_info) for x in obj] + elif isinstance(obj, bool): + return bool(obj) + elif isinstance(obj, int): + return int(obj) + elif isinstance(obj, float): + return float(obj) + elif obj is None: + return obj + raise ValueError(f"cannot convert {type(obj)} to built-in type") + + def markable(obj): """Whether an object can be marked.""" return type(obj) in markable_types @@ -229,12 +276,14 @@ def dump(data, stream=None, default_flow_style=False): return handler.dump(data, stream=stream) -def file_line(mark): +def _line_info(obj): """Format a mark as : information.""" - result = mark.name - if mark.line: - result += ":" + str(mark.line) - return result + m = get_mark_from_yaml_data(obj) + if m is None: + return "" + if m.line: + return f"{m.name}:{m.line:d}" + return m.name #: Global for interactions between LineAnnotationDumper and dump_annotated(). @@ -340,6 +389,7 @@ def __init__(self, yaml_type: YAMLType) -> None: else: self.yaml.Representer = OrderedLineRepresenter self.yaml.Constructor = OrderedLineConstructor + self.yaml.Representer.add_representer(DictWithLineInfo, _represent_dict_with_line_info) def load(self, stream: IO): """Loads the YAML data from a stream and returns it. @@ -358,15 +408,19 @@ def load(self, stream: IO): error_mark = e.context_mark if e.context_mark else e.problem_mark if error_mark: line, column = error_mark.line, error_mark.column - msg += f": near {error_mark.name}, {str(line)}, {str(column)}" + filename = error_mark.name + msg += f": near {filename}, {str(line)}, {str(column)}" else: + filename = stream.name msg += f": {stream.name}" msg += f": {e.problem}" - raise SpackYAMLError(msg, e) from e + + raise SpackYAMLError(msg, e, filename) from e except Exception as e: msg = "cannot load Spack YAML configuration" - raise SpackYAMLError(msg, e) from e + filename = stream.name + raise SpackYAMLError(msg, e, filename) from e def dump(self, data, stream: Optional[IO] = None, *, transform=None) -> None: """Dumps the YAML data to a stream. 
@@ -382,7 +436,8 @@ def dump(self, data, stream: Optional[IO] = None, *, transform=None) -> None: return self.yaml.dump(data, stream=stream, transform=transform) except Exception as e: msg = "cannot dump Spack YAML configuration" - raise SpackYAMLError(msg, str(e)) from e + filename = stream.name if stream else None + raise SpackYAMLError(msg, str(e), filename) from e def as_string(self, data) -> str: """Returns a string representing the YAML data passed as input.""" @@ -491,7 +546,8 @@ def anchorify(data: Union[dict, list], identifier: Callable[[Any], str] = repr) class SpackYAMLError(spack.error.SpackError): """Raised when there are issues with YAML parsing.""" - def __init__(self, msg, yaml_error): + def __init__(self, msg, yaml_error, filename=None): + self.filename = filename super().__init__(msg, str(yaml_error)) diff --git a/lib/spack/spack/util/typing.py b/lib/spack/spack/util/typing.py index b1800d18cb454c..9c499e3577edce 100644 --- a/lib/spack/spack/util/typing.py +++ b/lib/spack/spack/util/typing.py @@ -1,29 +1,43 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) +"""Extra support for type checking in Spack. -from typing import Any +Protocols here that have runtime overhead should be set to ``object`` when +``TYPE_CHECKING`` is not enabled, as they can incur unreasonable runtime overheads. + +In particular, Protocols intended for use on objects that have many ``isinstance()`` +calls can be very expensive. + +""" + + +from typing import TYPE_CHECKING, Any from spack.vendor.typing_extensions import Protocol +if TYPE_CHECKING: + + class SupportsRichComparison(Protocol): + """Objects that support =, !=, <, <=, >, and >=.""" -class SupportsRichComparison(Protocol): - """Objects that support =, !=, <, <=, >, and >=.""" + def __eq__(self, other: Any) -> bool: + raise NotImplementedError - def __eq__(self, other: Any) -> bool: - raise NotImplementedError + def __ne__(self, other: Any) -> bool: + raise NotImplementedError - def __ne__(self, other: Any) -> bool: - raise NotImplementedError + def __lt__(self, other: Any) -> bool: + raise NotImplementedError - def __lt__(self, other: Any) -> bool: - raise NotImplementedError + def __le__(self, other: Any) -> bool: + raise NotImplementedError - def __le__(self, other: Any) -> bool: - raise NotImplementedError + def __gt__(self, other: Any) -> bool: + raise NotImplementedError - def __gt__(self, other: Any) -> bool: - raise NotImplementedError + def __ge__(self, other: Any) -> bool: + raise NotImplementedError - def __ge__(self, other: Any) -> bool: - raise NotImplementedError +else: + SupportsRichComparison = object diff --git a/lib/spack/spack/util/unparse/__init__.py b/lib/spack/spack/util/unparse/__init__.py index c80554db18c6b5..fab5df35be579b 100644 --- a/lib/spack/spack/util/unparse/__init__.py +++ b/lib/spack/spack/util/unparse/__init__.py @@ -1,7 +1,6 @@ # Copyright (c) 2014-2021, Simon Percivall and Spack Project Developers.
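# A minimal sketch (illustration only, stdlib-only) of the TYPE_CHECKING
# pattern applied in util/typing.py above: the Protocol exists only for the
# type checker, while at runtime the name degrades to `object`, so
# isinstance()-heavy call sites pay no Protocol overhead. The protocol and
# function names here are hypothetical.
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from typing import Protocol

    class SupportsLessThan(Protocol):
        def __lt__(self, other: Any) -> bool: ...

else:
    SupportsLessThan = object


def smallest(items: "list[SupportsLessThan]"):
    # Type checkers enforce the bound; at runtime the annotation is inert.
    return min(items)


print(smallest([3, 1, 2]))  # -> 1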
# # SPDX-License-Identifier: Python-2.0 -import io from .unparser import Unparser @@ -9,6 +8,5 @@ def unparse(tree, py_ver_consistent=False): - v = io.StringIO() - Unparser(py_ver_consistent=py_ver_consistent).visit(tree, v) - return v.getvalue().strip() + "\n" + unparser = Unparser(py_ver_consistent=py_ver_consistent) + return unparser.visit(tree) + "\n" diff --git a/lib/spack/spack/util/unparse/unparser.py b/lib/spack/spack/util/unparse/unparser.py index 374ed56f660e6e..7b5a2ab615ea0e 100644 --- a/lib/spack/spack/util/unparse/unparser.py +++ b/lib/spack/spack/util/unparse/unparser.py @@ -4,8 +4,10 @@ "Usage: unparse.py <path to source file>" import ast import sys +from ast import AST, FormattedValue, If, JoinedStr, Name, Tuple from contextlib import contextmanager -from io import StringIO +from enum import IntEnum, auto +from typing import Optional # TODO: if we require Python 3.7, use its `nullcontext()` @@ -14,89 +16,131 @@ def nullcontext(): yield +def is_non_empty_non_star_tuple(slice_value): + """True for `(1, 2)`, False for `()` and `(1, *b)`""" + return ( + isinstance(slice_value, Tuple) + and slice_value.elts + and not any(isinstance(elt, ast.Starred) for elt in slice_value.elts) + ) + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. + """ + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. + """ + + def visit(self, node): + """Visit a node.""" + method = "visit_" + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + # Large float and imaginary literals get turned into infinities in the AST. # We unparse those infinities to INFSTR.
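# For instance (a short sketch of the comment above): the parser folds an
# overflowing literal such as 1e400 into the float inf, whose repr() "inf" is
# not valid source text; an overflowing *decimal literal* round-trips instead,
# because it evaluates back to infinity. The name `infstr` is hypothetical.
import sys

infstr = "1e" + repr(sys.float_info.max_10_exp + 1)  # "1e309" for IEEE doubles
assert float(infstr) == float("inf")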
-INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) +_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) -class _Precedence: +class _Precedence(IntEnum): """Precedence table that originated from python grammar.""" - TUPLE = 0 - YIELD = 1 # 'yield', 'yield from' - TEST = 2 # 'if'-'else', 'lambda' - OR = 3 # 'or' - AND = 4 # 'and' - NOT = 5 # 'not' - CMP = 6 # '<', '>', '==', '>=', '<=', '!=', 'in', 'not in', 'is', 'is not' - EXPR = 7 + NAMED_EXPR = auto() # := + TUPLE = auto() # , + YIELD = auto() # 'yield', 'yield from' + TEST = auto() # 'if'-'else', 'lambda' + OR = auto() # 'or' + AND = auto() # 'and' + NOT = auto() # 'not' + CMP = auto() # '<', '>', '==', '>=', '<=', '!=', + # 'in', 'not in', 'is', 'is not' + EXPR = auto() BOR = EXPR # '|' - BXOR = 8 # '^' - BAND = 9 # '&' - SHIFT = 10 # '<<', '>>' - ARITH = 11 # '+', '-' - TERM = 12 # '*', '@', '/', '%', '//' - FACTOR = 13 # unary '+', '-', '~' - POWER = 14 # '**' - AWAIT = 15 # 'await' - ATOM = 16 - - -def pnext(precedence): - return min(precedence + 1, _Precedence.ATOM) - - -def interleave(inter, f, seq): - """Call f on each item in seq, calling inter() in between.""" - seq = iter(seq) - try: - f(next(seq)) - except StopIteration: - pass - else: - for x in seq: - inter() - f(x) + BXOR = auto() # '^' + BAND = auto() # '&' + SHIFT = auto() # '<<', '>>' + ARITH = auto() # '+', '-' + TERM = auto() # '*', '@', '/', '%', '//' + FACTOR = auto() # unary '+', '-', '~' + POWER = auto() # '**' + AWAIT = auto() # 'await' + ATOM = auto() + + def next(self): + try: + return self.__class__(self + 1) + except ValueError: + return self _SINGLE_QUOTES = ("'", '"') _MULTI_QUOTES = ('"""', "'''") -_ALL_QUOTES = _SINGLE_QUOTES + _MULTI_QUOTES +_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES) -def is_simple_tuple(slice_value): - # when unparsing a non-empty tuple, the parantheses can be safely - # omitted if there aren't any elements that explicitly requires - # parantheses (such as starred expressions). - return ( - isinstance(slice_value, ast.Tuple) - and slice_value.elts - and not any(isinstance(elt, ast.Starred) for elt in slice_value.elts) - ) - - -class Unparser: +class Unparser(NodeVisitor): """Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarded.""" def __init__(self, py_ver_consistent=False, _avoid_backslashes=False): - """Traverse an AST and generate its source. - - Arguments: - py_ver_consistent (bool): if True, generate unparsed code that is - consistent between Python versions 3.5-3.11. - - For legacy reasons, consistency is achieved by unparsing Python3 unicode literals - the way Python 2 would. This preserved Spack package hash consistency during the - python2/3 transition - """ - self.future_imports = [] + self._source = [] + self._precedences = {} + self._type_ignores = {} self._indent = 0 + self._in_try_star = False self._py_ver_consistent = py_ver_consistent - self._precedences = {} self._avoid_backslashes = _avoid_backslashes + def interleave(self, inter, f, seq): + """Call f on each item in seq, calling inter() in between.""" + seq = iter(seq) + try: + f(next(seq)) + except StopIteration: + pass + else: + for x in seq: + inter() + f(x) + def items_view(self, traverser, items): """Traverse and separate the given *items* with a comma and append it to the buffer. 
If *items* is a single item sequence, a trailing comma @@ -105,39 +149,46 @@ def items_view(self, traverser, items): traverser(items[0]) self.write(",") else: - interleave(lambda: self.write(", "), traverser, items) + self.interleave(lambda: self.write(", "), traverser, items) - def visit(self, tree, output_file): - """Traverse tree and write source code to output_file.""" - self.f = output_file - self.dispatch(tree) - self.f.flush() + def maybe_newline(self): + """Adds a newline if it isn't the start of generated source""" + if self._source: + self.write("\n") def fill(self, text=""): - "Indent a piece of text, according to the current indentation level" - self.f.write("\n" + " " * self._indent + text) - - def write(self, text): - "Append a piece of text to the current line." - self.f.write(str(text)) - - class _Block: - """A context manager for preparing the source for blocks. It adds - the character ':', increases the indentation on enter and decreases - the indentation on exit.""" + """Indent a piece of text and append it, according to the current + indentation level""" + self.maybe_newline() + self.write(" " * self._indent + text) - def __init__(self, unparser): - self.unparser = unparser + def write(self, *text): + """Add new source parts""" + self._source.extend(text) - def __enter__(self): - self.unparser.write(":") - self.unparser._indent += 1 + @contextmanager + def buffered(self, buffer=None): + if buffer is None: + buffer = [] - def __exit__(self, exc_type, exc_value, traceback): - self.unparser._indent -= 1 + original_source = self._source + self._source = buffer + yield buffer + self._source = original_source - def block(self): - return self._Block(self) + @contextmanager + def block(self, *, extra=None): + """A context manager for preparing the source for blocks. It adds + the character':', increases the indentation on enter and decreases + the indentation on exit. If *extra* is given, it will be directly + appended after the colon character. + """ + self.write(":") + if extra: + self.write(extra) + self._indent += 1 + yield + self._indent -= 1 @contextmanager def delimit(self, start, end): @@ -165,90 +216,126 @@ def set_precedence(self, precedence, *nodes): for node in nodes: self._precedences[node] = precedence - def dispatch(self, tree): - "Dispatcher function, dispatching tree type T to method _T." - if isinstance(tree, list): - for node in tree: - self.dispatch(node) - return - meth = getattr(self, "visit_" + tree.__class__.__name__) - meth(tree) - - # - # Unparsing methods - # - # There should be one method per concrete grammar type Constructors - # should be # grouped by sum type. Ideally, this would follow the order - # in the grammar, but currently doesn't. + def get_raw_docstring(self, node): + """If a docstring node is found in the body of the *node* parameter, + return that docstring node, None otherwise. + + Logic mirrored from ``_PyAST_GetDocString``.""" + if ( + not isinstance(node, (ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef, ast.Module)) + or len(node.body) < 1 + ): + return None + node = node.body[0] + if not isinstance(node, ast.Expr): + return None + node = node.value + if _is_str_literal(node): + return node + + def get_type_comment(self, node): + # Python 3.8 introduced type_comment + # (enabled on compile(... 
ast.PyCF_TYPE_COMMENTS)) + comment = self._type_ignores.get(node.lineno) or getattr(node, "type_comment", None) + if comment is not None: + return f" # type: {comment}" + + def traverse(self, node): + if isinstance(node, list): + for item in node: + self.traverse(item) + else: + super().visit(node) + + # Note: as visit() resets the output text, do NOT rely on + # NodeVisitor.generic_visit to handle any nodes (as it calls back in to + # the subclass visit() method, which resets self._source to an empty list) + def visit(self, node): + """Outputs a source code string that, if converted back to an ast + (using ast.parse) will generate an AST equivalent to *node*""" + self._source = [] + self.traverse(node) + return "".join(self._source) + + def _write_docstring_and_traverse_body(self, node): + docstring = self.get_raw_docstring(node) + if docstring: + self._write_docstring(docstring) + self.traverse(node.body[1:]) + else: + self.traverse(node.body) - def visit_Module(self, tree): - for stmt in tree.body: - self.dispatch(stmt) + def visit_Module(self, node): + # Python 3.8 introduced types + self._type_ignores = { + ignore.lineno: f"ignore{ignore.tag}" for ignore in getattr(node, "type_ignores", ()) + } + self._write_docstring_and_traverse_body(node) + self._type_ignores.clear() - def visit_Interactive(self, tree): - for stmt in tree.body: - self.dispatch(stmt) + def visit_FunctionType(self, node): + with self.delimit("(", ")"): + self.interleave(lambda: self.write(", "), self.traverse, node.argtypes) - def visit_Expression(self, tree): - self.dispatch(tree.body) + self.write(" -> ") + self.traverse(node.returns) - # stmt - def visit_Expr(self, tree): + def visit_Expr(self, node): self.fill() - self.set_precedence(_Precedence.YIELD, tree.value) - self.dispatch(tree.value) + self.set_precedence(_Precedence.YIELD, node.value) + self.traverse(node.value) - def visit_NamedExpr(self, tree): - with self.require_parens(_Precedence.TUPLE, tree): - self.set_precedence(_Precedence.ATOM, tree.target, tree.value) - self.dispatch(tree.target) + def visit_NamedExpr(self, node): + with self.require_parens(_Precedence.NAMED_EXPR, node): + self.set_precedence(_Precedence.ATOM, node.target, node.value) + self.traverse(node.target) self.write(" := ") - self.dispatch(tree.value) + self.traverse(node.value) def visit_Import(self, node): self.fill("import ") - interleave(lambda: self.write(", "), self.dispatch, node.names) + self.interleave(lambda: self.write(", "), self.traverse, node.names) def visit_ImportFrom(self, node): - # A from __future__ import may affect unparsing, so record it. - if node.module and node.module == "__future__": - self.future_imports.extend(n.name for n in node.names) - self.fill("from ") - self.write("." * node.level) + self.write("." 
* (node.level or 0)) if node.module: self.write(node.module) self.write(" import ") - interleave(lambda: self.write(", "), self.dispatch, node.names) + self.interleave(lambda: self.write(", "), self.traverse, node.names) def visit_Assign(self, node): self.fill() for target in node.targets: - self.dispatch(target) + self.set_precedence(_Precedence.TUPLE, target) + self.traverse(target) self.write(" = ") - self.dispatch(node.value) + self.traverse(node.value) + type_comment = self.get_type_comment(node) + if type_comment: + self.write(type_comment) def visit_AugAssign(self, node): self.fill() - self.dispatch(node.target) + self.traverse(node.target) self.write(" " + self.binop[node.op.__class__.__name__] + "= ") - self.dispatch(node.value) + self.traverse(node.value) def visit_AnnAssign(self, node): self.fill() - with self.delimit_if("(", ")", not node.simple and isinstance(node.target, ast.Name)): - self.dispatch(node.target) + with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)): + self.traverse(node.target) self.write(": ") - self.dispatch(node.annotation) + self.traverse(node.annotation) if node.value: self.write(" = ") - self.dispatch(node.value) + self.traverse(node.value) def visit_Return(self, node): self.fill("return") if node.value: self.write(" ") - self.dispatch(node.value) + self.traverse(node.value) def visit_Pass(self, node): self.fill("pass") @@ -261,22 +348,22 @@ def visit_Continue(self, node): def visit_Delete(self, node): self.fill("del ") - interleave(lambda: self.write(", "), self.dispatch, node.targets) + self.interleave(lambda: self.write(", "), self.traverse, node.targets) def visit_Assert(self, node): self.fill("assert ") - self.dispatch(node.test) + self.traverse(node.test) if node.msg: self.write(", ") - self.dispatch(node.msg) + self.traverse(node.msg) def visit_Global(self, node): self.fill("global ") - interleave(lambda: self.write(", "), self.write, node.names) + self.interleave(lambda: self.write(", "), self.write, node.names) def visit_Nonlocal(self, node): self.fill("nonlocal ") - interleave(lambda: self.write(", "), self.write, node.names) + self.interleave(lambda: self.write(", "), self.write, node.names) def visit_Await(self, node): with self.require_parens(_Precedence.AWAIT, node): @@ -284,7 +371,7 @@ def visit_Await(self, node): if node.value: self.write(" ") self.set_precedence(_Precedence.ATOM, node.value) - self.dispatch(node.value) + self.traverse(node.value) def visit_Yield(self, node): with self.require_parens(_Precedence.YIELD, node): @@ -292,63 +379,78 @@ def visit_Yield(self, node): if node.value: self.write(" ") self.set_precedence(_Precedence.ATOM, node.value) - self.dispatch(node.value) + self.traverse(node.value) def visit_YieldFrom(self, node): with self.require_parens(_Precedence.YIELD, node): - self.write("yield from") - if node.value: - self.write(" ") - self.set_precedence(_Precedence.ATOM, node.value) - self.dispatch(node.value) + self.write("yield from ") + if not node.value: + raise ValueError("Node can't be used without a value attribute.") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) def visit_Raise(self, node): self.fill("raise") if not node.exc: - assert not node.cause + if node.cause: + raise ValueError("Node can't use cause without an exception.") return self.write(" ") - self.dispatch(node.exc) + self.traverse(node.exc) if node.cause: self.write(" from ") - self.dispatch(node.cause) + self.traverse(node.cause) - def visit_Try(self, node): + def do_visit_try(self, 
node): self.fill("try") with self.block(): - self.dispatch(node.body) + self.traverse(node.body) for ex in node.handlers: - self.dispatch(ex) + self.traverse(ex) if node.orelse: self.fill("else") with self.block(): - self.dispatch(node.orelse) + self.traverse(node.orelse) if node.finalbody: self.fill("finally") with self.block(): - self.dispatch(node.finalbody) + self.traverse(node.finalbody) + + def visit_Try(self, node): + prev_in_try_star = self._in_try_star + try: + self._in_try_star = False + self.do_visit_try(node) + finally: + self._in_try_star = prev_in_try_star + + def visit_TryStar(self, node): + prev_in_try_star = self._in_try_star + try: + self._in_try_star = True + self.do_visit_try(node) + finally: + self._in_try_star = prev_in_try_star def visit_ExceptHandler(self, node): - self.fill("except") + self.fill("except*" if self._in_try_star else "except") if node.type: self.write(" ") - self.dispatch(node.type) + self.traverse(node.type) if node.name: self.write(" as ") self.write(node.name) with self.block(): - self.dispatch(node.body) + self.traverse(node.body) def visit_ClassDef(self, node): - self.write("\n") + self.maybe_newline() for deco in node.decorator_list: self.fill("@") - self.dispatch(deco) + self.traverse(deco) self.fill("class " + node.name) - if getattr(node, "type_params", False): - self.write("[") - interleave(lambda: self.write(", "), self.dispatch, node.type_params) - self.write("]") + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) with self.delimit_if("(", ")", condition=node.bases or node.keywords): comma = False for e in node.bases: @@ -356,107 +458,137 @@ def visit_ClassDef(self, node): self.write(", ") else: comma = True - self.dispatch(e) + self.traverse(e) for e in node.keywords: if comma: self.write(", ") else: comma = True - self.dispatch(e) + self.traverse(e) + with self.block(): - self.dispatch(node.body) + self._write_docstring_and_traverse_body(node) def visit_FunctionDef(self, node): - self.__FunctionDef_helper(node, "def") + self._function_helper(node, "def") def visit_AsyncFunctionDef(self, node): - self.__FunctionDef_helper(node, "async def") + self._function_helper(node, "async def") - def __FunctionDef_helper(self, node, fill_suffix): - self.write("\n") + def _function_helper(self, node, fill_suffix): + self.maybe_newline() for deco in node.decorator_list: self.fill("@") - self.dispatch(deco) + self.traverse(deco) def_str = fill_suffix + " " + node.name self.fill(def_str) - if getattr(node, "type_params", False): - self.write("[") - interleave(lambda: self.write(", "), self.dispatch, node.type_params) - self.write("]") + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) with self.delimit("(", ")"): - self.dispatch(node.args) - if getattr(node, "returns", False): + self.traverse(node.args) + if node.returns: self.write(" -> ") - self.dispatch(node.returns) - with self.block(): - self.dispatch(node.body) + self.traverse(node.returns) + with self.block(extra=self.get_type_comment(node)): + self._write_docstring_and_traverse_body(node) + + def _type_params_helper(self, type_params): + if type_params is not None and len(type_params) > 0: + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, type_params) + + def visit_TypeVar(self, node): + self.write(node.name) + if node.bound: + self.write(": ") + self.traverse(node.bound) + # Python 3.13 introduced default_value + if getattr(node, "default_value", False): + self.write(" = ") + 
self.traverse(node.default_value) + + def visit_TypeVarTuple(self, node): + self.write("*" + node.name) + # Python 3.13 introduced default_value + if getattr(node, "default_value", False): + self.write(" = ") + self.traverse(node.default_value) + + def visit_ParamSpec(self, node): + self.write("**" + node.name) + # Python 3.13 introduced default_value + if getattr(node, "default_value", False): + self.write(" = ") + self.traverse(node.default_value) + + def visit_TypeAlias(self, node): + self.fill("type ") + self.traverse(node.name) + self._type_params_helper(node.type_params) + self.write(" = ") + self.traverse(node.value) def visit_For(self, node): - self.__For_helper("for ", node) + self._for_helper("for ", node) def visit_AsyncFor(self, node): - self.__For_helper("async for ", node) + self._for_helper("async for ", node) - def __For_helper(self, fill, node): + def _for_helper(self, fill, node): self.fill(fill) - self.dispatch(node.target) + self.set_precedence(_Precedence.TUPLE, node.target) + self.traverse(node.target) self.write(" in ") - self.dispatch(node.iter) - with self.block(): - self.dispatch(node.body) + self.traverse(node.iter) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) if node.orelse: self.fill("else") with self.block(): - self.dispatch(node.orelse) + self.traverse(node.orelse) def visit_If(self, node): self.fill("if ") - self.dispatch(node.test) + self.traverse(node.test) with self.block(): - self.dispatch(node.body) + self.traverse(node.body) # collapse nested ifs into equivalent elifs. - while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If): + while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If): node = node.orelse[0] self.fill("elif ") - self.dispatch(node.test) + self.traverse(node.test) with self.block(): - self.dispatch(node.body) + self.traverse(node.body) # final else if node.orelse: self.fill("else") with self.block(): - self.dispatch(node.orelse) + self.traverse(node.orelse) def visit_While(self, node): self.fill("while ") - self.dispatch(node.test) + self.traverse(node.test) with self.block(): - self.dispatch(node.body) + self.traverse(node.body) if node.orelse: self.fill("else") with self.block(): - self.dispatch(node.orelse) - - def _generic_With(self, node, async_=False): - self.fill("async with " if async_ else "with ") - if hasattr(node, "items"): - interleave(lambda: self.write(", "), self.dispatch, node.items) - else: - self.dispatch(node.context_expr) - if node.optional_vars: - self.write(" as ") - self.dispatch(node.optional_vars) - with self.block(): - self.dispatch(node.body) + self.traverse(node.orelse) def visit_With(self, node): - self._generic_With(node) + self.fill("with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) def visit_AsyncWith(self, node): - self._generic_With(node, async_=True) + self.fill("async with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) def _str_literal_helper( - self, string, quote_types=_ALL_QUOTES, escape_special_whitespace=False + self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False ): """Helper for writing string literals, minimizing escapes. Returns the tuple (string literal to write, possible quote types). 
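# The quote-selection idea documented above, reduced to a minimal sketch
# (illustration only; the real helper also escapes characters rather than
# merely giving up): repeatedly narrow the candidate quote styles so that
# every part of the final literal can share a single style.
_CANDIDATE_QUOTES = ("'", '"', '"""', "'''")


def pick_quote(parts):
    candidates = list(_CANDIDATE_QUOTES)
    for part in parts:
        narrowed = [q for q in candidates if q not in part]
        candidates = narrowed or candidates  # crude fallback, unlike the real code
    return candidates[0]


print(pick_quote(["it's ", "a test"]))  # -> " (single quotes were ruled out)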
@@ -494,119 +626,172 @@ def escape_char(c): escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1] return escaped_string, possible_quotes - def _write_str_avoiding_backslashes(self, string, quote_types=_ALL_QUOTES): - """Write string literal value w/a best effort attempt to avoid backslashes.""" + def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): + """Write string literal value with a best effort attempt to avoid backslashes.""" string, quote_types = self._str_literal_helper(string, quote_types=quote_types) quote_type = quote_types[0] - self.write("{quote_type}{string}{quote_type}".format(quote_type=quote_type, string=string)) + self.write(f"{quote_type}{string}{quote_type}") - # expr - def visit_Bytes(self, node): - self.write(repr(node.s)) + # Python < 3.8. Num, Str, Bytes, NameConstant, Ellipsis replaced with Constant + # https://github.com/python/cpython/commit/3f22811fef73aec848d961593d95fa877f77ecbf + if sys.version_info < (3, 8): - def visit_Str(self, tree): - # Python 3.5, 3.6, and 3.7 can't tell if something was written as a - # unicode constant. Try to make that consistent with 'u' for '\u- literals - if self._py_ver_consistent and repr(tree.s).startswith("'\\u"): - self.write("u") - self._write_constant(tree.s) + def visit_Num(self, node): + repr_n = repr(node.n) + self.write(repr_n.replace("inf", _INFSTR)) - def visit_JoinedStr(self, node): - # JoinedStr(expr* values) - self.write("f") + def visit_Str(self, node): + self._write_constant(node.s) - if self._avoid_backslashes: - string = StringIO() - self._fstring_JoinedStr(node, string.write) - self._write_str_avoiding_backslashes(string.getvalue()) - return + def visit_Bytes(self, node): + self.write(repr(node.s)) - # If we don't need to avoid backslashes globally (i.e., we only need - # to avoid them inside FormattedValues), it's cosmetically preferred - # to use escaped whitespace. That is, it's preferred to use backslashes - # for cases like: f"{x}\n". To accomplish this, we keep track of what - # in our buffer corresponds to FormattedValues and what corresponds to - # Constant parts of the f-string, and allow escapes accordingly. - buffer = [] - for value in node.values: - meth = getattr(self, "_fstring_" + type(value).__name__) - string = StringIO() - meth(value, string.write) - buffer.append((string.getvalue(), isinstance(value, ast.Constant))) - new_buffer = [] - quote_types = _ALL_QUOTES - for value, is_constant in buffer: - # Repeatedly narrow down the list of possible quote_types - value, quote_types = self._str_literal_helper( - value, quote_types=quote_types, escape_special_whitespace=is_constant - ) - new_buffer.append(value) - value = "".join(new_buffer) + def visit_NameConstant(self, node): + self.write(repr(node.value)) + + def visit_Ellipsis(self, node): + self.write("...") + + def _ftstring_helper(self, parts): + new_parts = [] + quote_types = list(_ALL_QUOTES) + fallback_to_repr = False + for value, is_constant in parts: + # Python 3.12 allows `f'{''}'`. + # But we unparse to `f'{""}'` for < 3.12 compat. 
+ if True: + value, new_quote_types = self._str_literal_helper( + value, quote_types=quote_types, escape_special_whitespace=is_constant + ) + if set(new_quote_types).isdisjoint(quote_types): + fallback_to_repr = True + break + quote_types = new_quote_types + elif "\n" in value: + quote_types = [q for q in quote_types if q in _MULTI_QUOTES] + assert quote_types + new_parts.append(value) + + if fallback_to_repr: + # If we weren't able to find a quote type that works for all parts + # of the JoinedStr, fallback to using repr and triple single quotes. + quote_types = ["'''"] + new_parts.clear() + for value, is_constant in parts: + # Python 3.12 allows `f'{''}'`. + # We need to unparse to `f'{""}'` for < 3.12 compat. + if True: + value = repr('"' + value) # force repr to use single quotes + expected_prefix = "'\"" + assert value.startswith(expected_prefix), repr(value) + value = value[len(expected_prefix) : -1] + new_parts.append(value) + + value = "".join(new_parts) quote_type = quote_types[0] - self.write("{quote_type}{value}{quote_type}".format(quote_type=quote_type, value=value)) + self.write(f"{quote_type}{value}{quote_type}") + + def _write_ftstring(self, node, prefix): + self.write(prefix) + # Python 3.12 added support for backslashes inside format parts. + # We need to keep adding backslashes for python < 3.11 compat. + if self._avoid_backslashes: + with self.buffered() as buffer: + self._write_ftstring_inner(node) + return self._write_str_avoiding_backslashes("".join(buffer)) + fstring_parts = [] + for value in node.values: + with self.buffered() as buffer: + self._write_ftstring_inner(value) + fstring_parts.append(("".join(buffer), _is_str_literal(value))) + self._ftstring_helper(fstring_parts) + + def visit_JoinedStr(self, node): + self._write_ftstring(node, "f") + + def visit_TemplateStr(self, node): + self._write_ftstring(node, "t") + + def _write_ftstring_inner(self, node, is_format_spec=False): + if isinstance(node, JoinedStr): + # for both the f-string itself, and format_spec + for value in node.values: + self._write_ftstring_inner(value, is_format_spec=is_format_spec) + elif isinstance(node, FormattedValue): + self.visit_FormattedValue(node) + elif _is_interpolation(node): + self.visit_Interpolation(node) + else: # str literal + maybe_string = _get_str_literal_value(node) + if maybe_string is None: + raise ValueError(f"Unexpected node inside JoinedStr, {node!r}") + + value = maybe_string.replace("{", "{{").replace("}", "}}") + + if is_format_spec: + value = value.replace("\\", "\\\\") + value = value.replace("'", "\\'") + value = value.replace('"', '\\"') + value = value.replace("\n", "\\n") + self.write(value) + + def _unparse_interpolation_value(self, inner): + # Python <= 3.11 does not support backslashes inside format parts + unparser = type(self)(_avoid_backslashes=True) + unparser.set_precedence(_Precedence.TEST.next(), inner) + return unparser.visit(inner) + + def _write_interpolation(self, node, use_str_attr=False): + with self.delimit("{", "}"): + if use_str_attr: + expr = node.str + else: + expr = self._unparse_interpolation_value(node.value) + # Python <= 3.11 does not support backslash in formats part + if "\\" in expr: + raise ValueError( + "Unable to avoid backslash in f-string expression part (python 3.11)" + ) + if expr.startswith("{"): + # Separate pair of opening brackets as "{ {" + self.write(" ") + self.write(expr) + if node.conversion != -1: + self.write(f"!{chr(node.conversion)}") + if node.format_spec: + self.write(":") + 
self._write_ftstring_inner(node.format_spec, is_format_spec=True) def visit_FormattedValue(self, node): - # FormattedValue(expr value, int? conversion, expr? format_spec) - self.write("f") - string = StringIO() - self._fstring_JoinedStr(node, string.write) - self._write_str_avoiding_backslashes(string.getvalue()) + self._write_interpolation(node) - def _fstring_JoinedStr(self, node, write): - for value in node.values: - meth = getattr(self, "_fstring_" + type(value).__name__) - meth(value, write) - - def _fstring_Str(self, node, write): - value = node.s.replace("{", "{{").replace("}", "}}") - write(value) - - def _fstring_Constant(self, node, write): - assert isinstance(node.value, str) - value = node.value.replace("{", "{{").replace("}", "}}") - write(value) - - def _fstring_FormattedValue(self, node, write): - write("{") - - expr = StringIO() - unparser = type(self)(py_ver_consistent=self._py_ver_consistent, _avoid_backslashes=True) - unparser.set_precedence(pnext(_Precedence.TEST), node.value) - unparser.visit(node.value, expr) - expr = expr.getvalue().rstrip("\n") - - if expr.startswith("{"): - write(" ") # Separate pair of opening brackets as "{ {" - if "\\" in expr: - raise ValueError("Unable to avoid backslash in f-string expression part") - write(expr) - if node.conversion != -1: - conversion = chr(node.conversion) - assert conversion in "sra" - write("!{conversion}".format(conversion=conversion)) - if node.format_spec: - write(":") - meth = getattr(self, "_fstring_" + type(node.format_spec).__name__) - meth(node.format_spec, write) - write("}") + def visit_Interpolation(self, node): + # If `str` is set to `None`, use the `value` to generate the source code. + self._write_interpolation(node, use_str_attr=node.str is not None) def visit_Name(self, node): self.write(node.id) - def visit_NameConstant(self, node): - self.write(repr(node.value)) + def _write_docstring(self, node): + self.fill() + # Don't emit `u""` because it's not avail in python AST <= 3.7 + # Ubuntu 18's Python 3.6 doesn't have "kind" + if not self._py_ver_consistent and getattr(node, "kind", None) == "u": + self.write("u") + # Python 3.8 replaced Str with Constant + value = _get_str_literal_value(node) + if value is None: + raise ValueError(f"Node {node!r} is not a string literal.") + self._write_str_avoiding_backslashes(value, quote_types=_MULTI_QUOTES) def _write_constant(self, value): if isinstance(value, (float, complex)): - # Substitute overflowing decimal literal for AST infinities. - self.write(repr(value).replace("inf", INFSTR)) - elif isinstance(value, str) and self._py_ver_consistent: - # emulate a python 2 repr with raw unicode escapes - # see _Str for python 2 counterpart - raw = repr(value.encode("raw_unicode_escape")).lstrip("b") - if raw.startswith(r"'\\u"): - raw = "'\\" + raw[3:] - self.write(raw) + # Substitute overflowing decimal literal for AST infinities, + # and inf - inf for NaNs. 
+ self.write( + repr(value).replace("inf", _INFSTR).replace("nan", f"({_INFSTR}-{_INFSTR})") + ) + # Python <= 3.11 does not support backslashes inside format parts elif self._avoid_backslashes and isinstance(value, str): self._write_str_avoiding_backslashes(value) else: @@ -617,81 +802,83 @@ def visit_Constant(self, node): if isinstance(value, tuple): with self.delimit("(", ")"): self.items_view(self._write_constant, value) - elif value is Ellipsis: # instead of `...` for Py2 compatibility + elif value is ...: self.write("...") else: - if node.kind == "u": + # Don't emit `u""` because it's not avail in python AST <= 3.7 + # Ubuntu 18's Python 3.6 doesn't have "kind" + if not self._py_ver_consistent and getattr(node, "kind", None) == "u": self.write("u") self._write_constant(node.value) - def visit_Num(self, node): - repr_n = repr(node.n) - self.write(repr_n.replace("inf", INFSTR)) - def visit_List(self, node): with self.delimit("[", "]"): - interleave(lambda: self.write(", "), self.dispatch, node.elts) + self.interleave(lambda: self.write(", "), self.traverse, node.elts) def visit_ListComp(self, node): with self.delimit("[", "]"): - self.dispatch(node.elt) + self.traverse(node.elt) for gen in node.generators: - self.dispatch(gen) + self.traverse(gen) def visit_GeneratorExp(self, node): with self.delimit("(", ")"): - self.dispatch(node.elt) + self.traverse(node.elt) for gen in node.generators: - self.dispatch(gen) + self.traverse(gen) def visit_SetComp(self, node): with self.delimit("{", "}"): - self.dispatch(node.elt) + self.traverse(node.elt) for gen in node.generators: - self.dispatch(gen) + self.traverse(gen) def visit_DictComp(self, node): with self.delimit("{", "}"): - self.dispatch(node.key) + self.traverse(node.key) self.write(": ") - self.dispatch(node.value) + self.traverse(node.value) for gen in node.generators: - self.dispatch(gen) + self.traverse(gen) def visit_comprehension(self, node): - if getattr(node, "is_async", False): + if node.is_async: self.write(" async for ") else: self.write(" for ") self.set_precedence(_Precedence.TUPLE, node.target) - self.dispatch(node.target) + self.traverse(node.target) self.write(" in ") - self.set_precedence(pnext(_Precedence.TEST), node.iter, *node.ifs) - self.dispatch(node.iter) + self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs) + self.traverse(node.iter) for if_clause in node.ifs: self.write(" if ") - self.dispatch(if_clause) + self.traverse(if_clause) def visit_IfExp(self, node): with self.require_parens(_Precedence.TEST, node): - self.set_precedence(pnext(_Precedence.TEST), node.body, node.test) - self.dispatch(node.body) + self.set_precedence(_Precedence.TEST.next(), node.body, node.test) + self.traverse(node.body) self.write(" if ") - self.dispatch(node.test) + self.traverse(node.test) self.write(" else ") self.set_precedence(_Precedence.TEST, node.orelse) - self.dispatch(node.orelse) + self.traverse(node.orelse) def visit_Set(self, node): - assert node.elts # should be at least one element - with self.delimit("{", "}"): - interleave(lambda: self.write(", "), self.dispatch, node.elts) + if node.elts: + with self.delimit("{", "}"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + else: + # `{}` would be interpreted as a dictionary literal, and + # `set` might be shadowed. 
Thus: + self.write("{*()}") def visit_Dict(self, node): def write_key_value_pair(k, v): - self.dispatch(k) + self.traverse(k) self.write(": ") - self.dispatch(v) + self.traverse(v) def write_item(item): k, v = item @@ -700,22 +887,28 @@ def write_item(item): # see PEP 448 for details self.write("**") self.set_precedence(_Precedence.EXPR, v) - self.dispatch(v) + self.traverse(v) else: write_key_value_pair(k, v) with self.delimit("{", "}"): - interleave(lambda: self.write(", "), write_item, zip(node.keys, node.values)) + self.interleave(lambda: self.write(", "), write_item, zip(node.keys, node.values)) def visit_Tuple(self, node): - with self.delimit("(", ")"): - self.items_view(self.dispatch, node.elts) + with self.delimit_if( + "(", + ")", + # Don't drop redundant parenthesis to mimic python <= 3.10 + self._py_ver_consistent + or len(node.elts) == 0 + or self.get_precedence(node) > _Precedence.TUPLE, + ): + self.items_view(self.traverse, node.elts) unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} - unop_precedence = { - "~": _Precedence.FACTOR, "not": _Precedence.NOT, + "~": _Precedence.FACTOR, "+": _Precedence.FACTOR, "-": _Precedence.FACTOR, } @@ -727,10 +920,10 @@ def visit_UnaryOp(self, node): self.write(operator) # factor prefixes (+, -, ~) shouldn't be separated # from the value they belong, (e.g: +1 instead of + 1) - if operator_precedence != _Precedence.FACTOR: + if operator_precedence is not _Precedence.FACTOR: self.write(" ") self.set_precedence(operator_precedence, node.operand) - self.dispatch(node.operand) + self.traverse(node.operand) binop = { "Add": "+", @@ -771,17 +964,17 @@ def visit_BinOp(self, node): operator_precedence = self.binop_precedence[operator] with self.require_parens(operator_precedence, node): if operator in self.binop_rassoc: - left_precedence = pnext(operator_precedence) + left_precedence = operator_precedence.next() right_precedence = operator_precedence else: left_precedence = operator_precedence - right_precedence = pnext(operator_precedence) + right_precedence = operator_precedence.next() self.set_precedence(left_precedence, node.left) - self.dispatch(node.left) - self.write(" %s " % operator) + self.traverse(node.left) + self.write(f" {operator} ") self.set_precedence(right_precedence, node.right) - self.dispatch(node.right) + self.traverse(node.right) cmpops = { "Eq": "==", @@ -798,34 +991,32 @@ def visit_BinOp(self, node): def visit_Compare(self, node): with self.require_parens(_Precedence.CMP, node): - self.set_precedence(pnext(_Precedence.CMP), node.left, *node.comparators) - self.dispatch(node.left) + self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) + self.traverse(node.left) for o, e in zip(node.ops, node.comparators): self.write(" " + self.cmpops[o.__class__.__name__] + " ") - self.dispatch(e) + self.traverse(e) boolops = {"And": "and", "Or": "or"} - boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR} def visit_BoolOp(self, node): operator = self.boolops[node.op.__class__.__name__] + operator_precedence = self.boolop_precedence[operator] - # use a dict instead of nonlocal for Python 2 compatibility - op = {"precedence": self.boolop_precedence[operator]} + def increasing_level_traverse(node): + nonlocal operator_precedence + operator_precedence = operator_precedence.next() + self.set_precedence(operator_precedence, node) + self.traverse(node) - def increasing_level_dispatch(node): - op["precedence"] = pnext(op["precedence"]) - self.set_precedence(op["precedence"], node) - self.dispatch(node) 
- - with self.require_parens(op["precedence"], node): - s = " %s " % operator - interleave(lambda: self.write(s), increasing_level_dispatch, node.values) + with self.require_parens(operator_precedence, node): + s = f" {operator} " + self.interleave(lambda: self.write(s), increasing_level_traverse, node.values) def visit_Attribute(self, node: ast.Attribute): self.set_precedence(_Precedence.ATOM, node.value) - self.dispatch(node.value) + self.traverse(node.value) # Special case: 3.__abs__() is a syntax error, so if node.value # is an integer literal then we need to either parenthesize # it or add an extra space to get 3 .__abs__(). @@ -836,82 +1027,82 @@ def visit_Attribute(self, node: ast.Attribute): def visit_Call(self, node): self.set_precedence(_Precedence.ATOM, node.func) - - args = node.args - self.dispatch(node.func) - + self.traverse(node.func) with self.delimit("(", ")"): comma = False - - # NOTE: this code is no longer compatible with python versions 2.7:3.4 - # If you run on python@:3.4, you will see instability in package hashes - # across python versions - - for e in args: + for e in node.args: if comma: self.write(", ") else: comma = True - self.dispatch(e) - + self.traverse(e) for e in node.keywords: if comma: self.write(", ") else: comma = True - self.dispatch(e) + self.traverse(e) def visit_Subscript(self, node): + def is_non_empty_tuple(slice_value): + return isinstance(slice_value, Tuple) and slice_value.elts + self.set_precedence(_Precedence.ATOM, node.value) - self.dispatch(node.value) + self.traverse(node.value) with self.delimit("[", "]"): - if is_simple_tuple(node.slice): - self.items_view(self.dispatch, node.slice.elts) + # Python >= 3.11 supports `a[42, *b]` (same AST as a[(42, *b)]), + # but this is syntax error in 3.10. + # So, always emit parenthesis `a[(42, *b)]` + if is_non_empty_non_star_tuple(node.slice): + self.items_view(self.traverse, node.slice.elts) else: - self.dispatch(node.slice) + self.traverse(node.slice) def visit_Starred(self, node): self.write("*") self.set_precedence(_Precedence.EXPR, node.value) - self.dispatch(node.value) - - # slice - def visit_Ellipsis(self, node): - self.write("...") + self.traverse(node.value) - # used in Python <= 3.8 -- see _Subscript for 3.9+ + # Python 3.9 simplified Subscript(Index(value)) to Subscript(value) + # https://github.com/python/cpython/commit/13d52c268699f199a8e917a0f1dc4c51e5346c42 def visit_Index(self, node): - if is_simple_tuple(node.value): - self.set_precedence(_Precedence.ATOM, node.value) - self.items_view(self.dispatch, node.value.elts) + if is_non_empty_non_star_tuple(node.value): + self.items_view(self.traverse, node.value.elts) else: - self.set_precedence(_Precedence.TUPLE, node.value) - self.dispatch(node.value) + self.traverse(node.value) def visit_Slice(self, node): if node.lower: - self.dispatch(node.lower) + self.traverse(node.lower) self.write(":") if node.upper: - self.dispatch(node.upper) + self.traverse(node.upper) if node.step: self.write(":") - self.dispatch(node.step) + self.traverse(node.step) + + def visit_Match(self, node): + self.fill("match ") + self.traverse(node.subject) + with self.block(): + for case in node.cases: + self.traverse(case) + # Python 3.9 replaced ExtSlice(slices) with Tuple(slices, Load()) + # https://github.com/python/cpython/commit/13d52c268699f199a8e917a0f1dc4c51e5346c42 def visit_ExtSlice(self, node): - interleave(lambda: self.write(", "), self.dispatch, node.dims) + self.interleave(lambda: self.write(", "), self.traverse, node.dims) - # argument def 
visit_arg(self, node): self.write(node.arg) if node.annotation: self.write(": ") - self.dispatch(node.annotation) + self.traverse(node.annotation) - # others def visit_arguments(self, node): first = True # normal arguments + # Python 3.8 introduced position-only arguments (PEP 570) all_args = getattr(node, "posonlyargs", []) + node.args defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults for index, elements in enumerate(zip(all_args, defaults), 1): @@ -920,15 +1111,16 @@ def visit_arguments(self, node): first = False else: self.write(", ") - self.dispatch(a) + self.traverse(a) if d: self.write("=") - self.dispatch(d) + self.traverse(d) + # Python 3.8 introduced position-only arguments (PEP 570) if index == len(getattr(node, "posonlyargs", ())): self.write(", /") # varargs, or bare '*' if no varargs but keyword-only arguments present - if node.vararg or getattr(node, "kwonlyargs", False): + if node.vararg or node.kwonlyargs: if first: first = False else: @@ -938,19 +1130,16 @@ def visit_arguments(self, node): self.write(node.vararg.arg) if node.vararg.annotation: self.write(": ") - self.dispatch(node.vararg.annotation) + self.traverse(node.vararg.annotation) # keyword-only arguments - if getattr(node, "kwonlyargs", False): + if node.kwonlyargs: for a, d in zip(node.kwonlyargs, node.kw_defaults): - if first: - first = False - else: - self.write(", ") - self.dispatch(a), + self.write(", ") + self.traverse(a) if d: self.write("=") - self.dispatch(d) + self.traverse(d) # kwargs if node.kwarg: @@ -961,24 +1150,28 @@ def visit_arguments(self, node): self.write("**" + node.kwarg.arg) if node.kwarg.annotation: self.write(": ") - self.dispatch(node.kwarg.annotation) + self.traverse(node.kwarg.annotation) def visit_keyword(self, node): if node.arg is None: - # starting from Python 3.5 this denotes a kwargs part of the invocation self.write("**") else: self.write(node.arg) self.write("=") - self.dispatch(node.value) + self.traverse(node.value) def visit_Lambda(self, node): with self.require_parens(_Precedence.TEST, node): - self.write("lambda ") - self.dispatch(node.args) + self.write("lambda") + with self.buffered() as buffer: + self.traverse(node.args) + # Don't omit extra space to keep old package hash + # (extra space was removed in python 3.11) + if buffer or self._py_ver_consistent: + self.write(" ", *buffer) self.write(": ") self.set_precedence(_Precedence.TEST, node.body) - self.dispatch(node.body) + self.traverse(node.body) def visit_alias(self, node): self.write(node.name) @@ -986,77 +1179,78 @@ def visit_alias(self, node): self.write(" as " + node.asname) def visit_withitem(self, node): - self.dispatch(node.context_expr) + self.traverse(node.context_expr) if node.optional_vars: self.write(" as ") - self.dispatch(node.optional_vars) - - def visit_Match(self, node): - self.fill("match ") - self.dispatch(node.subject) - with self.block(): - for case in node.cases: - self.dispatch(case) + self.traverse(node.optional_vars) def visit_match_case(self, node): self.fill("case ") - self.dispatch(node.pattern) + self.traverse(node.pattern) if node.guard: self.write(" if ") - self.dispatch(node.guard) + self.traverse(node.guard) with self.block(): - self.dispatch(node.body) + self.traverse(node.body) def visit_MatchValue(self, node): - self.dispatch(node.value) + self.traverse(node.value) def visit_MatchSingleton(self, node): self._write_constant(node.value) def visit_MatchSequence(self, node): with self.delimit("[", "]"): - interleave(lambda: self.write(", "), self.dispatch, 
node.patterns) + self.interleave(lambda: self.write(", "), self.traverse, node.patterns) def visit_MatchStar(self, node): name = node.name if name is None: name = "_" - self.write("*{}".format(name)) + self.write(f"*{name}") def visit_MatchMapping(self, node): def write_key_pattern_pair(pair): k, p = pair - self.dispatch(k) + self.traverse(k) self.write(": ") - self.dispatch(p) + self.traverse(p) with self.delimit("{", "}"): keys = node.keys - interleave(lambda: self.write(", "), write_key_pattern_pair, zip(keys, node.patterns)) + self.interleave( + lambda: self.write(", "), + write_key_pattern_pair, + # (zip strict is >= Python 3.10) + zip(keys, node.patterns), + ) rest = node.rest if rest is not None: if keys: self.write(", ") - self.write("**{}".format(rest)) + self.write(f"**{rest}") def visit_MatchClass(self, node): self.set_precedence(_Precedence.ATOM, node.cls) - self.dispatch(node.cls) + self.traverse(node.cls) with self.delimit("(", ")"): patterns = node.patterns - interleave(lambda: self.write(", "), self.dispatch, patterns) + self.interleave(lambda: self.write(", "), self.traverse, patterns) attrs = node.kwd_attrs if attrs: def write_attr_pattern(pair): attr, pattern = pair - self.write("{}=".format(attr)) - self.dispatch(pattern) + self.write(f"{attr}=") + self.traverse(pattern) if patterns: self.write(", ") - interleave( - lambda: self.write(", "), write_attr_pattern, zip(attrs, node.kwd_patterns) + self.interleave( + lambda: self.write(", "), + write_attr_pattern, + # (zip strict is >= Python 3.10) + zip(attrs, node.kwd_patterns), ) def visit_MatchAs(self, node): @@ -1069,37 +1263,13 @@ def visit_MatchAs(self, node): else: with self.require_parens(_Precedence.TEST, node): self.set_precedence(_Precedence.BOR, node.pattern) - self.dispatch(node.pattern) - self.write(" as {}".format(node.name)) + self.traverse(node.pattern) + self.write(f" as {node.name}") def visit_MatchOr(self, node): with self.require_parens(_Precedence.BOR, node): - self.set_precedence(pnext(_Precedence.BOR), *node.patterns) - interleave(lambda: self.write(" | "), self.dispatch, node.patterns) - - def visit_TypeAlias(self, node): - self.fill("type ") - self.dispatch(node.name) - if node.type_params: - self.write("[") - interleave(lambda: self.write(", "), self.dispatch, node.type_params) - self.write("]") - self.write(" = ") - self.dispatch(node.value) - - def visit_TypeVar(self, node): - self.write(node.name) - if node.bound: - self.write(": ") - self.dispatch(node.bound) - - def visit_TypeVarTuple(self, node): - self.write("*") - self.write(node.name) - - def visit_ParamSpec(self, node): - self.write("**") - self.write(node.name) + self.set_precedence(_Precedence.BOR.next(), *node.patterns) + self.interleave(lambda: self.write(" | "), self.traverse, node.patterns) if sys.version_info >= (3, 8): @@ -1108,8 +1278,39 @@ def _is_int_literal(node: ast.AST) -> bool: """Check if a node represents a literal int.""" return isinstance(node, ast.Constant) and isinstance(node.value, int) + def _is_str_literal(node: ast.AST) -> bool: + """Check if a node represents a literal str.""" + return isinstance(node, ast.Constant) and isinstance(node.value, str) + + def _get_str_literal_value(node: ast.AST) -> Optional[str]: + """Get the string value of a literal str node.""" + if isinstance(node, ast.Constant) and isinstance(node.value, str): + return node.value + return None + else: def _is_int_literal(node: ast.AST) -> bool: """Check if a node represents a literal int.""" return isinstance(node, ast.Num) and 
isinstance(node.n, int) + + def _is_str_literal(node: ast.AST) -> bool: + """Check if a node represents a literal str.""" + return isinstance(node, ast.Str) + + def _get_str_literal_value(node: ast.AST) -> Optional[str]: + """Get the string value of a literal str node.""" + return node.s if isinstance(node, ast.Str) else None + + +if sys.version_info >= (3, 14): + + def _is_interpolation(node: ast.AST) -> bool: + """Check if a node represents a template string literal.""" + return isinstance(node, ast.Interpolation) + +else: + + def _is_interpolation(node: ast.AST) -> bool: + """Check if a node represents a template string literal.""" + return False diff --git a/lib/spack/spack/util/url.py b/lib/spack/spack/util/url.py index 9fc3b361541fec..85625e98b80ed2 100644 --- a/lib/spack/spack/util/url.py +++ b/lib/spack/spack/util/url.py @@ -6,11 +6,11 @@ Utility functions for parsing, formatting, and manipulating URLs. """ -import os import posixpath import re import urllib.parse import urllib.request +from pathlib import Path from typing import Optional from spack.util.path import sanitize_filename @@ -27,7 +27,7 @@ def validate_scheme(scheme): def local_file_path(url): """Get a local file path from a url. - If url is a file:// URL, return the absolute path to the local + If url is a ``file://`` URL, return the absolute path to the local file or directory referenced by it. Otherwise, return None. """ if isinstance(url, str): @@ -40,9 +40,7 @@ def local_file_path(url): def path_to_file_url(path): - if not os.path.isabs(path): - path = os.path.abspath(path) - return urllib.parse.urljoin("file:", urllib.request.pathname2url(path)) + return Path(path).absolute().as_uri() def file_url_string_to_path(url): @@ -83,17 +81,17 @@ def join(base: str, *components: str, resolve_href: bool = False, **kwargs) -> s parsed = urllib.parse.urlparse(base) if not parsed.path.endswith("/"): base = parsed._replace(path=f"{parsed.path}/").geturl() - uses_netloc = urllib.parse.uses_netloc - uses_relative = urllib.parse.uses_relative + old_netloc = urllib.parse.uses_netloc + old_relative = urllib.parse.uses_relative try: # NOTE: we temporarily modify urllib internals so s3 and gs schemes are treated like http. # This is non-portable, and may be forward incompatible with future cpython versions. 
- urllib.parse.uses_netloc = [*uses_netloc, "s3", "gs", "oci"] - urllib.parse.uses_relative = [*uses_relative, "s3", "gs", "oci"] + urllib.parse.uses_netloc = [*old_netloc, "s3", "gs", "oci", "oci+http"] # type: ignore + urllib.parse.uses_relative = [*old_relative, "s3", "gs", "oci", "oci+http"] # type: ignore return urllib.parse.urljoin(base, "/".join(components), **kwargs) finally: - urllib.parse.uses_netloc = uses_netloc - urllib.parse.uses_relative = uses_relative + urllib.parse.uses_netloc = old_netloc # type: ignore + urllib.parse.uses_relative = old_relative # type: ignore def default_download_filename(url: str) -> str: diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py index 6246a9a0fc68fa..550cb5225690a9 100644 --- a/lib/spack/spack/util/web.py +++ b/lib/spack/spack/util/web.py @@ -2,9 +2,9 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import codecs import email.message import errno +import io import json import os import re @@ -56,11 +56,35 @@ def __reduce__(self): return DetailedHTTPError, (self.req, self.code, self.msg, self.hdrs, None) +class DetailedURLError(URLError): + def __init__(self, req: Request, reason): + super().__init__(reason) + self.req = req + + def __str__(self): + return f"{self.req.get_method()} {self.req.get_full_url()} errored with: {self.reason}" + + def __reduce__(self): + return DetailedURLError, (self.req, self.reason) + + class SpackHTTPDefaultErrorHandler(HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, hdrs): raise DetailedHTTPError(req, code, msg, hdrs, fp) +class SpackHTTPSHandler(HTTPSHandler): + """A custom HTTPS handler that shows more detailed error messages on connection failure.""" + + def https_open(self, req): + try: + return super().https_open(req) + except HTTPError: + raise + except URLError as e: + raise DetailedURLError(req, e.reason) from e + + def custom_ssl_certs() -> Optional[Tuple[bool, str]]: """Returns a tuple (is_file, path) if custom SSL certifates are configured and valid.""" ssl_certs = spack.config.get("config:ssl_certs") @@ -120,12 +144,12 @@ def _urlopen(): # One opener with HTTPS ssl enabled with_ssl = build_opener( - s3, gcs, HTTPSHandler(context=ssl_create_default_context()), error_handler + s3, gcs, SpackHTTPSHandler(context=ssl_create_default_context()), error_handler ) # One opener with HTTPS ssl disabled without_ssl = build_opener( - s3, gcs, HTTPSHandler(context=ssl._create_unverified_context()), error_handler + s3, gcs, SpackHTTPSHandler(context=ssl._create_unverified_context()), error_handler ) # And dynamically dispatch based on the config:verify_ssl. @@ -283,8 +307,8 @@ def base_curl_fetch_args(url, timeout=0): It also uses the following configuration option to set an additional argument as needed: - * config:connect_timeout (int): connection timeout - * config:verify_ssl (str): Perform SSL verification + * config:connect_timeout (int): connection timeout + * config:verify_ssl (str): Perform SSL verification Arguments: url (str): URL whose contents will be fetched @@ -358,13 +382,13 @@ def fetch_url_text(url, curl: Optional[Executable] = None, dest_dir="."): """Retrieves text-only URL content using the configured fetch method. 
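The SpackHTTPSHandler above follows the standard urllib.request opener-chain pattern: subclass a handler, catch the low-level error, and re-raise it with the request context attached. A minimal self-contained sketch of that pattern (hypothetical class name and URL, no Spack internals):

import ssl
import urllib.request
from urllib.error import HTTPError, URLError

class VerboseHTTPSHandler(urllib.request.HTTPSHandler):
    """Re-raise connection failures with method and URL included."""

    def https_open(self, req):
        try:
            return super().https_open(req)
        except HTTPError:
            raise  # HTTP-level errors already carry enough context
        except URLError as e:
            raise URLError(
                f"{req.get_method()} {req.get_full_url()} errored with: {e.reason}"
            ) from e

opener = urllib.request.build_opener(
    VerboseHTTPSHandler(context=ssl.create_default_context())
)
# opener.open("https://example.invalid/")  # would raise URLError naming the full URL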
It determines the fetch method from: - * config:url_fetch_method (str): fetch method to use (e.g., 'curl') + * config:url_fetch_method (str): fetch method to use (e.g., 'curl') - If the method is `curl`, it also uses the following configuration + If the method is ``curl``, it also uses the following configuration options: - * config:connect_timeout (int): connection time out - * config:verify_ssl (str): Perform SSL verification + * config:connect_timeout (int): connection time out + * config:verify_ssl (str): Perform SSL verification Arguments: url (str): URL whose contents are to be fetched @@ -403,7 +427,7 @@ def fetch_url_text(url, curl: Optional[Executable] = None, dest_dir="."): try: _, _, response = read_from_url(url) - output = codecs.getreader("utf-8")(response).read() + output = io.TextIOWrapper(response, encoding="utf-8").read() if output: with working_dir(dest_dir, create=True): with open(filename, "w", encoding="utf-8") as f: @@ -420,9 +444,9 @@ def fetch_url_text(url, curl: Optional[Executable] = None, dest_dir="."): def url_exists(url, curl=None): """Determines whether url exists. - A scheme-specific process is used for Google Storage (`gs`) and Amazon - Simple Storage Service (`s3`) URLs; otherwise, the configured fetch - method defined by `config:url_fetch_method` is used. + A scheme-specific process is used for Google Storage (``gs``) and Amazon + Simple Storage Service (``s3``) URLs; otherwise, the configured fetch + method defined by ``config:url_fetch_method`` is used. Arguments: url (str): URL whose existence is being checked @@ -604,6 +628,46 @@ def list_url(url, recursive=False): return gcs.get_all_blobs(recursive=recursive) +def stat_url(url: str) -> Optional[Tuple[int, float]]: + """Get stat result for a URL. + + Args: + url: URL to get stat result for + Returns: + A tuple of (size, mtime) if the URL exists, None otherwise. 
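A usage sketch for a helper with this contract, reimplemented standalone for the file:// branch only (the s3 branch needs a live session and is omitted here):

import pathlib
import urllib.parse
import urllib.request
from typing import Optional, Tuple

def stat_file_url(url: str) -> Optional[Tuple[int, float]]:
    """Return (size, mtime) for a file:// URL, or None if the file is missing."""
    parsed = urllib.parse.urlparse(url)
    assert parsed.scheme == "file"
    path = urllib.request.url2pathname(parsed.path)
    try:
        st = pathlib.Path(path).stat()
    except FileNotFoundError:
        return None
    return st.st_size, st.st_mtime

# Round-trip through the same as_uri() conversion used by path_to_file_url above.
print(stat_file_url(pathlib.Path(__file__).absolute().as_uri()))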
+ """ + parsed_url = urllib.parse.urlparse(url) + + if parsed_url.scheme == "file": + local_file_path = url_util.local_file_path(parsed_url) + assert isinstance(local_file_path, str) + try: + url_stat = Path(local_file_path).stat() + except FileNotFoundError: + return None + return url_stat.st_size, url_stat.st_mtime + + elif parsed_url.scheme == "s3": + s3_bucket = parsed_url.netloc + s3_key = parsed_url.path.lstrip("/") + + s3 = get_s3_session(url, method="fetch") + + try: + head_request = s3.head_object(Bucket=s3_bucket, Key=s3_key) + except s3.ClientError as e: + if e.response["Error"]["Code"] == "404": + return None + raise e + + mtime = head_request["LastModified"].timestamp() + size = head_request["ContentLength"] + return size, mtime + + else: + raise NotImplementedError(f"Unrecognized URL scheme: {parsed_url.scheme}") + + def spider( root_urls: Union[str, Iterable[str]], depth: int = 0, concurrency: Optional[int] = None ): @@ -681,7 +745,7 @@ def _spider(url: urllib.parse.ParseResult, collect_nested: bool, _visited: Set[s if not response_url or not response: return pages, links, subcalls, _visited - page = codecs.getreader("utf-8")(response).read() + page = io.TextIOWrapper(response, encoding="utf-8").read() pages[response_url] = page # Parse out the include-fragments in the page @@ -708,7 +772,7 @@ def _spider(url: urllib.parse.ParseResult, collect_nested: bool, _visited: Set[s if not fragment_response_url or not fragment_response: continue - fragment = codecs.getreader("utf-8")(fragment_response).read() + fragment = io.TextIOWrapper(fragment_response, encoding="utf-8").read() fragments.add(fragment) pages[fragment_response_url] = fragment diff --git a/lib/spack/spack/util/windows_registry.py b/lib/spack/spack/util/windows_registry.py index f03b0fecb2dd2c..12d2b3b2359ad7 100644 --- a/lib/spack/spack/util/windows_registry.py +++ b/lib/spack/spack/util/windows_registry.py @@ -293,7 +293,7 @@ def get_matching_subkeys(self, subkey_name): """Returns all subkeys regex matching subkey name Note: this method obtains only direct subkeys of the given key and does not - desced to transtitve subkeys. For this behavior, see `find_matching_subkeys`""" + descend to transitive subkeys. For this behavior, see ``find_matching_subkeys``""" self._regex_match_subkeys(subkey_name) def get_values(self): @@ -312,7 +312,7 @@ def _traverse_subkeys(self, stop_condition, collect_all_matching=False, recursiv all keys meeting stop condition. If false, once stop condition is met, the key that triggered the condition ' is returned. 
- recusrive: boolean value, if True perform a recursive search of subkeys + recursive: boolean value, if True perform a recursive search of subkeys Return: the key if stop_condition is triggered, or None if not """ @@ -331,13 +331,13 @@ def _traverse_subkeys(self, stop_condition, collect_all_matching=False, recursiv queue.extend(key.subkeys) return collection if collection else None - def find_subkey(self, subkey_name, recursive=True): + def find_subkey(self, subkey_name: str, recursive: bool = True): """Perform a BFS of subkeys until desired key is found Returns None or RegistryKey object corresponding to requested key name Args: - subkey_name (str): subkey to be searched for - recursive (bool): perform a recursive search + subkey_name: subkey to be searched for + recursive: perform a recursive search Return: the desired subkey as a RegistryKey object, or none """ @@ -345,13 +345,13 @@ def find_subkey(self, subkey_name, recursive=True): WindowsRegistryView.KeyMatchConditions.name_matcher(subkey_name), recursive=recursive ) - def find_matching_subkey(self, subkey_name, recursive=True): + def find_matching_subkey(self, subkey_name: str, recursive: bool = True): """Perform a BFS of subkeys until a key matching subkey name regex is found Returns None or the first RegistryKey object corresponding to requested key name Args: - subkey_name (str): subkey to be searched for - recursive (bool): perform a recursive search + subkey_name: subkey to be searched for + recursive: perform a recursive search Return: the desired subkey as a RegistryKey object, or none """ @@ -359,12 +359,12 @@ def find_matching_subkey(self, subkey_name, recursive=True): WindowsRegistryView.KeyMatchConditions.regex_matcher(subkey_name), recursive=recursive ) - def find_subkeys(self, subkey_name, recursive=True): + def find_subkeys(self, subkey_name: str, recursive: bool = True): """Exactly the same as find_subkey, except this function tries to match a regex to multiple keys Args: - subkey_name (str) + subkey_name: subkey to be searched for Return: the desired subkeys as a list of RegistryKey object, or none """ @@ -373,14 +373,14 @@ def find_subkeys(self, subkey_name, recursive=True): WindowsRegistryView.KeyMatchConditions.regex_matcher(subkey_name), **kwargs ) - def find_value(self, val_name, recursive=True): + def find_value(self, val_name: str, recursive: bool = True): """ If non recursive, return RegistryValue object corresponding to name Args: - val_name (str): name of value desired from registry - recursive (bool): optional argument, if True, the registry is searched recursively - for the value of name val_name, else only the current key is searched + val_name: name of value desired from registry + recursive: optional argument, if True, the registry is searched recursively + for the value of name val_name, else only the current key is searched Return: The desired registry value as a RegistryValue object if it exists, otherwise, None """ diff --git a/lib/spack/spack/variant.py b/lib/spack/spack/variant.py index 7271ca05d55d7c..30a0581e3a5d15 100644 --- a/lib/spack/spack/variant.py +++ b/lib/spack/spack/variant.py @@ -10,14 +10,29 @@ import functools import inspect import itertools -from typing import Any, Callable, Collection, Iterable, List, Optional, Tuple, Type, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + Iterable, + List, + Optional, + Set, + Tuple, + Type, + Union, +) import spack.error import spack.llnl.util.lang as lang import spack.llnl.util.tty.color -import spack.spec import 
spack.spec_parser +if TYPE_CHECKING: + import spack.package_base + import spack.spec + #: These are variant names used by Spack internally; packages can't use them RESERVED_NAMES = { "arch", @@ -42,6 +57,7 @@ class VariantType(enum.IntEnum): BOOL = 1 SINGLE = 2 MULTI = 3 + INDICATOR = 4 # special type for placeholder variant values @property def string(self) -> str: @@ -50,7 +66,10 @@ def string(self) -> str: return "bool" elif self == VariantType.SINGLE: return "single" - return "multi" + elif self == VariantType.MULTI: + return "multi" + else: + return "indicator" class Variant: @@ -134,6 +153,9 @@ def isa_type(v): self.sticky = sticky self.precedence = precedence + def values_defined_by_validator(self) -> bool: + return self.values is None + def validate_or_raise(self, vspec: "VariantValue", pkg_name: str): """Validate a variant spec against this package variant. Raises an exception if any error is found. @@ -482,6 +504,13 @@ def BoolValuedVariant(name: str, value: bool, propagate: bool = False) -> Varian return VariantValue(VariantType.BOOL, name, (value,), propagate=propagate) +class VariantValueRemoval(VariantValue): + """Indicator class for Spec.mutate to remove a variant""" + + def __init__(self, name): + super().__init__(VariantType.INDICATOR, name, (None,)) + + # The class below inherit from Sequence to disguise as a tuple and comply # with the semantic expected by the 'values' argument of the variant directive class DisjointSetsOfValues(collections.abc.Sequence): @@ -494,23 +523,27 @@ class DisjointSetsOfValues(collections.abc.Sequence): *sets (list): mutually exclusive sets of values """ - _empty_set = {"none"} + _empty_set = ("none",) def __init__(self, *sets: Tuple[str, ...]) -> None: - self.sets = [set(_flatten(x)) for x in sets] + self.sets = [tuple(_flatten(x)) for x in sets] - # 'none' is a special value and can appear only in a set of - # a single element - if any("none" in s and s != {"none"} for s in self.sets): + # 'none' is a special value and can appear only in a set of a single element + if any("none" in s and s != self._empty_set for s in self.sets): raise spack.error.SpecError( - "The value 'none' represents the empty set," - " and must appear alone in a set. Use the " - "method 'allow_empty_set' to add it." + "The value 'none' represents the empty set, and must appear alone in a set. " + "Use the method 'allow_empty_set' to add it." 
) # Sets should not intersect with each other - if any(s1 & s2 for s1, s2 in itertools.combinations(self.sets, 2)): - raise spack.error.SpecError("sets in input must be disjoint") + cumulated: Set[str] = set() + for current_set in self.sets: + if not cumulated.isdisjoint(current_set): + duplicates = ", ".join(sorted(cumulated.intersection(current_set))) + raise spack.error.SpecError( + f"sets in input must be disjoint, but {duplicates} appeared more than once" + ) + cumulated.update(current_set) #: Attribute used to track values which correspond to #: features which can be enabled or disabled as understood by the @@ -568,7 +601,7 @@ def __getitem__(self, idx): return tuple(itertools.chain.from_iterable(self.sets))[idx] def __len__(self): - return len(itertools.chain.from_iterable(self.sets)) + return sum(len(x) for x in self.sets) @property def validator(self): @@ -585,8 +618,8 @@ def _disjoint_set_validator(pkg_name, variant_name, values): return _disjoint_set_validator -def _a_single_value_or_a_combination(single_value, *values): - error = "the value '" + single_value + "' is mutually exclusive with any of the other values" +def _a_single_value_or_a_combination(single_value: str, *values: str) -> DisjointSetsOfValues: + error = f"the value '{single_value}' is mutually exclusive with any of the other values" return ( DisjointSetsOfValues((single_value,), values) .with_default(single_value) @@ -600,13 +633,15 @@ def _a_single_value_or_a_combination(single_value, *values): # TODO: a common namespace (like 'multi') in the future. -def any_combination_of(*values): +def any_combination_of(*values: str) -> DisjointSetsOfValues: """Multi-valued variant that allows either any combination of the specified values, or none - at all (using ``variant=none``). The literal value ``none`` is used as sentinel for the empty - set, since in the spec DSL we have to always specify a value for a variant. + at all (using ``variant=none``). The literal value ``none`` is used as sentinel for the empty + set, since in the spec DSL we have to always specify a value for a variant. It is up to the package implementation to handle the value ``none`` specially, if at all. + See also :func:`auto_or_any_combination_of` and :func:`disjoint_sets`. + Args: *values: allowed variant values @@ -620,9 +655,11 @@ def any_combination_of(*values): return _a_single_value_or_a_combination("none", *values) -def auto_or_any_combination_of(*values): - """Multi-valued variant that allows any combination of a set of values - (but not the empty set) or `"auto"`. +def auto_or_any_combination_of(*values: str) -> DisjointSetsOfValues: + """Multi-valued variant that allows any combination of a set of values (but not the empty set) + or ``auto``. + + See also :func:`any_combination_of` and :func:`disjoint_sets`. Args: *values: allowed variant values @@ -640,18 +677,16 @@ def auto_or_any_combination_of(*values): return _a_single_value_or_a_combination("auto", *values) -#: Multi-valued variant that allows any combination picking -#: from one of multiple disjoint sets -def disjoint_sets(*sets): - """Multi-valued variant that allows any combination picking from one - of multiple disjoint sets of values, and also allows the user to specify - 'none' (as a string) to choose none of them. +def disjoint_sets(*sets: Tuple[str, ...]) -> DisjointSetsOfValues: + """Multi-valued variant that allows any combination picking from one of multiple disjoint sets + of values, and also allows the user to specify ``none`` to choose none of them. 
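The disjointness check earlier in this hunk is a one-pass accumulation: keep a running union of everything seen so far and report the overlap before merging each new set. The same idiom in isolation (the variant values are made up):

from typing import Set, Tuple

def check_disjoint(*sets: Tuple[str, ...]) -> None:
    cumulated: Set[str] = set()
    for current_set in sets:
        if not cumulated.isdisjoint(current_set):
            duplicates = ", ".join(sorted(cumulated.intersection(current_set)))
            raise ValueError(
                f"sets in input must be disjoint, but {duplicates} appeared more than once"
            )
        cumulated.update(current_set)

check_disjoint(("mpi",), ("serial", "threads"))  # fine
# check_disjoint(("mpi",), ("mpi", "serial"))    # raises: mpi appears twice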
+ + It is up to the package implementation to handle the value ``none`` specially, if at all. - It is up to the package implementation to handle the value 'none' - specially, if at all. + See also :func:`any_combination_of` and :func:`auto_or_any_combination_of`. Args: - *sets: + *sets: sets of allowed values, each set is a tuple of strings Returns: a properly initialized instance of :class:`~spack.variant.DisjointSetsOfValues` diff --git a/lib/spack/spack/vendor/altgraph/__init__.py b/lib/spack/spack/vendor/altgraph/__init__.py index 45ce7bfe5f88d9..0fb21d778846e6 100644 --- a/lib/spack/spack/vendor/altgraph/__init__.py +++ b/lib/spack/spack/vendor/altgraph/__init__.py @@ -139,9 +139,8 @@ @contributor: U{Reka Albert } """ -import pkg_resources -__version__ = pkg_resources.require("spack.vendor.altgraph")[0].version +__version__ = "0.17.3" class GraphError(ValueError): diff --git a/lib/spack/spack/vendor/archspec/cpu/alias.py b/lib/spack/spack/vendor/archspec/cpu/alias.py index 783a67d3ea9563..2424c930d12d68 100644 --- a/lib/spack/spack/vendor/archspec/cpu/alias.py +++ b/lib/spack/spack/vendor/archspec/cpu/alias.py @@ -3,9 +3,11 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Aliases for microarchitecture features.""" +from typing import Callable, Dict + from .schema import TARGETS_JSON, LazyDictionary -_FEATURE_ALIAS_PREDICATE = {} +_FEATURE_ALIAS_PREDICATE: Dict[str, Callable] = {} class FeatureAliasTest: diff --git a/lib/spack/spack/vendor/archspec/cpu/detect.py b/lib/spack/spack/vendor/archspec/cpu/detect.py index f9f095f78a6c05..a6fa6a3549790a 100644 --- a/lib/spack/spack/vendor/archspec/cpu/detect.py +++ b/lib/spack/spack/vendor/archspec/cpu/detect.py @@ -10,7 +10,7 @@ import struct import subprocess import warnings -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union from ..vendor.cpuid.cpuid import CPUID from .microarchitecture import TARGETS, Microarchitecture, generic_microarchitecture @@ -22,7 +22,7 @@ #: Mapping from micro-architecture families (x86_64, ppc64le, etc.) to #: functions checking the compatibility of the host with a given target -COMPATIBILITY_CHECKS = {} +COMPATIBILITY_CHECKS: Dict[str, Callable[[Microarchitecture, Microarchitecture], bool]] = {} # Constants for commonly used architectures X86_64 = "x86_64" @@ -68,7 +68,7 @@ def partial_uarch( @detection(operating_system="Linux") def proc_cpuinfo() -> Microarchitecture: """Returns a partial Microarchitecture, obtained from scanning ``/proc/cpuinfo``""" - data = {} + data: Dict[str, Any] = {} with open("/proc/cpuinfo") as file: # pylint: disable=unspecified-encoding for line in file: key, separator, value = line.partition(":") @@ -100,12 +100,15 @@ def proc_cpuinfo() -> Microarchitecture: if architecture in (PPC64LE, PPC64): generation_match = re.search(r"POWER(\d+)", data.get("cpu", "")) + # There might be no match under emulated environments. For instance + # emulating a ppc64le with QEMU and Docker still reports the host + # /proc/cpuinfo and not a Power + if generation_match is None: + return partial_uarch(generation=0) + try: generation = int(generation_match.group(1)) - except AttributeError: - # There might be no match under emulated environments. 
For instance - # emulating a ppc64le with QEMU and Docker still reports the host - # /proc/cpuinfo and not a Power + except ValueError: generation = 0 return partial_uarch(generation=generation) @@ -210,7 +213,7 @@ def _check_output(args, env): } -def _machine(): +def _machine() -> str: """Return the machine architecture we are on""" operating_system = platform.system() @@ -246,25 +249,31 @@ def sysctl_info() -> Microarchitecture: child_environment = _ensure_bin_usrbin_in_path() def sysctl(*args: str) -> str: - return _check_output(["sysctl"] + list(args), env=child_environment).strip() + return _check_output(["sysctl", *args], env=child_environment).strip() if _machine() == X86_64: - features = ( - f'{sysctl("-n", "machdep.cpu.features").lower()} ' - f'{sysctl("-n", "machdep.cpu.leaf7_features").lower()}' + raw_features = sysctl( + "-n", + "machdep.cpu.features", + "machdep.cpu.leaf7_features", + "machdep.cpu.extfeatures", ) - features = set(features.split()) + features = set(raw_features.lower().split()) # Flags detected on Darwin turned to their linux counterpart - for darwin_flag, linux_flag in TARGETS_JSON["conversions"]["darwin_flags"].items(): - if darwin_flag in features: - features.update(linux_flag.split()) + for darwin_flags, linux_flags in TARGETS_JSON["conversions"]["darwin_flags"].items(): + if all(x in features for x in darwin_flags.split()): + features.update(linux_flags.split()) return partial_uarch(vendor=sysctl("-n", "machdep.cpu.vendor"), features=features) model = "unknown" model_str = sysctl("-n", "machdep.cpu.brand_string").lower() - if "m2" in model_str: + if "m4" in model_str: + model = "m4" + elif "m3" in model_str: + model = "m3" + elif "m2" in model_str: model = "m2" elif "m1" in model_str: model = "m1" @@ -335,7 +344,7 @@ def compatible_microarchitectures(info: Microarchitecture) -> List[Microarchitec ] -def host(): +def host() -> Microarchitecture: """Detects the host micro-architecture and returns it.""" # Retrieve information on the host's cpu info = detected_info() @@ -374,7 +383,7 @@ def compatibility_check(architecture_family: Union[str, Tuple[str, ...]]): A compatibility check function takes a partial Microarchitecture object as a first argument, and an arbitrary target Microarchitecture as the second argument. It returns True if the - target is compatible with first argument, False otherwise. + target is compatible with the first argument, False otherwise. Args: architecture_family: architecture family for which this test can be used @@ -393,8 +402,8 @@ def decorator(func): @compatibility_check(architecture_family=(PPC64LE, PPC64)) def compatibility_check_for_power(info, target): """Compatibility check for PPC64 and PPC64LE architectures.""" - # We can use a target if it descends from our machine type and our - # generation (9 for POWER9, etc) is at least its generation. + # We can use a target if it descends from our machine type, and our + # generation (9 for POWER9, etc.) is at least its generation. 
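Spelled out, the rule implemented below is: a target is usable if it is the family root or descends from it, and its generation does not exceed the host's. A toy rendering over plain data (names and lineage are illustrative):

ancestors = {"power10": ["power9", "power8"], "power9": ["power8"], "power8": []}

def power_compatible(host_generation: int, target: str, root: str = "power8") -> bool:
    in_family = target == root or root in ancestors[target]
    target_generation = int(target.removeprefix("power"))  # Python 3.9+
    return in_family and target_generation <= host_generation

print(power_compatible(10, "power9"))   # True
print(power_compatible(9, "power10"))   # False: target newer than host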
arch_root = TARGETS[_machine()] return ( target == arch_root or arch_root in target.ancestors diff --git a/lib/spack/spack/vendor/archspec/cpu/microarchitecture.py b/lib/spack/spack/vendor/archspec/cpu/microarchitecture.py index 7f6f2eeb4c6051..86dddd648aba00 100644 --- a/lib/spack/spack/vendor/archspec/cpu/microarchitecture.py +++ b/lib/spack/spack/vendor/archspec/cpu/microarchitecture.py @@ -8,7 +8,7 @@ import re import sys import warnings -from typing import IO, List, Set, Tuple +from typing import IO, Any, Dict, List, Optional, Set, Tuple, Union from . import schema from .alias import FEATURE_ALIASES @@ -34,43 +34,48 @@ def _impl(self, other): class Microarchitecture: - """Represents a specific CPU micro-architecture. - - Args: - name (str): name of the micro-architecture (e.g. skylake). - parents (list): list of parents micro-architectures, if any. - Parenthood is considered by cpu features and not - chronologically. As such each micro-architecture is - compatible with its ancestors. For example "skylake", - which has "broadwell" as a parent, supports running binaries - optimized for "broadwell". - vendor (str): vendor of the micro-architecture - features (set of str): supported CPU flags. Note that the semantic - of the flags in this field might vary among architectures, if - at all present. For instance x86_64 processors will list all - the flags supported by a given CPU while Arm processors will - list instead only the flags that have been added on top of the - base model for the current micro-architecture. - compilers (dict): compiler support to generate tuned code for this - micro-architecture. This dictionary has as keys names of - supported compilers, while values are list of dictionaries - with fields: - - * name: name of the micro-architecture according to the - compiler. This is the name passed to the ``-march`` option - or similar. Not needed if the name is the same as that - passed in as argument above. - * versions: versions that support this micro-architecture. - - generation (int): generation of the micro-architecture, if relevant. - cpu_part (str): cpu part of the architecture, if relevant. - """ + """A specific CPU micro-architecture""" # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-instance-attributes #: Aliases for micro-architecture's features feature_aliases = FEATURE_ALIASES - def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu_part=""): + def __init__( + self, + name: str, + parents: List["Microarchitecture"], + vendor: str, + features: Set[str], + compilers: Dict[str, List[Dict[str, str]]], + generation: int = 0, + cpu_part: str = "", + ): + """ + Args: + name: name of the micro-architecture (e.g. ``icelake``) + parents: list of parent micro-architectures, if any. Parenthood is considered by + cpu features and not chronologically. As such, each micro-architecture is + compatible with its ancestors. For example, ``skylake``, which has ``broadwell`` + as a parent, supports running binaries optimized for ``broadwell``. + vendor: vendor of the micro-architecture + features: supported CPU flags. Note that the semantic of the flags in this field might + vary among architectures, if at all present. For instance, x86_64 processors will + list all the flags supported by a given CPU, while Arm processors will list instead + only the flags that have been added on top of the base model for the current + micro-architecture. + compilers: compiler support to generate tuned code for this micro-architecture. 
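A concrete, abridged instance of the compilers mapping being described, in the same shape the entries in microarchitectures.json use (the version ranges and flags here are illustrative, not taken from a real entry):

compilers = {
    "gcc": [
        # "name" overrides self.name for the -march value when they differ.
        {"versions": "4.9:", "name": "broadwell", "flags": "-march={name} -mtune={name}"}
    ],
    "clang": [
        {"versions": "3.5:", "flags": "-march=x86-64 -mavx2"}
    ],
}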
This + dictionary has as keys names of supported compilers, while values are a list of + dictionaries with fields: + + * name: name of the micro-architecture according to the compiler. This is the name + passed to the ``-march`` option or similar. Not needed if it is the same as + ``self.name``. + * versions: versions that support this micro-architecture. + * flags: flags to be passed to the compiler to generate optimized code + + generation: generation of the micro-architecture, if relevant. + cpu_part: cpu part of the architecture, if relevant. + """ self.name = name self.parents = parents self.vendor = vendor @@ -82,14 +87,18 @@ def __init__(self, name, parents, vendor, features, compilers, generation=0, cpu self.cpu_part = cpu_part # Cache the "ancestor" computation - self._ancestors = None + self._ancestors: Optional[List["Microarchitecture"]] = None # Cache the "generic" computation - self._generic = None + self._generic: Optional["Microarchitecture"] = None # Cache the "family" computation - self._family = None + self._family: Optional["Microarchitecture"] = None + + # ssse3 implies sse3; on Linux sse3 is not mentioned in /proc/cpuinfo, so add it ad-hoc. + if "ssse3" in self.features: + self.features.add("sse3") @property - def ancestors(self): + def ancestors(self) -> List["Microarchitecture"]: """All the ancestors of this microarchitecture.""" if self._ancestors is None: value = self.parents[:] @@ -98,14 +107,14 @@ def ancestors(self): self._ancestors = value return self._ancestors - def _to_set(self): + def _to_set(self) -> Set[str]: """Returns a set of the nodes in this microarchitecture DAG.""" # This function is used to implement subset semantics with # comparison operators return set([str(self)] + [str(x) for x in self.ancestors]) @coerce_target_names - def __eq__(self, other): + def __eq__(self, other: Union[str, "Microarchitecture"]) -> bool: if not isinstance(other, Microarchitecture): return NotImplemented @@ -119,43 +128,43 @@ def __eq__(self, other): and self.cpu_part == other.cpu_part ) - def __hash__(self): + def __hash__(self) -> int: return hash(self.name) @coerce_target_names - def __ne__(self, other): + def __ne__(self, other: Union[str, "Microarchitecture"]) -> bool: return not self == other @coerce_target_names - def __lt__(self, other): + def __lt__(self, other: Union[str, "Microarchitecture"]) -> bool: if not isinstance(other, Microarchitecture): return NotImplemented return self._to_set() < other._to_set() @coerce_target_names - def __le__(self, other): + def __le__(self, other: Union[str, "Microarchitecture"]) -> bool: return (self == other) or (self < other) @coerce_target_names - def __gt__(self, other): + def __gt__(self, other: Union[str, "Microarchitecture"]) -> bool: if not isinstance(other, Microarchitecture): return NotImplemented return self._to_set() > other._to_set() @coerce_target_names - def __ge__(self, other): + def __ge__(self, other: Union[str, "Microarchitecture"]) -> bool: return (self == other) or (self > other) - def __repr__(self): + def __repr__(self) -> str: return f"{self.__class__.__name__}({self.name!r})" - def __str__(self): + def __str__(self) -> str: return self.name def tree(self, fp: IO[str] = sys.stdout, indent: int = 4) -> None: - """Format the partial order of ancestors of this microarchitecture as a tree.""" + """Format the partial order of this microarchitecture's ancestors as a tree.""" seen: Set[str] = set() stack: List[Tuple[int, Microarchitecture]] = [(0, self)] while stack: @@ -168,7 +177,7 @@ def tree(self, fp: 
IO[str] = sys.stdout, indent: int = 4) -> None: for parent in reversed(current.parents): stack.append((level + indent, parent)) - def __contains__(self, feature): + def __contains__(self, feature: str) -> bool: # Feature must be of a string type, so be defensive about that if not isinstance(feature, str): msg = "only objects of string types are accepted [got {0}]" @@ -184,7 +193,7 @@ def __contains__(self, feature): return match_alias(self) @property - def family(self): + def family(self) -> "Microarchitecture": """Returns the architecture family a given target belongs to""" if self._family is None: roots = [x for x in [self] + self.ancestors if not x.ancestors] @@ -196,14 +205,14 @@ def family(self): return self._family @property - def generic(self): + def generic(self) -> "Microarchitecture": """Returns the best generic architecture that is compatible with self""" if self._generic is None: generics = [x for x in [self] + self.ancestors if x.vendor == "generic"] self._generic = max(generics, key=lambda x: len(x.ancestors)) return self._generic - def to_dict(self): + def to_dict(self) -> Dict[str, Any]: """Returns a dictionary representation of this object.""" return { "name": str(self.name), @@ -228,20 +237,19 @@ def from_dict(data) -> "Microarchitecture": cpu_part=data.get("cpupart", ""), ) - def optimization_flags(self, compiler, version): - """Returns a string containing the optimization flags that needs - to be used to produce code optimized for this micro-architecture. + def optimization_flags(self, compiler: str, version: str) -> str: + """Returns a string containing the optimization flags that needs to be used to produce + code optimized for this micro-architecture. - The version is expected to be a string of dot separated digits. + The version is expected to be a string of dot-separated digits. - If there is no information on the compiler passed as argument the - function returns an empty string. If it is known that the compiler - version we want to use does not support this architecture the function - raises an exception. + If there is no information on the compiler passed as an argument, the function returns an + empty string. If it is known that the compiler version we want to use does not support + this architecture, the function raises an exception. Args: - compiler (str): name of the compiler to be used - version (str): version of the compiler to be used + compiler: name of the compiler to be used + version: version of the compiler to be used Raises: UnsupportedMicroarchitecture: if the requested compiler does not support @@ -252,7 +260,7 @@ def optimization_flags(self, compiler, version): if compiler not in self.family.compilers: return "" - # If we have information but it stops before this + # If we have information, but it stops before this # microarchitecture, fall back to the best known target if compiler not in self.compilers: best_target = [x for x in self.ancestors if compiler in x.compilers][0] @@ -325,16 +333,16 @@ def tuplify(ver): raise UnsupportedMicroarchitecture(msg) -def generic_microarchitecture(name): +def generic_microarchitecture(name: str) -> Microarchitecture: """Returns a generic micro-architecture with no vendor and no features. 
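The comparison operators typed above implement a partial order through set containment over a node plus all of its ancestors, which is what _to_set computes. A minimal sketch of that semantics, detached from the real class:

def to_set(name, ancestor_names):
    return {name, *ancestor_names}

broadwell = to_set("broadwell", ["haswell", "x86_64"])
skylake = to_set("skylake", ["broadwell", "haswell", "x86_64"])

print(broadwell < skylake)     # True: strict subset, so broadwell is an ancestor
print(skylake < broadwell)     # False
print(broadwell <= broadwell)  # True: every target is compatible with itself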
Args: - name (str): name of the micro-architecture + name: name of the micro-architecture """ return Microarchitecture(name, parents=[], vendor="generic", features=set(), compilers={}) -def version_components(version): +def version_components(version: str) -> Tuple[str, str]: """Decomposes the version passed as input in version number and suffix and returns them. @@ -342,7 +350,7 @@ def version_components(version): string is returned. Args: - version (str): version to be decomposed into its components + version: version to be decomposed into its components """ match = re.match(r"([\d.]*)(-?)(.*)", str(version)) if not match: @@ -356,7 +364,7 @@ def version_components(version): def _known_microarchitectures(): """Returns a dictionary of the known micro-architectures. If the - current host platform is unknown adds it too as a generic target. + current host platform is unknown, add it too as a generic target. """ def fill_target_from_dict(name, data, targets): diff --git a/lib/spack/spack/vendor/archspec/cpu/schema.py b/lib/spack/spack/vendor/archspec/cpu/schema.py index 0ffc2231650b69..a0f20f834612e2 100644 --- a/lib/spack/spack/vendor/archspec/cpu/schema.py +++ b/lib/spack/spack/vendor/archspec/cpu/schema.py @@ -9,7 +9,7 @@ import json import os import pathlib -from typing import Tuple +from typing import Optional, Tuple class LazyDictionary(collections.abc.MutableMapping): @@ -55,7 +55,9 @@ def __len__(self): EXTENSION_DIR_FROM_ENVIRONMENT = "ARCHSPEC_EXTENSION_CPU_DIR" -def _json_file(filename: str, allow_custom: bool = False) -> Tuple[pathlib.Path, pathlib.Path]: +def _json_file( + filename: str, allow_custom: bool = False +) -> Tuple[pathlib.Path, Optional[pathlib.Path]]: """Given a filename, returns the absolute path for the main JSON file, and an optional absolute path for an extension JSON file. 
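Stepping back to version_components above: the split is driven by the regex ([\d.]*)(-?)(.*), which peels off the dotted numeric part and keeps the remainder as the suffix. A standalone check (inputs are illustrative):

import re

def version_components(version: str):
    match = re.match(r"([\d.]*)(-?)(.*)", str(version))
    if not match:
        return "", ""
    return match.group(1), match.group(3)  # (version number, suffix)

print(version_components("4.8.1"))        # ('4.8.1', '')
print(version_components("10.3.0-2021"))  # ('10.3.0', '2021')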
@@ -98,8 +100,7 @@ def _load(json_file: pathlib.Path, extension_file: pathlib.Path): return data -#: In memory representation of the data in microarchitectures.json, -#: loaded on first access +#: In memory representation of the data in microarchitectures.json, loaded on first access TARGETS_JSON = LazyDictionary(_load, *_json_file("microarchitectures.json", allow_custom=True)) #: JSON schema for microarchitectures.json, loaded on first access diff --git a/lib/spack/spack/vendor/archspec/json/cpu/microarchitectures.json b/lib/spack/spack/vendor/archspec/json/cpu/microarchitectures.json index 84bced766b3a85..6d69dd1e3207a6 100644 --- a/lib/spack/spack/vendor/archspec/json/cpu/microarchitectures.json +++ b/lib/spack/spack/vendor/archspec/json/cpu/microarchitectures.json @@ -508,7 +508,9 @@ "ssse3", "sse4_1", "sse4_2", - "popcnt" + "popcnt", + "lahf_lm", + "cx16" ], "compilers": { "gcc": [ @@ -577,6 +579,8 @@ "sse4_1", "sse4_2", "popcnt", + "lahf_lm", + "cx16", "aes", "pclmulqdq" ], @@ -642,6 +646,8 @@ "sse4_1", "sse4_2", "popcnt", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx" @@ -720,6 +726,8 @@ "sse4_1", "sse4_2", "popcnt", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx", @@ -801,6 +809,10 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "xsave", + "cx16", "aes", "pclmulqdq", "avx", @@ -885,7 +897,11 @@ "ssse3", "sse4_1", "sse4_2", + "abm", "popcnt", + "xsave", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx", @@ -964,6 +980,10 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "xsave", + "cx16", "aes", "pclmulqdq", "avx", @@ -1045,7 +1065,11 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", "aes", + "xsave", "pclmulqdq", "avx", "rdrand", @@ -1128,6 +1152,9 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx", @@ -1141,6 +1168,7 @@ "rdseed", "adx", "clflushopt", + "xsave", "xsavec", "xsaveopt", "avx512f", @@ -1221,6 +1249,9 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx", @@ -1234,6 +1265,7 @@ "rdseed", "adx", "clflushopt", + "xsave", "xsavec", "xsaveopt", "avx512f", @@ -1243,7 +1275,7 @@ "avx512cd", "avx512vbmi", "avx512ifma", - "sha" + "sha_ni" ], "compilers": { "gcc": [ @@ -1310,6 +1342,9 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", "aes", "pclmulqdq", "avx", @@ -1323,6 +1358,7 @@ "rdseed", "adx", "clflushopt", + "xsave", "xsavec", "xsaveopt", "avx512f", @@ -1399,7 +1435,11 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", "aes", + "sha_ni", "pclmulqdq", "avx", "rdrand", @@ -1412,6 +1452,7 @@ "rdseed", "adx", "clflushopt", + "xsave", "xsavec", "xsaveopt", "avx512f", @@ -1512,6 +1553,10 @@ "sse4_1", "sse4_2", "popcnt", + "abm", + "lahf_lm", + "cx16", + "sha_ni", "aes", "pclmulqdq", "avx", @@ -1525,6 +1570,7 @@ "rdseed", "adx", "clflushopt", + "xsave", "xsavec", "xsaveopt", "avx512f", @@ -1658,6 +1704,10 @@ "sse", "sse2", "sse4a", + "popcnt", + "lahf_lm", + "cx16", + "xsave", "abm", "avx", "xop", @@ -1730,6 +1780,10 @@ "sse", "sse2", "sse4a", + "popcnt", + "lahf_lm", + "cx16", + "xsave", "abm", "avx", "xop", @@ -1806,6 +1860,10 @@ "sse", "sse2", "sse4a", + "popcnt", + "lahf_lm", + "cx16", + "xsave", "abm", "avx", "xop", @@ -1885,6 +1943,10 @@ "sse", "sse2", "sse4a", + "popcnt", + "lahf_lm", + "cx16", + "xsave", "abm", "avx", "xop", @@ -1986,10 +2048,13 @@ "sse4_1", "sse4_2", "abm", + "xsave", "xsavec", "xsaveopt", "clflushopt", - "popcnt" + "popcnt", + "lahf_lm", + "cx16" ], "compilers": { "gcc": [ @@ -2072,11 +2137,13 @@ "sse4_1", "sse4_2", 
"abm", + "xsave", "xsavec", "xsaveopt", "clflushopt", "popcnt", - "clwb" + "clwb", + "lahf_lm" ], "compilers": { "gcc": [ @@ -2159,10 +2226,12 @@ "sse4_1", "sse4_2", "abm", + "xsave", "xsavec", "xsaveopt", "clflushopt", "popcnt", + "lahf_lm", "clwb", "vaes", "vpclmulqdq", @@ -2250,10 +2319,12 @@ "sse4_1", "sse4_2", "abm", + "xsave", "xsavec", "xsaveopt", "clflushopt", "popcnt", + "lahf_lm", "clwb", "vaes", "vpclmulqdq", @@ -2354,7 +2425,6 @@ "clflushopt", "clwb", "clzero", - "cppc", "cx16", "f16c", "flush_l1d", @@ -2365,9 +2435,11 @@ "mmx", "movbe", "movdir64b", + "lahf_lm", "movdiri", "pclmulqdq", "popcnt", + "pku", "rdseed", "sse", "sse2", @@ -2378,6 +2450,7 @@ "tsc_adjust", "vaes", "vpclmulqdq", + "xsave", "xsavec", "xsaveopt" ], @@ -2831,6 +2904,27 @@ ] } }, + "armv8.6a": { + "from": [ + "armv8.5a" + ], + "vendor": "generic", + "features": [], + "compilers": { + "gcc": [ + { + "versions": "10.1:", + "flags": "-march=armv8.6-a -mtune=generic" + } + ], + "clang": [ + { + "versions": "11:", + "flags": "-march=armv8.6-a -mtune=generic" + } + ] + } + }, "armv9.0a": { "from": [ "armv8.5a" @@ -3613,6 +3707,182 @@ }, "cpupart": "0x032" }, + "m3": { + "from": [ + "m2", + "armv8.6a" + ], + "vendor": "Apple", + "features": [ + "fp", + "asimd", + "evtstrm", + "aes", + "pmull", + "sha1", + "sha2", + "crc32", + "atomics", + "fphp", + "asimdhp", + "cpuid", + "asimdrdm", + "jscvt", + "fcma", + "lrcpc", + "dcpop", + "sha3", + "asimddp", + "sha512", + "asimdfhm", + "dit", + "uscat", + "ilrcpc", + "flagm", + "ssbs", + "sb", + "paca", + "pacg", + "dcpodp", + "flagm2", + "frint", + "ecv", + "bf16", + "i8mm", + "bti" + ], + "compilers": { + "gcc": [ + { + "versions": "8.0:", + "flags": "-march=armv8.5-a -mtune=generic" + } + ], + "clang": [ + { + "versions": "9.0:12.0", + "flags": "-march=armv8.5-a" + }, + { + "versions": "13.0:", + "flags": "-mcpu=apple-m1" + }, + { + "versions": "16.0:", + "flags": "-mcpu=apple-m3" + } + ], + "apple-clang": [ + { + "versions": "11.0:12.5", + "flags": "-march=armv8.5-a" + }, + { + "versions": "13.0:14.0.2", + "flags": "-mcpu=apple-m1" + }, + { + "versions": "14.0.2:15", + "flags": "-mcpu=apple-m2" + }, + { + "versions": "16:", + "flags": "-mcpu=apple-m3" + } + ] + }, + "cpupart": "Unknown" + }, + "m4": { + "from": [ + "m3", + "armv8.6a" + ], + "vendor": "Apple", + "features": [ + "fp", + "asimd", + "evtstrm", + "aes", + "pmull", + "sha1", + "sha2", + "crc32", + "atomics", + "fphp", + "asimdhp", + "cpuid", + "asimdrdm", + "jscvt", + "fcma", + "lrcpc", + "dcpop", + "sha3", + "asimddp", + "sha512", + "asimdfhm", + "dit", + "uscat", + "ilrcpc", + "flagm", + "ssbs", + "sb", + "paca", + "pacg", + "dcpodp", + "flagm2", + "frint", + "ecv", + "bf16", + "i8mm", + "bti", + "sme", + "sme2" + ], + "compilers": { + "clang": [ + { + "versions": "9.0:12.0", + "flags": "-march=armv8.5-a" + }, + { + "versions": "13.0:", + "flags": "-mcpu=apple-m1" + }, + { + "versions": "16.0:18", + "flags": "-mcpu=apple-m3" + }, + { + "versions": "19:", + "flags": "-mcpu=apple-m4" + } + ], + "apple-clang": [ + { + "versions": "11.0:12.5", + "flags": "-march=armv8.5-a" + }, + { + "versions": "13.0:14.0.2", + "flags": "-mcpu=apple-m1" + }, + { + "versions": "14.0.2:15", + "flags": "-mcpu=apple-m2" + }, + { + "versions": "16:", + "flags": "-mcpu=apple-m3" + }, + { + "versions": "17:", + "flags": "-mcpu=apple-m4" + } + ] + }, + "cpupart": "Unknown" + }, "arm": { "from": [], "vendor": "generic", @@ -3774,6 +4044,9 @@ "sse4.1": "sse4_1", "sse4.2": "sse4_2", "avx1.0": "avx", + "lahf": "lahf_lm", + "sha": 
"sha_ni", + "popcnt lzcnt": "abm", "clfsopt": "clflushopt", "xsave": "xsavec xsaveopt" } diff --git a/lib/spack/spack/vendor/ruamel/yaml/comments.py b/lib/spack/spack/vendor/ruamel/yaml/comments.py index 199e8d5777ec13..dae5e1075092a7 100644 --- a/lib/spack/spack/vendor/ruamel/yaml/comments.py +++ b/lib/spack/spack/vendor/ruamel/yaml/comments.py @@ -628,7 +628,7 @@ def __deepcopy__(self, memo): memo[id(self)] = res for k in self: res.append(copy.deepcopy(k, memo)) - self.copy_attributes(res, memo=memo) + self.copy_attributes(res, memo=memo) return res def __add__(self, other): diff --git a/lib/spack/spack/version/__init__.py b/lib/spack/spack/version/__init__.py index 357add8c476f4e..2ad36716fbffad 100644 --- a/lib/spack/spack/version/__init__.py +++ b/lib/spack/spack/version/__init__.py @@ -3,15 +3,12 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) """ -This module implements Version and version-ish objects. These are: +This module implements Version and version-ish objects. These are: -StandardVersion: A single version of a package. -ClosedOpenRange: A range of versions of a package. -VersionList: A ordered list of Version and VersionRange elements. - -The set of Version and ClosedOpenRange is totally ordered wiht < -defined as Version(x) < VersionRange(Version(y), Version(x)) -if Version(x) <= Version(y). +* :class:`~spack.version.version_types.StandardVersion`: A single version of a package. +* :class:`~spack.version.version_types.ClosedOpenRange`: A range of versions of a package. +* :class:`~spack.version.version_types.VersionList`: A ordered list of Version and VersionRange + elements. """ from .common import ( diff --git a/lib/spack/spack/version/common.py b/lib/spack/spack/version/common.py index ad13fc53ad167f..2db8aa418970be 100644 --- a/lib/spack/spack/version/common.py +++ b/lib/spack/spack/version/common.py @@ -2,12 +2,8 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import re - import spack.error - -# regex for a commit version -COMMIT_VERSION = re.compile(r"^[a-f0-9]{40}$") +from spack.util.git import is_git_commit_sha # Infinity-like versions. 
The order in the list implies the comparison rules infinity_versions = ["stable", "nightly", "trunk", "head", "master", "main", "develop"] @@ -23,10 +19,6 @@ STRING_TO_PRERELEASE = {"alpha": ALPHA, "beta": BETA, "rc": RC, "final": FINAL} -def is_git_commit_sha(string: str) -> bool: - return len(string) == 40 and bool(COMMIT_VERSION.match(string)) - - def is_git_version(string: str) -> bool: return string.startswith("git.") or is_git_commit_sha(string) or "=" in string[1:] diff --git a/lib/spack/spack/version/version_types.py b/lib/spack/spack/version/version_types.py index 65fd23ad2c2054..aea3aaadbdc48e 100644 --- a/lib/spack/spack/version/version_types.py +++ b/lib/spack/spack/version/version_types.py @@ -10,13 +10,13 @@ from .common import ( ALPHA, - COMMIT_VERSION, FINAL, PRERELEASE_TO_STRING, STRING_TO_PRERELEASE, EmptyRangeError, VersionLookupError, infinity_versions, + is_git_commit_sha, is_git_version, iv_min_len, ) @@ -172,6 +172,8 @@ class VersionType(SupportsRichComparison): """ + __slots__ = () + def intersection(self, other: "VersionType") -> "VersionType": """Any versions contained in both self and other, or empty VersionList if no overlap.""" raise NotImplementedError @@ -199,6 +201,8 @@ def __hash__(self) -> int: class ConcreteVersion(VersionType): """Base type for versions that represents a single (non-range or list) version.""" + __slots__ = () + def _stringify_version(versions: VersionTuple, separators: Tuple[str, ...]) -> str: """Create a string representation from version components.""" @@ -217,7 +221,7 @@ def _stringify_version(versions: VersionTuple, separators: Tuple[str, ...]) -> s class StandardVersion(ConcreteVersion): """Class to represent versions""" - __slots__ = ["version", "_string", "separators"] + __slots__ = ("version", "_string", "separators") _string: str version: VersionTuple @@ -386,7 +390,7 @@ def intersection(self, other: VersionType) -> VersionType: return other.intersection(self) def isdevelop(self) -> bool: - """Triggers on the special case of the `@develop-like` version.""" + """Triggers on the special case of the ``@develop-like`` version.""" return any( isinstance(p, VersionStrComponent) and isinstance(p.data, int) for p in self.version[0] ) @@ -410,6 +414,7 @@ def dotted(self) -> "StandardVersion": """The dotted representation of the version. Example: + >>> version = Version('1-2-3b') >>> version.dotted Version('1.2.3b') @@ -424,13 +429,13 @@ def underscored(self) -> "StandardVersion": """The underscored representation of the version. Example: - >>> version = Version('1.2.3b') + + >>> version = Version("1.2.3b") >>> version.underscored - Version('1_2_3b') + Version("1_2_3b") Returns: - Version: The version with separator characters replaced by - underscores + Version: The version with separator characters replaced by underscores """ return type(self).from_string(self.string.replace(".", "_").replace("-", "_")) @@ -439,9 +444,10 @@ def dashed(self) -> "StandardVersion": """The dashed representation of the version. Example: - >>> version = Version('1.2.3b') + + >>> version = Version("1.2.3b") >>> version.dashed - Version('1-2-3b') + Version("1-2-3b") Returns: Version: The version with separator characters replaced by dashes @@ -453,9 +459,10 @@ def joined(self) -> "StandardVersion": """The joined representation of the version. 
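One non-obvious construction further down in this file's changes is VersionList.any(), which allocates with cls.__new__(cls) to bypass __init__ and installs a prebuilt unbounded range directly. The pattern in isolation (toy class, illustrative names):

class VersionListLike:
    def __init__(self, items):
        self.versions = sorted(items)   # normal path: normalize user input

    @classmethod
    def any(cls) -> "VersionListLike":
        obj = cls.__new__(cls)          # allocate without running __init__
        obj.versions = ["<unbounded>"]  # install the prebuilt sentinel directly
        return obj

print(VersionListLike.any().versions)  # ['<unbounded>']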
Example: - >>> version = Version('1.2.3b') + + >>> version = Version("1.2.3b") >>> version.joined - Version('123b') + Version("123b") Returns: Version: The version with separator characters removed @@ -468,21 +475,22 @@ def up_to(self, index: int) -> "StandardVersion": """The version up to the specified component. Examples: - >>> version = Version('1.23-4b') + + >>> version = Version("1.23-4b") >>> version.up_to(1) - Version('1') + Version("1") >>> version.up_to(2) - Version('1.23') + Version("1.23") >>> version.up_to(3) - Version('1.23-4') + Version("1.23-4") >>> version.up_to(4) - Version('1.23-4b') + Version("1.23-4b") >>> version.up_to(-1) - Version('1.23-4') + Version("1.23-4") >>> version.up_to(-2) - Version('1.23') + Version("1.23") >>> version.up_to(-3) - Version('1') + Version("1") Returns: Version: The first index components of the version @@ -505,19 +513,12 @@ def up_to_3(self): return self.up_to(3) -_STANDARD_VERSION_TYPEMIN = StandardVersion("", ((), (ALPHA,)), ("",)) - -_STANDARD_VERSION_TYPEMAX = StandardVersion( - "infinity", ((VersionStrComponent(len(infinity_versions)),), (FINAL,)), ("",) -) - - class GitVersion(ConcreteVersion): """Class to represent versions interpreted from git refs. There are two distinct categories of git versions: - 1) GitVersions instantiated with an associated reference version (e.g. 'git.foo=1.2') + 1) GitVersions instantiated with an associated reference version (e.g. ``git.foo=1.2``) 2) GitVersions requiring commit lookups Git ref versions that are not paired with a known version are handled separately from @@ -548,7 +549,7 @@ class GitVersion(ConcreteVersion): sufficient. """ - __slots__ = ["has_git_prefix", "commit_sha", "ref", "std_version", "_ref_lookup"] + __slots__ = ("has_git_prefix", "commit_sha", "ref", "is_commit", "std_version", "_ref_lookup") def __init__(self, string: str): # TODO will be required for concrete specs when commit lookup added @@ -579,7 +580,7 @@ def __init__(self, string: str): self.ref = normalized_string # Used by fetcher - self.is_commit: bool = len(self.ref) == 40 and bool(COMMIT_VERSION.match(self.ref)) + self.is_commit: bool = is_git_commit_sha(self.ref) # translations if self.is_commit: @@ -773,6 +774,8 @@ def up_to(self, index) -> StandardVersion: class ClosedOpenRange(VersionType): + __slots__ = ("lo", "hi") + def __init__(self, lo: StandardVersion, hi: StandardVersion): if hi < lo: raise EmptyRangeError(f"{lo}..{hi} is an empty range") @@ -1066,6 +1069,13 @@ def from_dict(dictionary) -> "VersionList": return VersionList([Version(dictionary["version"])]) raise ValueError("Dict must have 'version' or 'versions' in it.") + @classmethod + def any(cls) -> "VersionList": + """Return a VersionList that matches any version.""" + version_list = cls.__new__(cls) + version_list.versions = [_UNBOUNDED_RANGE] + return version_list + def update(self, other: "VersionList") -> None: self.add(other) @@ -1161,9 +1171,7 @@ def __str__(self) -> str: if not self.versions: return "" - return ",".join( - f"={v}" if isinstance(v, StandardVersion) else str(v) for v in self.versions - ) + return ",".join(f"={v}" if type(v) is StandardVersion else str(v) for v in self.versions) def __repr__(self) -> str: return str(self.versions) @@ -1326,3 +1334,14 @@ def ver(obj: Union[VersionType, str, list, tuple, int, float]) -> VersionType: return from_string(str(obj)) else: raise TypeError("ver() can't convert %s to version!" 
% type(obj)) + + +_STANDARD_VERSION_TYPEMIN = StandardVersion("", ((), (ALPHA,)), ("",)) + +_STANDARD_VERSION_TYPEMAX = StandardVersion( + "infinity", ((VersionStrComponent(len(infinity_versions)),), (FINAL,)), ("",) +) + +_UNBOUNDED_RANGE = ClosedOpenRange.from_version_range( + _STANDARD_VERSION_TYPEMIN, _STANDARD_VERSION_TYPEMAX +) diff --git a/pyproject.toml b/pyproject.toml index afc08351a4fb0b..8e24a4d3de7c6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "spack" description = "The spack package manager" requires-python = ">=3.6" -dependencies = ["clingo", "setuptools"] +dependencies = ["clingo"] dynamic = ["version"] [project.scripts] @@ -16,7 +16,6 @@ dev = [ "pip>=21.3", "pytest", "pytest-xdist", - "setuptools", "click", "black", "mypy", @@ -263,11 +262,6 @@ drop = [ "bin/", # interpreter and OS specific msgpack libs "msgpack/*.so", - # unneeded parts of setuptools - "easy_install.py", - "setuptools", - "pkg_resources/_vendor/", - "pkg_resources/extern/", # trim vendored pygments styles and lexers "pygments/styles/[!_]*.py", "^pygments/lexers/(?!python|__init__|_mapping).*\\.py$", @@ -294,9 +288,6 @@ pyrsistent = [] ruamel = [] six = [] -[tool.vendoring.license.directories] -setuptools = "pkg_resources" - [tool.vendoring.license.fallback-urls] CacheControl = "https://raw.githubusercontent.com/ionrock/cachecontrol/v0.12.6/LICENSE.txt" distlib = "https://bitbucket.org/pypa/distlib/raw/master/LICENSE.txt" diff --git a/pytest.ini b/pytest.ini index 2d4d18d86d277e..39f3f7fc8f6409 100644 --- a/pytest.ini +++ b/pytest.ini @@ -16,3 +16,6 @@ markers = not_on_windows: mark tests that are skipped on Windows only_windows: mark tests that are skipped everywhere but Windows require_provenance: tests that have enough infrastructure to test git binary provenance + enable_parallelism: mark tests that require parallelism to be enabled + use_package_hash: mark test to use real package hash computation instead of mock + diff --git a/share/spack/bootstrap/github-actions-v0.5/clingo.json b/share/spack/bootstrap/github-actions-v0.5/clingo.json deleted file mode 100644 index ee7a3595568d4b..00000000000000 --- a/share/spack/bootstrap/github-actions-v0.5/clingo.json +++ /dev/null @@ -1,389 +0,0 @@ -{ - "verified": [ - { - "binaries": [ - [ - "clingo-bootstrap", - "riu2vekwzrloc3fktlf6v7kwv6fja7lp", - "7527bc4d2d75671162fe0db3de04c5d3e1e6ab7991dfd85442c302c698febb45" - ] - ], - "python": "python@3.10.13", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "sgf6pgn4ihfcbxutxhevp36n3orfpdkw", - "958531adcb449094bca7703f8f08d0f55a18f9a4c0f10a175ae4190d20982891" - ] - ], - "python": "python@3.11.5", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "ie4wlhhnb4snroymbnjksajwvoid6omx", - "4af14c3375a211ead3d2b4a31b59683744adcb79b820cc0c6b168ab162a7d983" - ] - ], - "python": "python@3.12.0", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "5ke32podcipzxxwrj6uzm324bxegbwca", - "a4106c42ee68d07c3d954ab73fe305ca4204f44d90b58fd91a8f784d9b96e7e3" - ] - ], - "python": "python@3.6", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "scu4cnnf5axmjgozqc7cccpqnj5nc5tj", - "54de4ca141b92222c8f1729e9e336c8a71dad9efa641e76438fcfb79bb58fc7f" - ] - ], - "python": "python@3.7.17", - "spec": 
"clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "ajbswc25irhmhbc4qibdcr6ohsvpcdku", - "8b9e7af163a4259256eca4b4a1a92b5d95463a5cf467be2a11c64ab536ca5b04" - ] - ], - "python": "python@3.8.18", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "vwkuxa5z4pj7vviwsmrpw2r6kbbqej2p", - "a3f10024ff859e15b79ccd06c970a5f0e6ba11b0eae423f096ec9a35863816d2" - ] - ], - "python": "python@3.9.18", - "spec": "clingo-bootstrap platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "attdjmyzpfnhoobadw55pgg4hwkyp7zk", - "f3258af3a648b47f12285dd3f048b685ed652b2b55b53861ac9913926de0f1c3" - ] - ], - "python": "python@3.10", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "w4vnbsxjgkhsmgwozudzcsqlvccjsec4", - "19322c2c951fc80234963ac068c78442df57ac63055325b24a39ab705d27a5b9" - ] - ], - "python": "python@3.11", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "dw7ez2xcx6e5dxo3n4jin7pdbo3ihwtw", - "c368edda4b3c8fd767f5f0f098ea416864b088c767dc43135df49cf5f6ef4c93" - ] - ], - "python": "python@3.12", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "audrlxaw3ny3kyjkf6kqywumhokcxh3p", - "db2f44966ec104ffe57c0911f0b1e0d3d052753f4c46c30c0890dfb26d547b09" - ] - ], - "python": "python@3.6", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "al7brxvvvhih5nlxvtfkavufqc3pe5t2", - "4e09b6d50d42c898e075fd20f7c7eddf91cb80edfd2d1326d26fd779e4d1ffed" - ] - ], - "python": "python@3.7", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "v3ctpkyogl542wjibng6m2h2426spjbb", - "d9ceb4f9ca23ef1dcc33872e5410ccfef6ea0360247d3e8faedf1751fb1ae4ca" - ] - ], - "python": "python@3.8", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "zxo5ih5ac6r7lj6miwyx36ot7s6a4dcw", - "f8f5e124d0e7bada34ff687a05e80b2fe207ce4d26205dab09b144edb148f05e" - ] - ], - "python": "python@3.9", - "spec": "clingo-bootstrap platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "wki4qcy3wzpoxav3auxt2u7yb4sk3xcc", - "f5b9251eb51c60a71f7a0359c252f48c1a1121c426e1e6f9181808c626cb5fef" - ] - ], - "python": "python@3.10.13", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "gun6hbksmsecau5wjyrmxodq4hxievzx", - "28839ec43db444d6725bde3fcff99adadf61a392d967041fb16f0ffc0afa2f9d" - ] - ], - "python": "python@3.11.5", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "er73owosuqfmmkxvuw3f7sqnvvj6s4xp", - "99264d48c290256bf16e202c155bf3f8c88fdbbe9894d901344d0db7258abce3" - ] - ], - "python": "python@3.12.0", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "kv6l7qttuzk7zxkxi5fhff52qso3pj7m", - "59aa052e89d3c698fdd35e30ac21a896c8e49bbcc2f589a8f777bd5dafff2af7" - ] - ], - "python": "python@3.6", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - 
"uw5o2z753otspa3lmmy2bdodh5munkir", - "7a8b6359ce83463541ff68c221296fe9875adf28ea2b2c1416229750cf4935d2" - ] - ], - "python": "python@3.7.17", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "d63pp2l453bfygh6q7afwdj5mw7lhsns", - "425bef3a8605732b2fbe74cdd77ef6a359cbdb62800490bbd05620a57da35b0c" - ] - ], - "python": "python@3.8.18", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "nap44jiznzwlma6n75uxbpznppazs7av", - "316d940ca9af8c6b3bc50f8fdaadba02b0e955c4f24345a63a1a6715b01a752c" - ] - ], - "python": "python@3.9.18", - "spec": "clingo-bootstrap platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "qhvnw4yowmk2tofg3u7a4uomisktgzw5", - "d30ec81385377521dd2d1ac091546cc2dec6a852ad31f35c24c65919f94fbf64" - ] - ], - "python": "python@3.10.13", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "b3y37ryfuhjq6ljbkq7piglsafg5stgw", - "3c2f9cca3a6d37685fdf7d7dffb7a0505336c32562715069004631c446e46a7c" - ] - ], - "python": "python@3.11.5", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "dbloojtq5kcfd3pjmj4pislgpzrcvjpn", - "f8aeba80e6c106b769adba164702db94e077255fe1a22d6d265ccc3172b4ab1a" - ] - ], - "python": "python@3.12.0", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "gtlngzdb7iggcjmaottob54qi3b24blt", - "3efc534ba293ee51156971b8c19a597ebcb237b003c98e3c215a49a88064dfd1" - ] - ], - "python": "python@3.6", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "4ab4wobwa7bvhlkrmhdp2dwgtcq5rpzo", - "3dc6539a989701ec1d83d644a79953af912c11fe6046a8d720970faf8e477991" - ] - ], - "python": "python@3.7.17", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "fgout3h4mt4i64xaovqrpcsdy3ly2aml", - "ade67f0623e941b16f2dd531270b4863de8befd56a9a47bd87af85345bc8bed6" - ] - ], - "python": "python@3.8.18", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "5fv2q4agg4b4g53f4zhnymrbv6ogiwpy", - "18047d48538a770f014cce73756258c1a320d4ac143abef3c5d8bc09dd7a03cc" - ] - ], - "python": "python@3.9.18", - "spec": "clingo-bootstrap platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "smkmkb5xqz4v2f7tl22g4e2ghamglox5", - "a850c80c7a48dab506f807cc936b9e54e6f5640fe96543ff58281c046140f112" - ] - ], - "python": "python@3.10.13", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "55qeu52pkt5shpwd7ulugv7wzt5j7vqd", - "e5e1a10b3b2d543b1555f5caef9ac1a9ccdcddb36a1278d3bf68bf0e9f490626" - ] - ], - "python": "python@3.11.5", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "zcw5ieomfwwpzpzpabetix2plfqzpvwd", - "ed409165109488d13afe8ef12edd3b373ed08967903dc802889523b5d3bccd14" - ] - ], - "python": "python@3.12.0", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "t4yf34cuvquqp5xd66zybmcfyhwbdlsf", - "b14e26e86bcfdac98b3a55109996265683f32910d3452e034ddc0d328bf62d67" - ] - ], - "python": "python@3.6", - "spec": "clingo-bootstrap 
platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "grkrpj76lxsxa753uzndwfmrj3pwvyhp", - "11a535d4a8a9dbb18c2f995e10bc90b27b6ebc61f7ac2090f15db9b4f9be1a64" - ] - ], - "python": "python@3.7.17", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "zowwoarrf3hvo6i3iereolfujr42iyro", - "154d3a725f02c1775644d99a0b74f9e2cdf6736989a264ccfd5d9a8bce77a16b" - ] - ], - "python": "python@3.8.18", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - }, - { - "binaries": [ - [ - "clingo-bootstrap", - "bhqgwuvef354fwuxq7heeighavunpber", - "399dec8cb6b8cd1b03737e68ea32e6ed69030b57e5f05d983e8856024143ea78" - ] - ], - "python": "python@3.9.18", - "spec": "clingo-bootstrap platform=linux target=x86_64 %gcc" - } - ] -} \ No newline at end of file diff --git a/share/spack/bootstrap/github-actions-v0.5/gnupg.json b/share/spack/bootstrap/github-actions-v0.5/gnupg.json deleted file mode 100644 index b2fcace1269437..00000000000000 --- a/share/spack/bootstrap/github-actions-v0.5/gnupg.json +++ /dev/null @@ -1,254 +0,0 @@ -{ - "verified": [ - { - "binaries": [ - [ - "libgpg-error", - "stcmj3wdfxrohn2a53ecvsfsxe7rzrn4", - "942b0f0918798f0a5f007de0f104d71273e6988165c7a34a874e0846b1aa8977" - ], - [ - "libassuan", - "z27suzptvelnavipmldx6dcntiwqmguq", - "c703d6b534e89e383893913fb3b71b47322726c5e19f69178e4d1a3a42a76426" - ], - [ - "libgcrypt", - "if4uocx75kk6nc5vwvvuxq4dvaoljxkm", - "a2320f8cfc8201d15c0e9e244b824ce3d76542c148f4f0631648987957759f07" - ], - [ - "libiconv", - "nccvt7adwkq5anilrjspffdzl4hggon5", - "e23aa0184eb6661331bc850292fa22579005fd8ed62efd4c0c7a87489d8acaf6" - ], - [ - "libksba", - "lbfaarmpo2tupbezmqhfjvyspvwepv4r", - "96888ed37642a2425e2262a5904b82a38f9eecfb18a900493e32d4ab742f994b" - ], - [ - "npth", - "yc7h5c7cp7mupstvh5wlujp3xqet3xxq", - "3ac8e284878c5a556e38aab706e4303daf0a4d2bbb9fac2644495f8a362f9988" - ], - [ - "pinentry", - "rlo36pidutbjxxc3atooiwruaptfwmml", - "70114fe6c9e8723daa960f1a3dc36ed8b5a6c6f9cc828d43f79b8f59f7363605" - ], - [ - "zlib-ng", - "hewnrm76ju4qcjaezxole5htrulkij25", - "7babbe4d3d6e58631a944472356c07f0f4ad4a0759eaeefcf8584f33cce51ca6" - ], - [ - "gnupg", - "5cguax2vflgy2cwmt2ikvixtynommlmr", - "23fdd223493f441fa2e5f82d7e02837ecfad831fbfa4c27c175b3e294ed977d1" - ] - ], - "spec": "gnupg@2.3: platform=darwin target=aarch64 %apple-clang" - }, - { - "binaries": [ - [ - "libgpg-error", - "7yjoei55i6wxycmzbopyrw7nrquc22ac", - "c29cfe32521a4a1e2108c711233964c27ca74ffc7505eea86cb8c047ace5715b" - ], - [ - "libassuan", - "b4pkkugfhdtitffvlh4o3dexmthr6rmk", - "27ee6fc272f011f9ad4f000dc54961cccd67b34d6f24f316ca7faf26673bf98b" - ], - [ - "libgcrypt", - "uqjmpmpeta3w7c66m4e5jojopngpibvp", - "d73fbb6e9327faec75af450d602b663ed6bb65ac9657bd795034a53f6acd32c8" - ], - [ - "libiconv", - "rfsiwcq6tlw6to42a3uxw7wcmcyk5m6r", - "1f0176395130ed8b919538fa4b1cbda9f0ff8b836e51097258efc8cf5e11f753" - ], - [ - "libksba", - "gsobopcvr2p7d7rpgrbk2ulrnhvrpt6u", - "0e404a8353f91918f385db8cf661f53f91ffd805798fcd83fb1168a1f1758fe8" - ], - [ - "npth", - "gib2edyujm2oymkvu2hllm2yeghttvn3", - "e04e579e514cd965baf71b7f160b063bff8b116e991e6931c6919cd5f3270e59" - ], - [ - "pinentry", - "5ndbckveeaywx77rqmujglfnqwpxu3t6", - "0ec02dca08ad2e8b3dd1c71195ed3fe3bb8856b746726708f5e5d450619e1285" - ], - [ - "zlib-ng", - "fg366ys6nx3hthuiix4xooi6xx4qe5d2", - "cc372a21608885182233c7800355c7c0bbaff47ea16e190827a9618b0c4703e2" - ], - [ - "gnupg", - "2x5ftl46zcnxk6knz5y3nuhyn7zcttk3", - 
"b9481e122e2cb26f69b70505830d0fcc0d200aadbb6c6572339825f17ad1e52d" - ] - ], - "spec": "gnupg@2.3: platform=darwin target=x86_64 %apple-clang" - }, - { - "binaries": [ - [ - "libgpg-error", - "b7o5zrguyniw5362eey3peglzhlmig7l", - "b4373f2b0a2567b3b87e6bfc934135ce7790432aea58c802139bb5352f24b6a9" - ], - [ - "libassuan", - "6k2arop3mjwfhe4cwga6a775ud5m4scp", - "1e5143d35b0938a206ecf1ecb39b77e732629897d2b936cb8274239770055d90" - ], - [ - "libgcrypt", - "eh5h3zisjkupzr2pgqarvgs2fm7pun5r", - "b57eff265b48d0472243babfd1221c7c16189a4e324ea26e65d1a0a8c1391020" - ], - [ - "libiconv", - "vgk2zgjeflpnksj3lywuwdzs2nez63qv", - "d153953c40c630fd2bf271f3de901d7671f80e8161cf746cb54afbf28d934d03" - ], - [ - "libksba", - "au3xdl4oyfbxat6dknp3mldid7gupgt5", - "f1b1a1a02138109bc41b0b2ba54e689b43f35e2828f58b5de74280ce754fac0b" - ], - [ - "npth", - "ja7cauk7yhhyj7msnprlirue7cn3jpnj", - "cf6fd998a8f92ce1cf34c63db09c77b1891bf8f5915deef03c0cae5492bd691b" - ], - [ - "pinentry", - "6yo4flozla2tvw3ojkh2atvnfxuqx6ym", - "e78826a269109b3d67a54b1d01ff0a93be043dddcb4f52d329770ae1f75313f3" - ], - [ - "zlib-ng", - "4cgenrt3rcinueq6peyolxhegnryoeem", - "918a1e48f823806f1562c95569953a4658b2fbc54a2606a09bcd7e259b62f492" - ], - [ - "gnupg", - "lrmigjenpqj5fy4ojcs5jy6doktiu4qz", - "228ccb475932f7f40a64e9d87dec045931cc57f71b1dfd4b4c3926107222d96c" - ] - ], - "spec": "gnupg@2.3: platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "libgpg-error", - "km6l24czfhnmlya74nu6cxwufgimyhzz", - "23c3b7b487b36b9b03eeebbcc484adc6c8190c1bbcaa458943847148c915c6b2" - ], - [ - "libassuan", - "crkk525xdgsn2k5s4xqdaxkudz6pjqbm", - "ae3048a8059c0709d3efe832de1a8f82594373ba853d4bc2dfa05fb9dbfbc782" - ], - [ - "libgcrypt", - "4s5lkowqilor35fscjwvtmg4wasdknkc", - "62d3d13278d60d0329af1a9649b06591153ff68de4584f57777d13d693c7012e" - ], - [ - "libiconv", - "kbijqx45l3n64dlhenbuwgqpmf434g2d", - "dddf581a14a35b85cb69a8c785dd8e250f41e6de7697e34bb0ab2a942e0c2128" - ], - [ - "libksba", - "jnll3rfuh6xhgqxbwfnpizammcwloxjc", - "6200f2b6150aaf6d0e69771dfd5621582bd99ed0024fe83e7bc777cb66cabb29" - ], - [ - "npth", - "6j6b4hbkhwkb5gfigysqgn5lpu3i4kw5", - "0be0c70f3d9d45c4fe7490d8fdb8d7584de6324c3bfac8d884072409799c9951" - ], - [ - "pinentry", - "cdpcdd4iah6jot4odehm3xmulw3t3e32", - "5b447c770d0f705fbc97564fccdfbb0dfff8b6f8e2b4abbea326a538bc1bff80" - ], - [ - "zlib-ng", - "ogchs3i5tosoqrtsp3czp2azxvm7icig", - "acfa12c4e73560416e1169b37adabfbec5ee9a580a684b23e75d7591d8e39a03" - ], - [ - "gnupg", - "jwpu2wrofbwylpztltmi257benj2wp6z", - "98e2bcb4064ec0830d896938bc1fe5264dac611da71ea546b9ca03349b752041" - ] - ], - "spec": "gnupg@2.3: platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "libgpg-error", - "dwcgnnqt364enpf5554dio7kklspmrko", - "bfe9b506ccba0cca619133a3d2e05aa23c929749428bf6eecbff0c6985447009" - ], - [ - "libassuan", - "yl5rfsfuxd6if36h7rap7zbbpbfztkpw", - "4343dabbeed0851885992acd7b63fd74cb9d1acc06501a8af934e7e103801a15" - ], - [ - "libgcrypt", - "ka3t3dq73bkz4bs5ilyz6kymkypgbzxl", - "ec1bcc324e9f9d660395e2c586094431361a02196da43fce91be41cca5da9636" - ], - [ - "libiconv", - "5tog27ephuzc4j6kdxavhjsjm2kd5nu6", - "928fab3c32a1ae09651bb8491ee3855ccaf3c57a146ee72a289a073accd3fc8f" - ], - [ - "libksba", - "4ezfhjkmfc4fr34ozzl5q6b4x6jqqmsw", - "3045841c50c19a41beb0f32b4e8a960901397b95e82af3a73817babf35d4cfca" - ], - [ - "npth", - "bn4zrugdajgpk5dssoeccbl7o2gfgmcp", - "ef90ef85a818456afbff709b4a0757a077d69fd3c07d1b7612e1d461d837c46f" - ], - [ - "pinentry", - "cdwqocmusjomjjavnz6nn764oo54j5xj", - 
"b251047c1cb4be1bb884a7843d4419fae40fdbe5e1d36904e35f5e3fef5e4ced" - ], - [ - "zlib-ng", - "ozawh46coczjwtlul27msr3swe6pl6l5", - "0a397b53d64ac8191a36de8b32c5ced28a4c7a6dbafe9396dd897c55bcf7a168" - ], - [ - "gnupg", - "jra2dbsvpr5c5gj3ittejusa2mjh2sf5", - "054fac6eaad7c862ea4661461d847fb069876eb114209416b015748266f7d166" - ] - ], - "spec": "gnupg@2.3: platform=linux target=x86_64 %gcc" - } - ] -} \ No newline at end of file diff --git a/share/spack/bootstrap/github-actions-v0.5/patchelf.json b/share/spack/bootstrap/github-actions-v0.5/patchelf.json deleted file mode 100644 index 2e879a952bd3f1..00000000000000 --- a/share/spack/bootstrap/github-actions-v0.5/patchelf.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "verified": [ - { - "binaries": [ - [ - "patchelf", - "4txke6ixd2zg2yzg33l3fqnjyassono7", - "102800775f789cc293e244899f39a22f0b7a19373305ef0497ca3189223123f3" - ] - ], - "spec": "patchelf@0.13: platform=linux target=aarch64 %gcc" - }, - { - "binaries": [ - [ - "patchelf", - "tnbgxc22uebqsiwrhchf3nieatuqlsrr", - "91cf0a9d4750c04575c5ed3bcdefc4754e1cf9d1cd1bf197eb1fe20ccaa869f1" - ] - ], - "spec": "patchelf@0.13: platform=linux target=ppc64le %gcc" - }, - { - "binaries": [ - [ - "patchelf", - "afv7arjarb7nzmlh7c5slkfxykybuqce", - "73f4bde46b843c96521e3f5c31ab94756491404c1ad6429c9f61dbafbbfa6470" - ] - ], - "spec": "patchelf@0.13: platform=linux target=x86_64 %gcc" - } - ] -} \ No newline at end of file diff --git a/share/spack/bootstrap/github-actions-v2/clingo.json b/share/spack/bootstrap/github-actions-v2/clingo.json new file mode 100644 index 00000000000000..b35ea40e121d70 --- /dev/null +++ b/share/spack/bootstrap/github-actions-v2/clingo.json @@ -0,0 +1,414 @@ +{ + "verified": [ + { + "binaries": [ + [ + "clingo-bootstrap", + "pwlnvmalslnjfyyxni2rabs3ps7fheel", + "1d5b86c7b72caf39ae4288f14f10cd4172470f0a5a82091a19de832c8a9b8686" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.10" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "kflct3lxoi7idem2ftxb2hoe2cuv4wld", + "08202efda0a9dde65625653e9a6c598d2c88330cddf8e7bdba4f41d45d614acc" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.11" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "jfdsy54no4qh3xuxfsz3oo2oruk2pfk7", + "83d3b4021e3f1a76efc779530098b90495131b45494b865f0a9957f2e998b6fe" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.12" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "jkef76w5ghtcmnui4ff6ysesap2w6rmw", + "99affb48dd65b7ac9c1fb128b8db22087a9af3b6d1b799b1faed753823b8cdfe" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.13" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "5xlknpwrrgpdmlrxzplayuvkfjzbrozj", + "8720111b230ced41bf77601ef3f54e085e3f53b080a24570992b71340ac5da49" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.14" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "5n3lq67e73eiyvtmubyyvylv2fptrvsg", + "e9a22379dd9e66a778f4ebf38c3d50c6c896d3121039faf9619e4a2352f11b5c" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.8" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "k3pn5eauyxhrbw5gdui2lpqa6igipczc", + "8277af1cbc941cc4907815e6175869fdba121fab579a6169e018777e7fa456d2" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=aarch64 %apple-clang ^python@3.9" + }, + { + "binaries": [ + [ + 
"clingo-bootstrap", + "wzv7r4rqmd6d6weadqswme3qxja34ffx", + "4fd752d04e9bd30f318d792aabae68239366c653eae530ace0691f6cf9a8e4e6" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.10" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "zcoxhxmsoovwpk3vh62fbothtasogma2", + "26ece3445157ae7846aba7ac07c5359bda8500607a6ca932a3a10de4d8523297" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.11" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "7et6m6e3b2qtsztsqakp6fi2gfrc4lx3", + "df7b27379800fc56c277229c45c61e22d9855d7aadb5a189a976d39175c69007" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.12" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "mqybi754kuxj2jo2br6bfpysgmjufktr", + "6e5abf247c13232d7b693c5b9e419c1252059f3d6a02351f7a8538d6971c5ca4" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.13" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "4ura3zhguxl6hvv2phzu6spfogj3colf", + "ba8a4e37277b1f3506b825f08803bab258c693c539f2d4f8dfa75722eb95c45a" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.14" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "a6fd27ilevl6h5nrhuetqljzjiowhflb", + "553d8f08423191cfb52c729edf1ac419f3ed2d06a4332e3bb2598d2700e6176e" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.8" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "of6tyq5fdcrljkigauox6pemni2gxuxo", + "66946cbe5b41440d18f29971ecf99d13cf29ecb0f0d24c485c5be3301e78a69a" + ] + ], + "spec": "clingo-bootstrap@spack platform=darwin target=x86_64 %apple-clang ^python@3.9" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "uay7gqalzdsyrpd5nyyvg7fql2cy52jd", + "f38dbf6541cad89e16875d986e3765a36cb4f152eea641875001149c0b8db032" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.10" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "z4rhvmrt2pzjzklbjwvuqbeiytnxokul", + "cbbf79e1f4ce26095092ac47f18500e3cf647e10836b654e1f454b458543f135" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.11" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "l2vozyyvzhslys7rvmvspdfvh4n6gybu", + "f84e5573c11138d709ce4d74245290e1ccea094ac7c2ed5742fc7f4becef0a13" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.12" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "tw5wwjg63hw4hmcqacvlxpjstybfpmbn", + "77a42d4a34ed2c9cce7a5d84335fad79adad8b0ea2fd045df954b8dce212b98c" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.13" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "uba4n5ava5yppjfyvy32kwqnjnnx5aww", + "fb83a9312313b85b4c96abba421ead94081554e7ce8692388dc14fbeaf2f8c1a" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.14" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "ruyhb55k2xdsbx5t4rjttwxg7hpb2swr", + "ab3d5b2cdf926a43f2fc63bd18a24f11f7a4a2c26d9572e27818104588d18808" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.6" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "dw5kmugt2vyfvjdg6rpjlioxqrmo4bi4", + "d8a20cc03e9a0137d8ab5de0e81cd87b57a15d4171ec5ad59fec4dc07c8673c6" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.7" + }, + { + "binaries": [ + [ + 
"clingo-bootstrap", + "dop3tdkhgjuenndjlwcrptxg2wlggtke", + "b9a3a9b990228374c479c398b61502a09818948829a8e9c9ae2038cbbaa6328f" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.8" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "6rscdq3rsb34xf7euuwiijv2fzcz7tyt", + "87ef77a2c2ef7cd880e47fa38abf96e8ef70a64645ba353ebe468e5ad9974595" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=aarch64 %gcc ^python@3.9" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "vwzrp2t4al2hoogtzagzngpemfpnfpx7", + "65281df2bd13df6ced19320ea6326474f75a15066e22bb934ecee7b20175aec9" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.10" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "tumbbd2ch5smnxs2jtji5sf76mot2vxa", + "5bf5ac1121d36e62bef544940abd67e4f683dee760a20dfd5dd52b88ca8e947a" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.11" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "2fx2vrlzl3jb3e4vfpjnoc7www7qzx6k", + "297b399857de8bd0f248bd6f1ec16ea60956ccbfdfbef5cbfa5deec4e4d7301c" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.12" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "oth3pyk5fpcuas35r52gfirioji2nok2", + "0a7f6fd232a30248efe49fcc62ff9200778d33757cefb4c2559252f222b889a4" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.13" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "xrqc6j2stvnmt2axpozx6bh63j4zuicg", + "58e718850872c45559973fa2674450bdde841b2ede6247274c209c7bf382b345" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.14" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "osrrlgv7c2havm4izgqjso5cj54hqsz7", + "b941a1d88d198e8427c80d25378d0c38e358eb44a370eecad8187d76e4062ac5" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.6" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "mjst5t4zc7lcxdos5b6mkzwap6lxxq2i", + "1e871a754a620f922e5918ed25157b19bbc7ff48a46470bf6210398f35b05262" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.7" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "siukeftqvdnpxy6ppvfitsty6tjfamdw", + "ded0342e3c4e732162407dfd1c099b309d0852e1fcce3ccd8fd47f43caeba9b2" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.8" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "k4lcuo4d3mhcjqzrqylui74zm3id4prj", + "fc3c53439fe2b1907fbe03e7610c905fc32d572af5b672831ebb1e81eff32cda" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=ppc64le %gcc ^python@3.9" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "pujlvg6ky6uyrauwqmy6cysh7q5mzpuh", + "2b97dc9cde3a94f933a4e7181cf9809836ed6e9d5a5784416758222e6fd2865f" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.10" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "rbdouwoucdnekbaxbtazoibqkurc4xly", + "f2be487604f0b436ec982b38a7d931c5b4de4101c864cd63737b1c0463f3a8f1" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.11" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "z5wzybmgi3x4g2hvcsmkrtuag7j7svd6", + "cc238348fb1a3b70351d784ec61f7843d0cc9ca5de745ce98e6bea7cd00787ea" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.12" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "iag4xok2t4ww3phk2ease7rqm4x4rr3k", + 
"735b948738902a47aa91d6663168b0be5276d88ef5914e0a21c60ada20eefa26" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.13" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "kjqhf4ekk6jvpq6o32j5e322dp6ffujk", + "a4af59b800b9b2691c77a5ae09bffd0be5690451d50c123ad0dff984fa255b7f" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.14" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "rmz6nolo5afkeakgyc2eoc7n2ojhvf4v", + "526d468db326aea1e36183b68a7f81cc5fa8094b03162baab76b9fbf88567f60" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.6" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "tg4kwou6ekawgprc64px5vzujnw7jgto", + "c4bd0809a64a29a9c6d0b514b221f44222c3f610f66d81af0f9954cddae5e17c" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.7" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "frmgp5pxdesofrkmy7kv7dotrz3p74e6", + "7a349c91530f7cd45e6a663cad1d3f352f59424c2d6780dccb6deb089dacc5c1" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.8" + }, + { + "binaries": [ + [ + "clingo-bootstrap", + "h6badtwteaj2ogjacqr2fyqmy7am2g6i", + "31f1649728e2d58902eb62d1c2e37b1cfc73e007089322a17463b3cb5777cb98" + ] + ], + "spec": "clingo-bootstrap@spack platform=linux target=x86_64 %gcc ^python@3.9" + } + ] +} \ No newline at end of file diff --git a/share/spack/bootstrap/github-actions-v2/gnupg.json b/share/spack/bootstrap/github-actions-v2/gnupg.json new file mode 100644 index 00000000000000..b77aadb3693eb1 --- /dev/null +++ b/share/spack/bootstrap/github-actions-v2/gnupg.json @@ -0,0 +1,269 @@ +{ + "verified": [ + { + "binaries": [ + [ + "libgpg-error", + "p6ks2rjtltbjmowrzjt3o5hcnqhmqo3g", + "36e4f9f8abdd2b6735064c6cc3d39bda4479c7a7d651c587db07e1e650b03d11" + ], + [ + "libassuan", + "s22673fgzdajkvaaymjlz43m5nov6o4z", + "8a78ee0cfe7303785cce339a1337e082c28d52e04799ad58c31a6c34fa798a4f" + ], + [ + "zlib-ng", + "eln5ekvcwrdazo5etpoeskz7usdnynmt", + "16225297e19c7d68a21e2edaf95bf634917c5abda7057fe1fc32b9791eed0c71" + ], + [ + "pinentry", + "dqyra7sgczn4nhmbaz5ypvdrzkz2uwv7", + "74203adf69467000d3ee4cc6226a8ac289f3a837c4cfe00c34ea618471069a9e" + ], + [ + "npth", + "tt4y5y2shy3z6rk3gukdej4uexvjaiqt", + "42af19bda509ffef37837a5b79a07648026e57d378c3e72846c76c3d59295d3f" + ], + [ + "libksba", + "ff2mmhvtj5dnrjwsdwwnr2i4jzgp4pgf", + "bdcd9d428ac85d913832716b2d44afb933933b0a75cf9a68e474400c3f9f4a44" + ], + [ + "libiconv", + "xbmzo4ps6duao6i3dt33vyrzmuxq2g4o", + "e4aae6e999afa6e64aeba3a425b0e2164b61e72fe72a078bc71d16bde3390d46" + ], + [ + "libgcrypt", + "tlfizutpns6w76bnztq73m7lfkyaqtxb", + "d6e57414f6a2add6211cdd6ee2342c171476ea3c59109d1965afc6318ffd5782" + ], + [ + "gnupg", + "lk3abrd4ka756pnc4ma3j6ys4i3dtwjb", + "b2c8a36cfde2f534ed3434836e1d69610d09ec564120c03d203ff4aa800d81a3" + ] + ], + "spec": "gnupg@2.5.12 platform=darwin target=aarch64 %apple-clang" + }, + { + "binaries": [ + [ + "libgpg-error", + "ficfyoshyglsa33bgjfa5bzisauadyvq", + "ae340b35fdd89ef528ed9d3a49535d73b8f860525e8031673dbd7a0b550bc192" + ], + [ + "libassuan", + "2ibfzrkt32m42mlzhgilojyre6okhgqc", + "5e66283710dd1e8042fe0b309c619ff39b305d790ce5c36c742a341dd38c5d6a" + ], + [ + "zlib-ng", + "45h66x4vasowwxwhdcaynn4hbxmnjyqc", + "c870961f4996614f85bd800f5acd5b98c088df4231d8452e139be870f5570c6b" + ], + [ + "pinentry", + "4y3qewpjudfrxdlaaig6qutajbgxjopn", + "c5fb0998e350bd22a2bd94e59d4a2fa9a7423e9dacb1dfae71dbaf543faefb9b" + ], + [ + "npth", + 
"zfhw2mj6gl345z72hqcx5ot3slhgweup", + "a77e49b3f9419c47de6a9eb7c19d6a3d5451a9452b8f9b5c1f18486bc1f25627" + ], + [ + "libksba", + "fcgccrt3e2zacftlspvtci2cz2rkonzo", + "cdd3bb81cd7f4c0f95c403c07730909fa67c9e3d43e0399369205a507f36d3f6" + ], + [ + "libiconv", + "ozffgclj6pwiqd2jwlq4h26tbosp244y", + "96e4dd435fd0e12bc58386524cd6b0ac5a0be58467d8fd9fbcdf60b51eef12ec" + ], + [ + "libgcrypt", + "ih6xqv6zk2egxksutlaicnajzzjrlthx", + "aff58ead569465debaaee7c79bdba201e21f6abcc413e1044fce598acefdb90b" + ], + [ + "gnupg", + "2njkdwwe33ok64fddcmyim6trlb7ytvf", + "15df98b729177ff5515369751058bf2b92d41ac2c6c1a45f25c72ef0ccbafebf" + ] + ], + "spec": "gnupg@2.5.12 platform=darwin target=x86_64 %apple-clang" + }, + { + "binaries": [ + [ + "gcc-runtime", + "pcezo5yeqxxk4elffb6gy7meed2ftpzg", + "9468a3997c6f8f62b2c3eb8d3d4af3575e0159a023293af21f65510efb4d206d" + ], + [ + "libgpg-error", + "xwfdrqczdbtrqcb26aotk7t3wh5nrbbk", + "780cace311d677b073e054411fe663ff8265c32d7a5fd5d01db42baeb8fddf36" + ], + [ + "libassuan", + "omdnw5dv2lvyzx6us4u762xubwcus7vo", + "e6fd6f1504bd17aea04d0eef0064569a4b9422826b1b5f1a77c708ca9458a205" + ], + [ + "zlib-ng", + "wf7u7kfwpo6aw6rf3o2m5j6lv7z4wgh4", + "a904a0f66a168995897fc22e043469274186813777883caea0013e9a1441aa69" + ], + [ + "pinentry", + "ooxce5xizv367rxgwd7mrosgs2ax43ho", + "5e48ae9bd3519798d8f14c319876bbd63283cb2e8eacc21adcadc643f9a74d24" + ], + [ + "npth", + "m37zexajpcvfbool7i5x6iza667rkl5x", + "4f295559bcb33640e874211c440ba1a7b713ca54fc2a8b1de39d3203c5348a9f" + ], + [ + "libksba", + "nqltpp5nodk576xsccwkq4svecax4vik", + "c117cb04265b1cfc9d870b3bf97a9a71aa7d5e0823c54779a38fc35616c21bf4" + ], + [ + "libiconv", + "r5krqjc6bcadcmjdydwx5gigo522h6jq", + "6ded368c97d4afb48955505035fbc1f9dd88a13fc9a4c9091d471958613cc40a" + ], + [ + "libgcrypt", + "bid4feih23cyga2lxcxp5dyb7xcrkqhj", + "fdd6da6cf29e87fb12c375d21a28a7cf3c913656ed0376eeab83444d6f00e8c7" + ], + [ + "gnupg", + "vo6n6irpmlb63qjnpzhgn2bxfsaa7og2", + "2ae977241ef79ebf1d9d7bf81a7d9dd6740dc63d58e3a4c5c047763d147b7254" + ] + ], + "spec": "gnupg@2.5.12 platform=linux target=aarch64 %gcc" + }, + { + "binaries": [ + [ + "gcc-runtime", + "cbdic4gway7fjticqpycyxiq7ttwnde2", + "b57b0fa49669e3d224ce70bb7ac571691fa3947d30da4317c2bda97d13928226" + ], + [ + "libgpg-error", + "f75xdly6m2wqudcht6bq6ost7ljm5o6h", + "8bdc2634aed9d25a822053c2cb3f61edcc751a94a67b5128babcd9c06c63673e" + ], + [ + "libassuan", + "mizn62ncutddwlytldhbuv6yep3yfvlb", + "112da1bf01b1e3c5debf694c598ef9cb990a01f88216126cd42a54d4f76a2e59" + ], + [ + "zlib-ng", + "uup2zlmbj737h42holbbxi32g32ropls", + "9c7adcacebce98f75797fd89d216698f4ee2fc4f3eca33a6acc1be55ca0fbac2" + ], + [ + "pinentry", + "tfszkup64dn4k2mxps3oe2frqarlubvg", + "a52b15b90722a145b00424d028f228d59053a9523fc85d9a3b1343050c110f89" + ], + [ + "npth", + "3dz4nqboshfv2wp65owk2n4e6ke5z5rn", + "de81c08b84eb4ce78c315950c1ecaccac2e330dda2ef9115dfddcbb3935896a0" + ], + [ + "libksba", + "nrizhg6hbsb7toylcassz7q7jgy36k7b", + "c6d13e4529bd43116b203654caeab6bfc885fb415abd2e89aa1314747f6b5d45" + ], + [ + "libiconv", + "6od2jkz4mezuei7sarwbbrfqqtqsqjy3", + "b02d5801384188c92b42067a76949e50372acc1a580554f0ed59f59c8821269d" + ], + [ + "libgcrypt", + "afglhu5y5xwe5dgxkgehpc47uc33wot3", + "96e1ff9c04d930d4a93b93d1f4f01c9d7ceb587f2ea8072fa433eed07d466f6e" + ], + [ + "gnupg", + "s3ujlzxgr42jtj4exhjqamlkqt2hgccf", + "6fbb087c5ca4914a94b609278db9e55d4ba9a946b0de0b6aa7cdc67e1d794a96" + ] + ], + "spec": "gnupg@2.5.12 platform=linux target=ppc64le %gcc" + }, + { + "binaries": [ + [ + "gcc-runtime", + 
"is2veele627qiqdkcq4eghofnk7ukkyt", + "e47ddd094a147df1a80b8f6d78ae21aa96a713bd73a81946fcf7fb3c17cdc651" + ], + [ + "libgpg-error", + "tvdsawtbvve55jop7ewpzqy37msmspzv", + "0622408fc3404e20edcee6621cd607b303c523f3ef58e3f5520d8431d00bdc0d" + ], + [ + "libassuan", + "avea2svrm6qjtqzhu6jnzgxfkfjjylup", + "36f8265834af9b96a96a8033a555491f05e91f8231e8482e261f4a47fb4d6263" + ], + [ + "zlib-ng", + "am5hrjas73xurqg6zv6x77gw7sci5yy6", + "a6f3c0e72b73f49fdfaa048547e8f1d7fe566b70ff813760c0db9b391081f47c" + ], + [ + "pinentry", + "sitdcvblziwz6te3opu2jk3ibhxjnz3o", + "e60ebf50daed9d53efac9a9b49fe43bc5854f3d9bd54027d20e1c5d3d754766b" + ], + [ + "npth", + "g6aasobswschrn7ai27y5qz35qjw3he5", + "17c1f325f20e69983d587fd3bb13a2b194fceb9689bc3e289f5d39264274555a" + ], + [ + "libksba", + "2qiefbkbghuhanzzqfku2bd6lpdbj67a", + "3cae6510008d674603daabce9443220070dfb9c9a0d40e4faca856c03be6fce2" + ], + [ + "libiconv", + "onurjwrr6nqgscqjwwal7x7davlsw6xj", + "e820f1d9f9035b651fd5f1317d986dedce94e5d8667bf313a0068818d015b5e1" + ], + [ + "libgcrypt", + "25dprxujaujfwj7nkshafphsv5igm64w", + "7313843c3445e910798b6621a989b2881c65b1cbafd80b883cba697b7d9d608f" + ], + [ + "gnupg", + "mfoiwwhwxg5ot4iw2k2dc6sbvd5q7v3v", + "71e5ea8a7f9ab5d7b6909bb704f13003a772288823bd87abc261cdfdbf12561a" + ] + ], + "spec": "gnupg@2.5.12 platform=linux target=x86_64 %gcc" + } + ] +} \ No newline at end of file diff --git a/share/spack/bootstrap/github-actions-v0.5/metadata.yaml b/share/spack/bootstrap/github-actions-v2/metadata.yaml similarity index 61% rename from share/spack/bootstrap/github-actions-v0.5/metadata.yaml rename to share/spack/bootstrap/github-actions-v2/metadata.yaml index 0fd413a618a5fd..b6149b06a08088 100644 --- a/share/spack/bootstrap/github-actions-v0.5/metadata.yaml +++ b/share/spack/bootstrap/github-actions-v2/metadata.yaml @@ -1,8 +1,8 @@ type: buildcache description: | - Buildcache generated from a public workflow using Github Actions. + Buildcache generated from a public workflow using GitHub Actions hosted on GitHub Packages. The sha256 checksum of binaries is checked before installation. 
info: - url: https://mirror.spack.io/bootstrap/github-actions/v0.5 + url: oci://ghcr.io/spack/bootstrap-buildcache-v2.2 homepage: https://github.com/spack/spack-bootstrap-mirrors releases: https://github.com/spack/spack-bootstrap-mirrors/releases diff --git a/share/spack/bootstrap/github-actions-v2/patchelf.json b/share/spack/bootstrap/github-actions-v2/patchelf.json new file mode 100644 index 00000000000000..25d86b0c8c8821 --- /dev/null +++ b/share/spack/bootstrap/github-actions-v2/patchelf.json @@ -0,0 +1,49 @@ +{ + "verified": [ + { + "binaries": [ + [ + "gcc-runtime", + "pcezo5yeqxxk4elffb6gy7meed2ftpzg", + "9468a3997c6f8f62b2c3eb8d3d4af3575e0159a023293af21f65510efb4d206d" + ], + [ + "patchelf", + "4on5iazigq7aamyysl5sja64hsfuxqcm", + "3a1f9a22486d1dd27b9a4d6a24114ce4d9bbd9f93e4618033c9be2c92615bbd7" + ] + ], + "spec": "patchelf@0.17.2 platform=linux target=aarch64 %gcc" + }, + { + "binaries": [ + [ + "gcc-runtime", + "cbdic4gway7fjticqpycyxiq7ttwnde2", + "b57b0fa49669e3d224ce70bb7ac571691fa3947d30da4317c2bda97d13928226" + ], + [ + "patchelf", + "kiaoaxjuvmyzcvgks6rrsb4ydjo2uo7j", + "d495bd63d5ac04947640af8cef3812e8504f48c7a3027098cc2b2640aec8d0bd" + ] + ], + "spec": "patchelf@0.17.2 platform=linux target=ppc64le %gcc" + }, + { + "binaries": [ + [ + "gcc-runtime", + "is2veele627qiqdkcq4eghofnk7ukkyt", + "e47ddd094a147df1a80b8f6d78ae21aa96a713bd73a81946fcf7fb3c17cdc651" + ], + [ + "patchelf", + "fpek7u6dm2qhg6ad3rboha7afnu3ctpy", + "88d2f7aa5f66104ea143a849b26c3ba7c1f722ba802ea6690b49587579814fec" + ] + ], + "spec": "patchelf@0.17.2 platform=linux target=x86_64 %gcc" + } + ] +} \ No newline at end of file diff --git a/share/spack/qa/environment_activation.py b/share/spack/qa/environment_activation.py new file mode 100644 index 00000000000000..fe35e76e54cff5 --- /dev/null +++ b/share/spack/qa/environment_activation.py @@ -0,0 +1,14 @@ +import spack.config +import spack.environment + +KEY = "concretizer:unify" + +before = spack.config.CONFIG.get(KEY) +with spack.environment.active_environment().manifest.use_config(): + within = spack.config.CONFIG.get(KEY) +after = spack.config.CONFIG.get(KEY) + +if before == within == after: + print(f"SUCCESS: {before}") +else: + print(f"FAILURE: {before} -> {within} -> {after}") diff --git a/share/spack/qa/run-unit-tests b/share/spack/qa/run-unit-tests index 60775a8286222c..cc86de6f4ef16e 100755 --- a/share/spack/qa/run-unit-tests +++ b/share/spack/qa/run-unit-tests @@ -46,8 +46,8 @@ $coverage_run $(which spack) python -c "from spack_repo.builtin.packages.mpileak # Run unit tests with code coverage #----------------------------------------------------------- # Check if xdist is available -if [[ "$UNIT_TEST_COVERAGE" != "true" ]] && python -m pytest -VV 2>&1 | grep xdist; then - export PYTEST_ADDOPTS="$PYTEST_ADDOPTS --dist loadfile --tx '${SPACK_TEST_PARALLEL:=3}*popen//python=./bin/spack-tmpconfig python -u ./bin/spack python'" +if python3 -m pytest -VV 2>&1 | grep xdist; then + export PYTEST_ADDOPTS="$PYTEST_ADDOPTS --dist loadfile -n${SPACK_TEST_PARALLEL:=3}" fi # We are running pytest-cov after the addition of pytest-xdist, since the former # integrates with the latter and not the other way around. We also pass the coverage # configuration file explicitly, since otherwise it might not be located by # subprocesses in some cases that we have not been able to pin down.
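# Illustration only, not part of the patch: when xdist is detected and
# UNIT_TEST_COVERAGE is set, the PYTEST_ADDOPTS assembled by the xdist branch
# above and the coverage branch below make the final invocation behave roughly
# like
#   python3 -m pytest -x --verbose --dist loadfile -n3 \
#       --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml
# assuming SPACK_TEST_PARALLEL is left at its default of 3.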
if [[ "$UNIT_TEST_COVERAGE" == "true" ]]; then - "$(which spack)" unit-test -x --verbose --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml -else - "$(which spack)" unit-test -x --verbose + export PYTEST_ADDOPTS="$PYTEST_ADDOPTS --cov --cov-config=pyproject.toml --cov-report=xml:coverage.xml" fi +python3 -m pytest -x --verbose bash "$QA_DIR/test-env-cfg.sh" diff --git a/share/spack/qa/setup-env-test.sh b/share/spack/qa/setup-env-test.sh index d6960dec24ebb9..0578fbe90567c6 100755 --- a/share/spack/qa/setup-env-test.sh +++ b/share/spack/qa/setup-env-test.sh @@ -148,6 +148,25 @@ contains "usage: spack env deactivate " spack env deactivate no_such_environment contains "usage: spack env deactivate " spack env deactivate -h contains "usage: spack env deactivate " spack env deactivate --help +title "Testing 'spack config edit'" +echo "Testing 'spack config edit' with malformed spack.yaml" +spack env activate --temp +bad_yaml_env=$(spack location -e) +mv $bad_yaml_env/spack.yaml $bad_yaml_env/.backup +echo "bad_yaml" > $bad_yaml_env/spack.yaml +EDITOR=cat contains "Error: " spack config edit # error message prints first +EDITOR=cat contains "bad_yaml" spack config edit # followed by call to EDITOR + +echo "testing 'spack config edit' with non-complying spack.yaml" +cat > $bad_yaml_env/spack.yaml < - # from pkg_resources import load_entry_point - # ImportError: No module named pkg_resources - # - # Print a more useful error message if setuptools not found. - if [[ $dep == flake8 || $dep == sphinx* ]]; then - # Find which Python is being run - # Spack-installed packages have a hard-coded shebang - python_cmd=$(head -n 1 $(which $dep) | cut -c 3-) - # May not have a shebang - if [[ $python_cmd != *python* ]]; then - python_cmd=python - fi - # Check if setuptools is in the PYTHONPATH - if ! $python_cmd -c "import setuptools" 2> /dev/null; then - echo "ERROR: setuptools is required to run $dep." - echo "Please add it to your PYTHONPATH." - - exit 1 - fi - fi done echo "Dependencies found." 
} diff --git a/share/spack/spack-completion.bash b/share/spack/spack-completion.bash index a99cf86648ea75..ecfb9b01c23244 100644 --- a/share/spack/spack-completion.bash +++ b/share/spack/spack-completion.bash @@ -398,7 +398,7 @@ SPACK_ALIASES="concretise:concretize;containerise:containerize;rm:remove" _spack() { if $list_options then - SPACK_COMPREPLY="-h --help -H --all-help --color -c --config -C --config-scope -d --debug --timestamp --pdb -e --env -D --env-dir -E --no-env --use-env-repo -k --insecure -l --enable-locks -L --disable-locks -m --mock -b --bootstrap -p --profile --sorted-profile --lines -v --verbose --stacktrace -t --backtrace -V --version --print-shell-vars" + SPACK_COMPREPLY="--color -v --verbose -k --insecure -b --bootstrap -V --version -h --help -H --all-help -c --config -C --config-scope -e --env -D --env-dir -E --no-env --use-env-repo -d --debug -t --backtrace --pdb --timestamp -m --mock --print-shell-vars --stacktrace -l --enable-locks -L --disable-locks -p --profile --profile-file --sorted-profile --lines" else SPACK_COMPREPLY="add arch audit blame bootstrap build-env buildcache cd change checksum ci clean commands compiler compilers concretize concretise config containerize containerise create debug deconcretize dependencies dependents deprecate dev-build develop diff docs edit env extensions external fetch find gc gpg graph help info install license list load location log-parse logs maintainers make-installer mark mirror module patch pkg providers pydoc python reindex remove rm repo resource restage solve spec stage style tags test test-env tutorial undevelop uninstall unit-test unload url verify versions view" fi @@ -563,7 +563,7 @@ _spack_buildcache() { then SPACK_COMPREPLY="-h --help" else - SPACK_COMPREPLY="push create install list keys check download prune save-specfile sync update-index rebuild-index migrate" + SPACK_COMPREPLY="push create install list keys check download prune save-specfile sync check-index update-index rebuild-index migrate" fi } @@ -623,7 +623,7 @@ _spack_buildcache_download() { _spack_buildcache_prune() { if $list_options then - SPACK_COMPREPLY="-h --help --dry-run" + SPACK_COMPREPLY="-h --help -k --keeplist --dry-run" else _mirrors fi @@ -642,10 +642,19 @@ _spack_buildcache_sync() { fi } +_spack_buildcache_check_index() { + if $list_options + then + SPACK_COMPREPLY="-h --help --verify --name -n --output -o" + else + _mirrors + fi +} + _spack_buildcache_update_index() { if $list_options then - SPACK_COMPREPLY="-h --help -k --keys" + SPACK_COMPREPLY="-h --help --name -n --append -a --force -f -k --keys -y --yes-to-all" else _mirrors fi @@ -654,7 +663,7 @@ _spack_buildcache_update_index() { _spack_buildcache_rebuild_index() { if $list_options then - SPACK_COMPREPLY="-h --help -k --keys" + SPACK_COMPREPLY="-h --help --name -n --append -a --force -f -k --keys -y --yes-to-all" else _mirrors fi @@ -681,7 +690,7 @@ _spack_cd() { _spack_change() { if $list_options then - SPACK_COMPREPLY="-h --help -l --list-name --match-spec -a --all" + SPACK_COMPREPLY="-h --help -l --list-name --match-spec -a --all -c --concrete -C --concrete-only" else _all_packages fi @@ -706,7 +715,7 @@ _spack_ci() { } _spack_ci_generate() { - SPACK_COMPREPLY="-h --help --output-file --prune-dag --no-prune-dag --prune-unaffected --no-prune-unaffected --prune-externals --no-prune-externals --check-index-only --artifacts-root -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs" + SPACK_COMPREPLY="-h --help --output-file --prune-dag --no-prune-dag 
--prune-unaffected --no-prune-unaffected --prune-externals --no-prune-externals --check-index-only --artifacts-root --forward-variable -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs" } _spack_ci_rebuild_index() { @@ -714,7 +723,7 @@ _spack_ci_rebuild_index() { } _spack_ci_rebuild() { - SPACK_COMPREPLY="-h --help -t --tests --fail-fast --timeout -j --jobs" + SPACK_COMPREPLY="-h --help -t --tests --no-fail-fast --fail-fast --timeout -j --jobs" } _spack_ci_reproduce_build() { @@ -765,7 +774,7 @@ _spack_compiler() { _spack_compiler_find() { if $list_options then - SPACK_COMPREPLY="-h --help --mixed-toolchain --no-mixed-toolchain --scope -j --jobs" + SPACK_COMPREPLY="-h --help --scope -j --jobs" else SPACK_COMPREPLY="" fi @@ -774,7 +783,7 @@ _spack_compiler_find() { _spack_compiler_add() { if $list_options then - SPACK_COMPREPLY="-h --help --mixed-toolchain --no-mixed-toolchain --scope -j --jobs" + SPACK_COMPREPLY="-h --help --scope -j --jobs" else SPACK_COMPREPLY="" fi @@ -809,7 +818,7 @@ _spack_compiler_ls() { _spack_compiler_info() { if $list_options then - SPACK_COMPREPLY="-h --help --scope" + SPACK_COMPREPLY="-h --help --scope --remote" else _installed_compilers fi @@ -820,11 +829,11 @@ _spack_compilers() { } _spack_concretize() { - SPACK_COMPREPLY="-h --help --test -q --quiet -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs" + SPACK_COMPREPLY="-h --help --test -q --quiet -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs --non-defaults" } _spack_concretise() { - SPACK_COMPREPLY="-h --help --test -q --quiet -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs" + SPACK_COMPREPLY="-h --help --test -q --quiet -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated -j --jobs --non-defaults" } _spack_config() { @@ -839,7 +848,7 @@ _spack_config() { _spack_config_get() { if $list_options then - SPACK_COMPREPLY="-h --help" + SPACK_COMPREPLY="-h --help --json" else _config_sections fi @@ -870,7 +879,7 @@ _spack_config_list() { _spack_config_scopes() { if $list_options then - SPACK_COMPREPLY="-h --help -p --paths -t --type" + SPACK_COMPREPLY="-h --help -p --paths -t --type -v --verbose" else _config_sections fi @@ -1012,7 +1021,7 @@ _spack_dev_build() { _spack_develop() { if $list_options then - SPACK_COMPREPLY="-h --help -p --path -b --build-directory --no-clone --clone -f --force -r --recursive" + SPACK_COMPREPLY="-h --help -p --path -b --build-directory --no-clone --clone --no-modify-concrete-specs -f --force -r --recursive" else _all_packages fi @@ -1232,7 +1241,7 @@ _spack_fetch() { _spack_find() { if $list_options then - SPACK_COMPREPLY="-h --help --format -H --hashes --json -I --install-status --specfile-format -d --deps -p --paths --groups --no-groups -l --long -L --very-long -t --tag -N --namespaces -r --only-roots -c --show-concretized -f --show-flags --show-full-compiler -x --explicit -X --implicit -u --unknown -m --missing -v --variants --loaded -M --only-missing --only-deprecated --deprecated --install-tree --start-date --end-date" + SPACK_COMPREPLY="-h --help --format -H --hashes --json -I --install-status --specfile-format -d --deps -p --paths --groups --no-groups -l --long -L --very-long -t --tag -N --namespaces -r --only-roots -c --show-concretized --show-configured-externals -f --show-flags --show-full-compiler -x --explicit -X --implicit -e --external -u --unknown -m --missing -v --variants --loaded -M --only-missing --only-deprecated --deprecated 
--install-tree --start-date --end-date" else _installed_packages fi @@ -1348,7 +1357,7 @@ _spack_help() { _spack_info() { if $list_options then - SPACK_COMPREPLY="-h --help -a --all --detectable --maintainers --namespace --no-dependencies --no-variants --no-versions --phases --tags --tests --virtuals --variants-by-name" + SPACK_COMPREPLY="-h --help -a --all --by-name --by-when --detectable --maintainers --namespace --no-dependencies --no-variants --no-versions --phases --tags --tests --virtuals --variants-by-name" else _all_packages fi @@ -1464,7 +1473,7 @@ _spack_mirror() { _spack_mirror_create() { if $list_options then - SPACK_COMPREPLY="-h --help -d --directory -a --all --file --exclude-file --exclude-specs --skip-unstable-versions -D --dependencies -n --versions-per-spec --private -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" + SPACK_COMPREPLY="-h --help -d --directory -a --all -j --jobs --file --exclude-file --exclude-specs --skip-unstable-versions -D --dependencies -n --versions-per-spec --private -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" else _all_packages fi @@ -1477,7 +1486,7 @@ _spack_mirror_destroy() { _spack_mirror_add() { if $list_options then - SPACK_COMPREPLY="-h --help --scope --type --autopush --unsigned --signed --s3-access-key-id --s3-access-key-id-variable --s3-access-key-secret --s3-access-key-secret-variable --s3-access-token --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password --oci-password-variable" + SPACK_COMPREPLY="-h --help --scope --type --autopush --unsigned --signed --name -n --s3-access-key-id --s3-access-key-id-variable --s3-access-key-secret-variable --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password-variable" else _mirrors fi @@ -1486,7 +1495,7 @@ _spack_mirror_add() { _spack_mirror_remove() { if $list_options then - SPACK_COMPREPLY="-h --help --scope" + SPACK_COMPREPLY="-h --help --scope --all-scopes" else _mirrors fi @@ -1495,7 +1504,7 @@ _spack_mirror_remove() { _spack_mirror_rm() { if $list_options then - SPACK_COMPREPLY="-h --help --scope" + SPACK_COMPREPLY="-h --help --scope --all-scopes" else _mirrors fi @@ -1504,7 +1513,7 @@ _spack_mirror_rm() { _spack_mirror_set_url() { if $list_options then - SPACK_COMPREPLY="-h --help --push --fetch --scope --s3-access-key-id --s3-access-key-id-variable --s3-access-key-secret --s3-access-key-secret-variable --s3-access-token --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password --oci-password-variable" + SPACK_COMPREPLY="-h --help --push --fetch --scope --s3-access-key-id --s3-access-key-id-variable --s3-access-key-secret-variable --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password-variable" else _mirrors fi @@ -1513,7 +1522,7 @@ _spack_mirror_set_url() { _spack_mirror_set() { if $list_options then - SPACK_COMPREPLY="-h --help --push --fetch --type --url --autopush --no-autopush --unsigned --signed --scope --s3-access-key-id --s3-access-key-id-variable --s3-access-key-secret --s3-access-key-secret-variable --s3-access-token --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password --oci-password-variable" + SPACK_COMPREPLY="-h --help --push --fetch --type --url --autopush --no-autopush --unsigned --signed --scope --s3-access-key-id --s3-access-key-id-variable 
--s3-access-key-secret-variable --s3-access-token-variable --s3-profile --s3-endpoint-url --oci-username --oci-username-variable --oci-password-variable" else _mirrors fi @@ -1839,7 +1848,7 @@ _spack_repo_set() { _spack_repo_remove() { if $list_options then - SPACK_COMPREPLY="-h --help --scope" + SPACK_COMPREPLY="-h --help --scope --all-scopes" else _repos fi @@ -1848,7 +1857,7 @@ _spack_repo_remove() { _spack_repo_rm() { if $list_options then - SPACK_COMPREPLY="-h --help --scope" + SPACK_COMPREPLY="-h --help --scope --all-scopes" else _repos fi @@ -1906,7 +1915,7 @@ _spack_restage() { _spack_solve() { if $list_options then - SPACK_COMPREPLY="-h --help --show --timers --stats -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json --format -c --cover -t --types -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" + SPACK_COMPREPLY="-h --help --show --timers --stats -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json --format --non-defaults -c --cover -t --types -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" else _all_packages fi @@ -1915,7 +1924,7 @@ _spack_solve() { _spack_spec() { if $list_options then - SPACK_COMPREPLY="-h --help -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json --format -c --cover -t --types -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" + SPACK_COMPREPLY="-h --help -l --long -L --very-long -N --namespaces -I --install-status --no-install-status -y --yaml -j --json --format --non-defaults -c --cover -t --types -f --force -U --fresh --reuse --fresh-roots --reuse-deps --deprecated" else _all_packages fi @@ -2027,7 +2036,7 @@ _spack_tutorial() { _spack_undevelop() { if $list_options then - SPACK_COMPREPLY="-h --help -a --all" + SPACK_COMPREPLY="-h --help --no-modify-concrete-specs -a --all" else _all_packages fi @@ -2095,7 +2104,7 @@ _spack_verify() { then SPACK_COMPREPLY="-h --help" else - SPACK_COMPREPLY="manifest libraries" + SPACK_COMPREPLY="manifest libraries versions" fi } @@ -2117,6 +2126,15 @@ _spack_verify_libraries() { fi } +_spack_verify_versions() { + if $list_options + then + SPACK_COMPREPLY="-h --help" + else + _installed_packages + fi +} + _spack_versions() { if $list_options then diff --git a/share/spack/spack-completion.fish b/share/spack/spack-completion.fish index bf828b3435d847..c740edfa425e10 100644 --- a/share/spack/spack-completion.fish +++ b/share/spack/spack-completion.fish @@ -346,13 +346,13 @@ complete -c spack --erase # Everything below here is auto-generated. 
# spack -set -g __fish_spack_optspecs_spack h/help H/all-help color= c/config= C/config-scope= d/debug timestamp pdb e/env= D/env-dir= E/no-env use-env-repo k/insecure l/enable-locks L/disable-locks m/mock b/bootstrap p/profile sorted-profile= lines= v/verbose stacktrace t/backtrace V/version print-shell-vars= +set -g __fish_spack_optspecs_spack color= v/verbose k/insecure b/bootstrap V/version h/help H/all-help c/config= C/config-scope= e/env= D/env-dir= E/no-env use-env-repo d/debug t/backtrace pdb timestamp m/mock print-shell-vars= stacktrace l/enable-locks L/disable-locks p/profile profile-file= sorted-profile= lines= complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a add -d 'add a spec to an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a arch -d 'print architecture information about this machine' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a audit -d 'audit configuration files, packages, etc.' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a blame -d 'show contributors to packages' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a bootstrap -d 'manage bootstrap configuration' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a build-env -d 'run a command in a spec'"'"'s install environment, or dump its environment to screen or file' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a build-env -d 'dump the install environment for a spec,' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a buildcache -d 'create, download and install binary packages' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a cd -d 'cd to spack directories in the shell' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a change -d 'change an existing spec in an environment' @@ -365,20 +365,20 @@ complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a compilers -d 'lis complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a concretize -d 'concretize an environment and write a lockfile' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a concretise -d 'concretize an environment and write a lockfile' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a config -d 'get and set configuration options' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a containerize -d 'creates recipes to build images for different container runtimes' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a containerise -d 'creates recipes to build images for different container runtimes' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a containerize -d 'create a container build recipe from an environment' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a containerise -d 'create a container build recipe from an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a create -d 'create a new package file' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a debug -d 'debugging commands for troubleshooting Spack' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a deconcretize -d 'remove specs from the concretized lockfile of an environment' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a deconcretize -d 'remove specs from the lockfile of an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a dependencies -d 'show dependencies of a package' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a dependents -d 'show packages that 
depend on another' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a deprecate -d 'replace one package with another via symlinks' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a dev-build -d 'developer build: build from code in current working directory' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a dev-build -d 'build package from code in current working directory' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a develop -d 'add a spec to an environment'"'"'s dev-build information' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a diff -d 'compare two specs' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a docs -d 'open spack documentation in a web browser' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a edit -d 'open package files in $EDITOR' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a env -d 'manage virtual environments' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a edit -d 'open package files in ``$EDITOR``' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a env -d 'manage environments' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a extensions -d 'list extensions for package' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a external -d 'manage external packages in Spack configuration' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a fetch -d 'fetch archives for packages' @@ -400,7 +400,7 @@ complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a make-installer -d complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a mark -d 'mark packages as explicitly or implicitly installed' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a mirror -d 'manage mirrors (source and binary)' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a module -d 'generate/manage module files' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a patch -d 'patch expanded archive sources in preparation for install' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a patch -d 'patch expanded sources in preparation for install' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a pkg -d 'query packages associated with particular git revisions' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a providers -d 'list packages that provide a particular virtual package' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a pydoc -d 'run pydoc from within spack' @@ -409,7 +409,7 @@ complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a reindex -d 'rebui complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a remove -d 'remove specs from an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a rm -d 'remove specs from an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a repo -d 'manage package source repositories' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a resource -d 'list downloadable resources (tarballs, repos, patches, etc.)' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a resource -d 'list downloadable resources (tarballs, repos, patches)' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a restage -d 'revert checked out package source code' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a solve -d 'concretize a specs using an ASP solver' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a spec -d 'show 
what would be installed, given a spec' @@ -417,7 +417,7 @@ complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a stage -d 'expand complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a style -d 'runs source code style checks on spack' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a tags -d 'show package tags and associated packages' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a test -d 'run spack'"'"'s tests for an install' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a test-env -d 'run a command in a spec'"'"'s test environment, or dump its environment to screen or file' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a test-env -d 'run a command in a spec'"'"'s test environment,' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a tutorial -d 'set up spack for our tutorial (WARNING: modifies config!)' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a undevelop -d 'remove specs from an environment' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a uninstall -d 'remove installed packages' @@ -426,57 +426,59 @@ complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a unload -d 'remove complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a url -d 'debugging tool for url parsing' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a verify -d 'verify spack installations on disk' complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a versions -d 'list available versions of a package' -complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a view -d 'project packages to a compact naming scheme on the filesystem' +complete -c spack -n '__fish_spack_using_command_pos 0 ' -f -a view -d 'manipulate view directories in the filesystem' +complete -c spack -n '__fish_spack_using_command ' -l color -r -f -a 'always never auto' +complete -c spack -n '__fish_spack_using_command ' -l color -r -d 'when to colorize output (default: auto)' +complete -c spack -n '__fish_spack_using_command ' -s v -l verbose -f -a verbose +complete -c spack -n '__fish_spack_using_command ' -s v -l verbose -d 'print additional output during builds' +complete -c spack -n '__fish_spack_using_command ' -s k -l insecure -f -a insecure +complete -c spack -n '__fish_spack_using_command ' -s k -l insecure -d 'do not check ssl certificates when downloading' +complete -c spack -n '__fish_spack_using_command ' -s b -l bootstrap -f -a bootstrap +complete -c spack -n '__fish_spack_using_command ' -s b -l bootstrap -d 'use bootstrap config, store, and externals' +complete -c spack -n '__fish_spack_using_command ' -s V -l version -f -a version +complete -c spack -n '__fish_spack_using_command ' -s V -l version -d 'show version number and exit' complete -c spack -n '__fish_spack_using_command ' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command ' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command ' -s H -l all-help -f -a help -complete -c spack -n '__fish_spack_using_command ' -s H -l all-help -d 'show help for all commands (same as spack help --all)' -complete -c spack -n '__fish_spack_using_command ' -l color -r -f -a 'always never auto' -complete -c spack -n '__fish_spack_using_command ' -l color -r -d 'when to colorize output (default: auto)' +complete -c spack -n '__fish_spack_using_command ' -s H -l all-help -d 'show help for all commands (same as `spack help --all`)' complete -c spack -n '__fish_spack_using_command ' 
-s c -l config -r -f -a config_vars -complete -c spack -n '__fish_spack_using_command ' -s c -l config -r -d 'add one or more custom, one off config settings' +complete -c spack -n '__fish_spack_using_command ' -s c -l config -r -d 'add one or more custom, one-off config settings' complete -c spack -n '__fish_spack_using_command ' -s C -l config-scope -r -f -a config_scopes -complete -c spack -n '__fish_spack_using_command ' -s C -l config-scope -r -d 'add directory or environment as read-only configuration scope, without activating the environment.' -complete -c spack -n '__fish_spack_using_command ' -s d -l debug -f -a debug -complete -c spack -n '__fish_spack_using_command ' -s d -l debug -d 'write out debug messages' -complete -c spack -n '__fish_spack_using_command ' -l timestamp -f -a timestamp -complete -c spack -n '__fish_spack_using_command ' -l timestamp -d 'add a timestamp to tty output' -complete -c spack -n '__fish_spack_using_command ' -l pdb -f -a pdb -complete -c spack -n '__fish_spack_using_command ' -l pdb -d 'run spack under the pdb debugger' +complete -c spack -n '__fish_spack_using_command ' -s C -l config-scope -r -d 'add directory or environment as read-only config scope' complete -c spack -n '__fish_spack_using_command ' -s e -l env -r -f -a env -complete -c spack -n '__fish_spack_using_command ' -s e -l env -r -d 'run with a specific environment (see spack env)' +complete -c spack -n '__fish_spack_using_command ' -s e -l env -r -d 'run with an environment' complete -c spack -n '__fish_spack_using_command ' -s D -l env-dir -r -f -a env_dir -complete -c spack -n '__fish_spack_using_command ' -s D -l env-dir -r -d 'run with an environment directory (ignore managed environments)' +complete -c spack -n '__fish_spack_using_command ' -s D -l env-dir -r -d 'run with environment in directory (ignore managed envs)' complete -c spack -n '__fish_spack_using_command ' -s E -l no-env -f -a no_env complete -c spack -n '__fish_spack_using_command ' -s E -l no-env -d 'run without any environments activated (see spack env)' complete -c spack -n '__fish_spack_using_command ' -l use-env-repo -f -a use_env_repo -complete -c spack -n '__fish_spack_using_command ' -l use-env-repo -d 'when running in an environment, use its package repository' -complete -c spack -n '__fish_spack_using_command ' -s k -l insecure -f -a insecure -complete -c spack -n '__fish_spack_using_command ' -s k -l insecure -d 'do not check ssl certificates when downloading' +complete -c spack -n '__fish_spack_using_command ' -l use-env-repo -d 'when in an environment, use its package repository' +complete -c spack -n '__fish_spack_using_command ' -s d -l debug -f -a debug +complete -c spack -n '__fish_spack_using_command ' -s d -l debug -d 'write out debug messages' +complete -c spack -n '__fish_spack_using_command ' -s t -l backtrace -f -a backtrace +complete -c spack -n '__fish_spack_using_command ' -s t -l backtrace -d 'always show backtraces for exceptions' +complete -c spack -n '__fish_spack_using_command ' -l pdb -f -a pdb +complete -c spack -n '__fish_spack_using_command ' -l pdb -d 'run spack under the pdb debugger' +complete -c spack -n '__fish_spack_using_command ' -l timestamp -f -a timestamp +complete -c spack -n '__fish_spack_using_command ' -l timestamp -d 'add a timestamp to tty output' +complete -c spack -n '__fish_spack_using_command ' -s m -l mock -f -a mock +complete -c spack -n '__fish_spack_using_command ' -s m -l mock -d 'use mock packages instead of real ones' +complete -c spack -n 
'__fish_spack_using_command ' -l print-shell-vars -r -f -a print_shell_vars +complete -c spack -n '__fish_spack_using_command ' -l print-shell-vars -r -d 'print info needed by setup-env.*sh' +complete -c spack -n '__fish_spack_using_command ' -l stacktrace -f -a stacktrace +complete -c spack -n '__fish_spack_using_command ' -l stacktrace -d 'add stacktraces to all printed statements' complete -c spack -n '__fish_spack_using_command ' -s l -l enable-locks -f -a locks complete -c spack -n '__fish_spack_using_command ' -s l -l enable-locks -d 'use filesystem locking (default)' complete -c spack -n '__fish_spack_using_command ' -s L -l disable-locks -f -a locks complete -c spack -n '__fish_spack_using_command ' -s L -l disable-locks -d 'do not use filesystem locking (unsafe)' -complete -c spack -n '__fish_spack_using_command ' -s m -l mock -f -a mock -complete -c spack -n '__fish_spack_using_command ' -s m -l mock -d 'use mock packages instead of real ones' -complete -c spack -n '__fish_spack_using_command ' -s b -l bootstrap -f -a bootstrap -complete -c spack -n '__fish_spack_using_command ' -s b -l bootstrap -d 'use bootstrap configuration (bootstrap store, config, externals)' complete -c spack -n '__fish_spack_using_command ' -s p -l profile -f -a spack_profile complete -c spack -n '__fish_spack_using_command ' -s p -l profile -d 'profile execution using cProfile' +complete -c spack -n '__fish_spack_using_command ' -l profile-file -r -f -a profile_file +complete -c spack -n '__fish_spack_using_command ' -l profile-file -r -d 'Filename to save profile data to.' complete -c spack -n '__fish_spack_using_command ' -l sorted-profile -r -f -a sorted_profile -complete -c spack -n '__fish_spack_using_command ' -l sorted-profile -r -d 'profile and sort' +complete -c spack -n '__fish_spack_using_command ' -l sorted-profile -r -d 'profile and sort by STAT, which can be: calls, ncalls,' complete -c spack -n '__fish_spack_using_command ' -l lines -r -f -a lines complete -c spack -n '__fish_spack_using_command ' -l lines -r -d 'lines of profile output or '"'"'all'"'"' (default: 20)' -complete -c spack -n '__fish_spack_using_command ' -s v -l verbose -f -a verbose -complete -c spack -n '__fish_spack_using_command ' -s v -l verbose -d 'print additional output during builds' -complete -c spack -n '__fish_spack_using_command ' -l stacktrace -f -a stacktrace -complete -c spack -n '__fish_spack_using_command ' -l stacktrace -d 'add stacktraces to all printed statements' -complete -c spack -n '__fish_spack_using_command ' -s t -l backtrace -f -a backtrace -complete -c spack -n '__fish_spack_using_command ' -s t -l backtrace -d 'always show backtraces for exceptions' -complete -c spack -n '__fish_spack_using_command ' -s V -l version -f -a version -complete -c spack -n '__fish_spack_using_command ' -s V -l version -d 'show version number and exit' -complete -c spack -n '__fish_spack_using_command ' -l print-shell-vars -r -f -a print_shell_vars -complete -c spack -n '__fish_spack_using_command ' -l print-shell-vars -r -d 'print info needed by setup-env.*sh' # spack add set -g __fish_spack_optspecs_spack_add h/help l/list-name= @@ -601,7 +603,7 @@ set -g __fish_spack_optspecs_spack_bootstrap_enable h/help scope= complete -c spack -n '__fish_spack_using_command_pos 0 bootstrap enable' -f -a '(__fish_spack_bootstrap_names)' complete -c spack -n '__fish_spack_using_command bootstrap enable' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command bootstrap enable' -s h -l help -d 'show this help 
message and exit' -complete -c spack -n '__fish_spack_using_command bootstrap enable' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command bootstrap enable' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command bootstrap enable' -l scope -r -d 'configuration scope to read/modify' # spack bootstrap disable @@ -609,7 +611,7 @@ set -g __fish_spack_optspecs_spack_bootstrap_disable h/help scope= complete -c spack -n '__fish_spack_using_command_pos 0 bootstrap disable' -f -a '(__fish_spack_bootstrap_names)' complete -c spack -n '__fish_spack_using_command bootstrap disable' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command bootstrap disable' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command bootstrap disable' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command bootstrap disable' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command bootstrap disable' -l scope -r -d 'configuration scope to read/modify' # spack bootstrap reset @@ -624,14 +626,14 @@ set -g __fish_spack_optspecs_spack_bootstrap_root h/help scope= complete -c spack -n '__fish_spack_using_command_pos 0 bootstrap root' -f -a '(__fish_complete_directories)' complete -c spack -n '__fish_spack_using_command bootstrap root' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command bootstrap root' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command bootstrap root' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command bootstrap root' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command bootstrap root' -l scope -r -d 'configuration scope to read/modify' # spack bootstrap list set -g __fish_spack_optspecs_spack_bootstrap_list h/help scope= complete -c spack -n '__fish_spack_using_command bootstrap list' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command bootstrap list' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command bootstrap list' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command bootstrap list' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command bootstrap list' -l scope -r -d 'configuration scope to read/modify' # spack bootstrap add @@ -640,7 +642,7 @@ complete -c spack -n '__fish_spack_using_command_pos 0 bootstrap add' -f -a '(__ complete -c spack -n '__fish_spack_using_command_pos 1 bootstrap add' -f -a '(__fish_spack_environments)' complete -c spack -n '__fish_spack_using_command bootstrap add' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command bootstrap add' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command bootstrap add' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command bootstrap add' -l scope -r -f -a '_builtin defaults:base defaults 
system site user spack command_line' complete -c spack -n '__fish_spack_using_command bootstrap add' -l scope -r -d 'configuration scope to read/modify' complete -c spack -n '__fish_spack_using_command bootstrap add' -l trust -f -a trust complete -c spack -n '__fish_spack_using_command bootstrap add' -l trust -d 'enable the source immediately upon addition' @@ -694,11 +696,12 @@ complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a list -d complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a keys -d 'get public keys available on mirrors' complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a check -d 'check specs against remote binary mirror(s) to see if any need to be rebuilt' complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a download -d 'download buildcache entry from a remote mirror to local folder' -complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a prune -d 'prune stale buildcache entries from the mirror' +complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a prune -d 'prune buildcache entries from the mirror' complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a save-specfile -d 'get full spec for dependencies and write them to files in the specified output directory' complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a sync -d 'sync binaries (and associated metadata) from one mirror to another' -complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a update-index -d 'update a buildcache index' -complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a rebuild-index -d 'update a buildcache index' +complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a check-index -d 'Check if a build cache index, manifests, and blobs are consistent' +complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a update-index -d 'update a buildcache index or index view if extra arguments are provided.' +complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a rebuild-index -d 'update a buildcache index or index view if extra arguments are provided.' 
complete -c spack -n '__fish_spack_using_command_pos 0 buildcache' -f -a migrate -d 'perform in-place binary mirror migration (2 to 3)' complete -c spack -n '__fish_spack_using_command buildcache' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command buildcache' -s h -l help -d 'show this help message and exit' @@ -817,7 +820,7 @@ complete -c spack -n '__fish_spack_using_command buildcache check' -s m -l mirro complete -c spack -n '__fish_spack_using_command buildcache check' -s m -l mirror-url -r -d 'override any configured mirrors with this mirror URL' complete -c spack -n '__fish_spack_using_command buildcache check' -s o -l output-file -r -f -a output_file complete -c spack -n '__fish_spack_using_command buildcache check' -s o -l output-file -r -d 'file where rebuild info should be written' -complete -c spack -n '__fish_spack_using_command buildcache check' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command buildcache check' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command buildcache check' -l scope -r -d 'configuration scope containing mirrors to check' # spack buildcache download @@ -830,10 +833,12 @@ complete -c spack -n '__fish_spack_using_command buildcache download' -s p -l pa complete -c spack -n '__fish_spack_using_command buildcache download' -s p -l path -r -d 'path to directory where tarball should be downloaded' # spack buildcache prune -set -g __fish_spack_optspecs_spack_buildcache_prune h/help dry-run +set -g __fish_spack_optspecs_spack_buildcache_prune h/help k/keeplist= dry-run complete -c spack -n '__fish_spack_using_command buildcache prune' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command buildcache prune' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command buildcache prune' -s k -l keeplist -r -f -a keeplist +complete -c spack -n '__fish_spack_using_command buildcache prune' -s k -l keeplist -r -d 'file containing newline-delimited list of package hashes to keep (optional)' complete -c spack -n '__fish_spack_using_command buildcache prune' -l dry-run -f -a dry_run complete -c spack -n '__fish_spack_using_command buildcache prune' -l dry-run -d 'do not actually delete anything from the buildcache, but log what would be deleted' @@ -856,21 +861,49 @@ complete -c spack -n '__fish_spack_using_command buildcache sync' -s h -l help - complete -c spack -n '__fish_spack_using_command buildcache sync' -l manifest-glob -r -f -a manifest_glob complete -c spack -n '__fish_spack_using_command buildcache sync' -l manifest-glob -r -d 'a quoted glob pattern identifying CI rebuild manifest files' +# spack buildcache check-index +set -g __fish_spack_optspecs_spack_buildcache_check_index h/help verify= n/name= o/output= + +complete -c spack -n '__fish_spack_using_command buildcache check-index' -s h -l help -f -a help +complete -c spack -n '__fish_spack_using_command buildcache check-index' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command buildcache check-index' -l verify -r -f -a 'exists manifests blobs all' +complete -c spack -n '__fish_spack_using_command buildcache check-index' -l verify -r -d 'List of items to verify along with the index.'
+complete -c spack -n '__fish_spack_using_command buildcache check-index' -l name -s n -r -f -a name +complete -c spack -n '__fish_spack_using_command buildcache check-index' -l name -s n -r -d 'Name of the view index to check' +complete -c spack -n '__fish_spack_using_command buildcache check-index' -l output -s o -r -f -a output +complete -c spack -n '__fish_spack_using_command buildcache check-index' -l output -s o -r -d 'File to write check details to' + # spack buildcache update-index -set -g __fish_spack_optspecs_spack_buildcache_update_index h/help k/keys +set -g __fish_spack_optspecs_spack_buildcache_update_index h/help n/name= a/append f/force k/keys y/yes-to-all complete -c spack -n '__fish_spack_using_command buildcache update-index' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command buildcache update-index' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l name -s n -r -f -a name +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l name -s n -r -d 'Name of the view index to update' +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l append -s a -f -a append +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l append -s a -d 'Append the listed specs to the current view index if it already exists. This operation does not guarantee an atomic write and should be run with care.' +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l force -s f -f -a force +complete -c spack -n '__fish_spack_using_command buildcache update-index' -l force -s f -d 'If a view index already exists, overwrite it and suppress warnings (this is the default for non-view indices)' complete -c spack -n '__fish_spack_using_command buildcache update-index' -s k -l keys -f -a keys complete -c spack -n '__fish_spack_using_command buildcache update-index' -s k -l keys -d 'if provided, key index will be updated as well as package index' +complete -c spack -n '__fish_spack_using_command buildcache update-index' -s y -l yes-to-all -f -a yes_to_all +complete -c spack -n '__fish_spack_using_command buildcache update-index' -s y -l yes-to-all -d 'assume "yes" is the answer to every confirmation request' # spack buildcache rebuild-index -set -g __fish_spack_optspecs_spack_buildcache_rebuild_index h/help k/keys +set -g __fish_spack_optspecs_spack_buildcache_rebuild_index h/help n/name= a/append f/force k/keys y/yes-to-all complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l name -s n -r -f -a name +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l name -s n -r -d 'Name of the view index to update' +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l append -s a -f -a append +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l append -s a -d 'Append the listed specs to the current view index if it already exists. This operation does not guarantee an atomic write and should be run with care.'
+complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l force -s f -f -a force +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -l force -s f -d 'If a view index already exists, overwrite it and suppress warnings (this is the default for non-view indices)' complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s k -l keys -f -a keys complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s k -l keys -d 'if provided, key index will be updated as well as package index' +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s y -l yes-to-all -f -a yes_to_all +complete -c spack -n '__fish_spack_using_command buildcache rebuild-index' -s y -l yes-to-all -d 'assume "yes" is the answer to every confirmation request' # spack buildcache migrate set -g __fish_spack_optspecs_spack_buildcache_migrate h/help u/unsigned d/delete-existing y/yes-to-all @@ -913,16 +946,20 @@ complete -c spack -n '__fish_spack_using_command cd' -l first -f -a find_first complete -c spack -n '__fish_spack_using_command cd' -l first -d 'use the first match if multiple packages match the spec' # spack change -set -g __fish_spack_optspecs_spack_change h/help l/list-name= match-spec= a/all +set -g __fish_spack_optspecs_spack_change h/help l/list-name= match-spec= a/all c/concrete C/concrete-only complete -c spack -n '__fish_spack_using_command_pos_remainder 0 change' -f -k -a '(__fish_spack_specs)' complete -c spack -n '__fish_spack_using_command change' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command change' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command change' -s l -l list-name -r -f -a list_name -complete -c spack -n '__fish_spack_using_command change' -s l -l list-name -r -d 'name of the list to remove specs from' +complete -c spack -n '__fish_spack_using_command change' -s l -l list-name -r -d 'name of the list to remove abstract specs from' complete -c spack -n '__fish_spack_using_command change' -l match-spec -r -f -a match_spec -complete -c spack -n '__fish_spack_using_command change' -l match-spec -r -d 'if name is ambiguous, supply a spec to match' +complete -c spack -n '__fish_spack_using_command change' -l match-spec -r -d 'change all specs matching match-spec (default is match by spec name)' complete -c spack -n '__fish_spack_using_command change' -s a -l all -f -a all -complete -c spack -n '__fish_spack_using_command change' -s a -l all -d 'change all matching specs (allow changing more than one spec)' +complete -c spack -n '__fish_spack_using_command change' -s a -l all -d 'change all matching abstract specs (allow changing more than one abstract spec)' +complete -c spack -n '__fish_spack_using_command change' -s c -l concrete -f -a concrete +complete -c spack -n '__fish_spack_using_command change' -s c -l concrete -d 'change concrete specs in the environment' +complete -c spack -n '__fish_spack_using_command change' -s C -l concrete-only -f -a concrete_only +complete -c spack -n '__fish_spack_using_command change' -s C -l concrete-only -d 'change only concrete specs in the environment' # spack checksum set -g __fish_spack_optspecs_spack_checksum h/help keep-stage b/batch l/latest p/preferred a/add-to-package verify j/jobs= @@ -956,7 +993,7 @@ complete -c spack -n '__fish_spack_using_command ci' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command ci' -s h -l help -d 'show this help message and exit' # 
spack ci generate -set -g __fish_spack_optspecs_spack_ci_generate h/help output-file= prune-dag no-prune-dag prune-unaffected no-prune-unaffected prune-externals no-prune-externals check-index-only artifacts-root= f/force U/fresh reuse fresh-roots deprecated j/jobs= +set -g __fish_spack_optspecs_spack_ci_generate h/help output-file= prune-dag no-prune-dag prune-unaffected no-prune-unaffected prune-externals no-prune-externals check-index-only artifacts-root= forward-variable= f/force U/fresh reuse fresh-roots deprecated j/jobs= complete -c spack -n '__fish_spack_using_command ci generate' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command ci generate' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command ci generate' -l output-file -r -f -a output_file @@ -977,6 +1014,8 @@ complete -c spack -n '__fish_spack_using_command ci generate' -l check-index-onl complete -c spack -n '__fish_spack_using_command ci generate' -l check-index-only -d 'only check spec state from buildcache indices' complete -c spack -n '__fish_spack_using_command ci generate' -l artifacts-root -r -f -a artifacts_root complete -c spack -n '__fish_spack_using_command ci generate' -l artifacts-root -r -d 'path to the root of the artifacts directory' +complete -c spack -n '__fish_spack_using_command ci generate' -l forward-variable -r -f -a forward_variable +complete -c spack -n '__fish_spack_using_command ci generate' -l forward-variable -r -d 'Environment variables to forward from the generate environment to the generated jobs.' complete -c spack -n '__fish_spack_using_command ci generate' -s f -l force -f -a concretizer_force complete -c spack -n '__fish_spack_using_command ci generate' -s f -l force -d 'allow changes to concretized specs in spack.lock (in an env)' complete -c spack -n '__fish_spack_using_command ci generate' -s U -l fresh -f -a concretizer_reuse @@ -996,13 +1035,15 @@ complete -c spack -n '__fish_spack_using_command ci rebuild-index' -s h -l help complete -c spack -n '__fish_spack_using_command ci rebuild-index' -s h -l help -d 'show this help message and exit' # spack ci rebuild -set -g __fish_spack_optspecs_spack_ci_rebuild h/help t/tests fail-fast timeout= j/jobs= +set -g __fish_spack_optspecs_spack_ci_rebuild h/help t/tests no-fail-fast fail-fast timeout= j/jobs= complete -c spack -n '__fish_spack_using_command ci rebuild' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command ci rebuild' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command ci rebuild' -s t -l tests -f -a tests complete -c spack -n '__fish_spack_using_command ci rebuild' -s t -l tests -d 'run stand-alone tests after the build' +complete -c spack -n '__fish_spack_using_command ci rebuild' -l no-fail-fast -f -a fail_fast +complete -c spack -n '__fish_spack_using_command ci rebuild' -l no-fail-fast -d 'continue build/stand-alone tests after the first failure' complete -c spack -n '__fish_spack_using_command ci rebuild' -l fail-fast -f -a fail_fast -complete -c spack -n '__fish_spack_using_command ci rebuild' -l fail-fast -d 'stop stand-alone tests after the first failure' +complete -c spack -n '__fish_spack_using_command ci rebuild' -l fail-fast -d 'stop build/stand-alone tests after the first failure' complete -c spack -n '__fish_spack_using_command ci rebuild' -l timeout -r -f -a timeout complete -c spack -n '__fish_spack_using_command ci rebuild' -l timeout -r -d 'maximum time (in seconds) that tests are 
allowed to run' complete -c spack -n '__fish_spack_using_command ci rebuild' -s j -l jobs -r -f -a jobs @@ -1050,7 +1091,7 @@ complete -c spack -n '__fish_spack_using_command clean' -s p -l python-cache -d complete -c spack -n '__fish_spack_using_command clean' -s b -l bootstrap -f -a bootstrap complete -c spack -n '__fish_spack_using_command clean' -s b -l bootstrap -d 'remove software and configuration needed to bootstrap Spack' complete -c spack -n '__fish_spack_using_command clean' -s a -l all -f -a all -complete -c spack -n '__fish_spack_using_command clean' -s a -l all -d 'equivalent to -sdfmp (does not include --bootstrap)' +complete -c spack -n '__fish_spack_using_command clean' -s a -l all -d 'equivalent to ``-sdfmpb``' # spack commands set -g __fish_spack_optspecs_spack_commands h/help update-completion a/aliases format= header= update= @@ -1081,29 +1122,21 @@ complete -c spack -n '__fish_spack_using_command compiler' -s h -l help -f -a he complete -c spack -n '__fish_spack_using_command compiler' -s h -l help -d 'show this help message and exit' # spack compiler find -set -g __fish_spack_optspecs_spack_compiler_find h/help mixed-toolchain no-mixed-toolchain scope= j/jobs= +set -g __fish_spack_optspecs_spack_compiler_find h/help scope= j/jobs= complete -c spack -n '__fish_spack_using_command compiler find' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command compiler find' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compiler find' -l mixed-toolchain -f -a mixed_toolchain -complete -c spack -n '__fish_spack_using_command compiler find' -l mixed-toolchain -d '(DEPRECATED) Allow mixed toolchains (for example: clang, clang++, gfortran)' -complete -c spack -n '__fish_spack_using_command compiler find' -l no-mixed-toolchain -f -a mixed_toolchain -complete -c spack -n '__fish_spack_using_command compiler find' -l no-mixed-toolchain -d '(DEPRECATED) Do not allow mixed toolchains (for example: clang, clang++, gfortran)' -complete -c spack -n '__fish_spack_using_command compiler find' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler find' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler find' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command compiler find' -s j -l jobs -r -f -a jobs complete -c spack -n '__fish_spack_using_command compiler find' -s j -l jobs -r -d 'explicitly set number of parallel jobs' # spack compiler add -set -g __fish_spack_optspecs_spack_compiler_add h/help mixed-toolchain no-mixed-toolchain scope= j/jobs= +set -g __fish_spack_optspecs_spack_compiler_add h/help scope= j/jobs= complete -c spack -n '__fish_spack_using_command compiler add' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command compiler add' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compiler add' -l mixed-toolchain -f -a mixed_toolchain -complete -c spack -n '__fish_spack_using_command compiler add' -l mixed-toolchain -d '(DEPRECATED) Allow mixed toolchains (for example: clang, clang++, gfortran)' -complete -c spack -n '__fish_spack_using_command compiler add' -l no-mixed-toolchain -f -a mixed_toolchain -complete -c spack -n '__fish_spack_using_command compiler add' -l no-mixed-toolchain -d '(DEPRECATED) Do not allow mixed 
toolchains (for example: clang, clang++, gfortran)' -complete -c spack -n '__fish_spack_using_command compiler add' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler add' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler add' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command compiler add' -s j -l jobs -r -f -a jobs complete -c spack -n '__fish_spack_using_command compiler add' -s j -l jobs -r -d 'explicitly set number of parallel jobs' @@ -1115,7 +1148,7 @@ complete -c spack -n '__fish_spack_using_command compiler remove' -s h -l help - complete -c spack -n '__fish_spack_using_command compiler remove' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command compiler remove' -s a -l all -f -a all complete -c spack -n '__fish_spack_using_command compiler remove' -s a -l all -d 'remove ALL compilers that match spec' -complete -c spack -n '__fish_spack_using_command compiler remove' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler remove' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler remove' -l scope -r -d 'configuration scope to modify' # spack compiler rm @@ -1125,14 +1158,14 @@ complete -c spack -n '__fish_spack_using_command compiler rm' -s h -l help -f -a complete -c spack -n '__fish_spack_using_command compiler rm' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command compiler rm' -s a -l all -f -a all complete -c spack -n '__fish_spack_using_command compiler rm' -s a -l all -d 'remove ALL compilers that match spec' -complete -c spack -n '__fish_spack_using_command compiler rm' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler rm' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler rm' -l scope -r -d 'configuration scope to modify' # spack compiler list set -g __fish_spack_optspecs_spack_compiler_list h/help scope= remote complete -c spack -n '__fish_spack_using_command compiler list' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command compiler list' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compiler list' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler list' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler list' -l scope -r -d 'configuration scope to read from' complete -c spack -n '__fish_spack_using_command compiler list' -l remote -f -a remote complete -c spack -n '__fish_spack_using_command compiler list' -l remote -d 'list also compilers from registered buildcaches' @@ -1141,30 +1174,32 @@ complete -c spack -n '__fish_spack_using_command compiler list' -l remote -d 'li set -g __fish_spack_optspecs_spack_compiler_ls h/help scope= remote complete -c spack -n '__fish_spack_using_command compiler ls' -s h -l help -f -a help complete -c 
spack -n '__fish_spack_using_command compiler ls' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compiler ls' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler ls' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler ls' -l scope -r -d 'configuration scope to read from' complete -c spack -n '__fish_spack_using_command compiler ls' -l remote -f -a remote complete -c spack -n '__fish_spack_using_command compiler ls' -l remote -d 'list also compilers from registered buildcaches' # spack compiler info -set -g __fish_spack_optspecs_spack_compiler_info h/help scope= +set -g __fish_spack_optspecs_spack_compiler_info h/help scope= remote complete -c spack -n '__fish_spack_using_command_pos 0 compiler info' -f -a '(__fish_spack_installed_compilers)' complete -c spack -n '__fish_spack_using_command compiler info' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command compiler info' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compiler info' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compiler info' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compiler info' -l scope -r -d 'configuration scope to read from' +complete -c spack -n '__fish_spack_using_command compiler info' -l remote -f -a remote +complete -c spack -n '__fish_spack_using_command compiler info' -l remote -d 'list also compilers from registered buildcaches' # spack compilers set -g __fish_spack_optspecs_spack_compilers h/help scope= remote complete -c spack -n '__fish_spack_using_command compilers' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command compilers' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command compilers' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command compilers' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command compilers' -l scope -r -d 'configuration scope to read/modify' complete -c spack -n '__fish_spack_using_command compilers' -l remote -f -a remote complete -c spack -n '__fish_spack_using_command compilers' -l remote -d 'list also compilers from registered buildcaches' # spack concretize -set -g __fish_spack_optspecs_spack_concretize h/help test= q/quiet f/force U/fresh reuse fresh-roots deprecated j/jobs= +set -g __fish_spack_optspecs_spack_concretize h/help test= q/quiet f/force U/fresh reuse fresh-roots deprecated j/jobs= non-defaults complete -c spack -n '__fish_spack_using_command concretize' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command concretize' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command concretize' -l test -r -f -a 'root all' @@ -1183,9 +1218,11 @@ complete -c spack -n '__fish_spack_using_command concretize' -l deprecated -f -a complete -c spack -n '__fish_spack_using_command concretize' -l deprecated -d 'allow concretizer to select deprecated versions' complete -c spack -n '__fish_spack_using_command 
concretize' -s j -l jobs -r -f -a jobs complete -c spack -n '__fish_spack_using_command concretize' -s j -l jobs -r -d 'explicitly set number of parallel jobs' +complete -c spack -n '__fish_spack_using_command concretize' -l non-defaults -f -a non_defaults +complete -c spack -n '__fish_spack_using_command concretize' -l non-defaults -d 'highlight non-default versions or variants' # spack concretise -set -g __fish_spack_optspecs_spack_concretise h/help test= q/quiet f/force U/fresh reuse fresh-roots deprecated j/jobs= +set -g __fish_spack_optspecs_spack_concretise h/help test= q/quiet f/force U/fresh reuse fresh-roots deprecated j/jobs= non-defaults complete -c spack -n '__fish_spack_using_command concretise' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command concretise' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command concretise' -l test -r -f -a 'root all' @@ -1204,6 +1241,8 @@ complete -c spack -n '__fish_spack_using_command concretise' -l deprecated -f -a complete -c spack -n '__fish_spack_using_command concretise' -l deprecated -d 'allow concretizer to select deprecated versions' complete -c spack -n '__fish_spack_using_command concretise' -s j -l jobs -r -f -a jobs complete -c spack -n '__fish_spack_using_command concretise' -s j -l jobs -r -d 'explicitly set number of parallel jobs' +complete -c spack -n '__fish_spack_using_command concretise' -l non-defaults -f -a non_defaults +complete -c spack -n '__fish_spack_using_command concretise' -l non-defaults -d 'highlight non-default versions or variants' # spack config set -g __fish_spack_optspecs_spack_config h/help scope= @@ -1221,14 +1260,16 @@ complete -c spack -n '__fish_spack_using_command_pos 0 config' -f -a update -d ' complete -c spack -n '__fish_spack_using_command_pos 0 config' -f -a revert -d 'revert configuration files to their state before update' complete -c spack -n '__fish_spack_using_command config' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command config' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command config' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command config' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command config' -l scope -r -d 'configuration scope to read/modify' # spack config get -set -g __fish_spack_optspecs_spack_config_get h/help +set -g __fish_spack_optspecs_spack_config_get h/help json complete -c spack -n '__fish_spack_using_command_pos 0 config get' -f -a 'bootstrap cdash ci compilers concretizer config definitions develop env_vars include mirrors modules packages repos toolchains upstreams view' complete -c spack -n '__fish_spack_using_command config get' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command config get' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command config get' -l json -f -a json +complete -c spack -n '__fish_spack_using_command config get' -l json -d 'output configuration as JSON' # spack config blame set -g __fish_spack_optspecs_spack_config_blame h/help @@ -1250,7 +1291,7 @@ complete -c spack -n '__fish_spack_using_command config list' -s h -l help -f -a complete -c spack -n '__fish_spack_using_command config list' -s h -l help -d 'show this help message and exit' # spack config scopes -set -g 
__fish_spack_optspecs_spack_config_scopes h/help p/paths t/type= +set -g __fish_spack_optspecs_spack_config_scopes h/help p/paths t/type= v/verbose complete -c spack -n '__fish_spack_using_command_pos 0 config scopes' -f -a 'bootstrap cdash ci compilers concretizer config definitions develop env_vars include mirrors modules packages repos toolchains upstreams view' complete -c spack -n '__fish_spack_using_command config scopes' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command config scopes' -s h -l help -d 'show this help message and exit' @@ -1258,6 +1299,8 @@ complete -c spack -n '__fish_spack_using_command config scopes' -s p -l paths -f complete -c spack -n '__fish_spack_using_command config scopes' -s p -l paths -d 'show associated paths for appropriate scopes' complete -c spack -n '__fish_spack_using_command config scopes' -s t -l type -r -f -a 'all env include internal path' complete -c spack -n '__fish_spack_using_command config scopes' -s t -l type -r -d 'list only scopes of the specified type(s)' +complete -c spack -n '__fish_spack_using_command config scopes' -s v -l verbose -f -a scopes_verbose +complete -c spack -n '__fish_spack_using_command config scopes' -s v -l verbose -d 'show scope types and whether scopes are overridden' # spack config add set -g __fish_spack_optspecs_spack_config_add h/help f/file= @@ -1458,7 +1501,7 @@ complete -c spack -n '__fish_spack_using_command dev-build' -l deprecated -f -a complete -c spack -n '__fish_spack_using_command dev-build' -l deprecated -d 'allow concretizer to select deprecated versions' # spack develop -set -g __fish_spack_optspecs_spack_develop h/help p/path= b/build-directory= no-clone clone f/force= r/recursive +set -g __fish_spack_optspecs_spack_develop h/help p/path= b/build-directory= no-clone clone no-modify-concrete-specs f/force r/recursive complete -c spack -n '__fish_spack_using_command_pos_remainder 0 develop' -f -k -a '(__fish_spack_specs_or_id)' complete -c spack -n '__fish_spack_using_command develop' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command develop' -s h -l help -d 'show this help message and exit' @@ -1469,9 +1512,11 @@ complete -c spack -n '__fish_spack_using_command develop' -s b -l build-director complete -c spack -n '__fish_spack_using_command develop' -l no-clone -f -a clone complete -c spack -n '__fish_spack_using_command develop' -l no-clone -d 'do not clone, the package already exists at the source path' complete -c spack -n '__fish_spack_using_command develop' -l clone -f -a clone -complete -c spack -n '__fish_spack_using_command develop' -l clone -d '(default) clone the package unless the path already exists, use --force to overwrite' -complete -c spack -n '__fish_spack_using_command develop' -s f -l force -r -f -a force -complete -c spack -n '__fish_spack_using_command develop' -s f -l force -r -d 'remove any files or directories that block cloning source code' +complete -c spack -n '__fish_spack_using_command develop' -l clone -d '(default) clone the package unless the path already exists, use ``--force`` to overwrite' +complete -c spack -n '__fish_spack_using_command develop' -l no-modify-concrete-specs -f -a apply_changes +complete -c spack -n '__fish_spack_using_command develop' -l no-modify-concrete-specs -d 'do not mutate concrete specs to have dev_path provenance. 
This requires a later `spack concretize --force` command to use develop specs' +complete -c spack -n '__fish_spack_using_command develop' -s f -l force -f -a force +complete -c spack -n '__fish_spack_using_command develop' -s f -l force -d 'remove any files or directories that block cloning source code' complete -c spack -n '__fish_spack_using_command develop' -s r -l recursive -f -a recursive complete -c spack -n '__fish_spack_using_command develop' -s r -l recursive -d 'traverse nodes of the graph to mark everything up to the root as a develop spec' @@ -1500,7 +1545,7 @@ complete -c spack -n '__fish_spack_using_command_pos_remainder 0 edit' -f -a '(_ complete -c spack -n '__fish_spack_using_command edit' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command edit' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command edit' -s b -l build-system -f -a path -complete -c spack -n '__fish_spack_using_command edit' -s b -l build-system -d 'edit the build system with the supplied name' +complete -c spack -n '__fish_spack_using_command edit' -s b -l build-system -d 'edit the build system with the supplied name or fullname' complete -c spack -n '__fish_spack_using_command edit' -s c -l command -f -a path complete -c spack -n '__fish_spack_using_command edit' -s c -l command -d 'edit the command with the supplied name' complete -c spack -n '__fish_spack_using_command edit' -s d -l docs -f -a path @@ -1510,9 +1555,9 @@ complete -c spack -n '__fish_spack_using_command edit' -s t -l test -d 'edit the complete -c spack -n '__fish_spack_using_command edit' -s m -l module -f -a path complete -c spack -n '__fish_spack_using_command edit' -s m -l module -d 'edit the main spack module with the supplied name' complete -c spack -n '__fish_spack_using_command edit' -s r -l repo -r -f -a repo -complete -c spack -n '__fish_spack_using_command edit' -s r -l repo -r -d 'path to repo to edit package in' +complete -c spack -n '__fish_spack_using_command edit' -s r -l repo -r -d 'path to repo to edit package or build system in' complete -c spack -n '__fish_spack_using_command edit' -s N -l namespace -r -f -a namespace -complete -c spack -n '__fish_spack_using_command edit' -s N -l namespace -r -d 'namespace of package to edit' +complete -c spack -n '__fish_spack_using_command edit' -s N -l namespace -r -d 'namespace of package or build system to edit' # spack env set -g __fish_spack_optspecs_spack_env h/help @@ -1533,7 +1578,7 @@ complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a update -d 'upd complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a revert -d 'restore the environment manifest to its previous format' complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a depfile -d 'generate a depfile to exploit parallel builds across specs' complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a track -d 'track an environment from a directory in Spack' -complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a untrack -d 'track an environment from a directory in Spack' +complete -c spack -n '__fish_spack_using_command_pos 0 env' -f -a untrack -d 'untrack an environment from a directory in Spack' complete -c spack -n '__fish_spack_using_command env' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command env' -s h -l help -d 'show this help message and exit' @@ -1771,7 +1816,7 @@ complete -c spack -n '__fish_spack_using_command external find' -l exclude -r -f complete -c spack -n 
'__fish_spack_using_command external find' -l exclude -r -d 'packages to exclude from search' complete -c spack -n '__fish_spack_using_command external find' -s p -l path -r -f -a path complete -c spack -n '__fish_spack_using_command external find' -s p -l path -r -d 'one or more alternative search paths for finding externals' -complete -c spack -n '__fish_spack_using_command external find' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command external find' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command external find' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command external find' -l all -f -a all complete -c spack -n '__fish_spack_using_command external find' -l all -d 'search for all packages that Spack knows about' @@ -1828,14 +1873,14 @@ complete -c spack -n '__fish_spack_using_command fetch' -l deprecated -f -a conf complete -c spack -n '__fish_spack_using_command fetch' -l deprecated -d 'allow concretizer to select deprecated versions' # spack find -set -g __fish_spack_optspecs_spack_find h/help format= H/hashes json I/install-status specfile-format d/deps p/paths groups no-groups l/long L/very-long t/tag= N/namespaces r/only-roots c/show-concretized f/show-flags show-full-compiler x/explicit X/implicit u/unknown m/missing v/variants loaded M/only-missing only-deprecated deprecated install-tree= start-date= end-date= +set -g __fish_spack_optspecs_spack_find h/help format= H/hashes json I/install-status specfile-format d/deps p/paths groups no-groups l/long L/very-long t/tag= N/namespaces r/only-roots c/show-concretized show-configured-externals f/show-flags show-full-compiler x/explicit X/implicit e/external u/unknown m/missing v/variants loaded M/only-missing only-deprecated deprecated install-tree= start-date= end-date= complete -c spack -n '__fish_spack_using_command_pos_remainder 0 find' -f -a '(__fish_spack_installed_specs)' complete -c spack -n '__fish_spack_using_command find' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command find' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command find' -l format -r -f -a format complete -c spack -n '__fish_spack_using_command find' -l format -r -d 'output specs with the specified format string' complete -c spack -n '__fish_spack_using_command find' -s H -l hashes -f -a format -complete -c spack -n '__fish_spack_using_command find' -s H -l hashes -d 'same as '"'"'--format {/hash}'"'"'; use with xargs or $()' +complete -c spack -n '__fish_spack_using_command find' -s H -l hashes -d 'same as ``--format {/hash}``; use with ``xargs`` or ``$()``' complete -c spack -n '__fish_spack_using_command find' -l json -f -a json complete -c spack -n '__fish_spack_using_command find' -l json -d 'output specs as machine-readable json records' complete -c spack -n '__fish_spack_using_command find' -s I -l install-status -f -a install_status @@ -1862,6 +1907,8 @@ complete -c spack -n '__fish_spack_using_command find' -s r -l only-roots -f -a complete -c spack -n '__fish_spack_using_command find' -s r -l only-roots -d 'don'"'"'t show full list of installed specs in an environment' complete -c spack -n '__fish_spack_using_command find' -s c -l show-concretized -f -a show_concretized complete -c spack -n '__fish_spack_using_command find' -s c -l show-concretized -d 'show concretized 
specs in an environment' +complete -c spack -n '__fish_spack_using_command find' -l show-configured-externals -f -a show_configured_externals +complete -c spack -n '__fish_spack_using_command find' -l show-configured-externals -d 'show externals defined in the '"'"'packages'"'"' section of the configuration' complete -c spack -n '__fish_spack_using_command find' -s f -l show-flags -f -a show_flags complete -c spack -n '__fish_spack_using_command find' -s f -l show-flags -d 'show spec compiler flags' complete -c spack -n '__fish_spack_using_command find' -l show-full-compiler -f -a show_full_compiler @@ -1870,6 +1917,8 @@ complete -c spack -n '__fish_spack_using_command find' -s x -l explicit -f -a ex complete -c spack -n '__fish_spack_using_command find' -s x -l explicit -d 'show only specs that were installed explicitly' complete -c spack -n '__fish_spack_using_command find' -s X -l implicit -f -a implicit complete -c spack -n '__fish_spack_using_command find' -s X -l implicit -d 'show only specs that were installed as dependencies' +complete -c spack -n '__fish_spack_using_command find' -s e -l external -f -a external +complete -c spack -n '__fish_spack_using_command find' -s e -l external -d 'show only specs that are marked as externals' complete -c spack -n '__fish_spack_using_command find' -s u -l unknown -f -a unknown complete -c spack -n '__fish_spack_using_command find' -s u -l unknown -d 'show only specs Spack does not have a package for' complete -c spack -n '__fish_spack_using_command find' -s m -l missing -f -a missing @@ -2012,7 +2061,7 @@ complete -c spack -n '__fish_spack_using_command graph' -s a -l ascii -d 'draw g complete -c spack -n '__fish_spack_using_command graph' -s d -l dot -f -a dot complete -c spack -n '__fish_spack_using_command graph' -s d -l dot -d 'generate graph in dot format and print to stdout' complete -c spack -n '__fish_spack_using_command graph' -s s -l static -f -a static -complete -c spack -n '__fish_spack_using_command graph' -s s -l static -d 'graph static (possible) deps, don'"'"'t concretize (implies --dot)' +complete -c spack -n '__fish_spack_using_command graph' -s s -l static -d 'graph static (possible) deps, don'"'"'t concretize (implies ``--dot``)' complete -c spack -n '__fish_spack_using_command graph' -s c -l color -f -a color complete -c spack -n '__fish_spack_using_command graph' -s c -l color -d 'use different colors for different dependency types' complete -c spack -n '__fish_spack_using_command graph' -s i -l installed -f -a installed @@ -2031,12 +2080,16 @@ complete -c spack -n '__fish_spack_using_command help' -l spec -f -a guide complete -c spack -n '__fish_spack_using_command help' -l spec -d 'help on the package specification syntax' # spack info -set -g __fish_spack_optspecs_spack_info h/help a/all detectable maintainers namespace no-dependencies no-variants no-versions phases tags tests virtuals variants-by-name -complete -c spack -n '__fish_spack_using_command_pos 0 info' -f -a '(__fish_spack_packages)' +set -g __fish_spack_optspecs_spack_info h/help a/all by-name by-when detectable maintainers namespace no-dependencies no-variants no-versions phases tags tests virtuals variants-by-name +complete -c spack -n '__fish_spack_using_command_pos_remainder 0 info' -f -k -a '(__fish_spack_specs)' complete -c spack -n '__fish_spack_using_command info' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command info' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command info' -s 
a -l all -f -a all complete -c spack -n '__fish_spack_using_command info' -s a -l all -d 'output all package information' +complete -c spack -n '__fish_spack_using_command info' -l by-name -f -a by_name +complete -c spack -n '__fish_spack_using_command info' -l by-name -d 'list variants, dependencies, etc. in name order, then by when condition' +complete -c spack -n '__fish_spack_using_command info' -l by-when -f -a by_name +complete -c spack -n '__fish_spack_using_command info' -l by-when -d 'group variants, dependencies, etc. first by when condition, then by name' complete -c spack -n '__fish_spack_using_command info' -l detectable -f -a detectable complete -c spack -n '__fish_spack_using_command info' -l detectable -d 'output information on external detection' complete -c spack -n '__fish_spack_using_command info' -l maintainers -f -a maintainers @@ -2057,8 +2110,7 @@ complete -c spack -n '__fish_spack_using_command info' -l tests -f -a tests complete -c spack -n '__fish_spack_using_command info' -l tests -d 'output relevant build-time and stand-alone tests' complete -c spack -n '__fish_spack_using_command info' -l virtuals -f -a virtuals complete -c spack -n '__fish_spack_using_command info' -l virtuals -d 'output virtual packages' -complete -c spack -n '__fish_spack_using_command info' -l variants-by-name -f -a variants_by_name -complete -c spack -n '__fish_spack_using_command info' -l variants-by-name -d 'list variants in strict name order; don'"'"'t group by condition' +complete -c spack -n '__fish_spack_using_command info' -l variants-by-name -f -a by_name # spack install set -g __fish_spack_optspecs_spack_install h/help only= u/until= p/concurrent-packages= j/jobs= overwrite fail-fast keep-prefix keep-stage dont-restage use-cache no-cache cache-only use-buildcache= include-build-deps no-check-signature show-log-on-error source n/no-checksum v/verbose fake only-concrete add no-add clean dirty test= log-format= log-file= help-cdash cdash-upload-url= cdash-build= cdash-site= cdash-track= cdash-buildstamp= y/yes-to-all f/force U/fresh reuse fresh-roots deprecated @@ -2198,7 +2250,7 @@ complete -c spack -n '__fish_spack_using_command load' -l pwsh -d 'print pwsh co complete -c spack -n '__fish_spack_using_command load' -l first -f -a load_first complete -c spack -n '__fish_spack_using_command load' -l first -d 'load the first match if multiple packages match the spec' complete -c spack -n '__fish_spack_using_command load' -l list -f -a list -complete -c spack -n '__fish_spack_using_command load' -l list -d 'show loaded packages: same as `spack find --loaded`' +complete -c spack -n '__fish_spack_using_command load' -l list -d 'show loaded packages: same as ``spack find --loaded``' # spack location set -g __fish_spack_optspecs_spack_location h/help m/module-dir r/spack-root i/install-dir p/package-dir repo= s/stage-dir S/stages c/source-dir b/build-dir e/env= first @@ -2305,7 +2357,7 @@ complete -c spack -n '__fish_spack_using_command mirror' -s n -l no-checksum -f complete -c spack -n '__fish_spack_using_command mirror' -s n -l no-checksum -d 'do not use checksums to verify downloaded files (unsafe)' # spack mirror create -set -g __fish_spack_optspecs_spack_mirror_create h/help d/directory= a/all file= exclude-file= exclude-specs= skip-unstable-versions D/dependencies n/versions-per-spec= private f/force U/fresh reuse fresh-roots deprecated +set -g __fish_spack_optspecs_spack_mirror_create h/help d/directory= a/all j/jobs= file= exclude-file= exclude-specs= skip-unstable-versions
D/dependencies n/versions-per-spec= private f/force U/fresh reuse fresh-roots deprecated complete -c spack -n '__fish_spack_using_command_pos_remainder 0 mirror create' -f -k -a '(__fish_spack_specs)' complete -c spack -n '__fish_spack_using_command mirror create' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror create' -s h -l help -d 'show this help message and exit' @@ -2313,6 +2365,8 @@ complete -c spack -n '__fish_spack_using_command mirror create' -s d -l director complete -c spack -n '__fish_spack_using_command mirror create' -s d -l directory -r -d 'directory in which to create mirror' complete -c spack -n '__fish_spack_using_command mirror create' -s a -l all -f -a all complete -c spack -n '__fish_spack_using_command mirror create' -s a -l all -d 'mirror all versions of all packages in Spack, or all packages in the current environment if there is an active environment (this requires significant time and space)' +complete -c spack -n '__fish_spack_using_command mirror create' -s j -l jobs -r -f -a jobs +complete -c spack -n '__fish_spack_using_command mirror create' -s j -l jobs -r -d 'Use a given number of workers to make the mirror (used in combination with -a)' complete -c spack -n '__fish_spack_using_command mirror create' -l file -r -f -a file complete -c spack -n '__fish_spack_using_command mirror create' -l file -r -d 'file with specs of packages to put in mirror' complete -c spack -n '__fish_spack_using_command mirror create' -l exclude-file -r -f -a exclude_file @@ -2348,30 +2402,28 @@ complete -c spack -n '__fish_spack_using_command mirror destroy' -l mirror-url - complete -c spack -n '__fish_spack_using_command mirror destroy' -l mirror-url -r -d 'find mirror to destroy by url' # spack mirror add -set -g __fish_spack_optspecs_spack_mirror_add h/help scope= type= autopush unsigned signed s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret= s3-access-key-secret-variable= s3-access-token= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password= oci-password-variable= +set -g __fish_spack_optspecs_spack_mirror_add h/help scope= type= autopush unsigned signed n/name= s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret-variable= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password-variable= complete -c spack -n '__fish_spack_using_command_pos 0 mirror add' -f complete -c spack -n '__fish_spack_using_command mirror add' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror add' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command mirror add' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror add' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror add' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command mirror add' -l type -r -f -a 'binary source' -complete -c spack -n '__fish_spack_using_command mirror add' -l type -r -d 'specify the mirror type: for both binary and source use `--type binary --type source` (default)' +complete -c spack -n '__fish_spack_using_command mirror add' -l type -r -d 'specify the mirror type: for both binary and source use ``--type binary --type source`` (default)' complete -c spack -n 
'__fish_spack_using_command mirror add' -l autopush -f -a autopush complete -c spack -n '__fish_spack_using_command mirror add' -l autopush -d 'set mirror to push automatically after installation' complete -c spack -n '__fish_spack_using_command mirror add' -l unsigned -f -a signed complete -c spack -n '__fish_spack_using_command mirror add' -l unsigned -d 'do not require signing and signature verification when pushing and installing from this build cache' complete -c spack -n '__fish_spack_using_command mirror add' -l signed -f -a signed complete -c spack -n '__fish_spack_using_command mirror add' -l signed -d 'require signing and signature verification when pushing and installing from this build cache' +complete -c spack -n '__fish_spack_using_command mirror add' -l name -s n -r -f -a view_name +complete -c spack -n '__fish_spack_using_command mirror add' -l name -s n -r -d 'Name of the index view for a binary mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-id -r -f -a s3_access_key_id complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-id -r -d 'ID string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-id-variable -r -f -a s3_access_key_id_variable complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-id-variable -r -d 'environment variable containing ID string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-secret -r -f -a s3_access_key_secret -complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-secret -r -d 'secret string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-secret-variable -r -f -a s3_access_key_secret_variable complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-key-secret-variable -r -d 'environment variable containing secret string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-token -r -f -a s3_access_token -complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-token -r -d 'access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-token-variable -r -f -a s3_access_token_variable complete -c spack -n '__fish_spack_using_command mirror add' -l s3-access-token-variable -r -d 'environment variable containing access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l s3-profile -r -f -a s3_profile @@ -2382,29 +2434,31 @@ complete -c spack -n '__fish_spack_using_command mirror add' -l oci-username -r complete -c spack -n '__fish_spack_using_command mirror add' -l oci-username -r -d 'username to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l oci-username-variable -r -f -a oci_username_variable complete -c spack -n '__fish_spack_using_command mirror add' -l oci-username-variable -r -d 'environment variable containing username to use to connect to this OCI mirror' -complete -c spack -n '__fish_spack_using_command mirror add' -l oci-password -r -f -a oci_password -complete -c spack -n '__fish_spack_using_command mirror add' -l oci-password -r -d 'password to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror add' -l oci-password-variable -r -f -a 
oci_password_variable complete -c spack -n '__fish_spack_using_command mirror add' -l oci-password-variable -r -d 'environment variable containing password to use to connect to this OCI mirror' # spack mirror remove -set -g __fish_spack_optspecs_spack_mirror_remove h/help scope= +set -g __fish_spack_optspecs_spack_mirror_remove h/help scope= all-scopes complete -c spack -n '__fish_spack_using_command_pos 0 mirror remove' -f -a '(__fish_spack_mirrors)' complete -c spack -n '__fish_spack_using_command mirror remove' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror remove' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command mirror remove' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror remove' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror remove' -l scope -r -d 'configuration scope to modify' +complete -c spack -n '__fish_spack_using_command mirror remove' -l all-scopes -f -a all_scopes +complete -c spack -n '__fish_spack_using_command mirror remove' -l all-scopes -d 'remove from all config scopes (default: highest scope with matching mirror)' # spack mirror rm -set -g __fish_spack_optspecs_spack_mirror_rm h/help scope= +set -g __fish_spack_optspecs_spack_mirror_rm h/help scope= all-scopes complete -c spack -n '__fish_spack_using_command_pos 0 mirror rm' -f -a '(__fish_spack_mirrors)' complete -c spack -n '__fish_spack_using_command mirror rm' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror rm' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command mirror rm' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror rm' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror rm' -l scope -r -d 'configuration scope to modify' +complete -c spack -n '__fish_spack_using_command mirror rm' -l all-scopes -f -a all_scopes +complete -c spack -n '__fish_spack_using_command mirror rm' -l all-scopes -d 'remove from all config scopes (default: highest scope with matching mirror)' # spack mirror set-url -set -g __fish_spack_optspecs_spack_mirror_set_url h/help push fetch scope= s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret= s3-access-key-secret-variable= s3-access-token= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password= oci-password-variable= +set -g __fish_spack_optspecs_spack_mirror_set_url h/help push fetch scope= s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret-variable= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password-variable= complete -c spack -n '__fish_spack_using_command_pos 0 mirror set-url' -f -a '(__fish_spack_mirrors)' complete -c spack -n '__fish_spack_using_command mirror set-url' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror set-url' -s h -l help -d 'show this help message and exit' @@ -2412,18 +2466,14 @@ complete -c spack -n '__fish_spack_using_command mirror set-url' -l push -f -a p complete -c spack -n '__fish_spack_using_command mirror set-url' -l push -d 'set only the URL used for uploading' 
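Every option in this generated script follows the same two-line pattern: one `complete` statement registers the flag (with `-r` when it takes a value, and `-a` naming its argparse destination), and a second carries the `-d` description. A minimal Python sketch of that rendering, under the assumption of a simplified option model; `emit_fish_complete` and its parameters are illustrative, not Spack's actual completion generator:

from typing import Optional


def emit_fish_complete(
    cmd: str, long: str, desc: str, short: Optional[str] = None, takes_value: bool = False
) -> str:
    """Render one option as the two fish `complete` lines used in this file."""
    cond = f"__fish_spack_using_command {cmd}"
    flags = (f"-s {short} " if short else "") + f"-l {long}"
    value = "-r " if takes_value else ""
    dest = long.replace("-", "_")
    # First line registers the flag and its destination; second adds the description.
    head = f"complete -c spack -n '{cond}' {flags} {value}-f -a {dest}"
    tail = f"complete -c spack -n '{cond}' {flags} {value}-d '{desc}'"
    return head + "\n" + tail


# Reproduces the two `spack find --external` lines added earlier in this file:
print(emit_fish_complete("find", "external", "show only specs that are marked as externals", short="e"))

(Descriptions containing single quotes additionally need fish's '"'"' quoting idiom, visible in several lines above; the sketch omits that escaping.)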
complete -c spack -n '__fish_spack_using_command mirror set-url' -l fetch -f -a fetch complete -c spack -n '__fish_spack_using_command mirror set-url' -l fetch -d 'set only the URL used for downloading' -complete -c spack -n '__fish_spack_using_command mirror set-url' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror set-url' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror set-url' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-id -r -f -a s3_access_key_id complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-id -r -d 'ID string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-id-variable -r -f -a s3_access_key_id_variable complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-id-variable -r -d 'environment variable containing ID string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-secret -r -f -a s3_access_key_secret -complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-secret -r -d 'secret string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-secret-variable -r -f -a s3_access_key_secret_variable complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-key-secret-variable -r -d 'environment variable containing secret string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-token -r -f -a s3_access_token -complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-token -r -d 'access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-token-variable -r -f -a s3_access_token_variable complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-access-token-variable -r -d 'environment variable containing access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l s3-profile -r -f -a s3_profile @@ -2434,13 +2484,11 @@ complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-username complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-username -r -d 'username to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-username-variable -r -f -a oci_username_variable complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-username-variable -r -d 'environment variable containing username to use to connect to this OCI mirror' -complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-password -r -f -a oci_password -complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-password -r -d 'password to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-password-variable -r -f -a oci_password_variable complete -c spack -n '__fish_spack_using_command mirror set-url' -l oci-password-variable -r -d 'environment variable containing password to use to connect to this OCI mirror' # spack mirror set -set -g 
__fish_spack_optspecs_spack_mirror_set h/help push fetch type= url= autopush no-autopush unsigned signed scope= s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret= s3-access-key-secret-variable= s3-access-token= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password= oci-password-variable= +set -g __fish_spack_optspecs_spack_mirror_set h/help push fetch type= url= autopush no-autopush unsigned signed scope= s3-access-key-id= s3-access-key-id-variable= s3-access-key-secret-variable= s3-access-token-variable= s3-profile= s3-endpoint-url= oci-username= oci-username-variable= oci-password-variable= complete -c spack -n '__fish_spack_using_command_pos 0 mirror set' -f -a '(__fish_spack_mirrors)' complete -c spack -n '__fish_spack_using_command mirror set' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror set' -s h -l help -d 'show this help message and exit' @@ -2449,7 +2497,7 @@ complete -c spack -n '__fish_spack_using_command mirror set' -l push -d 'modify complete -c spack -n '__fish_spack_using_command mirror set' -l fetch -f -a fetch complete -c spack -n '__fish_spack_using_command mirror set' -l fetch -d 'modify just the fetch connection details' complete -c spack -n '__fish_spack_using_command mirror set' -l type -r -f -a 'binary source' -complete -c spack -n '__fish_spack_using_command mirror set' -l type -r -d 'specify the mirror type: for both binary and source use `--type binary --type source`' +complete -c spack -n '__fish_spack_using_command mirror set' -l type -r -d 'specify the mirror type: for both binary and source use ``--type binary --type source``' complete -c spack -n '__fish_spack_using_command mirror set' -l url -r -f -a url complete -c spack -n '__fish_spack_using_command mirror set' -l url -r -d 'url of mirror directory from '"'"'spack mirror create'"'"'' complete -c spack -n '__fish_spack_using_command mirror set' -l autopush -f -a autopush @@ -2460,18 +2508,14 @@ complete -c spack -n '__fish_spack_using_command mirror set' -l unsigned -f -a s complete -c spack -n '__fish_spack_using_command mirror set' -l unsigned -d 'do not require signing and signature verification when pushing and installing from this build cache' complete -c spack -n '__fish_spack_using_command mirror set' -l signed -f -a signed complete -c spack -n '__fish_spack_using_command mirror set' -l signed -d 'require signing and signature verification when pushing and installing from this build cache' -complete -c spack -n '__fish_spack_using_command mirror set' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror set' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror set' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-id -r -f -a s3_access_key_id complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-id -r -d 'ID string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-id-variable -r -f -a s3_access_key_id_variable complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-id-variable -r -d 'environment variable containing ID string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror set' -l 
s3-access-key-secret -r -f -a s3_access_key_secret -complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-secret -r -d 'secret string to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-secret-variable -r -f -a s3_access_key_secret_variable complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-key-secret-variable -r -d 'environment variable containing secret string to use to connect to this S3 mirror' -complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-token -r -f -a s3_access_token -complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-token -r -d 'access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-token-variable -r -f -a s3_access_token_variable complete -c spack -n '__fish_spack_using_command mirror set' -l s3-access-token-variable -r -d 'environment variable containing access token to use to connect to this S3 mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l s3-profile -r -f -a s3_profile @@ -2482,8 +2526,6 @@ complete -c spack -n '__fish_spack_using_command mirror set' -l oci-username -r complete -c spack -n '__fish_spack_using_command mirror set' -l oci-username -r -d 'username to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l oci-username-variable -r -f -a oci_username_variable complete -c spack -n '__fish_spack_using_command mirror set' -l oci-username-variable -r -d 'environment variable containing username to use to connect to this OCI mirror' -complete -c spack -n '__fish_spack_using_command mirror set' -l oci-password -r -f -a oci_password -complete -c spack -n '__fish_spack_using_command mirror set' -l oci-password -r -d 'password to use to connect to this OCI mirror' complete -c spack -n '__fish_spack_using_command mirror set' -l oci-password-variable -r -f -a oci_password_variable complete -c spack -n '__fish_spack_using_command mirror set' -l oci-password-variable -r -d 'environment variable containing password to use to connect to this OCI mirror' @@ -2491,14 +2533,14 @@ complete -c spack -n '__fish_spack_using_command mirror set' -l oci-password-var set -g __fish_spack_optspecs_spack_mirror_list h/help scope= complete -c spack -n '__fish_spack_using_command mirror list' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror list' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command mirror list' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror list' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command mirror list' -l scope -r -d 'configuration scope to read from' # spack mirror ls set -g __fish_spack_optspecs_spack_mirror_ls h/help scope= complete -c spack -n '__fish_spack_using_command mirror ls' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command mirror ls' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command mirror ls' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command mirror ls' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n 
'__fish_spack_using_command mirror ls' -l scope -r -d 'configuration scope to read from' # spack module @@ -2652,7 +2694,7 @@ complete -c spack -n '__fish_spack_using_command patch' -l deprecated -d 'allow # spack pkg set -g __fish_spack_optspecs_spack_pkg h/help -complete -c spack -n '__fish_spack_using_command_pos 0 pkg' -f -a add -d 'add a package to the git stage with `git add`' +complete -c spack -n '__fish_spack_using_command_pos 0 pkg' -f -a add -d 'add a package to the git stage with ``git add``' complete -c spack -n '__fish_spack_using_command_pos 0 pkg' -f -a list -d 'list packages associated with a particular spack git revision' complete -c spack -n '__fish_spack_using_command_pos 0 pkg' -f -a diff -d 'compare packages available in two different git revisions' complete -c spack -n '__fish_spack_using_command_pos 0 pkg' -f -a added -d 'show packages added since a commit' @@ -2811,7 +2853,7 @@ complete -c spack -n '__fish_spack_using_command repo create' -s d -l subdirecto set -g __fish_spack_optspecs_spack_repo_list h/help scope= names namespaces complete -c spack -n '__fish_spack_using_command repo list' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command repo list' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command repo list' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo list' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo list' -l scope -r -d 'configuration scope to read from' complete -c spack -n '__fish_spack_using_command repo list' -l names -f -a names complete -c spack -n '__fish_spack_using_command repo list' -l names -d 'show configuration names only' @@ -2822,7 +2864,7 @@ complete -c spack -n '__fish_spack_using_command repo list' -l namespaces -d 'sh set -g __fish_spack_optspecs_spack_repo_ls h/help scope= names namespaces complete -c spack -n '__fish_spack_using_command repo ls' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command repo ls' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command repo ls' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo ls' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo ls' -l scope -r -d 'configuration scope to read from' complete -c spack -n '__fish_spack_using_command repo ls' -l names -f -a names complete -c spack -n '__fish_spack_using_command repo ls' -l names -d 'show configuration names only' @@ -2838,7 +2880,7 @@ complete -c spack -n '__fish_spack_using_command repo add' -l name -r -f -a name complete -c spack -n '__fish_spack_using_command repo add' -l name -r -d 'config name for the package repository, defaults to the namespace of the repository' complete -c spack -n '__fish_spack_using_command repo add' -l path -r -f -a path complete -c spack -n '__fish_spack_using_command repo add' -l path -r -d 'relative path to the Spack package repository inside a git repository. 
Can be repeated to add multiple package repositories in case of a monorepo' -complete -c spack -n '__fish_spack_using_command repo add' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo add' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo add' -l scope -r -d 'configuration scope to modify' # spack repo set @@ -2850,24 +2892,28 @@ complete -c spack -n '__fish_spack_using_command repo set' -l destination -r -f complete -c spack -n '__fish_spack_using_command repo set' -l destination -r -d 'destination to clone git repository into' complete -c spack -n '__fish_spack_using_command repo set' -l path -r -f -a path complete -c spack -n '__fish_spack_using_command repo set' -l path -r -d 'relative path to the Spack package repository inside a git repository. Can be repeated to add multiple package repositories in case of a monorepo' -complete -c spack -n '__fish_spack_using_command repo set' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo set' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo set' -l scope -r -d 'configuration scope to modify' # spack repo remove -set -g __fish_spack_optspecs_spack_repo_remove h/help scope= +set -g __fish_spack_optspecs_spack_repo_remove h/help scope= all-scopes complete -c spack -n '__fish_spack_using_command_pos 0 repo remove' $__fish_spack_force_files -a '(__fish_spack_repos)' complete -c spack -n '__fish_spack_using_command repo remove' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command repo remove' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command repo remove' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo remove' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo remove' -l scope -r -d 'configuration scope to modify' +complete -c spack -n '__fish_spack_using_command repo remove' -l all-scopes -f -a all_scopes +complete -c spack -n '__fish_spack_using_command repo remove' -l all-scopes -d 'remove from all config scopes (default: highest scope with matching repo)' # spack repo rm -set -g __fish_spack_optspecs_spack_repo_rm h/help scope= +set -g __fish_spack_optspecs_spack_repo_rm h/help scope= all-scopes complete -c spack -n '__fish_spack_using_command_pos 0 repo rm' $__fish_spack_force_files -a '(__fish_spack_repos)' complete -c spack -n '__fish_spack_using_command repo rm' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command repo rm' -s h -l help -d 'show this help message and exit' -complete -c spack -n '__fish_spack_using_command repo rm' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo rm' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo rm' -l scope -r -d 'configuration scope to modify' +complete -c spack -n '__fish_spack_using_command repo rm' -l all-scopes -f -a all_scopes +complete -c spack -n '__fish_spack_using_command repo rm' -l all-scopes -d 
'remove from all config scopes (default: highest scope with matching repo)' # spack repo migrate set -g __fish_spack_optspecs_spack_repo_migrate h/help dry-run fix @@ -2886,7 +2932,7 @@ complete -c spack -n '__fish_spack_using_command repo update' -s h -l help -f -a complete -c spack -n '__fish_spack_using_command repo update' -s h -l help -d 'show this help message and exit' complete -c spack -n '__fish_spack_using_command repo update' -l remote -s r -r -f -a remote complete -c spack -n '__fish_spack_using_command repo update' -l remote -s r -r -d 'name of remote to check for branches, tags, or commits' -complete -c spack -n '__fish_spack_using_command repo update' -l scope -r -f -a '_builtin defaults:base defaults system site user command_line' +complete -c spack -n '__fish_spack_using_command repo update' -l scope -r -f -a '_builtin defaults:base defaults system site user spack command_line' complete -c spack -n '__fish_spack_using_command repo update' -l scope -r -d 'configuration scope to modify' complete -c spack -n '__fish_spack_using_command repo update' -l branch -s b -r -f -a branch complete -c spack -n '__fish_spack_using_command repo update' -l branch -s b -r -d 'name of a branch to change to' @@ -2922,7 +2968,7 @@ complete -c spack -n '__fish_spack_using_command restage' -s h -l help -f -a hel complete -c spack -n '__fish_spack_using_command restage' -s h -l help -d 'show this help message and exit' # spack solve -set -g __fish_spack_optspecs_spack_solve h/help show= timers stats l/long L/very-long N/namespaces I/install-status no-install-status y/yaml j/json format= c/cover= t/types f/force U/fresh reuse fresh-roots deprecated +set -g __fish_spack_optspecs_spack_solve h/help show= timers stats l/long L/very-long N/namespaces I/install-status no-install-status y/yaml j/json format= non-defaults c/cover= t/types f/force U/fresh reuse fresh-roots deprecated complete -c spack -n '__fish_spack_using_command_pos_remainder 0 solve' -f -k -a '(__fish_spack_specs_or_id)' complete -c spack -n '__fish_spack_using_command solve' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command solve' -s h -l help -d 'show this help message and exit' @@ -2948,6 +2994,8 @@ complete -c spack -n '__fish_spack_using_command solve' -s j -l json -f -a forma complete -c spack -n '__fish_spack_using_command solve' -s j -l json -d 'print concrete spec as JSON' complete -c spack -n '__fish_spack_using_command solve' -l format -r -f -a format complete -c spack -n '__fish_spack_using_command solve' -l format -r -d 'print concrete spec with the specified format string' +complete -c spack -n '__fish_spack_using_command solve' -l non-defaults -f -a non_defaults +complete -c spack -n '__fish_spack_using_command solve' -l non-defaults -d 'highlight non-default versions or variants' complete -c spack -n '__fish_spack_using_command solve' -s c -l cover -r -f -a 'nodes edges paths' complete -c spack -n '__fish_spack_using_command solve' -s c -l cover -r -d 'how extensively to traverse the DAG (default: nodes)' complete -c spack -n '__fish_spack_using_command solve' -s t -l types -f -a types @@ -2964,7 +3012,7 @@ complete -c spack -n '__fish_spack_using_command solve' -l deprecated -f -a conf complete -c spack -n '__fish_spack_using_command solve' -l deprecated -d 'allow concretizer to select deprecated versions' # spack spec -set -g __fish_spack_optspecs_spack_spec h/help l/long L/very-long N/namespaces I/install-status no-install-status y/yaml j/json format= c/cover= t/types f/force U/fresh reuse 
fresh-roots deprecated +set -g __fish_spack_optspecs_spack_spec h/help l/long L/very-long N/namespaces I/install-status no-install-status y/yaml j/json format= non-defaults c/cover= t/types f/force U/fresh reuse fresh-roots deprecated complete -c spack -n '__fish_spack_using_command_pos_remainder 0 spec' -f -k -a '(__fish_spack_specs_or_id)' complete -c spack -n '__fish_spack_using_command spec' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command spec' -s h -l help -d 'show this help message and exit' @@ -2984,6 +3032,8 @@ complete -c spack -n '__fish_spack_using_command spec' -s j -l json -f -a format complete -c spack -n '__fish_spack_using_command spec' -s j -l json -d 'print concrete spec as JSON' complete -c spack -n '__fish_spack_using_command spec' -l format -r -f -a format complete -c spack -n '__fish_spack_using_command spec' -l format -r -d 'print concrete spec with the specified format string' +complete -c spack -n '__fish_spack_using_command spec' -l non-defaults -f -a non_defaults +complete -c spack -n '__fish_spack_using_command spec' -l non-defaults -d 'highlight non-default versions or variants' complete -c spack -n '__fish_spack_using_command spec' -s c -l cover -r -f -a 'nodes edges paths' complete -c spack -n '__fish_spack_using_command spec' -s c -l cover -r -d 'how extensively to traverse the DAG (default: nodes)' complete -c spack -n '__fish_spack_using_command spec' -s t -l types -f -a types @@ -3045,7 +3095,7 @@ complete -c spack -n '__fish_spack_using_command style' -s t -l tool -r -d 'spec complete -c spack -n '__fish_spack_using_command style' -s s -l skip -r -f -a skip complete -c spack -n '__fish_spack_using_command style' -s s -l skip -r -d 'specify tools to skip (choose from import, isort, black, flake8, mypy)' complete -c spack -n '__fish_spack_using_command style' -l spec-strings -f -a spec_strings -complete -c spack -n '__fish_spack_using_command style' -l spec-strings -d 'upgrade spec strings in Python, JSON and YAML files for compatibility with Spack v1.0 and v0.x. Example: spack style --spec-strings $(git ls-files). Note: must be used only on specs from spack v0.X.' +complete -c spack -n '__fish_spack_using_command style' -l spec-strings -d 'upgrade spec strings in Python, JSON and YAML files for compatibility with Spack v1.0 and v0.x. Example: spack style ``--spec-strings $(git ls-files)``. Note: must be used only on specs from spack v0.X.' # spack tags set -g __fish_spack_optspecs_spack_tags h/help i/installed a/all @@ -3173,10 +3223,12 @@ complete -c spack -n '__fish_spack_using_command tutorial' -s y -l yes-to-all -f complete -c spack -n '__fish_spack_using_command tutorial' -s y -l yes-to-all -d 'assume "yes" is the answer to every confirmation request' # spack undevelop -set -g __fish_spack_optspecs_spack_undevelop h/help a/all +set -g __fish_spack_optspecs_spack_undevelop h/help no-modify-concrete-specs a/all complete -c spack -n '__fish_spack_using_command_pos_remainder 0 undevelop' -f -k -a '(__fish_spack_specs_or_id)' complete -c spack -n '__fish_spack_using_command undevelop' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command undevelop' -s h -l help -d 'show this help message and exit' +complete -c spack -n '__fish_spack_using_command undevelop' -l no-modify-concrete-specs -f -a apply_changes +complete -c spack -n '__fish_spack_using_command undevelop' -l no-modify-concrete-specs -d 'do not mutate concrete specs to remove dev_path provenance. 
This requires running `spack concretize -f` later to apply changes to concrete specs' complete -c spack -n '__fish_spack_using_command undevelop' -s a -l all -f -a all complete -c spack -n '__fish_spack_using_command undevelop' -s a -l all -d 'remove all specs from (clear) the environment' @@ -3290,6 +3342,7 @@ complete -c spack -n '__fish_spack_using_command url stats' -l show-issues -d 's set -g __fish_spack_optspecs_spack_verify h/help complete -c spack -n '__fish_spack_using_command_pos 0 verify' -f -a manifest -d 'verify that install directories have not been modified since installation' complete -c spack -n '__fish_spack_using_command_pos 0 verify' -f -a libraries -d 'verify that shared libraries of install packages can be located in rpaths (Linux only)' +complete -c spack -n '__fish_spack_using_command_pos 0 verify' -f -a versions -d 'Check that all versions of installed packages are known to Spack and non-deprecated.' complete -c spack -n '__fish_spack_using_command verify' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command verify' -s h -l help -d 'show this help message and exit' @@ -3315,6 +3368,12 @@ complete -c spack -n '__fish_spack_using_command_pos_remainder 0 verify librarie complete -c spack -n '__fish_spack_using_command verify libraries' -s h -l help -f -a help complete -c spack -n '__fish_spack_using_command verify libraries' -s h -l help -d 'show this help message and exit' +# spack verify versions +set -g __fish_spack_optspecs_spack_verify_versions h/help +complete -c spack -n '__fish_spack_using_command_pos_remainder 0 verify versions' -f -a '(__fish_spack_installed_specs)' +complete -c spack -n '__fish_spack_using_command verify versions' -s h -l help -f -a help +complete -c spack -n '__fish_spack_using_command verify versions' -s h -l help -d 'show this help message and exit' + # spack versions set -g __fish_spack_optspecs_spack_versions h/help s/safe r/remote n/new j/jobs= complete -c spack -n '__fish_spack_using_command_pos 0 versions' -f -a '(__fish_spack_packages)' diff --git a/share/spack/templates/container/Dockerfile b/share/spack/templates/container/Dockerfile index 76aa9a342cc671..340feac78850b3 100644 --- a/share/spack/templates/container/Dockerfile +++ b/share/spack/templates/container/Dockerfile @@ -56,8 +56,7 @@ FROM {{ run.image }} COPY --from=builder {{ paths.environment }} {{ paths.environment }} COPY --from=builder {{ paths.store }} {{ paths.store }} - -# paths.view is a symlink, so copy the parent to avoid dereferencing and duplicating it +{# paths.view is a symlink, so copy the parent to avoid dereferencing and duplicating it #} COPY --from=builder {{ paths.view_parent }} {{ paths.view_parent }} RUN { \ diff --git a/share/spack/templates/container/almalinux_8.dockerfile b/share/spack/templates/container/almalinux_8.dockerfile index b0070e93323811..e3769298ebc662 100644 --- a/share/spack/templates/container/almalinux_8.dockerfile +++ b/share/spack/templates/container/almalinux_8.dockerfile @@ -18,7 +18,6 @@ RUN dnf update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ svn \ unzip \ zstd \ diff --git a/share/spack/templates/container/almalinux_9.dockerfile b/share/spack/templates/container/almalinux_9.dockerfile index 8eb4ed9012ace3..43ca8f7361b5d7 100644 --- a/share/spack/templates/container/almalinux_9.dockerfile +++ b/share/spack/templates/container/almalinux_9.dockerfile @@ -20,7 +20,6 @@ RUN dnf update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ svn \ unzip \ zstd \ diff --git 
a/share/spack/templates/container/amazonlinux_2.dockerfile b/share/spack/templates/container/amazonlinux_2.dockerfile index e11dd0c482ca14..332f8102b7e0c6 100644 --- a/share/spack/templates/container/amazonlinux_2.dockerfile +++ b/share/spack/templates/container/amazonlinux_2.dockerfile @@ -17,7 +17,6 @@ RUN yum update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ unzip \ zstd \ && pip3 install boto3 \ diff --git a/share/spack/templates/container/centos_stream9.dockerfile b/share/spack/templates/container/centos_stream9.dockerfile index e62c64364f79d1..494e2f9307a528 100644 --- a/share/spack/templates/container/centos_stream9.dockerfile +++ b/share/spack/templates/container/centos_stream9.dockerfile @@ -22,7 +22,6 @@ RUN dnf update -y \ svn \ patch \ python3.11 \ - python3.11-setuptools \ unzip \ zstd \ && python3.11 -m ensurepip \ diff --git a/share/spack/templates/container/fedora.dockerfile b/share/spack/templates/container/fedora.dockerfile index 4856ad2197e7e9..893c540728b77a 100644 --- a/share/spack/templates/container/fedora.dockerfile +++ b/share/spack/templates/container/fedora.dockerfile @@ -18,7 +18,6 @@ RUN dnf update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ svn \ unzip \ xz \ diff --git a/share/spack/templates/container/rockylinux_8.dockerfile b/share/spack/templates/container/rockylinux_8.dockerfile index 4856ad2197e7e9..893c540728b77a 100644 --- a/share/spack/templates/container/rockylinux_8.dockerfile +++ b/share/spack/templates/container/rockylinux_8.dockerfile @@ -18,7 +18,6 @@ RUN dnf update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ svn \ unzip \ xz \ diff --git a/share/spack/templates/container/rockylinux_9.dockerfile b/share/spack/templates/container/rockylinux_9.dockerfile index 8fa568d6c6b32d..55b4b34fa9b1b4 100644 --- a/share/spack/templates/container/rockylinux_9.dockerfile +++ b/share/spack/templates/container/rockylinux_9.dockerfile @@ -20,7 +20,6 @@ RUN dnf update -y \ patch \ python3 \ python3-pip \ - python3-setuptools \ svn \ unzip \ xz \ diff --git a/share/spack/templates/container/ubuntu_2004.dockerfile b/share/spack/templates/container/ubuntu_2004.dockerfile index f1c3ca456de1a4..6cfc6f91a5da7b 100644 --- a/share/spack/templates/container/ubuntu_2004.dockerfile +++ b/share/spack/templates/container/ubuntu_2004.dockerfile @@ -26,7 +26,6 @@ RUN apt-get -yqq update \ subversion \ python3 \ python3-pip \ - python3-setuptools \ unzip \ zstd \ && locale-gen en_US.UTF-8 \ diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/_checks.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/_checks.py index 162ed411731ffc..2349c0b22722e8 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/_checks.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/_checks.py @@ -1,49 +1,7 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from typing import List - -from spack.package import Builder, BuilderWithDefaults, Spec, execute_install_time_tests - -_ = BuilderWithDefaults -_ = execute_install_time_tests - - -def ensure_build_dependencies_or_raise(spec: Spec, dependencies: List[str], error_msg: str): - """Ensure that some build dependencies are present in the concrete spec. - - If not, raise a RuntimeError with a helpful error message. - - Args: - spec: concrete spec to be checked. 
- dependencies: list of package names of required build dependencies - error_msg: brief error message to be prepended to a longer description - - Raises: - RuntimeError: when the required build dependencies are not found - """ - assert spec.concrete, "Can ensure build dependencies only on concrete specs" - build_deps = [d.name for d in spec.dependencies(deptype="build")] - missing_deps = [x for x in dependencies if x not in build_deps] - - if not missing_deps: - return - - # Raise an exception on missing deps. - msg = ( - "{0}: missing dependencies: {1}.\n\nPlease add " - "the following lines to the package:\n\n".format( - error_msg, ", ".join(str(d) for d in missing_deps) - ) - ) - - for dep in missing_deps: - msg += ' depends_on("{0}", type="build", when="@{1} {2}")\n'.format( - dep, spec.version, "build_system=autotools" - ) - - msg += '\nUpdate the version (when="@{0}") as needed.'.format(spec.version) - raise RuntimeError(msg) +from spack.package import Builder, BuilderWithDefaults, execute_install_time_tests def execute_build_time_tests(builder: Builder): @@ -57,3 +15,6 @@ def execute_build_time_tests(builder: Builder): return builder.pkg.tester.phase_tests(builder, "build", builder.build_time_test_callbacks) + + +__all__ = ["execute_build_time_tests", "BuilderWithDefaults", "execute_install_time_tests"] diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py index 61ed270bbb4919..8c68da7948577f 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py @@ -2,109 +2,48 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os -import stat -import subprocess -from typing import Callable, List, Optional, Set, Tuple, Union -import spack.build_environment -import spack.builder -import spack.compilers.libraries -import spack.error -import spack.llnl.util.filesystem as fs -import spack.llnl.util.tty as tty -import spack.package_base -import spack.phase_callbacks -import spack.spec -import spack.util.environment -import spack.util.prefix -from spack.directives import build_system, conflicts, depends_on -from spack.multimethod import when -from spack.operating_systems.mac_os import macos_version -from spack.package import BuilderWithDefaults, apply_macos_rpath_fixups, execute_install_time_tests -from spack.util.executable import Executable -from spack.version import Version +from spack.package import ( + BuilderWithDefaults, + List, + PackageBase, + Prefix, + Spec, + build_system, + depends_on, + register_builder, + run_after, +) -from ._checks import ensure_build_dependencies_or_raise, execute_build_time_tests +from ._checks import execute_build_time_tests, execute_install_time_tests -class AutotoolsPackage(spack.package_base.PackageBase): +class AutotoolsPackage(PackageBase): """Specialized class for packages built using GNU Autotools.""" - #: This attribute is used in UI queries that need to know the build - #: system base class build_system_class = "AutotoolsPackage" - - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "autotools" build_system("autotools") - - with when("build_system=autotools"): - depends_on("gnuconfig", type="build", when="target=ppc64le:") - depends_on("gnuconfig", type="build", when="target=aarch64:") - depends_on("gnuconfig", type="build", when="target=riscv64:") - depends_on("gmake", type="build") - 
conflicts("platform=windows") + depends_on("gmake", type="build", when="build_system=autotools") def flags_to_build_system_args(self, flags): - """Produces a list of all command line arguments to pass specified - compiler flags to configure.""" + """Produces a list of all command line arguments to pass compiler flags to configure.""" # Has to be dynamic attribute due to caching. - setattr(self, "configure_flag_args", []) + configure_flag_args = [] for flag, values in flags.items(): if values: var_name = "LIBS" if flag == "ldlibs" else flag.upper() - values_str = "{0}={1}".format(var_name, " ".join(values)) - self.configure_flag_args.append(values_str) - # Spack's fflags are meant for both F77 and FC, therefore we - # additionaly set FCFLAGS if required. + configure_flag_args.append(f"{var_name}={' '.join(values)}") + # Spack's fflags are meant for both F77 and FC, therefore we additionally set FCFLAGS values = flags.get("fflags", None) if values: - values_str = "FCFLAGS={0}".format(" ".join(values)) - self.configure_flag_args.append(values_str) - - # Legacy methods (used by too many packages to change them, - # need to forward to the builder) - def enable_or_disable(self, *args, **kwargs): - return spack.builder.create(self).enable_or_disable(*args, **kwargs) - - def with_or_without(self, *args, **kwargs): - return spack.builder.create(self).with_or_without(*args, **kwargs) + configure_flag_args.append(f"FCFLAGS={' '.join(values)}") + setattr(self, "configure_flag_args", configure_flag_args) -@spack.builder.register_builder("autotools") +@register_builder("autotools") class AutotoolsBuilder(BuilderWithDefaults): - """The autotools builder encodes the default way of installing software built - with autotools. It has four phases that can be overridden, if need be: - - 1. :py:meth:`~.AutotoolsBuilder.autoreconf` - 2. :py:meth:`~.AutotoolsBuilder.configure` - 3. :py:meth:`~.AutotoolsBuilder.build` - 4. :py:meth:`~.AutotoolsBuilder.install` - - They all have sensible defaults and for many packages the only thing necessary - is to override the helper method - :meth:`~spack.build_systems.autotools.AutotoolsBuilder.configure_args`. 
- - For a finer tuning you may also override: - - +-----------------------------------------------+--------------------+ - | **Method** | **Purpose** | - +===============================================+====================+ - | :py:attr:`~.AutotoolsBuilder.build_targets` | Specify ``make`` | - | | targets for the | - | | build phase | - +-----------------------------------------------+--------------------+ - | :py:attr:`~.AutotoolsBuilder.install_targets` | Specify ``make`` | - | | targets for the | - | | install phase | - +-----------------------------------------------+--------------------+ - | :py:meth:`~.AutotoolsBuilder.check` | Run build time | - | | tests if required | - +-----------------------------------------------+--------------------+ - - """ - #: Phases of a GNU Autotools package phases = ("autoreconf", "configure", "build", "install") @@ -114,409 +53,22 @@ class AutotoolsBuilder(BuilderWithDefaults): #: Names associated with package attributes in the old build-system format package_attributes = ( "archive_files", - "patch_libtool", - "build_targets", - "install_targets", "build_time_test_callbacks", "install_time_test_callbacks", - "force_autoreconf", - "autoreconf_extra_args", - "install_libtool_archives", - "patch_config_files", "configure_directory", "configure_abs_path", "build_directory", - "autoreconf_search_path_args", ) - #: Whether to update ``libtool`` (e.g. for Arm/Clang/Fujitsu/NVHPC compilers) - patch_libtool = True - - #: Targets for ``make`` during the :py:meth:`~.AutotoolsBuilder.build` phase - build_targets: List[str] = [] - #: Targets for ``make`` during the :py:meth:`~.AutotoolsBuilder.install` phase - install_targets = ["install"] - #: Callback names for build-time test build_time_test_callbacks = ["check"] #: Callback names for install-time test install_time_test_callbacks = ["installcheck"] - #: Set to true to force the autoreconf step even if configure is present - force_autoreconf = False - - #: Options to be passed to autoreconf when using the default implementation - autoreconf_extra_args: List[str] = [] - - #: If False deletes all the .la files in the prefix folder after the installation. - #: If True instead it installs them. - install_libtool_archives = False - - @property - def patch_config_files(self) -> bool: - """Whether to update old ``config.guess`` and ``config.sub`` files - distributed with the tarball. - - This currently only applies to ``ppc64le:``, ``aarch64:``, and - ``riscv64`` target architectures. - - The substitutes are taken from the ``gnuconfig`` package, which is - automatically added as a build dependency for these architectures. In case - system versions of these config files are required, the ``gnuconfig`` package - can be marked external, with a prefix pointing to the directory containing the - system ``config.guess`` and ``config.sub`` files. 
- """ - return ( - self.pkg.spec.satisfies("target=ppc64le:") - or self.pkg.spec.satisfies("target=aarch64:") - or self.pkg.spec.satisfies("target=riscv64:") - ) - - @property - def _removed_la_files_log(self) -> str: - """File containing the list of removed libtool archives""" - return os.path.join(self.build_directory, "removed_la_files.txt") - @property def archive_files(self) -> List[str]: - """Files to archive for packages based on autotools""" - files = [os.path.join(self.build_directory, "config.log")] - if not self.install_libtool_archives: - files.append(self._removed_la_files_log) - return files - - @spack.phase_callbacks.run_after("autoreconf") - def _do_patch_config_files(self) -> None: - """Some packages ship with older config.guess/config.sub files and need to - have these updated when installed on a newer architecture. - - In particular, config.guess fails for PPC64LE for version prior to a - 2013-06-10 build date (automake 1.13.4) and for AArch64 and RISC-V. - """ - if not self.patch_config_files: - return - - # TODO: Expand this to select the 'config.sub'-compatible architecture - # for each platform (e.g. 'config.sub' doesn't accept 'power9le', but - # does accept 'ppc64le'). - if self.pkg.spec.satisfies("target=ppc64le:"): - config_arch = "ppc64le" - elif self.pkg.spec.satisfies("target=aarch64:"): - config_arch = "aarch64" - elif self.pkg.spec.satisfies("target=riscv64:"): - config_arch = "riscv64" - else: - config_arch = "local" - - def runs_ok(script_abs_path): - # Construct the list of arguments for the call - additional_args = {"config.sub": [config_arch]} - script_name = os.path.basename(script_abs_path) - args = [script_abs_path] + additional_args.get(script_name, []) - - try: - subprocess.check_call(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - except Exception as e: - tty.debug(e) - return False - - return True - - # Get the list of files that needs to be patched - to_be_patched = fs.find(self.pkg.stage.path, files=["config.sub", "config.guess"]) - to_be_patched = [f for f in to_be_patched if not runs_ok(f)] - - # If there are no files to be patched, return early - if not to_be_patched: - return - - # Otherwise, require `gnuconfig` to be a build dependency - ensure_build_dependencies_or_raise( - spec=self.pkg.spec, dependencies=["gnuconfig"], error_msg="Cannot patch config files" - ) - - # Get the config files we need to patch (config.sub / config.guess). - to_be_found = list(set(os.path.basename(f) for f in to_be_patched)) - gnuconfig = self.pkg.spec["gnuconfig"] - gnuconfig_dir = gnuconfig.prefix - - # An external gnuconfig may not not have a prefix. - if gnuconfig_dir is None: - raise spack.error.InstallError( - "Spack could not find substitutes for GNU config files because no " - "prefix is available for the `gnuconfig` package. Make sure you set a " - "prefix path instead of modules for external `gnuconfig`." - ) - - candidates = fs.find(gnuconfig_dir, files=to_be_found, recursive=False) - - # For external packages the user may have specified an incorrect prefix. - # otherwise the installation is just corrupt. - if not candidates: - msg = ( - "Spack could not find `config.guess` and `config.sub` " - "files in the `gnuconfig` prefix `{0}`. 
This means the " - "`gnuconfig` package is broken" - ).format(gnuconfig_dir) - if gnuconfig.external: - msg += ( - " or the `gnuconfig` package prefix is misconfigured as" " an external package" - ) - raise spack.error.InstallError(msg) - - # Filter working substitutes - candidates = [f for f in candidates if runs_ok(f)] - substitutes = {} - for candidate in candidates: - config_file = os.path.basename(candidate) - substitutes[config_file] = candidate - to_be_found.remove(config_file) - - # Check that we found everything we needed - if to_be_found: - msg = """\ -Spack could not find working replacements for the following autotools config -files: {0}. - -To resolve this problem, please try the following: -1. Try to rebuild with `patch_config_files = False` in the package `{1}`, to - rule out that Spack tries to replace config files not used by the build. -2. Verify that the `gnuconfig` package is up-to-date. -3. On some systems you need to use system-provided `config.guess` and `config.sub` - files. In this case, mark `gnuconfig` as an non-buildable external package, - and set the prefix to the directory containing the `config.guess` and - `config.sub` files. -""" - raise spack.error.InstallError(msg.format(", ".join(to_be_found), self.pkg.name)) - - # Copy the good files over the bad ones - for abs_path in to_be_patched: - name = os.path.basename(abs_path) - mode = os.stat(abs_path).st_mode - os.chmod(abs_path, stat.S_IWUSR) - fs.copy(substitutes[name], abs_path) - os.chmod(abs_path, mode) - - @spack.phase_callbacks.run_before("configure") - def _patch_usr_bin_file(self) -> None: - """On NixOS file is not available in /usr/bin/file. Patch configure - scripts to use file from path.""" - - if self.spec.os.startswith("nixos"): - x = fs.FileFilter( - *filter(fs.is_exe, fs.find(self.build_directory, "configure", recursive=True)) - ) - with fs.keep_modification_time(*x.filenames): - x.filter(regex="/usr/bin/file", repl="file", string=True) - - @spack.phase_callbacks.run_before("configure") - def _set_autotools_environment_variables(self) -> None: - """Many autotools builds use a version of mknod.m4 that fails when - running as root unless FORCE_UNSAFE_CONFIGURE is set to 1. - - We set this to 1 and expect the user to take responsibility if - they are running as root. They have to anyway, as this variable - doesn't actually prevent configure from doing bad things as root. - Without it, configure just fails halfway through, but it can - still run things *before* this check. Forcing this just removes a - nuisance -- this is not circumventing any real protection. - """ - os.environ["FORCE_UNSAFE_CONFIGURE"] = "1" - - @spack.phase_callbacks.run_before("configure") - def _do_patch_libtool_configure(self) -> None: - """Patch bugs that propagate from libtool macros into "configure" and - further into "libtool". Note that patches that can be fixed by patching - "libtool" directly should be implemented in the _do_patch_libtool method - below.""" - - # Exit early if we are required not to patch libtool-related problems: - if not self.patch_libtool: - return - - x = fs.FileFilter( - *filter(fs.is_exe, fs.find(self.build_directory, "configure", recursive=True)) - ) - - # There are distributed automatically generated files that depend on the configure script - # and require additional tools for rebuilding. 
- # See https://github.com/spack/spack/pull/30768#issuecomment-1219329860 - with fs.keep_modification_time(*x.filenames): - # Fix parsing of compiler output when collecting predeps and postdeps - # https://lists.gnu.org/archive/html/bug-libtool/2016-03/msg00003.html - x.filter(regex=r'^(\s*if test x-L = )("\$p" \|\|\s*)$', repl=r"\1x\2") - x.filter( - regex=r'^(\s*test x-R = )("\$p")(; then\s*)$', repl=r'\1x\2 || test x-l = x"$p"\3' - ) - # Support Libtool 2.4.2 and older: - x.filter(regex=r'^(\s*test \$p = "-R")(; then\s*)$', repl=r'\1 || test x-l = x"$p"\2') - # Configure scripts generated with libtool < 2.5.4 have a faulty test for the - # -single_module linker flag. A deprecation warning makes it think the default is - # -multi_module, triggering it to use problematic linker flags (such as ld -r). The - # linker default is `-single_module` from (ancient) macOS 10.4, so override by setting - # `lt_cv_apple_cc_single_mod=yes`. See the fix in libtool commit - # 82f7f52123e4e7e50721049f7fa6f9b870e09c9d. - x.filter("lt_cv_apple_cc_single_mod=no", "lt_cv_apple_cc_single_mod=yes", string=True) - - @spack.phase_callbacks.run_after("configure") - def _do_patch_libtool(self) -> None: - """If configure generates a "libtool" script that does not correctly - detect the compiler (and patch_libtool is set), patch in the correct - values for libtool variables. - - The generated libtool script supports mixed compilers through tags: - ``libtool --tag=CC/CXX/FC/...```. For each tag there is a block with variables, - which defines what flags to pass to the compiler. The default variables (which - are used by the default tag CC) are set in a block enclosed by - ``# ### {BEGIN,END} LIBTOOL CONFIG``. For non-default tags, there are - corresponding blocks ``# ### {BEGIN,END} LIBTOOL TAG CONFIG: {CXX,FC,F77}`` at - the end of the file (after the exit command). libtool evals these blocks. - Whenever we need to update variables that the configure script got wrong - (for example cause it did not recognize the compiler), we should properly scope - those changes to these tags/blocks so they only apply to the compiler we care - about. Below, ``start_at`` and ``stop_at`` are used for that.""" - - # Exit early if we are required not to patch libtool: - if not self.patch_libtool: - return - - x = fs.FileFilter( - *filter(fs.is_exe, fs.find(self.build_directory, "libtool", recursive=True)) - ) - - # Exit early if there is nothing to patch: - if not x.filenames: - return - - markers = {"cc": "LIBTOOL CONFIG"} - for tag in ["cxx", "fc", "f77"]: - markers[tag] = "LIBTOOL TAG CONFIG: {0}".format(tag.upper()) - - # Replace empty linker flag prefixes: - if self.spec.satisfies("%nag"): - # Nag is mixed with gcc and g++, which are recognized correctly. 
- # Therefore, we change only Fortran values: - nag_pkg = self.spec["fortran"].package - for tag in ["fc", "f77"]: - marker = markers[tag] - x.filter( - regex='^wl=""$', - repl=f'wl="{nag_pkg.linker_arg}"', - start_at=f"# ### BEGIN {marker}", - stop_at=f"# ### END {marker}", - ) - else: - compiler_spec = spack.compilers.libraries.compiler_spec(self.spec) - if compiler_spec: - x.filter(regex='^wl=""$', repl='wl="{0}"'.format(compiler_spec.package.linker_arg)) - - # Replace empty PIC flag values: - for compiler, marker in markers.items(): - if compiler == "cc": - language = "c" - elif compiler == "cxx": - language = "cxx" - else: - language = "fortran" - - if language not in self.spec: - continue - - x.filter( - regex='^pic_flag=""$', - repl=f'pic_flag="{self.spec[language].package.pic_flag}"', - start_at=f"# ### BEGIN {marker}", - stop_at=f"# ### END {marker}", - ) - - # Other compiler-specific patches: - if self.spec.satisfies("%fj"): - x.filter(regex="-nostdlib", repl="", string=True) - rehead = r"/\S*/" - for o in [ - r"fjhpctag\.o", - r"fjcrt0\.o", - r"fjlang08\.o", - r"fjomp\.o", - r"crti\.o", - r"crtbeginS\.o", - r"crtendS\.o", - ]: - x.filter(regex=(rehead + o), repl="") - elif self.spec.satisfies("%nag"): - for tag in ["fc", "f77"]: - marker = markers[tag] - start_at = "# ### BEGIN {0}".format(marker) - stop_at = "# ### END {0}".format(marker) - # Libtool 2.4.2 does not know the shared flag: - x.filter( - regex=r"\$CC -shared", - repl=r"\$CC -Wl,-shared", - string=True, - start_at=start_at, - stop_at=stop_at, - ) - # Libtool does not know how to inject whole archives - # (e.g. https://github.com/pmodels/mpich/issues/4358): - x.filter( - regex=r'^whole_archive_flag_spec="\\\$({?wl}?)--whole-archive' - r'\\\$convenience \\\$\1--no-whole-archive"$', - repl=r'whole_archive_flag_spec="\$\1--whole-archive' - r"\`for conv in \$convenience\\\\\"\\\\\"; do test -n \\\\\"\$conv\\\\\" && " - r"new_convenience=\\\\\"\$new_convenience,\$conv\\\\\"; done; " - r'func_echo_all \\\\\"\$new_convenience\\\\\"\` \$\1--no-whole-archive"', - start_at=start_at, - stop_at=stop_at, - ) - # The compiler requires special treatment in certain cases: - x.filter( - regex=r"^(with_gcc=.*)$", - repl="\\1\n\n# Is the compiler the NAG compiler?\nwith_nag=yes", - start_at=start_at, - stop_at=stop_at, - ) - - # Disable the special treatment for gcc and g++: - for tag in ["cc", "cxx"]: - marker = markers[tag] - x.filter( - regex=r"^(with_gcc=.*)$", - repl="\\1\n\n# Is the compiler the NAG compiler?\nwith_nag=no", - start_at="# ### BEGIN {0}".format(marker), - stop_at="# ### END {0}".format(marker), - ) - - # The compiler does not support -pthread flag, which might come - # from the inherited linker flags. 
We prepend the flag with -Wl,
-            # before using it:
-            x.filter(
-                regex=r"^(\s*)(for tmp_inherited_linker_flag in \$tmp_inherited_linker_flags; "
-                r"do\s*)$",
-                repl='\\1if test "x$with_nag" = xyes; then\n'
-                "\\1 revert_nag_pthread=$tmp_inherited_linker_flags\n"
-                "\\1 tmp_inherited_linker_flags="
-                "`$ECHO \"$tmp_inherited_linker_flags\" | $SED 's% -pthread% -Wl,-pthread%g'`\n"
-                '\\1 test x"$revert_nag_pthread" = x"$tmp_inherited_linker_flags" && '
-                "revert_nag_pthread=no || revert_nag_pthread=yes\n"
-                "\\1fi\n\\1\\2",
-                start_at='if test -n "$inherited_linker_flags"; then',
-                stop_at='case " $new_inherited_linker_flags " in',
-            )
-            # And revert the modification to produce '*.la' files that can be
-            # used with gcc (normally, we do not install the files but they can
-            # still be used during the building):
-            start_at = '# Time to change all our "foo.ltframework" stuff back to "-framework foo"'
-            stop_at = "# installed libraries to the beginning of the library search list"
-            x.filter(
-                regex=r"(\s*)(# move library search paths that coincide with paths to not "
-                r"yet\s*)$",
-                repl='\\1test x"$with_nag$revert_nag_pthread" = xyesyes &&\n'
-                '\\1 new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | '
-                "$SED 's% -Wl,-pthread% -pthread%g'`\n\\1\\2",
-                start_at=start_at,
-                stop_at=stop_at,
-            )
+        return [os.path.join(self.build_directory, "config.log")]

     @property
     def configure_directory(self) -> str:
@@ -539,345 +91,26 @@ def build_directory(self) -> str:
             build_dir = os.path.join(self.pkg.stage.source_path, build_dir)
         return build_dir

-    @spack.phase_callbacks.run_before("autoreconf")
-    def _delete_configure_to_force_update(self) -> None:
-        if self.force_autoreconf:
-            fs.force_remove(self.configure_abs_path)
-
-    @property
-    def autoreconf_search_path_args(self) -> List[str]:
-        """Search path includes for autoreconf. Add an -I flag for all `aclocal` dirs
-        of build deps, skips the default path of automake, move external include
-        flags to the back, since they might pull in unrelated m4 files shadowing
-        spack dependencies."""
-        return _autoreconf_search_path_args(self.spec)
-
-    @spack.phase_callbacks.run_after("autoreconf")
-    def _set_configure_or_die(self) -> None:
-        """Ensure the presence of a "configure" script, or raise. If the "configure"
-        is found, a module level attribute is set.
-
-        Raises:
-            RuntimeError: if the "configure" script is not found
-        """
-        # Check if the "configure" script is there. If not raise a RuntimeError.
-        if not os.path.exists(self.configure_abs_path):
-            msg = "configure script not found in {0}"
-            raise RuntimeError(msg.format(self.configure_directory))
-
-        # Monkey-patch the configure script in the corresponding module
-        globals_for_pkg = spack.build_environment.ModuleChangePropagator(self.pkg)
-        globals_for_pkg.configure = Executable(self.configure_abs_path)
-        globals_for_pkg.propagate_changes_to_mro()
-
     def configure_args(self) -> List[str]:
         """Return the list of all the arguments that must be passed to
         configure, except ``--prefix`` which will be pre-pended to the list.
         """
         return []
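For context, ``configure_args`` is the hook that packages override; the real builder prepends ``--prefix`` and passes the result to ``configure``. A minimal sketch of a package using it (the ``Foo`` package and its ``shared`` variant are hypothetical, for illustration only):

.. code-block:: python

   class Foo(AutotoolsPackage):
       # hypothetical package, not part of this diff
       variant("shared", default=True, description="Build shared libraries")

       def configure_args(self):
           # --prefix is prepended automatically by the configure phase
           return ["--disable-dependency-tracking"] + self.with_or_without("shared")

Here ``with_or_without("shared")`` yields ``--with-shared`` or ``--without-shared`` depending on the spec, as described by the variant-handling helpers below.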
""" return [] - def autoreconf( - self, pkg: AutotoolsPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Not needed usually, configure should be already there""" - - # If configure exists nothing needs to be done - if os.path.exists(self.configure_abs_path): - return + def autoreconf(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: + pass - # Else try to regenerate it, which requires a few build dependencies - ensure_build_dependencies_or_raise( - spec=spec, - dependencies=["autoconf", "automake", "libtool"], - error_msg="Cannot generate configure", - ) + def configure(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: + pass - tty.msg("Configure script not found: trying to generate it") - tty.warn("*********************************************************") - tty.warn("* If the default procedure fails, consider implementing *") - tty.warn("* a custom AUTORECONF phase in the package *") - tty.warn("*********************************************************") - with fs.working_dir(self.configure_directory): - # This line is what is needed most of the time - # --install, --verbose, --force - autoreconf_args = ["-ivf"] - autoreconf_args += self.autoreconf_search_path_args - autoreconf_args += self.autoreconf_extra_args - self.pkg.module.autoreconf(*autoreconf_args) + def build(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: + pass - def configure( - self, pkg: AutotoolsPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run "configure", with the arguments specified by the builder and an - appropriately set prefix. - """ - options = getattr(self.pkg, "configure_flag_args", []) - options += ["--prefix={0}".format(prefix)] - options += self.configure_args() - - with fs.working_dir(self.build_directory, create=True): - pkg.module.configure(*options) - - def build( - self, pkg: AutotoolsPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run "make" on the build targets specified by the builder.""" - # See https://autotools.io/automake/silent.html - params = ["V=1"] - params += self.build_targets - with fs.working_dir(self.build_directory): - pkg.module.make(*params) - - def install( - self, pkg: AutotoolsPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run "make" on the install targets specified by the builder.""" - with fs.working_dir(self.build_directory): - pkg.module.make(*self.install_targets) - - spack.phase_callbacks.run_after("build")(execute_build_time_tests) + def install(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: + pass def check(self) -> None: - """Run "make" on the ``test`` and ``check`` targets, if found.""" - with fs.working_dir(self.build_directory): - self.pkg._if_make_target_execute("test") - self.pkg._if_make_target_execute("check") - - def _activate_or_not( - self, - name: str, - activation_word: str, - deactivation_word: str, - activation_value: Optional[Union[Callable, str]] = None, - variant=None, - ) -> List[str]: - """This function contain the current implementation details of - :meth:`~spack.build_systems.autotools.AutotoolsBuilder.with_or_without` and - :meth:`~spack.build_systems.autotools.AutotoolsBuilder.enable_or_disable`. 
- - Args: - name: name of the option that is being activated or not - activation_word: the default activation word ('with' in the case of - ``with_or_without``) - deactivation_word: the default deactivation word ('without' in the case of - ``with_or_without``) - activation_value: callable that accepts a single value. This value is either one of the - allowed values for a multi-valued variant or the name of a bool-valued variant. - Returns the parameter to be used when the value is activated. - - The special value "prefix" can also be assigned and will return - ``spec[name].prefix`` as activation parameter. - variant: name of the variant that is being processed (if different from option name) - - Examples: - - Given a package with: - - .. code-block:: python - - variant("foo", values=("x", "y"), description=") - variant("bar", default=True, description=") - variant("ba_z", default=True, description=") - - calling this function like: - - .. code-block:: python - - _activate_or_not( - "foo", "with", "without", activation_value="prefix" - ) - _activate_or_not("bar", "with", "without") - _activate_or_not("ba-z", "with", "without", variant="ba_z") - - will generate the following configuration options: - - .. code-block:: console - - --with-x= --without-y --with-bar --with-ba-z - - for `` foo=x +bar`` - - Note: returns an empty list when the variant is conditional and its condition - is not met. - - Returns: - list: list of strings that corresponds to the activation/deactivation - of the variant that has been processed - - Raises: - KeyError: if name is not among known variants - """ - spec: spack.spec.Spec = self.pkg.spec - args: List[str] = [] - - if activation_value == "prefix": - activation_value = lambda x: spec[x].prefix - - variant = variant or name - - # Defensively look that the name passed as argument is among variants - if not self.pkg.has_variant(variant): - msg = '"{0}" is not a variant of "{1}"' - raise KeyError(msg.format(variant, self.pkg.name)) - - if variant not in spec.variants: - return [] - - # Create a list of pairs. Each pair includes a configuration - # option and whether or not that option is activated - vdef = self.pkg.get_variant(variant) - if set(vdef.values) == set((True, False)): # type: ignore - # BoolValuedVariant carry information about a single option. - # Nonetheless, for uniformity of treatment we'll package them - # in an iterable of one element. - options = [(name, f"+{variant}" in spec)] - else: - # "feature_values" is used to track values which correspond to - # features which can be enabled or disabled as understood by the - # package's build system. It excludes values which have special - # meanings and do not correspond to features (e.g. 
"none") - feature_values = getattr(vdef.values, "feature_values", None) or vdef.values - options = [(v, f"{variant}={v}" in spec) for v in feature_values] # type: ignore - - # For each allowed value in the list of values - for option_value, activated in options: - # Search for an override in the package for this value - override_name = f"{activation_word}_or_{deactivation_word}_{option_value}" - line_generator = getattr(self, override_name, None) or getattr( - self.pkg, override_name, None - ) - # If not available use a sensible default - if line_generator is None: - - def _default_generator(is_activated): - if is_activated: - line = f"--{activation_word}-{option_value}" - if activation_value is not None and activation_value( - option_value - ): # NOQA=ignore=E501 - line = f"{line}={activation_value(option_value)}" - return line - return f"--{deactivation_word}-{option_value}" - - line_generator = _default_generator - args.append(line_generator(activated)) - return args - - def with_or_without( - self, - name: str, - activation_value: Optional[Union[Callable, str]] = None, - variant: Optional[str] = None, - ) -> List[str]: - """Inspects a variant and returns the arguments that activate - or deactivate the selected feature(s) for the configure options. - - This function works on all type of variants. For bool-valued variants - it will return by default ``--with-{name}`` or ``--without-{name}``. - For other kinds of variants it will cycle over the allowed values and - return either ``--with-{value}`` or ``--without-{value}``. - - If activation_value is given, then for each possible value of the - variant, the option ``--with-{value}=activation_value(value)`` or - ``--without-{value}`` will be added depending on whether or not - ``variant=value`` is in the spec. - - Args: - name: name of a valid multi-valued variant - activation_value: callable that accepts a single value and returns the parameter to be - used leading to an entry of the type ``--with-{name}={parameter}``. - - The special value "prefix" can also be assigned and will return - ``spec[name].prefix`` as activation parameter. - - Returns: - list of arguments to configure - """ - return self._activate_or_not(name, "with", "without", activation_value, variant) - - def enable_or_disable( - self, - name: str, - activation_value: Optional[Union[Callable, str]] = None, - variant: Optional[str] = None, - ) -> List[str]: - """Same as - :meth:`~spack.build_systems.autotools.AutotoolsBuilder.with_or_without` - but substitute ``with`` with ``enable`` and ``without`` with ``disable``. - - Args: - name: name of a valid multi-valued variant - activation_value: if present accepts a single value and returns the parameter to be - used leading to an entry of the type ``--enable-{name}={parameter}`` - - The special value "prefix" can also be assigned and will return - ``spec[name].prefix`` as activation parameter. - - Returns: - list of arguments to configure - """ - return self._activate_or_not(name, "enable", "disable", activation_value, variant) - - spack.phase_callbacks.run_after("install")(execute_install_time_tests) - - def installcheck(self) -> None: - """Run "make" on the ``installcheck`` target, if found.""" - with fs.working_dir(self.build_directory): - self.pkg._if_make_target_execute("installcheck") - - @spack.phase_callbacks.run_after("install") - def _remove_libtool_archives(self) -> None: - """Remove all .la files in prefix sub-folders if the package sets - ``install_libtool_archives`` to be False. 
- """ - # If .la files are to be installed there's nothing to do - if self.install_libtool_archives: - return - - # Remove the files and create a log of what was removed - libtool_files = fs.find(str(self.pkg.prefix), "*.la", recursive=True) - with fs.safe_remove(*libtool_files): - fs.mkdirp(os.path.dirname(self._removed_la_files_log)) - with open(self._removed_la_files_log, mode="w", encoding="utf-8") as f: - f.write("\n".join(libtool_files)) - - def setup_build_environment( - self, env: spack.util.environment.EnvironmentModifications - ) -> None: - if self.spec.platform == "darwin" and macos_version() >= Version("11"): - # Many configure files rely on matching '10.*' for macOS version - # detection and fail to add flags if it shows as version 11. - env.set("MACOSX_DEPLOYMENT_TARGET", "10.16") - - # On macOS, force rpaths for shared library IDs and remove duplicate rpaths - spack.phase_callbacks.run_after("install", when="platform=darwin")(apply_macos_rpath_fixups) - - -def _autoreconf_search_path_args(spec: spack.spec.Spec) -> List[str]: - dirs_seen: Set[Tuple[int, int]] = set() - flags_spack: List[str] = [] - flags_external: List[str] = [] - - # We don't want to add an include flag for automake's default search path. - for automake in spec.dependencies(name="automake", deptype="build"): - try: - s = os.stat(automake.prefix.share.aclocal) - if stat.S_ISDIR(s.st_mode): - dirs_seen.add((s.st_ino, s.st_dev)) - except OSError: - pass + pass - for dep in spec.dependencies(deptype="build"): - path = dep.prefix.share.aclocal - # Skip non-existing aclocal paths - try: - s = os.stat(path) - except OSError: - continue - # Skip things seen before, as well as non-dirs. - if (s.st_ino, s.st_dev) in dirs_seen or not stat.S_ISDIR(s.st_mode): - continue - dirs_seen.add((s.st_ino, s.st_dev)) - flags = flags_external if dep.external else flags_spack - flags.extend(["-I", path]) - return flags_spack + flags_external + run_after("build")(execute_build_time_tests) + run_after("install")(execute_install_time_tests) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/bundle.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/bundle.py index 2aff55bd402a01..f601c722b36fbf 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/bundle.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/bundle.py @@ -1,30 +1,23 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import spack.builder -import spack.directives -import spack.package_base +from spack.package import Builder, PackageBase, Prefix, Spec, build_system, register_builder -class BundlePackage(spack.package_base.PackageBase): + +class BundlePackage(PackageBase): """General purpose bundle, or no-code, package class.""" - #: This attribute is used in UI queries that require to know which - #: build-system class we are using build_system_class = "BundlePackage" - - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "bundle" - - #: Bundle packages do not have associated source or binary code. 
has_code = False - spack.directives.build_system("bundle") + build_system("bundle") -@spack.builder.register_builder("bundle") -class BundleBuilder(spack.builder.Builder): +@register_builder("bundle") +class BundleBuilder(Builder): phases = ("install",) - def install(self, pkg, spec, prefix): + def install(self, pkg: BundlePackage, spec: Spec, prefix: Prefix) -> None: pass diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/cmake.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/cmake.py index 28433ce1583676..ecbda74cef97a7 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/cmake.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/cmake.py @@ -1,312 +1,65 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import collections.abc import os -import pathlib -import platform -import re -import sys -from itertools import chain -from typing import Any, List, Optional, Tuple -import spack.builder -import spack.deptypes as dt -import spack.error -import spack.llnl.util.filesystem as fs -import spack.package_base -import spack.phase_callbacks -import spack.spec -import spack.util.prefix -from spack import traverse -from spack.directives import build_system, conflicts, depends_on, variant -from spack.llnl.util import tty -from spack.llnl.util.lang import stable_partition -from spack.multimethod import when -from spack.util.environment import filter_system_paths +from spack.package import ( + BuilderWithDefaults, + List, + PackageBase, + Prefix, + Spec, + Tuple, + build_system, + depends_on, + register_builder, + run_after, +) -from ._checks import BuilderWithDefaults, execute_build_time_tests +from ._checks import execute_build_time_tests -# Regex to extract the primary generator from the CMake generator -# string. -_primary_generator_extractor = re.compile(r"(?:.* - )?(.*)") - -def _extract_primary_generator(generator): - """Use the compiled regex _primary_generator_extractor to extract the - primary generator from the generator string which may contain an - optional secondary generator. - """ - return _primary_generator_extractor.match(generator).group(1) - - -def _maybe_set_python_hints(pkg: spack.package_base.PackageBase, args: List[str]) -> None: - """Set the PYTHON_EXECUTABLE, Python_EXECUTABLE, and Python3_EXECUTABLE CMake variables - if the package has Python as build or link dep and ``find_python_hints`` is set to True. 
See - ``find_python_hints`` for context.""" - if not getattr(pkg, "find_python_hints", False) or not pkg.spec.dependencies( - "python", dt.BUILD | dt.LINK - ): - return - python_executable = pkg.spec["python"].command.path - args.extend( - [ - define("PYTHON_EXECUTABLE", python_executable), - define("Python_EXECUTABLE", python_executable), - define("Python3_EXECUTABLE", python_executable), - ] - ) - - -def _supports_compilation_databases(pkg: spack.package_base.PackageBase) -> bool: - """Check if this package (and CMake) can support compilation databases.""" - - # CMAKE_EXPORT_COMPILE_COMMANDS only exists for CMake >= 3.5 - if not pkg.spec.satisfies("^cmake@3.5:"): - return False - - # CMAKE_EXPORT_COMPILE_COMMANDS is only implemented for Makefile and Ninja generators - if not (pkg.spec.satisfies("generator=make") or pkg.spec.satisfies("generator=ninja")): - return False - - return True - - -def _conditional_cmake_defaults(pkg: spack.package_base.PackageBase, args: List[str]) -> None: - """Set a few default defines for CMake, depending on its version.""" - cmakes = pkg.spec.dependencies("cmake", dt.BUILD) - - if len(cmakes) != 1: - return - - cmake = cmakes[0] - - # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9 - try: - ipo = pkg.spec.variants["ipo"].value - except KeyError: - ipo = False - - if cmake.satisfies("@3.9:"): - args.append(define("CMAKE_INTERPROCEDURAL_OPTIMIZATION", ipo)) - - # Disable Package Registry: export(PACKAGE) may put files in the user's home directory, and - # find_package may search there. This is not what we want. - - # Do not populate CMake User Package Registry - if cmake.satisfies("@3.15:"): - # see https://cmake.org/cmake/help/latest/policy/CMP0090.html - args.append(define("CMAKE_POLICY_DEFAULT_CMP0090", "NEW")) - elif cmake.satisfies("@3.1:"): - # see https://cmake.org/cmake/help/latest/variable/CMAKE_EXPORT_NO_PACKAGE_REGISTRY.html - args.append(define("CMAKE_EXPORT_NO_PACKAGE_REGISTRY", True)) - - # Do not use CMake User/System Package Registry - # https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#disabling-the-package-registry - if cmake.satisfies("@3.16:"): - args.append(define("CMAKE_FIND_USE_PACKAGE_REGISTRY", False)) - elif cmake.satisfies("@3.1:3.15"): - args.append(define("CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY", False)) - args.append(define("CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY", False)) - - # Export a compilation database if supported. - if _supports_compilation_databases(pkg): - args.append(define("CMAKE_EXPORT_COMPILE_COMMANDS", True)) - - # Enable MACOSX_RPATH by default when cmake_minimum_required < 3 - # https://cmake.org/cmake/help/latest/policy/CMP0042.html - if pkg.spec.satisfies("platform=darwin") and cmake.satisfies("@3:"): - args.append(define("CMAKE_POLICY_DEFAULT_CMP0042", "NEW")) - - # Disable find package's config mode for versions of Boost that - # didn't provide it. See https://github.com/spack/spack/issues/20169 - # and https://cmake.org/cmake/help/latest/module/FindBoost.html - if pkg.spec.satisfies("^boost@:1.69.0"): - args.append(define("Boost_NO_BOOST_CMAKE", True)) - - -def generator(*names: str, default: Optional[str] = None) -> None: - """The build system generator to use. - - See ``cmake --help`` for a list of valid generators. - Currently, "Unix Makefiles" and "Ninja" are the only generators - that Spack supports. Defaults to "Unix Makefiles". - - See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html - for more information. 
- - Args: - names: allowed generators for this package - default: default generator - """ - allowed_values = ("make", "ninja") - if any(x not in allowed_values for x in names): - msg = "only 'make' and 'ninja' are allowed for CMake's 'generator' directive" - raise ValueError(msg) - - default = default or names[0] - not_used = [x for x in allowed_values if x not in names] - - def _values(x): - return x in allowed_values - - _values.__doc__ = f"{','.join(names)}" - - variant( - "generator", - default=default, - values=_values, - description="the build system generator to use", - when="build_system=cmake", - ) - for x in not_used: - conflicts(f"generator={x}") - - -def get_cmake_prefix_path(pkg: spack.package_base.PackageBase) -> List[str]: - """Obtain the CMAKE_PREFIX_PATH entries for a package, based on the - :attr:`~spack.package_base.PackageBase.cmake_prefix_paths` package - attribute of direct build/test and transitive link dependencies.""" - edges = traverse.traverse_topo_edges_generator( - traverse.with_artificial_edges([pkg.spec]), - visitor=traverse.MixedDepthVisitor( - direct=dt.BUILD | dt.TEST, transitive=dt.LINK, key=traverse.by_dag_hash - ), - key=traverse.by_dag_hash, - root=False, - all_edges=False, # cover all nodes, not all edges - ) - ordered_specs = [edge.spec for edge in edges] - # Separate out externals so they do not shadow Spack prefixes - externals, spack_built = stable_partition((s for s in ordered_specs), lambda x: x.external) - - return filter_system_paths( - path for spec in chain(spack_built, externals) for path in spec.package.cmake_prefix_paths - ) - - -class CMakePackage(spack.package_base.PackageBase): +class CMakePackage(PackageBase): """Specialized class for packages built using CMake For more information on the CMake build system, see: https://cmake.org/cmake/help/latest/ """ - #: This attribute is used in UI queries that need to know the build - #: system base class build_system_class = "CMakePackage" - - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "cmake" - #: When this package depends on Python and ``find_python_hints`` is set to True, pass the - #: defines {Python3,Python,PYTHON}_EXECUTABLE explicitly, so that CMake locates the right - #: Python in its builtin FindPython3, FindPython, and FindPythonInterp modules. Spack does - #: CMake's job because CMake's modules by default only search for Python versions known at the - #: time of release. - find_python_hints = True - build_system("cmake") - with when("build_system=cmake"): - # https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html - # See https://github.com/spack/spack/pull/36679 and related issues for a - # discussion of the trade-offs between Release and RelWithDebInfo for default - # builds. Release is chosen to maximize performance and reduce disk-space burden, - # at the cost of more difficulty in debugging. 
-        variant(
-            "build_type",
-            default="Release",
-            description="CMake build type",
-            values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
-        )
-
-        # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
-        # https://cmake.org/cmake/help/latest/variable/CMAKE_INTERPROCEDURAL_OPTIMIZATION.html
-        variant(
-            "ipo",
-            default=False,
-            when="^cmake@3.9:",
-            description="CMake interprocedural optimization",
-        )
-
-        if sys.platform == "win32":
-            generator("ninja")
-        else:
-            generator("ninja", "make", default="make")
-
-        depends_on("cmake", type="build")
-        depends_on("gmake", type="build", when="generator=make")
-        depends_on("ninja", type="build", when="generator=ninja")
+    depends_on("cmake", type="build", when="build_system=cmake")

     def flags_to_build_system_args(self, flags):
-        """Return a list of all command line arguments to pass the specified
-        compiler flags to cmake. Note CMAKE does not have a cppflags option,
-        so cppflags will be added to cflags, cxxflags, and fflags to mimic the
-        behavior in other tools.
-        """
+        """Translate compiler flags to CMake arguments."""
         # Has to be dynamic attribute due to caching
-        setattr(self, "cmake_flag_args", [])
-
-        flag_string = "-DCMAKE_{0}_FLAGS={1}"
-        langs = {"C": "c", "CXX": "cxx", "Fortran": "f"}
+        cmake_flag_args = []

-        # Handle language compiler flags
-        for lang, pre in langs.items():
-            flag = pre + "flags"
-            # cmake has no explicit cppflags support -> add it to all langs
-            lang_flags = " ".join(flags.get(flag, []) + flags.get("cppflags", []))
+        for lang, pre in (("C", "c"), ("CXX", "cxx"), ("Fortran", "f")):
+            lang_flags = " ".join(flags.get(f"{pre}flags", []) + flags.get("cppflags", []))
             if lang_flags:
-                self.cmake_flag_args.append(flag_string.format(lang, lang_flags))
+                cmake_flag_args.append(f"-DCMAKE_{lang}_FLAGS={lang_flags}")

-        # Cmake has different linker arguments for different build types.
-        # We specify for each of them.
         if flags["ldflags"]:
             ldflags = " ".join(flags["ldflags"])
-            # cmake has separate linker arguments for types of builds.
-            self.cmake_flag_args.append(f"-DCMAKE_EXE_LINKER_FLAGS={ldflags}")
-            self.cmake_flag_args.append(f"-DCMAKE_MODULE_LINKER_FLAGS={ldflags}")
-            self.cmake_flag_args.append(f"-DCMAKE_SHARED_LINKER_FLAGS={ldflags}")
+            cmake_flag_args.append(f"-DCMAKE_EXE_LINKER_FLAGS={ldflags}")
+            cmake_flag_args.append(f"-DCMAKE_MODULE_LINKER_FLAGS={ldflags}")
+            cmake_flag_args.append(f"-DCMAKE_SHARED_LINKER_FLAGS={ldflags}")

-        # CMake has libs options separated by language. Apply ours to each.
         if flags["ldlibs"]:
             libs_flags = " ".join(flags["ldlibs"])
-            libs_string = "-DCMAKE_{0}_STANDARD_LIBRARIES={1}"
-            for lang in langs:
-                self.cmake_flag_args.append(libs_string.format(lang, libs_flags))
-
-    # Legacy methods (used by too many packages to change them,
-    # need to forward to the builder)
-    def define(self, cmake_var: str, value: Any) -> str:
-        return define(cmake_var, value)
+            for lang in ("C", "CXX", "Fortran"):
+                cmake_flag_args.append(f"-DCMAKE_{lang}_STANDARD_LIBRARIES={libs_flags}")

-    def define_from_variant(self, cmake_var: str, variant: Optional[str] = None) -> str:
-        return define_from_variant(self, cmake_var, variant)
+        setattr(self, "cmake_flag_args", cmake_flag_args)
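To illustrate the rewritten ``flags_to_build_system_args`` above (the flag values here are invented; note how ``cppflags`` is folded into every language's flags because CMake has no cppflags equivalent):

.. code-block:: python

   flags = {
       "cflags": ["-O2"], "cxxflags": [], "fflags": [],
       "cppflags": ["-DNDEBUG"], "ldflags": ["-L/opt/foo/lib"], "ldlibs": ["-lm"],
   }
   # pkg.flags_to_build_system_args(flags) sets pkg.cmake_flag_args to:
   # ["-DCMAKE_C_FLAGS=-O2 -DNDEBUG",
   #  "-DCMAKE_CXX_FLAGS=-DNDEBUG",
   #  "-DCMAKE_Fortran_FLAGS=-DNDEBUG",
   #  "-DCMAKE_EXE_LINKER_FLAGS=-L/opt/foo/lib",
   #  "-DCMAKE_MODULE_LINKER_FLAGS=-L/opt/foo/lib",
   #  "-DCMAKE_SHARED_LINKER_FLAGS=-L/opt/foo/lib",
   #  "-DCMAKE_C_STANDARD_LIBRARIES=-lm",
   #  "-DCMAKE_CXX_STANDARD_LIBRARIES=-lm",
   #  "-DCMAKE_Fortran_STANDARD_LIBRARIES=-lm"]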
-@spack.builder.register_builder("cmake")
+@register_builder("cmake")
 class CMakeBuilder(BuilderWithDefaults):
-    """The cmake builder encodes the default way of building software with CMake. IT
-    has three phases that can be overridden:
-
-        1. :py:meth:`~.CMakeBuilder.cmake`
-        2. :py:meth:`~.CMakeBuilder.build`
-        3. :py:meth:`~.CMakeBuilder.install`
-
-    They all have sensible defaults and for many packages the only thing
-    necessary will be to override :py:meth:`~.CMakeBuilder.cmake_args`.
-
-    For a finer tuning you may also override:
-
-    +-----------------------------------------------+--------------------+
-    | **Method**                                    | **Purpose**        |
-    +===============================================+====================+
-    | :py:meth:`~.CMakeBuilder.root_cmakelists_dir` | Location of the    |
-    |                                               | root CMakeLists.txt|
-    +-----------------------------------------------+--------------------+
-    | :py:meth:`~.CMakeBuilder.build_directory`     | Directory where to |
-    |                                               | build the package  |
-    +-----------------------------------------------+--------------------+
-    """
+    """Builder for CMake packages"""

     #: Phases of a CMake package
     phases: Tuple[str, ...] = ("cmake", "build", "install")
@@ -316,334 +69,35 @@ class CMakeBuilder(BuilderWithDefaults):

     #: Names associated with package attributes in the old build-system format
     package_attributes: Tuple[str, ...] = (
-        "build_targets",
-        "install_targets",
         "build_time_test_callbacks",
         "archive_files",
-        "root_cmakelists_dir",
-        "std_cmake_args",
-        "build_dirname",
         "build_directory",
     )

-    #: Targets to be used during the build phase
-    build_targets: List[str] = []
-    #: Targets to be used during the install phase
-    install_targets = ["install"]
     #: Callback names for build-time test
     build_time_test_callbacks = ["check"]

     @property
     def archive_files(self) -> List[str]:
-        """Files to archive for packages based on CMake"""
-        files = [os.path.join(self.build_directory, "CMakeCache.txt")]
-        if _supports_compilation_databases(self.pkg):
-            files.append(os.path.join(self.build_directory, "compile_commands.json"))
-        return files
-
-    @property
-    def root_cmakelists_dir(self) -> str:
-        """The relative path to the directory containing CMakeLists.txt
-
-        This path is relative to the root of the extracted tarball,
-        not to the ``build_directory``. Defaults to the current directory.
- """ - return self.pkg.stage.source_path - - @property - def generator(self) -> str: - if self.spec.satisfies("generator=make"): - return "Unix Makefiles" - if self.spec.satisfies("generator=ninja"): - return "Ninja" - raise ValueError( - f'{self.spec.format()} has an unsupported value for the "generator" variant' - ) - - @property - def std_cmake_args(self) -> List[str]: - """Standard cmake arguments provided as a property for - convenience of package writers - """ - args = CMakeBuilder.std_args(self.pkg, generator=self.generator) - args += getattr(self.pkg, "cmake_flag_args", []) - return args - - @staticmethod - def std_args( - pkg: spack.package_base.PackageBase, generator: Optional[str] = None - ) -> List[str]: - """Computes the standard cmake arguments for a generic package""" - default_generator = "Ninja" if sys.platform == "win32" else "Unix Makefiles" - generator = generator or default_generator - valid_primary_generators = ["Unix Makefiles", "Ninja"] - primary_generator = _extract_primary_generator(generator) - if primary_generator not in valid_primary_generators: - msg = "Invalid CMake generator: '{0}'\n".format(generator) - msg += "CMakePackage currently supports the following " - msg += "primary generators: '{0}'".format("', '".join(valid_primary_generators)) - raise spack.error.InstallError(msg) - - try: - build_type = pkg.spec.variants["build_type"].value - except KeyError: - build_type = "RelWithDebInfo" - - args = [ - "-G", - generator, - define("CMAKE_INSTALL_PREFIX", pathlib.Path(pkg.prefix).as_posix()), - define("CMAKE_INSTALL_RPATH_USE_LINK_PATH", True), - # only include the install prefix lib dirs; rpaths for deps are added by USE_LINK_PATH - define( - "CMAKE_INSTALL_RPATH", - [ - pathlib.Path(pkg.prefix, "lib").as_posix(), - pathlib.Path(pkg.prefix, "lib64").as_posix(), - ], - ), - define("CMAKE_PREFIX_PATH", get_cmake_prefix_path(pkg)), - define("CMAKE_BUILD_TYPE", build_type), - ] - - if primary_generator == "Unix Makefiles": - args.append(define("CMAKE_VERBOSE_MAKEFILE", True)) - - if platform.mac_ver()[0]: - args.extend( - [define("CMAKE_FIND_FRAMEWORK", "LAST"), define("CMAKE_FIND_APPBUNDLE", "LAST")] - ) - - _conditional_cmake_defaults(pkg, args) - _maybe_set_python_hints(pkg, args) - - return args - - @staticmethod - def define_cuda_architectures(pkg: spack.package_base.PackageBase) -> str: - return define_cuda_architectures(pkg) - - @staticmethod - def define_hip_architectures(pkg: spack.package_base.PackageBase) -> str: - return define_hip_architectures(pkg) - - @staticmethod - def define(cmake_var: str, value: Any) -> str: - return define(cmake_var, value) - - def define_from_variant(self, cmake_var: str, variant: Optional[str] = None) -> str: - return define_from_variant(self.pkg, cmake_var, variant) - - @property - def build_dirname(self) -> str: - """Directory name to use when building the package.""" - return f"spack-build-{self.pkg.spec.dag_hash(7)}" + return [os.path.join(self.build_directory, "CMakeCache.txt")] @property def build_directory(self) -> str: - """Full-path to the directory to use when building the package.""" - return os.path.join(self.pkg.stage.path, self.build_dirname) + return os.path.join(self.pkg.stage.path, "build") def cmake_args(self) -> List[str]: - """List of all the arguments that must be passed to cmake, except: - - * CMAKE_INSTALL_PREFIX - * CMAKE_BUILD_TYPE - - which will be set automatically. 
- """ return [] - def cmake( - self, pkg: CMakePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Runs ``cmake`` in the build directory""" - - if spec.is_develop: - # skip cmake phase if it is an incremental develop build - - # Determine the files that will re-run CMake that are generated from a successful - # configure step based on state - primary_generator = _extract_primary_generator(self.generator) - configure_artifact = "Makefile" - if primary_generator == "Ninja": - configure_artifact = "ninja.build" - - if os.path.isfile(os.path.join(self.build_directory, configure_artifact)): - tty.msg( - "Incremental build criteria satisfied." - "Skipping CMake configure step. To force configuration run" - f" `spack clean {pkg.name}`" - ) - return - - options = self.std_cmake_args - options += self.cmake_args() - options.append(os.path.abspath(self.root_cmakelists_dir)) - with fs.working_dir(self.build_directory, create=True): - pkg.module.cmake(*options) + def cmake(self, pkg: CMakePackage, spec: Spec, prefix: Prefix) -> None: + pass - def build( - self, pkg: CMakePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Make the build targets""" - with fs.working_dir(self.build_directory): - if self.generator == "Unix Makefiles": - pkg.module.make(*self.build_targets) - elif self.generator == "Ninja": - self.build_targets.append("-v") - pkg.module.ninja(*self.build_targets) + def build(self, pkg: CMakePackage, spec: Spec, prefix: Prefix) -> None: + pass - def install( - self, pkg: CMakePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Make the install targets""" - with fs.working_dir(self.build_directory): - if self.generator == "Unix Makefiles": - pkg.module.make(*self.install_targets) - elif self.generator == "Ninja": - pkg.module.ninja(*self.install_targets) - - spack.phase_callbacks.run_after("build")(execute_build_time_tests) + def install(self, pkg: CMakePackage, spec: Spec, prefix: Prefix) -> None: + pass def check(self) -> None: - """Search the CMake-generated files for the targets ``test`` and ``check``, - and runs them if found. - """ - with fs.working_dir(self.build_directory): - if self.generator == "Unix Makefiles": - self.pkg._if_make_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL") - self.pkg._if_make_target_execute("check") - elif self.generator == "Ninja": - self.pkg._if_ninja_target_execute("test", jobs_env="CTEST_PARALLEL_LEVEL") - self.pkg._if_ninja_target_execute("check") - - -def define(cmake_var: str, value: Any) -> str: - """Return a CMake command line argument that defines a variable. - - The resulting argument will convert boolean values to OFF/ON and lists/tuples to CMake - semicolon-separated string lists. All other values will be interpreted as strings. - - Examples: - - .. code-block:: python - - [define("BUILD_SHARED_LIBS", True), - define("CMAKE_CXX_STANDARD", 14), - define("swr", ["avx", "avx2"])] - - will generate the following configuration options: - - .. code-block:: console - - ["-DBUILD_SHARED_LIBS:BOOL=ON", - "-DCMAKE_CXX_STANDARD:STRING=14", - "-DSWR:STRING=avx;avx2] - - """ - # Create a list of pairs. 
Each pair includes a configuration - # option and whether or not that option is activated - if isinstance(value, bool): - kind = "BOOL" - value = "ON" if value else "OFF" - else: - kind = "STRING" - if isinstance(value, collections.abc.Sequence) and not isinstance(value, str): - value = ";".join(str(v) for v in value) - else: - value = str(value) - - return "".join(["-D", cmake_var, ":", kind, "=", value]) - - -def define_from_variant( - pkg: spack.package_base.PackageBase, cmake_var: str, variant: Optional[str] = None -) -> str: - """Return a CMake command line argument from the given variant's value. + pass - The optional ``variant`` argument defaults to the lower-case transform - of ``cmake_var``. - - Examples: - - Given a package with: - - .. code-block:: python - - variant("cxxstd", default="11", values=("11", "14"), - multi=False, description="") - variant("shared", default=True, description="") - variant("swr", values=any_combination_of("avx", "avx2"), - description="") - - calling this function like: - - .. code-block:: python - - [ - self.define_from_variant("BUILD_SHARED_LIBS", "shared"), - self.define_from_variant("CMAKE_CXX_STANDARD", "cxxstd"), - self.define_from_variant("SWR"), - ] - - will generate the following configuration options: - - .. code-block:: console - - [ - "-DBUILD_SHARED_LIBS:BOOL=ON", - "-DCMAKE_CXX_STANDARD:STRING=14", - "-DSWR:STRING=avx;avx2", - ] - - for `` cxxstd=14 +shared swr=avx,avx2`` - - Note: if the provided variant is conditional, and the condition is not met, this function - returns an empty string. CMake discards empty strings provided on the command line. - """ - if variant is None: - variant = cmake_var.lower() - - if not pkg.has_variant(variant): - raise KeyError('"{0}" is not a variant of "{1}"'.format(variant, pkg.name)) - - if variant not in pkg.spec.variants: - return "" - - value = pkg.spec.variants[variant].value - if isinstance(value, (tuple, list)): - # Sort multi-valued variants for reproducibility - value = sorted(value) - - return define(cmake_var, value) - - -def define_hip_architectures(pkg: spack.package_base.PackageBase) -> str: - """Returns the str ``-DCMAKE_HIP_ARCHITECTURES:STRING=(expanded amdgpu_target)``. - - ``amdgpu_target`` is variant composed of a list of the target HIP - architectures and it is declared in the rocm package. - - This method is no-op for cmake<3.18 and when ``amdgpu_target`` variant is - not set. - - """ - if "amdgpu_target" in pkg.spec.variants and pkg.spec.satisfies("^cmake@3.21:"): - return define("CMAKE_HIP_ARCHITECTURES", pkg.spec.variants["amdgpu_target"].value) - - return "" - - -def define_cuda_architectures(pkg: spack.package_base.PackageBase) -> str: - """Returns the str ``-DCMAKE_CUDA_ARCHITECTURES:STRING=(expanded cuda_arch)``. - - ``cuda_arch`` is variant composed of a list of target CUDA architectures and - it is declared in the cuda package. - - This method is no-op for cmake<3.18 and when ``cuda_arch`` variant is not set. 
- - """ - if "cuda_arch" in pkg.spec.variants and pkg.spec.satisfies("^cmake@3.18:"): - return define("CMAKE_CUDA_ARCHITECTURES", pkg.spec.variants["cuda_arch"].value) - return "" + run_after("build")(execute_build_time_tests) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/generic.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/generic.py index 7fe0b1edf7a44e..108b0877706e95 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/generic.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/generic.py @@ -3,5 +3,4 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack.package import GenericBuilder, Package -_ = Package -_ = GenericBuilder +__all__ = ["Package", "GenericBuilder"] diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/gnu.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/gnu.py index 08478e61ab3c8f..296d828d3b42aa 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/gnu.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/gnu.py @@ -4,35 +4,19 @@ from typing import Optional -import spack.package_base -import spack.util.url +from spack.package import PackageBase, join_url -class GNUMirrorPackage(spack.package_base.PackageBase): - """Mixin that takes care of setting url and mirrors for GNU packages.""" - - #: Path of the package in a GNU mirror +class GNUMirrorPackage(PackageBase): gnu_mirror_path: Optional[str] = None - - #: List of GNU mirrors used by Spack base_mirrors = [ "https://ftpmirror.gnu.org/", "https://ftp.gnu.org/gnu/", - # Fall back to http if https didn't work (for instance because - # Spack is bootstrapping curl) "http://ftpmirror.gnu.org/", ] @property def urls(self): - self._ensure_gnu_mirror_path_is_set_or_raise() - return [ - spack.util.url.join(m, self.gnu_mirror_path, resolve_href=True) - for m in self.base_mirrors - ] - - def _ensure_gnu_mirror_path_is_set_or_raise(self): if self.gnu_mirror_path is None: - cls_name = type(self).__name__ - msg = "{0} must define a `gnu_mirror_path` attribute" " [none defined]" - raise AttributeError(msg.format(cls_name)) + raise AttributeError(f"{self.__class__.__name__}: `gnu_mirror_path` missing") + return [join_url(m, self.gnu_mirror_path, resolve_href=True) for m in self.base_mirrors] diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/makefile.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/makefile.py index c66beee1c9e8d0..c6e6171dacf215 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/makefile.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/makefile.py @@ -1,90 +1,40 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from typing import List - -import spack.builder -import spack.llnl.util.filesystem as fs -import spack.package_base -import spack.phase_callbacks -import spack.spec -import spack.util.prefix -from spack.directives import build_system, conflicts, depends_on -from spack.multimethod import when -from spack.package import BuilderWithDefaults, apply_macos_rpath_fixups, execute_install_time_tests +from spack.package import ( + BuilderWithDefaults, + PackageBase, + Prefix, + Spec, + build_system, + depends_on, + execute_install_time_tests, + register_builder, + run_after, +) from ._checks import execute_build_time_tests -class MakefilePackage(spack.package_base.PackageBase): - """Specialized class for packages built using Makefiles.""" - - #: This attribute is used in UI queries that need to know the build - #: system base class +class MakefilePackage(PackageBase): build_system_class = "MakefilePackage" - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "makefile" build_system("makefile") - - with when("build_system=makefile"): - conflicts("platform=windows") - depends_on("gmake", type="build") + depends_on("gmake", type="build", when="build_system=makefile") -@spack.builder.register_builder("makefile") +@register_builder("makefile") class MakefileBuilder(BuilderWithDefaults): - """The Makefile builder encodes the most common way of building software with - Makefiles. It has three phases that can be overridden, if need be: - - 1. :py:meth:`~.MakefileBuilder.edit` - 2. :py:meth:`~.MakefileBuilder.build` - 3. :py:meth:`~.MakefileBuilder.install` - - It is usually necessary to override the :py:meth:`~.MakefileBuilder.edit` - phase (which is by default a no-op), while the other two have sensible defaults. 
- - For a finer tuning you may override: - - +-----------------------------------------------+--------------------+ - | **Method** | **Purpose** | - +===============================================+====================+ - | :py:attr:`~.MakefileBuilder.build_targets` | Specify ``make`` | - | | targets for the | - | | build phase | - +-----------------------------------------------+--------------------+ - | :py:attr:`~.MakefileBuilder.install_targets` | Specify ``make`` | - | | targets for the | - | | install phase | - +-----------------------------------------------+--------------------+ - | :py:meth:`~.MakefileBuilder.build_directory` | Directory where the| - | | Makefile is located| - +-----------------------------------------------+--------------------+ - """ - phases = ("edit", "build", "install") - - #: Names associated with package methods in the old build-system format package_methods = ("check", "installcheck") - - #: Names associated with package attributes in the old build-system format package_attributes = ( - "build_targets", - "install_targets", "build_time_test_callbacks", "install_time_test_callbacks", "build_directory", ) - #: Targets for ``make`` during the :py:meth:`~.MakefileBuilder.build` phase - build_targets: List[str] = [] - #: Targets for ``make`` during the :py:meth:`~.MakefileBuilder.install` phase - install_targets = ["install"] - - #: Callback names for build-time test build_time_test_callbacks = ["check"] - - #: Callback names for install-time test install_time_test_callbacks = ["installcheck"] @property @@ -92,42 +42,20 @@ def build_directory(self) -> str: """Return the directory containing the main Makefile.""" return self.pkg.stage.source_path - def edit( - self, pkg: MakefilePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Edit the Makefile before calling make. The default is a no-op.""" + def edit(self, pkg: MakefilePackage, spec: Spec, prefix: Prefix) -> None: pass - def build( - self, pkg: MakefilePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run "make" on the build targets specified by the builder.""" - with fs.working_dir(self.build_directory): - pkg.module.make(*self.build_targets) - - def install( - self, pkg: MakefilePackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run "make" on the install targets specified by the builder.""" - with fs.working_dir(self.build_directory): - pkg.module.make(*self.install_targets) + def build(self, pkg: MakefilePackage, spec: Spec, prefix: Prefix) -> None: + pass - spack.phase_callbacks.run_after("build")(execute_build_time_tests) + def install(self, pkg: MakefilePackage, spec: Spec, prefix: Prefix) -> None: + pass def check(self) -> None: - """Run "make" on the ``test`` and ``check`` targets, if found.""" - with fs.working_dir(self.build_directory): - self.pkg._if_make_target_execute("test") - self.pkg._if_make_target_execute("check") - - spack.phase_callbacks.run_after("install")(execute_install_time_tests) + pass def installcheck(self) -> None: - """Searches the Makefile for an ``installcheck`` target - and runs it if found. 
- """ - with fs.working_dir(self.build_directory): - self.pkg._if_make_target_execute("installcheck") + pass - # On macOS, force rpaths for shared library IDs and remove duplicate rpaths - spack.phase_callbacks.run_after("install", when="platform=darwin")(apply_macos_rpath_fixups) + run_after("build")(execute_build_time_tests) + run_after("install")(execute_install_time_tests) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py index 36b380a547c7c9..5b5b7c07bccf6f 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py @@ -1,196 +1,51 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import os -from typing import Iterable -import spack.builder -import spack.package_base -import spack.phase_callbacks -import spack.spec -import spack.util.prefix -from spack.directives import build_system, depends_on, extends -from spack.install_test import SkipTest, test_part -from spack.llnl.util.filesystem import filter_file, find -from spack.llnl.util.lang import memoized -from spack.multimethod import when -from spack.util.executable import Executable +from spack.package import ( + PackageBase, + Prefix, + Spec, + build_system, + extends, + register_builder, + run_after, +) from ._checks import BuilderWithDefaults, execute_build_time_tests -class PerlPackage(spack.package_base.PackageBase): +class PerlPackage(PackageBase): """Specialized class for packages that are built using Perl.""" - #: This attribute is used in UI queries that need to know the build - #: system base class build_system_class = "PerlPackage" - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "perl" build_system("perl") - - with when("build_system=perl"): - extends("perl") - depends_on("gmake", type="build") - - @property - @memoized - def _platform_dir(self): - """Name of platform-specific module subdirectory.""" - perl = self.spec["perl"].command - options = "-E", "use Config; say $Config{archname}" - out = perl(*options, output=str.split, error=str.split) - return out.strip() - - @property - def use_modules(self) -> Iterable[str]: - """Names of the package's perl modules.""" - module_files = find(self.prefix.lib, ["*.pm"], recursive=True) - - # Drop the platform directory, if present - if self._platform_dir: - platform_dir = self._platform_dir + os.sep - module_files = [m.replace(platform_dir, "") for m in module_files] - - # Drop the extension and library path - prefix = self.prefix.lib + os.sep - modules = [os.path.splitext(m)[0].replace(prefix, "") for m in module_files] - - # Drop the perl subdirectory as well - return ["::".join(m.split(os.sep)[1:]) for m in modules] - - @property - def skip_modules(self) -> Iterable[str]: - """Names of modules that should be skipped when running tests. - - These are a subset of use_modules. - - Returns: - List of strings of module names. 
- """ - return [] + extends("perl", when="build_system=perl") def test_use(self): - """Test 'use module'""" - if not self.use_modules: - raise SkipTest("Test requires use_modules package property.") - - perl = self.spec["perl"].command - for module in self.use_modules: - if module in self.skip_modules: - continue - - with test_part(self, f"test_use-{module}", purpose=f"checking use of {module}"): - options = ["-we", f'use strict; use {module}; print("OK\n")'] - out = perl(*options, output=str.split, error=str.split) - assert "OK" in out + pass -@spack.builder.register_builder("perl") +@register_builder("perl") class PerlBuilder(BuilderWithDefaults): - """The perl builder provides four phases that can be overridden, if required: - - 1. :py:meth:`~.PerlBuilder.configure` - 2. :py:meth:`~.PerlBuilder.build` - 3. :py:meth:`~.PerlBuilder.check` - 4. :py:meth:`~.PerlBuilder.install` - - The default methods use, in order of preference: - (1) Makefile.PL, - (2) Build.PL. - - Some packages may need to override :py:meth:`~.PerlBuilder.configure_args`, - which produces a list of arguments for :py:meth:`~.PerlBuilder.configure`. - - Arguments should not include the installation base directory. - """ - - #: Phases of a Perl package phases = ("configure", "build", "install") - - #: Names associated with package methods in the old build-system format - package_methods = ("configure_args", "check", "test_use") - - #: Names associated with package attributes in the old build-system format + package_methods = ("check", "test_use") package_attributes = () - - #: Callback names for build-time test build_time_test_callbacks = ["check"] - @property - def build_method(self): - """Searches the package for either a Makefile.PL or Build.PL. - - Raises: - RuntimeError: if neither Makefile.PL nor Build.PL exist - """ - if os.path.isfile("Makefile.PL"): - build_method = "Makefile.PL" - elif os.path.isfile("Build.PL"): - build_method = "Build.PL" - else: - raise RuntimeError("Unknown build_method for perl package") - return build_method - - @property - def build_executable(self): - """Returns the executable method to build the perl package""" - if self.build_method == "Makefile.PL": - build_executable = self.pkg.module.make - elif self.build_method == "Build.PL": - build_executable = Executable(os.path.join(self.pkg.stage.source_path, "Build")) - return build_executable + def configure(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None: + pass - def configure_args(self): - """List of arguments passed to :py:meth:`~.PerlBuilder.configure`. + def build(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None: + pass - Arguments should not include the installation base directory, which - is prepended automatically. - """ - return [] - - def configure( - self, pkg: PerlPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Run Makefile.PL or Build.PL with arguments consisting of - an appropriate installation base directory followed by the - list returned by :py:meth:`~.PerlBuilder.configure_args`. - """ - if self.build_method == "Makefile.PL": - options = ["Makefile.PL", "INSTALL_BASE={0}".format(prefix)] - elif self.build_method == "Build.PL": - options = ["Build.PL", "--install_base", prefix] - options += self.configure_args() - - pkg.module.perl(*options) - - # It is possible that the shebang in the Build script that is created from - # Build.PL may be too long causing the build to fail. 
Patching the shebang - # does not happen until after install so set '/usr/bin/env perl' here in - # the Build script. - @spack.phase_callbacks.run_after("configure") - def fix_shebang(self): - if self.build_method == "Build.PL": - pattern = "#!{0}".format(self.spec["perl"].command.path) - repl = "#!/usr/bin/env perl" - filter_file(pattern, repl, "Build", backup=False) - - def build( - self, pkg: PerlPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Builds a Perl package.""" - self.build_executable() - - # Ensure that tests run after build (if requested): - spack.phase_callbacks.run_after("build")(execute_build_time_tests) + def install(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None: + pass def check(self): - """Runs built-in tests of a Perl package.""" - self.build_executable("test") + pass - def install( - self, pkg: PerlPackage, spec: spack.spec.Spec, prefix: spack.util.prefix.Prefix - ) -> None: - """Installs a Perl package.""" - self.build_executable("install") + # Ensure that tests run after build (if requested): + run_after("build")(execute_build_time_tests) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py index d1134238423fa3..3e39fb70e72fd2 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py @@ -2,456 +2,45 @@ # # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import functools -import operator -import os -import re -import shutil -import stat -from typing import Dict, Iterable, List, Mapping, Optional, Tuple - -import spack.builder -import spack.llnl.util.filesystem as fs -import spack.llnl.util.tty as tty -import spack.multimethod -import spack.package_base -import spack.phase_callbacks -import spack.spec -import spack.util.prefix -from spack.directives import build_system, depends_on, extends -from spack.error import NoHeadersError, NoLibrariesError -from spack.install_test import test_part -from spack.llnl.util.filesystem import HeaderList, LibraryList, join_path -from spack.llnl.util.lang import ClassProperty, classproperty, match_predicate -from spack.spec import Spec -from spack.util.prefix import Prefix +from spack.package import ( + PackageBase, + Prefix, + Spec, + build_system, + extends, + register_builder, + run_after, +) from ._checks import BuilderWithDefaults, execute_install_time_tests -def _flatten_dict(dictionary: Mapping[str, object]) -> Iterable[str]: - """Iterable that yields KEY=VALUE paths through a dictionary. - - Args: - dictionary: Possibly nested dictionary of arbitrary keys and values. - - Yields: - A single path through the dictionary. - """ - for key, item in dictionary.items(): - if isinstance(item, dict): - # Recursive case - for value in _flatten_dict(item): - yield f"{key}={value}" - else: - # Base case - yield f"{key}={item}" - - -class PythonExtension(spack.package_base.PackageBase): - @property - def import_modules(self) -> Iterable[str]: - """Names of modules that the Python package provides. - - These are used to test whether or not the installation succeeded. - These names generally come from running: - - .. code-block:: python - - >> import setuptools - >> setuptools.find_packages() - - in the source tarball directory. If the module names are incorrectly - detected, this property can be overridden by the package. - - Returns: - List of strings of module names. 
- """ - modules = [] - pkg = self.spec["python"].package - - # Packages may be installed in platform-specific or platform-independent - # site-packages directories - for directory in {pkg.platlib, pkg.purelib}: - root = os.path.join(self.prefix, directory) - - # Some Python libraries are packages: collections of modules - # distributed in directories containing __init__.py files - for path in fs.find(root, "__init__.py", recursive=True): - modules.append( - path.replace(root + os.sep, "", 1) - .replace(os.sep + "__init__.py", "") - .replace("/", ".") - ) - - # Some Python libraries are modules: individual *.py files - # found in the site-packages directory - for path in fs.find(root, "*.py", recursive=False): - modules.append( - path.replace(root + os.sep, "", 1).replace(".py", "").replace("/", ".") - ) - - modules = [ - mod - for mod in modules - if re.match("[a-zA-Z0-9._]+$", mod) and not any(map(mod.startswith, self.skip_modules)) - ] - - tty.debug("Detected the following modules: {0}".format(modules)) - - return modules - - @property - def skip_modules(self) -> Iterable[str]: - """Names of modules that should be skipped when running tests. - - These are a subset of import_modules. If a module has submodules, - they are skipped as well (meaning a.b is skipped if a is contained). - - Returns: - List of strings of module names. - """ - return [] - - @property - def bindir(self) -> str: - """Path to Python package's bindir, bin on unix like OS's Scripts on Windows""" - windows = self.spec.satisfies("platform=windows") - return join_path(self.spec.prefix, "Scripts" if windows else "bin") - - def view_file_conflicts(self, view, merge_map): - """Report all file conflicts, excepting special cases for python. - Specifically, this does not report errors for duplicate - __init__.py files for packages in the same namespace. - """ - conflicts = list(dst for src, dst in merge_map.items() if os.path.exists(dst)) - - if conflicts and self.py_namespace: - ext_map = view.extensions_layout.extension_map(self.extendee_spec) - namespaces = set(x.package.py_namespace for x in ext_map.values()) - namespace_re = r"site-packages/{0}/__init__.py".format(self.py_namespace) - find_namespace = match_predicate(namespace_re) - if self.py_namespace in namespaces: - conflicts = list(x for x in conflicts if not find_namespace(x)) - - return conflicts - - def add_files_to_view(self, view, merge_map, skip_if_exists=True): - # Patch up shebangs if the package extends Python and we put a Python interpreter in the - # view. - if not self.extendee_spec: - return super().add_files_to_view(view, merge_map, skip_if_exists) - - python, *_ = self.spec.dependencies("python-venv") or self.spec.dependencies("python") - - if python.external: - return super().add_files_to_view(view, merge_map, skip_if_exists) - - # We only patch shebangs in the bin directory. - copied_files: Dict[Tuple[int, int], str] = {} # File identifier -> source - delayed_links: List[Tuple[str, str]] = [] # List of symlinks from merge map - bin_dir = self.spec.prefix.bin - - for src, dst in merge_map.items(): - if skip_if_exists and os.path.lexists(dst): - continue - - if not fs.path_contains_subdirectory(src, bin_dir): - view.link(src, dst) - continue - - s = os.lstat(src) - - # Symlink is delayed because we may need to re-target if its target is copied in view - if stat.S_ISLNK(s.st_mode): - delayed_links.append((src, dst)) - continue - - # If it's executable and has a shebang, copy and patch it. 
- if (s.st_mode & 0b111) and fs.has_shebang(src): - copied_files[(s.st_dev, s.st_ino)] = dst - shutil.copy2(src, dst) - fs.filter_file( - python.prefix, os.path.abspath(view.get_projection_for_spec(self.spec)), dst - ) - else: - view.link(src, dst) - - # Finally re-target the symlinks that point to copied files. - for src, dst in delayed_links: - try: - s = os.stat(src) - target = copied_files[(s.st_dev, s.st_ino)] - except (OSError, KeyError): - target = None - if target: - os.symlink(os.path.relpath(target, os.path.dirname(dst)), dst) - else: - view.link(src, dst, spec=self.spec) - - def remove_files_from_view(self, view, merge_map): - ignore_namespace = False - if self.py_namespace: - ext_map = view.extensions_layout.extension_map(self.extendee_spec) - remaining_namespaces = set( - spec.package.py_namespace for name, spec in ext_map.items() if name != self.name - ) - if self.py_namespace in remaining_namespaces: - namespace_init = match_predicate( - r"site-packages/{0}/__init__.py".format(self.py_namespace) - ) - ignore_namespace = True - - bin_dir = self.spec.prefix.bin - - to_remove = [] - for src, dst in merge_map.items(): - if ignore_namespace and namespace_init(dst): - continue - - if not fs.path_contains_subdirectory(src, bin_dir): - to_remove.append(dst) - else: - os.remove(dst) - - view.remove_files(to_remove) - +class PythonExtension(PackageBase): def test_imports(self) -> None: - """Attempts to import modules of the installed package.""" - - # Make sure we are importing the installed modules, - # not the ones in the source directory - python = self.module.python - for module in self.import_modules: - with test_part( - self, - f"test_imports_{module}", - purpose=f"checking import of {module}", - work_dir="spack-test", - ): - python("-c", f"import {module}") - - -def _homepage(cls: "PythonPackage") -> Optional[str]: - """Get the homepage from PyPI if available.""" - if cls.pypi: - name = cls.pypi.split("/")[0] - return f"https://pypi.org/project/{name}/" - return None - - -def _url(cls: "PythonPackage") -> Optional[str]: - if cls.pypi: - return f"https://files.pythonhosted.org/packages/source/{cls.pypi[0]}/{cls.pypi}" - return None - - -def _list_url(cls: "PythonPackage") -> Optional[str]: - if cls.pypi: - name = cls.pypi.split("/")[0] - return f"https://pypi.org/simple/{name}/" - return None + pass class PythonPackage(PythonExtension): - """Specialized class for packages that are built using pip.""" - - #: Package name, version, and extension on PyPI - pypi: Optional[str] = None - - # To be used in UI queries that require to know which - # build-system class we are using build_system_class = "PythonPackage" - #: Legacy buildsystem attribute used to deserialize and install old specs default_buildsystem = "python_pip" - - #: Callback names for install-time test install_time_test_callbacks = ["test_imports"] build_system("python_pip") - - with spack.multimethod.when("build_system=python_pip"): - extends("python") - depends_on("py-pip", type="build") - # FIXME: technically wheel is only needed when building from source, not when - # installing a downloaded wheel, but I don't want to add wheel as a dep to every - # package manually - depends_on("py-wheel", type="build") - - py_namespace: Optional[str] = None - - homepage: ClassProperty[Optional[str]] = classproperty(_homepage) - url: ClassProperty[Optional[str]] = classproperty(_url) - list_url: ClassProperty[Optional[str]] = classproperty(_list_url) - - @property - def python_spec(self) -> Spec: - """Get python-venv if it exists or 
python otherwise.""" - python, *_ = self.spec.dependencies("python-venv") or self.spec.dependencies("python") - return python - - @property - def headers(self) -> HeaderList: - """Discover header files in platlib.""" - - # Remove py- prefix in package name - name = self.spec.name[3:] - - # Headers should only be in include or platlib, but no harm in checking purelib too - include = self.prefix.join(self.spec["python"].package.include).join(name) - python = self.python_spec - platlib = self.prefix.join(python.package.platlib).join(name) - purelib = self.prefix.join(python.package.purelib).join(name) - - headers_list = map(fs.find_all_headers, [include, platlib, purelib]) - headers = functools.reduce(operator.add, headers_list) - - if headers: - return headers - - msg = "Unable to locate {} headers in {}, {}, or {}" - raise NoHeadersError(msg.format(self.spec.name, include, platlib, purelib)) - - @property - def libs(self) -> LibraryList: - """Discover libraries in platlib.""" - - # Remove py- prefix in package name - name = self.spec.name[3:] - - # Libraries should only be in platlib, but no harm in checking purelib too - python = self.python_spec - platlib = self.prefix.join(python.package.platlib).join(name) - purelib = self.prefix.join(python.package.purelib).join(name) - - find_all_libraries = functools.partial(fs.find_all_libraries, recursive=True) - libs_list = map(find_all_libraries, [platlib, purelib]) - libs = functools.reduce(operator.add, libs_list) - - if libs: - return libs - - msg = "Unable to recursively locate {} libraries in {} or {}" - raise NoLibrariesError(msg.format(self.spec.name, platlib, purelib)) + extends("python", when="build_system=python_pip") -@spack.builder.register_builder("python_pip") +@register_builder("python_pip") class PythonPipBuilder(BuilderWithDefaults): phases = ("install",) - - #: Names associated with package methods in the old build-system format package_methods = ("test_imports",) - - #: Same as legacy_methods, but the signature is different - package_long_methods = ("install_options", "global_options", "config_settings") - - #: Names associated with package attributes in the old build-system format package_attributes = ("archive_files", "build_directory", "install_time_test_callbacks") - - #: Callback names for install-time test install_time_test_callbacks = ["test_imports"] - @staticmethod - def std_args(cls) -> List[str]: - return [ - # Verbose - "-vvv", - # Disable prompting for input - "--no-input", - # Disable the cache - "--no-cache-dir", - # Don't check to see if pip is up-to-date - "--disable-pip-version-check", - # Install packages - "install", - # Don't install package dependencies - "--no-deps", - # Overwrite existing packages - "--ignore-installed", - # Use env vars like PYTHONPATH - "--no-build-isolation", - # Don't warn that prefix.bin is not in PATH - "--no-warn-script-location", - # Ignore the PyPI package index - "--no-index", - ] - @property def build_directory(self) -> str: - """The root directory of the Python package. - - This is usually the directory containing one of the following files: - - * ``pyproject.toml`` - * ``setup.cfg`` - * ``setup.py`` - """ return self.pkg.stage.source_path - def config_settings(self, spec: Spec, prefix: Prefix) -> Mapping[str, object]: - """Configuration settings to be passed to the PEP 517 build backend. - - Requires pip 22.1 or newer for keys that appear only a single time, - or pip 23.1 or newer if the same key appears multiple times. - - Args: - spec: Build spec. 
- prefix: Installation prefix. - - Returns: - Possibly nested dictionary of KEY, VALUE settings. - """ - return {} - - def install_options(self, spec: Spec, prefix: Prefix) -> Iterable[str]: - """Extra arguments to be supplied to the setup.py install command. - - Requires pip 23.0 or older. - - Args: - spec: Build spec. - prefix: Installation prefix. - - Returns: - List of options. - """ - return [] - - def global_options(self, spec: Spec, prefix: Prefix) -> Iterable[str]: - """Extra global options to be supplied to the setup.py call before the install - or bdist_wheel command. - - Deprecated in pip 23.1. - - Args: - spec: Build spec. - prefix: Installation prefix. - - Returns: - List of options. - """ - return [] - def install(self, pkg: PythonPackage, spec: Spec, prefix: Prefix) -> None: - """Install everything from build directory.""" - pip = spec["python"].command - pip.add_default_arg("-m", "pip") - - args = PythonPipBuilder.std_args(pkg) + [f"--prefix={prefix}"] - - for setting in _flatten_dict(self.config_settings(spec, prefix)): - args.append(f"--config-settings={setting}") - for option in self.install_options(spec, prefix): - args.append(f"--install-option={option}") - for option in self.global_options(spec, prefix): - args.append(f"--global-option={option}") - - if pkg.stage.archive_file and pkg.stage.archive_file.endswith(".whl"): - args.append(pkg.stage.archive_file) - else: - args.append(".") - - with fs.working_dir(self.build_directory): - pip(*args) + pass - spack.phase_callbacks.run_after("install")(execute_install_time_tests) + run_after("install")(execute_install_time_tests) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceforge.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceforge.py index 56ec76b7f8ddfb..18301dd131de7c 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceforge.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceforge.py @@ -4,18 +4,11 @@ from typing import Optional -import spack.package_base -import spack.util.url +from spack.package import PackageBase, join_url -class SourceforgePackage(spack.package_base.PackageBase): - """Mixin that takes care of setting url and mirrors for Sourceforge - packages.""" - - #: Path of the package in a Sourceforge mirror +class SourceforgePackage(PackageBase): sourceforge_mirror_path: Optional[str] = None - - #: List of Sourceforge mirrors used by Spack base_mirrors = [ "https://prdownloads.sourceforge.net/", "https://freefr.dl.sourceforge.net/", @@ -27,14 +20,8 @@ class SourceforgePackage(spack.package_base.PackageBase): @property def urls(self): - self._ensure_sourceforge_mirror_path_is_set_or_raise() + if self.sourceforge_mirror_path is None: + raise AttributeError(f"{self.__class__.__name__}: `sourceforge_mirror_path` missing") return [ - spack.util.url.join(m, self.sourceforge_mirror_path, resolve_href=True) - for m in self.base_mirrors + join_url(m, self.sourceforge_mirror_path, resolve_href=True) for m in self.base_mirrors ] - - def _ensure_sourceforge_mirror_path_is_set_or_raise(self): - if self.sourceforge_mirror_path is None: - cls_name = type(self).__name__ - msg = "{0} must define a `sourceforge_mirror_path` attribute" " [none defined]" - raise AttributeError(msg.format(cls_name)) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceware.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceware.py index 35dd794d86e17e..5080d72a54020d 100644 --- 
a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceware.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceware.py @@ -3,18 +3,11 @@ # SPDX-License-Identifier: (Apache-2.0 OR MIT) from typing import Optional -import spack.package_base -import spack.util.url +from spack.package import PackageBase, join_url -class SourcewarePackage(spack.package_base.PackageBase): - """Mixin that takes care of setting url and mirrors for Sourceware.org - packages.""" - - #: Path of the package in a Sourceware mirror +class SourcewarePackage(PackageBase): sourceware_mirror_path: Optional[str] = None - - #: List of Sourceware mirrors used by Spack base_mirrors = [ "https://sourceware.org/pub/", "https://mirrors.kernel.org/sourceware/", @@ -23,14 +16,8 @@ class SourcewarePackage(spack.package_base.PackageBase): @property def urls(self): - self._ensure_sourceware_mirror_path_is_set_or_raise() + if self.sourceware_mirror_path is None: + raise AttributeError(f"{self.__class__.__name__}: `sourceware_mirror_path` missing") return [ - spack.util.url.join(m, self.sourceware_mirror_path, resolve_href=True) - for m in self.base_mirrors + join_url(m, self.sourceware_mirror_path, resolve_href=True) for m in self.base_mirrors ] - - def _ensure_sourceware_mirror_path_is_set_or_raise(self): - if self.sourceware_mirror_path is None: - cls_name = type(self).__name__ - msg = "{0} must define a `sourceware_mirror_path` attribute" " [none defined]" - raise AttributeError(msg.format(cls_name)) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/xorg.py b/var/spack/test_repos/spack_repo/builtin_mock/build_systems/xorg.py deleted file mode 100644 index 131444cbb5e4b8..00000000000000 --- a/var/spack/test_repos/spack_repo/builtin_mock/build_systems/xorg.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from typing import Optional - -import spack.package_base -import spack.util.url - - -class XorgPackage(spack.package_base.PackageBase): - """Mixin that takes care of setting url and mirrors for x.org - packages.""" - - #: Path of the package in a x.org mirror - xorg_mirror_path: Optional[str] = None - - #: List of x.org mirrors used by Spack - # Note: x.org mirrors are a bit tricky, since many are out-of-sync or off. - # A good package to test with is `util-macros`, which had a "recent" - # release. 
- base_mirrors = [ - "https://www.x.org/archive/individual/", - "https://mirrors.ircam.fr/pub/x.org/individual/", - "https://mirror.transip.net/xorg/individual/", - "ftp://ftp.freedesktop.org/pub/xorg/individual/", - "http://xorg.mirrors.pair.com/individual/", - ] - - @property - def urls(self): - self._ensure_xorg_mirror_path_is_set_or_raise() - return [ - spack.util.url.join(m, self.xorg_mirror_path, resolve_href=True) - for m in self.base_mirrors - ] - - def _ensure_xorg_mirror_path_is_set_or_raise(self): - if self.xorg_mirror_path is None: - cls_name = type(self).__name__ - msg = "{0} must define a `xorg_mirror_path` attribute" " [none defined]" - raise AttributeError(msg.format(cls_name)) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/_7zip_dependent/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/_7zip_dependent/package.py new file mode 100644 index 00000000000000..26f991bc3181a2 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/_7zip_dependent/package.py @@ -0,0 +1,17 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack_repo.builtin_mock.build_systems.autotools import AutotoolsPackage + +from spack.package import * + + +class _7zipDependent(AutotoolsPackage): + """A dependent of 7zip, that also needs gmake""" + + homepage = "http://www.example.com" + url = "http://www.example.com/a-1.0.tar.gz" + + version("1.0", md5="0123456789abcdef0123456789abcdef") + + depends_on("7zip") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/callpath/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/callpath/package.py index 8e9ba8ed551caf..4fb870f120fba8 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/callpath/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/callpath/package.py @@ -16,6 +16,7 @@ class Callpath(Package): version("1.0", md5="0123456789abcdef0123456789abcdef") depends_on("c", type="build") + depends_on("cxx", type="build") depends_on("dyninst") depends_on("mpi") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/canfail/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/canfail/package.py index b3ee80203010f9..329a3c35962a8c 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/canfail/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/canfail/package.py @@ -1,8 +1,6 @@ # Copyright Spack Project Developers. See COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) -import os - from spack_repo.builtin_mock.build_systems.generic import Package from spack.package import * @@ -13,20 +11,10 @@ class Canfail(Package): homepage = "http://www.example.com" url = "http://www.example.com/a-1.0.tar.gz" + succeed = True version("1.0", md5="0123456789abcdef0123456789abcdef") - def set_install_succeed(self): - os.environ["CANFAIL_SUCCEED"] = "1" - - def set_install_fail(self): - os.environ.pop("CANFAIL_SUCCEED", None) - - @property - def succeed(self): - result = True if "CANFAIL_SUCCEED" in os.environ else False - return result - def install(self, spec, prefix): if not self.succeed: raise InstallError("'succeed' was false") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond/package.py index 759489bd524c51..5f9c7bd9d9bfd6 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond/package.py @@ -19,3 +19,4 @@ class DtDiamond(Package): depends_on("dt-diamond-right") depends_on("c", type="build") + depends_on("cxx", type="build") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/dual_cmake_autotools/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/dual_cmake_autotools/package.py new file mode 100644 index 00000000000000..6fa4632dd04715 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/dual_cmake_autotools/package.py @@ -0,0 +1,28 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack_repo.builtin_mock.build_systems.autotools import AutotoolsPackage +from spack_repo.builtin_mock.build_systems.cmake import CMakePackage + +from spack.package import * + + +class DualCmakeAutotools(AutotoolsPackage, CMakePackage): + """Package with two build systems.""" + + homepage = "http://www.example.com" + url = "http://www.example.com/dual-cmake-autotools-1.0.tar.gz" + + version("1.0") + build_system("autotools", "cmake", default="autotools") + variant( + "generator", + default="make", + values=("make", "ninja"), + description="the build system generator to use", + when="build_system=cmake", + ) + + with when("build_system=cmake"): + depends_on("cmake@3.5.1:", type="build") + depends_on("cmake@3.14.0:", type="build", when="@2.1.0:") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/gcc/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/gcc/package.py index d3cd5da2158f32..a433f6d403b611 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/gcc/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/gcc/package.py @@ -15,11 +15,19 @@ class Gcc(CompilerPackage, Package): homepage = "http://www.example.com" url = "http://www.example.com/gcc-1.0.tar.gz" + version("14.0.1", md5="abcdef0123456789abcdef0123456789") version("14.0", md5="abcdef0123456789abcdef0123456789") + version("12.1.0", md5="abcdef0123456789abcdef0123456789") + version("10.2.1", md5="abcdef0123456789abcdef0123456789") + version("9.4.1", md5="abcdef0123456789abcdef0123456789") + version("9.4.0", md5="abcdef0123456789abcdef0123456789") version("3.0", md5="def0123456789abcdef0123456789abc") version("2.0", md5="abcdef0123456789abcdef0123456789") version("1.0", md5="0123456789abcdef0123456789abcdef") + with default_args(deprecated=True): + version("12.4.0", 
md5="abcdef0123456789abcdef0123456789") + variant( "languages", default="c,c++,fortran", diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/git_sparsepaths_version/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/git_sparsepaths_version/package.py new file mode 100644 index 00000000000000..10f1017414a168 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/git_sparsepaths_version/package.py @@ -0,0 +1,17 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack_repo.builtin_mock.build_systems.generic import Package + +from spack.package import * + + +class GitSparsepathsVersion(Package): + """Mock package with git_sparse_paths attribute""" + + homepage = "http://www.git-fetch-example.com" + git = "https://a/really.com/big/repo.git" + + version("1.0", tag="v1.0", git_sparse_paths=["foo", "bar"]) + version("0.9", tag="v0.9") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/git_test_commit/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/git_test_commit/package.py index e6e64f186cda40..2d87e5d4b8b0f8 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/git_test_commit/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/git_test_commit/package.py @@ -21,6 +21,7 @@ class GitTestCommit(Package): # -- only mock_git_repository # (session scope) version("tag", tag="test-tag") + version("annotated-tag", tag="annotated-tag") # ---------------------------- # -- only mock_git_version_info below # (function scope) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/gmake/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/gmake/package.py index 464e6ea9da989d..7f7266180e4203 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/gmake/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/gmake/package.py @@ -13,6 +13,8 @@ class Gmake(Package): homepage = "https://www.gnu.org/software/make" url = "https://ftpmirror.gnu.org/make/make-4.4.tar.gz" + tags = ["build-tools"] + version("4.4", sha256="ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed") version("3.0", sha256="ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/licenses_1/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/licenses_1/package.py index b3cd11ebab3a5a..a4ace8de9a23bd 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/licenses_1/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/licenses_1/package.py @@ -17,3 +17,5 @@ class Licenses1(Package): license("Apache-2.0", when="~foo") version("1.0", md5="0123456789abcdef0123456789abcdef") + + variant("foo", default=True, description="toggle license") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/long_boost_dependency/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/long_boost_dependency/package.py new file mode 100644 index 00000000000000..61de156bf94cd4 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/long_boost_dependency/package.py @@ -0,0 +1,18 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack.package import * + + +class LongBoostDependency(Package): + """Package with a long boost dependency spec behind a variant""" + + homepage = "http://www.example.com" + url = "http://www.example.com/a-1.0.tar.gz" + + version("1.0") + + variant("longdep", description="enable the long boost dependency spec", default=True) + + depends_on("boost+atomic+chrono+date_time+filesystem+graph+iostreams+locale", when="+longdep") + depends_on("boost", when="~longdep") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/many_conditional_deps/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/many_conditional_deps/package.py new file mode 100644 index 00000000000000..65dc672899c8ad --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/many_conditional_deps/package.py @@ -0,0 +1,22 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack.package import * + + +class ManyConditionalDeps(Package): + """Package with many conditional GPU dependencies""" + + homepage = "http://www.example.com" + url = "http://www.example.com/a-1.0.tar.gz" + + version("1.0") + + variant("cuda", description="enable cuda-conditional dependencies", default=True) + variant("rocm", description="enable rocm-conditional dependencies", default=True) + + for i in range(30): + depends_on(f"gpu-dep +cuda cuda_arch={i}", when=f"+cuda cuda_arch={i}") + + for i in range(30): + depends_on(f"gpu-dep +rocm amdgpu_target={i}", when=f"+rocm amdgpu_target={i}") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg/package.py deleted file mode 100644 index c1daa129bdd9de..00000000000000 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg/package.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details. -# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack_repo.builtin_mock.build_systems.autotools import AutotoolsPackage -from spack_repo.builtin_mock.build_systems.xorg import XorgPackage - -from spack.package import * - - -class MirrorXorg(AutotoolsPackage, XorgPackage): - """Simple x.org package""" - - homepage = "http://cgit.freedesktop.org/xorg/util/macros/" - xorg_mirror_path = "util/util-macros-1.19.1.tar.bz2" - - version("1.19.1", sha256="18d459400558f4ea99527bc9786c033965a3db45bf4c6a32eefdc07aa9e306a6") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg_broken/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg_broken/package.py deleted file mode 100644 index b3eca014025d82..00000000000000 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_xorg_broken/package.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright Spack Project Developers. See COPYRIGHT file for details.
-# -# SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack_repo.builtin_mock.build_systems.autotools import AutotoolsPackage -from spack_repo.builtin_mock.build_systems.xorg import XorgPackage - -from spack.package import * - - -class MirrorXorgBroken(AutotoolsPackage, XorgPackage): - """Simple x.org package""" - - homepage = "http://cgit.freedesktop.org/xorg/util/macros/" - url = "https://www.x.org/archive/individual/util/util-macros-1.19.1.tar.bz2" - - version("1.19.1", sha256="18d459400558f4ea99527bc9786c033965a3db45bf4c6a32eefdc07aa9e306a6") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/mixing_parent/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/mixing_parent/package.py new file mode 100644 index 00000000000000..cce56c8ad5932b --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/mixing_parent/package.py @@ -0,0 +1,18 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack.package import * + + +class MixingParent(Package): + + homepage = "http://www.example.com" + url = "http://www.example.com/a-1.0.tar.gz" + + version("1.0", md5="0123456789abcdef0123456789abcdef") + version("2.0", md5="abcdef0123456789abcdef0123456789") + + depends_on("c", type="build") + depends_on("libdwarf") + depends_on("cmake", type="build") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/mpileaks/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/mpileaks/package.py index 02e4a38ddd9078..a0943b2f3b713c 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/mpileaks/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/mpileaks/package.py @@ -22,11 +22,14 @@ class Mpileaks(Package): variant("opt", default=False, description="Optimized variant") variant("shared", default=True, description="Build shared library") variant("static", default=True, description="Build static library") + variant("fortran", default=False, description="Enable fortran API") depends_on("mpi") depends_on("callpath") depends_on("c", type="build") + depends_on("cxx", type="build") + depends_on("fortran", type="build", when="+fortran") # Will be used to try raising an exception libs = None diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/mvapich2/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/mvapich2/package.py index ae6989292dc64f..e45b08fddb5d03 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/mvapich2/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/mvapich2/package.py @@ -17,3 +17,6 @@ class Mvapich2(Package): description="List of the ROMIO file systems to activate", values=auto_or_any_combination_of("lustre", "gpfs", "nfs", "ufs"), ) + variant("noauto", default=False, description="Adds a conflict with 'auto' for tests") + + conflicts("file_systems=auto", when="+noauto") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/openblas/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/openblas/package.py index 3e31689a9818be..3745b0f41d7ff5 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/openblas/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/openblas/package.py @@ -21,6 +21,7 @@ class Openblas(Package): variant("shared", default=True, description="Build shared libraries") depends_on("c", type="build") + depends_on("fortran", type="build") # See #20019 for this conflict 
conflicts("%gcc@:4.4", when="@0.2.14:") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/package_base_extendee/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/package_base_extendee/package.py new file mode 100644 index 00000000000000..8aebd4919bf423 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/package_base_extendee/package.py @@ -0,0 +1,13 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack.package import * + + +class PackageBaseExtendee(PackageBase): + """Simple package with one optional dependency""" + + homepage = "http://www.example.com" + url = "http://www.example.com/a-1.0.tar.gz" + + version("1.0") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/patch_several_dependencies/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/patch_several_dependencies/package.py index 31c114b29e1dce..ded1209b307178 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/patch_several_dependencies/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/patch_several_dependencies/package.py @@ -22,7 +22,11 @@ class PatchSeveralDependencies(Package): # single patch file in repo depends_on("libelf", patches="foo.patch") - depends_on("libelf@0.8.10", patches="foo.patch", when="+foo") + # The following 3 directives are all under the same when clause, to be combined in + # the metadata for this package class + depends_on("libelf@0.8.10", patches="foo.patch", type="link", when="+foo") + depends_on("libelf", type="build", when="+foo") + depends_on("libelf@0.8:", when="+foo") # using a list of patches in one depends_on depends_on( @@ -31,8 +35,8 @@ class PatchSeveralDependencies(Package): patch("bar.patch"), # nested patch directive patch("baz.patch", when="@20111030"), # and with a conditional ], - when="@1.0", - ) # with a depends_on conditional + when="@1.0", # with a depends_on conditional + ) # URL patches depends_on( diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/py_numpy/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/py_numpy/package.py new file mode 100644 index 00000000000000..884442a427851e --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/py_numpy/package.py @@ -0,0 +1,23 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack_repo.builtin_mock.build_systems.generic import Package +from spack_repo.builtin_mock.build_systems.python import PythonExtension + +from spack.package import * + + +class PyNumpy(Package, PythonExtension): + """A package which extends python, depends on C and C++, and has a pure build dependency""" + + homepage = "http://www.example.com" + url = "http://www.example.com/py-numpy-1.0.tar.gz" + + version("2.3.4", md5="00000000000000000000000000000120") + + extends("python") + + depends_on("c", type="build") + depends_on("cxx", type="build") + + depends_on("cmake", type="build") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/simple_resource/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/simple_resource/package.py new file mode 100644 index 00000000000000..336306cc6fe930 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/simple_resource/package.py @@ -0,0 +1,22 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack_repo.builtin_mock.build_systems.generic import Package + +from spack.package import * + + +class SimpleResource(Package): + url = "http://example.com/source-1.0.tgz" + + version("1.0", sha256="1111111111111111111111111111111111111111111111111111111111111111") + + resource( + name="sample-resource", + url="https://example.com/resource.tgz", + checksum="2222222222222222222222222222222222222222222222222222222222222222", + when="@1.0", + placement="resource-dst", + expand="True", + ) diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/trigger_and_effect_deps/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/trigger_and_effect_deps/package.py new file mode 100644 index 00000000000000..fdb542ef9ed1c9 --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/trigger_and_effect_deps/package.py @@ -0,0 +1,25 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack_repo.builtin_mock.build_systems.generic import Package + +from spack.package import * + + +class TriggerAndEffectDeps(Package): + """Package used to see if triggers and effects for dependencies are emitted correctly.""" + + homepage = "http://www.example.com" + url = "http://www.example.com/patch-a-dependency-1.0.tar.gz" + version("1.0", sha256="0000000000000000000000000000000000000000000000000000000000000000") + variant("x", default=False, description="x") + variant("y", default=False, description="y") + + with when("+x"): + depends_on("pkg-a", type="link") + depends_on("pkg-b", type="link") + + with when("+y"): + depends_on("pkg-a", type="run") + depends_on("pkg-b", type="run") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/trilinos/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/trilinos/package.py new file mode 100644 index 00000000000000..f4e204343e76af --- /dev/null +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/trilinos/package.py @@ -0,0 +1,35 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) +from spack.package import * + + +class Trilinos(Package): + """A package which has pure build dependencies, run dependencies, and link dependencies.""" + + homepage = "http://www.example.com" + url = "http://www.example.com/trilinos-1.0.tar.gz" + + version("16.1.0", md5="00000000000000000000000000000120") + + depends_on("c", type="build") + depends_on("cxx", type="build") + + depends_on("cmake", type="build") + + depends_on("py-numpy", type="run") + + depends_on("mpi") + depends_on("callpath") + + # The variant default value cannot be taken by the default version of the package + variant("disable17", default=False, description="Disable support for C++17") + variant( + "cxxstd", + default="14", + description="C++ standard", + values=["14", "17", "20", "23"], + multi=False, + ) + conflicts("cxxstd=14", when="@16:") + conflicts("cxxstd=17", when="+disable17") diff --git a/var/spack/test_repos/spack_repo/builtin_mock/packages/with_constraint_met/package.py b/var/spack/test_repos/spack_repo/builtin_mock/packages/with_constraint_met/package.py index 024c4d08005b50..3501d1214125af 100644 --- a/var/spack/test_repos/spack_repo/builtin_mock/packages/with_constraint_met/package.py +++ b/var/spack/test_repos/spack_repo/builtin_mock/packages/with_constraint_met/package.py @@ -24,3 +24,22 @@ class WithConstraintMet(Package): with when("@0.14: ^pkg-b@:4.0"): depends_on("pkg-c", when="@:15 ^pkg-b@3.8:") + + # Direct dependency in a "when" context manager + with when("%pkg-b"): + depends_on("pkg-e") + + # More complex dependency with nested contexts + with when("%pkg-c"): + with when("@2 %pkg-b@:4.0"): + depends_on("pkg-e", when="%c=gcc") + + # Nested ^pkg-c followed by %pkg-c + with when("^pkg-c"): + with when("%pkg-c"): + depends_on("pkg-e") + + # Nested ^pkg-c followed by ^pkg-c %gcc + with when("^pkg-c"): + with when("^pkg-c %gcc"): + depends_on("pkg-e") diff --git a/var/spack/vendoring/patches/altgraph-version.patch b/var/spack/vendoring/patches/altgraph-version.patch new file mode 100644 index 00000000000000..09fcec9ab470b3 --- /dev/null +++ b/var/spack/vendoring/patches/altgraph-version.patch @@ -0,0 +1,15 @@ +diff --git a/lib/spack/spack/vendor/altgraph/__init__.py b/lib/spack/spack/vendor/altgraph/__init__.py +index 45ce7bfe5f8..0fb21d77884 100644 +--- a/lib/spack/spack/vendor/altgraph/__init__.py ++++ b/lib/spack/spack/vendor/altgraph/__init__.py +@@ -139,9 +139,8 @@ + @contributor: U{Reka Albert } + + """ +-import pkg_resources + +-__version__ = pkg_resources.require("altgraph")[0].version ++__version__ = "0.17.3" + + + class GraphError(ValueError): diff --git a/var/spack/vendoring/patches/ruamelyaml.patch b/var/spack/vendoring/patches/ruamelyaml.patch index cb83c94f0bd12c..7564aa150e600d 100644 --- a/var/spack/vendoring/patches/ruamelyaml.patch +++ b/var/spack/vendoring/patches/ruamelyaml.patch @@ -1,5 +1,5 @@ diff --git a/lib/spack/spack/vendor/ruamel/yaml/comments.py b/lib/spack/spack/vendor/ruamel/yaml/comments.py -index 1badeda585..892c868af3 100644 +index cf121823a3..dae5e10750 100644 --- a/lib/spack/spack/vendor/ruamel/yaml/comments.py +++ b/lib/spack/spack/vendor/ruamel/yaml/comments.py @@ -497,7 +497,7 @@ def copy_attributes(self, t, memo=None): @@ -11,3 +11,12 @@ index 1badeda585..892c868af3 100644 else: setattr(t, a, getattr(self, a)) # fmt: on +@@ -628,7 +628,7 @@ def __deepcopy__(self, memo): + memo[id(self)] = res + for k in self: + res.append(copy.deepcopy(k, memo)) +- self.copy_attributes(res, memo=memo) ++ 
self.copy_attributes(res, memo=memo) + return res + + def __add__(self, other): diff --git a/var/spack/vendoring/vendor.txt b/var/spack/vendoring/vendor.txt index d491b72862f2a7..9411efb2ced4ea 100644 --- a/var/spack/vendoring/vendor.txt +++ b/var/spack/vendoring/vendor.txt @@ -9,4 +9,4 @@ macholib==1.16.2 altgraph==0.17.3 ruamel.yaml==0.17.21 typing_extensions==4.1.1 -archspec @ git+https://github.com/archspec/archspec.git@77f3f81df3dd80b7e538e2e41bc4485fbec2dbaa +archspec @ git+https://github.com/archspec/archspec.git@0aec32368faa199fe3e6a549207ceffad78600cb
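
Note on the pattern this diff applies: every mock build system above is reduced to the same shape — the package class imports its whole API from spack.package, declares build_system(...), and a builder registered with register_builder implements the phases, attaching test callbacks via run_after. The following is a minimal sketch of that shape, modeled on the perl and python builders above; the generic_mock name and the GenericMock* classes are hypothetical, for illustration only.

from spack.package import (
    PackageBase,
    Prefix,
    Spec,
    build_system,
    register_builder,
    run_after,
)

from ._checks import BuilderWithDefaults, execute_install_time_tests


class GenericMockPackage(PackageBase):
    """Hypothetical package reduced to the new-style mock layout."""

    # Build-system base class name used in UI queries
    build_system_class = "GenericMockPackage"
    # Legacy buildsystem attribute used to deserialize and install old specs
    default_buildsystem = "generic_mock"

    build_system("generic_mock")


@register_builder("generic_mock")
class GenericMockBuilder(BuilderWithDefaults):
    # Single install phase, mirroring PythonPipBuilder above
    phases = ("install",)

    def install(self, pkg: GenericMockPackage, spec: Spec, prefix: Prefix) -> None:
        pass

    # Ensure that tests run after install (if requested):
    run_after("install")(execute_install_time_tests)

The concrete classes in this diff differ only in their phases and directives; mixins such as SourceforgePackage and SourcewarePackage follow the same import move without registering a builder.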