From 30243461677c6272f697828b52e525cc972ece92 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 12:33:34 +0200 Subject: [PATCH 01/26] Added Resolution of Identity and Fixed some Spinintegration bugs --- adcgen/__init__.py | 2 + adcgen/expression/object_container.py | 7 + adcgen/func.py | 5 +- adcgen/generate_code/config.json | 3 +- adcgen/generate_code/contraction.py | 2 + adcgen/indices.py | 10 +- adcgen/resolution_of_identity.py | 77 + adcgen/simplify.py | 2 +- adcgen/spatial_orbitals.py | 12 +- adcgen/sympy_objects.py | 8 +- adcgen/tensor_names.json | 5 +- adcgen/tensor_names.py | 3 + build/lib/adcgen/__init__.py | 51 + build/lib/adcgen/core_valence_separation.py | 421 ++++ build/lib/adcgen/derivative.py | 103 + build/lib/adcgen/eri_orbenergy.py | 615 ++++++ build/lib/adcgen/expression/__init__.py | 12 + build/lib/adcgen/expression/container.py | 228 +++ build/lib/adcgen/expression/expr_container.py | 555 +++++ .../expression/normal_ordered_container.py | 199 ++ .../lib/adcgen/expression/object_container.py | 879 ++++++++ .../adcgen/expression/polynom_container.py | 282 +++ build/lib/adcgen/expression/term_container.py | 770 +++++++ build/lib/adcgen/factor_intermediates.py | 1813 +++++++++++++++++ build/lib/adcgen/func.py | 540 +++++ build/lib/adcgen/generate_code/__init__.py | 8 + build/lib/adcgen/generate_code/config.json | 8 + build/lib/adcgen/generate_code/contraction.py | 250 +++ .../lib/adcgen/generate_code/generate_code.py | 390 ++++ .../generate_code/optimize_contractions.py | 329 +++ build/lib/adcgen/groundstate.py | 476 +++++ build/lib/adcgen/indices.py | 527 +++++ build/lib/adcgen/intermediate_states.py | 598 ++++++ build/lib/adcgen/intermediates.py | 1663 +++++++++++++++ build/lib/adcgen/logger.py | 63 + build/lib/adcgen/logger_config.json | 40 + build/lib/adcgen/misc.py | 116 ++ build/lib/adcgen/operators.py | 213 ++ build/lib/adcgen/properties.py | 423 ++++ build/lib/adcgen/reduce_expr.py | 330 +++ 
build/lib/adcgen/resolution_of_identity.py | 71 + build/lib/adcgen/rules.py | 65 + build/lib/adcgen/secular_matrix.py | 436 ++++ build/lib/adcgen/simplify.py | 765 +++++++ build/lib/adcgen/sort_expr.py | 382 ++++ build/lib/adcgen/spatial_orbitals.py | 443 ++++ build/lib/adcgen/symmetry.py | 368 ++++ build/lib/adcgen/sympy_objects.py | 399 ++++ build/lib/adcgen/tensor_names.json | 15 + build/lib/adcgen/tensor_names.py | 225 ++ tests/contraction_test.py | 25 +- tests/indices_test.py | 5 + tests/reference_data/generate_data.py | 45 + tests/reference_data/gs_energy.json | 4 +- tests/reference_data/isr_precursor.json | 16 +- .../reference_data/isr_precursor_overlap.json | 2 +- .../properties_expectation_value.json | 4 +- .../properties_trans_moment.json | 8 +- tests/reference_data/ri_gs_energy.json | 66 + tests/reference_data/secular_matrix.json | 4 +- tests/reference_data/spatial_gs_energy.json | 30 + tests/resolution_of_identity_test.py | 32 + 62 files changed, 15407 insertions(+), 41 deletions(-) create mode 100644 adcgen/resolution_of_identity.py create mode 100644 build/lib/adcgen/__init__.py create mode 100644 build/lib/adcgen/core_valence_separation.py create mode 100644 build/lib/adcgen/derivative.py create mode 100644 build/lib/adcgen/eri_orbenergy.py create mode 100644 build/lib/adcgen/expression/__init__.py create mode 100644 build/lib/adcgen/expression/container.py create mode 100644 build/lib/adcgen/expression/expr_container.py create mode 100644 build/lib/adcgen/expression/normal_ordered_container.py create mode 100644 build/lib/adcgen/expression/object_container.py create mode 100644 build/lib/adcgen/expression/polynom_container.py create mode 100644 build/lib/adcgen/expression/term_container.py create mode 100644 build/lib/adcgen/factor_intermediates.py create mode 100644 build/lib/adcgen/func.py create mode 100644 build/lib/adcgen/generate_code/__init__.py create mode 100644 build/lib/adcgen/generate_code/config.json create mode 100644 
build/lib/adcgen/generate_code/contraction.py create mode 100644 build/lib/adcgen/generate_code/generate_code.py create mode 100644 build/lib/adcgen/generate_code/optimize_contractions.py create mode 100644 build/lib/adcgen/groundstate.py create mode 100644 build/lib/adcgen/indices.py create mode 100644 build/lib/adcgen/intermediate_states.py create mode 100644 build/lib/adcgen/intermediates.py create mode 100644 build/lib/adcgen/logger.py create mode 100644 build/lib/adcgen/logger_config.json create mode 100644 build/lib/adcgen/misc.py create mode 100644 build/lib/adcgen/operators.py create mode 100644 build/lib/adcgen/properties.py create mode 100644 build/lib/adcgen/reduce_expr.py create mode 100644 build/lib/adcgen/resolution_of_identity.py create mode 100644 build/lib/adcgen/rules.py create mode 100644 build/lib/adcgen/secular_matrix.py create mode 100644 build/lib/adcgen/simplify.py create mode 100644 build/lib/adcgen/sort_expr.py create mode 100644 build/lib/adcgen/spatial_orbitals.py create mode 100644 build/lib/adcgen/symmetry.py create mode 100644 build/lib/adcgen/sympy_objects.py create mode 100644 build/lib/adcgen/tensor_names.json create mode 100644 build/lib/adcgen/tensor_names.py create mode 100644 tests/reference_data/ri_gs_energy.json create mode 100644 tests/reference_data/spatial_gs_energy.json create mode 100644 tests/resolution_of_identity_test.py diff --git a/adcgen/__init__.py b/adcgen/__init__.py index 3343cae..e62ee16 100644 --- a/adcgen/__init__.py +++ b/adcgen/__init__.py @@ -20,6 +20,7 @@ from .sympy_objects import (AntiSymmetricTensor, SymmetricTensor, Amplitude, NonSymmetricTensor, KroneckerDelta, SymbolicTensor) from .tensor_names import tensor_names +from .resolution_of_identity import apply_resolution_of_identity from . 
import sort_expr as sort @@ -35,6 +36,7 @@ "Intermediates", "reduce_expr", "factor_intermediates", "sort", "transform_to_spatial_orbitals", + "apply_resolution_of_identity" "apply_cvs_approximation", "generate_code", "optimize_contractions", "unoptimized_contraction", "Contraction", diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 3257e20..0c54abf 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -463,6 +463,11 @@ def allowed_spin_blocks(self) -> tuple[str, ...] | None: ])) elif name == tensor_names.coulomb: # ERI in chemist notation return ("aaaa", "aabb", "bbaa", "bbbb") + elif name in (tensor_names.ri_sym, tensor_names.ri_asym_eri, + tensor_names.ri_asym_factor): + return ("aaa", "abb") + elif name == tensor_names.fock: + return ("aa", "bb") elif isinstance(obj, KroneckerDelta): # delta # spins have to be equal return ("aa", "bb") @@ -517,6 +522,8 @@ def format_indices(indices) -> str: ), # coulomb integral chemist notation tensor_names.coulomb: lambda up, lo: f"({up}\\vert {lo})", + # 2e3c integral in asymmetric RI + tensor_names.ri_asym_eri: lambda up, lo: f"({up}\\vert {lo})", # orbital energy tensor_names.orb_energy: lambda _, lo: f"\\varepsilon_{{{lo}}}" } diff --git a/adcgen/func.py b/adcgen/func.py index ae8e5ad..ec5319b 100644 --- a/adcgen/func.py +++ b/adcgen/func.py @@ -159,7 +159,10 @@ def import_tensor(tensor: str) -> Expr: # ADC-Amplitude or t-amplitudes if is_adc_amplitude(name) or is_t_amplitude(name): base: Expr = Amplitude(name, upper, lower) - elif name == tensor_names.coulomb: # eri in chemist notation + elif name in (tensor_names.coulomb, tensor_names.ri_sym, + tensor_names.ri_asym_eri, + tensor_names.ri_asym_factor): + # eri in chemist notation or RI tensor base: Expr = SymmetricTensor(name, upper, lower) else: base: Expr = AntiSymmetricTensor(name, upper, lower) diff --git a/adcgen/generate_code/config.json b/adcgen/generate_code/config.json 
index 676fbe8..23780fa 100644 --- a/adcgen/generate_code/config.json +++ b/adcgen/generate_code/config.json @@ -2,6 +2,7 @@ "sizes": { "core": 5, "occ": 20, - "virt": 200 + "virt": 200, + "ri": 250 } } \ No newline at end of file diff --git a/adcgen/generate_code/contraction.py b/adcgen/generate_code/contraction.py index df9b957..2119288 100644 --- a/adcgen/generate_code/contraction.py +++ b/adcgen/generate_code/contraction.py @@ -23,6 +23,7 @@ class Sizes: occ: int = 0 virt: int = 0 general: int = 0 + ri: int = 0 @staticmethod def from_dict(input: dict[str, int]) -> "Sizes": @@ -232,6 +233,7 @@ class ScalingComponent: virt: int occ: int core: int + ri: int def evaluate_costs(self, sizes: Sizes) -> int: """ diff --git a/adcgen/indices.py b/adcgen/indices.py index a281292..dd1b34e 100644 --- a/adcgen/indices.py +++ b/adcgen/indices.py @@ -45,6 +45,8 @@ def space(self) -> str: return "virt" elif self.assumptions0.get("core"): return "core" + elif self.assumptions0.get("ri"): + return "ri" else: return "general" @@ -83,7 +85,7 @@ class Indices(metaclass=Singleton): # the valid spaces with their corresponding associated index names base = { "occ": "ijklmno", "virt": "abcdefgh", "general": "pqrstuvw", - "core": "IJKLMNO" + "core": "IJKLMNO", "ri": "PQRSTUVWXYZ" } # the valid spins spins = ("", "a", "b") @@ -244,6 +246,8 @@ def _new_symbol(self, name: str, space: str, spin: str) -> Index: assumptions["above_fermi"] = True elif space == "core": assumptions["core"] = True + elif space == "ri": + assumptions["ri"] = True elif space != "general": raise ValueError(f"Invalid space {space}") if spin: @@ -270,7 +274,7 @@ def sort_idx_canonical(idx: Index | Any): # - also add the hash here for wicks, where multiple i are around # - we have to map the spaces onto numbers, since in adcman and adcc # the ordering o < c < v is used for the definition of canonical blocks - space_keys = {"g": 0, "o": 1, "c": 2, "v": 3} + space_keys = {"g": 0, "o": 1, "c": 2, "v": 3, "r": 4} return 
(space_keys[idx.space[0]], idx.spin, int(idx.name[1:]) if idx.name[1:] else 0, @@ -316,6 +320,8 @@ def generic_indices_from_space(space_str: str) -> list[Index]: assert len(generic_idx) <= 2 # only occ and virt occ = generic_idx.get(("occ", ""), []) occ.extend(generic_idx.get(("virt", ""), [])) + occ.extend(generic_idx.get(("core", ""), [])) + occ.extend(generic_idx.get(("ri", ""), [])) return occ diff --git a/adcgen/resolution_of_identity.py b/adcgen/resolution_of_identity.py new file mode 100644 index 0000000..3f0c4b7 --- /dev/null +++ b/adcgen/resolution_of_identity.py @@ -0,0 +1,77 @@ +from .expression import ExprContainer +from .sympy_objects import SymmetricTensor +from .tensor_names import tensor_names +from .indices import Indices + + +def apply_resolution_of_identity(expr: ExprContainer, + symmetric: bool = True) -> ExprContainer: + """ + Applies the Resolution of Identity approximation (RI, sometimes also + called density fitting, DF) to an expression. This implies that every + spatial ERI is replaced by its factorised form. Two types of factorisation + are supported: symmetric and asymmetric. In the symmetric decomposition, + a spatial ERI is approximated as: + + (pq | rs) ~ B^P_{pq} B^P_{rs} + B^P_{pq} = (P | Q)^{-1/2} (Q | pq) + + This decomposition is the default. In the asymmetric factorisation, the + same spatial ERI is approximated as: + + (pq | rs) ~ C^P_{pq} (P | rs) + C^P_{pq} = (P | Q)^{-1} (Q | pq) + + Note that the RI approximation is only meaningful on spatial ERIs. + Therefore, this routine will crash and exit if the given expression has + not been spin-integrated before. All RI indices receive an alpha spin + by default + + Args: + expr : ExprContainer + The expression to be spin-integrated. + symmetric : bool, optional + If true, the symmetric factorisation variant is employed. + If false, the asymmetric factorisation variant is employed instead. 
+ """ + + resolved_expr = ExprContainer(0, **expr.assumptions) + idx_cls = Indices() + + # We iterate over all terms in the expression and apply RI individually + for term in expr.terms: + # Check if the term is spin-integrated + assert ("n" not in "".join([o.spin for o in term.objects])) + # Check that no antisymmetric ERIs remain + assert (tensor_names.eri not in + ",".join([str(o.name) for o in term.objects])) + + resolved_term = 1 + + for object in term.objects: + # Replace spatial ERIs + if object.name == tensor_names.coulomb: + # Extract indices + lower = object.idx[0:2] + upper = object.idx[2:4] + ri_idx = idx_cls.get_generic_indices(ri_a=1)[("ri", "a")] + + if symmetric: + # v_pqrs = B^P_pq B^P_rs + ri_expr = (SymmetricTensor(tensor_names.ri_sym, + ri_idx, tuple(lower)) + * SymmetricTensor(tensor_names.ri_sym, + ri_idx, tuple(upper))) + else: + # v_pqrs = C^P_pq W^P_rs + ri_expr = (SymmetricTensor(tensor_names.ri_asym_eri, + ri_idx, tuple(upper)) + * SymmetricTensor(tensor_names.ri_asym_factor, + ri_idx, tuple(lower))) + resolved_term *= ri_expr + else: + # Everything else is unaffected by RI + resolved_term *= object + + resolved_expr += resolved_term + return resolved_expr diff --git a/adcgen/simplify.py b/adcgen/simplify.py index 3ac02e7..9196858 100644 --- a/adcgen/simplify.py +++ b/adcgen/simplify.py @@ -60,7 +60,7 @@ def check_term(term: TermContainer) -> bool: # True if all requested tensors are in the term if strict == 'low': return all(t in available for t in set(t_strings)) - # True if all requested Tensors occure the correct amount of times + # True if all requested Tensors occur the correct amount of times elif strict == 'medium': available = Counter(available) desired = Counter(t_strings) diff --git a/adcgen/spatial_orbitals.py b/adcgen/spatial_orbitals.py index 732df87..62881e3 100644 --- a/adcgen/spatial_orbitals.py +++ b/adcgen/spatial_orbitals.py @@ -179,11 +179,19 @@ def integrate_spin(expr: ExprContainer, target_idx: str, addition: 
list[str | None] = [ None for _ in range(len(term_indices)) ] + # We keep an explicit note whether to keep the block + # This is required because of the internal loop structure + # of the checks + accept_spin_block: bool = True for spin, idx in zip(block, indices): if addition[idx] is not None and addition[idx] != spin: - raise ValueError("Found invalid allowed spin block " - f"{block} for {obj}.") + # This can occur if the same index appears in the same + # object multiple times, e. g. < ij || ij > + accept_spin_block = False + break addition[idx] = spin + if not accept_spin_block: + continue # check for contracdictions with the target_spin and skip the # block if this is the case if any(sp1 != sp2 for sp1, sp2 in diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index 0cabeea..abc8162 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -320,7 +320,7 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] return S.Zero spi, spj = i.space[0], j.space[0] - valid_spaces = ["o", "v", "g", "c"] + valid_spaces = ["o", "v", "g", "c", "r"] assert spi in valid_spaces and spj in valid_spaces if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo return S.Zero @@ -366,12 +366,14 @@ def preferred_and_killable(self) -> tuple[Index, Index] | None: space2, spin2 = j.space[0], j.spin # ensure we have no unexpected space and spin assert ( - space1 in ["o", "v", "g", "c"] and space2 in ["o", "v", "g", "c"] + space1 in ["o", "v", "g", "c", "r"] + and space2 in ["o", "v", "g", "c", "r"] ) assert spin1 in ["", "a", "b"] and spin2 in ["", "a", "b"] if spin1 == spin2: # nn / aa / bb -> equal spin information - # oo / vv / cc / gg / og / vg / cg + # oo / vv / cc / gg / og / vg / cg / rr + # RI indices will always end up here if space1 == space2 or space2 == "g": return (i, j) else: # go / gv / gc diff --git a/adcgen/tensor_names.json b/adcgen/tensor_names.json index 1f68b32..38dd928 100644 --- a/adcgen/tensor_names.json +++ 
b/adcgen/tensor_names.json @@ -8,5 +8,8 @@ "left_adc_amplitude": "X", "right_adc_amplitude": "Y", "orb_energy": "e", - "sym_orb_denom": "D" + "sym_orb_denom": "D", + "ri_sym": "B", + "ri_asym_factor": "C", + "ri_asym_eri": "G" } \ No newline at end of file diff --git a/adcgen/tensor_names.py b/adcgen/tensor_names.py index e795724..a5fe4c0 100644 --- a/adcgen/tensor_names.py +++ b/adcgen/tensor_names.py @@ -47,6 +47,9 @@ class TensorNames(metaclass=Singleton): """ eri: str = "V" coulomb: str = "v" + ri_sym: str = "B" + ri_asym_factor: str = "C" + ri_asym_eri: str = "G" fock: str = "f" operator: str = "d" gs_amplitude: str = "t" diff --git a/build/lib/adcgen/__init__.py b/build/lib/adcgen/__init__.py new file mode 100644 index 0000000..e62ee16 --- /dev/null +++ b/build/lib/adcgen/__init__.py @@ -0,0 +1,51 @@ +from .core_valence_separation import apply_cvs_approximation +from .derivative import derivative +from .eri_orbenergy import EriOrbenergy +from .expression import ExprContainer +from .factor_intermediates import factor_intermediates +from .func import import_from_sympy_latex, evaluate_deltas, wicks +from .generate_code import (generate_code, optimize_contractions, Contraction, + unoptimized_contraction) +from .groundstate import GroundState +from .indices import Indices, get_symbols +from .intermediate_states import IntermediateStates +from .intermediates import Intermediates +from .logger import set_log_level, _config_logger +from .operators import Operators +from .properties import Properties +from .reduce_expr import reduce_expr +from .secular_matrix import SecularMatrix +from .simplify import simplify, simplify_unitary, remove_tensor +from .spatial_orbitals import transform_to_spatial_orbitals +from .sympy_objects import (AntiSymmetricTensor, SymmetricTensor, Amplitude, + NonSymmetricTensor, KroneckerDelta, SymbolicTensor) +from .tensor_names import tensor_names +from .resolution_of_identity import apply_resolution_of_identity +from . 
import sort_expr as sort + + +__all__ = ["AntiSymmetricTensor", "SymmetricTensor", "NonSymmetricTensor", + "Amplitude", "SymbolicTensor", "KroneckerDelta", + "Operators", "GroundState", "IntermediateStates", + "SecularMatrix", "Properties", + "Indices", "get_symbols", + "ExprContainer", "EriOrbenergy", "import_from_sympy_latex", + "evaluate_deltas", "wicks", + "simplify", "simplify_unitary", "remove_tensor", + "derivative", + "Intermediates", "reduce_expr", "factor_intermediates", + "sort", + "transform_to_spatial_orbitals", + "apply_resolution_of_identity" + "apply_cvs_approximation", + "generate_code", "optimize_contractions", + "unoptimized_contraction", "Contraction", + "set_log_level", + "tensor_names"] + +__authors__ = ["Jonas Leitner", "Linus Dittmer"] +__version__ = "0.0.4" + + +# load the logger configuration and apply it +_config_logger() diff --git a/build/lib/adcgen/core_valence_separation.py b/build/lib/adcgen/core_valence_separation.py new file mode 100644 index 0000000..4e60305 --- /dev/null +++ b/build/lib/adcgen/core_valence_separation.py @@ -0,0 +1,421 @@ +from collections.abc import Callable, Sequence +import itertools + +from sympy.physics.secondquant import FermionicOperator +from sympy import S + +from .expression import ( + ExprContainer, TermContainer, ObjectContainer, + NormalOrderedContainer, PolynomContainer +) +from .indices import Index, get_symbols, sort_idx_canonical +from .logger import logger +from .misc import Inputerror +from .sympy_objects import SymbolicTensor, KroneckerDelta +from .tensor_names import tensor_names, is_t_amplitude + + +def apply_cvs_approximation( + expr: ExprContainer, core_indices: str, spin: str | None = None, + cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None + ) -> ExprContainer: + """ + Apply the core-valence approximation to the given expression by + splitting the occupied space into core and valence space. + Furthermore certain ERI/Coulomb blocks are assumed to vanish. 
+ + Parameters + ---------- + expr: Expr + Expression the CVS approximation should be applied to. + core_indices: str + The names of the core target indices to introduce assuming we currently + have occupied target indices with matching names in the expression, + e.g., "IJ" will transform the occupied target indices "ij" to the core + target indices "IJ". + spin: str | None, optional + The spin of the core indices, e.g., "aa" for two core indices with + alpha spin. + cvs_approximation : callable, optional + Callable that takes an ObjectContainer instance and a space string + (e.g. 'covv'). It returns a bool indicating whether the block of the + object described by the space string is valid within the CVS + approximation, i.e., whether the block is neglected or not. + By default, the "is_allowed_cvs_block" function is used, which applies + the CVS approximation as described in 10.1063/1.453424 and as + implemented in adcman/adcc. + """ + # NOTE: Index substitutions have to be performed for all indices + # simultaneously, to avoid creating an intermediate delta_cx that is then + # further substituted to a delta_cc for instance. However, the delta_cx + # will be evalauted to zero upon creation and therefore some terms might + # vanish by accident. + assert isinstance(expr, ExprContainer) + # get the target indices of the expression + terms: tuple[TermContainer, ...] = expr.terms + target_indices: tuple[Index, ...] 
= terms[0].target + assert all(term.target == target_indices for term in terms) + # build the substitution dict for the occupied target indices + target_subs = build_target_substitutions( + target_indices, core_indices, spin=spin + ) + result = ExprContainer(0, **expr.assumptions) + for term in terms: + result += expand_contracted_indices( + term, target_subs=target_subs, + cvs_approximation=cvs_approximation + ) + # update the set target indices if necessary + if result.provided_target_idx is not None: + result_target = tuple(target_subs.get(s, s) for s in target_indices) + result.set_target_idx(result_target) + return result + + +def build_target_substitutions(target_indices: tuple[Index, ...], + core_indices: str, + spin: str | None = None) -> dict[Index, Index]: + """ + Determines the necessary index substitutions to introduce the desired + core indices as target indices. + + Parameters + ---------- + target_indices: tuple[Index] + The target indices in which to substitute the core indices. + core_indices: str + Names of the core indices to introduce. + spin: str | None, optional + Spin of the core indices to introduce, e.g., "aa" for two core indices + with alpha spin. + """ + core_symbols: list[Index] = get_symbols(core_indices, spin) + # ensure that the provided core indices are valid + if not all(idx.space == "core" for idx in core_symbols): + raise Inputerror(f"The provided core indices {core_symbols} are no " + "valid core indices, i.e., they do not belong to the" + " core space.") + # for each occupied target index build the corresponding core index + occupied_target_indices: tuple[Index, ...] 
= tuple( + idx for idx in target_indices if idx.space == "occ" + ) + occ_target_as_core = get_core_indices(occupied_target_indices) + # build the substitution dict for the occupied target indices + return {occ: core for occ, core in + zip(occupied_target_indices, occ_target_as_core) + if core in core_symbols} + + +def expand_contracted_indices( + term: TermContainer, target_subs: dict[Index, Index], + cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None + ) -> ExprContainer: + """ + Expands the contracted occupied indices in the given term into core + and valence indices. Note that valence indices are denoted as occupied + in the result. + + Parameters + ---------- + term: TermContainer + Term in which to expand the occupied contracted indices + target_subs: dict[Index, Index] + The substitution dict containing the necessary occ -> core + substitutions for the target indices. Will not be modified in this + function! + cvs_approximation : callable, optional + Callable that takes an ObjectContainer instance and a space string + (e.g. 'covv'). It returns a bool indicating whether the block of the + object described by the space string is valid within the CVS + approximation, i.e., whether the block is neglected or not. + By default, the "is_allowed_cvs_block" function is used, which applies + the CVS approximation as described in 10.1063/1.453424 and as + implemented in adcman/adcc. + """ + if not term.idx: # term is a number -> nothing to do + return ExprContainer(term.inner, **term.assumptions) + + if cvs_approximation is None: + cvs_approximation = is_allowed_cvs_block + # get the contracted occupied indices + # and build the corresponding core indices + contracted: tuple[Index, ...] = term.contracted + occupied_contracted: tuple[Index, ...] 
= tuple( + idx for idx in contracted if idx.space == "occ" + ) + core_contracted = get_core_indices(occupied_contracted) + result = ExprContainer(0, **term.assumptions) + # go through all variants of valence and core indices + for variant in itertools.product("oc", repeat=len(occupied_contracted)): + # finish the substitution dict + subs = target_subs.copy() + for space, occ, core in \ + zip(variant, occupied_contracted, core_contracted): + if space != "c": + continue + # check for contradictions in the full substitutions dict + if occ in subs and subs[occ] is not core: + raise RuntimeError("Found contradiction in substitution dict. " + f"The occ index {occ} can not be mapped " + f"onto {subs[occ]} and {core} at the " + "same time.") + subs[occ] = core + # go through the objects and check if there is a block that is + # neglected within the CVS approximation + is_valid_variant = True + for obj in term.objects: + cvs_block = "".join( + subs.get(idx, idx).space[0] for idx in obj.idx + ) + if not cvs_approximation(obj, cvs_block): + is_valid_variant = False + break + if not is_valid_variant: # variant generates a neglected block + continue + # apply the substitutions to the term. This has to happen + # simultaneously in order to avoid intermediates delta_cx which + # evaluate to zero. + sub_term = term.subs(subs, simultaneous=True) + result += sub_term + return result + + +def allowed_cvs_blocks( + expr: ExprContainer, target_idx: Sequence[str] | Sequence[Index], + spin: str | None = None, + cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None + ) -> tuple[str, ...]: + """ + Determines all allowed blocks for the given expression + within the CVS approximation by expanding the occupied indices into + core and valence indices. + + Parameters + ---------- + expr: Expr + The expression in which the allowed cvs blocks should be determined. + target_idx: Sequence[str] | Sequence[Index] + The target indices of the expression. 
+ cvs_approximation : callable, optional + Callable that takes an ObjectContainer instance and a space string + (e.g. 'covv'). It returns a bool indicating whether the block of the + object described by the space string is valid within the CVS + approximation, i.e., whether the block is neglected or not. + By default, the "is_allowed_cvs_block" function is used, which applies + the CVS approximation as described in 10.1063/1.453424 and as + implemented in adcman/adcc. + """ + target_symbols: list[Index] = get_symbols(target_idx, spin) + sorted_target: tuple[Index, ...] = tuple( + sorted(target_symbols, key=sort_idx_canonical) + ) + # identify all occupied target indices + # and build the corresponding core indices + occupied_target: list[Index] = [ + idx for idx in target_symbols if idx.space == "occ" + ] + core_target: list[Index] = get_core_indices(occupied_target) + # determine the possible cvs variants (part of the block string) + cvs_variants: tuple[tuple[str, ...], ...] = tuple( + itertools.product("oc", repeat=len(occupied_target)) + ) + cvs_variants_to_check: list[int] = [i for i in range(len(cvs_variants))] + allowed_blocks: list[str] = [] + # go through all terms and check each for the invalid cvs blocks + for term in expr.terms: + if term.target != sorted_target: + raise ValueError(f"Target indices {term.target} of {term} dont " + "match the provided target indices " + f"{target_symbols}") + + variants_to_remove: set[int] = set() + for variant_i in cvs_variants_to_check: + variant = cvs_variants[variant_i] + # build the target index occ -> core substitution dict + target_subs = {occ: core for space, occ, core in + zip(variant, occupied_target, core_target) + if space == "c"} + # expand the occupied contracted indices + sub_term = expand_contracted_indices( + term, target_subs=target_subs, + cvs_approximation=cvs_approximation + ) + # invalid substitutions -> invalid variant + if sub_term.inner is S.Zero: + continue + # build the full block string + 
variant = list(reversed(variant)) + block = "".join( + idx.space[0] if idx.space != "occ" else variant.pop() + for idx in target_symbols + ) + assert not variant + allowed_blocks.append(block) + variants_to_remove.add(variant_i) + cvs_variants_to_check = [i for i in cvs_variants_to_check + if i not in variants_to_remove] + return tuple(allowed_blocks) + + +def allow_all_cvs_blocks(obj: ObjectContainer, cvs_block: str) -> bool: + _ = obj, cvs_block + return True + + +def is_allowed_cvs_block(obj: ObjectContainer, cvs_block: str) -> bool: + """ + Whether the object is allowed within the CVS approximation. + """ + from .intermediates import Intermediates, RegisteredIntermediate + + if not obj.idx: # prefactor or symbol + return True + # skip Polynoms for now. + # The MP orbital energy denoms should not be important + if isinstance(obj, PolynomContainer): + return True + elif isinstance(obj, NormalOrderedContainer): + return all( + is_allowed_cvs_block(o, b) for o, b in zip(obj.objects, cvs_block) + ) + + sympy_obj = obj.base + if isinstance(sympy_obj, SymbolicTensor): + name = sympy_obj.name + if name == tensor_names.eri: + return is_allowed_cvs_eri_block(cvs_block) + elif name == tensor_names.coulomb: + return is_allowed_cvs_coulomb_block(cvs_block) + elif is_t_amplitude(name): + return is_allowed_cvs_t_amplitude_block(cvs_block) + elif name == tensor_names.fock: + return is_allowed_cvs_fock_block(cvs_block) + elif isinstance(sympy_obj, KroneckerDelta): + return is_allowed_cvs_delta_block(cvs_block) + elif isinstance(sympy_obj, FermionicOperator): + return True + + # check if the obj is a known intermediate + longname = obj.longname(use_default_names=True) + assert longname is not None + itmd = Intermediates().available.get(longname, None) + if itmd is None: + # the object is no intermediate + # assume that all blocks are valid in this case + logger.warning( + f"Could not determine whether {obj} is valid within the CVS " + "approximation." 
+ ) + return True + # the object is a known intermediate: + # expand the intermediate, and determine the allowed spin blocks + assert isinstance(itmd, RegisteredIntermediate) + return cvs_block in itmd.allowed_cvs_blocks(is_allowed_cvs_block) + + +def is_allowed_cvs_coulomb_block(coulomb_block: str) -> bool: + """ + Whether the given Coulomb integral (in chemist notation) block + is allowed within the CVS approximation + """ + # NOTE: according to 10.1063/1.453424 (from 1987) coulomb integrals with + # 1 and 3 core indices vanish. Furthermore, the Coulomb integrals + # , , , + # vanish, i.e., all integrals co/cv vanish. + # However, in a later paper 10.1063/1.1418437 (from 2001) the integrals + # , , , + # = (co|co), (cv|cv), (oc|oc), (vc|vc) + # only vanish when arising from different core-level occupations (DCO), + # i.e., when they appear in matrix blocks that we are neglecting anyway. + # In the current implementation in adcman/adcc those blocks are assumed + # to vanish following the earlier paper. + # The current implementation follows the implementation in adcman/adcc. + assert len(coulomb_block) == 4 + assert "g" not in coulomb_block # no general indices + if "c" in coulomb_block and (coulomb_block[:2].count("c") == 1 or + coulomb_block[2:].count("c") == 1): + return False + return True + + +def is_allowed_cvs_eri_block(eri_block: str) -> bool: + """ + Whether the given anti-symmetric ERI block (in physicist notation) + is allowed within the CVS approximation. 
+ """ + assert len(eri_block) == 4 + assert "g" not in eri_block # no general indices + n_core = eri_block.count("c") + if n_core == 1 or n_core == 3: + return False + # additionally, the blocks ccxx and xxcc are not allowed + # (see comment in is_allowed_cvs_coulomb_block) + elif n_core == 2 and (eri_block[:2] == "cc" or eri_block[2:] == "cc"): + return False + return True + + +def is_allowed_cvs_fock_block(fock_block: str) -> bool: + """ + Whether the given Fock matrix block is allowed within the CVS + approximation. + """ + assert len(fock_block) == 2 + assert "g" not in fock_block # no general indices + if fock_block.count("c") == 1: # f_cx / f_xc + return False + return True # f_cc / f_xx + + +def is_allowed_cvs_t_amplitude_block(amplitude_block: str) -> bool: + """ + Whether the given block of a ground state t-amplitude is valid within + the CVS approximation + """ + # t-amplitudes seem to follow the rule that only the valence space + # has to be considered, i.e., all core orbitals can simply + # be neglected. + # t2_1: oovv t1_2: ov t2_2: oovv t3_2: ooovvv t4_2: oooovvvv + assert not len(amplitude_block) % 2 + assert all(sp == "v" for sp in amplitude_block[len(amplitude_block)//2:]) + if amplitude_block.count("c"): + return False + assert all(sp == "o" for sp in amplitude_block[:len(amplitude_block)//2]) + return True + + +def is_allowed_cvs_delta_block(delta_block: str) -> bool: + """ + Whether the given delta block is allowed within the CVS approximation. + """ + assert len(delta_block) == 2 + assert "g" not in delta_block # no general indices + return delta_block[0] == delta_block[1] + + +def get_core_indices(occupied_indices: Sequence[Index]) -> list[Index]: + """ + Builds core indices for the given occupied indices, i.e., + I for the occupied index i. 
+ """ + assert all(idx.space == "occ" for idx in occupied_indices) + names = [] + spins = [] + for idx in occupied_indices: + names.append(idx.name.upper()) + spins.append(idx.spin) + return get_symbols(names, spins) + + +def get_occ_indices(core_indices: Sequence[Index]) -> list[Index]: + """ + Builds the occupied/valence indices for the given core indices, i.e., + i for the core index I. + """ + assert all(idx.space == "core" for idx in core_indices) + names = [] + spins = [] + for idx in core_indices: + names.append(idx.name.lower()) + spins.append(idx.spin) + return get_symbols(names, spins) diff --git a/build/lib/adcgen/derivative.py b/build/lib/adcgen/derivative.py new file mode 100644 index 0000000..a296ed9 --- /dev/null +++ b/build/lib/adcgen/derivative.py @@ -0,0 +1,103 @@ +from sympy import Mul, Rational, S, diff + +from .expression import ExprContainer, ObjectContainer +from .indices import minimize_tensor_indices, Index +from .sympy_objects import SymbolicTensor + + +def derivative(expr: ExprContainer, t_string: str + ) -> dict[tuple[str, str], ExprContainer]: + """Computes the derivative of an expression with respect to a tensor. + The derivative is separated block whise, e.g, terms that contribute to + the derivative w.r.t. the oooo ERI block are separated from terms that + contribute to the ooov block. + Assumptions of the input expression are NOT updated or modified. + The derivative is NOT simplified.""" + assert isinstance(t_string, str) + assert isinstance(expr, ExprContainer) + expr = expr.expand() + # create some Dummy Symbol. Replace the tensor with the Symbol and + # compute the derivative with respect to the Symbol. Afterwards + # resubstitute the Tensor for the Dummy Symbol. 
+ x = Index('x') + + derivative = {} + for term in expr.terms: + assumptions = term.assumptions + objects = term.objects + # - find all occurences of the desired tensor + tensor_obj: list[ObjectContainer] = [] + remaining_obj = ExprContainer(1, **term.assumptions) + for obj in objects: + if obj.name == t_string: + tensor_obj.append(obj) + else: + remaining_obj *= obj + + # - extract the names of target indices of the term + target_names_by_space: dict[tuple[str, str], set[str]] = {} + for s in term.target: + if (key := s.space_and_spin) not in target_names_by_space: + target_names_by_space[key] = set() + target_names_by_space[key].add(s.name) + + # 2) go through the tensor_obj list and compute the derivative + # for all found occurences one after another (product rule) + for i, obj in enumerate(tensor_obj): + # - extract the exponent of the tensor + exponent = obj.exponent + # - rebuild the term without the current occurence of the + # tensor obj + deriv_contrib = remaining_obj.copy() + for other_i, other_obj in enumerate(tensor_obj): + if i != other_i: + deriv_contrib *= other_obj + # - minimize the indices of the removed tensor + _, perms = minimize_tensor_indices(obj.idx, target_names_by_space) + # - apply the permutations to the remaining term + deriv_contrib = deriv_contrib.permute(*perms) + if deriv_contrib.inner is S.Zero: + raise RuntimeError(f"Mnimization permutations {perms} let " + f"the remaining term {deriv_contrib} " + "vanish.") + # - Apply the permutations to the object. Might introduce + # a prefactor of -1 that we need to move to the deriv_contrib. + # Also the indices might be further minimized due to the + # symmetry of the tensor obj + obj = obj.permute(*perms).terms[0] + if (factor := obj.prefactor) < S.Zero: + deriv_contrib *= factor + # - Apply the symmetry of the removed tensor to the remaining + # term to ensure that the result has the correct symmetry. + # Also replace the removed tensor by a Dummy Variable x. 
+ # This allows to compute the symbolic derivative with diff. + tensor_sym = obj.symmetry() + deriv_contrib *= Rational(1, len(tensor_sym) + 1) + symmetrized_deriv_contrib = S.Zero + symmetrized_deriv_contrib += Mul(deriv_contrib.inner, x**exponent) + for perms, factor in tensor_sym.items(): + symmetrized_deriv_contrib += Mul( + deriv_contrib.copy().permute(*perms).inner, + factor, + x**exponent + ) + # - compute the derivative with respect to x + symmetrized_deriv_contrib = diff(symmetrized_deriv_contrib, x) + # - replace x by the removed tensor (due to diff the exponent + # is lowered by 1) + obj = [ + o for o in obj.objects if isinstance(o.base, SymbolicTensor) + ] + assert len(obj) == 1 + obj = obj[0] + symmetrized_deriv_contrib = ( + symmetrized_deriv_contrib.subs(x, obj.base) + ) + # - sort the derivative according to the space of the minimal + # tensor indices + # -> sort the derivative block whise. + key = (obj.space, obj.spin) + if key not in derivative: + derivative[key] = ExprContainer(0, **assumptions) + derivative[key] += symmetrized_deriv_contrib + return derivative diff --git a/build/lib/adcgen/eri_orbenergy.py b/build/lib/adcgen/eri_orbenergy.py new file mode 100644 index 0000000..c6e8360 --- /dev/null +++ b/build/lib/adcgen/eri_orbenergy.py @@ -0,0 +1,615 @@ +from collections.abc import Sequence +from collections import Counter +from typing import TypeGuard + +from sympy import Add, Basic, Expr, Mul, Pow, Rational, S, nsimplify + +from .expression import ( + ExprContainer, ObjectContainer, PolynomContainer, TermContainer +) +from .logger import logger +from .misc import Inputerror +from .symmetry import Permutation +from .sympy_objects import SymmetricTensor +from .tensor_names import tensor_names + + +class EriOrbenergy: + """ + Splits a single term into an orbital energy fraction, a prefactor and a + remainder. + + Parameters + ---------- + term : TermContainer | ExprContainer + The term to split. 
+ """ + + def __init__(self, term: TermContainer | ExprContainer) -> None: + # ensure the input consists of a single term either as term or expr + if not isinstance(term, TermContainer) or not len(term) == 1: + Inputerror("Expected a single term as input.") + # factor the term to ensure all prefactors are in the numerator + factored: ExprContainer = term.factor() # returns an expr + + # split the term in num, denom and eri + splitted = factored.terms[0].split_orb_energy() + # validate the denominator: has to be of the form: (a+b)(c+d) or (a+b) + self._denom: ExprContainer = splitted['denom'] + self._validate_denom() + + # validate eri: has to consist of a single term + eri = splitted['remainder'] + if len(eri) != 1: + raise Inputerror("Remainder/ERI part should consist of a single " + f"term. Got {eri} from term {splitted}.") + self._eri: TermContainer = eri.terms[0] + + # numerator can essentially be anything: a or a+b + # extract the prefactor with the smallest abs value from the numerator + # NOTE: this is not mandatory. It is also possible to just use + # term.prefactor as pref. Then we might have prefactors < 1 + # in the numerator. Should not be important except for + # canceling the orbital energy fraction. 
+ # But if we keep it like it is, we should have a more clear + # definition of the prefactor (only the sign might be ambiguous + # +0.5 vs -0.5) + self._pref: Expr = min( # type: ignore + [t.prefactor for t in splitted["num"].terms], + key=abs # type: ignore + ) + + # only possiblity to extract 0 should be if the numerator is 0 + if self._pref is S.Zero: + if not splitted['num'].inner.is_number: + raise NotImplementedError(f"Extracted pref {self._pref} from " + "unexpected numerator " + f"{splitted['num']}") + self._num: ExprContainer = splitted['num'] + elif self._pref is S.One: # nothing to factor + self._num: ExprContainer = splitted['num'] + else: + # we can factor a number and remove it afterwards from the term + # the result of the division needs to be converted to rational + # again! + self._num: ExprContainer = factor_and_remove_number( + splitted['num'], self._pref + ) + # ensure that the numerator is what we expect + self._validate_num() + + def __str__(self): + return f"{self.pref} * [{self.num}] / [{self.denom}] * {self.eri}" + + def _validate_denom(self) -> None: + """ + Ensures that the denominator only consists of brackets of the form + (e_a + e_b - ...)(...). 
+ """ + # only objects that contain only e tensors with a single idx can + # occur in the denominator + + if self._denom.inner.is_number: # denom is a number -> has to be 1 + if self._denom.inner is not S.One: + raise Inputerror(f"Invalid denominator {self._denom}") + else: + # check that each bracket consists of terms that each contain + # a single epsilon and possibly a prefactor of -1 + for bracket in self.denom_brackets: + if not isinstance(bracket, (ExprContainer, PolynomContainer)): + raise TypeError(f"Invalid bracket {bracket} in " + f"{self._denom}.") + for term in bracket.terms: + n_orb_energy = 0 + for o in term.objects: + if o.is_orbital_energy and o.exponent == 1: + n_orb_energy += 1 + # denominator has to contain prefactors +- 1 + # prefactors need to be +-1 for cancelling to work + elif o.inner.is_number and o.inner is S.NegativeOne: + continue + else: + raise Inputerror(f"Invalid bracket {bracket} in " + f"{self._denom}.") + if n_orb_energy != 1: + raise Inputerror(f"Invalid bracket {bracket} in " + f"{self._denom}.") + + def _validate_num(self) -> None: + """ + Ensures that the numerator is of the form (e_a + e_b - ...) only + allowing prefactors +-1. + """ + # numerator can only contain terms that consist of e tensors with a + # single index and prefactors + # checking that each term only contains a single tensor with exponent 1 + # ensures that each term only holds a single index + + if self._num.inner.is_number: # is a number -> 1 or 0 possible + if self._num.inner not in [S.One, S.Zero]: + raise Inputerror(f"Invalid numerator {self._num}.") + else: # an expr object (a + b + ...) 
+ for term in self._num.terms: + n_orb_energy = 0 + for o in term.objects: + if o.is_orbital_energy and o.exponent == 1: + n_orb_energy += 1 + elif o.inner.is_number: # any prefactors allowed + continue + else: + raise Inputerror(f"Invalid object {o} in {self._num}.") + if n_orb_energy != 1: + raise Inputerror(f"Invalid term {term} in numerator " + f"{self._num}.") + + @property + def denom(self) -> ExprContainer: + """Returns the denominator of the orbital energy fraction.""" + return self._denom + + @property + def eri(self) -> TermContainer: + """Returns the remainder of the term.""" + return self._eri + + @property + def num(self) -> ExprContainer: + """Returns the numerator of the orbital energy fraction.""" + return self._num + + @property + def pref(self) -> Expr: # sympy rational + """Returns the prefactor of the term.""" + return self._pref + + @property + def denom_brackets(self + ) -> tuple[ExprContainer] | tuple[PolynomContainer, ...]: # noqa E501 + """Returns a tuple containing the brackets of the denominator.""" + if len(self.denom) != 1 or self.denom.inner.is_number: + return (self.denom,) + else: # denom consists of brackets + brackets = self.denom.terms[0].objects + assert _is_polynom_tuple(brackets) + return brackets + + def copy(self) -> "EriOrbenergy": + return EriOrbenergy(self.expr) + + @property + def expr(self) -> ExprContainer: + """Rebuild the original term.""" + return self.num * self.eri / self.denom * self.pref + + def denom_description(self) -> str | None: + """ + Returns a string that describes the denominator containing the + number of brackets, as well as the length and exponent of each + bracket. 
+ """ + if self.denom.inner.is_number: + return None + + brackets = self.denom_brackets + bracket_data = [] + for bk in brackets: + exponent = S.One if isinstance(bk, ExprContainer) else bk.exponent + assert isinstance(exponent, Expr) + bracket_data.append(f"{len(bk)}-{exponent}") + # reverse sorting -> longest braket will be listed first + bracket_data = "_".join(sorted(bracket_data, reverse=True)) + return f"{len(brackets)}_{bracket_data}" + + def cancel_denom_brackets(self, braket_idx_list: Sequence[int] + ) -> ExprContainer: + """ + Cancels brackets by their index in the denominator lowering the + exponent by 1 or removing the bracket completely if an exponent + of 0 is reached. If an index is listed n times the exponent + will be lowered by n. + The original denominator is not modified. + """ + denom: list[ExprContainer | PolynomContainer | None | Expr] = list( + self.denom_brackets + ) + for idx, n in Counter(braket_idx_list).items(): + braket = denom[idx] + assert braket is not None and not isinstance(braket, Expr) + if isinstance(braket, ExprContainer): + exponent = S.One + base = braket.inner + else: + base, exponent = braket.base_and_exponent + assert exponent.is_Integer + if (new_exp := int(exponent) - n) == 0: + denom[idx] = None + else: + denom[idx] = Pow(base, new_exp) + new_denom = Mul(*( + bk if isinstance(bk, Expr) else bk.inner for bk in denom + if bk is not None + )) + return ExprContainer(new_denom, **self.denom.assumptions) + + def cancel_eri_objects(self, obj_idx_list: Sequence[int]) -> ExprContainer: + """ + Cancels objects in the remainder (eri) part according to their index + lowering their exponent by 1 for each time the objects index is + provided. If a final exponent of 0 is reached, the object is removed + from the remainder entirely. + The original remainder is not changed. 
+ """ + objects: list[ObjectContainer | None | Expr] = list(self.eri.objects) + for idx, n in Counter(obj_idx_list).items(): + obj = objects[idx] + assert obj is not None and not isinstance(obj, Expr) + base, exponent = obj.base_and_exponent + assert exponent.is_Integer + if (new_exp := int(exponent) - n) == 0: + objects[idx] = None + else: + objects[idx] = Pow(base, new_exp) + new_eri = Mul(*( + obj if isinstance(obj, Basic) else obj.inner for obj in objects + if obj is not None + )) + return ExprContainer(new_eri, **self.eri.assumptions) + + def denom_eri_sym(self, + eri_sym: dict[tuple[Permutation, ...], int] | None = None, # noqa E501 + **kwargs) -> dict[tuple[Permutation, ...], int]: + """ + Apply the symmetry of the remainder (eri) part to the denominator + identifying the common symmetry of both parts of the term. + + Parameters + ---------- + eri_sym : dict, optional + The symmetry of the remainder (eri) part of the term. + If not provided it will be determined on the fly. + **kwargs : dict, optional + Additional arguments that are forwarded to the 'Term.symmetry' + method to determine the symmetry of the remainder on the fly. 
+ """ + # if the denominator is a number -> just return symmetry of eri part + if self.denom.inner.is_number: + return self.eri.symmetry(**kwargs) if eri_sym is None else eri_sym + + if eri_sym is None: + # if the eri part is just a number all possible permutations of the + # denom would be required with their symmetry + if not self.eri.idx: + raise NotImplementedError("Symmetry of an expr (the " + "denominator) not implemented") + eri_sym = self.eri.symmetry(**kwargs) + + ret = {} + denom = self.denom.inner + for perms, factor in eri_sym.items(): + perm_denom = self.denom.copy().permute(*perms).inner + # permutations are not valid for the denominator + if perm_denom is S.Zero and denom is not S.Zero: + continue + + if Add(denom, -perm_denom) is S.Zero: + ret[perms] = factor # P_pq Denom = Denom -> +1 + elif Add(denom, perm_denom) is S.Zero: + ret[perms] = factor * -1 # P_pq Denom = -Denom -> -1 + else: # permutation changes the denominator + ret[perms] = None + return ret + + def permute_num(self, + eri_sym: dict[tuple[Permutation, ...], int] | None = None + ) -> "EriOrbenergy": + """ + Symmetrize the orbital energy numerator by applying the common symmetry + of the remainder (eri) part and the orbital energy denominator + - only considering contracted indices! - to the numerator keeping the + result normalized. + For instance, a numerator (e_i - e_a) may be expanded to + 1/2 (e_i + e_j - e_a - e_b) by applying the permutation P_{ij}P_{ab}. + The new prefactor is automatically extracted from the + new numerator and added to the existing prefactor. + The class instance is modified in place. 
+ """ + # if the numerator is a number no permutation will do anything useful + if self.num.inner.is_number: + return self + # apply all permutations to the numerator that satisfy + # P_pq ERI = a * ERI and P_pq Denom = b * Denom + # with a, b in [-1, +1] and a*b = 1 + permutations = [ + (perms, factor) for perms, factor in + self.denom_eri_sym(eri_sym=eri_sym, only_contracted=True).items() + if factor is not None + ] + num = self.num.copy() + for perms, factor in permutations: + num += self.num.copy().permute(*perms) * factor + num = num * Rational(1, len(permutations) + 1) + assert isinstance(num, ExprContainer) + num.expand() + # this possibly introduced prefactors in the numerator again + # -> extract the smallest prefactor and shift to self.pref + additional_pref = min( # type: ignore + [t.prefactor for t in num.terms], key=abs # type: ignore + ) + self._pref *= additional_pref + if additional_pref is S.Zero: # permuted num = 0 + if not num.inner.is_number: + raise ValueError("Only expected to obtain 0 as pref" + "from a 0 numerator. Got " + f"{additional_pref} from {num}.") + self._num = num + elif additional_pref is S.One: # nothing to factor + self._num = num + else: + self._num = factor_and_remove_number(num, additional_pref) + self._validate_num() + return self + + def canonicalize_sign(self, only_denom: bool = False) -> "EriOrbenergy": + """ + Adjusts the sign of orbital energies in the numerator and denominator: + virtual orbital energies are subtracted, while occupied orbital + energies are added. The possible factor of -1 is extracted to the + prefactor. + Modifies the class instance in place. + + Parameters + ---------- + only_denom : bool, optional + If set, only the signs in the denominator will be adjusted + (default: False). + """ + + def adjust_sign(expr: ExprContainer | PolynomContainer) -> bool: + # function that extracts the sign of the occupied and virtual + # indices in a term. 
+ + signs = {} + for term in expr.terms: + idx = term.idx + if len(idx) != 1: + raise RuntimeError("Expected a bracket to consist of " + "epsilons that each hold a single index" + f". Found: {term} in {expr}.") + ov = idx[0].space[0] + if ov not in signs: + signs[ov] = [] + signs[ov].append(term.sign) + + # map that connects sign and space + desired_sign = {"o": "plus", "v": "minus"} + + # adjust sign if necessary + change_sign = [] + for ov, sign in signs.items(): + # first check that all o/v terms have the same sign + if not all(pm == sign[0] for pm in sign): + raise RuntimeError(f"Ambiguous signs of the {ov} indices " + f"in {expr} in\n{self}") + if ov not in desired_sign: + raise NotImplementedError("No desired sign defined for " + "orbital energies of the space " + f"{ov}.") + if sign[0] != desired_sign[ov]: + change_sign.append(True) + if change_sign: + if len(change_sign) != len(signs): + raise RuntimeError(f"Apparently not all {signs.keys()} " + "spaces require a sign change in " + f"{expr}.") + return True + else: + return False + + # numerator + if not only_denom and not self.num.inner.is_number and \ + adjust_sign(self.num): + self._pref *= S.NegativeOne + self._num *= S.NegativeOne + assert isinstance(self._pref, Expr) + assert isinstance(self._num, ExprContainer) + + # denominator + if not self.denom.inner.is_number: + denom = S.One + for bracket in self.denom_brackets: + if adjust_sign(bracket): + if isinstance(bracket, ExprContainer): + exponent = S.One + base = bracket.inner + else: + base, exponent = bracket.base_and_exponent + assert exponent.is_Integer + if int(exponent) % 2: + self._pref *= S.NegativeOne + bracket = ExprContainer( + Pow(S.NegativeOne*base, exponent), + **bracket.assumptions + ) + denom *= bracket + assert isinstance(denom, ExprContainer) + self._denom = denom + return self + + def cancel_orb_energy_frac(self) -> ExprContainer: + """ + Cancel the orbital energy fraction. 
Thereby, long denominator brackets + or brackets with rare indices are priorized. + """ + def multiply(expr_list: list[ExprContainer | PolynomContainer] + ) -> Expr | ExprContainer: + res = S.One + assert isinstance(res, Expr) + for term in expr_list: + res *= term + return res + + def cancel(num: ExprContainer, + denom: list[ExprContainer | PolynomContainer], + pref: Expr) -> ExprContainer: + num = num.copy() # avoid in place modification + cancelled_result = None + for bracket_i, bracket in enumerate(denom): + bracket_indices = bracket.idx + + # get the prefactors of all orbital energies that occur in the + # bracket that we currently want to remove + relevant_prefs = [term.prefactor for term in num.terms + if term.idx[0] in bracket_indices] + # do all indices that occur in the bracket also occur in the + # numerator? + if len(relevant_prefs) != len(bracket_indices): + continue + + # find the smallest relevant prefactor and factor the prefactor + # -> this ensures that at least one of the relevant orbital + # energies has a prefactor of 1 + # -> at least 1 of the orbital energies will not be present + # in the new numerator + # -> can only cancel each bracket at most 1 time + # -> no need to recurse just iterate through the list + min_pref = min(relevant_prefs, key=abs) # type: ignore + # the sign in the numerator has been fixed before entering this + # function -> dont change it! + if min_pref < 0: + min_pref *= -1 + + if min_pref is not S.One: + pref *= min_pref + num = factor_and_remove_number(num, min_pref) + + # all orbital energies that also occur in the bracket now + # have at least a prefactor of 1 + # others might have a pref < 1 + # construct the new numerator by subtracting the content + # of the bracket from the numerator. 
This works, because + # - all relevant orbital energies in the numerator have a + # prefactor of at least 1 and the signs in the numerator and + # in the bracket match + # - all orbital energies in the numerator have an exponent + # of 1 + if isinstance(bracket, ExprContainer): + exponent = S.One + assert isinstance(exponent, Expr) + base = bracket.inner + else: # polynom + base, exponent = bracket.base_and_exponent + logger.info(f"Cancelling: {ExprContainer(base)}") + num -= base + # build the new denominator -> lower bracket exponent by 1 + if exponent == 1: + new_denom = denom[:bracket_i] + denom[bracket_i+1:] + else: + new_denom = denom[:] + new_denom[bracket_i] = ExprContainer( + Pow(base, exponent-S.One), **bracket.assumptions + ) + # result <- 1/new_denom + new_num/denom + if cancelled_result is None: + cancelled_result = S.Zero + cancelled_result += pref * self.eri / multiply(new_denom) + # check if we have something left to cancel + if num.inner.is_number: + if num.inner is not S.Zero: + cancelled_result += \ + pref * self.eri * num / multiply(denom) + break + # return just the term if it was not possible to successfully + # cancel any bracket + assert (cancelled_result is None or + isinstance(cancelled_result, ExprContainer)) + return self.expr if cancelled_result is None else cancelled_result + + # fix the sign of the orbital energies in numerator and denominator: + # occupied orb energies are added, while virtual ones are subtracted + self.canonicalize_sign() + + # do we have something to do? 
+ if self.num.inner.is_number or self.denom.inner.is_number: + return self.expr + + # sort the brackets in the denominator: + # - length of the braket: tiples > doubles + # - rarity of the contained indices: prioritize brackets with target + # indices + denom_indices = Counter(self.denom.idx) + + def bracket_sort_key(bracket: ExprContainer | PolynomContainer): + bracket_indices = bracket.idx + rarest_idx = min(bracket_indices, key=lambda s: denom_indices[s]) + return (-len(bracket), + denom_indices[rarest_idx], + sum(denom_indices[s] for s in bracket_indices)) + denom = sorted(self.denom_brackets, key=bracket_sort_key) + + return cancel(self.num, denom, self.pref) + + def symbolic_denominator(self) -> ExprContainer: + """ + Replaces the explicit orbital energy denominator with a tensor + of the correct symmetry (a SymmetricTensor with bra-ket antisymmetry): + (e_i + e_j - e_a - e_b) -> D^{ij}_{ab}. + """ + if self.denom.inner.is_number: # denom is a number -> nothing to do + return self.denom + + symbolic_denom = ExprContainer(1, **self.denom.assumptions) + has_symbolic_denom = False + for bracket in self.denom_brackets: + signs = {'-': set(), '+': set()} + for term in bracket.terms: + idx = term.idx + if len(idx) != 1: + raise RuntimeError("Expected a denominator bracket to " + "consists of orbital energies that each" + " hold a single index. 
" + f"Found: {term} in {bracket}.") + pref = term.prefactor + if pref is S.One: + signs['+'].add(idx[0]) + elif pref is S.NegativeOne: + signs['-'].add(idx[0]) + else: + raise RuntimeError(f"Found invalid prefactor {pref} in " + f"denominator bracket {bracket}.") + if signs['+'] & signs['-']: + raise RuntimeError(f"Found index that is added and " + f"subtracted in a denominator: {bracket}.") + has_symbolic_denom = True + exponent = ( + S.One if isinstance(bracket, ExprContainer) + else bracket.exponent + ) + symbolic_denom *= Pow(SymmetricTensor( + tensor_names.sym_orb_denom, tuple(signs['+']), + tuple(signs['-']), -1 + ), exponent) + if has_symbolic_denom: + symbolic_denom.antisym_tensors = ( + symbolic_denom.antisym_tensors + (tensor_names.sym_orb_denom,) + ) + return symbolic_denom + + +def factor_and_remove_number(expr: ExprContainer, number) -> ExprContainer: + """ + Factors the given number in the expression and removes it afterwards by + dividing through the number. The operations are performed in place! 
+ """ + expr.factor(num=number) + expr /= number + expr.doit() + expr._inner = nsimplify(expr.inner, rational=True) + return expr + + +####################### +# Usefull type guards # +####################### +def _is_polynom_tuple(sequence: tuple + ) -> TypeGuard[tuple[PolynomContainer, ...]]: + return all(isinstance(item, PolynomContainer) for item in sequence) diff --git a/build/lib/adcgen/expression/__init__.py b/build/lib/adcgen/expression/__init__.py new file mode 100644 index 0000000..d88cab0 --- /dev/null +++ b/build/lib/adcgen/expression/__init__.py @@ -0,0 +1,12 @@ +from .expr_container import ExprContainer +from .normal_ordered_container import NormalOrderedContainer +from .object_container import ObjectContainer +from .polynom_container import PolynomContainer +from .term_container import TermContainer + + +__all__ = [ + "ExprContainer", "NormalOrderedContainer", + "PolynomContainer", + "ObjectContainer", "TermContainer" +] diff --git a/build/lib/adcgen/expression/container.py b/build/lib/adcgen/expression/container.py new file mode 100644 index 0000000..37f0c67 --- /dev/null +++ b/build/lib/adcgen/expression/container.py @@ -0,0 +1,228 @@ +from collections.abc import Iterable +from typing import Any, TYPE_CHECKING + +from sympy import Expr, latex, sympify + +from ..indices import Index, order_substitutions + +# imports only required for type checking (avoid circular imports) +if TYPE_CHECKING: + from .expr_container import ExprContainer + + +class Container: + """ + Base class for all container classes that wrap a native + sympy object. + + Parameters + ---------- + inner: Expr | Container | Any + The algebraic expression to wrap, e.g., a sympy.Add or sympy.Mul object + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. + target_idx: Iterable[Index] | None, optional + Target indices of the expression. By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + + def __init__(self, inner: "Expr | Container | Any", + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | None = None) -> None: + # possibly extract or import the expression to wrap + if isinstance(inner, Container): + inner = inner.inner + if not isinstance(inner, Expr): + inner = sympify(inner) + assert isinstance(inner, Expr) + self._inner: Expr = inner + # set the assumptions + self._real: bool = real + + if isinstance(sym_tensors, str): + sym_tensors = (sym_tensors,) + elif not isinstance(sym_tensors, tuple): + sym_tensors = tuple(sym_tensors) + self._sym_tensors: tuple[str, ...] = sym_tensors + + if isinstance(antisym_tensors, str): + antisym_tensors = (antisym_tensors,) + elif not isinstance(antisym_tensors, tuple): + antisym_tensors = tuple(antisym_tensors) + self._antisym_tensors: tuple[str, ...] = antisym_tensors + if target_idx is not None and not isinstance(target_idx, tuple): + target_idx = tuple(target_idx) + self._target_idx: tuple[Index, ...] 
| None = target_idx + + def __str__(self) -> str: + return latex(self.inner) + + @property + def assumptions(self) -> dict[str, Any]: + return { + "real": self.real, + "sym_tensors": self.sym_tensors, + "antisym_tensors": self.antisym_tensors, + "target_idx": self.provided_target_idx, + } + + @property + def real(self) -> bool: + return self._real + + @property + def sym_tensors(self) -> tuple[str, ...]: + return self._sym_tensors + + @property + def antisym_tensors(self) -> tuple[str, ...]: + return self._antisym_tensors + + @property + def provided_target_idx(self) -> tuple[Index, ...] | None: + return self._target_idx + + @property + def inner(self) -> Expr: + return self._inner + + def permute(self, *perms: tuple[Index, Index]) -> "ExprContainer": + """ + Permute indices by applying permutation operators P_pq. + + Parameters + ---------- + *perms : tuple[Index, Index] + Permutations to apply to the wrapped object. Permutations are + applied one after another in the order they are provided. 
+ """ + sub = {} + for p, q in perms: + addition = {p: q, q: p} + for old, new in sub.items(): + if new is p: + sub[old] = q + del addition[p] + elif new is q: + sub[old] = p + del addition[q] + if addition: + sub.update(addition) + return self.subs(order_substitutions(sub)) + + ################################ + # Forwards some calls to inner # + ################################ + def expand(self) -> "ExprContainer": + """ + Forwards the expand call to inner and wraps the result in a new + Container + """ + from .expr_container import ExprContainer + return ExprContainer(inner=self.inner.expand(), **self.assumptions) + + def doit(self, *args, **kwargs) -> "ExprContainer": + """ + Forwards the doit call to inner and wraps the result in a new Container + """ + from .expr_container import ExprContainer + return ExprContainer( + inner=self.inner.doit(*args, **kwargs), **self.assumptions + ) + + def subs(self, *args, **kwargs) -> "ExprContainer": + """ + Forwards the subs call to inner and wraps the result in a new Container + """ + from .expr_container import ExprContainer + return ExprContainer( + inner=self.inner.subs(*args, **kwargs), **self.assumptions + ) + + ############# + # Operators # + ############# + def __add__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. 
Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + return ExprContainer(self.inner + other, **self.assumptions) + + def __iadd__(self, other: Any) -> "ExprContainer": + return self.__add__(other) + + def __radd__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + # other: some sympy stuff or some number + return ExprContainer(other + self.inner, **self.assumptions) + + def __sub__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + return ExprContainer(self.inner - other, **self.assumptions) + + def __isub__(self, other: Any) -> "ExprContainer": + return self.__sub__(other) + + def __rsub__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + # other: some sympy stuff or some number + return ExprContainer(other - self.inner, **self.assumptions) + + def __mul__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + return ExprContainer(self.inner * other, **self.assumptions) + + def __imul__(self, other: Any) -> "ExprContainer": + return self.__mul__(other) + + def __rmul__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + # other: some sympy stuff or some number + return ExprContainer(other * self.inner, **self.assumptions) + + def __truediv__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. 
Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + return ExprContainer(self.inner / other, **self.assumptions) + + def __itruediv__(self, other: Any) -> "ExprContainer": + return self.__truediv__(other) + + def __rtruediv__(self, other: Any) -> "ExprContainer": + from .expr_container import ExprContainer + # other: some sympy stuff or some number + return ExprContainer(other / self.inner, **self.assumptions) diff --git a/build/lib/adcgen/expression/expr_container.py b/build/lib/adcgen/expression/expr_container.py new file mode 100644 index 0000000..29504e9 --- /dev/null +++ b/build/lib/adcgen/expression/expr_container.py @@ -0,0 +1,555 @@ +from collections.abc import Iterable, Sequence +from typing import Any + +from sympy import Add, Basic, Expr, Mul, Pow, S, Symbol, factor, nsimplify + +from ..indices import ( + Index, get_symbols, sort_idx_canonical, + _is_str_sequence, _is_index_sequence +) +from ..tensor_names import tensor_names +from .container import Container +from .term_container import TermContainer + + +class ExprContainer(Container): + """ + Wraps an arbitrary algebraic expression. + + Parameters + ---------- + inner: + The algebraic expression to wrap, e.g., a sympy.Add or sympy.Mul object + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. + target_idx: Iterable[Index] | None, optional + Target indices of the expression. 
By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + def __init__(self, inner: Expr | Container | Any, + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | Iterable[str] | None = None + ) -> None: + # import target index strings + if target_idx is not None: + if isinstance(target_idx, str) or isinstance(target_idx, Sequence): + target_idx = get_symbols(target_idx) + else: + target_tpl = tuple(target_idx) + assert (_is_str_sequence(target_tpl) or + _is_index_sequence(target_tpl)) + target_idx = get_symbols(target_tpl) + del target_tpl + # set the class attributes and import the inner expression + super().__init__( + inner=inner, real=real, sym_tensors=sym_tensors, + antisym_tensors=antisym_tensors, target_idx=target_idx + ) + # ensure that sym_tensor and antisym_tensor are immutable tuples and + # remove duplicates + self._sym_tensors = tuple(sorted(set(self._sym_tensors))) + self._antisym_tensors = tuple(sorted(set(self._antisym_tensors))) + # Now apply the given assumptions: this only happens in this class + # store target indices as sorted tuple + if self._target_idx is not None: + self.set_target_idx(self._target_idx) + # applying the tensor symmetry has a certain overlap with + # make_real: make_real will try to add ERI and Fock matrix + # to sym_tensor and apply the tensor symmetry (but only + # if the tensors were not already marked as symmetric). + # Therefore, it makes sense to manually add them here + # to avoid applying the tensor symmetry twice. + if self._sym_tensors or self._antisym_tensors: + if self._real: + self._sym_tensors = tuple(sorted(set( + self._sym_tensors + (tensor_names.fock, tensor_names.eri) + ))) + self._apply_tensor_braket_sym() + if self._real: + self.make_real(force=True) + + def __len__(self) -> int: + # ExprContainer(0) also has length 1! 
+ if isinstance(self._inner, Add): + return len(self._inner.args) + else: + return 1 + + def copy(self) -> "ExprContainer": + """ + Creates a new container with the same expression and assumptions. + The wrapped expression will not be copied. + """ + return ExprContainer(self.inner, **self.assumptions) + + @property + def terms(self) -> tuple[TermContainer, ...]: + """ + Returns all terms of the expression, where a term might be a single + tensor 'a' or a product of the form 'a * b * c'. + """ + kwargs = self.assumptions + if isinstance(self._inner, Add): + return tuple( + TermContainer(inner=term, **kwargs) + for term in self._inner.args + ) + else: + return (TermContainer(inner=self._inner, **kwargs),) + + @property + def idx(self) -> tuple[Index, ...]: + """ + Returns all indices that occur in the expression. Indices that occur + multiple times will be listed multiple times. + """ + idx = [s for t in self.terms for s in t.idx] + return tuple(sorted(idx, key=sort_idx_canonical)) + + ############################### + # setters for the assumptions # + ############################### + def set_target_idx(self, target_idx: Sequence[str] | Sequence[Index] | None + ) -> None: + """ + Set the target indices of the expression. Only necessary if the + Einstein sum contension is not sufficient to determine them + automatically. + """ + if target_idx is None: + self._target_idx = target_idx + else: # import the indices + self._target_idx = tuple( + sorted(set(get_symbols(target_idx)), key=sort_idx_canonical) + ) + + @Container.sym_tensors.setter + def sym_tensors(self, tensors: Iterable[str]) -> None: + """ + Add bra-ket-symmetry to tensors according to their name. + Note that it is only formally possible to remove tensors from + sym_tensors, because the original state of a tensor is lost when the + bra-ket-symmetry is applied, i.e., after bra-ket-symmetry was added to + a tensor d^{p}_{q} it is not knwon whether it's original state was + d^{q}_{p} or d^{p}_{q}. 
+ """ + if isinstance(tensors, str): + tensors = {tensors, } + else: + assert all(isinstance(t, str) for t in tensors) + tensors = set(tensors) + + if self.real: + tensors.update([tensor_names.fock, tensor_names.eri]) + tensors = tuple(sorted(tensors)) + if tensors != self._sym_tensors: + self._sym_tensors = tensors + self._apply_tensor_braket_sym() + + @Container.antisym_tensors.setter + def antisym_tensors(self, tensors: Iterable[str]) -> None: + """ + Add bra-ket-antisymmetry to tensors according to their name. + Note that it is only formally possible to remove tensors from + sym_tensors, because the original state of a tensor is lost when the + bra-ket-symmetry is applied, i.e., after bra-ket-antisymmetry was + added to a tensor d^{p}_{q} it is not knwon whether it's original + state was d^{q}_{p} or d^{p}_{q}. + """ + if isinstance(tensors, str): + tensors = (tensors,) + else: + assert all(isinstance(t, str) for t in tensors) + tensors = tuple(sorted(set(tensors))) + + if tensors != self._antisym_tensors: + self._antisym_tensors = tensors + self._apply_tensor_braket_sym() + + ################################################# + # methods that modify the expression (in place) # + ################################################# + def _apply_tensor_braket_sym(self) -> "ExprContainer": + """ + Adds the bra-ket symmetry and antisymmetry defined in + sym_tensors and antisym_tensors to the tensor objects + in the expression. + """ + if self.inner.is_number: + return self + # actually do something + res = S.Zero + for term in self.terms: + res += term._apply_tensor_braket_sym(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + return self + + def make_real(self, force: bool = False) -> "ExprContainer": + """ + Represent the expression in a real orbital basis. + - names of complex conjugate t-amplitudes, for instance t1cc -> t1 + - adds bra-ket-symmetry to the fock matrix and the ERI. 
+ + Parameters + ---------- + force: bool, optional + If set the function will also run also if 'real' is already set. + (default: False) + """ + if (self.real and not force): + return self + # actually so something: first adjust the tensor symmetry + self._real = True + sym_tensors = self._sym_tensors + if tensor_names.fock not in sym_tensors or \ + tensor_names.eri not in sym_tensors: + self._sym_tensors = tuple(sorted(set( + sym_tensors + (tensor_names.fock, tensor_names.eri) + ))) + self._apply_tensor_braket_sym() + if self.inner.is_number: + return self + # and then adjust the tensor names + res = S.Zero + for term in self.terms: + res += term.make_real(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + return self + + def block_diagonalize_fock(self) -> "ExprContainer": + """ + Block diagonalize the Fock matrix, i.e. all terms that contain off + diagonal Fock matrix blocks (f_ov/f_vo) are set to 0. + """ + self.expand() + res = S.Zero + for term in self.terms: + res += term.block_diagonalize_fock(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + return self + + def diagonalize_fock(self) -> "ExprContainer": + """ + Represent the expression in the canonical orbital basis, where the + fock matrix is diagonal. Because it is not possible to + determine the target indices in the resulting expression according + to the Einstein sum convention, the current target indices will + be set manually in the resulting expression. 
+ """ + # expand to get rid of polynoms as much as possible + self.expand() + diag = S.Zero + for term in self.terms: + contrib = term.diagonalize_fock(wrap_result=True) + assert isinstance(contrib, ExprContainer) + diag += contrib + assert isinstance(diag, ExprContainer) + self._inner = diag.inner + self._target_idx = diag.provided_target_idx + return self + + def rename_tensor(self, current: str, new: str) -> 'ExprContainer': + """Changes the name of a tensor from current to new.""" + assert isinstance(current, str) and isinstance(new, str) + + renamed = S.Zero + for term in self.terms: + renamed += term.rename_tensor(current, new, wrap_result=False) + assert isinstance(renamed, Expr) + self._inner = renamed + return self + + def expand_antisym_eri(self) -> 'ExprContainer': + """ + Expands the antisymmetric ERI using chemists notation + = (pr|qs) - (ps|qr). + ERI's in chemists notation are by default denoted as 'v'. + Currently this only works for real orbitals, i.e., + for symmetric ERI's = .""" + res = S.Zero + for term in self.terms: + res += term.expand_antisym_eri(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + # only update the assumptions if there was an eri to expand + if Symbol(tensor_names.coulomb) in self.inner.atoms(Symbol): + self._sym_tensors = tuple(sorted(set( + self._sym_tensors + (tensor_names.coulomb,) + ))) + return self + + def use_explicit_denominators(self) -> 'ExprContainer': + """ + Switch to an explicit representation of orbital energy denominators by + replacing all symbolic denominators by their explicit counter part, + i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. 
+ """ + res = S.Zero + for term in self.terms: + res += term.use_explicit_denominators(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + # remove the symbolic denom from the assumptions if necessary + if tensor_names.sym_orb_denom in self._antisym_tensors: + self._antisym_tensors = tuple( + t for t in self._antisym_tensors + if t != tensor_names.sym_orb_denom + ) + return self + + def substitute_contracted(self) -> 'ExprContainer': + """ + Tries to substitute all contracted indices with pretty indices, i.e. + i, j, k instad of i3, n4, o42 etc. + """ + self.expand() + res = S.Zero + for term in self.terms: + contrib = term.substitute_contracted(wrap_result=False) + assert isinstance(contrib, Expr) + res += contrib + self._inner = res + return self + + def substitute_with_generic(self) -> 'ExprContainer': + """ + Subsitutes all contracted indices with new, unused generic indices. + """ + self.expand() + res = S.Zero + for term in self.terms: + res += term.substitute_with_generic(wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + return self + + def factor(self, num=None) -> 'ExprContainer': + """ + Tries to factors the expression. Note: this only works for simple cases + + Parameters + ---------- + num : optional + Number to factor in the expression. + """ + + if num is None: + res = factor(self.inner) + else: + num = nsimplify(num, rational=True) + factored = map( + lambda t: Mul(nsimplify(Pow(num, -1), rational=True), t.inner), + self.terms + ) + res = Mul(num, Add(*factored), evaluate=False) + assert isinstance(res, Expr) + self._inner = res + return self + + def expand_intermediates(self, fully_expand: bool = True + ) -> 'ExprContainer': + """ + Expand the known intermediates in the expression. 
+ + Parameters + ---------- + fully_expand: bool, optional + True (default): The intermediates are recursively expanded + into orbital energies and ERI (if possible) + False: The intermediates are only expanded once, e.g., n'th + order MP t-amplitudes are expressed by means of (n-1)'th order + MP t-amplitudes and ERI. + """ + # TODO: only expand specific intermediates + # need to adjust the target indices -> not necessarily possible to + # determine them after expanding intermediates + expanded = S.Zero + for t in self.terms: + expanded += t.expand_intermediates(fully_expand=fully_expand) + assert isinstance(expanded, ExprContainer) + self._inner = expanded.inner + self.set_target_idx(expanded.provided_target_idx) + return self + + def use_symbolic_denominators(self) -> "ExprContainer": + """ + Replace all orbital energy denominators in the expression by tensors, + e.g., (e_a + e_b - e_i - e_j)^{-1} will be replaced by D^{ab}_{ij}, + where D is a SymmetricTensor. + """ + symbolic_denom = S.Zero + has_symbolic_denom = False + for term in self.terms: + term = term.use_symbolic_denominators() + symbolic_denom += term.inner + if tensor_names.sym_orb_denom in term.antisym_tensors: + has_symbolic_denom = True + # the symbolic denominators have additional antisymmetry + # for bra ket swaps + # -> this is the only possible change in the assumptions + # -> only set if we replaced a denominator in the expr + assert isinstance(symbolic_denom, Expr) + self._inner = symbolic_denom + if has_symbolic_denom: + self._antisym_tensors = tuple(sorted(set( + self._antisym_tensors + (tensor_names.sym_orb_denom,) + ))) + return self + + ########################################################### + # Overwrite parent class methods for inplace modification # + ########################################################### + def expand(self): + """ + Forwards the expand call to inner replacing the wrapped + expression + """ + res = self._inner.expand() + assert isinstance(res, Expr) + 
self._inner = res + return self + + def doit(self, *args, **kwargs): + """ + Forwards the doit call to inner replacing the wrapped + expression + """ + res = self._inner.doit(*args, **kwargs) + assert isinstance(res, Expr) + self._inner = res + return self + + def subs(self, *args, **kwargs): + """ + Forwards the subs call to inner replacing the wrapped + expression + """ + res = self._inner.subs(*args, **kwargs) + assert isinstance(res, Expr) + self._inner = res + return self + + ####################################### + # Operators for in-place modification # + ####################################### + def __iadd__(self, other: Any) -> "ExprContainer": + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + elif isinstance(other, Basic): + # Apply the assumptions to the sympy object + other = ExprContainer(other, **self.assumptions).inner + res = self.inner + other + assert isinstance(res, Expr) + self._inner = res + return self + + def __isub__(self, other: Any) -> "ExprContainer": + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + elif isinstance(other, Basic): + # Apply the assumptions to the sympy object + other = ExprContainer(other, **self.assumptions).inner + res = self.inner - other + assert isinstance(res, Expr) + self._inner = res + return self + + def __imul__(self, other: Any) -> "ExprContainer": + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. 
Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + elif isinstance(other, Basic): + # Apply the assumptions to the sympy object + other = ExprContainer(other, **self.assumptions).inner + res = self.inner * other + assert isinstance(res, Expr) + self._inner = res + return self + + def __itruediv__(self, other: Any) -> "ExprContainer": + if isinstance(other, Container): + if self.assumptions != other.assumptions: + raise TypeError("Assumptions need to be equal. Got: " + f"{self.assumptions} and {other.assumptions}") + other = other.inner + elif isinstance(other, Basic): + other = ExprContainer(other, **self.assumptions).inner + res = self.inner / other + assert isinstance(res, Expr) + self._inner = res + return self + + def to_latex_str(self, terms_per_line: int | None = None, + only_pull_out_pref: bool = False, + spin_as_overbar: bool = False) -> str: + """ + Transforms the expression to a latex string. + + Parameters + ---------- + terms_per_line: int, optional + Returns the expression using the syntax from an 'align' + environment with the provided number of terms per line. + only_pull_out_pref: bool, optional + Use the 'latex' printout from sympy, while prefactors are printed + in front of each term. This avoids long fractions with a huge + number of tensors in the numerator and only a factor in the + denominator. + spin_as_overbar: bool, optional + Instead of printing the spin of an index as suffix (idxname_spin) + use an overbar for beta spin and no indication for alpha. Because + alpha indices and indices without spin are not distinguishable + anymore, this only works if all indices have a spin set (the + expression is completely represented in spatial orbitals). 
+ """ + tex_terms = [ + term.to_latex_str(only_pull_out_pref, spin_as_overbar) + for term in self.terms + ] + # remove '+' in the first term + if tex_terms[0].lstrip().startswith("+"): + tex_terms[0] = tex_terms[0].replace('+', '', 1).lstrip() + # just the raw output without linebreaks + if terms_per_line is None: + return " ".join(tex_terms) + assert isinstance(terms_per_line, int) + # only print x terms per line in an align environment + # create the string of all but the last line + tex_string = "" + for i in range(0, len(tex_terms) - terms_per_line, terms_per_line): + tex_string += ( + "& " + " ".join(tex_terms[i:i+terms_per_line]) + + " \\nonumber\\\\\n" + ) + # add the last line. Could ommit this if the equation is not supposed + # to have a number. + if len(tex_terms) % terms_per_line: + remaining = len(tex_terms) % terms_per_line + else: + remaining = terms_per_line + tex_string += "& " + " ".join(tex_terms[-remaining:]) + return tex_string diff --git a/build/lib/adcgen/expression/normal_ordered_container.py b/build/lib/adcgen/expression/normal_ordered_container.py new file mode 100644 index 0000000..4989bc5 --- /dev/null +++ b/build/lib/adcgen/expression/normal_ordered_container.py @@ -0,0 +1,199 @@ +from collections.abc import Iterable, Sequence +from functools import cached_property +from typing import Any +import itertools + +from sympy.physics.secondquant import F, Fd, NO +from sympy import Expr, Mul + +from ..indices import Index +from ..misc import cached_member +from .container import Container +from .object_container import ObjectContainer + + +class NormalOrderedContainer(ObjectContainer): + """ + Wrapper for a normal ordered operator string. + + Parameters + ---------- + inner: + The NO object to wrap + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. + target_idx: Iterable[Index] | None, optional + Target indices of the expression. By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + def __init__(self, inner: Expr | Container | Any, + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | None = None) -> None: + # call init from ObjectContainers parent class + super(ObjectContainer, self).__init__( + inner=inner, real=real, sym_tensors=sym_tensors, + antisym_tensors=antisym_tensors, target_idx=target_idx + ) + assert isinstance(self._inner, NO) + + def __len__(self) -> int: + return len(self._extract_operators.args) + + #################################### + # Some helpers for accessing inner # + #################################### + @property + def _extract_operators(self) -> Expr: + operators = self._inner.args[0] + assert isinstance(operators, Mul) + return operators + + @cached_property + def objects(self) -> tuple["ObjectContainer", ...]: + return tuple( + ObjectContainer(op, **self.assumptions) + for op in self._extract_operators.args + ) + + @property + def exponent(self) -> Expr: + # actually sympy should throw an error if a NO object contains a Pow + # obj or anything else than a*b*c + exp = set(o.exponent for o in self.objects) + if len(exp) == 1: + return exp.pop() + else: + raise NotImplementedError( + 'Exponent only implemented for NO objects, where all ' + f'operators share the same exponent. 
{self}' + ) + + @cached_property + def idx(self) -> tuple[Index, ...]: + """ + Indices of the normal ordered operator string. Indices that appear + multiple times will be listed multiple times. + """ + objects = self.objects + exp = self.exponent + assert exp.is_Integer + exp = int(exp) + ret = tuple(s for o in objects for s in o.idx for _ in range(exp)) + if len(objects) != len(ret): + raise NotImplementedError('Expected a NO object only to contain' + "second quantized operators with an " + f"exponent of 1. {self}") + return ret + + ################################################ + # compute additional properties for the object # + ################################################ + @property + def type_as_str(self) -> str: + return 'NormalOrdered' + + @cached_member + def description(self, target_idx: Sequence[Index] | None = None, + include_exponent: bool = True) -> str: + """ + Generates a string that describes the operators. + + Parameters + ---------- + target_idx: Sequence[Index] | None, optional + The target indices of the term the operators are a part of. + If given, the explicit names of target indices will be + included in the description. + include_exponent: bool, optional + If set the exponent of the object will be included in the + description. 
(default: True) + """ + # exponent has to be 1 for all contained operators + assert self.exponent == 1 + _ = include_exponent + + obj_contribs = [] + for o in self.objects: + # add either index space or target idx name + idx = o.idx + assert len(idx) == 1 + idx = idx[0] + if target_idx is not None and idx in target_idx: + op_str = f"{idx.name}_{idx.space[0]}{idx.spin}" + else: + op_str = idx.space[0] + idx.spin + # add a plus for creation operators + base = o.base + if isinstance(base, Fd): + op_str += '+' + elif not isinstance(base, F): # has to be annihilate here + raise TypeError("Unexpected content for " + f"NormalOrderedContainer: {o}, {type(o)}.") + obj_contribs.append(op_str) + return f"{self.type_as_str}-{'-'.join(sorted(obj_contribs))}" + + @cached_member + def crude_pos(self, target_idx: Sequence[Index] | None = None, + include_exponent: bool = True) -> dict[Index, list[str]]: + """ + Returns the 'crude' position of the indices in the operator string. + + Parameters + ---------- + target_idx: Sequence[Index] | None, optional + The target indices of the term the operators are a part of. + If given, the names of target indices will be included in + the positions. + include_exponent: bool, optional + If set the exponent of the object will be considered in the + positions. (default: True) + """ + + descr = self.description( + target_idx=target_idx, include_exponent=include_exponent + ) + ret = {} + for o in self.objects: + o_descr = o.description( + target_idx=target_idx, include_exponent=include_exponent + ) + idx = o.idx + assert len(idx) == 1 + idx = idx[0] + if idx not in ret: + ret[idx] = [] + ret[idx].append(f"{descr}_{o_descr}") + return ret + + @property + def allowed_spin_blocks(self) -> tuple[str, ...]: + """ + Returns the valid spin blocks of the operator string. 
+ """ + allowed_blocks = [] + for obj in self.objects: + blocks = obj.allowed_spin_blocks + if blocks is not None: + allowed_blocks.append(blocks) + return tuple("".join(b) for b in itertools.product(*allowed_blocks)) + + def to_latex_str(self, only_pull_out_pref: bool = False, + spin_as_overbar: bool = False) -> str: + """Returns a latex string for the object.""" + return " ".join( + o.to_latex_str(only_pull_out_pref, spin_as_overbar) + for o in self.objects + ) diff --git a/build/lib/adcgen/expression/object_container.py b/build/lib/adcgen/expression/object_container.py new file mode 100644 index 0000000..46e6799 --- /dev/null +++ b/build/lib/adcgen/expression/object_container.py @@ -0,0 +1,879 @@ +from collections.abc import Iterable +from functools import cached_property +from typing import Any, Sequence, TYPE_CHECKING +import itertools + +from sympy.physics.secondquant import F, Fd, FermionicOperator, NO +from sympy import Add, Expr, Mul, Number, Pow, S, Symbol, latex, sympify + +from ..indices import Index, _is_index_tuple +from ..logger import logger +from ..misc import cached_member +from ..sympy_objects import ( + Amplitude, AntiSymmetricTensor, KroneckerDelta, NonSymmetricTensor, + SymbolicTensor, SymmetricTensor +) +from ..tensor_names import ( + is_adc_amplitude, is_t_amplitude, is_gs_density, split_gs_density_name, + split_t_amplitude_name, tensor_names +) +from .container import Container +# imports only required for type checking (avoid circular imports) +if TYPE_CHECKING: + from .expr_container import ExprContainer + + +class ObjectContainer(Container): + """ + Wrapper for a single object, e.g., a tensor that is part of a term. + + Parameters + ---------- + inner: + The object to wrap, e.g., an AntiSymmetricTensor + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. + target_idx: Iterable[Index] | None, optional + Target indices of the expression. By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + def __init__(self, inner: Expr | Container | Any, + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | None = None) -> None: + super().__init__( + inner=inner, real=real, sym_tensors=sym_tensors, + antisym_tensors=antisym_tensors, target_idx=target_idx + ) + # we can not wrap an Add object: should be wrapped by ExprContainer + # we can not wrap an Mul object: should be wrapped by TermContainer + # we can not wrap an NO object: should be wrapped by + # NormalOrderedContainer + # we can not wrap a polynom: should be wrapped by PolynomContainer + # But everything else should be fine (single objects) + assert not isinstance(self._inner, (Add, Mul, NO)) + if isinstance(self._inner, Pow): # polynom + assert not isinstance(self._inner.args[0], Add) + + #################################### + # Some helpers for accessing inner # + #################################### + @property + def base(self) -> Expr: + """ + Returns the base of an :py:class:`Pow` object (base^exp) - if we have + a Pow object. Otherwise the object itself is returned. + """ + if isinstance(self.inner, Pow): + return self.inner.args[0] + else: + return self.inner + + @property + def exponent(self) -> Expr: + """ + Returns the exponent of an :py:class:`Pow` object (base^exp). 
+ """ + if isinstance(self.inner, Pow): + return self.inner.args[1] + else: + return sympify(1) + + @property + def base_and_exponent(self) -> tuple[Expr, Expr]: + """Return base and exponent of the object.""" + base = self.inner + if isinstance(base, Pow): + return base.args + else: + return base, sympify(1) + + @property + def name(self) -> str | None: + """Extract the name of tensor objects.""" + base = self.base + if isinstance(base, SymbolicTensor): + return base.name + return None + + @property + def is_t_amplitude(self) -> bool: + """Whether the object is a ground state t-amplitude.""" + name = self.name + return False if name is None else is_t_amplitude(name) + + @property + def is_gs_density(self) -> bool: + """Check whether the object is a ground state density tensor.""" + name = self.name + return False if name is None else is_gs_density(name) + + @property + def is_orbital_energy(self) -> bool: + """Whether the object is a orbital energy tensor.""" + # all orb energies should be nonsym_tensors actually + return self.name == tensor_names.orb_energy and len(self.idx) == 1 + + @property + def contains_only_orb_energies(self) -> bool: + """Whether the object is a orbital energy tensor.""" + # To have a common interface with e.g. 
Polynoms + return self.is_orbital_energy + + @cached_property + def idx(self) -> tuple[Index, ...]: + """Return the indices of the object.""" + if self.inner.is_number: # prefactor + return tuple() + obj = self.base + # Antisym-, Sym-, Nonsymtensor, Amplitude, Kroneckerdelta + if isinstance(obj, (SymbolicTensor, KroneckerDelta)): + return obj.idx + elif isinstance(obj, FermionicOperator): # F and Fd + idx = obj.args + assert _is_index_tuple(idx) + return idx + elif isinstance(obj, Symbol): # a symbol without indices + return tuple() + else: + raise TypeError("Can not determine the indices for an obj of type" + f"{type(obj)}: {self}.") + + @property + def space(self) -> str: + """Returns the index space (tensor block) of the object.""" + return "".join(s.space[0] for s in self.idx) + + @property + def spin(self) -> str: + """Returns the spin block of the current object.""" + return "".join(s.spin if s.spin else "n" for s in self.idx) + + ################################################ + # compute additional properties for the object # + ################################################ + @property + def type_as_str(self) -> str: + """Returns a string that describes the type of the object.""" + if self.inner.is_number: + return "prefactor" + obj = self.base + if isinstance(obj, Amplitude): + return "amplitude" + elif isinstance(obj, SymmetricTensor): + return "symtensor" + elif isinstance(obj, AntiSymmetricTensor): + return "antisymtensor" + elif isinstance(obj, NonSymmetricTensor): + return "nonsymtensor" + elif isinstance(obj, KroneckerDelta): + return "delta" + elif isinstance(obj, F): + return "annihilate" + elif isinstance(obj, Fd): + return "create" + elif isinstance(obj, Symbol): + return "symbol" + else: + raise TypeError(f"Unknown object {self} of type {type(obj)}.") + + def longname(self, use_default_names: bool = False) -> str | None: + """ + Returns a more exhaustive name of the object. Used for intermediates + and transformation to code. 
+ + Parameters + ---------- + use_default_names: bool, optional + If set, the default names are used to generate the longname. + This is necessary to e.g., map a tensor name to an intermediate + name, since they are defined using the default names. + (default: False) + """ + if any(s.spin for s in self.idx): + logger.warning("Longname only covers the space of indices. The " + "spin is omitted.") + name = None + base = self.base + if isinstance(base, SymbolicTensor): + name = base.name + # t-amplitudes + if is_t_amplitude(name): + assert isinstance(base, Amplitude) + if len(base.upper) != len(base.lower): + raise RuntimeError("Number of upper and lower indices not " + f"equal for t-amplitude {self}.") + base_name, ext = split_t_amplitude_name(name) + if use_default_names: + base_name = tensor_names.defaults().get("gs_amplitude") + assert base_name is not None + if ext: + name = f"{base_name}{len(base.upper)}_{ext}" + else: # name for t-amplitudes without a order + name = f"{base_name}{len(base.upper)}" + elif is_adc_amplitude(name): # adc amplitudes + assert isinstance(base, Amplitude) + # need to determine the excitation space as int + space = self.space + assert all(sp in ["o", "v", "c"] for sp in space) + n_o = space.count("o") + space.count("c") + n_v = space.count("v") + if n_o == n_v: # pp-ADC + n = n_o # p-h -> 1 // 2p-2h -> 2 etc. + else: # ip-/ea-/dip-/dea-ADC + n = min([n_o, n_v]) + 1 # h -> 1 / 2h -> 1 / p-2h -> 2... 
+ lr = "l" if name == tensor_names.left_adc_amplitude else 'r' + name = f"u{lr}{n}" + elif is_gs_density(name): # mp densities + assert isinstance(base, AntiSymmetricTensor) + if len(base.upper) != len(base.lower): + raise RuntimeError("Number of upper and lower indices not " + f"equal for mp density {self}.") + base_name, ext = split_gs_density_name(name) + if use_default_names: + base_name = tensor_names.defaults().get("gs_density") + assert base_name is not None + if ext: + name = f"{base_name}0_{ext}_{self.space}" + else: # name for gs-dentity without a order + name = f"{base_name}0_{self.space}" + elif name.startswith('t2eri'): # t2eri + name = f"t2eri_{name[5:]}" + elif name == 't2sq': + pass + else: # arbitrary other tensor + name += f"_{self.space}" + elif isinstance(base, KroneckerDelta): # deltas -> d_oo / d_vv + name = f"d_{self.space}" + return name + + @cached_property + def order(self) -> int: + """ + Returns the perturbation theoretical order of the object (tensor). + """ + from ..intermediates import Intermediates + + if isinstance(self.base, SymbolicTensor): + name = self.name + assert name is not None + if name == tensor_names.eri: # eri + return 1 + elif is_t_amplitude(name): + _, ext = split_t_amplitude_name(name) + return int(ext.replace('c', '')) + elif is_gs_density(name): + # we might have p / p2 / p3 / ... + _, ext = split_gs_density_name(name) + if ext: + return int(ext) + # all intermediates + longname = self.longname(True) + assert longname is not None + itmd_cls = Intermediates().available.get(longname, None) + if itmd_cls is not None: + return itmd_cls.order + return 0 + + @cached_member + def description(self, target_idx: Sequence[Index] | None = None, + include_exponent: bool = True) -> str: + """ + Generates a string that describes the object. + + Parameters + ---------- + target_idx: Sequence[Index] | None, optional + The target indices of the term the object is a part of. 
+ If given, the explicit names of target indices will be + included in the description. + include_exponent: bool, optional + If set the exponent of the object will be included in the + description. (default: True) + """ + + descr = [self.type_as_str] + if descr[0] in ["prefactor", "symbol"]: + return descr[0] + + if descr[0] in ["antisymtensor", "amplitude", "symtensor"]: + base, exponent = self.base_and_exponent + assert isinstance(base, AntiSymmetricTensor) + # - space separated in upper and lower part + upper, lower = base.upper, base.lower + assert _is_index_tuple(upper) and _is_index_tuple(lower) + data_u = "".join(s.space[0] + s.spin for s in upper) + data_l = "".join(s.space[0] + s.spin for s in lower) + descr.append(f"{base.name}-{data_u}-{data_l}") + # names of target indices, also separated in upper and lower part + # indices in upper and lower have been sorted upon tensor creation! + if target_idx is not None: + target_u = "".join(s.name for s in upper if s in target_idx) + target_l = "".join(s.name for s in lower if s in target_idx) + if target_l or target_u: # we have at least 1 target idx + if base.bra_ket_sym is S.Zero: # no bra ket symmetry + if not target_u: + target_u = "none" + if not target_l: + target_l = "none" + descr.append(f"{target_u}-{target_l}") + else: # bra ket sym or antisym + # target indices in both spaces + if target_u and target_l: + descr.extend(sorted([target_u, target_l])) + else: # only in 1 space at least 1 target idx + descr.append(target_u + target_l) + if include_exponent: # add exponent to description + descr.append(str(exponent)) + elif descr[0] == "nonsymtensor": + data = "".join(s.space[0] + s.spin for s in self.idx) + descr.append(f"{self.name}-{data}") + if target_idx is not None: + target_str = "".join( + s.name + str(i) for i, s in + enumerate(self.idx) if s in target_idx + ) + if target_str: + descr.append(target_str) + if include_exponent: + descr.append(str(self.exponent)) + elif descr[0] in ["delta", 
"annihilate", "create"]: + data = "".join(s.space[0] + s.spin for s in self.idx) + descr.append(data) + if target_idx is not None: + target_str = "".join( + s.name for s in self.idx if s in target_idx + ) + if target_str: + descr.append(target_str) + if include_exponent: + descr.append(str(self.exponent)) + else: + raise ValueError(f"Unknown object {self} of type {descr[0]}") + return "-".join(descr) + + @cached_member + def crude_pos(self, target_idx: Sequence[Index] | None = None, + include_exponent: bool = True) -> dict[Index, list[str]]: + """ + Returns the 'crude' position of the indices in the object. + (e.g. only if they are located in bra/ket, not the exact position). + + Parameters + ---------- + target_idx: Sequence[Index] | None, optional + The target indices of the term the object is a part of. + If given, the names of target indices will be included in + the positions. + include_exponent: bool, optional + If set the exponent of the object will be considered in the + positions. 
(default: True) + """ + if not self.idx: # just a number (prefactor or symbol) + return {} + + ret = {} + description = self.description( + include_exponent=include_exponent, target_idx=target_idx + ) + obj = self.base + # antisym-, symtensor and amplitude + if isinstance(obj, AntiSymmetricTensor): + for uplo, idx_tpl in (("u", obj.upper), ("l", obj.lower)): + assert _is_index_tuple(idx_tpl) + for s in idx_tpl: + # space (upper/lower) in which the tensor occurs + pos = [description] + if obj.bra_ket_sym is S.Zero: + pos.append(uplo) + # space (occ/virt) of neighbour indices + neighbours = [i for i in idx_tpl if i is not s] + if neighbours: + neighbour_data = "".join( + i.space[0] + i.spin for i in neighbours + ) + pos.append(neighbour_data) + # names of neighbour target indices + if target_idx is not None: + neighbour_target = [ + i.name for i in neighbours if i in target_idx + ] + if neighbour_target: + pos.append("".join(neighbour_target)) + if s not in ret: + ret[s] = [] + ret[s].append("-".join(pos)) + elif isinstance(obj, NonSymmetricTensor): # nonsymtensor + # target idx position is already in the description + idx = self.idx + for i, s in enumerate(idx): + if s not in ret: + ret[s] = [] + ret[s].append(f"{description}_{i}") + # delta, create, annihilate + elif isinstance(obj, (KroneckerDelta, F, Fd)): + for s in self.idx: + if s not in ret: + ret[s] = [] + ret[s].append(description) + else: + raise ValueError(f"Unknown object {self} of type {type(obj)}") + return ret + + @property + def allowed_spin_blocks(self) -> tuple[str, ...] | None: + """ + Returns the valid spin blocks of the object. 
+ """ + from ..intermediates import Intermediates, RegisteredIntermediate + + # prefactor or symbol have no indices -> no allowed spin blocks + if not self.idx: + return None + + obj = self.base + # antisym-, sym-, nonsymtensor and amplitude + if isinstance(obj, SymbolicTensor): + name = obj.name + if name == tensor_names.eri: # hardcode the ERI spin blocks + return ("aaaa", "abab", "abba", "baab", "baba", "bbbb") + # t-amplitudes: all spin conserving spin blocks are allowed, i.e., + # all blocks with the same amount of alpha and beta indices + # in upper and lower + elif is_t_amplitude(name): + idx = obj.idx + assert not len(idx) % 2 + n = len(idx)//2 + return tuple(sorted([ + "".join(block) for block in + itertools.product("ab", repeat=len(idx)) + if block[:n].count("a") == block[n:].count("a") + ])) + elif name == tensor_names.coulomb: # ERI in chemist notation + return ("aaaa", "aabb", "bbaa", "bbbb") + elif isinstance(obj, KroneckerDelta): # delta + # spins have to be equal + return ("aa", "bb") + elif isinstance(obj, FermionicOperator): # create / annihilate + # both spins allowed! + return ("a", "b") + # the known allowed spin blocks of eri, t-amplitudes and deltas + # may be used to generate the spin blocks of other intermediates + longname = self.longname(True) + assert longname is not None + itmd = Intermediates().available.get(longname, None) + if itmd is None: + logger.warning( + f"Could not determine valid spin blocks for {self}." 
+ ) + return None + assert isinstance(itmd, RegisteredIntermediate) + return itmd.allowed_spin_blocks + + def to_latex_str(self, only_pull_out_pref: bool = False, + spin_as_overbar: bool = False) -> str: + """Returns a latex string for the object.""" + + def format_indices(indices) -> str: + if spin_as_overbar: + spins = [s.spin for s in indices] + if any(spins) and not all(spins): + raise ValueError("All indices have to have a spin " + "assigned in order to differentiate " + "indices without spin from indices with " + f"alpha spin: {self}") + return "".join( + f"\\overline{{{i.name}}}" if s == "b" else i.name + for i, s in zip(indices, spins) + ) + else: + return "".join(latex(i) for i in indices) + + if only_pull_out_pref: # use sympy latex print + return self.__str__() + + name = self.name + obj, exp = self.base_and_exponent + if isinstance(obj, SymbolicTensor): + assert name is not None + special_tensors = { + tensor_names.eri: ( # antisym ERI physicist + lambda up, lo: f"\\langle {up}\\vert\\vert {lo}\\rangle" + ), + tensor_names.fock: ( # fock matrix + lambda up, lo: f"{tensor_names.fock}_{{{up}{lo}}}" + ), + # coulomb integral chemist notation + tensor_names.coulomb: lambda up, lo: f"({up}\\vert {lo})", + # 2e3c integral in asymmetric RI + tensor_names.ri_asym_eri: lambda up, lo: f"({up}\\vert {lo})", + # orbital energy + tensor_names.orb_energy: lambda _, lo: f"\\varepsilon_{{{lo}}}" + } + # convert the indices to string + if isinstance(obj, AntiSymmetricTensor): + upper = format_indices(obj.upper) + lower = format_indices(obj.lower) + elif isinstance(obj, NonSymmetricTensor): + upper, lower = None, format_indices(obj.indices) + else: + raise TypeError(f"Unknown tensor object {obj} of type " + f"{type(obj)}") + + if name in special_tensors: + tex_str = special_tensors[name](upper, lower) + else: + order_str = None + if is_t_amplitude(name): # mp t-amplitudes + base_name, ext = split_t_amplitude_name(name) + if "c" in ext: + order_str = f"({ext.replace('c', 
'')})\\ast" + else: + order_str = f"({ext})" + order_str = f"}}^{{{order_str}}}" + name = f"{{{base_name}" + elif is_gs_density(name): # mp densities + _, ext = split_gs_density_name(name) + order_str = f"}}^{{({ext})}}" + name = "{\\rho" + + tex_str = name + if upper is not None: + tex_str += f"^{{{upper}}}" + tex_str += f"_{{{lower}}}" + + # append pt order for amplitude and mp densities + if order_str is not None: + tex_str += order_str + elif isinstance(obj, KroneckerDelta): + tex_str = f"\\delta_{{{format_indices(obj.idx)}}}" + elif isinstance(obj, F): # annihilate + tex_str = f"a_{{{format_indices(obj.args)}}}" + elif isinstance(obj, Fd): # create + tex_str = f"a^\\dagger_{{{format_indices(obj.args)}}}" + else: + return self.__str__() + + if exp != 1: + # special case for ERI and coulomb + if name in [tensor_names.eri, tensor_names.coulomb]: + tex_str += f"^{{{exp}}}" + else: + tex_str = f"\\bigl({tex_str}\\bigr)^{{{exp}}}" + return tex_str + + ################################### + # methods manipulating the object # + ################################### + def _apply_tensor_braket_sym(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Applies the bra-ket symmetry defined in sym_tensors and antisym_tensors + to the current object. If wrap_result is set, the new object will be + wrapped by :py:class:`ExprContainer`. 
+ """ + from .expr_container import ExprContainer + + obj = self.inner + base, exponent = self.base_and_exponent + # antisymtensor, symtensor or amplitude + if isinstance(base, AntiSymmetricTensor): + name = base.name + braketsym: None | Number = None + if name in self.sym_tensors and base.bra_ket_sym is not S.One: + braketsym = S.One + elif name in self.antisym_tensors and \ + base.bra_ket_sym is not S.NegativeOne: + braketsym = S.NegativeOne + if braketsym is not None: + obj = Pow( + base.add_bra_ket_sym(braketsym), + exponent + ) + if wrap_result: + obj = ExprContainer(inner=obj, **self.assumptions) + return obj + + def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr": + """ + Represent the object in a real orbital basis by renaming the + complex conjugate t-amplitudes, for instance 't1cc' -> 't1'. + + Parameters + ---------- + wrap_result: bool, optional + If set the result will be wrapped with + :py:class:`ExprContainer`. Otherwise the unwrapped + object is returned. (default: True) + """ + from .expr_container import ExprContainer + + real_obj = self.inner + if self.is_t_amplitude: + old = self.name + assert old is not None + base_name, ext = split_t_amplitude_name(old) + new = f"{base_name}{ext.replace('c', '')}" + if old != new: # only rename when name changes + base, exponent = self.base_and_exponent + assert isinstance(base, Amplitude) + real_obj = Pow( + Amplitude(new, base.upper, base.lower, base.bra_ket_sym), + exponent + ) + if wrap_result: + kwargs = self.assumptions + kwargs["real"] = True + real_obj = ExprContainer(real_obj, **kwargs) + return real_obj + + def block_diagonalize_fock(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Block diagonalize the Fock matrix, i.e. if the object is part of an + off-diagonal fock matrix block, it is set to 0. + + Parameters + ---------- + wrap_result: bool, optional + If this is set the result will be wrapped with an + :py:class:`ExprContainer`. 
(default: True) + """ + from .expr_container import ExprContainer + + bl_diag = self.inner + if self.name == tensor_names.fock: + sp1, sp2 = self.space + if sp1 != sp2: + bl_diag = S.Zero + if wrap_result: + bl_diag = ExprContainer(bl_diag, **self.assumptions) + return bl_diag + + def diagonalize_fock(self, target: Sequence[Index], + wrap_result: bool = False + ) -> tuple["ExprContainer | Expr", dict[Index, Index]]: # noqa E501 + """ + Diagonalize the fock matrix, i.e., if the object is a fock matrix + element it is replaced by an orbital energy - but only if no + information is lost. + If the result is wrapped, the target indices will be set in the + resulting expression, because it might not be possible to + determine them according to the einstein sum convention + (f_ij X_j -> e_i X_i). + """ + from ..func import evaluate_deltas + + def pack_result(diag, sub, target): + if wrap_result: + assumptions = self.assumptions + assumptions["target_idx"] = target + diag = Expr(diag, **assumptions) + return diag, sub + + if self.name != tensor_names.fock: # no fock matrix + return pack_result(self.inner, {}, target) + # build a delta with the fock indices + p, q = self.idx + delta = KroneckerDelta(p, q) + if delta is S.Zero: # off diagonal block + assert isinstance(delta, Number) + return pack_result(delta, {}, target) + elif delta is S.One: + # diagonal fock element: if we evaluate it, we might loose a + # contracted index. 
+ return pack_result(self.inner, {}, target) + # try to evaluate the delta + result = evaluate_deltas(Mul(self.inner, delta), target_idx=target) + if isinstance(result, Mul): # could not evaluate + return pack_result(self.inner, {}, target) + # check which of the indices survived + remaining_idx = result.atoms(Index) + assert len(remaining_idx) == 1 # only one of the indices can survive + remaining_idx = remaining_idx.pop() + # dict holding the necessary index substitution + sub = {} + if p is remaining_idx: # p survived + sub[q] = p + else: # q surived + assert q is remaining_idx + sub[p] = q + diag = Pow( + NonSymmetricTensor(tensor_names.orb_energy, (remaining_idx,)), + self.exponent + ) + return pack_result(diag, sub, target) + + def rename_tensor(self, current: str, new: str, + wrap_result: bool = True) -> "ExprContainer | Expr": + """ + Renames a tensor object with name 'current' to 'new'. If wrap_result + is set, the result will be wrapped with an :py:class:`ExprContainer`. + """ + from .expr_container import ExprContainer + + obj = self.inner + base, exponent = self.base_and_exponent + if isinstance(base, SymbolicTensor) and base.name == current: + if isinstance(base, AntiSymmetricTensor): + # antisym, amplitude, symmetric + base = base.__class__( + new, base.upper, base.lower, base.bra_ket_sym + ) + elif isinstance(base, NonSymmetricTensor): + # nonsymmetric + base = base.__class__(new, base.indices) + else: + raise TypeError(f"Unknown tensor type {type(base)}.") + obj = Pow(base, exponent) + if wrap_result: + obj = ExprContainer(obj, **self.assumptions) + return obj + + def expand_antisym_eri(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Expands the antisymmetric ERI using chemists notation + = (pr|qs) - (ps|qr). + ERI's in chemists notation are by default denoted as 'v'. + Currently this only works for real orbitals, i.e., + for symmetric ERI's = . 
+ """ + from .expr_container import ExprContainer + + expanded_coulomb = False + res = self.inner + base, exponent = self.base_and_exponent + if isinstance(base, AntiSymmetricTensor) and \ + base.name == tensor_names.eri: + # ensure that the eri is Symmetric. Otherwise we would introduce + # additional unwanted symmetry in the result + if base.bra_ket_sym != 1: + raise NotImplementedError("Can only expand antisymmetric ERI " + "with bra-ket symmetry " + "(real orbitals).") + p, q, r, s = self.idx # + res = S.Zero + if p.spin == r.spin and q.spin == s.spin: + res += SymmetricTensor(tensor_names.coulomb, (p, r), (q, s), 1) + expanded_coulomb = True + if p.spin == s.spin and q.spin == r.spin: + res -= SymmetricTensor(tensor_names.coulomb, (p, s), (q, r), 1) + expanded_coulomb = True + res = Pow(res, exponent) + + if wrap_result: + kwargs = self.assumptions + if expanded_coulomb: + kwargs["sym_tensors"] += (tensor_names.coulomb,) + res = ExprContainer(res, **kwargs) + return res + + def expand_intermediates(self, target: Sequence[Index], + wrap_result: bool = True, + fully_expand: bool = True + ) -> "ExprContainer | Expr": + """ + Expand the object if it is a known intermediate. + + Parameters + ---------- + target: tuple[Index] + The target indices of the term the object is a part of. + wrap_result: bool, optional + If set, the result will be wrapped with an + :py:class:`ExprContainer`. Note that the target indices will + be set in the resturned container, since the einstein + sum convention is often not valid after intermediate + expansion. (default: True) + fully_expand: bool, optional + True (default): The intermediate is recursively expanded + into orbital energies and ERI (if possible) + False: The intermediate is only expanded once, e.g., n'th + order MP t-amplitudes are expressed by means of (n-1)'th order + MP t-amplitudes and ERI. 
+ """ + from ..intermediates import Intermediates, RegisteredIntermediate + from .expr_container import ExprContainer + + # intermediates only defined for tensors + if not isinstance(self.base, SymbolicTensor): + ret = self.inner + if wrap_result: + assumptions = self.assumptions + assumptions["target_idx"] = target + ret = ExprContainer(ret, **assumptions) + return ret + + longname = self.longname(use_default_names=True) + assert longname is not None + itmd = Intermediates().available.get(longname, None) + expanded = self.inner + if itmd is not None: + assert isinstance(itmd, RegisteredIntermediate) + # Use a for loop to obtain different contracted itmd indices + # for each x in: x * x * ... + expanded = S.One + exponent = self.exponent + assert exponent.is_Integer + for _ in range(abs(int(exponent))): + expanded *= itmd.expand_itmd( + indices=self.idx, wrap_result=False, + fully_expand=fully_expand + ) + if exponent < S.Zero: + expanded = Pow(expanded, -1) + if wrap_result: + assumptions = self.assumptions + assumptions["target_idx"] = target + ret = ExprContainer(expanded, **assumptions) + return expanded + + def use_explicit_denominators(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Switch to an explicit representation of orbital energy denominators by + replacing all symbolic denominators by their explicit counter part, + i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}.+ + """ + from .expr_container import ExprContainer + + explicit_denom = self.inner + if self.name == tensor_names.sym_orb_denom: + tensor, exponent = self.base_and_exponent + assert isinstance(tensor, AntiSymmetricTensor) + # upper indices are added, lower indices subtracted + explicit_denom = S.Zero + for s in tensor.upper: + assert isinstance(s, Index) + explicit_denom += NonSymmetricTensor( + tensor_names.orb_energy, (s,) + ) + for s in tensor.lower: + assert isinstance(s, Index) + explicit_denom -= NonSymmetricTensor( + tensor_names.orb_energy, (s,) + ) + 
explicit_denom = Pow(explicit_denom, -exponent) + if wrap_result: + assumptions = self.assumptions + # remove the symbolic denom from the assumptions if necessary + if tensor_names.sym_orb_denom in self.antisym_tensors: + assumptions["antisym_tensors"] = tuple( + n for n in assumptions["antisym_tensors"] + if n != tensor_names.sym_orb_denom + ) + explicit_denom = ExprContainer(explicit_denom, **assumptions) + return explicit_denom diff --git a/build/lib/adcgen/expression/polynom_container.py b/build/lib/adcgen/expression/polynom_container.py new file mode 100644 index 0000000..ac6610d --- /dev/null +++ b/build/lib/adcgen/expression/polynom_container.py @@ -0,0 +1,282 @@ +from collections.abc import Iterable, Sequence +from functools import cached_property +from typing import Any, TYPE_CHECKING + +from sympy import Add, Expr, Pow, Symbol, S + +from ..indices import Index, sort_idx_canonical +from ..tensor_names import tensor_names +from .container import Container +from .object_container import ObjectContainer + +# imports only required for type checking (avoid circular imports) +if TYPE_CHECKING: + from .term_container import TermContainer + from .expr_container import ExprContainer + + +class PolynomContainer(ObjectContainer): + """ + Wrapper for a polynom of the form (a + b + ...)^x + + Parameters + ---------- + inner: + The polynom to wrap + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. 
+ target_idx: Iterable[Index] | None, optional + Target indices of the expression. By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + def __init__(self, inner: Expr | Container | Any, + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | None = None) -> None: + # call init from ObjectContainers parent class + super(ObjectContainer, self).__init__( + inner=inner, real=real, sym_tensors=sym_tensors, + antisym_tensors=antisym_tensors, target_idx=target_idx + ) + if isinstance(self._inner, Pow): + assert isinstance(self._inner.args[0], Add) + else: + # (a + b + ...)^1 (orbital energy denominator) + assert isinstance(self._inner, Add) + + def __len__(self) -> int: + return len(self.base.args) + + @cached_property + def terms(self) -> "tuple[TermContainer, ...]": + from .term_container import TermContainer + + return tuple( + TermContainer(inner=term, **self.assumptions) + for term in self.base.args + ) + + ################################################# + # compute additional properties for the Polynom # + ################################################# + @property + def type_as_str(self) -> str: + return 'polynom' + + @cached_property + def idx(self) -> tuple[Index, ...]: + """ + Returns all indices that occur in the polynom. Indices that occur + multiple times will be listed multiple times. 
+ """ + idx = [s for t in self.terms for s in t.idx] + return tuple(sorted(idx, key=sort_idx_canonical)) + + @cached_property + def order(self): + raise NotImplementedError("Order not implemented for polynoms: " + f"{self}") + + def crude_pos(self, *args, **kwargs): + _, _ = args, kwargs + raise NotImplementedError("crude_pos for determining index positions " + f"not implemented for polynoms: {self}") + + def description(self, *args, **kwargs): + _, _ = args, kwargs + raise NotImplementedError("description not implemented for polynoms:", + f"{self}") + + @property + def allowed_spin_blocks(self) -> None: + # allowed spin blocks not available for Polynoms + return None + + @property + def contains_only_orb_energies(self) -> bool: + """Whether the poylnom only contains orbital energy tensors.""" + return all(term.contains_only_orb_energies for term in self.terms) + + #################################### + # methods manipulating the polynom # + #################################### + def _apply_tensor_braket_sym(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Applies the tensor bra-ket symmetry defined in sym_tensors and + antisym_tensors to all tensors in the polynom. If wrap_result is set, + the new term will be wrapped by :py:class:`ExprContainer`. + """ + from .expr_container import ExprContainer + + with_sym = S.Zero + for term in self.terms: + with_sym += term._apply_tensor_braket_sym(wrap_result=False) + assert isinstance(with_sym, Expr) + with_sym = Pow(with_sym, self.exponent) + + if wrap_result: + with_sym = ExprContainer(inner=with_sym, **self.assumptions) + return with_sym + + def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr": + """ + Represent the polynom in a real orbital basis. + - names of complex conjugate t-amplitudes, for instance t1cc -> t1 + - adds bra-ket-symmetry to the fock matrix and the ERI. 
+ + Parameters + ---------- + wrap_result : bool, optional + If set the result will be wrapped with an + :py:class:`ExprContainer`. (default: True) + """ + from .expr_container import ExprContainer + + real = S.Zero + for term in self.terms: + real += term.make_real(wrap_result=False) + assert isinstance(real, Expr) + real = Pow(real, self.exponent) + + if wrap_result: + assumptions = self.assumptions + assumptions["real"] = True + real = ExprContainer(inner=real, **assumptions) + return real + + def block_diagonalize_fock(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Block diagonalize the fock matrix in the polynom by removing terms + that contain elements of off-diagonal blocks. + """ + from .expr_container import ExprContainer + + bl_diag = S.Zero + for term in self.terms: + bl_diag += term.block_diagonalize_fock(wrap_result=False) + assert isinstance(bl_diag, Expr) + bl_diag = Pow(bl_diag, self.exponent) + + if wrap_result: + bl_diag = ExprContainer(inner=bl_diag, **self.assumptions) + return bl_diag + + def diagonalize_fock(self, target: Sequence[Index], + wrap_result: bool = True + ): + _, _ = target, wrap_result + raise NotImplementedError("Fock matrix diagonalization not implemented" + f" for polynoms: {self}") + + def rename_tensor(self, current: str, new: str, + wrap_result: bool = True + ) -> "ExprContainer | Expr": + """Rename a tensor from current to new.""" + from .expr_container import ExprContainer + + renamed = S.Zero + for term in self.terms: + renamed += term.rename_tensor(current, new, wrap_result=False) + assert isinstance(renamed, Expr) + renamed = Pow(renamed, self.exponent) + + if wrap_result: + renamed = ExprContainer(inner=renamed, **self.assumptions) + return renamed + + def expand_antisym_eri(self, wrap_result: bool = True): + """ + Expands the antisymmetric ERI using chemists notation + = (pr|qs) - (ps|qr). + ERI's in chemists notation are by default denoted as 'v'. 
+ Currently this only works for real orbitals, i.e., + for symmetric ERI's = . + """ + from .expr_container import ExprContainer + + expanded = S.Zero + for term in self.terms: + expanded += term.expand_antisym_eri(wrap_result=False) + assert isinstance(expanded, Expr) + expanded = Pow(expanded, self.exponent) + + if wrap_result: + assumptions = self.assumptions + # add the coulomb tensor to sym_tensors if necessary + if Symbol(tensor_names.coulomb) in expanded.atoms(Symbol): + assumptions["sym_tensors"] += (tensor_names.coulomb,) + expanded = ExprContainer(inner=expanded, **assumptions) + return expanded + + def expand_intermediates(self, target: Sequence[Index], + wrap_result: bool = True, + fully_expand: bool = True + ) -> "ExprContainer | Expr": + """Expands all known intermediates in the polynom.""" + from .expr_container import ExprContainer + + expanded = S.Zero + for term in self.terms: + expanded += term.expand_intermediates( + target, wrap_result=False, fully_expand=fully_expand + ) + assert isinstance(expanded, Expr) + expanded = Pow(expanded, self.exponent) + + if wrap_result: + assumptions = self.assumptions + assumptions["target_idx"] = target + return ExprContainer(expanded, **assumptions) + return expanded + + def use_explicit_denominators(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Switch to an explicit representation of orbital energy denominators by + replacing all symbolic denominators by their explicit counter part, + i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. 
+ """ + from .expr_container import ExprContainer + + explicit_denom = S.Zero + for term in self.terms: + explicit_denom += term.use_explicit_denominators(wrap_result=False) + assert isinstance(explicit_denom, Expr) + explicit_denom = Pow(explicit_denom, self.exponent) + + if wrap_result: + assumptions = self.assumptions + if tensor_names.sym_orb_denom in self.antisym_tensors: + assumptions["antisym_tensors"] = tuple( + n for n in assumptions["antisym_tensors"] + if n != tensor_names.sym_orb_denom + ) + explicit_denom = ExprContainer(inner=explicit_denom, **assumptions) + return explicit_denom + + def to_latex_str(self, only_pull_out_pref: bool = False, + spin_as_overbar: bool = False) -> str: + """Returns a latex string for the polynom.""" + tex_str = " ".join( + term.to_latex_str(only_pull_out_pref=only_pull_out_pref, + spin_as_overbar=spin_as_overbar) + for term in self.terms + ) + tex_str = f"\\bigl({tex_str}\\bigr)" + if self.exponent != 1: + tex_str += f"^{{{self.exponent}}}" + return tex_str diff --git a/build/lib/adcgen/expression/term_container.py b/build/lib/adcgen/expression/term_container.py new file mode 100644 index 0000000..6a385a4 --- /dev/null +++ b/build/lib/adcgen/expression/term_container.py @@ -0,0 +1,770 @@ +from collections.abc import Iterable +from collections import Counter +from functools import cached_property +from typing import Any, TYPE_CHECKING, Sequence + +from sympy import Add, Expr, Mul, Pow, S, Symbol, factor, latex, nsimplify +from sympy.physics.secondquant import NO + +from ..indices import ( + Index, Indices, get_lowest_avail_indices, get_symbols, order_substitutions, + sort_idx_canonical +) +from ..misc import Inputerror, cached_member +from ..sympy_objects import NonSymmetricTensor +from ..tensor_names import tensor_names +from .container import Container +from .normal_ordered_container import NormalOrderedContainer +from .polynom_container import PolynomContainer +from .object_container import ObjectContainer + +# imports 
only required for type checking (avoid circular imports) +if TYPE_CHECKING: + from ..symmetry import Permutation + from .expr_container import ExprContainer + + +class TermContainer(Container): + """ + Wrapper for a single term of the form a * b * c. + + Parameters + ---------- + inner: + The algebraic term to wrap, e.g., a sympy.Mul object + real : bool, optional + Whether the expression is represented in a real orbital basis. + sym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-symmetry, i.e., + d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional symmetry if they are not aware + of it yet. + antisym_tensors: Iterable[str] | None, optional + Names of tensors with bra-ket-antisymmetry, i.e., + d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to + correctly represent this additional antisymmetry if they are not + aware of it yet. + target_idx: Iterable[Index] | None, optional + Target indices of the expression. By default the Einstein sum + convention will be used to identify target and contracted indices, + which is not always sufficient. + """ + + def __init__(self, inner: Expr | Container | Any, + real: bool = False, + sym_tensors: Iterable[str] = tuple(), + antisym_tensors: Iterable[str] = tuple(), + target_idx: Iterable[Index] | None = None) -> None: + super().__init__( + inner=inner, real=real, sym_tensors=sym_tensors, + antisym_tensors=antisym_tensors, target_idx=target_idx + ) + # we can not wrap an Add object: should be wrapped by ExprContainer + # But everything else should be fine (Mul or single objects) + assert not isinstance(self._inner, Add) + + def __len__(self) -> int: + if isinstance(self.inner, Mul): + return len(self.inner.args) + else: + return 1 + + @cached_property + def objects(self) -> tuple[ObjectContainer, ...]: + """ + Returns all objects the term contains, e.g. tensors. 
+ """ + def dispatch(obj, kwargs) -> ObjectContainer: + if isinstance(obj, NO): + return NormalOrderedContainer(inner=obj, **kwargs) + elif (isinstance(obj, Pow) and isinstance(obj.args[0], Add)) or \ + isinstance(obj, Add): + return PolynomContainer(inner=obj, **kwargs) + else: + return ObjectContainer(inner=obj, **kwargs) + + kwargs = self.assumptions + if isinstance(self.inner, Mul): + return tuple( + dispatch(obj, kwargs) + for obj in self.inner.args + ) + else: + return (dispatch(self.inner, kwargs),) + + ############################################### + # methods that compute additional information # + ############################################### + @cached_property + def order(self) -> int: + return sum( + obj.order for obj in self.objects + if not isinstance(obj, PolynomContainer) + ) + + @cached_property + def prefactor(self) -> Expr: + """Returns the (numeric) prefactor of the term.""" + return nsimplify( + Mul(*(o.inner for o in self.objects if o.inner.is_number)), + rational=True + ) + + @property + def sign(self) -> str: + """Returns the sign of the term.""" + return "minus" if self.prefactor < S.Zero else "plus" + + @property + def contracted(self) -> tuple[Index, ...]: + """ + Returns all contracted indices of the term. If no target indices + have been provided to the parent expression, the Einstein sum + convention will be applied. + """ + # target indices have been provided -> no need to count indices + if (target := self.provided_target_idx) is not None: + return tuple(s for s, _ in self._idx_counter if s not in target) + else: # count indices to determine target and contracted indices + return tuple(s for s, n in self._idx_counter if n) + + @property + def target(self) -> tuple[Index, ...]: + """ + Returns all target indices of the term. If no target indices have been + provided to the parent expression, the Einstein sum convention will + be applied. 
+ """ + if (target := self.provided_target_idx) is not None: + return target + else: + return tuple(s for s, n in self._idx_counter if not n) + + @cached_property + def idx(self) -> tuple[Index, ...]: + """ + Returns all indices that occur in the term. Indices that occur multiple + times will be listed multiple times. + """ + return tuple(s for s, n in self._idx_counter for _ in range(n + 1)) + + @cached_property + def _idx_counter(self) -> tuple[tuple[Index, int], ...]: + idx: dict[Index, int] = {} + for o in self.objects: + if o.inner.is_number: + continue + n = abs(o.exponent) # abs value for denominators + assert n.is_Integer + n = int(n) + for s in o.idx: + if s in idx: + idx[s] += n + else: # start counting at 0 + idx[s] = n - 1 + return tuple(sorted( + idx.items(), key=lambda itms: sort_idx_canonical(itms[0]) + )) + + @cached_member + def pattern(self, include_target_idx: bool = True, + include_exponent: bool = True + ) -> dict[tuple[str, str], dict[Index, list[str]]]: + """ + Determins the pattern of the indices in the term. This is a (kind of) + readable string hash for each index that is based upon the positions + the index appears and the coupling of the objects. + + Parameters + ---------- + include_target_idx: bool, optional + If set, the explicit names of target indices are included to make + the pattern more precise. Should be set if the target indices + are not allowed to be renamed. 
(default: True) + include_exponent: bool, optional + If set, the exponents of the objects are included in the pattern + (default: True) + """ + + target_idx = self.target if include_target_idx else None + coupl = self.coupling( + include_target_idx=include_target_idx, + include_exponent=include_exponent + ) + pattern: dict[tuple[str, str], dict[Index, list[str]]] = {} + for i, o in enumerate(self.objects): + positions = o.crude_pos(target_idx=target_idx, + include_exponent=include_exponent) + c = f"_{'_'.join(sorted(coupl[i]))}" if i in coupl else None + for s, pos in positions.items(): + key = s.space_and_spin + if key not in pattern: + pattern[key] = {} + if s not in pattern[key]: + pattern[key][s] = [] + if c is None: + pattern[key][s].extend(p for p in pos) + else: + pattern[key][s].extend(p + c for p in pos) + # sort pattern to allow for direct comparison + for ov, idx_pat in pattern.items(): + for s, pat in idx_pat.items(): + pattern[ov][s] = sorted(pat) + return pattern + + @cached_member + def coupling(self, include_target_idx: bool = True, + include_exponent: bool = True) -> dict[int, list[str]]: + """ + Returns the coupling between the objects in the term, where two objects + are coupled when they share common indices. Only the coupling of non + unique objects is returned, i.e., the coupling of e.g. a t2_1 amplitude + is only returned if there is another one in the same term. + """ + # - collect all the couplings (e.g. if a index s occurs at two tensors + # t and V: the crude_pos of s at t will be extended by the crude_pos + # of s at V. And vice versa for V.) 
+ objects = self.objects + target_idx = self.target if include_target_idx else None + descriptions = [ + o.description(include_exponent=include_exponent, + target_idx=target_idx) + for o in objects + ] + descr_counter = Counter(descriptions) + positions = [ + o.crude_pos(include_exponent=include_exponent, + target_idx=target_idx) + for o in objects + ] + coupling = {} + for i, (descr, idx_pos) in enumerate(zip(descriptions, positions)): + # if the tensor is unique in the term -> no coupling necessary + if descr_counter[descr] < 2: + continue + for other_i, other_idx_pos in enumerate(positions): + if i == other_i: + continue + matches = [idx for idx in idx_pos if idx in other_idx_pos] + if not matches: + continue + if i not in coupling: + coupling[i] = [] + coupling[i].extend( + [p for s in matches for p in other_idx_pos[s]] + ) + return coupling + + @cached_member + def symmetry(self, only_contracted: bool = False, + only_target: bool = False + ) -> "dict[tuple[Permutation, ...], int]": + """ + Determines the symmetry of the term with respect to index permutations. + By default all indices of the term are considered. However, by setting + either only_contracted or only_target the indices may be restricted to + the respective subset of indices. 
+ """ + from itertools import combinations, permutations, chain, product + from math import factorial + from ..indices import split_idx_string + from ..symmetry import Permutation, PermutationProduct + + def permute_str(string, *perms): + string = split_idx_string(string) + for perm in perms: + p, q = [s.name for s in perm] + sub = {p: q, q: p} + string = [sub.get(s, s) for s in string] + return "".join(string) + + def get_perms(*space_perms): + for perms in chain.from_iterable(space_perms): + yield perms + if len(space_perms) > 1: # form the product + for perm_tpl in product(*space_perms): + yield PermutationProduct(*chain.from_iterable(perm_tpl)) + + if only_contracted and only_target: + raise Inputerror("Can not set only_contracted and only_target " + "simultaneously.") + if self.inner.is_number or isinstance(self.inner, NonSymmetricTensor): + return {} # in both cases we can't find any symmetry + + if only_contracted: + indices = self.contracted + elif only_target: + indices = self.target + else: + indices = self.idx + + if len(indices) < 2: # not enough indices for any permutations + return {} + + # split in occ and virt indices to only generate P_oo, P_vv and P_gg. + # Similarly, the spin has to be the same! 
+ sorted_idx = {} + for s in indices: + if (key := s.space_and_spin) not in sorted_idx: + sorted_idx[key] = [] + sorted_idx[key].append(s) + + space_perms: list[list] = [] # find all permutations within a space + for idx_list in sorted_idx.values(): + if len(idx_list) < 2: + continue + max_n_perms = factorial(len(idx_list)) + # generate idx string that will also be permuted to avoid + # redundant permutations + idx_string = "".join([s.name for s in idx_list]) + permuted_str = [idx_string] + # form all index pairs - all permutations operators + pairs = [Permutation(*pair) for pair in combinations(idx_list, 2)] + # form all combinations of permutation operators + combs = chain.from_iterable( + permutations(pairs, n) for n in range(1, len(idx_list)) + ) + # remove redundant combinations + temp = [] + for perms in combs: + if len(permuted_str) == max_n_perms: + break # did find enough permutations + perm_str = permute_str(idx_string, *perms) + if perm_str in permuted_str: # is the perm redundant? 
+                    continue
+                permuted_str.append(perm_str)
+                temp.append(perms)
+            space_perms.append(temp)
+        # now apply all found perms to the term and determine the symmetry
+        # -> add/subtract permuted versions of the term and see if we get 0
+        symmetry: dict[tuple, int] = {}
+        original_term = self.inner
+        for perms in get_perms(*space_perms):
+            permuted = self.permute(*perms).inner
+            if Add(original_term, permuted) is S.Zero:
+                symmetry[perms] = -1
+            elif Add(original_term, -permuted) is S.Zero:
+                symmetry[perms] = +1
+        return symmetry
+
+    @property
+    def contains_only_orb_energies(self) -> bool:
+        """Whether the term only contains orbital energies."""
+        return all(
+            o.contains_only_orb_energies for o in self.objects
+            if not o.inner.is_number
+        )
+
+    ################################
+    # methods that modify the term #
+    ################################
+    def _apply_tensor_braket_sym(self, wrap_result: bool = True
+                                 ) -> "ExprContainer | Expr":
+        """
+        Applies the tensor bra-ket symmetry defined in sym_tensors and
+        antisym_tensors to all tensors in the term. If wrap_result is set,
+        the new term will be wrapped by :py:class:`ExprContainer`.
+        """
+        from .expr_container import ExprContainer
+
+        term = S.One
+        for obj in self.objects:
+            term *= obj._apply_tensor_braket_sym(wrap_result=False)
+        if wrap_result:
+            term = ExprContainer(inner=term, **self.assumptions)
+        return term
+
+    def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr":
+        """
+        Represent the term in a real orbital basis.
+        - renames complex conjugate t-amplitudes, for instance t1cc -> t1
+        - adds bra-ket-symmetry to the fock matrix and the ERI.
+
+        Parameters
+        ----------
+        wrap_result: bool, optional
+            If this is set the result will be wrapped in an
+            :py:class:`ExprContainer`. Otherwise the unwrapped object
+            is returned.
(default: True) + """ + from .expr_container import ExprContainer + + real_term = S.One + for obj in self.objects: + real_term *= obj.make_real(wrap_result=False) + + if wrap_result: + kwargs = self.assumptions + kwargs["real"] = True + real_term = ExprContainer(inner=real_term, **kwargs) + return real_term + + def block_diagonalize_fock(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Block diagonalize the Fock matrix, i.e. if the term contains a off + diagonal Fock matrix block (f_ov/f_vo) it is set to 0. + + Parameters + ---------- + wrap_result: bool, optional + If this is set the result will be wrapped with an + :py:class:`ExprContainer`. + """ + bl_diag = S.One + for obj in self.objects: + bl_diag *= obj.block_diagonalize_fock(wrap_result=False) + + if wrap_result: + bl_diag = ExprContainer(bl_diag, **self.assumptions) + return bl_diag + + def diagonalize_fock(self, target: Sequence[Index] | None = None, + wrap_result: bool = True, + apply_substitutions: bool = True + ) -> "ExprContainer | Expr | tuple[ExprContainer | Expr, dict[Index, Index]]": # noqa E501 + """ + Represent the term in the canonical orbital basis, where the + Fock matrix is diagonal. Because it is not possible to + determine the target indices in the resulting term according + to the Einstein sum convention, the current target indices will + be set manually in the resulting term. + + Parameters + ---------- + target: Sequence[Index] | None + The target indices of a potential parent term. + wrap_result: bool, optional + If this is set the result will be wrapped with an + :py:class:`ExprContainer`. + apply_substitutions: bool, optional + If set the index substitutions will be applied to the result. + Otherwhise the substitutions will be returned in addition to the + expression (without applying them). + In both cases fock matrix elements will be replaced by orbital + energie elements, e.g., f_ij will be replaced by e_i. 
+ """ + from .expr_container import ExprContainer + + if target is None: + target = self.target + + sub: dict[Index, Index] = {} + diag = S.One + for o in self.objects: + diag_obj, sub_obj = o.diagonalize_fock(target, wrap_result=False) + diag *= diag_obj + if any(k in sub and sub[k] != v for k, v in sub_obj.items()): + raise NotImplementedError("Did not implement the case of " + "multiple fock matrix elements with " + f"intersecting indices: {self}") + sub.update(sub_obj) + + if wrap_result: + kwargs = self.assumptions + kwargs["target_idx"] = target + diag = ExprContainer(diag, **kwargs) + if apply_substitutions: + return diag.subs(order_substitutions(sub)) + else: + return diag, sub + + def substitute_contracted(self, wrap_result: bool = True, + apply_substitutions: bool = True + ) -> "ExprContainer | Expr | list[tuple[Index, Index]]": # noqa E501 + """ + Replace the contracted indices in the term with the lowest available + (non-target) indices. This is done for each space and spin + independently, i.e., + i_{\\alpha} j_{\\beta} -> i_{\\alpha} i_{\\beta} + assuming both indices are contracted indices and + i_{\\alpha} i_{\\beta} are not used as target indices. + + Parameters + ---------- + wrap_result: bool, optional + If set the result will be wrapped in an + :py:class:`ExprContainer`. (default: True) + apply_substitutions: bool, optional + If set the substitutions will be applied to the + term and the new expression is returned. Otherwise, + the index substitutions will be returned without + applying them to the expression. (default: True) + """ + from .expr_container import ExprContainer + + # - determine the target and contracted indices + # and split them according to their space + # Don't use atoms to obtain the contracted indices! Atoms is a set + # and therefore not sorted -> will produce a random result. 
+ contracted = {} + for s in self.contracted: + if (key := s.space_and_spin) not in contracted: + contracted[key] = [] + contracted[key].append(s) + used = {} + for s in set(self.target): + if (key := s.space_and_spin) not in used: + used[key] = set() + used[key].add(s.name) + + # - generate new indices the contracted will be replaced with + # and build a substitution dictionary + # Don't filter out indices that will not change! + sub = {} + for (space, spin), idx_list in contracted.items(): + new_idx = get_lowest_avail_indices( + len(idx_list), used.get((space, spin), []), space + ) + if spin: + new_idx = get_symbols(new_idx, spin * len(idx_list)) + else: + new_idx = get_symbols(new_idx) + sub.update({o: n for o, n in zip(idx_list, new_idx)}) + # - apply substitutions while ensuring the substitutions are + # performed in the correct order + sub = order_substitutions(sub) + + if not apply_substitutions: # only build and return the sub_list + return sub + + substituted = self.inner.subs(sub) + assert isinstance(substituted, Expr) + # ensure that the substitutions are valid + if substituted is S.Zero and self.inner is not S.Zero: + raise ValueError(f"Invalid substitutions {sub} for {self}.") + + if wrap_result: + substituted = ExprContainer(substituted, **self.assumptions) + return substituted + + def substitute_with_generic(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Replace the contracted indices in the term with new, unused generic + indices. 
+ """ + from .expr_container import ExprContainer + # sort the contracted indices according to their space and spin + contracted: dict[tuple[str, str], list[Index]] = {} + for idx in self.contracted: + if (key := idx.space_and_spin) not in contracted: + contracted[key] = [] + contracted[key].append(idx) + # generate new generic indices + kwargs = {f"{space}_{spin}" if spin else space: len(indices) + for (space, spin), indices in contracted.items()} + generic = Indices().get_generic_indices(**kwargs) + # build the subs dict + subs: dict[Index, Index] = {} + for key, old_indices in contracted.items(): + new_indices = generic[key] + subs.update({ + idx: new_idx for idx, new_idx in zip(old_indices, new_indices) + }) + # substitute the indices + substituted = self.inner.subs(order_substitutions(subs)) + assert isinstance(substituted, Expr) + # ensure that the substitutions are valid + if substituted is S.Zero and self.inner is not S.Zero: + raise ValueError(f"Invalid substitutions {subs} for {self}.") + + if wrap_result: + substituted = ExprContainer(substituted, **self.assumptions) + return substituted + + def rename_tensor(self, current: str, new: str, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Rename tensors in a terms. + + Parameters + ---------- + wrap_result: bool, optional + If this is set the result will be wrapped with an + :py:class:`ExprContainer`. (default: True) + """ + from .expr_container import ExprContainer + + renamed = S.One + for obj in self.objects: + renamed *= obj.rename_tensor( + current=current, new=new, wrap_result=False + ) + + if wrap_result: + renamed = ExprContainer(renamed, **self.assumptions) + return renamed + + def expand_antisym_eri(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Expands the antisymmetric ERI using chemists notation + = (pr|qs) - (ps|qr). + ERI's in chemists notation are by default denoted as 'v'. + Currently this only works for real orbitals, i.e., for + symmetric ERI's = . 
+ """ + from .expr_container import ExprContainer + + expanded = S.One + for obj in self.objects: + expanded *= obj.expand_antisym_eri(wrap_result=False) + + if wrap_result: + assumptions = self.assumptions + if Symbol(tensor_names.coulomb) in expanded.atoms(Symbol): + assumptions["sym_tensors"] += (tensor_names.coulomb,) + expanded = ExprContainer(expanded, **assumptions) + return expanded + + def expand_intermediates(self, target: Sequence[Index] | None = None, + wrap_result: bool = True, + fully_expand: bool = True + ) -> "ExprContainer | Expr": + """ + Expands all known intermediates in the term. + + Parameters + ---------- + target: tuple[Index] | None, optional + The target indices of the term. Determined automatically if not + given. Since it might not be possible to determine the + target indices in the resulting expression (e.g. after + expanding MP t-amplitudes) the target indices will be + set in the expression. + wrap_result: bool, optional + If set the result is wrapped in an + :py:class:`ExprContainer`. (default: True) + fully_expand: bool, optional + True (default): The intermediates are recursively expanded + into orbital energies and ERI (if possible) + False: The intermediates are only expanded once, e.g., n'th + order MP t-amplitudes are expressed by means of (n-1)'th order + MP t-amplitudes and ERI. + """ + from .expr_container import ExprContainer + + if target is None: + target = self.target + + expanded = S.One + for obj in self.objects: + expanded *= obj.expand_intermediates( + target, wrap_result=False, fully_expand=fully_expand + ) + + if wrap_result: + assumptions = self.assumptions + assumptions["target_idx"] = target + expanded = ExprContainer(expanded, **assumptions) + return expanded + + def factor(self) -> "ExprContainer": + """ + Tries to factor the term. 
+ """ + from .expr_container import ExprContainer + + return ExprContainer( + inner=factor(self.inner), **self.assumptions + ) + + def use_explicit_denominators(self, wrap_result: bool = True + ) -> "ExprContainer | Expr": + """ + Switch to an explicit representation of orbital energy denominators by + replacing all symbolic denominators by their explicit counter part, + i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. + """ + from .expr_container import ExprContainer + + explicit_denom = S.One + for obj in self.objects: + explicit_denom *= obj.use_explicit_denominators(wrap_result=False) + + if wrap_result: + assumptions = self.assumptions + # remove the tensor from the assumptions + if tensor_names.sym_orb_denom in self.antisym_tensors: + assumptions["antisym_tensors"] = tuple( + n for n in assumptions["antisym_tensors"] + if n != tensor_names.sym_orb_denom + ) + explicit_denom = ExprContainer(explicit_denom, **assumptions) + return explicit_denom + + def split_orb_energy(self) -> "dict[str, ExprContainer]": + """ + Splits the term in a orbital energy fraction and a remainder, e.g. + (e_i + e_j) / (e_i + e_j - e_a - e_b) * (tensor1 * tensor2). + To this end all polynoms that only contain orbital energy tensors + ('e' by default) are collected to form the numerator and denominator, + while the rest of the term is collected in the remainder. + Prefactors are collected in the numerator. 
+ """ + from .expr_container import ExprContainer + + assumptions = self.assumptions + assumptions['target_idx'] = self.target + ret = {"num": ExprContainer(1, **assumptions), + 'denom': ExprContainer(1, **assumptions), + 'remainder': ExprContainer(1, **assumptions)} + for o in self.objects: + base, exponent = o.base_and_exponent + if o.inner.is_number: + key = "num" + elif o.contains_only_orb_energies: + key = "denom" if exponent < S.Zero else "num" + else: + key = 'remainder' + ret[key] *= Pow(base, abs(exponent)) + return ret + + def use_symbolic_denominators(self) -> "ExprContainer": + """ + Replace all orbital energy denominators in the expression by tensors, + e.g., (e_a + e_b - e_i - e_j)^{-1} will be replaced by D^{ab}_{ij}, + where D is a SymmetricTensor.""" + from ..eri_orbenergy import EriOrbenergy + + term = EriOrbenergy(self) + symbolic_denom = term.symbolic_denominator() + # symbolic denom might additionaly have D set as antisym tensor + return symbolic_denom * term.pref * term.num.inner * term.eri.inner + + def to_latex_str(self, only_pull_out_pref: bool = False, + spin_as_overbar: bool = False): + """ + Transforms the term to a latex string. + + Parameters + ---------- + only_pull_out_pref: bool, optional + Use the 'latex' printout from sympy, while prefactors are printed + in front of each term. This avoids long fractions with a huge + number of tensors in the numerator and only a factor in the + denominator. + spin_as_overbar: bool, optional + Instead of printing the spin of an index as suffix (name_spin) + use an overbar for beta spin and no indication for alpha. Because + alpha indices and indices without spin are not distinguishable + anymore, this only works if all indices have a spin set (the + expression is completely represented in spatial orbitals). 
+ """ + # - sign and prefactor + pref = self.prefactor + tex_str = "+ " if pref >= S.Zero else "- " + # term only consists of a number (only pref) + if self.inner.is_number: + return tex_str + f"{latex(abs(pref))}" + # avoid printing +- 1 prefactors + if pref not in [+1, -1]: + tex_str += f"{latex(abs(pref))} " + + # - latex strings for the remaining objects + tex_str += " ".join([ + o.to_latex_str(only_pull_out_pref, spin_as_overbar) + for o in self.objects if not o.inner.is_number + ]) + return tex_str diff --git a/build/lib/adcgen/factor_intermediates.py b/build/lib/adcgen/factor_intermediates.py new file mode 100644 index 0000000..df3c195 --- /dev/null +++ b/build/lib/adcgen/factor_intermediates.py @@ -0,0 +1,1813 @@ +from collections.abc import Iterable, Sequence, Generator +from collections import Counter +from functools import cached_property +from typing import Any, TYPE_CHECKING, TypeGuard +import itertools + +from sympy import Add, Expr, Mul, Rational, S, sympify + +from .eri_orbenergy import EriOrbenergy +from .expression import ExprContainer, TermContainer, ObjectContainer +from .indices import ( + Index, + order_substitutions, get_symbols, minimize_tensor_indices +) +from .logger import logger +from .symmetry import LazyTermMap, Permutation +from .sympy_objects import AntiSymmetricTensor, SymbolicTensor + +if TYPE_CHECKING: + from .intermediates import RegisteredIntermediate + + +def factor_intermediates(expr: ExprContainer, + types_or_names: Sequence[str] | None = None, + max_order: int | None = None, + allow_repeated_itmd_indices: bool = False + ) -> ExprContainer: + """ + Factors the intermediates defined in 'intermediates.py' in an expression. + Note that the implementation assumes that a real orbital basis is used. + + Parameters + ---------- + expr : Expr + Expression in which to factor intermediates. + types_or_names : Sequence[str], optional + The types or names of the intermediates to factor. 
If not given, the + function tries to factor all available intermediates. + max_order : int, optional + The maximum perturbation theoretical order of intermediates to + consider. + allow_repeated_itmd_indices: bool, optional + If set, the factorization of intermediates of the form I_iij are + allowed, i.e., indices on the intermediate may appear more than once. + This corresponds to either a partial trace or a diagonal element of + the intermediate. Note that this does not consistently work for + "long" intermediates (at least 2 terms), because the number of terms + might be reduced which is not correctly handled currently. + """ + from .intermediates import Intermediates, RegisteredIntermediate + from time import perf_counter + + assert isinstance(expr, ExprContainer) + if expr.inner.is_number: # nothing to factor + return expr + + # get all intermediates that are about to be factored in the expr + itmd = Intermediates() + if types_or_names is not None: + if isinstance(types_or_names, str): + itmd_to_factor: dict[str, RegisteredIntermediate] = getattr( + itmd, types_or_names + ) + else: # list / tuple / set of strings + itmd_to_factor: dict[str, RegisteredIntermediate] = {} + for t_or_n in types_or_names: + if not isinstance(t_or_n, str): + raise TypeError("Intermediate types/names to factor have " + "to be provided as str or list of strings." 
+ f"Got {t_or_n} of type {type(t_or_n)}.") + itmd_to_factor |= getattr(itmd, t_or_n) + else: + itmd_to_factor: dict[str, RegisteredIntermediate] = itmd.available + + if max_order is not None: + itmd_to_factor = {n: itmd_cls for n, itmd_cls in itmd_to_factor.items() + if itmd_cls.order <= max_order} + + logger.info("".join([ + "\n\n", "#"*80, "\n", " "*25, "INTERMEDIATE FACTORIZATION\n", "#"*80, + "\n\n", + f"Trying to factor intermediates in expr of length {len(expr)}\n" + ])) + for i, term in enumerate(expr.terms): + logger.info(f"{i+1}: {EriOrbenergy(term)}\n") + logger.info('#'*80) + # try to factor all requested intermediates + factored: list[str] = [] + for name, itmd_cls in itmd_to_factor.items(): + logger.info("".join(["\n", ' '*25, f"Factoring {name}\n\n", '#'*80])) + start = perf_counter() + expr = itmd_cls.factor_itmd( + expr, factored_itmds=factored, max_order=max_order, + allow_repeated_itmd_indices=allow_repeated_itmd_indices + ) + factored.append(name) + logger.info("".join([ + "\n", "-"*80, "\n" + f"Done in {perf_counter()-start:.2f}s. {len(expr)} terms remain", + "\n", "-"*80, "\n" + ])) + for i, term in enumerate(expr.terms): + logger.info( + f"{i+1: >{len(str(len(expr)+1))}}: {EriOrbenergy(term)}\n" + ) + logger.info("#"*80) + logger.info("".join(["\n\n", '#'*80, "\n", " "*25, + "INTERMEDIATE FACTORIZATION FINISHED\n", '#'*80])) + # make the result pretty by minimizing contracted indices: + # some contracted indices might be hidden inside some intermediates. 
def _factor_long_intermediate(expr: ExprContainer,
                              itmd: Sequence[EriOrbenergy],
                              itmd_data: Sequence["FactorizationTermData"],
                              itmd_term_map: LazyTermMap,
                              itmd_cls: "RegisteredIntermediate",
                              allow_repeated_itmd_indices: bool = False
                              ) -> ExprContainer:
    """
    Factors a long intermediate - an intermediate that consists of more
    than one term - in an expression.

    Parameters
    ----------
    expr : ExprContainer
        The expression to factor the intermediate in.
    itmd : Sequence[EriOrbenergy]
        The expression of the intermediate to factor split into terms
        and separating orbital energy fractions.
    itmd_data : Sequence[FactorizationTermData]
        Data for each term in the intermediate to map the itmd term onto
        subparts of terms in the expression.
    itmd_term_map : LazyTermMap
        Provides information about the mapping of terms in the intermediate
        if the target indices of the intermediate are permuted.
    itmd_cls : RegisteredIntermediate
        The class instance of the intermediate to factor.
    allow_repeated_itmd_indices : bool, optional
        If set, the factorization of intermediates of the form I_iij is
        allowed, i.e., indices on the intermediate may appear more than
        once. This does not consistently work for "long" intermediates
        (at least 2 terms), because the number of terms might be reduced which
        is not correctly handled currently.
    """

    if expr.inner.is_number:
        return expr

    # does any itmd term have a denominator?
    itmd_has_denom: bool = any(
        term_data.denom_bracket_lengths is not None for term_data in itmd_data
    )
    itmd_length: int = len(itmd)
    # get the default symbols of the intermediate
    itmd_default_symbols: tuple[Index, ...] = tuple(
        get_symbols(itmd_cls.default_idx)
    )

    terms: tuple[TermContainer, ...] = expr.terms

    # class that manages the found itmd variants
    intermediate_variants: LongItmdVariants = LongItmdVariants(itmd_length)
    for term_i, term in enumerate(terms):
        term = EriOrbenergy(term).canonicalize_sign()
        # prescan: check that the term holds the correct tensors and
        # denominator brackets
        term_data = FactorizationTermData(term)
        # description of all objects in the eri part, exponent implicitly
        # included
        obj_descr = term_data.eri_obj_descriptions
        if itmd_has_denom:
            bracket_lengths = term_data.denom_bracket_lengths
        else:
            bracket_lengths = None

        # compare to all of the itmd terms -> only try to map on a subset of
        # intermediate terms later
        possible_matches: list[int] = []
        for itmd_i, itmd_term_data in enumerate(itmd_data):
            # do all tensors in the eri part occur at least as often as
            # in the intermediate
            if any(obj_descr[descr] < n for descr, n in
                   itmd_term_data.eri_obj_descriptions.items()):
                continue
            # itmd_term has a denominator?
            itmd_bracket_lengths = itmd_term_data.denom_bracket_lengths
            if itmd_bracket_lengths is not None:
                if bracket_lengths is None:  # term has no denom -> cant match
                    continue
                else:  # term also has a denominator
                    # ensure that brackets of the correct length are available
                    if any(bracket_lengths[length] < n for length, n in
                           itmd_bracket_lengths.items()):
                        continue
            possible_matches.append(itmd_i)
        if not possible_matches:  # did not find any possible matches
            continue

        # extract the target idx names of the term
        target_idx_by_space: dict[tuple[str, str], set[str]] = {}
        for s in term.eri.target:
            if (key := s.space_and_spin) not in target_idx_by_space:
                target_idx_by_space[key] = set()
            target_idx_by_space[key].add(s.name)

        # go through all possible matches
        for itmd_i in possible_matches:
            # - compare and obtain data (sub_dict, obj indices, factor)
            #   that makes the itmd_term equal to the defined sub part
            #   of the term.
            variants = _compare_terms(
                term, itmd[itmd_i], term_data=term_data,
                itmd_term_data=itmd_data[itmd_i]
            )
            if variants is None:  # was not possible to map the terms
                continue

            # The term_map allows to spread a term assignment to multiple
            # terms taking the symmetry of the remainder into account, e.g.,
            # for the t2_2 amplitudes:
            # t2_2 <- (1-P_ij)(1-P_ab) X
            # - Depending on the symmetry of the remainder these 4 terms
            #   might occur as 4, 2 or 1 terms in the expression to factor:
            #   Rem * (1-P_ij)(1-P_ab) X -> 4 * Rem * X
            #   (if Rem has ij and ab antisymmetry)
            # - If a term with such a remainder is matched with one of the
            #   4 terms it will automatically also be matched with the other
            #   3 terms using the term_map for the intermediate.
            # NOTE: it is not possible to exploit this to reduce the workload
            #   by exploiting the fact that the current term has already
            #   been matched to a itmd_term through the term map, because
            #   more complicated permutations do not provide a back and
            #   forth relationship between terms:
            #   P_ij P_ik A(ijk) -> B(kij)
            #   P_ij P_ik B(kij) -> C(jki)
            #   comparing the current term to A can also provide a match
            #   with B through the term_map.
            #   comparing the current term to B however can provide a match
            #   with C!
            #   Therefore, the comparison with B can not be skipped, even
            #   if remainder and itmd_indices are identical to a previously
            #   found variant that matched to A and B!
            #   What can be done: for matching term 1 -> itmd_term A
            #   due to the symmetry of tensors one probably obtains multiple
            #   variants for the same itmd_indices and remainder that only
            #   differ in contracted indices.
            #   -> for each itmd_indices only consider one variant for each
            #      remainder

            # get the contracted indices of the itmd term
            itmd_contracted_symbols = tuple(
                s for s in set(itmd[itmd_i].expr.idx)
                if s not in itmd_default_symbols
            )
            # {itmd_indices: [remainder]}
            found_remainders: dict[tuple[Index, ...], list[ExprContainer]] = {}
            for variant_data in variants:  # go through all valid variants
                # - extract the remainder of the term (objects, excluding
                #   prefactors that will remain if the current variant is
                #   used to factor the itmd)
                remainder = _get_remainder(
                    term, variant_data['eri_i'], variant_data['denom_i']
                )

                # - obtain the indices of the intermediate
                itmd_indices: tuple[Index, ...] = tuple(
                    variant_data['sub'].get(s, s) for s in itmd_default_symbols
                )

                # - ensure that we have no repeated indices on the itmd
                if not allow_repeated_itmd_indices and itmd_indices and \
                        any(c != 1 for c in Counter(itmd_indices).values()):
                    continue

                # - check that none of the contracted itmd indices appears
                #   in the remainder!
                #   error or continue? probably better have a look if the
                #   error is thrown and decide then
                contracted_itmd_indices: tuple[Index, ...] = tuple(
                    variant_data['sub'].get(s, s)
                    for s in itmd_contracted_symbols
                )
                remainder_indices: set[Index] = set(remainder.idx)
                if any(s in remainder_indices
                       for s in contracted_itmd_indices):
                    raise RuntimeError("Invalid contracted itmd indices "
                                       f"{contracted_itmd_indices} found "
                                       "that also appear in the remainder:\n"
                                       f"{remainder}")

                # - minimize the indices of the intermediate to ensure that
                #   the same indices are used in each term of the long itmd
                #   (use the lowest non target indices)
                itmd_indices, minimization_perms = minimize_tensor_indices(
                    itmd_indices, target_idx_by_space
                )

                # - apply the substitutions to the remainder
                remainder = remainder.permute(*minimization_perms)
                # if this ever triggers probably switch to a continue
                assert remainder.inner is not S.Zero

                # - Further minimize the tensor indices taking the tensor
                #   symmetry of the itmd into account by building a tensor
                #   using the minimized tensor indices
                #   -> returns the tensor with completely minimized indices
                #      and possibly a factor of -1
                tensor_obj = itmd_cls.tensor(
                    indices=itmd_indices, wrap_result=True
                )
                assert isinstance(tensor_obj, ExprContainer)
                tensor_obj = tensor_obj.terms[0]
                if len(tensor_obj) > 2:
                    raise ValueError("Expected the term to be at most of "
                                     f"length 2. Got: {tensor_obj}.")
                for obj in tensor_obj.objects:
                    if isinstance(obj.base, SymbolicTensor):
                        itmd_indices = obj.idx
                    elif obj.inner.is_number:
                        variant_data['factor'] *= obj.inner
                    else:
                        raise TypeError("Only expected tensor and prefactor."
                                        f"Found {obj} in {tensor_obj}")

                # check if we already found another variant that gives the
                # same itmd_indices and remainder (an identical result that
                # only differs in contracted itmd_indices)
                if itmd_indices not in found_remainders:
                    found_remainders[itmd_indices] = []
                if any(_compare_remainder(remainder, found_rem, itmd_indices)
                       is not None
                       for found_rem in found_remainders[itmd_indices]):
                    continue  # go to the next variant
                else:
                    found_remainders[itmd_indices].append(remainder)

                # - check if the current itmd_term can be mapped onto other
                #   itmd terms
                matching_itmd_terms = _map_on_other_terms(
                    itmd_i, remainder, itmd_term_map, itmd_indices,
                    itmd_default_symbols
                )

                # - calculate the final prefactor of the remainder if the
                #   current variant is applied for factorization
                #   keep the term normalized if spreading to multiple terms!
                #   (factor is +-1)
                prefactor: Expr = (
                    term.pref * variant_data['factor'] *
                    Rational(1, len(matching_itmd_terms)) / itmd[itmd_i].pref
                )

                # - compute the factor that the term should have if we want
                #   to factor the current variant with a prefactor of 1
                #   (required for factoring mixed prefactors)
                unit_factorization_pref: Expr = (
                    itmd[itmd_i].pref * variant_data['factor']
                    * len(matching_itmd_terms)
                )

                # - add the match to the pool where intermediate variants
                #   are build from
                intermediate_variants.add(
                    term_i=term_i, itmd_indices=itmd_indices,
                    matching_itmd_terms=matching_itmd_terms,
                    remainder=remainder, prefactor=prefactor,
                    unit_factorization_pref=unit_factorization_pref
                )
    logger.debug("\nMATCHED INTERMEDIATE TERMS:")
    logger.debug(intermediate_variants)

    result: ExprContainer = ExprContainer(0, **expr.assumptions)
    # keep track which terms have already been factored
    factored_terms: set[int] = set()
    factored_successfully: bool = False

    # first try to factor all complete intermediate variants
    result, successful = _factor_complete(
        result, terms, itmd_cls, factored_terms, intermediate_variants
    )
    factored_successfully |= successful

    # go again through the remaining itmd variants and try to build more
    # complete variants by allowing mixed prefactors, i.e.,
    # add a term that belongs to a variant with prefactor 1
    # to the nearly complete variant with prefactor 2. To compensate
    # for this, additional terms are added to the result.
    result, factored_mixed_pref_successfully = _factor_mixed_prefactors(
        result, terms, itmd_cls, factored_terms, intermediate_variants
    )
    factored_successfully |= factored_mixed_pref_successfully

    # TODO:
    # go again through the remaining itmds and see if we can factor another
    # intermediate by filling up some terms, e.g. if we found 5 out of 6 terms
    # it still makes sense to factor the itmd

    # add all terms that were not involved in itmd_factorization to the result
    for term_i, term in enumerate(terms):
        if term_i not in factored_terms:
            factored_terms.add(term_i)
            result += term.inner
    assert len(factored_terms) == len(terms)

    # if we factored the itmd successfully it might be necessary to adjust
    # sym_tensors or antisym_tensors of the returned expression
    if factored_successfully:
        tensor = itmd_cls.tensor(wrap_result=False)
        if isinstance(tensor, AntiSymmetricTensor):
            name = tensor.name
            if tensor.bra_ket_sym is S.One and \
                    name not in (sym_tensors := result.sym_tensors):
                result.sym_tensors = sym_tensors + (name,)
            elif tensor.bra_ket_sym is S.NegativeOne and \
                    name not in (antisym_t := result.antisym_tensors):
                result.antisym_tensors = antisym_t + (name,)
    return result
def _factor_short_intermediate(expr: ExprContainer, itmd: EriOrbenergy,
                               itmd_data: "FactorizationTermData",
                               itmd_cls: "RegisteredIntermediate",
                               allow_repeated_itmd_indices: bool = False
                               ) -> ExprContainer:
    """
    Factors a short intermediate - an intermediate that consists only of one
    term - in an expression.

    Parameters
    ----------
    expr : ExprContainer
        The expression to factor the intermediate in.
    itmd : EriOrbenergy
        The expression of the intermediate (a single term).
    itmd_data : FactorizationTermData
        Data of the intermediate term to map it onto subparts of terms in the
        expression.
    itmd_cls : RegisteredIntermediate
        The class instance of the intermediate to factor.
    allow_repeated_itmd_indices : bool, optional
        If set, the factorization of intermediates of the form I_iij is
        allowed, i.e., indices on the intermediate may appear more than
        once. This corresponds to either a partial trace or a diagonal
        element of the intermediate.
    """

    if expr.inner.is_number:
        return expr

    # get the default symbols of the intermediate
    itmd_default_symbols: tuple[Index, ...] = tuple(
        get_symbols(itmd_cls.default_idx)
    )
    # and the itmd contracted indices
    itmd_contracted_symbols: tuple[Index, ...] = tuple(
        s for s in set(itmd.expr.idx) if s not in itmd_default_symbols
    )

    terms: tuple[TermContainer, ...] = expr.terms

    factored: ExprContainer = ExprContainer(0, **expr.assumptions)
    # factored expression that is returned
    # NOTE(review): "sucessfully" is misspelled; name kept to preserve tokens
    factored_sucessfully: bool = False  # bool to indicate whether we factored
    for term in terms:
        term = EriOrbenergy(term).canonicalize_sign()
        data = FactorizationTermData(term)
        # check if the current term and the itmd are compatible:
        # - check if all necessary objects occur in the eri part
        obj_descr = data.eri_obj_descriptions
        if any(obj_descr[descr] < n for descr, n in
               itmd_data.eri_obj_descriptions.items()):
            factored += term.expr.inner
            continue
        # - check if brackets of the correct length occur in the denominator
        if itmd_data.denom_bracket_lengths is not None:  # itmd has a denom
            bracket_lengths = data.denom_bracket_lengths
            if bracket_lengths is None:  # term has no denom
                factored += term.expr.inner
                continue
            else:  # term also has a denom
                if any(bracket_lengths[length] < n for length, n in
                       itmd_data.denom_bracket_lengths.items()):
                    factored += term.expr.inner
                    continue
        # ok, the term seems to be a possible match -> try to factor

        # compare the term and the itmd term
        variants = _compare_terms(term, itmd, data, itmd_data)

        if variants is None:
            factored += term.expr.inner
            continue

        # filter out variants, where repeated indices appear on the
        # intermediate to factor
        if not allow_repeated_itmd_indices:
            variants = [
                var for var in variants
                if all(c == 1 for c in
                       Counter(var["sub"].get(s, s)
                               for s in itmd_default_symbols).values())
            ]
            if not variants:
                factored += term.expr.inner
                continue

        # choose the variant with the lowest overlap to other variants
        # - find all unique obj indices (eri and denom)
        # - and determine all itmd_indices
        unique_obj_i: \
            dict[tuple[tuple[int, ...], tuple[int, ...]], list[int]] = {}
        for var_idx, var in enumerate(variants):
            key = (tuple(sorted(set(var["eri_i"]))),
                   tuple(sorted(set(var["denom_i"]))))
            if key not in unique_obj_i:
                unique_obj_i[key] = []
            unique_obj_i[key].append(var_idx)

        if len(unique_obj_i) == 1:  # always the same objects in each variant
            _, rel_variant_indices = unique_obj_i.popitem()
            min_overlap: list[int] = []
            del unique_obj_i
        else:
            # multiple different objects -> try to find the one with the
            # lowest overlap to the other variants (so that we can possibly
            # factor the itmd more than once)
            unique_obj_i_list = list(unique_obj_i.items())
            del unique_obj_i
            overlaps: list[list[int]] = []
            for i, (key, _) in enumerate(unique_obj_i_list):
                eri_i, denom_i = set(key[0]), set(key[1])
                # determine the intersection of the objects
                overlaps.append(sorted([
                    len(eri_i & set(other_key[0])) +
                    len(denom_i & set(other_key[1]))
                    for other_i, (other_key, _) in enumerate(unique_obj_i_list)
                    if i != other_i
                ]))
            # get the idx of the unique_obj_i with minimal intersections,
            # get the variant_data of the first element in the variant_idx_list
            min_overlap: list[int] = min(overlaps)
            # collect all variant indices that have this overlap
            rel_variant_indices = []
            for overlap, (_, var_idx_list) in zip(overlaps, unique_obj_i_list):
                if overlap == min_overlap:
                    rel_variant_indices.extend(var_idx_list)
            del overlaps
            del unique_obj_i_list
        # choose the variant with the minimal itmd_indices
        variant_data = min(
            [variants[var_idx] for var_idx in rel_variant_indices],
            key=lambda var: [var["sub"].get(s, s).name for s in
                             itmd_default_symbols]
        )

        # now start with factoring
        # - extract the remainder that survives the factorization (excluding
        #   the prefactor)
        remainder: ExprContainer = _get_remainder(
            term, variant_data["eri_i"], variant_data["denom_i"]
        )
        # - find the itmd indices:
        #   for short itmds it is not necessary to minimize the itmd indices
        #   just use whatever is found
        itmd_indices: tuple[Index, ...] = tuple(
            variant_data["sub"].get(s, s) for s in itmd_default_symbols
        )

        contracted_itmd_indices: tuple[Index, ...] = tuple(
            variant_data["sub"].get(s, s) for s in itmd_contracted_symbols
        )
        remainder_indices = set(remainder.idx)
        if any(s in remainder_indices for s in contracted_itmd_indices):
            raise RuntimeError("Invalid contracted itmd indices "
                               f"{contracted_itmd_indices} found that also "
                               f"appear in the remainder:\n{remainder}")

        # - determine the prefactor of the factored term
        pref = term.pref * variant_data["factor"] / itmd.pref
        # - check if it is possible to factor the itmd another time:
        #   should be possible if there is a 0 in the min_overlap list:
        #   -> Currently factoring a variant that has 0 overlap with another
        #      variant
        #   -> It should be possible to factor the intermediate in the
        #      remainder again!
        if 0 in min_overlap:
            # factor again and ensure that the factored result has the
            # current assumptions
            remainder = ExprContainer(
                _factor_short_intermediate(remainder, itmd, itmd_data,
                                           itmd_cls).inner,
                **remainder.assumptions
            )
        # - build the new term including the itmd
        factored_term = _build_factored_term(remainder, pref, itmd_cls,
                                             itmd_indices)

        factored_sucessfully = True
        logger.info(f"\nFactoring {itmd_cls.name} in:\n{term}\n"
                    f"result:\n{EriOrbenergy(factored_term)}")
        factored += factored_term.inner
    # if we factored the itmd successfully it might be necessary to add
    # the itmd tensor to the sym or antisym tensors
    if factored_sucessfully:
        tensor = itmd_cls.tensor(wrap_result=False)
        if isinstance(tensor, AntiSymmetricTensor):
            name = tensor.name
            if tensor.bra_ket_sym is S.One and \
                    name not in (sym_tensors := factored.sym_tensors):
                factored.sym_tensors = sym_tensors + (name,)
            elif tensor.bra_ket_sym is S.NegativeOne and \
                    name not in (antisym_t := factored.antisym_tensors):
                factored.antisym_tensors = antisym_t + (name,)
    return factored
def _factor_complete(result: ExprContainer,
                     terms: Sequence[TermContainer],
                     itmd_cls: "RegisteredIntermediate",
                     factored_terms: set[int],
                     intermediate_variants: 'LongItmdVariants'
                     ) -> tuple[ExprContainer, bool]:
    """
    Factors all found complete intermediate variants of a long intermediate
    in an expression, i.e., variants where for all terms a match with the same
    prefactor could be found meaning that nothing has to be added to the
    expression to factor the intermediate.

    Parameters
    ----------
    result : ExprContainer
        The resulting expression where newly factored terms are added.
    terms : Sequence[TermContainer]
        The original expression where the intermediate should be factored
        split into terms.
    itmd_cls : RegisteredIntermediate
        The class instance of the intermediate to factor.
    factored_terms : set[int]
        Terms which were already involved in the factorization of an
        intermediate variant.
    intermediate_variants : LongItmdVariants
        The intermediate variants found in the expression.

    Returns
    -------
    tuple[ExprContainer, bool]
        The result expression - with only the factored terms added - and a bool
        indicating whether a complete intermediate could be factored.
    """
    factored_successfully: bool = False
    for itmd_indices, remainders in intermediate_variants.items():
        for rem in remainders:
            complete_variant = intermediate_variants.get_complete_variant(
                itmd_indices, rem
            )
            while complete_variant is not None:
                # Found a complete intermediate with matching prefactors!!
                pref, term_list = complete_variant

                logger.info(f"\nFactoring {itmd_cls.name} in terms:")
                for term_i in term_list:
                    logger.info(EriOrbenergy(terms[term_i]))

                new_term = _build_factored_term(
                    rem, pref, itmd_cls, itmd_indices
                )
                logger.info(f"result:\n{EriOrbenergy(new_term)}")
                result += new_term.inner

                # remove the used terms from the pool of available terms
                # and add the terms to the already factored terms
                intermediate_variants.remove_used_terms(term_list)
                factored_terms.update(term_list)
                factored_successfully = True

                # try to find the next complete variant
                complete_variant = intermediate_variants.get_complete_variant(
                    itmd_indices, rem
                )
    # remove empty itmd_indices and remainders
    if factored_successfully:
        intermediate_variants.clean_empty()

    return result, factored_successfully
def _factor_mixed_prefactors(result: ExprContainer,
                             terms: Sequence[TermContainer],
                             itmd_cls: "RegisteredIntermediate",
                             factored_terms: set[int],
                             intermediate_variants: "LongItmdVariants"
                             ) -> tuple[ExprContainer, bool]:
    """
    Factors intermediate variants where all terms were found, though with
    different prefactors, i.e., the intermediate might be factored by
    adding one or more of the original terms to the result expression to
    compensate:
    z = a + 2b + c + d
    a + b + c + d = z - b

    Parameters
    ----------
    result : ExprContainer
        The result expression where newly factored terms are added.
    terms : Sequence[TermContainer]
        The original expression where the intermediate should be factored
        split into terms.
    itmd_cls : RegisteredIntermediate
        The class instance of the intermediate to factor.
    factored_terms : set[int]
        Terms which were already involved in the factorization of an
        intermediate variant.
    intermediate_variants : LongItmdVariants
        The found intermediate variants.

    Returns
    -------
    tuple[ExprContainer, bool]
        The result expression - with only the factored terms added - and a bool
        indicating whether an intermediate with mixed prefactors was factored.
    """
    factored_successfully = False
    for itmd_indices, remainders in intermediate_variants.items():
        for rem in remainders:
            mixed_variant = intermediate_variants.get_mixed_pref_variant(
                itmd_indices, rem
            )
            while mixed_variant is not None:
                prefs, term_list, unit_factors, pref_counter = mixed_variant

                # determine the most common prefactor and which terms need
                # to be added (have a different prefactor)
                most_common_pref = max(pref_counter.items(),
                                       key=lambda tpl: tpl[1])[0]
                terms_to_add: dict[int, Expr] = {}
                for p, term_i in zip(prefs, term_list):
                    if p == most_common_pref or term_i in terms_to_add:
                        continue
                    terms_to_add[term_i] = p

                # for all terms that don't have the most common prefactor:
                # determine the 'extension' that needs to be added to the
                # result to factor the intermediate using the most common pref
                logger.info("\nAdding terms:")
                for term_i, p in terms_to_add.items():
                    desired_pref = Mul(most_common_pref, unit_factors[term_i])
                    term = EriOrbenergy(terms[term_i]).canonicalize_sign()
                    extension_pref = Add(term.pref, -desired_pref)
                    term = term.num * extension_pref * term.eri / term.denom
                    logger.info(EriOrbenergy(term))
                    result += term.inner

                logger.info(f"\nFactoring {itmd_cls.name} with mixed "
                            "prefactors in:")
                for term_i in term_list:
                    logger.info(EriOrbenergy(terms[term_i]))

                new_term = _build_factored_term(
                    rem, most_common_pref, itmd_cls, itmd_indices
                )
                logger.info(f"result:\n{EriOrbenergy(new_term)}")
                result += new_term.inner

                # remove the used terms from the pool of available terms
                # and add the terms to the already factored terms
                intermediate_variants.remove_used_terms(term_list)
                factored_terms.update(term_list)
                factored_successfully = True

                # try to find the next mixed intermediate
                mixed_variant = intermediate_variants.get_mixed_pref_variant(
                    itmd_indices, rem
                )
    # remove empty itmd_indices and remainders
    if factored_successfully:
        intermediate_variants.clean_empty()

    return result, factored_successfully


def _build_factored_term(remainder: ExprContainer, pref: Expr,
                         itmd_cls: "RegisteredIntermediate",
                         itmd_indices: tuple[Index, ...]
                         ) -> ExprContainer:
    """Builds the factored term."""
    tensor = itmd_cls.tensor(indices=itmd_indices, wrap_result=False)
    # if the itmd_indices are completely minimized, we should always
    # get a tensor (and no Mul object)
    assert isinstance(tensor, SymbolicTensor)
    # resolve the Zero placeholder for residuals
    if tensor.name == "Zero":
        return ExprContainer(0, **remainder.assumptions)
    return remainder * pref * tensor


def _get_remainder(term: EriOrbenergy, obj_i: Sequence[int],
                   denom_i: Sequence[int]) -> ExprContainer:
    """
    Builds the remaining part of the provided term that survives the
    factorization of the itmd, excluding the prefactor!
    Note that the returned remainder can still hold a prefactor of -1,
    because sympy is not maintaining the canonical sign in the orbital energy
    fraction.
    """
    eri: ExprContainer = term.cancel_eri_objects(obj_i)
    denom: ExprContainer = term.cancel_denom_brackets(denom_i)
    rem = term.num * eri / denom
    # explicitly set the target indices, because the remainder not necessarily
    # has to contain all of them.
    if rem.provided_target_idx is None:  # no target indices set
        rem.set_target_idx(term.eri.target)
    return rem
+ """ + from .symmetry import Permutation, PermutationProduct + + # find the itmd indices that are no target indices of the overall term + # -> those are available for permutations + target_indices = remainder.terms[0].target + idx_to_permute = {s for s in itmd_indices if s not in target_indices} + # copy the remainder and set the previously determined + # indices as target indices + rem: ExprContainer = remainder.copy() + rem.set_target_idx(tuple(idx_to_permute)) + # create a substitution dict to map the minimal indices to the + # default indices of the intermediate + minimal_to_default = {o: n for o, n in zip(itmd_indices, itmd_default_idx)} + # iterate over the subset of remainder symmetry that only involves + # non-target intermediate indices + matching_itmd_terms: set[int] = {itmd_i} + for perms, perm_factor in rem.terms[0].symmetry(only_target=True).items(): + # translate the permutations to the default indices + perms = PermutationProduct(*( + Permutation(minimal_to_default[p], minimal_to_default[q]) + for p, q in perms + )) + # look up the translated symmetry in the term map + term_map = itmd_term_map[(perms, perm_factor)] + if itmd_i in term_map: + matching_itmd_terms.add(term_map[itmd_i]) + return matching_itmd_terms + + +def _compare_eri_parts( + term: EriOrbenergy, itmd_term: EriOrbenergy, + term_data: "FactorizationTermData | None" = None, + itmd_term_data: 'FactorizationTermData | None' = None + ) -> list[tuple[list[int], dict[Index, Index], list[tuple[Index, Index]], int]] | None: # noqa E501 + """ + Compares the ERI parts of two terms. Determines + - the objects (by index) in the term on which the objects in the itmd term + can be mapped, i.e., the tensors that have to be removed from the term + if the intermediate is factored. + - the necessary index substitutions to bring the ERI part of the itmd + term into the form found in the expression term. + - the additional factor (+-1) that needs to be introduced after applying + the index substitutions. 
+ """ + + # the eri part of the term to factor has to be at least as long as the + # eri part of the itmd (prefactors are separated!) + if len(itmd_term.eri) > len(term.eri): + return None + + objects: tuple[ObjectContainer, ...] = term.eri.objects + itmd_objects: tuple[ObjectContainer, ...] = itmd_term.eri.objects + + # generate term_data if not provided + if term_data is None: + term_data = FactorizationTermData(term) + # generate itmd_data if not provided + if itmd_term_data is None: + itmd_term_data = FactorizationTermData(itmd_term) + + relevant_itmd_data = zip(enumerate(itmd_term_data.eri_pattern), + itmd_term_data.eri_obj_indices, + itmd_term_data.eri_obj_symmetry) + + # compare all objects in the eri parts + variants: list[tuple[list[int], dict[Index, Index], int]] = [] + for (itmd_i, (itmd_descr, itmd_coupl)), itmd_indices, itmd_obj_sym in \ + relevant_itmd_data: + itmd_obj_exponent = itmd_objects[itmd_i].exponent + assert itmd_obj_exponent.is_Integer + + relevant_data = zip(enumerate(term_data.eri_pattern), + term_data.eri_obj_indices) + # list to collect all obj that can match the itmd_obj + # with their corresponding sub variants + itmd_obj_matches: list[tuple[list[int], dict[Index, Index], int]] = [] + for (i, (descr, coupl)), indices in relevant_data: + # tensors have same name and space? + # is the coupling of the itmd_obj a subset of the obj coupling? 
+ if descr != itmd_descr or any(coupl[c] < n for c, n in + itmd_coupl.items()): + continue + # collect the obj index n-times to indicate how often the + # object has to be cancelled (possibly multiple times depending + # on the exponent of the itmd_obj) + to_cancel: list[int] = [i for _ in range(int(itmd_obj_exponent))] + # create all possibilites to map the indices onto each other + # by taking the symmetry of the itmd_obj into account + # store them as tuple: (obj_indices, sub, factor) + itmd_obj_matches.append((to_cancel, + dict(zip(itmd_indices, indices)), + 1)) + for perms, factor in itmd_obj_sym.items(): + perm_itmd_indices = itmd_indices + for p, q in perms: + sub = {p: q, q: p} + perm_itmd_indices = [sub.get(s, s) for s in + perm_itmd_indices] + itmd_obj_matches.append((to_cancel, + dict(zip(perm_itmd_indices, indices)), + factor)) + # was not possible to map the itmd_obj onto any obj in the term + # -> terms can not match + if not itmd_obj_matches: + return None + + if not variants: # initialize variants + variants.extend(itmd_obj_matches) + else: # try to add the mapping of the current itmd_obj + extended_variants: \ + list[tuple[list[int], dict[Index, Index], int]] = [] + for (i_list, sub, factor), (new_i_list, new_sub, new_factor) in \ + itertools.product(variants, itmd_obj_matches): + # was the obj already mapped onto another itmd_obj? + # do we have a contradiction in the sub_dicts? 
+ # -> a index in the itmd can only be mapped onto 1 index + # in the term simultaneously + if new_i_list[0] not in i_list and all( + o not in sub or sub[o] is n + for o, n in new_sub.items()): + extended_variants.append((i_list + new_i_list, + sub | new_sub, # OR combine dict + factor * new_factor)) + if not extended_variants: # no valid combinations -> cant match + return None + variants = extended_variants + # validate the found variants to map the terms onto each other + valid: list[tuple[list[int], dict[Index, Index], list[tuple[Index, Index]], int]] = [] # noqa E501 + for i_list, sub_dict, factor in variants: + i_set: set[int] = set(i_list) + # did we find a match for all itmd_objects? + if len(i_set) != len(itmd_objects): + continue + # extract the objects of the term + relevant_obj: Expr = Mul(*(objects[i].inner for i in i_set)) + # apply the substitutions to the itmd_term, remove the prefactor + # (the substitutions might introduce a factor of -1 that we don't need) + # and check if the substituted itmd_term is identical to the subset + # of objects + sub_list = order_substitutions(sub_dict) + sub_itmd_eri = itmd_term.eri.subs(sub_list) + + if sub_itmd_eri.inner is S.Zero: # invalid substitution list + continue + pref = sub_itmd_eri.terms[0].prefactor # +-1 + + if Add(relevant_obj, -Mul(sub_itmd_eri.inner, pref)) is S.Zero: + valid.append((i_list, sub_dict, sub_list, factor)) + return valid if valid else None + + +def _compare_terms(term: EriOrbenergy, itmd_term: EriOrbenergy, + term_data: "FactorizationTermData | None" = None, + itmd_term_data: "FactorizationTermData | None" = None + ) -> None | list[dict[str, Any]]: + """ + Compares two terms and determines + - the index of objects in the eri and denominator part of the term that + need to be removed in the term to factor the interemdiate. + - the necessary index substitutions to bring the itmd term into the form + found in the expression. 
def _compare_terms(term: EriOrbenergy, itmd_term: EriOrbenergy,
                   term_data: "FactorizationTermData | None" = None,
                   itmd_term_data: "FactorizationTermData | None" = None
                   ) -> None | list[dict[str, Any]]:
    """
    Compares two terms and determines
    - the index of objects in the eri and denominator part of the term that
      need to be removed in the term to factor the intermediate.
    - the necessary index substitutions to bring the itmd term into the form
      found in the expression.
    - the additional factor (+-1) that needs to be introduced after applying
      the index substitutions.
    Note: orbital energy numerators are currently not treated!
    """

    eri_variants = _compare_eri_parts(
        term, itmd_term, term_data, itmd_term_data
    )

    if eri_variants is None:
        return None

    # itmd_term has no denominator -> stop here
    if itmd_term.denom.inner.is_number:
        return [{'eri_i': eri_i, 'denom_i': [],
                 'sub': sub_dict, 'sub_list': sub_list, 'factor': factor}
                for eri_i, sub_dict, sub_list, factor in eri_variants]

    # term and itmd_term should have a denominator at this point
    # -> extract the brackets
    brackets = term.denom_brackets
    itmd_brackets = itmd_term.denom_brackets
    # extract the lengths of all brackets
    bracket_lengths: list[int] = [len(bk) for bk in brackets]
    # prescan the brackets according to their length to avoid unnecessary
    # substitutions
    compatible_brackets: dict[int, list[int]] = {}
    for itmd_denom_i, itmd_bk in enumerate(itmd_brackets):
        itmd_bk_length = len(itmd_bk)
        matching_brackets = [denom_i for denom_i, bk_length
                             in enumerate(bracket_lengths)
                             if bk_length == itmd_bk_length]
        if not matching_brackets:  # could not find a match for a itmd bracket
            return None
        compatible_brackets[itmd_denom_i] = matching_brackets

    # check which of the found substitutions are also valid for the denominator
    variants: list[dict[str, Any]] = []
    for eri_i, sub_dict, sub_list, factor in eri_variants:
        # can only map each bracket onto 1 itmd bracket
        # otherwise something should be wrong
        denom_matches: list[int] = []
        for itmd_denom_i, denom_idx_list in compatible_brackets.items():
            itmd_bk = itmd_brackets[itmd_denom_i]
            # extract base and exponent of the bracket
            if isinstance(itmd_bk, ExprContainer):
                itmd_bk_exponent = S.One
                itmd_bk = itmd_bk.inner
            else:  # polynom -> Pow object
                itmd_bk, itmd_bk_exponent = itmd_bk.base_and_exponent
            assert itmd_bk_exponent.is_Integer

            # apply the substitutions to the base of the bracket
            sub_itmd_bk = itmd_bk.subs(sub_list)
            if sub_itmd_bk is S.Zero:  # invalid substitution list
                continue

            # try to find a match in the subset of brackets of equal length
            for denom_i in denom_idx_list:
                if denom_i in denom_matches:  # denom bk is already assigned
                    continue
                bk = brackets[denom_i]
                # extract the base of the bracket
                bk = bk.inner if isinstance(bk, ExprContainer) else bk.base
                if Add(sub_itmd_bk, -bk) is S.Zero:  # brackets are equal?
                    denom_matches.extend(
                        denom_i for _ in range(int(itmd_bk_exponent))
                    )
                    break
            # did not run into the break:
            # -> could not find a match for the itmd_bracket
            # -> directly skip to next eri_variant
            else:
                break
        # did we find a match for all itmd brackets?
        if len(set(denom_matches)) == len(itmd_brackets):
            variants.append({'eri_i': eri_i, 'denom_i': denom_matches,
                             'sub': sub_dict, 'sub_list': sub_list,
                             'factor': factor})
    return variants if variants else None
+ # -> set both indices sets as target indices of the expressions + fixed_indices = remainder.terms[0].target + assert fixed_indices == ref_remainder.terms[0].target + fixed_indices += itmd_indices + + # create a copy of the expressions to keep the assumptions of the original + # expressions valid (assumptions should hold the correct target indices) + remainder, ref_remainder = remainder.copy(), ref_remainder.copy() + remainder.set_target_idx(fixed_indices) + ref_remainder.set_target_idx(fixed_indices) + + # TODO: we have a different situation in this function, because not all + # contracted indices have to occur in the eri part of the remainder: + # eri indices: jkln. Additionally we have m in the denominator. + # the function will only map n->m but not m->n, because it does + # not occur in the eri part. This might mess up the denominator + # or numerator of the term completely! + # -> can neither use find_compatible_terms nor compare_terms!! + # I think in a usual run this should only occur if previously some + # intermediate was not found correctly, because for t-amplitudes all + # removed indices either only occur in the eri part or occur in eri and + # denom. But if we did not find some t-amplitude and have some denominator + # left, this problem might occur if a denom idx is a contracted index + # in the eri part of the itmd. + # -> but then we can not factor the itmd anyway, because the contracted + # idx in the eri part and the denom have to be identical + # -> need to be solved at another point + + difference = remainder - ref_remainder + if len(difference) == 1: # already identical -> 0 or added to 1 term + return 1 if difference.inner is S.Zero else -1 + # check if the eri parts of both remainders can be mapped onto each other + factored = factor_eri_parts(difference) + if len(factored) > 1: # eri parts not compatible + return None + + # check if the denominators are compatible too. 
+ factored = factor_denom(factored[0]) + if len(factored) > 1: # denominators are not compatible + return None + return 1 if factored[0].inner is S.Zero else -1 + + +class LongItmdVariants( + dict[tuple[Index, ...], dict[ExprContainer, dict[tuple[int, ...], list[tuple[int, Expr, Expr]]]]] # noqa E501 + ): + """ + Class to manage the variants of long intermediates. + + Parameters + ---------- + n_itmd_terms : int + The number of terms in the long intermediate. + """ + + def __init__(self, n_itmd_terms: int, *args, **kwargs): + self.n_itmd_terms: int = n_itmd_terms + # The number of terms we require to share a common prefactor + # for mixed prefactor intermediates + self.n_common_pref_terms: int = (0.6 * self.n_itmd_terms).__ceil__() + super().__init__(*args, **kwargs) + + def add(self, term_i: int, itmd_indices: tuple[Index, ...], + remainder: ExprContainer, matching_itmd_terms: Iterable[int], + prefactor: Expr, unit_factorization_pref: Expr) -> None: + """ + Add a matching term-itmd_term pair to the pool for building + intermediate variants. + + Parameters + ---------- + term_i : int + The index of the term. + itmd_indices : tuple[Index] + The indices of the factored interemdiate. + remainder : ExprContainer + Remaining objects of the term after factoring the intermediate. + matching_itmd_terms : Iterable[int] + Index of itmd terms the term can be mapped. + prefactor: Expr + The prefactor of the resulting term after factoring the itmd. + unit_factorization_pref: Expr + The factor that the current term would need if the intermediate + would be factored with a prefactor of 1. 
+ """ + # trivial separation by itmd_indices (the indices of the itmd we + # try to factor with the current variant) + if itmd_indices not in self: + self[itmd_indices] = {} + + matching_itmd_terms = tuple(sorted(matching_itmd_terms)) + is_new_remainder = True + for rem, found_matches in self[itmd_indices].items(): + # next we can separate the variants by the remainder they will + # create when the variant is factored + factor = _compare_remainder(remainder=remainder, ref_remainder=rem, + itmd_indices=itmd_indices) + if factor is None: # remainder did not match + continue + + is_new_remainder = False + # possibly we got another -1 from matching the remainder + prefactor *= sympify(factor) + + # next, we can separate them according to the itmd_positions + # so we can later build intermediate variants more efficient + if matching_itmd_terms not in found_matches: + found_matches[matching_itmd_terms] = [] + # It is possible to obtain entries that have the same + # term_i and pref, but differ in the sign of the unit factor + # this is probably a result of the permutation symmetry + # of some intermediates + # -> only add the term if term_i and pref have not been found yet + is_dublicate = any( + (term_i == other_term_i and prefactor == other_pref + and abs(unit_factorization_pref) == abs(other_unit_factor)) + for other_term_i, other_pref, other_unit_factor in + found_matches[matching_itmd_terms] + ) + if not is_dublicate: + found_matches[matching_itmd_terms].append( + (term_i, prefactor, unit_factorization_pref) + ) + break + if is_new_remainder: + self[itmd_indices][remainder] = {} + self[itmd_indices][remainder][matching_itmd_terms] = [ + (term_i, prefactor, unit_factorization_pref) + ] + + def get_complete_variant(self, itmd_indices: tuple[Index, ...], + remainder: ExprContainer + ) -> None | tuple[Expr, list[int]]: + """ + Returns prefactor and terms (by index) of a complete intermediate + variant for the given itmd_indices and remainder. 
+ Only variants that are complete and share a common prefactor are + considered here. + If no variant can be found None is returned. + """ + + def sort_matches(pool: list[tuple[tuple[int, ...], set[int]]] + ) -> list[tuple[tuple[int, ...], list[int]]]: + term_i_counter: dict[int, dict[int, int]] = {} + for positions, matches in pool: + for term_i in matches: + if term_i not in term_i_counter: + term_i_counter[term_i] = {} + for p in positions: + if p not in term_i_counter[term_i]: + term_i_counter[term_i][p] = 0 + term_i_counter[term_i][p] += 1 + term_i_counter_evaluated: dict[int, tuple[int, int]] = { + term_i: (len(positions), sum(positions.values())) + for term_i, positions in term_i_counter.items() + } + del term_i_counter + return [ + (pos, sorted(matches, + key=lambda m: term_i_counter_evaluated[m])) + for pos, matches in pool] + + # itmd_indices and or remainder not found + if itmd_indices not in self or \ + remainder not in self[itmd_indices]: + return None + pool = self[itmd_indices][remainder] + if not pool: # empty pool: already factored everything + return None + + # construct base variants that are likely to form complete variants + for pref, term_list in self._complete_base_variants(pool): + # filter the pool: + # - remove all already occupied positions + # - remove all matches of already used terms + # - remove all matches that have a different prefactor + relevant_pool: dict[tuple[int, ...], set[int]] = {} + for positions, matches in pool.items(): + if any(term_list[p] is not None for p in positions): + continue + relevant_matches = { + term_i for term_i, other_pref, _ in matches + if other_pref == pref and term_i not in term_list + } + if relevant_matches: + relevant_pool[positions] = relevant_matches + if not relevant_pool: # nothing relevant left + continue + + # sort the pool: + # - start with the positions with the lowest number of matches + # - prioritize rare indices + relevant_pool_sorted = sorted( + relevant_pool.items(), key=lambda kv: 
len(kv[1]) + ) + del relevant_pool + relevant_pool_sorted = sort_matches(relevant_pool_sorted) + + # set up masks to avoid creating copies of the pool + pos_mask = [True for _ in relevant_pool_sorted] + match_masks = [[True for _ in matches] + for _, matches in relevant_pool_sorted] + # try to complete the base variant from the relevant pool + success = self._build_complete_variant( + term_list, relevant_pool_sorted, pos_mask, match_masks + ) + if success: + assert _is_int_list(term_list) + return pref, term_list + # continue with the next base variant + # loop completed -> no complete variant found + return None + + def _complete_base_variants( + self, pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] + ) -> Generator[tuple[Expr, list[int | None]], None, None]: + """Iterator over the base variants for complete intermediates.""" + + def sort_matches( + pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]], + matches_to_sort: list[tuple[int, Expr, Expr]] + ) -> list[tuple[int, Expr, Expr]]: + term_i_counter: dict[int, dict[int, int]] = {} + pref_available_pos: dict[Expr, list[bool]] = {} + for positions, matches in pool.items(): + for term_i, pref, _, in matches: + if term_i not in term_i_counter: + term_i_counter[term_i] = {} + if pref not in pref_available_pos: + pref_available_pos[pref] = [False for _ in + range(self.n_itmd_terms)] + for p in positions: + if p not in term_i_counter[term_i]: + term_i_counter[term_i][p] = 0 + term_i_counter[term_i][p] += 1 + pref_available_pos[pref][p] = True + term_i_counter_evaluated = {term_i: ( + len(positions), + sum(positions.values()) + ) for term_i, positions in term_i_counter.items()} + # remove prefactors where not all positions are available + matches_to_sort = [m for m in matches_to_sort + if all(pref_available_pos[m[1]])] + return sorted( + matches_to_sort, key=lambda m: term_i_counter_evaluated[m[0]] + ) + + # find the position with the lowest number of matches + pos, matches = min(pool.items(), key=lambda 
kv: len(kv[1])) + # sort the matches so that rare term_i are covered first + # and remove prefactors where not all positions are available + if len(matches) > 1: + matches = sort_matches(pool, matches) + + # ensure we only ever try once per term_i and pref combination + prev_tried: dict[Expr, set[int]] = {} + for term_i, pref, _ in matches: + if pref not in prev_tried: + prev_tried[pref] = set() + if term_i in prev_tried[pref]: + continue + prev_tried[pref].add(term_i) + + yield (pref, + [term_i if i in pos else None + for i in range(self.n_itmd_terms)]) + + def _build_complete_variant(self, term_list: list[int | None], + pool: list[tuple[tuple[int, ...], list[int]]], + pos_mask: list[bool], + match_masks: list[list[bool]]) -> bool: + """ + Recursively builds the complete variant from the pool of matches. + """ + # check if the variant can be completed with the available + # positions + unique_positions = {p for pos, _ in itertools.compress(pool, pos_mask) + for p in pos} + n_missing_terms = term_list.count(None) + if n_missing_terms > len(unique_positions): + return False + + for i, (positions, matches) in \ + itertools.compress(enumerate(pool), pos_mask): + # update the mask: + # mask the positions that will be filled in the following loop + pos_mask[i] = False + + # since all positions have to be available we can already + # predict here whether we will be able to complete the variant + completed = (n_missing_terms == len(positions)) + for term_i in itertools.compress(matches, match_masks[i]): + # don't copy the term_list. Instead revert the changes + # before continue the iteration + for p in positions: + term_list[p] = term_i + + if completed: # check if we completed the variant + return True + + # update the mask: + # mask all positions that intersect with the filled + # positions and mask term_i as not available + # for now just store the mask changes, but we can + # also recompute the changes to revert them. 
+ masked_pos: list[int] = [] + masked_matches: list[tuple[int, int]] = [] + for other_i, (pos, other_matches) in \ + itertools.compress(enumerate(pool), pos_mask): + if any(p in positions for p in pos): + # no need to update the match mask here + pos_mask[other_i] = False + masked_pos.append(other_i) + continue + for j, other_term_i in \ + itertools.compress(enumerate(other_matches), + match_masks[other_i]): + if term_i == other_term_i: + match_masks[other_i][j] = False + masked_matches.append((other_i, j)) + if not any(match_masks[other_i]): + pos_mask[other_i] = False + masked_pos.append(other_i) + + # recurse and try to complete the variant + success = self._build_complete_variant( + term_list, pool, pos_mask, match_masks + ) + if success: # found complete variant + return True + + # revert the mask changes + for other_i in masked_pos: + pos_mask[other_i] = True + for other_i, j in masked_matches: + match_masks[other_i][j] = True + + # revert the changes to term_list and continue the loop + for p in positions: + term_list[p] = None + # unmask the position + pos_mask[i] = True + return False + + def get_mixed_pref_variant( + self, itmd_indices: tuple[Index, ...], remainder: ExprContainer + ) -> None | tuple[list[Expr], list[int], dict[int, Expr], Counter[Expr]]: # noqa E501 + """ + Returns a complete variant allowing mixed prefactors for the given + itmd_indices and remainder. Only variants where at leas 60% of the + terms share a common prefactor are considered. 
+ """ + def _is_pool( + sequence: list + ) -> TypeGuard[list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]]]: # noqa E501 + # the alternative return type would have a length of 3 + return all(len(tpl) == 2 for tpl in sequence) + + # itmd_indices or remainder not available + if itmd_indices not in self or \ + remainder not in self[itmd_indices]: + return None + pool = self[itmd_indices][remainder] + if not pool: # empty pool: already factored all term_i + return None + + for (prefs, term_list, unit_factors) in \ + self._mixed_pref_base_variants(pool): + # filter the pool by removing all positions that are already + # occupied. Addtionally, remove all term_i that are already + # in use. + relevant_pool: \ + dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] = {} + for positions, matches in pool.items(): + if any(term_list[p] is not None for p in positions): + continue + relevant_matches = [ + data for data in matches if data[0] not in term_list + ] + if relevant_matches: + relevant_pool[positions] = relevant_matches + if not relevant_pool: + continue + + # sort the pool to start with the position with the lowest amount + # of valid matches and prioritize rare term_i and common prefactors + relevant_pool_sorted = sorted( + relevant_pool.items(), key=lambda kv: len(kv[1]) + ) + del relevant_pool + relevant_pool_sorted = self._sort_mixed_pref_matches( + relevant_pool_sorted + ) + assert _is_pool(relevant_pool_sorted) + + # set up masks for position and matches to avoid copying data + pos_mask: list[bool] = [True for _ in relevant_pool_sorted] + match_masks: list[list[bool]] = [ + [True for _ in matches] + for _, matches in relevant_pool_sorted + ] + pref_counter = Counter([p for p in prefs if p is not None]) + # try to complete the base variant using the relevant pool + success = self._complete_mixed_variant( + term_list, prefs, unit_factors, relevant_pool_sorted, + pref_counter, pos_mask, match_masks + ) + if success: + assert _is_int_list(term_list) and 
_is_expr_list(prefs) + return prefs, term_list, unit_factors, pref_counter + # else continue with the next base variant + # loop completed -> no mixed variant found + return None + + def _mixed_pref_base_variants( + self, pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] + ) -> Generator[tuple[list[Expr | None], list[int | None], dict[int, Expr]], None, None]: # noqa E501 + """ + Iterator over the base variants for intermediates with mixed prefactors + """ + def _is_long_itmd_data_list( + sequence: list) -> TypeGuard[list[tuple[int, Expr, Expr]]]: + return all( + len(tpl) == 3 and isinstance(tpl[0], int) + and isinstance(tpl[1], Expr) and isinstance(tpl[2], Expr) + for tpl in sequence + ) + # find the positions with the lowest number of matches + pos, matches = min(pool.items(), key=lambda kv: len(kv[1])) + # sort the matches so that + # rare indices and common prefactors are preferred + if len(matches) > 1: + matches = self._sort_mixed_pref_matches( + tuple(pool.items()), matches + ) + assert _is_long_itmd_data_list(matches) + + # filter out matches that have the same term_i and pref + prev_tried: dict[Expr, set[int]] = {} + for term_i, pref, unit_factor in matches: + if pref not in prev_tried: + prev_tried[pref] = set() + if term_i in prev_tried[pref]: + continue + prev_tried[pref].add(term_i) + + yield ( + [pref if i in pos else None for i in range(self.n_itmd_terms)], + [term_i if i in pos else None + for i in range(self.n_itmd_terms)], + {term_i: unit_factor} + ) + + def _sort_mixed_pref_matches( + self, + pool: Sequence[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]], # noqa E501 + matches_to_sort: list[tuple[int, Expr, Expr]] | None = None + ) -> list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]] | \ + list[tuple[int, Expr, Expr]]: + """ + Sorts all matches in the pool so that rare term_i and common + prefactors are preferred. 
If an additional match list is provided + instead this match list will be sorted instead of all matches in + the pool. + """ + term_i_counter: dict[int, dict[int, int]] = {} + pref_counter: dict[Expr, dict[int, int]] = {} + for positions, matches in pool: + for term_i, pref, _ in matches: + if term_i not in term_i_counter: + term_i_counter[term_i] = {} + if pref not in pref_counter: + pref_counter[pref] = {} + for pos in positions: + if pos not in term_i_counter[term_i]: + term_i_counter[term_i][pos] = 0 + term_i_counter[term_i][pos] += 1 + if pos not in pref_counter[pref]: + pref_counter[pref][pos] = 0 + pref_counter[pref][pos] += 1 + term_i_counter_evaluated: dict[int, tuple[int, int]] = { + term_i: (len(positions), sum(positions.values())) + for term_i, positions in term_i_counter.items() + } + del term_i_counter + pref_counter_evaluated: dict[Expr, tuple[int, int]] = { + pref: (-len(positions), -sum(positions.values())) + for pref, positions in pref_counter.items() + } + del pref_counter + + if matches_to_sort is None: + return [ + (pos, sorted(matches, + key=lambda m: (*term_i_counter_evaluated[m[0]], + *pref_counter_evaluated[m[1]]))) + for pos, matches in pool + ] + else: + return sorted( + matches_to_sort, + key=lambda m: (*term_i_counter_evaluated[m[0]], + *pref_counter_evaluated[m[1]]) + ) + + def _complete_mixed_variant( + self, term_list: list[int | None], prefactors: list[Expr | None], + unit_factors: dict[int, Expr], + pool: list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]], + pref_counter: Counter[Expr], pos_mask: list[bool], + match_masks: list[list[bool]]) -> bool: + """ + Recursively builds the complete mixed prefactor variant + Only variants where at least 60% of the terms share a common prefactor + are accepted. 
+ """ + # check if the variant can be completed with the available + # positions + unique_positions = {p for pos, _ in itertools.compress(pool, pos_mask) + for p in pos} + n_missing_terms = term_list.count(None) + if n_missing_terms > len(unique_positions): + return False + + for i, (positions, matches) in \ + itertools.compress(enumerate(pool), pos_mask): + # update the poositions mask + pos_mask[i] = False + + completed = (n_missing_terms == len(positions)) + for term_i, pref, unit_factor in \ + itertools.compress(matches, match_masks[i]): + # if we add the match: will we still be able to + # create a valid variant that hast at least 60% common + # prefactor? + pref_counter[pref] += len(positions) + + max_terms_common_pref = max(pref_counter.values()) + \ + n_missing_terms - len(positions) + if max_terms_common_pref < self.n_common_pref_terms: + # we will not be able to complete the variant + # with the current addition + pref_counter[pref] -= len(positions) + continue + + # add the current match to the variant + for p in positions: + term_list[p] = term_i + prefactors[p] = pref + unit_factors[term_i] = unit_factor + + if completed and max(pref_counter.values()) >= \ + self.n_common_pref_terms: + return True + + # update the mask: + # - mask any position that intersects with the the added + # positions + # - mask all otherm matches of term_i + masked_pos: list[int] = [] + masked_matches: list[tuple[int, int]] = [] + for other_i, (pos, other_matches) in \ + itertools.compress(enumerate(pool), pos_mask): + if any(p in positions for p in pos): + pos_mask[other_i] = False + masked_pos.append(other_i) + continue + for j, (other_term_i, _, _) in \ + itertools.compress(enumerate(other_matches), + match_masks[other_i]): + if term_i == other_term_i: + match_masks[other_i][j] = False + masked_matches.append((other_i, j)) + if not any(match_masks[other_i]): + pos_mask[other_i] = False + masked_pos.append(other_i) + + # recurse and try to complete the variant + success = 
self._complete_mixed_variant( + term_list, prefactors, unit_factors, pool, pref_counter, + pos_mask, match_masks + ) + if success: + return True + + # revert the mask changes + for other_i in masked_pos: + pos_mask[other_i] = True + for other_i, j in masked_matches: + match_masks[other_i][j] = True + + # undo the changes to the variant + for p in positions: + term_list[p] = None + prefactors[p] = None + del unit_factors[term_i] + + # undo the prefcounter changes + pref_counter[pref] -= len(positions) + + # unmaks the position + pos_mask[i] = True + return False + + def remove_used_terms(self, used_terms: list[int]) -> None: + """ + Removes the provided terms from the pool, so they can not + be used to build further variants. + """ + for remainders in self.values(): + for positions in remainders.values(): + empty_pos: list[tuple[int, ...]] = [] + for pos, matches in positions.items(): + to_delete = [i for i, m in enumerate(matches) + if m[0] in used_terms] + # need to remove element with highest index first! + for i in sorted(to_delete, reverse=True): + del matches[i] + if not matches: # removed all matches for the position + empty_pos.append(pos) + for pos in empty_pos: + del positions[pos] + + def clean_empty(self) -> None: + """Removes all empty entries in the nested dictionary.""" + empty_indices: list[tuple[Index, ...]] = [] + for itmd_indices, remainders in self.items(): + empty_rem = [rem for rem, positions in remainders.items() + if not positions] + for rem in empty_rem: + del remainders[rem] + if not remainders: + empty_indices.append(itmd_indices) + for itmd_indices in empty_indices: + del self[itmd_indices] + + +class FactorizationTermData: + """ + Class that extracts some data needed for the intermediate factorization. + + Parameters + ---------- + term : EriOrbenergy + The term to extract data from. 
+ """ + + def __init__(self, term: EriOrbenergy): + self._term: EriOrbenergy = term + + @cached_property + def eri_pattern(self) -> tuple[tuple[str, Counter[str]], ...]: + """ + Returns the pattern of the eri part of the term. In contrast to the + pattern used in simplify, the pattern is determined for each object + as tuple that consists of the object description and the + coupling of the object. + """ + coupling: dict[int, list[str]] = self._term.eri.coupling( + include_exponent=False, include_target_idx=False + ) + return tuple( + (obj.description(include_exponent=False, target_idx=None), + Counter(coupling.get(i, []))) + for i, obj in enumerate(self._term.eri.objects) + ) + + @cached_property + def eri_obj_indices(self) -> tuple[tuple[Index, ...], ...]: + """Indices hold by each of the objects in the eri part.""" + return tuple(obj.idx for obj in self._term.eri.objects) + + @cached_property + def eri_obj_symmetry(self + ) -> tuple[dict[tuple[Permutation, ...], int], ...]: + """Symmetry of all objects in the eri part.""" + return tuple( + TermContainer(obj, **obj.assumptions).symmetry() + for obj in self._term.eri.objects + ) + + @cached_property + def eri_obj_descriptions(self) -> Counter[str]: + """ + Counts how often each description occurs in the eri part. + Exponent of the objects is included implicitly by incrementing + the description counter. + """ + descr = [] + for obj in self._term.eri.objects: + exp = obj.exponent + assert exp.is_Integer + descr.extend( + obj.description(target_idx=None, include_exponent=False) + for _ in range(int(exp)) + ) + return Counter(descr) + + @cached_property + def denom_bracket_lengths(self) -> None | Counter[int]: + """ + Determine the length of all brackets in the orbital energy + denominator and count how often each length occurs in the denominator. 
+ """ + if self._term.denom.inner.is_number: + return None + else: + return Counter(len(bk) for bk in self._term.denom_brackets) + + +def _is_int_list(sequence: list) -> TypeGuard[list[int]]: + return all(isinstance(item, int) for item in sequence) + + +def _is_expr_list(sequence: list) -> TypeGuard[list[Expr]]: + return all(isinstance(item, Expr) for item in sequence) + + +_ = _factor_long_intermediate diff --git a/build/lib/adcgen/func.py b/build/lib/adcgen/func.py new file mode 100644 index 0000000..ae8e5ad --- /dev/null +++ b/build/lib/adcgen/func.py @@ -0,0 +1,540 @@ +from collections.abc import Sequence +import itertools + +from sympy.physics.secondquant import ( + F, Fd, FermionicOperator, NO +) +from sympy import S, Add, Expr, Mul, Pow, sqrt, Symbol, sympify + +from .expression import ExprContainer +from .misc import Inputerror +from .rules import Rules +from .indices import Index, Indices, get_symbols, split_idx_string +from .sympy_objects import ( + KroneckerDelta, NonSymmetricTensor, AntiSymmetricTensor, SymmetricTensor, + Amplitude +) +from .tensor_names import is_adc_amplitude, is_t_amplitude, tensor_names + + +def gen_term_orders(order: int, term_length: int, min_order: int + ) -> list[tuple[int, ...]]: + """ + Generate all combinations of orders that contribute to the n'th-order + contribution of a term of the given length + (a * b * c * ...)^{(n)}, + where a, b and c are each subject of a perturbation expansion. + + Parameters + ---------- + order : int + The perturbation theoretical order n. + term_length : int + The number of objects in the term. + min_order : int + The minimum perturbation theoretical order of the objects in the + term to consider. For instance, 2 if the first and zeroth order + contributions are not relevant, because they vanish or are considered + separately. 
+ """ + + if not all(isinstance(n, int) and n >= 0 + for n in [order, term_length, min_order]): + raise Inputerror("Order, term_length and min_order need to be " + "non-negative integers.") + + orders = (o for o in range(min_order, order + 1)) + combinations = itertools.product(orders, repeat=term_length) + return [comb for comb in combinations if sum(comb) == order] + + +def import_from_sympy_latex(expr_string: str, + convert_default_names: bool = False + ) -> ExprContainer: + """ + Imports an expression from a string created by the 'sympy.latex' function. + + Parameters + ---------- + convert_default_names : bool, optional + If set, all default tensor names found in the expression to import + will be converted to the currently configured names. + + Returns + ------- + ExprContainer + The imported expression in a 'Expr' container. Note that no assumptions + (sym_tensors or antisym_tensors) have been applied yet. + """ + + def import_indices(indices: str) -> list[Index]: + # split at the end of each index with a spin label + # -> n1n2n3_{spin} + idx: list[Index] = [] + for sub_part in indices.split("}"): + if not sub_part: # skip empty string + continue + if "_{\\" in sub_part: # the last index has a spin label + names, spin = sub_part.split("_{\\") + if spin not in ["alpha", "beta"]: + raise RuntimeError(f"Found invalid spin on Index: {spin}. 
" + f"Input: {indices}") + names = split_idx_string(names) + idx.extend(get_symbols(names[:-1])) + idx.extend(get_symbols(names[-1], spin[0])) + else: # no index has a spin label + idx.extend(get_symbols(sub_part)) + return idx + + def import_tensor(tensor: str) -> Expr: + # split the tensor in base and exponent + stack: list[str] = [] + separator: int | None = None + for i, c in enumerate(tensor): + if c == "{": + stack.append(c) + elif c == "}": + assert stack.pop() == "{" + elif not stack and c == "^": + separator = i + break + if separator is None: + exponent = 1 + else: + exponent = tensor[separator+1:] + exponent = int(exponent.lstrip("{").rstrip("}")) + tensor = tensor[:separator] + # done with processing the exponent + # -> deal with the tensor. remove 1 layer of curly brackets and + # afterwards split the tensor string into its components + if tensor[0] == "{": + tensor = tensor[1:] + if tensor[-1] == "}": + tensor = tensor[:-1] + stack.clear() + components: list[str] = [] + temp: list[str] = [] + for i, c in enumerate(tensor): + if c == "{": + stack.append(c) + elif c == "}": + assert stack.pop() == "{" + elif not stack and c in ["^", "_"]: + components.append("".join(temp)) + temp.clear() + continue + temp.append(c) + if temp: + components.append("".join(temp)) + name, indices = components[0], components[1:] + # if desired map the default tensor names to their currently + # configured name + # -> this allows expressions with the default names to + # be imported and mapped to the current configuration, correctly + # recognizing Amplitudes and SymmetricTensors. 
+ if convert_default_names: + name = tensor_names.map_default_name(name) + + # remove 1 layer of brackets from all indices + for i, idx in enumerate(indices): + if idx[0] == "{": + idx = idx[1:] + if idx[-1] == "}": + idx = idx[:-1] + indices[i] = idx + + if len(indices) == 0: # no indices -> a symbol + base: Expr = Symbol(name) + elif name == "a": # create / annihilate + if len(indices) == 2 and indices[0] == "\\dagger": + base: Expr = Fd(*import_indices(indices[1])) + elif len(indices) == 1: + base: Expr = F(*import_indices(indices[0])) + else: + raise RuntimeError("Unknown second quantized operator: ", + tensor) + elif len(indices) == 2: # antisym-/symtensor or amplitude + upper = import_indices(indices[0]) + lower = import_indices(indices[1]) + # ADC-Amplitude or t-amplitudes + if is_adc_amplitude(name) or is_t_amplitude(name): + base: Expr = Amplitude(name, upper, lower) + elif name == tensor_names.coulomb: # eri in chemist notation + base: Expr = SymmetricTensor(name, upper, lower) + else: + base: Expr = AntiSymmetricTensor(name, upper, lower) + elif len(indices) == 1: # nonsymtensor + base: Expr = NonSymmetricTensor(name, import_indices(indices[0])) + else: + raise RuntimeError(f"Unknown tensor object: {tensor}") + assert isinstance(base, Expr) + return Pow(base, exponent) + + def import_obj(obj_str: str) -> Expr: + # import an individial object + if obj_str.isnumeric(): # prefactor + return sympify(int(obj_str)) + elif obj_str.startswith("\\sqrt{"): # sqrt{x} prefactor + return sqrt(int(obj_str[:-1].replace("\\sqrt{", "", 1))) + elif obj_str.startswith("\\delta_"): # KroneckerDelta + idx = obj_str[:-1].replace("\\delta_{", "", 1).split() + idx = import_indices("".join(idx)) + if len(idx) != 2: + raise RuntimeError(f"Invalid indices for delta: {idx}.") + ret = KroneckerDelta(*idx) + assert isinstance(ret, Expr) + return ret + elif obj_str.startswith("\\left("): # braket + # need to take care of exponent of the braket! 
def split_terms(expr_string: str) -> list[str]:
    """
    Split a latex expression string into its additive terms.
    A '+' or '-' only starts a new term if it occurs outside of any
    bracket pair and is not the leading sign of the current term.
    """
    open_brackets: list[str] = []
    # map each closing bracket to its expected opening counterpart
    closing = {'}': '{', ')': '('}
    terms: list[str] = []

    start = 0
    for pos, char in enumerate(expr_string):
        if char in ('{', '('):
            open_brackets.append(char)
        elif char in ('}', ')'):
            # ensure the brackets are properly paired
            assert open_brackets.pop() == closing[char]
        elif char in ('+', '-') and not open_brackets and pos != start:
            terms.append(expr_string[start:pos])
            start = pos
    terms.append(expr_string[start:])  # append the last term
    return terms
def evaluate_deltas(
        expr: Expr,
        target_idx: Sequence[str] | Index | Sequence[Index] | None = None
        ) -> Expr:
    """
    Evaluates the KroneckerDeltas in an expression.
    The function only removes contracted indices from the expression and
    ensures that no information is lost if an index is removed.
    Adapted from the implementation in 'sympy.physics.secondquant'.
    Note that KroneckerDeltas in a Polynom (a*b + c*d)^n will not be
    evaluated. However, in most cases the expression can simply be expanded
    before calling this function.

    Parameters
    ----------
    expr: Expr
        Expression containing the KroneckerDeltas to evaluate. This function
        expects a plain object from sympy (Add/Mul/...) and no container
        class.
    target_idx : Sequence[str] | Sequence[Index] | None, optional
        Optionally, target indices can be provided if they can not be
        determined from the expression using the Einstein sum convention.
    """
    assert isinstance(expr, Expr)

    if isinstance(expr, Add):  # evaluate the deltas termwise
        return Add(*(
            evaluate_deltas(term, target_idx) for term in expr.args
        ))
    if not isinstance(expr, Mul):  # nothing to evaluate on other objects
        return expr
    # below we are dealing with a product of objects
    if target_idx is None:
        # determine the target indices via the Einstein sum convention,
        # i.e., indices that occur on a single object only.
        # atoms is sufficient here, because it lists every index only once
        # per object ((f_ii).atoms(Index) -> i) and an index can not appear
        # twice on the same delta.
        deltas: list[KroneckerDelta] = []
        occurrences: dict[Index, int] = {}
        for factor in expr.args:
            for idx in factor.atoms(Index):
                # 0 for the first occurrence, incremented for each further one
                occurrences[idx] = occurrences.get(idx, -1) + 1
            if isinstance(factor, KroneckerDelta):
                deltas.append(factor)
        # extract the target indices and reuse them in the next recursion
        # so they only have to be determined once
        target_idx = [idx for idx, n in occurrences.items() if not n]
    else:
        # collect all KroneckerDeltas in the product
        deltas = [f for f in expr.args if isinstance(f, KroneckerDelta)]
        target_idx = get_symbols(target_idx)

    for delta in deltas:
        # determine the killable and preferred index.
        # for delta_{i p_alpha} we would want to keep i_alpha, which
        # requires a new index -> just don't evaluate these deltas for now
        idx_pair = delta.preferred_and_killable
        if idx_pair is None:  # delta_{i p_alpha}
            continue
        preferred, killable = idx_pair
        if killable not in target_idx:
            # try to remove the killable index
            substituted = expr.subs(killable, preferred)
            assert isinstance(substituted, Expr)
            expr = substituted
            if len(deltas) > 1:
                return evaluate_deltas(expr, target_idx)
        elif preferred not in target_idx \
                and delta.indices_contain_equal_information:
            # try to remove preferred, but only if no information is lost,
            # i.e., killable has to be of length 1
            substituted = expr.subs(preferred, killable)
            assert isinstance(substituted, Expr)
            expr = substituted
            if len(deltas) > 1:
                return evaluate_deltas(expr, target_idx)
    return expr
+ """ + assert isinstance(expr, Expr) + # normal ordered operator string has to evaluate to zero + # and a single second quantized operator can not be contracted + if isinstance(expr, (NO, FermionicOperator)): + return S.Zero + + # break up any NO-objects, and evaluate commutators + expr = expr.doit(wicks=True).expand() + assert isinstance(expr, Expr) + + if isinstance(expr, Add): + return Add(*( + wicks(term, rules=rules, + simplify_kronecker_deltas=simplify_kronecker_deltas) + for term in expr.args + )) + elif not isinstance(expr, Mul): + # nether Add, Mul, NO, F, Fd -> maybe a number or tensor + return expr + # -> we have a Mul object + # we don't want to mess around with commuting part of Mul + # so we factorize it out before starting recursion + c_part: list[Expr] = [] + op_string: list[FermionicOperator] = [] + for factor in expr.args: + if factor.is_commutative: + c_part.append(factor) + else: + assert isinstance(factor, FermionicOperator) + op_string.append(factor) + + if (n := len(op_string)) == 0: # no operators + result = expr + elif n == 1: # a single operator + return S.Zero + else: # at least 2 operators + result = _contract_operator_string(op_string) + result = Mul(*c_part, result).expand() + assert isinstance(result, Expr) + if simplify_kronecker_deltas: + result = evaluate_deltas(result) + + # apply rules to the result + if rules is None: + return result + assert isinstance(rules, Rules) + return rules.apply(ExprContainer(result)).inner + + +def _contract_operator_string(op_string: list[FermionicOperator]) -> Expr: + """ + Contracts the operator string only returning fully contracted + contritbutions. + Adapted from 'sympy.physics.secondquant'. 
+ """ + # check that we can get a fully contracted contribution + if not _has_fully_contracted_contribution(op_string): + return S.Zero + + result = [] + for i in range(1, len(op_string)): + c = _contraction(op_string[0], op_string[i]) + if c is S.Zero: + continue + if not i % 2: # introduce -1 for swapping operators + c *= S.NegativeOne + + if len(op_string) - 2 > 0: # at least one operator left + # remove the contracted operators from the string and recurse + remaining = op_string[1:i] + op_string[i+1:] + result.append(Mul(c, _contract_operator_string(remaining))) + else: # no operators left + result.append(c) + return Add(*result) + + +def _contraction(p: FermionicOperator, q: FermionicOperator) -> Expr: + """ + Evaluates the contraction between two sqcond quantized fermionic + operators. + Adapted from 'sympy.physics.secondquant'. + """ + assert isinstance(p, FermionicOperator) + assert isinstance(q, FermionicOperator) + + p_idx, q_idx = p.args[0], q.args[0] + assert isinstance(p_idx, Index) and isinstance(q_idx, Index) + if p_idx.spin or q_idx.spin: + raise NotImplementedError("Contraction not implemented for indices " + "with spin.") + # get the space and ensure we have no unexpected space + space_p, space_q = p_idx.space[0], q_idx.space[0] + assert space_p in ["o", "v", "g"] and space_q in ["o", "v", "g"] + + if isinstance(p, F) and isinstance(q, Fd): + if space_p == "o" or space_q == "o": + res = S.Zero + elif space_p == "v" or space_q == "v": + res = KroneckerDelta(p_idx, q_idx) + else: + res = Mul( + KroneckerDelta(p_idx, q_idx), + KroneckerDelta(q_idx, Index('a', above_fermi=True)) + ) + elif isinstance(p, Fd) and isinstance(q, F): + if space_p == "v" or space_q == "v": + res = S.Zero + elif space_p == "o" or space_q == "o": + res = KroneckerDelta(p_idx, q_idx) + else: + res = Mul( + KroneckerDelta(p_idx, q_idx), + KroneckerDelta(q_idx, Index('i', below_fermi=True)) + ) + else: # vanish if 2xAnnihilator or 2xCreator + res = S.Zero + assert 
isinstance(res, Expr) + return res + + +def _has_fully_contracted_contribution(op_string: list[FermionicOperator] + ) -> bool: + """ + Takes a list of second quantized operators and checks whether a + non-vanishing fully contracted contribution can exist. + """ + if len(op_string) % 2: # odd number of operators + return False + # count the number of creation and annihilation operators per space + create = {space: 0 for space in Indices.base} + annihilate = {space: 0 for space in Indices.base} + for op in op_string: + if isinstance(op, Fd): + counter = create + else: + counter = annihilate + idx = op.args[0] + assert isinstance(idx, Index) + counter[idx.space] += 1 + # check that we have a matching amount of creation and annihilation + # operators + for space, n_create in create.items(): + if space == "general": + continue + n_annihilate = annihilate[space] + annihilate["general"] + if n_create - n_annihilate > 0: + return False + return True diff --git a/build/lib/adcgen/generate_code/__init__.py b/build/lib/adcgen/generate_code/__init__.py new file mode 100644 index 0000000..0533c13 --- /dev/null +++ b/build/lib/adcgen/generate_code/__init__.py @@ -0,0 +1,8 @@ +from .contraction import Contraction +from .generate_code import generate_code +from .optimize_contractions import ( + optimize_contractions, unoptimized_contraction +) + +__all__ = ["Contraction", "generate_code", "optimize_contractions", + "unoptimized_contraction"] diff --git a/build/lib/adcgen/generate_code/config.json b/build/lib/adcgen/generate_code/config.json new file mode 100644 index 0000000..23780fa --- /dev/null +++ b/build/lib/adcgen/generate_code/config.json @@ -0,0 +1,8 @@ +{ + "sizes": { + "core": 5, + "occ": 20, + "virt": 200, + "ri": 250 + } +} \ No newline at end of file diff --git a/build/lib/adcgen/generate_code/contraction.py b/build/lib/adcgen/generate_code/contraction.py new file mode 100644 index 0000000..2119288 --- /dev/null +++ b/build/lib/adcgen/generate_code/contraction.py @@ 
-0,0 +1,250 @@ +from collections.abc import Sequence +from collections import Counter +from dataclasses import dataclass, fields +from pathlib import Path +from typing import Any +import itertools +import json + +from ..expression import TermContainer +from ..indices import Index, Indices, sort_idx_canonical + + +_config_file = "config.json" + + +@dataclass(frozen=True, slots=True) +class Sizes: + """ + Explicit sizes for each of the spaces (occ, virt, ...). + Used to estimate the costs of a contraction. + """ + core: int = 0 + occ: int = 0 + virt: int = 0 + general: int = 0 + ri: int = 0 + + @staticmethod + def from_dict(input: dict[str, int]) -> "Sizes": + """ + Construct an instance from dictionary. The size of the "general" space + is evaluated on the fly as sum of the sizes of the other spaces + if not provided. + """ + if "general" not in input: + input["general"] = sum(input.values()) + return Sizes(**input) + + @staticmethod + def from_config() -> "Sizes": + """ + Construct an instance using the values in the config file + (by default: "config.json"). The size of the "general" space is + evaluated on the fly as sum of the sizes of the other spaces if not + present in the config file. + """ + config_file = Path(__file__).parent.resolve() / _config_file + sizes: dict[str, int] | None = ( + json.load(open(config_file, "r")).get("sizes", None) + ) + if sizes is None: + raise KeyError(f"Invalid config file {config_file}. " + "Missing key 'sizes'.") + return Sizes.from_dict(sizes) + + +class Contraction: + """ + Represents a single contration of n objects. + + Parameters + ---------- + indices: tuple[tuple[Index]] + The indices of the contracted tensors. + names: tuple[str] + The names of the contracted tensors. + term_target_indices: tuple[Index] + The target indices of the term the contraction belongs to. 
+ """ + # use counter that essentially counts how many class instances have + # been created + # -> unique id for every instance + # -> easy to differentiate and identify individual instances + _base_name = "contraction" + _instance_counter = itertools.count(0, 1) + + # fallback sizes to estimate the costs of a contraction + _sizes = Sizes.from_config() + + def __init__(self, indices: Sequence[tuple[Index, ...]], + names: Sequence[str], + term_target_indices: Sequence[Index]) -> None: + if not isinstance(indices, tuple): + indices = tuple(indices) + if isinstance(names, str): + names = (names,) + elif not isinstance(names, tuple): + names = tuple(names) + + self.indices: tuple[tuple[Index, ...], ...] = indices + self.names: tuple[str, ...] = names + self.contracted: tuple[Index, ...] = tuple() + self.target: tuple[Index, ...] = tuple() + self.scaling: Scaling + self.id: int = next(self._instance_counter) + self.contraction_name: str = f"{self._base_name}_{self.id}" + self._determine_contracted_and_target(term_target_indices) + self._determine_scaling() + + def __str__(self): + return (f"Contraction(indices={self.indices}, names={self.names}, " + f"contracted={self.contracted}, target={self.target}, " + f"scaling={self.scaling}), id={self.id}, " + f"contraction_name={self.contraction_name})") + + def __repr__(self): + return self.__str__() + + def _determine_contracted_and_target(self, + term_target_indices: Sequence[Index] + ) -> None: + """ + Determines and sets the contracted and target indices on the + contraction using the provided target indices of the term + the contraction is a part of. In case the target indices of the + contraction contain the same indices as the target indices of the + term, the target indices of the term will be used instead. 
+ """ + contracted, target = self._split_contracted_and_target( + self.indices, term_target_indices + ) + # sort the indices canonical + contracted = sorted(contracted, key=sort_idx_canonical) + target = sorted(target, key=sort_idx_canonical) + # if the contraction is an outer contraction, we have to use the + # provided target indices as target indices since their order + # might be different from the canonical order. + if sorted(term_target_indices, key=sort_idx_canonical) == target: + target = term_target_indices + self.contracted = tuple(contracted) + self.target = tuple(target) + + @staticmethod + def _split_contracted_and_target(indices: Sequence[tuple[Index, ...]], + term_target_indices: Sequence[Index] + ) -> tuple[list[Index], list[Index]]: + """ + Splits the given indices in contracted and target indices using + the provided target indices of the term the contraction is a + part of. + """ + idx_counter = Counter(itertools.chain.from_iterable(indices)) + contracted: list[Index] = [] + target: list[Index] = [] + for idx, count in idx_counter.items(): + if count == 1 or idx in term_target_indices: + target.append(idx) + else: + contracted.append(idx) + return contracted, target + + def _determine_scaling(self) -> None: + """Determine the computational and memory scaling of the contraction""" + contracted_by_space = Counter(idx.space for idx in self.contracted) + target_by_space = Counter(idx.space for idx in self.target) + # computational scaling + componentwise = { + space: contracted_by_space[space] + target_by_space[space] + for space in Indices.base + } + comp_scaling = ScalingComponent(total=sum(componentwise.values()), + **componentwise) + # memory scaling + componentwise = { + space: target_by_space[space] for space in Indices.base + } + mem_scaling = ScalingComponent(total=len(self.target), + **componentwise) + # overall scaling + self.scaling = Scaling(computational=comp_scaling, memory=mem_scaling) + + def evaluate_costs(self, sizes: Sizes | None = 
None + ) -> tuple[int, int]: + """ + Estimate the costs of the contraction. Returns a tuple containing + the flop count and the memory foot print of the result tensor. + + Parameters + ---------- + sizes: dict[str, int] | Sizes | None, optional + The sizes of the individual spaces used to estimate the + computational costs and the memory footprint of the contraction. + """ + if sizes is None: + sizes = self._sizes + return self.scaling.evaluate_costs(sizes) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Contraction): + return False + return (self.indices == other.indices and + self.names == other.names and + self.contracted == other.contracted and + self.target == other.target and self.scaling == other.scaling) + + @staticmethod + def is_contraction(name: str) -> bool: + return name.startswith(Contraction._base_name) + + +def term_memory_requirements(term: TermContainer) -> "ScalingComponent": + """Determines the maximum memory requirements for the given term.""" + mem_scaling: list[ScalingComponent] = [] + for obj in term.objects: + space = obj.space + scaling = {"total": len(space)} + for field in fields(ScalingComponent): + if field.name == "total": + continue + scaling[field.name] = space.count(field.name[0]) + mem_scaling.append(ScalingComponent(**scaling)) + return max(mem_scaling) + + +@dataclass(frozen=True, slots=True, order=True) +class Scaling: + computational: "ScalingComponent" + memory: "ScalingComponent" + + def evaluate_costs(self, sizes: Sizes) -> tuple[int, int]: + """ + Estimate the computational costs and the memory footprint using the + provided sizes for the spaces. 
+ """ + return (self.computational.evaluate_costs(sizes), + self.memory.evaluate_costs(sizes)) + + +@dataclass(frozen=True, slots=True, order=True) +class ScalingComponent: + total: int + general: int + virt: int + occ: int + core: int + ri: int + + def evaluate_costs(self, sizes: Sizes) -> int: + """ + Estimate the costs of the component using the provided sizes for the + spaces. + """ + costs = 1 + for field in fields(sizes): + base = getattr(sizes, field.name) + power = getattr(self, field.name, None) + assert power is not None + if base: + costs *= base ** power + return costs diff --git a/build/lib/adcgen/generate_code/generate_code.py b/build/lib/adcgen/generate_code/generate_code.py new file mode 100644 index 0000000..f2bc25d --- /dev/null +++ b/build/lib/adcgen/generate_code/generate_code.py @@ -0,0 +1,390 @@ +from collections.abc import Sequence +from collections import Counter + +from sympy import Expr, Symbol, Rational, Pow, Mul, S + +from ..expression import ExprContainer, TermContainer +from ..indices import Index, Indices +from ..logger import logger +from ..sort_expr import exploit_perm_sym +from ..symmetry import Permutation +from ..tensor_names import tensor_names + +from .contraction import Contraction, term_memory_requirements +from .optimize_contractions import ( + optimize_contractions, unoptimized_contraction +) + + +def generate_code(expr: ExprContainer, target_indices: str, + target_spin: str | None = None, + bra_ket_sym: int = 0, + antisymmetric_result_tensor: bool = True, + backend: str = "einsum", max_itmd_dim: int | None = None, + max_n_simultaneous_contracted: int | None = None, + optimize_contraction_scheme: bool = True, + space_dims: dict[str, int] | None = None) -> str: + """ + Generates contractions for a given expression using either 'einsum' + (Python) or 'libtensor' (C++) syntax. + + Parameters + ---------- + expr: ExprContainer + The expression to generate contractions for. + target_indices: str + String of target indices. 
A ',' might be inserted to indicate where + the indices are split in upper and lower indices of the result tensor, + e.g., 'ia,jb' for 'r^{ia}_{jb}'. + target_spin: str | None, optional + The spin of the target indices, e.g., 'aabb' to indicate that the + first 2 target indices have alpha spin, while number 3 and 4 have + beta spin. If not given, target indices without spin will be used. + bra_ket_sym: int, optional + The bra-ket symmetry of the result tensor. (default: 0, i.e., + no bra-ket symmetry) + antisymmetric_result_tensor: bool, optional + If set, teh result tensor will be treated as AntiSymmetricTensor + d_{ij}^{ab} = - d_{ji}^{ab}. Otherwise, a SymmetricTensor will be used + to mimic the symmetry of the result tensor, i.e., + d_{ij}^{ab} = d_{ji}^{ab}. (default: True) + backend: str, optional + The backend for which to generate contractions. (default: einsum) + max_itmd_dim: int | None, optional + Upper bound for the dimensionality of intermediate results, that + may be generated if the contractions are optimized. + max_n_simultaneous_contracted: int | None, optional + The maximum number of objects allowed to be contracted + simultaneously in a single contraction. (default: None) + optimize_contraction_scheme: bool, optional + If set, we try to find the contractions with the lowest arithmetic + and memory scaling, i.e., if possible only 2 tensors are contracted + simultaneously. (default: True) + space_dims: dict[str, int] | None, optional + The sizes of the spaces (occ, virt, ...) used to estimate the cost of + contractions. If not provided, the sizes from "config.json" will be + used. 
+ """ + assert isinstance(expr, ExprContainer) + # try to reduce the number of terms by exploiting permutational symmetry + expr_with_perm_sym = exploit_perm_sym( + expr=expr, target_indices=target_indices, target_spin=target_spin, + bra_ket_sym=bra_ket_sym, + antisymmetric_result_tensor=antisymmetric_result_tensor + ) + # remove the bra-ket separator in target indices and target spin + if "," in target_indices: + target_indices = target_indices.replace(",", "") + if target_spin is not None and "," in target_spin: + target_spin = target_spin.replace(",", "") + + code = [] + for perm_symmetry, sub_expr in expr_with_perm_sym.items(): + perm_str = format_perm_symmetry(perm_symmetry) + + # generate the contrations for each of the terms + contraction_code = [] + for term in sub_expr.terms: + prefactor = format_prefactor(term, backend) + + if not term.idx: # term is just a prefactor + contraction_code.append(prefactor) + continue + if len({idx.spin for idx in term.idx}) > 1: + logger.warning("Found more than one spin in the indices of " + f"term {term}. Indices with different spin " + "might not be distinguishable in the " + "generated contractions, because only the name " + "of the indices is considered.") + + # generate the contractions for the term + if optimize_contraction_scheme: + contractions = optimize_contractions( + term=term, target_indices=target_indices, + target_spin=target_spin, max_itmd_dim=max_itmd_dim, + space_dims=space_dims, + max_n_simultaneous_contracted=max_n_simultaneous_contracted + ) + else: + contractions = unoptimized_contraction( + term=term, target_indices=target_indices, + target_spin=target_spin + ) + # build a comment describing the scaling of the contraction + # scheme + scaling_comment = format_scaling_comment( + term=term, contractions=contractions, backend=backend + ) + # identify inner and outer contractions. 
+ # They are sorted in the way they need to be executed + # -> contraction can only be used in a later contraction + inner: list[Contraction] = [] + outer: list[Contraction] = [] + for i, contr in enumerate(contractions): + if any(contr.contraction_name in other_contr.names + for other_contr in contractions[i+1:]): + inner.append(contr) + else: + outer.append(contr) + # currently, there has to be only 1 outer contraction (the last + # contraction), because even if an inner contraction gives a + # number, the contraction is still kept in the pool of objects, + # i.e., contractions might contain objects without indices! + contraction_cache: dict[str, str] = {} + for contr in inner: + contr_str = format_contraction(contr, contraction_cache, + backend=backend) + contraction_cache[contr.contraction_name] = contr_str + assert len(outer) == 1 + contr_str = format_contraction(outer[0], contraction_cache, + backend=backend) + contraction_code.append( + f"{prefactor} * {contr_str} {scaling_comment}" + ) + contraction_code = '\n'.join(contraction_code) + code.append( + "The scaling comment is given as: [comp_scaling] / [mem_scaling]\n" + f"Apply {perm_str} to:\n{contraction_code}" + ) + return "\n\n".join(code) + + +def format_contraction(contraction: Contraction, + contraction_cache: dict[str, str], + backend: str) -> str: + """ + Builds a backend specific string for the given contraction. 
+ """ + # split the objects in tensors and factors + # and transform the indices of the tensors to string + tensors: list[str] = [] + factors: list[str] = [] + idx_str: list[str] = [] + for name, indices in zip(contraction.names, contraction.indices): + # check the cache for the contraction string of the inner contraction + if Contraction.is_contraction(name): + name = contraction_cache.get(name, None) + if name is None: + raise KeyError("Could not find contraction string for inner " + f"contraction {contraction}.") + # we have a tensor that we need to treat depening on the backend + elif backend == "einsum": # translate eri and fock matrix + name = translate_adcc_names(name, indices) + elif backend == "libtensor": + # we can not form a partial trace in libtensor + contracted_obj_indices = [ + idx for idx in indices if idx in contraction.contracted + ] + if any(n > 1 for _, n in Counter(contracted_obj_indices).items()): + raise NotImplementedError( + "Libtensor can not handle a partial trace, i.e., a trace " + f"with a tensor as result. 
def format_einsum_contraction(tensors: list[str], factors: list[str],
                              indices: list[str], target: str) -> str:
    """
    Builds a contraction string for the given contraction using Python
    numpy einsum syntax.
    """

    parts = [*factors]
    if len(tensors) == 1 and indices[0] == target:
        # special case: a single tensor that already carries the target
        # indices -> no einsum needed
        parts.append(tensors[0])
    elif tensors:
        # we need an einsum: reorder, contraction or outer product
        spec = f"\"{','.join(indices)}->{target}\""
        parts.append(f"einsum({spec}, {', '.join(tensors)})")
    return " * ".join(parts)


def format_libtensor_contraction(tensors: list[str], factors: list[str],
                                 target: str, contracted: Sequence[Index]
                                 ) -> str:
    """
    Builds a contraction string for the given contraction using libtensor
    C++ syntax.
    """

    parts = [*factors]
    n_tensors = len(tensors)
    if n_tensors == 1:  # single tensor
        assert not contracted  # a trace is not supported here
        parts.append(tensors[0])
    elif n_tensors > 1:  # multiple tensors
        # NOTE(review): hyper-contraction is presumably only implemented
        # for 3 tensors in libtensor - confirm
        if contracted and target:  # regular contraction
            contracted_str = '|'.join(s.name for s in contracted)
            parts.append(
                f"contract({contracted_str}, {', '.join(tensors)})"
            )
        elif target:  # no contracted indices -> outer product
            parts.extend(tensors)
        elif contracted:  # no target indices -> inner product
            parts.append(f"dot_product({', '.join(tensors)})")
        else:
            raise NotImplementedError("No target and contracted indices in "
                                      f"contraction of {tensors} and "
                                      f"{factors}.")
    return " * ".join(parts)


def translate_adcc_names(name: str, indices: Sequence[Index]) -> str:
    """Translates tensor names specifically for adcc."""
    if name.startswith(tensor_names.eri):  # electron repulsion integrals
        return "hf." + "".join(s.space[0] for s in indices)
    if name.startswith(tensor_names.fock):  # fock matrix
        return "hf.f" + "".join(s.space[0] for s in indices)
    return name


def translate_libadc_names(name: str, indices: Sequence[Index]) -> str:
    """Translates tensor names specifically for libadc."""
    if name.startswith(tensor_names.eri):  # electron repulsion integrals
        return "i_" + "".join(s.space[0] for s in indices)
    if name.startswith("t2eri"):  # pi intermediates
        _, n = name.split("_")
        return f"pi{n}"
    return name
+ """ + max_comp_scaling = max(contr.scaling.computational + for contr in contractions) + max_mem_scaling = max(contr.scaling.memory for contr in contractions) + max_mem_scaling = max(max_mem_scaling, term_memory_requirements(term)) + comp = [f"N^{max_comp_scaling.total}: "] + mem = [f"N^{max_mem_scaling.total}: "] + for space in Indices.base: + if (n := getattr(max_comp_scaling, space)): + comp.append(f"{space[0].capitalize()}^{n}") + if (n := getattr(max_mem_scaling, space)): + mem.append(f"{space[0].capitalize()}^{n}") + if backend == "einsum": + comment_token = "#" + elif backend == "libtensor": + comment_token = "//" + else: + raise NotImplementedError("Comment token not implemented for backend " + f"{backend}.") + return f"{comment_token} {''.join(comp)} / {''.join(mem)}" + + +def format_prefactor(term: TermContainer, backend: str) -> str: + """Formats the prefactor for Python (einsum) or C++ (libtensor).""" + # extract number and symbolic prefactor + number_pref = term.prefactor + symbol_pref = " * ".join( + [obj.base.name for obj in term.objects if isinstance(obj.base, Symbol) + for _ in range(int(obj.exponent))] + ) + # extract the sign + if number_pref < S.Zero: + sign = "-" + number_pref *= S.NegativeOne + else: + sign = "+" + # format the number prefactor (depends on the backend) + if backend == "einsum": # python + number_pref = _format_python_prefactor(number_pref) + elif backend == "libtensor": # C++ + number_pref = _format_cpp_prefactor(number_pref) + else: + raise NotImplementedError(f"Prefactor for backend {backend} not " + "implemented.") + # combine the contributions + if symbol_pref: + return f"{sign} {number_pref} * {symbol_pref}" + else: + return f"{sign} {number_pref}" + + +def _format_python_prefactor(prefactor: Expr) -> str: + """Formats a prefactor using Python syntax.""" + + if prefactor == int(prefactor): # natural number + return str(prefactor) + elif prefactor in [Rational(1, 2), Rational(1, 4)]: # simple Rational + return 
str(float(prefactor)) + elif isinstance(prefactor, Rational): # more complex rational + return f"{prefactor.p} / {prefactor.q}" + elif isinstance(prefactor, Pow) and prefactor.args[1] == Rational(1, 2): + return f"sqrt({prefactor.args[0]})" + elif isinstance(prefactor, Mul): + return " * ".join( + _format_python_prefactor(pref) for pref in prefactor.args + ) + raise NotImplementedError( + f"Formatting of prefactor {prefactor}, {type(prefactor)} " + "not implemented." + ) + + +def _format_cpp_prefactor(prefactor: Expr) -> str: + """Formats a prefactor using C++ syntax.""" + + if prefactor == int(prefactor) or \ + prefactor in [Rational(1, 2), Rational(1, 4)]: + return str(float(prefactor)) + elif isinstance(prefactor, Rational): + return f"{float(prefactor.p)} / {float(prefactor.q)}" + elif isinstance(prefactor, Pow) and prefactor.args[1] == Rational(1, 2): + return f"constants::sq{prefactor.args[0]}" + elif isinstance(prefactor, Mul): + return " * ".join( + _format_cpp_prefactor(pref) for pref in prefactor.args + ) + raise NotImplementedError( + f"Formatting of prefactor {prefactor}, {type(prefactor)} " + "not implemented." 
+ ) + + +def format_perm_symmetry( + perm_symmetry: tuple[tuple[tuple[Permutation, ...], int], ...]) -> str: + """Formats the permutational symmetry.""" + perm_sym = ["1"] + for permutations, factor in perm_symmetry: + assert factor in [1, -1] + contrib = ["+ "] if factor == 1 else ["- "] + for perm in permutations: + contrib.append(str(perm)) + perm_sym.append("".join(contrib)) + if len(perm_sym) == 1: + return "1" + return f"({' '.join(perm_sym)})" diff --git a/build/lib/adcgen/generate_code/optimize_contractions.py b/build/lib/adcgen/generate_code/optimize_contractions.py new file mode 100644 index 0000000..f90f23e --- /dev/null +++ b/build/lib/adcgen/generate_code/optimize_contractions.py @@ -0,0 +1,329 @@ +from collections.abc import Sequence +from typing import Generator +import itertools + +from sympy import Symbol, S + +from ..expression import TermContainer +from ..indices import get_symbols, Index +from ..sympy_objects import SymbolicTensor, KroneckerDelta +from .contraction import Contraction, Sizes + + +def optimize_contractions(term: TermContainer, + target_indices: str | None = None, + target_spin: str | None = None, + max_itmd_dim: int | None = None, + max_n_simultaneous_contracted: int | None = None, + space_dims: dict[str, int] | None = None + ) -> list[Contraction]: + """ + Find the optimal contraction scheme with the lowest computational + and memory scaling for a given term. Thereby, the computational scaling + is prioritized over the memory scaling. + + Parameters + ---------- + term: TermContainer + Find the optimal contraction scheme for this term. + target_indices: str | None, optional + The target indices of the term. If not given, the canonical target + indices of the term according to the Einstein sum convention + will be used. For instance, 2 occupied and 2 virtual + indices will always be in the order 'ijab'. Therefore, target indices + have to be provided if the result tensor has indices 'iajb'. 
+ target_spin: str | None, optional + The spin of the target indices, e.g., "aabb" for + alpha, alpha, beta, beta. If not given, target indices without spin + will be used. + max_itmd_dim: int | None, optional + Upper bound for the dimensionality of intermediates created by + inner contractions if the contractions are nested, i.e., + the dimensionality of the result of contr2 and contr3 is restricted in + "contr1(contr2(contr3(...)))". + max_n_simultaneous_contracted: int | None, optional + The maximum number of objects allowed to be contracted + simultaneously in a single contraction. (default: None) + space_dims: dict[str, int] | None, optional + The sizes of the spaces (occ, virt, ...) used to estimate the cost of + contractions. If not provided, the sizes from "config.json" will be + used. + """ + # - import (or extract) the target indices + if target_indices is None: + target_symbols = term.target + else: + target_symbols = tuple(get_symbols(target_indices, target_spin)) + # - import the space sizes/dims + if isinstance(space_dims, dict): + space_sizes = Sizes.from_dict(space_dims) + else: + assert space_dims is None + space_sizes = None + # - extract the relevant part (tensors and deltas) of the term + relevant_obj_names: list[str] = [] + relevant_obj_indices: list[tuple[Index, ...]] = [] + for obj in term.objects: + base, exp = obj.base_and_exponent + if obj.inner.is_number: # skip number prefactor + continue + elif exp < S.Zero: + raise NotImplementedError(f"Found object {obj} with exponent " + f"{exp} < 0. 
Contractions not " + "implemented for divisions.") + elif isinstance(base, Symbol): # skip symbolic prefactor + continue + elif not isinstance(base, (SymbolicTensor, KroneckerDelta)): + raise NotImplementedError("Contractions can only be optimized for " + "tensors and KroneckerDeltas.") + name, indices = obj.longname(), obj.idx + assert name is not None + assert exp.is_Integer + relevant_obj_names.extend(name for _ in range(int(exp))) + relevant_obj_indices.extend(indices for _ in range(int(exp))) + assert len(relevant_obj_names) == len(relevant_obj_indices) + + if not relevant_obj_names: # no tensors or deltas in the term + return [] + elif len(relevant_obj_names) == 1: + # trivial: only a single tensor/delta with exponent 1 + # - resorting of indices + # - trace + return [Contraction( + indices=relevant_obj_indices, names=relevant_obj_names, + term_target_indices=target_symbols + )] + # lazily find the contraction schemes + contraction_schemes = _optimize_contractions( + relevant_obj_names=tuple(relevant_obj_names), + relevant_obj_indices=tuple(relevant_obj_indices), + target_indices=target_symbols, max_itmd_dim=max_itmd_dim, + max_n_simultaneous_contracted=max_n_simultaneous_contracted + ) + # go through all schemes and find the one with the lowest scaling by + # considering the: + # 1) Computational scaling (flop count) + # 2) Memory scaling (number of elements to store) + optimal_scaling = None + optimal_scheme = None + for scheme in contraction_schemes: + # determine the costs for the current contraction scheme + arithmetic = 0 + memory = 0 + for contr in scheme: + nflops, mem = contr.evaluate_costs(space_sizes) + arithmetic += nflops + memory += mem + scaling = (arithmetic, memory) + if optimal_scaling is None or scaling < optimal_scaling: + optimal_scheme = scheme + optimal_scaling = scaling + # the generator is empty, i.e., we could not find any contraction scheme + if optimal_scheme is None: + raise RuntimeError("Could not find a valid contraction scheme 
for " + f"term {term} while restricting the maximum " + f"dimensionality of intermediates to " + f"{max_itmd_dim} and allowing simultaneous " + f"contractions of {max_n_simultaneous_contracted} " + "objects.") + return optimal_scheme + + +def _optimize_contractions(relevant_obj_names: Sequence[str], + relevant_obj_indices: Sequence[tuple[Index, ...]], + target_indices: Sequence[Index], + max_itmd_dim: int | None = None, + max_n_simultaneous_contracted: int | None = None, + ) -> Generator[list[Contraction], None, None]: + """ + Find the optimal contractions for the given relevant objects of a term. + """ + assert len(relevant_obj_indices) == len(relevant_obj_names) + if len(relevant_obj_names) < 2: + raise ValueError("Need at least 2 objects to define a contraction.") + + # split the relevant objects into subgroups that share contracted indices + # and therefore should be contracted simultaneously + connected_groups = _group_objects( + obj_indices=relevant_obj_indices, target_indices=target_indices, + max_group_size=max_n_simultaneous_contracted + ) + + for group in connected_groups: + contr_indices = tuple(relevant_obj_indices[pos] for pos in group) + contr_names = tuple(relevant_obj_names[pos] for pos in group) + contraction = Contraction(indices=contr_indices, names=contr_names, + term_target_indices=target_indices) + # if the contraction is not an outer contraction we have to check + # the dimensionality of the intermediate tensor + if max_itmd_dim is not None and \ + contraction.target != target_indices and \ + len(contraction.target) > max_itmd_dim: + continue + # remove the contracted names and indices + remaining_pos = [pos for pos in range(len(relevant_obj_names)) + if pos not in group] + remaining_names = (contraction.contraction_name, + *(relevant_obj_names[pos] for pos in remaining_pos)) + remaining_indices = (contraction.target, *(relevant_obj_indices[pos] + for pos in remaining_pos)) + # there are no objects left to contract -> we are done + if 
len(remaining_names) == 1: + yield [contraction] + continue + # recurse to build further contractions + completed_schemes = _optimize_contractions( + relevant_obj_names=remaining_names, + relevant_obj_indices=remaining_indices, + target_indices=target_indices, max_itmd_dim=max_itmd_dim, + max_n_simultaneous_contracted=max_n_simultaneous_contracted + ) + for contraction_scheme in completed_schemes: + contraction_scheme.insert(0, contraction) + yield contraction_scheme + + +def _group_objects(obj_indices: Sequence[tuple[Index, ...]], + target_indices: Sequence[Index], + max_group_size: int | None = None + ) -> tuple[tuple[int, ...], ...]: + """ + Split the provided relevant objects into subgroups that share common + contracted indices. Thereby, a group can at most contain 'max_group_size' + objects. By default, all objects are allowed to be in one group. + """ + # NOTE: the algorithm currently maximizes the number of contracted + # indices, i.e., a contraction runs over all common contracted + # indices. While this is fine in most cases, it might be benefitial + # to not contract all possible indices simultaneously + # in certain cases, since this leads to an increased group size: + # 0 1 2 + # d_ijk d_ij d_jl + # 0 and 1 share i and j. A contraction running only over i can be + # performed for the pair (0, 1). However, if the contraction runs + # runs over i and j, we have to consider the triple (0, 1, 2). 
+ + # sanity checks for input + assert len(obj_indices) > 1 # we need at least 2 objects + if max_group_size is None: + max_group_size = len(obj_indices) + assert max_group_size > 1 # group size has to be at least 2 + + # track on which objects the indices appear + idx_occurences: dict[Index, list[int]] = {} + for pos, indices in enumerate(obj_indices): + for idx in indices: + if idx not in idx_occurences: + idx_occurences[idx] = [] + idx_occurences[idx].append(pos) + + # store grouped objects and isolated objects (outer products) + # for the groups we are using a dict, since it by default returns + # keys in the order they were inserted. A set would need to be sorted + # before returning to produce consistent results. + groups: dict[tuple[int, ...], None] = {} + outer_products: list[tuple[int, int]] = [] + # iterate over all pairs of objects (index tuples) + for (pos1, indices1), (pos2, indices2) in \ + itertools.combinations(enumerate(obj_indices), 2): + # check if the objects have any common contracted indices + # -> outer products can be treated as pair + contracted, _ = Contraction._split_contracted_and_target( + (indices1, indices2), target_indices + ) + if not contracted: + outer_products.append((pos1, pos2)) + continue + # get all the objects any of the contracted indices appears + positions = {pos for idx in contracted for pos in idx_occurences[idx]} + # group too large + if len(positions) > max_group_size: + continue + # avoid duplication: 0, 1 and 2 are connected by a common index + # -> the pair 0,1 and 0,2 will both give the triple 0,1,2 + # which will then grow in the same way independent of the starting + # pair. + key = tuple(sorted(positions)) + if key in groups: + continue + # store the minimal group + groups[key] = None + + # self-consistently update the contracted indices and the positions + # This corresponds to maximizing the group size. + # However, it is unclear if growing the group leads to a better + # scaling contraction. 
Therefore, also store smaller groups + while True: + # update the contracted indices + new_contracted, _ = Contraction._split_contracted_and_target( + [obj_indices[pos] for pos in positions], target_indices + ) + # no new contracted indices pulled in -> we are done + if contracted == new_contracted: + break + # update the positions + new_positions = { + pos for idx in new_contracted for pos in idx_occurences[idx] + } + # no new positions or the extended group is too large + if new_positions == positions or \ + len(new_positions) > max_group_size: + break + # store the current group before trying to further + # increase the size + groups[tuple(sorted(new_positions))] = None + contracted = new_contracted + positions = new_positions + return (*groups.keys(), *outer_products) + + +def unoptimized_contraction(term: TermContainer, + target_indices: str | None = None, + target_spin: str | None = None + ) -> list[Contraction]: + """ + Determines the unoptimized contraction for the given term, i.e., + a simultaneous hyper-contraction of all tensors and deltas. + + Parameters + ---------- + term: TermContainer + Build an unoptimized contraction for the given term. + target_indices: str | None, optional + The target indices of the term. If not given, the canonical target + indices of the term according to the Einstein sum convention + will be used. + target_sin: str | None, optional + The spin of the target indices, e.g., "aabb" for + alpha, alpha, beta, beta. If not given, target indices without spin + will be used. 
+ """ + # - import (or extract) the target indices + if target_indices is None: + target_symbols = term.target + else: + target_symbols = tuple(get_symbols(target_indices, target_spin)) + # extract the relevant part of the term + relevant_obj_names: list[str] = [] + relevant_obj_indices: list[tuple[Index, ...]] = [] + for obj in term.objects: + base, exp = obj.base_and_exponent + if obj.inner.is_number: # skip number prefactor + continue + elif exp < S.Zero: + raise NotImplementedError(f"Found object {obj} with exponent " + f"{exp} < 0. Contractions not " + "implemented for divisions.") + elif isinstance(base, Symbol): # skip symbolic prefactor + continue + elif not isinstance(base, (SymbolicTensor, KroneckerDelta)): + raise NotImplementedError("Contractions only implemented for " + "tensors and KroneckerDeltas.") + name, indices = obj.longname(), obj.idx + assert name is not None + assert exp.is_Integer + relevant_obj_names.extend(name for _ in range(int(exp))) + relevant_obj_indices.extend(indices for _ in range(int(exp))) + assert len(relevant_obj_indices) == len(relevant_obj_names) + return [Contraction(indices=relevant_obj_indices, names=relevant_obj_names, + term_target_indices=target_symbols)] diff --git a/build/lib/adcgen/groundstate.py b/build/lib/adcgen/groundstate.py new file mode 100644 index 0000000..4ee5128 --- /dev/null +++ b/build/lib/adcgen/groundstate.py @@ -0,0 +1,476 @@ +from math import factorial + +from sympy.physics.secondquant import NO, Dagger +from sympy import Expr, Mul, Rational, S, latex + +from .expression import ExprContainer +from .func import gen_term_orders, wicks +from .indices import Indices, n_ov_from_space +from .logger import logger +from .misc import cached_member, Inputerror, validate_input +from .operators import Operators +from .simplify import simplify +from .sympy_objects import Amplitude +from .tensor_names import tensor_names + + +class GroundState: + """ + Constructs ground state expressions using Rayleigh-Schrödinger 
+ perturbation theory. + + Parameters + ---------- + hamiltonian : Operators + An Operators instance to request the partitioned Hamiltonian and + other Operators. + first_order_singles : bool, optional + If set, the first order wavefunction will contain single amplitudes. + (Defaults to False) + """ + def __init__(self, hamiltonian: Operators, + first_order_singles: bool = False): + assert isinstance(hamiltonian, Operators) + self.indices: Indices = Indices() + self.h: Operators = hamiltonian + self.singles: bool = first_order_singles + + @cached_member + def energy(self, order: int) -> Expr: + """ + Constructs an expression for the n'th-order ground state energy + contribution. + + Parameters + ---------- + order : int + The perturbation theoretical order. + """ + # NOTE: this function assumes a block diagonal H0 + # in the general case we have to include <0|H0|n> + + validate_input(order=order) + + h, rules = self.h.h0 if order == 0 else self.h.h1 + bra = self.psi(order=0, braket="bra") + ket = self.psi(order=0, braket='ket') if order == 0 else \ + self.psi(order=order-1, braket='ket') + e = Mul(bra, h, ket) + e = wicks(e, simplify_kronecker_deltas=True, rules=rules) + # option 1: return the not simplified energy -> will give a lot more + # terms later on + # option 2: simplify the energy expression and replace the indices with + # new, generic indices + # guess option 2 is nicer, because energy is more readable and shorter + e = simplify(ExprContainer(e)).substitute_with_generic() + logger.debug(f"E^({order}) = {e}") + return e.inner + + def psi(self, order: int, braket: str) -> Expr: + """ + Constructs the n'th-order ground state wavefunction without inserting + definitions of the respective ground state amplitudes. + + Parameters + ---------- + order : int + The perturbation theoretical order. + braket: str + Possible values: 'bra', 'ket'. Defines whether a bra or ket + wavefunction is constructed. + """ + # Can't cache ground state wave function! 
+ # Leads to an error for terms of the form: + # |1><2|1>... the two |1> need to have different indices!! + # |1><1|2>... |1> and |2> can't share indices + # -> Therefore, each time a gs wavefunction is requested new indices + # need to be used. + # But one can still use overlapping indices within a wavefunction + # e.g. singles: ia, doubles ijab, triples ijkabc + + validate_input(order=order, braket=braket) + + # catch 0th order wavefunction + if order == 0: + logger.debug(f"gs({order}) {braket} = 1") + return S.One + + # generalized gs wavefunction generation + tensor_name = f"{tensor_names.gs_amplitude}{order}" + if braket == "bra": + tensor_name += "cc" + idx = self.indices.get_generic_indices(occ=2*order, virt=2*order) + virtual = idx[("virt", "")] + occupied = idx[("occ", "")] + psi = S.Zero + for excitation in range(1, order * 2 + 1): + # skip singles for the first order wavefunction if + # they are not requested + if order == 1 and not self.singles and excitation == 1: + continue + # build tensor + virt: list = virtual[:excitation] + occ: list = occupied[:excitation] + t = Amplitude(tensor_name, virt, occ) + # build operators + operators = self.h.excitation_operator( + creation=virt, annihilation=occ, reverse_annihilation=True + ) + if braket == "bra": + operators = Dagger(operators) + # prefactor for lifting index restrictions + prefactor = Rational(1, factorial(excitation) ** 2) + # For signs: Decided to subtract all Doubles to stay consistent + # with existing implementations of the amplitudes. + # The remaining amplitudes (S/T/Q...) are added! 
+ # (denominator for Triples: i+j+k-a-b-c + # Doubles: a+b-i-j) + if excitation == 2: # doubles + psi -= prefactor * t * NO(operators) + else: + psi += prefactor * t * NO(operators) + assert isinstance(psi, Expr) + logger.debug(f"gs({order}) {braket} = {latex(psi)}") + return psi + + def amplitude(self, order: int, space: str, indices: str) -> Expr: + """ + Constructs the n'th-order expression for the ground state t-amplitudes. + + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space, e.g., 'ph' or 'pphh' for singly or doubly + excited configurations. + indices : str + The indices the t-amplitude. + """ + variant = self.h._variant + if variant == 'mp': + return self.mp_amplitude(order, space, indices) + elif variant == 're': + return self.amplitude_residual(order, space, indices) + else: + raise NotImplementedError("Amplitudes not implemented for " + f"{self.h._variant}") + + @cached_member + def mp_amplitude(self, order: int, space: str, indices: str) -> Expr: + """ + Constructs the closed n'th-order expression for the MP t-amplitudes. + + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space, e.g., 'ph' or 'pphh' for singly or doubly + excited configurations. + indices : str + The indices of the constructed t-amplitude. 
+ """ + from .intermediates import orb_energy + + validate_input(order=order, space=space, indices=indices) + + n_ov = n_ov_from_space(space) + if n_ov["occ"] != n_ov["virt"]: + raise Inputerror("Invalid space string for a MP t-amplitude: " + f"{space}.") + # if the space is not present at the requested order return 0 + if n_ov["occ"] > 2 * order: + return S.Zero + + idx = self.indices.get_indices(indices) + lower = idx.get(("occ", ""), []) + upper = idx.get(("virt", ""), []) + if n_ov["occ"] != len(lower) or n_ov["virt"] != len(upper): + raise Inputerror(f"Provided indices {indices} are not adequate for" + f" space {space}.") + + # build the denominator + if len(lower) == 2: # doubles amplitude: a+b-i-j + occ_factor = S.NegativeOne + virt_factor = S.One + else: # any other amplitude: i-a // i+j+k-a-b-c // ... + occ_factor = S.One + virt_factor = S.NegativeOne + + denom = S.Zero + for s in lower: + denom += occ_factor * orb_energy(s) + for s in upper: + denom += virt_factor * orb_energy(s) + + # build the bra state: + h1, rules = self.h.h1 + contrib = Mul(bra, h1, self.psi(order-1, "ket")) + numerator += wicks( + contrib, simplify_kronecker_deltas=True, rules=rules + ) + # subtract: - sum_{m=1} E_0^(m) * t_k^(n-m) + terms = gen_term_orders(order=order, term_length=2, min_order=1) + for o1, o2 in terms: + # check if a t-amplitude of order o2 exists with special + # treatment of the first order singles amplitude + if (n_ov["occ"] > 2 * o2) or \ + (n_ov["occ"] == 1 and o2 == 1 and not self.singles): + continue + name = f"{tensor_names.gs_amplitude}{o2}" + contrib = Mul( + self.energy(o1), Amplitude(name, upper, lower) + ).expand() + if n_ov["occ"] == 2: # doubles... special sign + numerator += contrib + else: + numerator -= contrib + res = numerator / denom + assert isinstance(res, Expr) + return res + + @cached_member + def amplitude_residual(self, order: int, space: str, indices: str) -> Expr: + """ + Constructs the n'th-order residual for ground state amplitudes. 
+ + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space, e.g., 'ph' or 'pphh' for singly or doubly + excited configurations. + indices : str + The indices of the constructed t-amplitude. + """ + # + - sum_{m=0}^n E^{(m)} t_k^{(n-m)} = 0 + + # NOTE: Currently the implementation is general and should work for + # arbitrary 0th order Hamiltonians. + # Performance can be improved if the block structure + # of the RE hamiltonian is taken into account before evaluting + # wicks theorem! (Currently its done afterwards) + + # validate the input + validate_input(order=order, space=space, indices=indices) + n_ov = n_ov_from_space(space) + if n_ov["occ"] != n_ov["virt"]: + raise Inputerror(f"Invalid space for a RE t-amplitude: {space}.") + if n_ov["occ"] > 2 * order: # space not present at the order + return S.Zero + + # get the target indices and validate + idx = self.indices.get_indices(indices) + occupied = idx.get(("occ", ""), []) + virtual = idx.get(("virt", ""), []) + if n_ov["occ"] != len(occupied) or n_ov["virt"] != len(virtual): + raise Inputerror(f"Indices {indices} are not valid for space " + f"{space}.") + + # - build + ) + h0, rule = self.h.h0 + term = Mul(bra, h0, self.psi(order, 'ket')) + res += wicks(term, rules=rule, simplify_kronecker_deltas=True) + + h1, rule = self.h.h1 + term = Mul(bra, h1, self.psi(order - 1, 'ket')) + res += wicks(term, rules=rule, simplify_kronecker_deltas=True) + + # - subtract sum_{m=0}^n E^{(m)} t_k^{(n-m)} + for e_order, t_order in gen_term_orders(order, 2, 0): + # check if a t amplitude of order t_order exists + # special treatment of first order singles + if n_ov["occ"] > 2 * t_order or \ + (n_ov["occ"] == 1 and t_order == 1 and not self.singles): + continue + name = f"{tensor_names.gs_amplitude}{t_order}" + contrib = Mul( + self.energy(e_order), Amplitude(name, virtual, occupied) + ).expand() + if n_ov["occ"] == 2: # doubles -> different sign! 
+ res += contrib + else: + res -= contrib + assert isinstance(res, Expr) + return res + + def overlap(self, order: int) -> Expr: + """ + Computes the n'th-order contribution to the ground state overlap + matrix. + + Parameters + ---------- + order : int + The perturbation theoretical order. + """ + validate_input(order=order) + + # catch zeroth order + if order == 0: + return S.One + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + for term in orders: + # each wfn is requested only once -> no need to precompute and + # cache + i1 = Mul( + self.psi(order=term[0], braket='bra'), + self.psi(order=term[1], braket='ket') + ) + res += wicks(i1, simplify_kronecker_deltas=True) + # simplify the result by permuting contracted indices + res = simplify(ExprContainer(res)) + logger.debug(f"gs S^({order}) = {res}") + return res.inner + + @cached_member + def expectation_value(self, order: int, n_particles: int) -> Expr: + """ + Constructs the n'th-order contribution to the expectation value for + the given operator. + + Parameters + ---------- + order : int + The perturbation theoretical order. + n_particles : int + The number of creation and annihilation operators in the operator + string. + """ + validate_input(order=order) + # - import all mp wavefunctions. It should be possible here, because + # it is not possible to obtain a term |1>*x*|1>. 
+ wfn = {} + for o in range(order + 1): + wfn[o] = {} + for bk in ["bra", "ket"]: + wfn[o][bk] = self.psi(order=o, braket=bk) + + # better to generate twice orders for length 2 than once for length 3 + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + # get the operator + op, rules = self.h.operator(n_create=n_particles, + n_annihilate=n_particles) + # iterate over all norm*d combinations of n'th order + for norm_term in orders: + norm = self.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # compute d for a given norm factor + orders_d = gen_term_orders( + order=norm_term[1], term_length=2, min_order=0 + ) + d = S.Zero + for term in orders_d: + i1 = wfn[term[0]]['bra'] * op * wfn[term[1]]['ket'] + d += wicks(i1, simplify_kronecker_deltas=True, rules=rules) + res += (norm * d).expand() + return simplify(ExprContainer(res)).inner + + def norm_factor(self, order: int) -> Expr: + """ + Constructs the n'th-order contribution of the factor + that corrects the the norm of the ground state wavefunction: + 1 - sum_i S^(i) + (sum_i S^(i))^2 - ... + which is the result of a taylor expansion of a^2 + S = a^2 sum_{i=0} S^{(i)} = 1 -> a^2 = [sum_{i=0} S^{(i)}]^{(-1)}. + + Parameters + ---------- + order : int + The perturbation theoretical order. + """ + # This can not be cached! + # in case there is something like a(2)*a(2)*x + # do the two a(2) need to be different? 
+ # all a(n) only consist of t-amplitudes and all indices are + # contracted + # a(2) = 0.25*t_d^(2) + # a(2)*a(2) = 1/16 * t_d^(2) * t_d'^(2) + # -> no caching allowed + # Then it is also not possible to cache the overlap matrix + validate_input(order=order) + + taylor_expansion = self.expand_norm_factor(order=order, min_order=2) + norm_factor = S.Zero + for pref, termlist in taylor_expansion: + for term in termlist: + i1 = pref + for o in term: + i1 = Mul(i1, self.overlap(o)) + if i1 is S.Zero: + break + norm_factor += i1.expand() + assert isinstance(norm_factor, Expr) + logger.debug(f"norm_factor^({order}): {latex(norm_factor)}") + return norm_factor + + def expand_norm_factor(self, order, min_order=2 + ) -> list[tuple[Expr, list[tuple[int, ...]]]]: + """ + Constructs the taylor expansion of the n'th-order contribution to the + normalization factor a + f = (1 + x)^(-1), + where x is defined as x = sum_i S^{(i)}. + + Parameters + ---------- + order : int + The perturbation theoretical order. + min_order : int, optional + The lowest order non-vanishing contribution of the overlap matrix S + excluding the zeroth order contribution which is assumed to have + a value of 1. + + Returns + ------- + list + Iterable containing tuples of prefactors and perturbation + theoretical orders, for instance with a min_order of 2 the + 5'th order contribution reads + [(-1, [(5,)]), (1, [(2, 3), (3, 2)])]. + """ + from sympy import symbols, diff, nsimplify + + validate_input(order=order, min_order=min_order) + if min_order == 0: + raise Inputerror("A minimum order of 0 does not make sense here.") + + # below min_order all orders of the overlap matrix should be 0. 
+ # only the zeroth order contribution should be 1 + # -> obtain 0 or 1 from the overlap function -> handled automatically + if order < min_order: + return [(S.One, [(order,)])] + + x = symbols('x') + f = (1 + x) ** -1.0 + ret: list[tuple[Expr, list[tuple[int, ...]]]] = [] + for exp in range(1, order//min_order + 1): + f = diff(f, x) + pref = nsimplify( + f.subs(x, 0) * S.One / factorial(exp), rational=True + ) + orders = gen_term_orders( + order=order, term_length=exp, min_order=min_order + ) + ret.append((pref, orders)) + return ret diff --git a/build/lib/adcgen/indices.py b/build/lib/adcgen/indices.py new file mode 100644 index 0000000..dd1b34e --- /dev/null +++ b/build/lib/adcgen/indices.py @@ -0,0 +1,527 @@ +from collections.abc import Sequence, Collection, Mapping +from typing import Any, TypeGuard, TYPE_CHECKING + +from sympy import Dummy, Tuple + +from .misc import Inputerror, Singleton + +if TYPE_CHECKING: + from .symmetry import Permutation + + +class Index(Dummy): + """ + Represents an Index. Wrapper implementation around the 'sympy.Dummy' + class, which means Index("x") != Index("x"). + Important assumptions: + - below_fermi: The index represents an occupied orbital. + - above_fermi: The index represents a virtual orbital. + - core: The index represents a core orbital. + - alpha: The index represents an alpha (spatial) orbital. + - beta: The index represents a beta (spatial) orbital. + """ + + @property + def spin(self) -> str: + """ + The spin of the index. An empty string is returned if no spin is + defined. + """ + if self.assumptions0.get("alpha"): + return "a" + elif self.assumptions0.get("beta"): + return "b" + else: + return "" + + @property + def space(self) -> str: + """ + The space to which the index belongs (core/occupied/virtual/general). 
+ """ + if self.assumptions0.get("below_fermi"): + return "occ" + elif self.assumptions0.get("above_fermi"): + return "virt" + elif self.assumptions0.get("core"): + return "core" + elif self.assumptions0.get("ri"): + return "ri" + else: + return "general" + + @property + def space_and_spin(self) -> tuple[str, str]: + """Returns space and spin of the Index.""" + return self.space, self.spin + + def __str__(self): + spin = self.spin + return f"{self.name}_{spin}" if spin else self.name + + def __repr__(self) -> str: + return self.__str__() + + def _sympystr(self, printer): + _ = printer + return self.__str__() + + def _latex(self, printer) -> str: + _ = printer + ret = self.name + if (spin := self.spin): + spin = "alpha" if spin == "a" else "beta" + ret += "_{\\" + spin + "}" + return ret + + +class Indices(metaclass=Singleton): + """ + Manages the indices used thoughout the package and ensures that + only a single class instance exists for each index. + This is necessary because the 'equality' operator is essentially replaced + by the 'is' operator for the indices: Index("x") != Index("x"). + """ + # the valid spaces with their corresponding associated index names + base = { + "occ": "ijklmno", "virt": "abcdefgh", "general": "pqrstuvw", + "core": "IJKLMNO", "ri": "PQRSTUVWXYZ" + } + # the valid spins + spins = ("", "a", "b") + # the generation of generic indices starts with e.g., "i3" for occupied + # indices. Therefore, the indices "i", "i1" and "i2" are only available + # through a specific request to get_indices + _initial_counter = 3 + + def __init__(self) -> None: + # dict that holds all symbols that have been created previously. + # structure: {space: {spin: {name: symbol}}} + self._symbols: dict[str, dict[str, dict[str, Index]]] = {} + # dict that holds the automatically generated generic index names + # (strings) that have not been used yet. 
+ # structure: {space: {spin: [names]}} + self._generic_indices: dict[str, dict[str, list[str]]] = {} + # dict holding the counter (the number appended to the index names) + # for the generation of generic indices. + self._counter: dict[str, dict[str, int]] = {} + # initialize the data structures + for space in self.base: + self._symbols[space] = {} + self._generic_indices[space] = {} + self._counter[space] = {} + for spin in self.spins: + self._symbols[space][spin] = {} + self._generic_indices[space][spin] = [] + self._counter[space][spin] = self._initial_counter + + def is_cached_index(self, index: Index) -> bool: + """ + Whether an index was generated with the 'Indices' class and is thus + cached in the class. + """ + cached_symbol = ( + self._symbols[index.space][index.spin].get(index.name, None) + ) + return cached_symbol is index + + def _gen_generic_idx(self, space: str, spin: str = ""): + """ + Generated the next 'generation' of generic indices, i.e. extends + _generic_indices by incrementing the _counter attached to the index + base. + """ + # generate the not used indices of the new generation + counter = str(self._counter[space][spin]) + used_names = self._symbols[space][spin] + new_idx = [idx + counter for idx in self.base[space] + if idx + counter not in used_names] + # extend the available generic indices and increment the counter + self._generic_indices[space][spin].extend(new_idx) + self._counter[space][spin] += 1 + + def get_indices(self, indices: Sequence[str], + spins: Sequence[str] | None = None + ) -> dict[tuple[str, str], list[Index]]: + """ + Obtain the Indices for the provided string of names. + + Parameters + ---------- + indices : Sequence[str] + The names of the indices as a single string that is split + automatically as "ij21kl3" -> i, j21, k, l3. + They can also provided as list/tuple of index names. 
+ spins : Sequence[str] | None, optional + The spins of the indices as a single string, e.g., "aaba" + to obtain four indices with spin: alpha, alpha, beta, alpha. + Since no spin is represented by the empty string, it is only + possible to obtain indices with and without spin when the spins + are provided as list/tuple. + + Returns + ------- + dict + key: tuple containing the space and spin of the indices. + value: list containing the indices in the order the indices are + provided in the input. + """ + # split the string of indices + if isinstance(indices, str): + indices = split_idx_string(indices) + if spins is None: + spins = ["" for _ in range(len(indices))] + if len(indices) != len(spins): + raise Inputerror(f"Indices {indices} and spins {spins} do not " + "match.") + + ret: dict[tuple[str, str], list[Index]] = {} + for idx, spin in zip(indices, spins): + space = index_space(idx) + key = (space, spin) + if key not in ret: + ret[key] = [] + # check the cache for the index + symbol = self._symbols[space][spin].get(idx, None) + if symbol is not None: + ret[key].append(symbol) + continue + # not found in cache + # -> create new symbol and cache it + symbol = self._new_symbol(idx, space, spin) + self._symbols[space][spin][idx] = symbol + ret[key].append(symbol) + # -> also remove it from the available generic indices + try: + self._generic_indices[space][spin].remove(idx) + except ValueError: + continue + return ret + + def get_generic_indices(self, **kwargs + ) -> dict[tuple[str, str], list[Index]]: + """ + Request indices with unique names that have not been used in the + current run of the program. Easy way to ensure that contracted indices + do not appear anywhere else in a term. + Indices can be requested using the syntax "{space}_{spin}", + where spin is optional. For instance, occupied indices without + spin can be obtained with "occ=5", or "occ_a=5" for occupied indices + with alpha spin. 
+ + Returns + ------- + dict + key: tuple of space and spin of the indices. + value: list containing the indices. + """ + + ret = {} + for key, n in kwargs.items(): + if n == 0: + continue + key = key.split("_") + if len(key) == 2: + space, spin = key + elif len(key) == 1: + space, spin = key[0], "" + else: + raise Inputerror(f"{'_'.join(key)} is not valid for " + "requesting generic indices.") + # generate generic index names until we have enough + while n > len(self._generic_indices[space][spin]): + self._gen_generic_idx(space, spin) + # get the indices + idx = self._generic_indices[space][spin][:n] + spins = tuple(spin for _ in range(n)) + ret.update(self.get_indices(idx, spins)) + return ret + + def _new_symbol(self, name: str, space: str, spin: str) -> Index: + """Creates a new symbol with the defined name, space and spin.""" + assumptions = {} + if space == "occ": + assumptions["below_fermi"] = True + elif space == "virt": + assumptions["above_fermi"] = True + elif space == "core": + assumptions["core"] = True + elif space == "ri": + assumptions["ri"] = True + elif space != "general": + raise ValueError(f"Invalid space {space}") + if spin: + if spin == "a": + assumptions["alpha"] = True + elif spin == "b": + assumptions["beta"] = True + else: + raise ValueError(f"Invalid spin {spin}.") + return Index(name, **assumptions) + + +def index_space(idx: str) -> str: + """Returns the space an index belongs to (occ/virt/general).""" + for sp, idx_string in Indices.base.items(): + if idx[0] in idx_string: + return sp + raise Inputerror(f"Could not assign the index {idx} to a space.") + + +def sort_idx_canonical(idx: Index | Any): + """Use as sort key to to bring indices in canonical order.""" + if isinstance(idx, Index): + # - also add the hash here for wicks, where multiple i are around + # - we have to map the spaces onto numbers, since in adcman and adcc + # the ordering o < c < v is used for the definition of canonical blocks + space_keys = {"g": 0, "o": 1, "c": 2, 
"v": 3, "r": 4} + return (space_keys[idx.space[0]], + idx.spin, + int(idx.name[1:]) if idx.name[1:] else 0, + idx.name[0], + hash(idx)) + else: # necessary for subs to work correctly with simultaneous=True + return (-1, "", 0, str(idx), hash(idx)) + + +def split_idx_string(str_tosplit: str) -> list[str]: + """ + Splits an index string of the form 'ij12a3b' in a list ['i','j12','a3','b'] + """ + splitted = [] + temp = [] + for i, idx in enumerate(str_tosplit): + temp.append(idx) + try: + if str_tosplit[i+1].isdigit(): + continue + else: + splitted.append("".join(temp)) + temp.clear() + except IndexError: + splitted.append("".join(temp)) + return splitted + + +def n_ov_from_space(space_str: str): + """ + Number of required occupied and virtual indices required for the given + exictation space, e.g., "pph" -> 2 virtual and 1 occupied index. + """ + return {"occ": space_str.count("h"), "virt": space_str.count("p")} + + +def generic_indices_from_space(space_str: str) -> list[Index]: + """ + Constructs generic indices from a given space string (e.g. 'pphh'). + Thereby, occupied indices are listed before virtual indices! + """ + generic_idx = Indices().get_generic_indices(**n_ov_from_space(space_str)) + assert len(generic_idx) <= 2 # only occ and virt + occ = generic_idx.get(("occ", ""), []) + occ.extend(generic_idx.get(("virt", ""), [])) + occ.extend(generic_idx.get(("core", ""), [])) + occ.extend(generic_idx.get(("ri", ""), [])) + return occ + + +def repeated_indices(idx_a: str, idx_b: str) -> bool: + """Checks whether both index strings share an index.""" + split_a = split_idx_string(idx_a) + split_b = split_idx_string(idx_b) + return any(i in split_b for i in split_a) + + +def get_lowest_avail_indices(n: int, used: Collection[str], space: str + ) -> list[str]: + """ + Return the names of the n lowest available indices belonging to the desired + space. + + Parameters + ---------- + n : int + The number of available indices. 
+ used : Collection[str] + The names of the indices that are already in use. + space : str + The space (occ/virt/general) to which the indices belong. + """ + # generate idx pool to pick the lowest indices from + base = Indices.base[space] + idx = list(base) + required = len(used) + n # the number of indices present in the term + suffix = 1 + while len(idx) < required: + idx.extend(s + str(suffix) for s in base) + suffix += 1 + # remove the already used indices (that are not available anymore) + # and return the first n elements of the resulting list + return [s for s in idx if s not in used][:n] + + +def get_symbols(indices: Sequence[str] | Index | Sequence[Index], + spins: Sequence[str] | None = None) -> list[Index]: + """ + Wrapper around the Indices class to initialize 'Index' instances with the + provided names and spin. + + Parameters + ---------- + indices : Index | Sequence[str] | Sequence[Index] + The names of the indices to generate. If they are already instances + of the 'Index' class we do nothing. + spins : Sequence[str] | None, optional + The spin of the indices, e.g., "aab" to obtain 3 indices with + alpha, alpha and beta spin. 
+ """ + + if not indices: # empty string/list + return [] + elif isinstance(indices, Index): # a single symbol is not iterable + return [indices] + elif _is_index_sequence(indices): + return indices if isinstance(indices, list) else list(indices) + # we actually have to do something + # -> prepare indices and spin + if isinstance(indices, str): + indices = split_idx_string(indices) + if spins is None: + spins = ["" for _ in range(len(indices))] + # at this point we should have a list/tuple of strings + # construct the indices + assert _is_str_sequence(indices) + symbols = Indices().get_indices(indices, spins) + # and return them in the order they were provided in the input + for val in symbols.values(): + val.reverse() + ret = [symbols[(index_space(idx), spin)].pop() + for idx, spin in zip(indices, spins)] + assert not any(symbols.values()) # ensure we consumed all indices + return ret + + +def order_substitutions(subsdict: dict[Index, Index] + ) -> list[tuple[Index, Index]]: + """ + Order substitutions such that only a minial amount of intermediate + indices is required when the substitutions are executed one after another + and the usage of the 'simultaneous=True' option of sympys 'subs' method. + Adapted from the 'substitute_dummies' function defined in + 'sympy.physics.secondquant'. + """ + + subs = [] + final_subs = [] + for o, n in subsdict.items(): + if o is n: # indices are identical -> nothing to do + continue + # the new index is substituted by another index + if (other_n := subsdict.get(n, None)) is not None: + if other_n in subsdict: + # i -> j / j -> i + # temporary variable is needed + p = Index('p') + subs.append((o, p)) + final_subs.append((p, n)) + else: + # i -> j / j -> k + # in this case it is sufficient to do the i -> j substitution + # after the j -> k substitution, but before temporary variables + # are resubstituted again. 
+ final_subs.insert(0, (o, n)) + else: + subs.append((o, n)) + subs.extend(final_subs) + return subs + + +def minimize_tensor_indices( + tensor_indices: Sequence[Index], + target_idx_names: Mapping[tuple[str, str], Collection[str]] + ) -> "tuple[tuple[Index, ...], tuple[Permutation, ...]]": + """ + Minimizes the indices on a tensor using the lowest available indices that + are no target indices. + + Parameters + ---------- + tensor_indices : Sequence[Index] + The indices of the tensor. + target_idx : dict[tuple[str, str], Collection[str]] + The names of target indices sorted by their space and spin with + key = (space, spin). + + Returns + ------- + tuple + The minimized indices and the list of index permutations that was + applied to reach this minimized state. + """ + from .symmetry import Permutation, PermutationProduct + + for target in target_idx_names.values(): + if not all(isinstance(s, str) for s in target): + raise TypeError("Target indices need to be provided as string.") + + tensor_idx: list[Index] = list(tensor_indices) + n_unique_indices: int = len(set(tensor_idx)) + minimal_indices: dict[tuple[str, str], list[Index]] = {} + permutations: list[Permutation] = [] + minimized = set() + for s in tensor_idx: + if s in minimized: + continue + idx_key = s.space_and_spin + # target indices of the corresponding space + space_target = target_idx_names.get(idx_key, []) + # index is a target idx -> keep as is + if s.name in space_target: + minimized.add(s) + continue + # generate minimal indices for the corresponding space and spin + if idx_key not in minimal_indices: + space, spin = idx_key + min_names = get_lowest_avail_indices(n_unique_indices, + space_target, space) + if spin: + spins = spin * n_unique_indices + else: + spins = None + min_symbols = get_symbols(min_names, spins) + min_symbols.reverse() + minimal_indices[idx_key] = min_symbols + + # get the lowest available index for the corresponding space + min_s = minimal_indices[idx_key].pop() + 
minimized.add(min_s) + if s is min_s: # s is already the lowest available index + continue + # found a lower index + # -> permute tensor indices and append permutation to permutations + # list + perm = {s: min_s, min_s: s} + for i, other_s in enumerate(tensor_idx): + tensor_idx[i] = perm.get(other_s, other_s) + permutations.append(Permutation(s, min_s)) + return tuple(tensor_idx), PermutationProduct(*permutations) + + +################################################ +# Some TypeGuards to make the type checker happy +############################################### +def _is_index_sequence(sequence: Sequence) -> TypeGuard[Sequence[Index]]: + return all(isinstance(s, Index) for s in sequence) + + +def _is_index_tuple(sequence: tuple | Tuple) -> TypeGuard[tuple[Index, ...]]: + return all(isinstance(s, Index) for s in sequence) + + +def _is_str_sequence(sequence: Sequence) -> TypeGuard[Sequence[str]]: + return ( + isinstance(sequence, str) or all(isinstance(s, str) for s in sequence) + ) + + +_ = _is_index_tuple diff --git a/build/lib/adcgen/intermediate_states.py b/build/lib/adcgen/intermediate_states.py new file mode 100644 index 0000000..2580ac6 --- /dev/null +++ b/build/lib/adcgen/intermediate_states.py @@ -0,0 +1,598 @@ +from collections.abc import Sequence +from math import factorial + +from sympy.physics.secondquant import NO, Dagger +from sympy import Expr, Mul, Rational, S, latex, nsimplify, diff, symbols + +from .expression import ExprContainer +from .func import gen_term_orders, wicks, evaluate_deltas +from .groundstate import GroundState +from .indices import ( + n_ov_from_space, repeated_indices, Indices, generic_indices_from_space +) +from .logger import logger +from .misc import cached_member, Inputerror, transform_to_tuple, validate_input +from .simplify import simplify +from .sympy_objects import Amplitude +from .tensor_names import tensor_names + + +class IntermediateStates: + """ + Class for constructing epxressions for Precursor or Intermediate 
states. + + Parameters + ---------- + mp : GroundState + Representation of the underlying ground state. Used to generate + ground state related expressions. + variant : str, optional + The ADC variant for which Intermediates are constructed, e.g., + 'pp', 'ip' or 'ea' for PP-, IP- or EA-ADC expressions, respectively + (default: 'pp'). + """ + def __init__(self, mp: GroundState, variant: str = "pp"): + assert isinstance(mp, GroundState) + self.gs: GroundState = mp + self.indices: Indices = Indices() + + variants: dict[str, tuple[str, ...]] = { + "pp": ("ph", "hp"), + "ea": ("p",), + "ip": ("h",), + "dip": ("hh",), + "dea": ("pp",), + } + if variant not in variants.keys(): + raise Inputerror(f"The ADC variant {variant} is not valid. " + "Supported variants are " + f"{list(variants.keys())}.") + self.variant: str = variant + self.min_space: tuple[str, ...] = variants[variant] + + @cached_member + def precursor(self, order: int, space: str, braket: str, indices: str + ) -> Expr: + """ + Constructs expressions for precursor states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space of the desired precursor state, e.g., 'ph' or + 'pphh' for singly or doubly excited precursor states. + braket : str + Defines whether a bra or ket precursor state is constructed. + indices : str + The indices of the precursor state. 
+ """ + + # check input parameters + indices_tpl = transform_to_tuple(indices) + validate_input(order=order, space=space, braket=braket, + indices=indices_tpl) + if len(indices_tpl) != 1: + raise Inputerror(f"{indices} are not valid for constructing a " + "precursor state.") + indices = indices_tpl[0] + del indices_tpl + # check that the space is valid for the given ADC variant + if not self.validate_space(space): + raise Inputerror(f"{space} is not a valid space for " + f"{self.variant} ADC.") + + # get the target symbols of the precursor state + idx = self.indices.get_indices(indices) + # check compatibility of indices and space + if idx.get(("general", "")): + raise Inputerror(f"The provided indices {indices} include a " + "general index.") + n_ov = n_ov_from_space(space) + occupied = idx.get(("occ", ""), []) + virtual = idx.get(("virt", ""), []) + if len(occupied) != n_ov["occ"] or len(virtual) != n_ov["virt"]: + raise Inputerror(f"The indices {indices} and the space {space} " + "are not compatible.") + del n_ov # prevent accidentally usage below + + # in contrast to the gs, here the operators are ordered as + # abij instead of abji in order to stay consistent with the + # ADC literature. + operators = self.gs.h.excitation_operator( + creation=virtual, annihilation=occupied, reverse_annihilation=False + ) + if braket == "bra": + operators = Dagger(operators) + + res = S.Zero + + # leading term: + # no need to differentiate bra/ket here, because + # operators * mp = mp * operators (there is always an equal number of + # p/h operators in mp that needs to be moved to the other side. + # Will always give +.) + max_gs = self.gs.psi(order=order, braket=braket) + res += Mul(NO(operators), max_gs).expand() + + # get all terms of a*b of the desired order (ground state norm) + orders = gen_term_orders(order=order, term_length=2, min_order=0) + + # orthogonalise with respect to the ground state for pp ADC. + # checked up to 4th order! 
+ if self.variant == "pp": + # import all ground state wave functions that may not appear twice + # in |a>, i.e. all of: order > int(order/2) + gs_psi: dict[str, dict[int, Expr]] = {'bra': {}, 'ket': {}} + gs_psi[braket][order] = max_gs + for o in range(order//2 + 1, order+1): + if not gs_psi['bra'].get(o): + gs_psi['bra'][o] = self.gs.psi(order=o, braket='bra') + if not gs_psi['ket'].get(o): + gs_psi['ket'][o] = self.gs.psi(order=o, braket='ket') + + def get_gs_wfn(o, bk): return gs_psi[bk][o] if o > order//2 else \ + self.gs.psi(order=o, braket=bk) + + # 1) iterate through all combinations of norm_factor*projector + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # 2) construct the projector for a given norm_factor + # the overall order is split between the norm_factor and the + # projector + orders_projection = gen_term_orders( + order=norm_term[1], term_length=3, min_order=0 + ) + projection = S.Zero + for term in orders_projection: + # |Y> <-- -|X> + if braket == "ket": + i1 = Mul( + get_gs_wfn(term[1], 'bra'), NO(operators), + get_gs_wfn(term[2], 'ket') + ) + state = get_gs_wfn(term[0], 'ket') + # + n_ov = n_ov_from_space(lower_space) + prefactor = Rational( + 1, factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + del n_ov + + # orthogonalise with respsect to the lower excited ISR state + # 1) iterate through all combinations of norm_factor*projector + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # 2) construct the projector for a given norm factor + # the overall order is split between he norm_factor and the + # projector + orders_projection = gen_term_orders( + norm_term[1], term_length=3, min_order=0 + ) + projection = S.Zero + for term in orders_projection: + # |Y#> <-- -|X> + if braket == "ket": + i1 = Mul( + self.intermediate_state(order=term[1], + space=lower_space, + braket="bra", + indices=idx_isr), + NO(operators), + 
self.gs.psi(order=term[2], braket="ket") + ) + state = self.intermediate_state( + order=term[0], space=lower_space, braket="ket", + indices=idx_isr + ) + # Expr: + """ + Constructs expressions for elements of the overlap matrix of the + precursor states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The block of the overlap matrix, e.g., 'ph,ph' for an element of + the 1p-1h/1p-1h block. + indices : Sequence[str] + The indices of the overlap matrix element, e.g., 'ia,jb' for + S_{ia,jb}. + """ + + # no need to do more validation here -> will be done in precursor + block = transform_to_tuple(block) + indices = transform_to_tuple(indices) + validate_input(order=order, block=block, indices=indices) + if len(indices) != 2: + raise Inputerror("2 index strings required for an overlap matrix " + f"block. Got: {indices}.") + + if repeated_indices(indices[0], indices[1]): + raise Inputerror("Repeated index found in indices of precursor " + f"overlap matrix: {indices}.") + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + + res = S.Zero + # 1) iterate through all combinations of norm_factor*S + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # 2) construct S for a given norm factor + # the overall order is split between the norm_factor and S + orders_overlap = gen_term_orders( + order=norm_term[1], term_length=2, min_order=0 + ) + overlap = S.Zero + for term in orders_overlap: + i1 = Mul( + self.precursor(order=term[0], space=block[0], + braket="bra", indices=indices[0]), + self.precursor(order=term[1], space=block[1], + braket="ket", indices=indices[1]) + ) + i1 = wicks(i1, simplify_kronecker_deltas=True) + overlap += i1 + res += (norm * overlap).expand() + # It should be valid to simplifiy the result by permuting contracted + # indices before returning -> should lower the overall size of the + # final expression + res = 
simplify(ExprContainer(res)) + logger.debug(f"overlap {block} S_{indices}^({order}) = {res}") + return res.inner + + @cached_member + def s_root(self, order: int, block: Sequence[str], + indices: Sequence[str]) -> Expr: + """ + Constructs expression for elements of the inverse square root of the + precursor overlap matrix (S^{-0.5})_{I,J} by expanding + S^{-0.5} in a Taylor series. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The desired matrix block, e.g., 'ph,pphh' for an element of the + 1p-1h/2p-2h block. + indices : Sequence[str] + The indices of the matrix element, e.g., 'ia,jkcd' for + (S^{-0.5})_{ia,jkcd}. + """ + + block = transform_to_tuple(block) + indices = transform_to_tuple(indices) + validate_input(order=order, block=block, indices=indices) + if len(indices) != 2: + raise Inputerror("2 index strings required for a block of the " + "inverse suqare root of the overlap matrix. " + f"Got: {indices}.") + if repeated_indices(indices[0], indices[1]): + raise Inputerror(f"Repeated index found in indices {indices}.") + if block[0] != block[1]: + raise NotImplementedError("Off diagonal blocks of the overlap " + "matrix should be 0 by definition. 
" + "Simply don't know how to handle the " + "index generation needed in this case.") + + taylor_expansion = self.expand_S_taylor(order, min_order=2) + # create an index list: first and last element are the two provided + # idx strings + idx: list[str] = list(indices) + # create more indices: exponent-1 or len(taylor_expansion)-1 indices + # - x*x 1 additional index 'pair' is required: I,I' = I,I'' * I'',I' + # - x^3: I,I' = I,I'' * I'',I''' * I''',I' + for _ in range(len(taylor_expansion) - 1): + new_idx: str = "".join( + s.name for s in generic_indices_from_space(block[0]) + ) + idx.insert(-1, new_idx) + # iterate over exponents and terms, starting with the lowest exponent + res = S.Zero + for pref, termlist in taylor_expansion: + # all terms in the list should have the same length, i.e. + # all originate from x*x or x^3 etc. + for term in termlist: + relevant_idx = idx[:len(term)] + [idx[-1]] + i1 = S.One * pref + for o in term: + i1 *= self.overlap_precursor( + order=o, block=block, + indices=tuple(relevant_idx[:2]) + ) + del relevant_idx[0] + if i1 is S.Zero: + break + assert ( + len(relevant_idx) == 1 and + relevant_idx[0] == indices[1] + ) + # in squared or higher terms S*S*... delta evaluation might + # be necessary + res += evaluate_deltas(i1.expand()) + assert isinstance(res, Expr) + logger.debug( + f"{block} S_root_{indices}^({order}) = {latex(res)}" + ) + return res + + @cached_member + def intermediate_state(self, order: int, space: str, braket: str, + indices: str) -> Expr: + """ + Constructs expressions for intermediate states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space of the desired intermediate state, e.g., + 'ph' and 'pphh' for singly and doubly excited intermediate states. + braket : str + Defines whether a bra or ket intermediate state is constructed. + indices : str + The indices of the intermediate state. + """ + indices_tpl: tuple[str, ...] 
= transform_to_tuple(indices) + validate_input(order=order, space=space, braket=braket, + indices=indices_tpl) + if len(indices_tpl) != 1: + raise Inputerror(f"{indices} are not valid for " + "constructing an intermediate state.") + indices = indices_tpl[0] + del indices_tpl + + # generate additional indices for the precursor state + idx_pre: str = "".join( + s.name for s in generic_indices_from_space(space) + ) + + n_ov = n_ov_from_space(space) + prefactor = Rational( + 1, factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + del n_ov + + # sandwich the IS and precursor indices together + s_indices = { + 'bra': ",".join([indices, idx_pre]), + 'ket': ",".join([idx_pre, indices]) + } + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + for term in orders: + i1 = Mul( + prefactor, + self.s_root(order=term[0], block=(space, space), + indices=s_indices[braket]), + self.precursor(order=term[1], space=space, braket=braket, + indices=idx_pre) + ) + res += evaluate_deltas(i1.expand()) + assert isinstance(res, Expr) + logger.debug(f"{space} ISR_({indices}^({order}) {braket} = " + f"{latex(res)}") + return res + + @cached_member + def overlap_isr(self, order: int, block: Sequence[str], + indices: Sequence[str]) -> Expr: + """ + Computes a block of the overlap matrix in the basis of intermediate + states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The desired matrix block. + indices : Sequence[str] + The indices of the matrix element. + """ + + block = transform_to_tuple(block) + indices = transform_to_tuple(indices) + validate_input(order=order, block=block, indices=indices) + if len(indices) != 2: + raise Inputerror("Constructing a ISR overlap matrix block requires" + f" 2 index strings. 
Provided: {indices}.") + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + # 1) iterate through all combinations of norm_factor*S + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # 2) construct S for a given norm factor + # the overall order is split between he norm_factor and S + orders_overlap = gen_term_orders( + order=norm_term[1], term_length=2, min_order=0 + ) + overlap = S.Zero + for term in orders_overlap: + i1 = Mul( + self.intermediate_state(order=term[0], space=block[0], + braket="bra", + indices=indices[0]), + self.intermediate_state(order=term[1], space=block[1], + braket="ket", + indices=indices[1]) + ) + i1 = wicks(i1, simplify_kronecker_deltas=True) + overlap += i1 + res += (norm * overlap).expand() + assert isinstance(res, Expr) + logger.debug(f"ISR overlap {block} S_{indices}^({order}) = " + f"{latex(res)}") + return res + + @cached_member + def amplitude_vector(self, indices: str, lr: str = "right") -> Expr: + """ + Constructs an amplitude vector with the provided indices. + + Parameters + ---------- + indices : str + The indices of the amplitude vector. + lr : str, optional + Whether a left (X) or right (Y) amplitude vector is constructed + (default: 'right'). + """ + + validate_input(indices=indices, lr=lr) + + idx = self.indices.get_indices(indices) + occ = idx.get(("occ", ""), []) + virt = idx.get(("virt", ""), []) + + name = getattr(tensor_names, f"{lr}_adc_amplitude") + return Amplitude(name, virt, occ) + + def expand_S_taylor(self, order: int, min_order: int = 2 + ) -> list[tuple[Expr, list[tuple[int, ...]]]]: + """ + Performs a Taylor expansion of the inverse square root of the + overlap matrix + S^{0.5} = (1 + x)^{-0.5} with x = sum_{n=1} S^(n) + returning all n'th-order contributions. + + Parameters + ---------- + order : int + The perturbation theoretical order. 
+ min_order : int, optional + The lowest order at which the overlap matrix S has a non-vanishing + caontribution excluding the zeroth order contribution + (default: 2). + + Returns + ------- + list + Iterable containing tuples of prefactors and perturbation + theoretical orders, for instance, with a min_order of 2 the + 5'th order contributions read + [(-1/2, [(5,)]), (3/8, [(2, 3), (3, 2)])]. + """ + validate_input(order=order, min_order=min_order) + if min_order == 0: + raise Inputerror("A minimum order of 0 does not make sense here.") + + # below min_order all orders - except the zeroth order contribution - + # should be zero. Should be handled automatically if the corresponding + # orders are forwarded to the overlap method. + if order < min_order: + return [(S.One, [(order,)])] + + x = symbols('x') + f = (1 + x) ** -0.5 + ret: list[tuple[Expr, list[tuple[int, ...]]]] = [] + for exp in range(1, order//min_order + 1): + f = diff(f, x) + pref = nsimplify( + f.subs(x, 0) * S.One / factorial(exp), rational=True + ) + orders = gen_term_orders( + order=order, term_length=exp, min_order=min_order + ) + assert isinstance(pref, Expr) + ret.append((pref, orders)) + return ret + + def _generate_lower_spaces(self, space_str: str) -> list[str]: + """ + Generates all strings of lower excited configurations for a given + excitation space. + + Parameters + ---------- + space_str : str + The space for which to construct lower excitation spaces, e.g., + ['ph'] for 'pphh'. + """ + lower_spaces: list[str] = [] + for _ in range(min(space_str.count('p'), space_str.count('h'))): + space_str = space_str.replace('p', '', 1).replace('h', '', 1) + if not space_str: + break + lower_spaces.append(space_str) + return lower_spaces + + def validate_space(self, space_str: str) -> bool: + """ + Checks whether the given space is valid for the current ADC variant. + + Parameters + ---------- + space_str : str + The excitation space to validate. 
+ """ + + if space_str in self.min_space: + return True + + lower_spaces = self._generate_lower_spaces(space_str) + return any(sp in self.min_space for sp in lower_spaces) diff --git a/build/lib/adcgen/intermediates.py b/build/lib/adcgen/intermediates.py new file mode 100644 index 0000000..2022f18 --- /dev/null +++ b/build/lib/adcgen/intermediates.py @@ -0,0 +1,1663 @@ +from collections.abc import Sequence, Callable +from collections import Counter +from dataclasses import dataclass +from functools import cached_property +import itertools + +from sympy import Add, Expr, Min, Mul, Pow, Rational, S + +from .expression import ExprContainer, ObjectContainer +from .core_valence_separation import allowed_cvs_blocks +from .indices import ( + Indices, Index, + get_symbols, order_substitutions, sort_idx_canonical, +) +from .logger import logger +from .misc import Inputerror, Singleton, cached_member +from .eri_orbenergy import EriOrbenergy +from .sympy_objects import NonSymmetricTensor, AntiSymmetricTensor, Amplitude +from .symmetry import LazyTermMap, Permutation +from .spatial_orbitals import allowed_spin_blocks +from .tensor_names import tensor_names + + +@dataclass(frozen=True, slots=True) +class ItmdExpr: + expr: Expr + target: tuple[Index, ...] + contracted: tuple[Index, ...] | None + + +class Intermediates(metaclass=Singleton): + """ + Manages all defined intermediates. + New intermediates can be defined by inheriting from + 'RegisteredIntermediate'. + """ + + def __init__(self): + self._registered: dict[str, dict[str, RegisteredIntermediate]] = ( + RegisteredIntermediate()._registry + ) + self._available: dict[str, RegisteredIntermediate] = { + name: obj for objects in self._registered.values() + for name, obj in objects.items() + } + + @property + def available(self) -> dict[str, "RegisteredIntermediate"]: + """ + Returns all available intermediates using their name as dict key. 
+ """ + return self._available + + @property + def types(self) -> list[str]: + """Returns all available types of intermediates.""" + return list(self._registered.keys()) + + def __getattr__(self, attr: str) -> dict[str, "RegisteredIntermediate"]: + if attr in self._registered: # is the attr an intermediate type? + return self._registered[attr] + elif attr in self._available: # is the attr an intermediate name? + return {attr: self._available[attr]} + else: + raise AttributeError(f"{self} has no attribute {attr}. " + f"The intermediate types: {self.types} " + "and the intermediate names: " + f"{list(self.available.keys())} are " + "available.") + + +class RegisteredIntermediate: + """ + Base class for defined intermediates. + New intermediates can be added by inheriting from this class and require: + - an itmd type '_itmd_type' + - an perturbation theoretical order '_order' + - names of default indices '_default_idx' + - a method that fully expands the itmd into orbital energies and ERI + '_build_expanded_itmd' + - a method that returns the itmd tensor '_build_tensor' + """ + _registry: dict[str, dict[str, "RegisteredIntermediate"]] = {} + _itmd_type: str | None = None + _order: int | None = None + _default_idx: tuple[str, ...] 
| None = None + + def __init_subclass__(cls) -> None: + itmd_type = cls._itmd_type + assert itmd_type is not None + if itmd_type not in cls._registry: + cls._registry[itmd_type] = {} + if (name := cls.__name__) not in cls._registry[itmd_type]: + cls._registry[itmd_type][name] = cls() + + @property + def name(self) -> str: + """Name of the intermediate (the class name).""" + return type(self).__name__ + + @property + def order(self) -> int: + """Perturbation theoretical order of the intermediate.""" + if not hasattr(self, "_order") or self._order is None: + raise AttributeError(f"No order defined for {self.name}.") + return self._order + + @property + def default_idx(self) -> tuple[str, ...]: + """Names of the default indices of the intermediate.""" + if not hasattr(self, "_default_idx") or self._default_idx is None: + raise AttributeError(f"No default indices defined for {self.name}") + return self._default_idx + + @property + def itmd_type(self) -> str: + """The type of the intermediate.""" + if not hasattr(self, "_itmd_type") or self._itmd_type is None: + raise AttributeError(f"No itmd_type defined for {self.name}.") + return self._itmd_type + + def validate_indices(self, + indices: Sequence[str] | Sequence[Index] | None = None + ) -> list[Index]: + """ + Ensures that the indices are valid for the intermediate and + transforms them to 'Index' instances. 
+ """ + if indices is None: # no need to validate the default indices + return get_symbols(self.default_idx) + + indices = get_symbols(indices) + default = get_symbols(self.default_idx) + if len(indices) != len(default): + raise Inputerror("Wrong number of indices for the itmd " + f"{self.name}.") + elif any(s.space != d.space for s, d in zip(indices, default)): + raise Inputerror(f"The indices {indices} are not valid for the " + f"itmd {self.name}") + return indices + + def expand_itmd(self, + indices: Sequence[str] | Sequence[Index] | None = None, + wrap_result: bool = True, fully_expand: bool = True + ) -> Expr | ExprContainer: + """ + Expands the intermediate into orbital energies and ERI. + + Parameters + ---------- + indices : Sequence[str] | Sequence[Index], optional + The names of the indices of the intermediate. By default the + default indices (defined on the itmd class) will be used. + wrap_result : bool, optional + Whether to wrap the result in an + :py:class:ExprContainer. (default: True) + fully_expand : bool, optional + True (default): The returned intermediate is recursively fully + expanded into orbital energies and ERI (if possible). + False: Returns a more readable version which is not recusively + expanded, e.g., n't-order MP t-amplitudes are expressed by + means of (n-1)'th-order MP t-amplitudes. + """ + # check that the provided indices are fine for the itmd + indices = self.validate_indices(indices) + # currently all intermediates are only implemented for spin orbitals, + # because the intermediate definition depends on the spin, i.e., + # we would need either multiple definitions per intermediate or + # incorporate the spin in the intermediate names. + if any(idx.spin for idx in indices): + raise NotImplementedError( + "Intermediates not implemented for indices with spin " + "(spatial orbitals)." 
+            )
+
+        # build a cached base version of the intermediate where we can just
+        # substitute indices in
+        expanded_itmd = self._build_expanded_itmd(fully_expand)
+
+        # build the substitution dict
+        subs: dict[Index, Index] = {}
+        # map target indices onto each other
+        if (base_target := expanded_itmd.target) is not None:
+            subs.update({o: n for o, n in zip(base_target, indices)})
+        # map contracted indices onto each other (replace them by generic idx)
+        if (base_contracted := expanded_itmd.contracted) is not None:
+            spaces = [s.space_and_spin for s in base_contracted]
+            kwargs = Counter(
+                f"{sp}_{spin}" if spin else sp for sp, spin in spaces
+            )
+            contracted = Indices().get_generic_indices(**kwargs)
+            for new in contracted.values():
+                new.reverse()
+            for old, sp in zip(base_contracted, spaces):
+                subs[old] = contracted[sp].pop()
+            if any(li for li in contracted.values()):
+                raise RuntimeError("Generated more contracted indices than "
+                                   f"necessary. {contracted} are left.")
+
+        # do some extra work with the substitutions to avoid using the
+        # simultaneous=True option for subs (very slow)
+        itmd = expanded_itmd.expr.subs(order_substitutions(subs))
+        assert isinstance(itmd, Expr)
+
+        if itmd is S.Zero and expanded_itmd.expr is not S.Zero:
+            raise ValueError(f"The substitutions {subs} are not valid for "
+                             f"{expanded_itmd.expr}.")
+
+        if wrap_result:
+            itmd = ExprContainer(itmd, target_idx=indices)
+        return itmd
+
+    def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr:
+        """
+        Expand the intermediate using the default indices.
+        """
+        _ = fully_expand
+        raise NotImplementedError("Build expanded intermediate not implemented"
+                                  f" on {self.name}")
+
+    def tensor(self, indices: Sequence[str] | Sequence[Index] | None = None,
+               wrap_result: bool = True):
+        """
+        Returns the itmd tensor.
+
+        Parameters
+        ----------
+        indices : str, optional
+            The names of the indices of the intermediate.
+            By default the
+            default indices (defined on the itmd class) will be used.
+        wrap_result : bool, optional
+            Whether to wrap the result in an
+            :py:class:ExprContainer. (default: True)
+        """
+        # check that the provided indices are sufficient for the itmd
+        indices = self.validate_indices(indices)
+
+        # build the tensor object
+        tensor = self._build_tensor(indices=indices)
+        if wrap_result:
+            kwargs = {}
+            if isinstance(tensor, AntiSymmetricTensor):
+                if tensor.bra_ket_sym is S.One:  # bra ket symmetry
+                    kwargs["sym_tensors"] = (tensor.name,)
+                elif tensor.bra_ket_sym is S.NegativeOne:  # bra ket antisym
+                    kwargs["antisym_tensors"] = (tensor.name,)
+            return ExprContainer(tensor, **kwargs)
+        else:
+            return tensor
+
+    def _build_tensor(self, indices: Sequence[Index]) -> Expr:
+        """
+        Build the tensor representing the intermediate using the given indices.
+        """
+        _ = indices
+        raise NotImplementedError("Build tensor not implemented on "
+                                  f"{self.name}")
+
+    @cached_property
+    def tensor_symmetry(self) -> dict[tuple[Permutation, ...], int]:
+        """
+        Determines the symmetry of the itmd tensor object using the
+        default indices, e.g., ijk/abc triples symmetry for t3_2.
+        """
+        tensor = self.tensor(wrap_result=True)
+        assert isinstance(tensor, ExprContainer) and len(tensor) == 1
+        return tensor.terms[0].symmetry()
+
+    @cached_property
+    def allowed_spin_blocks(self) -> tuple[str, ...]:
+        """Determines all non-zero spin blocks of the intermediate."""
+
+        target_idx = self.default_idx
+        itmd = self.expand_itmd(
+            indices=target_idx, wrap_result=True, fully_expand=False
+        )
+        assert isinstance(itmd, ExprContainer)
+        return allowed_spin_blocks(itmd.expand(), target_idx)
+
+    @cached_member
+    def allowed_cvs_blocks(
+        self,
+        cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None  # noqa E501
+    ) -> tuple[str, ...]:
+        """
+        Splits the occupied orbitals in core and valence orbitals and
+        determines the valid blocks if the CVS approximation is applied.
+
+        Parameters
+        ----------
+        cvs_approximation : callable, optional
+            Callable that takes an expr_container.Obj instance and a space
+            string (e.g. 'covv'). It returns a bool indicating whether the
+            block of the object described by the space string is valid within
+            the CVS approximation, i.e., whether the block is neglected or not.
+            By default, the "is_allowed_cvs_block" function is used,
+            which applies the CVS approximation as described in
+            10.1063/1.453424 and as implemented in adcman/adcc.
+        """
+        target_idx = self.default_idx
+        itmd = self.expand_itmd(
+            indices=target_idx, wrap_result=True, fully_expand=False
+        )
+        assert isinstance(itmd, ExprContainer)
+        return allowed_cvs_blocks(
+            itmd.expand(), target_idx, cvs_approximation=cvs_approximation
+        )
+
+    @cached_member
+    def itmd_term_map(self, factored_itmds: Sequence[str] = tuple()
+                      ) -> LazyTermMap:
+        """
+        Returns a map that lazily determines permutations of target indices
+        that map terms in the intermediate definition onto each other.
+
+        Parameters
+        ----------
+        factored_itmds : Sequence[str], optional
+            Names of other intermediates to factor in the fully expanded
+            definition of the current intermediate which (if factorization is
+            successful) changes the form of the intermediate.
+            By default the fully expanded version will be used.
+        """
+        # - load the appropriate version of the intermediate
+        itmd = self._prepare_itmd(factored_itmds)
+        return LazyTermMap(itmd)
+
+    @cached_member
+    def _prepare_itmd(self, factored_itmds: Sequence[str] = tuple()
+                      ) -> ExprContainer:
+        """
+        Generates a variant of the intermediate with default indices and
+        simplifies it as much as possible.
+
+        Parameters
+        ----------
+        factored_itmds : tuple[str], optional
+            Names of other intermediates to factor in the fully expanded
+            definition of the current intermediate. By default the fully
+            expanded version will be used.
+ """ + from .reduce_expr import factor_eri_parts, factor_denom + + # In a usual run we only need 1 variant of an intermediate: + # a b c d e + # a b c d + # a b c + # a b + # a + # For example, always the version of b where a is factorized + # -> for b this function will always be called with a as factored_itmds + # -> caching decorator is sufficient... no need to additionally + # cache the simplified base version + + # build the base version of the itmd and simplify it + # - factor eri and denominator + itmd = self.expand_itmd(wrap_result=True, fully_expand=True) + assert isinstance(itmd, ExprContainer) + itmd.expand().make_real() + reduced = itertools.chain.from_iterable( + factor_denom(sub_expr) for sub_expr in factor_eri_parts(itmd) + ) + itmd = ExprContainer(0, **itmd.assumptions) + for term in reduced: + itmd += term.factor() + + logger.info("".join([ + "\n", "-"*80, "\n", + f"Preparing Intermediate: Factoring {factored_itmds}" + ])) + + if factored_itmds: + available = Intermediates().available + # iterate through factored_itmds and factor them one after another + # in the simplified base itmd + for i, it in enumerate(factored_itmds): + logger.info("\n".join([ + "-"*80, f"Factoring {it} in {self.name}:" + ])) + itmd = available[it].factor_itmd( + itmd, factored_itmds=factored_itmds[:i], + max_order=self.order + ) + logger.info("".join([ + "\n", "-"*80, "\n", + f"Done with factoring {factored_itmds} in {self.name}", "\n", + "-"*80 + ])) + return itmd + + def factor_itmd(self, expr: ExprContainer, + factored_itmds: Sequence[str] = tuple(), + max_order: int | None = None, + allow_repeated_itmd_indices: bool = False + ) -> ExprContainer: + """ + Factors the intermediate in an expression assuming a real orbital + basis. + + Parameters + ---------- + expr : Expr + Expression in which to factor intermediates. + factored_itmds : Sequence[str], optional + Names of other intermediates that have already been factored in + the expression. 
It is necessary to factor those intermediates in + the current intermediate definition as well, because the + definition might change. By default the fully expanded version + of the intermediate will be used. + max_order : int, optional + The maximum perturbation theoretical order of intermediates + to consider. + allow_repeated_itmd_indices: bool, optional + If set, the factorization of intermediates of the form I_iij are + allowed, i.e., indices on the intermediate may appear more than + once. This corresponds to either a partial trace or a diagonal + element of the intermediate. Note that this does not consistently + work for "long" intermediates (at least 2 terms), because the + number of terms might be reduced which is not correctly handled + currently. + """ + + from .factor_intermediates import ( + _factor_long_intermediate, _factor_short_intermediate, + FactorizationTermData + ) + + assert isinstance(expr, ExprContainer) + if not expr.real: + raise NotImplementedError("Intermediates only implemented for " + "a real orbital basis.") + + # ensure that the previously factored intermediates + # are provided as tuple -> can use them as dict key + if isinstance(factored_itmds, str): + factored_itmds = (factored_itmds,) + elif not isinstance(factored_itmds, tuple): + factored_itmds = tuple(factored_itmds) + + # can not factor if the expr is just a number or the intermediate + # has already been factored or the order of the pt order of the + # intermediate is to high. + # also it does not make sense to factor t4_2 again, because of the + # used factorized form. 
+ if expr.inner.is_number or self.name in factored_itmds or \ + self.name == 't4_2' or \ + (max_order is not None and max_order < self.order): + return expr + + expr = expr.expand() + terms = expr.terms + + # if want to factor a t_amplitude + # -> terms to consider need to have a denominator + # Also the pt order of the term needs to be high enough for the + # current intermediate + if self.itmd_type == 't_amplitude' and self.name != 't4_2': + term_is_relevant = [ + term.order >= self.order and + any(o.exponent < S.Zero and o.contains_only_orb_energies + for o in term.objects) + for term in terms + ] + else: + term_is_relevant = [term.order >= self.order for term in terms] + # no term has a denominator or a sufficient pt order + # -> can't factor the itmd + if not any(term_is_relevant): + return expr + + # determine the maximum pt order present in the expr (order is cached) + max_order = max(term.order for term in terms) + + # build a new expr that only contains the relevant terms + remainder = S.Zero + to_factor = ExprContainer(0, **expr.assumptions) + for term, is_relevant in zip(terms, term_is_relevant): + if is_relevant: + to_factor += term + else: + remainder += term.inner + + # - prepare the itmd for factorization and extract data to speed + # up the later comparison + itmd_expr = self._prepare_itmd(factored_itmds=factored_itmds) + itmd: tuple[EriOrbenergy, ...] = tuple( + EriOrbenergy(term).canonicalize_sign() for term in itmd_expr.terms + ) + itmd_data: tuple[FactorizationTermData, ...] 
= tuple( + FactorizationTermData(term) for term in itmd + ) + + # factor the intermediate in the expr + if len(itmd) == 1: # short intermediate that consists of a single term + factored = _factor_short_intermediate( + to_factor, itmd[0], itmd_data[0], self, + allow_repeated_itmd_indices=allow_repeated_itmd_indices + ) + factored += remainder + else: # long intermediate that consists of multiple terms + itmd_term_map = self.itmd_term_map(factored_itmds) + for _ in range(max_order // self.order): + to_factor = _factor_long_intermediate( + to_factor, itmd, itmd_data, itmd_term_map, self, + allow_repeated_itmd_indices=allow_repeated_itmd_indices + ) + factored = to_factor + remainder + return factored + + +# ----------------------------------------------------------------------------- +# INTERMEDIATE DEFINITIONS: + + +class t2_1(RegisteredIntermediate): + """First order MP doubles amplitude.""" + _itmd_type = 't_amplitude' # type has to be a class variable + _order = 1 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + _ = fully_expand + # build a basic version of the intermediate using minimal indices + # 'like on paper' + i, j, a, b = get_symbols(self.default_idx) + denom = Add( + orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) + ) + ampl = eri((a, b, i, j)) * S.One / denom + assert isinstance(ampl, Expr) + return ItmdExpr(ampl, (i, j, a, b), None) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + # guess its not worth caching here. Maybe if used a lot. + # build the tensor + return Amplitude( + f"{tensor_names.gs_amplitude}1", indices[2:], indices[:2] + ) + + def factor_itmd(self, expr: ExprContainer, + factored_itmds: Sequence[str] | None = None, + max_order: int | None = None, + allow_repeated_itmd_indices: bool = False + ) -> ExprContainer: + """ + Factors the t2_1 intermediate in an expression assuming a real + orbital basis. 
+ """ + _ = allow_repeated_itmd_indices + assert isinstance(expr, ExprContainer) + if not expr.real: + raise NotImplementedError("Intermediates only implemented for a " + "real orbital basis.") + # do we have something to factor? did we already factor the itmd? + if expr.inner.is_number or \ + (factored_itmds and self.name in factored_itmds): + return expr + + # no need to determine max order for a first order intermediate + if max_order is not None and max_order < self.order: + return expr + + # prepare the itmd and extract information + t2 = self.expand_itmd(wrap_result=True, fully_expand=True) + assert isinstance(t2, ExprContainer) + t2.make_real() + t2 = EriOrbenergy(t2).canonicalize_sign() + t2_eri: ObjectContainer = t2.eri.objects[0] + t2_eri_descr: str = t2_eri.description(include_exponent=False, + target_idx=None) + t2_denom = t2.denom.inner + t2_eri_idx: tuple[Index, ...] = t2_eri.idx + + expr = expr.expand() + + factored = ExprContainer(0, **expr.assumptions) + for term in expr.terms: + term = EriOrbenergy(term) # split the term + + if term.denom.inner.is_number: # term needs to have a denominator + factored += term.expr.inner + continue + term = term.canonicalize_sign() # fix the sign of the denominator + + brackets = term.denom_brackets + removed_brackets: set[int] = set() + factored_term = ExprContainer(1, **expr.assumptions) + eri_obj_to_remove: list[int] = [] + denom_brackets_to_remove: list[int] = [] + for eri_idx, eri in enumerate(term.eri.objects): + # - compare the eri objects (check if we have a oovv eri) + # coupling is not relevant for t2_1 (only a single object) + descr: str = eri.description( + include_exponent=False, target_idx=None + ) + if descr != t2_eri_descr: + continue + # repeated indices on t2_1 make no sense since + # t^aa_ij = / (2a - i - j) = 0 + # due to the permutational antisymmetry of V + assert all(c == 1 for c in Counter(eri.idx).values()) + # - have a correct eri -> zip indices together and substitute + # the itmd 
denominator + sub = order_substitutions(dict(zip(t2_eri_idx, eri.idx))) + sub_t2_denom = t2_denom.subs(sub) + # consider the exponent! + # ^2 may be factored twice + eri_exp = eri.exponent + # - check if we find a matching denominator + for bk_idx, bk in enumerate(brackets): + # was the braket already removed? + if bk_idx in removed_brackets: + continue + if isinstance(bk, ExprContainer): + bk_exponent = S.One + bk = bk.inner + else: + bk, bk_exponent = bk.base_and_exponent + # found matching bracket in denominator + if bk == sub_t2_denom: + # can possibly factor multiple times, depending + # on the exponent of the eri and the denominator + min_exp = Min(eri_exp, bk_exponent) + # are we removing the bracket completely? + if min_exp == bk_exponent: + removed_brackets.add(bk_idx) + # found matching eri and denominator + # replace eri and bracket by a t2_1 tensor + assert min_exp.is_Integer + denom_brackets_to_remove.extend( + bk_idx for _ in range(int(min_exp)) + ) + eri_obj_to_remove.extend( + eri_idx for _ in range(int(min_exp)) + ) + # can simply use the indices of the eri as target + # indices for the tensor + amplitude = self.tensor( + indices=eri.idx, wrap_result=False + ) + assert isinstance(amplitude, Expr) + factored_term *= Pow( + amplitude / t2.pref, + min_exp + ) + # - remove the matched eri and denominator objects + denom = term.cancel_denom_brackets(denom_brackets_to_remove) + eri = term.cancel_eri_objects(eri_obj_to_remove) + # - collect the remaining objects in the term and add to result + factored_term *= term.pref * eri * term.num / denom + logger.info(f"\nFactoring {self.name} in:\n{term}\nresult:\n" + f"{EriOrbenergy(factored_term)}") + factored += factored_term + return factored + + +class t1_2(RegisteredIntermediate): + """Second order MP singles amplitude.""" + _itmd_type = "t_amplitude" + _order = 2 + _default_idx = ("i", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + # target_indices + i, a 
= get_symbols(self.default_idx) + # additional contracted indices + j, k, b, c = get_symbols('jkbc') + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the amplitude + denom = Add(orb_energy(i), -orb_energy(a)) + term1 = (Rational(1, 2) * + t2(indices=(i, j, b, c), wrap_result=False) * + eri([j, a, b, c])) + term2 = (Rational(1, 2) * + t2(indices=(j, k, a, b), wrap_result=False) * + eri([j, k, i, b])) + return ItmdExpr(term1/denom + term2/denom, (i, a), (j, k, b, c)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}2", (indices[1],), (indices[0],) + ) + + +class t2_2(RegisteredIntermediate): + """Second order MP doubles amplitude.""" + _itmd_type = "t_amplitude" + _order = 2 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, a, b = get_symbols(self.default_idx) + # generate additional contracted indices (2o / 2v) + k, l, c, d = get_symbols('klcd') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the t2_2 amplitude + denom = Add( + orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) + ) + itmd = S.Zero + # - 0.5 t2eri_3 + itmd += (- Rational(1, 2) * eri((i, j, k, l)) * + t2(indices=(k, l, a, b), wrap_result=False)) + # - 0.5 t2eri_5 + itmd += (- Rational(1, 2) * eri((a, b, c, d)) * + t2(indices=(i, j, c, d), wrap_result=False)) + # + (1 - P_ij) (1 - P_ab) P_ij t2eri_4 + ampl = t2(indices=(i, k, a, c), wrap_result=True) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((k, b, j, c)) + itmd += Add( + base.inner, -base.copy().permute((i, j)).inner, + -base.copy().permute((a, b)).inner, + base.copy().permute((i, j), (a, b)).inner + ) + return ItmdExpr(itmd * S.One / denom, (i, j, a, b), (k, l, c, d)) + + def 
_build_tensor(self, indices: Sequence[Index]) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}2", indices[2:], indices[:2] + ) + + +class t3_2(RegisteredIntermediate): + """Second order MP triples amplitude.""" + _itmd_type = "t_amplitude" + _order = 2 + _default_idx = ("i", "j", "k", "a", "b", "c") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, k, a, b, c = get_symbols(self.default_idx) + # generate additional contracted indices (1o / 1v) + l, d = get_symbols('ld') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the t3_2 amplitude + denom = Add( + orb_energy(i), orb_energy(j), orb_energy(k), + -orb_energy(a), -orb_energy(b), -orb_energy(c) + ) + itmd = S.Zero + # (1 - P_ik - P_jk) (1 - P_ab - P_ac) t_ij^ad + ampl = t2(indices=(i, j, a, d), wrap_result=True) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((k, d, b, c)) + itmd += Add( + base.inner, + -base.copy().permute((i, k)).inner, + -base.copy().permute((j, k)).inner, + -base.copy().permute((a, b)).inner, + -base.copy().permute((a, c)).inner, + base.copy().permute((i, k), (a, b)).inner, + base.copy().permute((i, k), (a, c)).inner, + base.copy().permute((j, k), (a, b)).inner, + base.copy().permute((j, k), (a, c)).inner + ) + # (1 - P_ij - P_ik) (1 - P_ac - P_bc) t_il^ab + ampl = t2(indices=(i, l, a, b), wrap_result=True) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((j, k, l, c)) + itmd += Add( + base.inner, + -base.copy().permute((i, j)).inner, + -base.copy().permute((i, k)).inner, + -base.copy().permute((a, c)).inner, + -base.copy().permute((b, c)).inner, + base.copy().permute((i, j), (a, c)).inner, + base.copy().permute((i, j), (b, c)).inner, + base.copy().permute((i, k), (a, c)).inner, + base.copy().permute((i, k), (b, c)).inner + ) + return ItmdExpr(itmd/denom, (i, j, k, a, b, c), (l, d)) + + def 
_build_tensor(self, indices) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}2", indices[3:], indices[:3] + ) + + +class t4_2(RegisteredIntermediate): + """ + Second order MP quadruple amplitudes in a factorized form that avoids + the construction of the quadruples denominator. + """ + _itmd_type = "t_amplitude" + _order = 2 + _default_idx = ("i", "j", "k", "l", "a", "b", "c", "d") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, k, l, a, b, c, d = get_symbols(self.default_idx) + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the t4_2 amplitude + # (1 - P_ac - P_ad - P_bc - P_bd + P_ac P_bd) (1 - P_jk - P_jl) + # t_ij^ab t_kl^cd + ampl = t2(indices=(i, j, a, b)) + assert isinstance(ampl, ExprContainer) + base = ampl * t2(indices=(k, l, c, d), wrap_result=False) + v_permutations = {tuple(tuple()): 1, ((a, c),): -1, ((a, d),): -1, + ((b, c),): -1, ((b, d),): -1, ((a, c), (b, d)): +1} + o_permutations = {tuple(tuple()): 1, ((j, k),): -1, ((j, l),): -1} + t4 = S.Zero + for (o_perms, o_factor), (v_perms, v_factor) in \ + itertools.product(o_permutations.items(), + v_permutations.items()): + perms = o_perms + v_perms + t4 += Mul(o_factor, v_factor, base.copy().permute(*perms).inner) + return ItmdExpr(t4, (i, j, k, l, a, b, c, d), None) + + def _build_tensor(self, indices) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}2", indices[4:], indices[:4] + ) + + +class t1_3(RegisteredIntermediate): + """Third order MP single amplitude.""" + _itmd_type = "t_amplitude" + _order = 3 + _default_idx = ("i", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a = get_symbols('ia') + # generate additional contracted indices (2o / 2v) + j, k, b, c = get_symbols('jkbc') + # other intermediate class instances + t1 = self._registry['t_amplitude']['t1_2'] + t2 = 
self._registry['t_amplitude']['t2_2'] + t3 = self._registry['t_amplitude']['t3_2'] + if fully_expand: + t1 = t1.expand_itmd + t2 = t2.expand_itmd + t3 = t3.expand_itmd + else: + t1 = t1.tensor + t2 = t2.tensor + t3 = t3.tensor + # build the amplitude + denom = Add(orb_energy(i), -orb_energy(a)) + itmd = (Rational(1, 2) * eri([j, a, b, c]) * + t2(indices=(i, j, b, c), wrap_result=False)) + itmd += (Rational(1, 2) * eri([j, k, i, b]) * + t2(indices=(j, k, a, b), wrap_result=False)) + amplitude = t1(indices=(j, b), wrap_result=False) + assert isinstance(amplitude, Expr) + itmd -= amplitude * eri([i, b, j, a]) + itmd += (Rational(1, 4) * eri([j, k, b, c]) * + t3(indices=(i, j, k, a, b, c), wrap_result=False)) + # need to keep track of all contracted indices... also contracted + # indices within each of the second order t-amplitudes + # -> substitute_contracted indices to minimize the number of contracted + # indices + target = (i, a) + if fully_expand: + itmd = ExprContainer(itmd, target_idx=target) + itmd = itmd.substitute_contracted().inner + contracted = tuple(sorted( + [s for s in itmd.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = (j, k, b, c) + return ItmdExpr(itmd * S.One / denom, target, contracted) + + def _build_tensor(self, indices) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}3", (indices[1],), (indices[0],)) + + +class t2_3(RegisteredIntermediate): + """Third order MP double amplitude.""" + _itmd_type = "t_amplitude" + _order = 3 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, a, b = get_symbols(self.default_idx) + # generate additional contracted indices (2o / 2v) + k, l, c, d = get_symbols('klcd') + # other intermediate class instances + _t2_1 = self._registry['t_amplitude']['t2_1'] + t1 = self._registry['t_amplitude']['t1_2'] + t2 = self._registry['t_amplitude']['t2_2'] + t3 = 
self._registry['t_amplitude']['t3_2'] + t4 = self._registry['t_amplitude']['t4_2'] + if fully_expand: + _t2_1 = _t2_1.expand_itmd + t1 = t1.expand_itmd + t2 = t2.expand_itmd + t3 = t3.expand_itmd + t4 = t4.expand_itmd + else: + _t2_1 = _t2_1.tensor + t1 = t1.tensor + t2 = t2.tensor + t3 = t3.tensor + t4 = t4.tensor + # build the amplitude + denom = Add( + orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) + ) + itmd = S.Zero + # +(1-P_ij) * t^c_j(2) + ampl = t1(indices=(j, c)) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((i, c, a, b)) + itmd += Add(base.inner, -base.permute((i, j)).inner) + # +(1-P_ab) * t^b_k(2) + ampl = t1(indices=(k, b)) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((i, j, k, a)) + itmd += Add(base.inner, -base.permute((a, b)).inner) + # - 0.5 * t^cd_ij(2) + itmd -= (Rational(1, 2) * eri((a, b, c, d)) * + t2(indices=(i, j, c, d), wrap_result=False)) + # - 0.5 * t^ab_kl(2) + itmd -= (Rational(1, 2) * eri((i, j, k, l)) * + t2(indices=(k, l, a, b), wrap_result=False)) + # + (1-P_ij)*(1-P_ab) * t^ac_ik(2) + ampl = t2(indices=(i, k, a, c)) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((j, c, k, b)) + itmd += Add( + base.inner, + -base.copy().permute((i, j)).inner, + -base.copy().permute((a, b)).inner, + base.copy().permute((i, j), (a, b)).inner + ) + # + 0.5 * (1-P_ab) * t^bcd_ijk(2) + ampl = t3(indices=(i, j, k, b, c, d)) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((k, a, c, d)) + itmd += (Rational(1, 2) * base.inner + - Rational(1, 2) * base.copy().permute((a, b)).inner) + # + 0.5 * (1-P_ij) t^abc_jkl(2) + ampl = t3(indices=(j, k, l, a, b, c)) + assert isinstance(ampl, ExprContainer) + base = ampl * eri((k, l, i, c)) + itmd += (Rational(1, 2) * base.inner + - Rational(1, 2) * base.copy().permute((i, j)).inner) + # + 0.25 t^abcd_ijkl(2) + itmd += (Rational(1, 4) * eri((k, l, c, d)) * + t4(indices=(i, j, k, l, a, b, c, d), wrap_result=False)) + # - 0.25 t^ab_ij(1) t^kl_cd(1) + 
itmd -= (Rational(1, 4) * eri((k, l, c, d)) * + _t2_1(indices=(i, j, a, b), wrap_result=False) * + _t2_1(indices=(k, l, c, d), wrap_result=False)) + # minimize the number of contracted indices + target = (i, j, a, b) + if fully_expand: + itmd = ExprContainer(itmd, target_idx=target) + itmd = itmd.substitute_contracted().inner + contracted = tuple(sorted( + [s for s in itmd.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = (k, l, c, d) + return ItmdExpr(itmd * S.One / denom, target, contracted) + + def _build_tensor(self, indices) -> Expr: + return Amplitude( + f"{tensor_names.gs_amplitude}3", indices[2:], indices[:2] + ) + + +class t2_1_re_residual(RegisteredIntermediate): + """ + Residual of the first order RE doubles amplitudes. + """ + _itmd_type = "re_residual" + _order = 2 # according to MP the maximum order of the residual is 2 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + # re intermediates can not be fully expanded, but add the bool + # anyway for a consistent interface + _ = fully_expand + i, j, a, b = get_symbols(self.default_idx) + # additional contracted indices + k, l, c, d = get_symbols('klcd') + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_1'] + + itmd = S.Zero + + # (1 - P_ij)(1 - P_ab) t_jk^bc + ampl = t2.tensor(indices=[j, k, b, c]) + assert isinstance(ampl, ExprContainer) + base = ampl * eri([i, c, k, a]) + itmd += Add( + base.inner, + -base.copy().permute((i, j)).inner, + -base.copy().permute((a, b)).inner, + base.copy().permute((i, j), (a, b)).inner + ) + # (1 - P_ab) f_ac t_ij^bc + ampl = t2.tensor(indices=[i, j, b, c]) + assert isinstance(ampl, ExprContainer) + base = ampl * fock([a, c]) + itmd += Add(base.inner, -base.copy().permute((a, b)).inner) + # (1 - P_ij) f_jk t_ik^ab + ampl = t2.tensor(indices=[i, k, a, b]) + assert isinstance(ampl, ExprContainer) + base = ampl * fock([j, k]) + itmd += 
Add(base.inner, -base.copy().permute((i, j)).inner) + # - 0.5 * t_ij^cd + itmd -= (Rational(1, 2) * eri((a, b, c, d)) * + t2.tensor(indices=(i, j, c, d), wrap_result=False)) + # -0.5 * t_kl^ab + itmd -= (Rational(1, 2) * eri((i, j, k, l)) * + t2.tensor(indices=(k, l, a, b), wrap_result=False)) + # + + itmd += eri((i, j, a, b)) + target = (i, j, a, b) + contracted = (k, l, c, d) + return ItmdExpr(itmd, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + # placeholder for 0, will be replaced in factor_intermediate + return AntiSymmetricTensor("Zero", indices[:2], indices[2:]) + + +class t1_2_re_residual(RegisteredIntermediate): + """ + Residual of the second order RE singles amplitudes. + """ + _itmd_type = "re_residual" + _order = 3 # according to MP the maximum order of the residual is 3 + _default_idx = ("i", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True): + _ = fully_expand + i, a = get_symbols(self.default_idx) + # additional contracted indices + j, k, b, c = get_symbols('jkbc') + + # t amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + ts2 = self._registry['t_amplitude']['t1_2'] + + # - {V^{ib}_{ja}} {t2^{b}_{j}} + itmd = ( + -eri([i, b, j, a]) * ts2.tensor(indices=[j, b], wrap_result=False) + ) + # + {f^{a}_{b}} {t2^{b}_{i}} + itmd += ( + fock([a, b]) * ts2.tensor(indices=[i, b], wrap_result=False) + ) + # - {f^{i}_{j}} {t2^{a}_{j}} + itmd -= ( + fock([i, j]) * ts2.tensor(indices=[j, a], wrap_result=False) + ) + # + \frac{{V^{ja}_{bc}} {t1^{bc}_{ij}}}{2} + itmd += (Rational(1, 2) * eri([j, a, b, c]) + * t2.tensor(indices=[i, j, b, c], wrap_result=False)) + # + \frac{{V^{jk}_{ib}} {t1^{ab}_{jk}}}{2} + itmd += (Rational(1, 2) * eri([j, k, i, b]) + * t2.tensor(indices=[j, k, a, b], wrap_result=False)) + # - {f^{j}_{b}} {t1^{ab}_{ij}} + itmd -= ( + fock([j, b]) * t2.tensor(indices=[i, j, a, b], wrap_result=False) + ) + target = (i, a) + contracted = (j, k, b, c) + return ItmdExpr(itmd, 
target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + # placeholder for 0, will be replaced in factor_intermediate + return AntiSymmetricTensor("Zero", (indices[0],), (indices[1],)) + + +class t2_2_re_residual(RegisteredIntermediate): + """ + Residual of the second order RE doubles amplitudes. + """ + _itmd_type = "re_residual" + _order = 3 # according to MP the maximum order of the residual is 3 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + _ = fully_expand + i, j, a, b = get_symbols(self.default_idx) + # additional contracted indices + k, l, c, d = get_symbols('klcd') + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_2'] + + itmd = S.Zero + + # (1 - P_ij)(1 - P_ab) t_jk^bc + ampl = t2.tensor(indices=[j, k, b, c]) + assert isinstance(ampl, ExprContainer) + base = ampl * eri([i, c, k, a]) + itmd += Add( + base.inner, + -base.copy().permute((i, j)).inner, + -base.copy().permute((a, b)).inner, + base.copy().permute((i, j), (a, b)).inner + ) + # (1 - P_ab) f_ac t_ij^bc + ampl = t2.tensor(indices=[i, j, b, c]) + assert isinstance(ampl, ExprContainer) + base = ampl * fock([a, c]) + itmd += Add( + base.inner, -base.copy().permute((a, b)).inner + ) + # (1 - P_ij) f_jk t_ik^ab + ampl = t2.tensor(indices=[i, k, a, b]) + assert isinstance(ampl, ExprContainer) + base = ampl * fock([j, k]) + itmd += Add( + base.inner, -base.copy().permute((i, j)).inner + ) + # - 0.5 * t_ij^cd + itmd -= (Rational(1, 2) * eri((a, b, c, d)) * + t2.tensor(indices=(i, j, c, d), wrap_result=False)) + # -0.5 * t_kl^ab + itmd -= (Rational(1, 2) * eri((i, j, k, l)) * + t2.tensor(indices=(k, l, a, b), wrap_result=False)) + target = (i, j, a, b) + contracted = (k, l, c, d) + return ItmdExpr(itmd, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + # placeholder for 0, will be replaced in factor_intermediate + return 
AntiSymmetricTensor("Zero", indices[:2], indices[2:]) + + +class p0_2_oo(RegisteredIntermediate): + """ + Second order contribution to the occupied occupied block of the MP + one-particle density matrix. + """ + _itmd_type = "mp_density" + _order = 2 + _default_idx = ("i", "j") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j = get_symbols(self.default_idx) + # additional contracted indices (1o / 2v) + k, a, b = get_symbols('kab') + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the density + p0 = (- Rational(1, 2) * + t2(indices=(i, k, a, b), wrap_result=False) * + t2(indices=(j, k, a, b), wrap_result=False)) + return ItmdExpr(p0, (i, j), (k, a, b)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor( + f"{tensor_names.gs_density}2", (indices[0],), (indices[1],), 1 + ) + + +class p0_2_vv(RegisteredIntermediate): + """ + Second order contribution to the virtual virtual block of the MP + one-particle density matrix. + """ + _itmd_type = "mp_density" + _order = 2 + _default_idx = ("a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + a, b = get_symbols(self.default_idx) + # additional contracted indices (2o / 1v) + i, j, c = get_symbols('ijc') + # t2_1 class instance + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the density + p0 = (Rational(1, 2) * + t2(indices=(i, j, a, c), wrap_result=False) * + t2(indices=(i, j, b, c), wrap_result=False)) + return ItmdExpr(p0, (a, b), (i, j, c)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor( + f"{tensor_names.gs_density}2", (indices[0],), (indices[1],), 1) + + +class p0_3_oo(RegisteredIntermediate): + """ + Third order contribution to the occupied occupied block of the MP + one-particle density matrix. 
+ """ + _itmd_type = "mp_density" + _order = 3 + _default_idx = ("i", "j") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j = get_symbols(self.default_idx) + # generate additional contracted indices (1o / 2v) + k, a, b = get_symbols('kab') + # t amplitude cls + t2 = self._registry['t_amplitude']['t2_1'] + td2 = self._registry['t_amplitude']['t2_2'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + td2 = td2.expand_itmd if fully_expand else td2.tensor + # build the density + p0 = (- Rational(1, 2) * + t2(indices=(i, k, a, b), wrap_result=False) * + td2(indices=(j, k, a, b), wrap_result=False)) + p0 += p0.subs({i: j, j: i}, simultaneous=True) + + target = (i, j) + if fully_expand: + p0 = ExprContainer( + p0, target_idx=target + ).substitute_contracted().inner + contracted = tuple(sorted( + [s for s in p0.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = (k, a, b) + return ItmdExpr(p0, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor( + f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) + + +class p0_3_ov(RegisteredIntermediate): + """ + Third order contribution to the occupied virtual block of the MP + one-particle density matrix. 
+ """ + _itmd_type = "mp_density" + _order = 3 + _default_idx = ("i", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a = get_symbols(self.default_idx) + # generate additional contracted indices (2o / 2v) + j, k, b, c = get_symbols('jkbc') + # t_amplitude cls instances + t2 = self._registry['t_amplitude']['t2_1'] + ts2 = self._registry['t_amplitude']['t1_2'] + tt2 = self._registry['t_amplitude']['t3_2'] + ts3 = self._registry['t_amplitude']['t1_3'] + if fully_expand: + t2 = t2.expand_itmd + ts2 = ts2.expand_itmd + tt2 = tt2.expand_itmd + ts3 = ts3.expand_itmd + else: + t2 = t2.tensor + ts2 = ts2.tensor + tt2 = tt2.tensor + ts3 = ts3.tensor + p0 = S.Zero + # build the density + # - t^ab_ij(1) t^b_j(2) + p0 += ( + S.NegativeOne * t2(indices=(i, j, a, b), wrap_result=False) * + ts2(indices=(j, b), wrap_result=False) + ) + # - 0.25 * t^bc_jk(1) t^abc_ijk(2) + p0 -= (Rational(1, 4) * + t2(indices=(j, k, b, c), wrap_result=False) * + tt2(indices=(i, j, k, a, b, c), wrap_result=False)) + # + t^a_i(3) + p0 += ts3(indices=(i, a), wrap_result=False) + + target = (i, a) + if fully_expand: + p0 = ExprContainer( + p0, target_idx=target + ).substitute_contracted().inner + contracted = tuple(sorted( + [s for s in p0.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = (j, k, b, c) + assert isinstance(p0, Expr) + return ItmdExpr(p0, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor( + f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) + + +class p0_3_vv(RegisteredIntermediate): + """ + Third order contribution to the virtual virtual block of the MP + one-particle density matrix. 
+ """ + _itmd_type = "mp_density" + _order = 3 + _default_idx = ("a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + a, b = get_symbols(self.default_idx) + # additional contracted indices (2o / 1v) + i, j, c = get_symbols('ijc') + # t_amplitude cls instances + t2 = self._registry['t_amplitude']['t2_1'] + td2 = self._registry['t_amplitude']['t2_2'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + td2 = td2.expand_itmd if fully_expand else td2.tensor + # build the density + p0 = (Rational(1, 2) * + t2(indices=(i, j, a, c), wrap_result=False) * + td2(indices=(i, j, b, c), wrap_result=False)) + p0 += p0.subs({a: b, b: a}, simultaneous=True) + + target = (a, b) + if fully_expand: + p0 = ExprContainer( + p0, target_idx=target + ).substitute_contracted().inner + contracted = tuple(sorted( + [s for s in p0.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = (i, j, c) + return ItmdExpr(p0, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor( + f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) + + +class t2eri_1(RegisteredIntermediate): + """t2eri1 in adcc / pi1 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "k", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, k, a = get_symbols(self.default_idx) + # generate additional contracted indices (2v) + b, c = get_symbols('bc') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(i, j, b, c), wrap_result=False) * + eri((k, a, b, c)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, j, k, a), (b, c)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eri1', indices[:2], 
indices[2:]) + + +class t2eri_2(RegisteredIntermediate): + """t2eri2 in adcc / pi2 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "k", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, k, a = get_symbols(self.default_idx) + # generate additional contracted indices (1o / 1v) + b, l = get_symbols('bl') # noqa E741 + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(i, l, a, b), wrap_result=False) * + eri((l, k, j, b)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, j, k, a), (b, l)) + + def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: + return NonSymmetricTensor('t2eri2', indices) + + +class t2eri_3(RegisteredIntermediate): + """t2eri3 in adcc / pi3 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, a, b = get_symbols(self.default_idx) + # generate additional contracted indices (2o) + k, l = get_symbols('kl') # noqa E741 + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(k, l, a, b), wrap_result=False) * + eri((i, j, k, l)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, j, a, b), (k, l)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eri3', indices[:2], indices[2:]) + + +class t2eri_4(RegisteredIntermediate): + """t2eri4 in adcc / pi4 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, a, b = get_symbols(self.default_idx) + # generate additional contracted 
indices (1o / 1v) + k, c = get_symbols('kc') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(j, k, a, c), wrap_result=False) * + eri((k, b, i, c)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, j, a, b), (k, c)) + + def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: + return NonSymmetricTensor('t2eri4', indices) + + +class t2eri_5(RegisteredIntermediate): + """t2eri5 in adcc / pi5 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "a", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, a, b = get_symbols(self.default_idx) + # generate additional contracted indices (2v) + c, d = get_symbols('cd') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(i, j, c, d), wrap_result=False) * + eri((a, b, c, d)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, j, a, b), (c, d)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eri5', indices[:2], indices[2:]) + + +class t2eri_6(RegisteredIntermediate): + """t2eri6 in adcc / pi6 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "a", "b", "c") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a, b, c = get_symbols(self.default_idx) + # generate additional contracted indices (2o) + j, k = get_symbols('jk') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(j, k, b, c), wrap_result=False) * + eri((j, k, i, a)) + ) + assert 
isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, a, b, c), (j, k)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eri6', indices[:2], indices[2:]) + + +class t2eri_7(RegisteredIntermediate): + """t2eri7 in adcc / pi7 in libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "a", "b", "c") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a, b, c = get_symbols(self.default_idx) + # generate additional contracted indices (1o / 1v) + j, d = get_symbols('jd') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + t2eri = ( + t2(indices=(i, j, b, d), wrap_result=False) * + eri((j, c, a, d)) + ) + assert isinstance(t2eri, Expr) + return ItmdExpr(t2eri, (i, a, b, c), (j, d)) + + def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: + return NonSymmetricTensor('t2eri7', indices) + + +class t2eri_A(RegisteredIntermediate): + """pia intermediate in libadc""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "j", "k", "a") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, j, k, a = get_symbols(self.default_idx) + # t2eri cls instances for generating the itmd + pi1 = self._registry['misc']['t2eri_1'] + pi2 = self._registry['misc']['t2eri_2'] + pi1 = pi1.expand_itmd if fully_expand else pi1.tensor + pi2 = pi2.expand_itmd if fully_expand else pi2.tensor + # build the itmd + pia = ( + Rational(1, 2) * pi1(indices=(i, j, k, a), wrap_result=False) + + pi2(indices=(i, j, k, a), wrap_result=False) + + S.NegativeOne * pi2(indices=(j, i, k, a), wrap_result=False) + ) + target = (i, j, k, a) + if fully_expand: + pia = ExprContainer( + pia, target_idx=target + ).substitute_contracted().inner + contracted = tuple(sorted( + [s for s in pia.atoms(Index) if s not in target], + 
key=sort_idx_canonical + )) + else: + contracted = tuple() + return ItmdExpr(pia, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eriA', indices[:2], indices[2:]) + + +class t2eri_B(RegisteredIntermediate): + """pib intermediate in libadc""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "a", "b", "c") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a, b, c = get_symbols(self.default_idx) + # t2eri cls instances for generating the itmd + pi6 = self._registry['misc']['t2eri_6'] + pi7 = self._registry['misc']['t2eri_7'] + pi6 = pi6.expand_itmd if fully_expand else pi6.tensor + pi7 = pi7.expand_itmd if fully_expand else pi7.tensor + # build the itmd + pib = (-Rational(1, 2) * pi6(indices=(i, a, b, c), wrap_result=False) + + pi7(indices=(i, a, b, c), wrap_result=False) + + S.NegativeOne * pi7(indices=(i, a, c, b), wrap_result=False)) + target = (i, a, b, c) + if fully_expand: + pib = ExprContainer( + pib, target_idx=target + ).substitute_contracted().inner + contracted = tuple(sorted( + [s for s in pib.atoms(Index) if s not in target], + key=sort_idx_canonical + )) + else: + contracted = tuple() + return ItmdExpr(pib, target, contracted) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2eriB', indices[:2], indices[2:]) + + +class t2sq(RegisteredIntermediate): + """t2sq intermediate from adcc and libadc.""" + _itmd_type = "misc" + _order = 2 + _default_idx = ("i", "a", "j", "b") + + @cached_member + def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: + i, a, j, b = get_symbols(self.default_idx) + # generate additional contracted indices (1o / 1v) + c, k = get_symbols('ck') + # t2_1 class instance for generating t2_1 amplitudes + t2 = self._registry['t_amplitude']['t2_1'] + t2 = t2.expand_itmd if fully_expand else t2.tensor + # build the intermediate + itmd = ( + t2(indices=(i, k, a, 
c), wrap_result=False) * + t2(indices=(j, k, b, c), wrap_result=False) + ) + assert isinstance(itmd, Expr) + return ItmdExpr(itmd, (i, a, j, b), (k, c)) + + def _build_tensor(self, indices: Sequence[Index]) -> Expr: + return AntiSymmetricTensor('t2sq', indices[:2], indices[2:], 1) + + +def eri(idx: Sequence[str] | Sequence[Index]) -> Expr: + """ + Builds an antisymmetric electron repulsion integral. + Indices may be provided as list of sympy symbols or as string. + """ + idx = get_symbols(idx) + if len(idx) != 4: + raise Inputerror(f'4 indices required to build a ERI. Got: {idx}.') + return AntiSymmetricTensor(tensor_names.eri, idx[:2], idx[2:]) + + +def fock(idx: Sequence[Index] | Sequence[str]) -> Expr: + """ + Builds a fock matrix element. + Indices may be provided as list of sympy symbols or as string. + """ + idx = get_symbols(idx) + if len(idx) != 2: + raise Inputerror('2 indices required to build a Fock matrix element.' + f'Got: {idx}.') + return AntiSymmetricTensor(tensor_names.fock, idx[:1], idx[1:]) + + +def orb_energy(idx: Index | Sequence[str] | Sequence[Index] + ) -> NonSymmetricTensor: + """ + Builds an orbital energy. + Indices may be provided as list of sympy symbols or as string. + """ + idx = get_symbols(idx) + if len(idx) != 1: + raise Inputerror("1 index required to build a orbital energy. Got: " + f"{idx}.") + return NonSymmetricTensor(tensor_names.orb_energy, idx) diff --git a/build/lib/adcgen/logger.py b/build/lib/adcgen/logger.py new file mode 100644 index 0000000..8bf0a58 --- /dev/null +++ b/build/lib/adcgen/logger.py @@ -0,0 +1,63 @@ +from pathlib import Path +import os +import logging +import json + + +logger: logging.Logger = logging.getLogger("adcgen") +_config_file = "logger_config.json" + + +def set_log_level(level: str) -> None: + """Set the level of the adcgen logger.""" + logger.setLevel(level) + + +def _config_logger() -> None: + """ + Config the logger. 
+ The path to a logging configuration JSON file can be provided through the + 'ADCGEN_LOG_CONFIG' environment variable. By default + 'logging_config.json' will be used. + The level of the adcgen logger can additionally be modified through the + 'ADCGEN_LOG_LEVEL' environment variable. The level will be set after + reading the config. + """ + import logging.config + + # load the configuration + config = os.environ.get("ADCGEN_LOG_CONFIG", None) + if config is None: + config = Path(__file__).parent.resolve() / _config_file + else: + config = Path(config).resolve() + if not config.exists: + raise FileNotFoundError(f"logging config file {config} does not exist") + config = json.load(open(config, "r")) + logging.config.dictConfig(config) + # set the print level + level = os.environ.get("ADCGEN_LOG_LEVEL", None) + if level is not None: + logger.setLevel(level) + + +class Formatter(logging.Formatter): + colors = { + "WARNING": "\033[93m", # yellow + "ERROR": "\033[91m", # red + "CRITICAL": "\033[95m" # pink + } + reset_color = "\033[0m" + + def format(self, record: logging.LogRecord) -> str: + # Color the log message + col = self.colors.get(record.levelname, None) + if col is not None: + record.msg = f"{col}{record.msg}{self.reset_color}" + return super().format(record) + + +class DropErrors(logging.Filter): + # Only keep debug and info messages + def filter(self, record: logging.LogRecord) -> bool: + return record.levelno < logging.WARNING diff --git a/build/lib/adcgen/logger_config.json b/build/lib/adcgen/logger_config.json new file mode 100644 index 0000000..d563c13 --- /dev/null +++ b/build/lib/adcgen/logger_config.json @@ -0,0 +1,40 @@ +{ + "version": 1, + "disable_existing_loggers": false, + "formatters": { + "colored": { + "class": "adcgen.logger.Formatter" + } + }, + "filters": { + "droperrors": { + "()": "adcgen.logger.DropErrors" + } + }, + "handlers": { + "stdout": { + "class": "logging.StreamHandler", + "level": "DEBUG", + "filters": [ + "droperrors" + ], + 
"formatter": "colored", + "stream": "ext://sys.stdout" + }, + "stderr": { + "class": "logging.StreamHandler", + "level": "WARNING", + "formatter": "colored", + "stream": "ext://sys.stderr" + } + }, + "loggers": { + "adcgen": { + "level": "INFO", + "handlers": [ + "stdout", + "stderr" + ] + } + } +} \ No newline at end of file diff --git a/build/lib/adcgen/misc.py b/build/lib/adcgen/misc.py new file mode 100644 index 0000000..0916a04 --- /dev/null +++ b/build/lib/adcgen/misc.py @@ -0,0 +1,116 @@ +from collections.abc import Sequence +from functools import wraps +import inspect + + +class Inputerror(ValueError): + pass + + +def cached_member(function): + """ + Decorator for a class method that is called with at least one argument + or keyword argument. THe result is cached in the variable '_function_cache' + of the class instance. + """ + + fname = function.__name__ + + # create the signature of the wrapped function and check that we dont + # have any keyword only arguments in the wrapped function + func_sig = inspect.signature(function) + invalid_arg_types = (inspect.Parameter.KEYWORD_ONLY, + inspect.Parameter.VAR_KEYWORD) + if any(arg.kind in invalid_arg_types for arg in + func_sig.parameters.values()): + raise TypeError("Functions with keyword only arguments are not " + "supported by cached_member.") + + @wraps(function) + def wrapper(self, *args, **kwargs): + # - transform all arguments to positional arguments + # and add not provided default arguments + bound_args: inspect.BoundArguments = ( + func_sig.bind(self, *args, **kwargs) + ) + bound_args.apply_defaults() + assert len(bound_args.kwargs) == 0 + args = bound_args.args[1:] # remove self from the positional arguments + + try: # load/create the cache + fun_cache = self._function_cache[fname] + except AttributeError: + self._function_cache = {} + fun_cache = self._function_cache[fname] = {} + except KeyError: + fun_cache = self._function_cache[fname] = {} + + try: # try to load the data from the cache + return 
fun_cache[args] + except KeyError: + fun_cache[args] = result = function(self, *args) + return result + + return wrapper + + +def validate_input(**kwargs) -> None: + # order, min_order, adc_order, braket, space, lr: single input + # indices, block: 2 strings possible + validate = { + 'order': lambda o: isinstance(o, int) and o >= 0, # int + 'braket': lambda bk: bk in ['bra', 'ket'], # bra/ket + 'space': lambda sp: all(s in ['p', 'h'] for s in sp), + 'indices': lambda idx: all(isinstance(i, str) for i in idx), + 'min_order': lambda o: isinstance(o, int) and o >= 0, # int + 'lr': lambda lr: lr in ['left', 'right'], # left/right + 'block': lambda b: all(validate['space'](sp) for sp in b), + 'adc_order': lambda o: isinstance(o, int) and o >= 0, # int + 'lr_isr': lambda lr: lr in ['left', 'right'], # left/right + } + # braket, lr are exprected as str! + # order, min_order, adc_order are expected as int! + # space, block and indices as list/tuple or ',' separated string + for var, val in kwargs.items(): + if var == 'space': + tpl = transform_to_tuple(val) + if len(tpl) != 1: + raise Inputerror(f'Invalid input for {var}: {val}.') + val = tpl[0] + elif var == 'block': + tpl = transform_to_tuple(val) + if len(tpl) != 2: + raise Inputerror(f'Invalid input for {var}: {val}') + val = tpl + elif var == 'indices': + tpl = transform_to_tuple(val) + if len(tpl) not in [1, 2]: + raise Inputerror(f'Invalid indices input: {val}.') + val = tpl + if not validate[var](val): + raise Inputerror(f'Invalid input for {var}: {val}.') + + +def transform_to_tuple(input: Sequence[str]) -> tuple[str, ...]: + convertor = { + str: lambda x: tuple(i for i in x.split(",")), + list: lambda x: tuple(x), + tuple: lambda x: x + } + conversion = convertor.get(type(input)) + if not conversion: + raise Inputerror(f"{input} of type {type(input)} is not convertable " + "to tuple.") + return conversion(input) + + +class Singleton(type): + """Simple metaclass that implements the Singleton pattern on a class.""" 
+ _instances = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = ( + super(Singleton, cls).__call__(*args, **kwargs) + ) + return cls._instances[cls] diff --git a/build/lib/adcgen/operators.py b/build/lib/adcgen/operators.py new file mode 100644 index 0000000..87b30e6 --- /dev/null +++ b/build/lib/adcgen/operators.py @@ -0,0 +1,213 @@ +from collections.abc import Sequence +from functools import cached_property +from typing import Any + +from sympy import Add, Expr, Rational, Mul, factorial, latex +from sympy.physics.secondquant import Fd, F + +from .misc import cached_member +from .indices import Indices, Index, get_symbols +from .rules import Rules +from .sympy_objects import AntiSymmetricTensor +from .logger import logger +from .tensor_names import tensor_names + + +class Operators: + """ + Constructs operators, like the zeroth and first order Hamiltonian or + arbitrary N-particle operators. + + Parameters + ---------- + variant : str, optional + Defines the partitioning of the Hamiltonian. 
+ (default: the MP Hamiltonian) + """ + def __init__(self, variant: str = "mp"): + self._indices = Indices() + self._variant = variant + + @cached_property + def hamiltonian(self) -> Expr: + """Constructs the full electronic Hamiltonian.""" + h = Add(self.mp_h0()[0], self.mp_h1()[0]) + assert isinstance(h, Expr) + return h + + @cached_property + def h0(self) -> tuple[Expr, Rules | None]: + """Constructs the zeroth order Hamiltonian.""" + if self._variant == 'mp': + return self.mp_h0() + elif self._variant == 're': + return self.re_h0() + else: + raise NotImplementedError( + f"H0 not implemented for {self._variant}" + ) + + @cached_property + def h1(self) -> tuple[Expr, Rules | None]: + """Constructs the first order Hamiltonian.""" + if self._variant == 'mp': + return self.mp_h1() + elif self._variant == 're': + return self.re_h1() + else: + raise NotImplementedError( + f"H1 not implented for {self._variant}" + ) + + @cached_member + def operator(self, n_create: int, n_annihilate: int) -> tuple[Expr, None]: + """ + Constructs an arbitrary second quantized operator placing creation + operators to the left of annihilation operators. + + Parameters + ---------- + n_create : int + The number of creation operators. Placed left of the annihilation + operators. + n_annihilate : int + The number of annihilation operators. Placed right of the creation + operators. 
+ """ + # generate general indices for the operator + idx = self._indices.get_generic_indices( + general=n_create + n_annihilate + ) + idx = idx[("general", "")] + create = idx[:n_create] + annihilate = idx[n_create:] + name = tensor_names.operator + + pref = Rational(1, Mul(factorial(n_create), factorial(n_annihilate))) + d = AntiSymmetricTensor(name, create, annihilate) + op = self.excitation_operator(creation=create, + annihilation=annihilate, + reverse_annihilation=True) + return pref * d * op, None + + def excitation_operator( + self, creation: Sequence[Index] | Sequence[str] | None = None, + annihilation: Sequence[Index] | Sequence[str] | None = None, + reverse_annihilation: bool = True + ) -> Expr: + """ + Creates an arbitrary string of second quantized excitation operators. + Operators are concatenated as [creation * annihilation] + + Parameters + ---------- + creation : Sequence[Index] | Sequence[str], optional + Each of the provided indices is placed on a creation operator. + Operators are concatenated in the provided order: + [i, j] -> Fd(i) * Fd(j). + annihilation : Sequence[Index] | Sequence[str], optional + Each of the provided indices is placed on a annihilation operator. + Operators are concatenated in the provided order: + [i, j] -> F(i) * F(j) + reverse_annihilation : bool, optional + If set, the order of the annihilation operators is reversed, i.e. + [i, j] -> F(j) * F(i) + (default: True). 
+ """ + res = [] + if creation is not None: + res.extend(Fd(s) for s in get_symbols(creation)) + if annihilation is not None: + symbols = get_symbols(annihilation) + if reverse_annihilation: + symbols = reversed(symbols) + res.extend(F(s) for s in symbols) + expr = Mul(*res) + assert isinstance(expr, Expr) + return expr + + @staticmethod + def mp_h0() -> tuple[Expr, None]: + """Constructs the zeroth order MP-Hamiltonian.""" + idx_cls = Indices() + p, q = idx_cls.get_indices("pq")[("general", "")] + f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) + pq = Mul(Fd(p), F(q)) + h0 = Mul(f, pq) + assert isinstance(h0, Expr) + logger.debug(f"H0 = {latex(h0)}") + return h0, None + + @staticmethod + def mp_h1() -> tuple[Expr, None]: + """Constructs the first order MP-Hamiltonian.""" + idx_cls = Indices() + p, q, r, s = idx_cls.get_indices("pqrs")[("general", "")] + # get an occ index for 1 particle part of H1 + occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] + v1 = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) + pq = Mul(Fd(p), F(q)) + v2 = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) + pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) + h1 = Add(Mul(-v1, pq), Rational(1, 4) * v2 * pqsr) + assert isinstance(h1, Expr) + logger.debug(f"H1 = {latex(h1)}") + return h1, None + + @staticmethod + def re_h0() -> tuple[Expr, Rules]: + """Constructs the zeroth order RE-Hamiltonian.""" + idx_cls = Indices() + p, q, r, s = idx_cls.get_indices('pqrs')[("general", "")] + # get an occ index for 1 particle part of H0 + occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] + + f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) + piqi = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) + pqrs = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) + op_pq = Mul(Fd(p), F(q)) + op_pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) + + h0 = Add( + Mul(f, op_pq), -Mul(piqi, op_pq), Rational(1, 4) * pqrs * op_pqsr + ) + assert isinstance(h0, Expr) + logger.debug(f"H0 = 
{latex(h0)}") + # construct the rules for forbidden blocks in H0 + # we are not in a real orbital basis!! -> More canonical blocks + rules = Rules(forbidden_tensor_blocks={ + tensor_names.fock: ('ov', 'vo'), + tensor_names.eri: ('ooov', 'oovv', 'ovvv', 'ovoo', 'vvoo', 'vvov') + }) + return h0, rules + + @staticmethod + def re_h1() -> tuple[Expr, Rules]: + """Constructs the first order RE-Hamiltonian.""" + idx_cls = Indices() + p, q, r, s = idx_cls.get_indices('pqrs')[("general", "")] + # get an occ index for 1 particle part of H0 + occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] + + f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) + piqi = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) + pqrs = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) + op_pq = Mul(Fd(p), F(q)) + op_pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) + + h1 = Add( + Mul(f, op_pq), -Mul(piqi, op_pq), Rational(1, 4) * pqrs * op_pqsr + ) + assert isinstance(h1, Expr) + logger.debug(f"H1 = {latex(h1)}") + # construct the rules for forbidden blocks in H1 + rules = Rules(forbidden_tensor_blocks={ + tensor_names.fock: ['oo', 'vv'], + tensor_names.eri: ['oooo', 'ovov', 'vvvv'] + }) + return h1, rules + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Operators): + return self._variant == other._variant + return False diff --git a/build/lib/adcgen/properties.py b/build/lib/adcgen/properties.py new file mode 100644 index 0000000..41ed670 --- /dev/null +++ b/build/lib/adcgen/properties.py @@ -0,0 +1,423 @@ +from collections.abc import Sequence +from math import factorial + +from sympy import Add, Expr, S, sqrt, sympify + +from .expression import ExprContainer +from .func import gen_term_orders, wicks +from .indices import n_ov_from_space, generic_indices_from_space +from .intermediate_states import IntermediateStates +from .misc import Inputerror, cached_member, transform_to_tuple, validate_input +from .rules import Rules +from .secular_matrix import SecularMatrix +from 
.simplify import simplify + + +class Properties: + """ + Constructs ISR property expressions. + + Parameters + ---------- + l_isr : IntermediateStates + The intermediate states used for the construction of properties. + r_isr : IntermediateStates, optional + Optionally, another IntermediateStates can be passed to the class + allowing for the construction of transition properties between + intermediate states of different ADC variants like PP- or IP-ADC + (default: l_isr). + """ + + def __init__(self, l_isr: IntermediateStates, + r_isr: IntermediateStates | None = None): + assert isinstance(l_isr, IntermediateStates) + assert ( + r_isr is None or isinstance(r_isr, IntermediateStates) + ) + self.l_isr: IntermediateStates = l_isr + self.r_isr: IntermediateStates = self.l_isr if r_isr is None else r_isr + self.l_m: SecularMatrix = SecularMatrix(l_isr) + self.r_m: SecularMatrix = ( + self.l_m if r_isr is None else SecularMatrix(r_isr) + ) + # Check if both ground states are compatible. Currently this only means + # to check that either None ot both have singles enabled. + self.gs = l_isr.gs + if self.gs.singles != self.r_isr.gs.singles: + raise Inputerror("Both ISR need to share the same GS, " + "i.e. neither or both have singles enabled.") + # also check that both isr use the same hamiltonian + if self.l_isr.gs.h != self.r_isr.gs.h: + raise Inputerror("The Operator of left and right isr has to be " + "equal") + self.h = l_isr.gs.h + + def operator(self, order: int, n_create: int, n_annihilate: int, + subtract_gs=True) -> tuple[Expr, Rules | None]: + """ + Constructs an arbitrary n'th-order operator. + + Parameters + ---------- + order : int + The perturbation theoretical order. + n_create : int + The number of creation operators. Placed left of the annihilation + operators. + n_annihilate : int + The number of annihilation operators. Placed right of the + creation operators. 
+ subtract_gs : bool, optional + If set, the n'th-order ground state expectation value of the + corresponding operator is subtracted if the operator string + contains an equal amount of creation and annihilation operators + (otherwise the ground state contribution vanishes). + (Defaults to True) + """ + validate_input(order=order) + + if order == 0: + d, rules = self.h.operator( + n_create=n_create, n_annihilate=n_annihilate + ) + else: + d, rules = S.Zero, None + + if subtract_gs and n_create == n_annihilate: + e0 = self.gs.expectation_value(order=order, n_particles=n_create) + return Add(d, -e0), rules + else: + return d, rules + + @cached_member + def expec_block_contribution(self, order: int, block: Sequence[str], + n_particles: int = 1, + subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th order contribution of an individual block IJ to the + expectation value of the operator + d_{pq...} X_I ^(n) Y_J. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The block of the ADC matrix for which the expectation value + is generated, e.g., 'ph,pphh' for the 1p-1h/2p-2h block. + n_particles : int + The number of creation and annihilation operators in the operator + string. (Defaults to 1.) + subtract_gs : bool, optional + If set, the ground state expectation value of the corresponding + operator is subtracted from the result. 
(Defaults to True) + """ + + block = transform_to_tuple(block) + validate_input(order=order, block=block) + + # generate indices for the block and compute the prefactors for the + # contraction over the block space + left_idx: str = "".join( + s.name for s in generic_indices_from_space(block[0]) + ) + n_ov = n_ov_from_space(block[0]) + left_pref = S.One / sqrt( + factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + + right_idx: str = "".join( + s.name for s in generic_indices_from_space(block[1]) + ) + n_ov = n_ov_from_space(block[1]) + right_pref = S.One / sqrt( + factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + + # build the ADC amplitude vectors + left_ampl = self.l_isr.amplitude_vector(indices=left_idx, lr='left') + right_ampl = self.r_isr.amplitude_vector(indices=right_idx, lr='right') + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + # iterate over all norm*d combinations of n'th order + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # compute d for a given norm (the overall order is split inbetween + # both factors) + orders_d = gen_term_orders( + order=norm_term[1], term_length=3, min_order=0 + ) + expec = S.Zero + for term in orders_d: + op, rules = self.operator(order=term[1], + n_create=n_particles, + n_annihilate=n_particles, + subtract_gs=subtract_gs) + if op is S.Zero: + continue + i1 = (left_pref * right_pref * left_ampl * + self.l_isr.intermediate_state(order=term[0], + space=block[0], + braket='bra', + indices=left_idx) * + op * + self.r_isr.intermediate_state(order=term[2], + space=block[1], + braket='ket', + indices=right_idx) * + right_ampl) + expec += wicks(i1, simplify_kronecker_deltas=True, rules=rules) + res += (norm * expec).expand() + return simplify(ExprContainer(res)).inner + + @cached_member + def expectation_value(self, adc_order: int, n_particles: int = 1, + order: int | None = None, + subtract_gs: bool = True) -> Expr: + """ + 
Constructs the expectation value taking all blocks into account + that are available at the specified order of perturbation theory + in the ADC secular matrix + sum_IJ sum_pq... d_{pq...} X_I Y_J. + Note that also lower order contributions are considered, i.e., the + ADC(0) and ADC(1) expectation values are included in the ADC(2) + expectation value. + + Parameters + ---------- + adc_order : int + The perturbation theoretical order of the ADC scheme for which the + expectation value is generated. + n_particles : int + The number of creation and annihilation operators in the operator + string. (Defaults to 1) + order : int, optional + Only consider contributions of the specified order, e.g., + only the zeroth order contributions of all available blocks in + the ADC(n) matrix. + subtract_gs : bool, optional + If set, the ground state expectation value is subtracted from the + result. (Defaults to True) + """ + validate_input(adc_order=adc_order) + if order is not None: + validate_input(order=order) + # get all blocks that are present in the ADC(n) secular matrix and + # the order through which they are expanded. + left_blocks = self.l_m.block_order(adc_order) + left_blocks = sorted( + left_blocks.items(), + key=lambda tpl: (len(tpl[0][0]), len(tpl[0][1])) + ) + right_blocks = self.r_m.block_order(adc_order) + right_blocks = sorted( + right_blocks, key=lambda bl: (len(bl[0]), len(bl[1])) + ) + # iterate over the blocks, replacing the second space in each block + # with the corresponding space of block_2 from isr_2 + # This only works for python3.7 or newer, because it assumes that + # the two block dicts are in the same order -> which is only + # guaranteed from python3.7 + res = sympify(0) + for i, (l_block, max_order) in enumerate(left_blocks): + r_block = right_blocks[i] + # block is not expanded through the given order + if order is not None and max_order < order: + continue + # combine the two spaces to build the correct block with mixed + # ADC variant spaces.
+ block = (l_block[0], r_block[1]) + + if order is None: + orders_to_gen = list(range(max_order + 1)) + else: + orders_to_gen = [order] + + for o in orders_to_gen: + res += self.expec_block_contribution( + order=o, block=block, n_particles=n_particles, + subtract_gs=subtract_gs + ) + assert isinstance(res, Expr) + return res + + @cached_member + def trans_moment_space(self, order: int, space: str, + n_create: int | None = None, + n_annihilate: int | None = None, + lr_isr: str = 'left', + subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th-order contribution to the transition moment + for the desired excitation space and operator + d_pq... X_I ^(n). + + Parameters + ---------- + order : int + The perturbation theoretical order. + space : str + The excitation space, e.g., 'ph' or 'pphh' for singly or doubly + excited configurations, respectively. + n_create : int, optional + The number of creation operators in the operator string. + By default, the operator string with the lowest amount of + creation and annihilation operators is constructed for which in + general a non-zero result can be expected, e.g., 'ca' and 'a' + for PP- and IP-ADC, respectively. + n_annihilate : int, optional + The number of annihilation operators in the operator string. + By default, the operator string with the lowest amount of + creation and annihilation operators is constructed for which in + general a non-zero result can be expected, e.g., 'ca' and 'a' + for PP- and IP-ADC, respectively. + l_isr : str, optional + Controls whether the left or right 'IntermediateStates' instance + is used to construct the transition moment contribution. + (Defaults to 'left') + subtract_gs : bool, optional + If set, ground state contributions are subtracted if the + operator contains an equal amount of creation and annihilation + operators. 
(Defaults to True) + """ + # Subtraction of the ground state contribution is probably not + # necessary, because all terms cancel (at least in second order + # Singles PP-ADC). For all other ADC variants (IP/EA...) the ground + # state expectation value is Zero, because the number of creation and + # annihilation operators will never be equal. + # Give the option anyway, because I'm not sure whether it will be + # required at higher orders for PP-ADC + + validate_input(order=order, space=space, lr_isr=lr_isr) + + # - generate indices for the ISR state + n_ov = n_ov_from_space(space) + idx = "".join(s.name for s in generic_indices_from_space(space)) + + # - map lr on the correct intermediate_states instance + if lr_isr == "left": + isr = self.l_isr + else: + assert lr_isr == "right" + isr = self.r_isr + + # - if no operator string is given -> generate a default, i.e. + # 'a' for IP- / 'ca' for PP-ADC + if n_create is None and n_annihilate is None: + n_create = isr.min_space[0].count('p') + n_annihilate = isr.min_space[0].count('h') + elif n_create is None: + n_create = 0 + elif n_annihilate is None: + n_annihilate = 0 + assert isinstance(n_create, int) and isinstance(n_annihilate, int) + + # - generate amplitude vector and prefactor for the summation + ampl = isr.amplitude_vector(indices=idx, lr='left') + pref = S.One / sqrt(factorial(n_ov["occ"]) * factorial(n_ov["virt"])) + + # - import the gs wavefunction (possible here) + mp = {o: self.gs.psi(order=o, braket='ket') for o in range(order + 1)} + + # iterate over all norm*d combinations + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + for norm_term in orders: + norm = self.gs.norm_factor(norm_term[0]) + if norm is S.Zero: + continue + # compute d for a given norm factor + orders_d = gen_term_orders( + order=norm_term[1], term_length=3, min_order=0 + ) + trans_mom = S.Zero + for term in orders_d: + op, rules = self.operator( + order=term[1], n_create=n_create, + 
n_annihilate=n_annihilate, subtract_gs=subtract_gs + ) + if op is S.Zero: + continue + i1 = (pref * ampl + + isr.intermediate_state(order=term[0], space=space, + braket='bra', indices=idx) * + op * + mp[term[2]]) + trans_mom += wicks(i1, simplify_kronecker_deltas=True, + rules=rules) + res += (norm * trans_mom).expand() + return simplify(ExprContainer(res)).inner + + @cached_member + def trans_moment(self, adc_order: int, n_create: int | None = None, + n_annihilate: int | None = None, order: int | None = None, + lr_isr: str = 'left', subtract_gs: bool = True) -> Expr: + """ + Constructs the ADC(n) transition moment + sum_I sum_pq... d_pq... X_I + considering all available configurations. + Note that also lower order contributions are considered, i.e., + the ADC(0) and ADC(1) contributions are included in the ADC(2) + transition moments. + + Parameters + ---------- + adc_order : int + The perturbation theoretical order of the ADC scheme. + n_create : int, optional + The number of creation operators in the operator string. + By default, the operator string with the lowest amount of + creation and annihilation operators is constructed for which in + general a non-zero result can be expected, e.g., 'ca' and 'a' + for PP- and IP-ADC, respectively. + n_annihilate : int, optional + The number of annihilation operators in the operator string. + By default, the operator string with the lowest amount of + creation and annihilation operators is constructed for which in + general a non-zero result can be expected, e.g., 'ca' and 'a' + for PP- and IP-ADC, respectively. + order : int, optional + Only consider contributions of the specified order, e.g., + only the zeroth order contributions of all available configurations + in the ADC(n) matrix. + lr_isr : str, optional + Controls whether the left or right 'IntermediateStates' instance + is used to construct the transition moment.
+ (Defaults to 'left') + subtract_gs : bool, optional + If set, the ground state contributions are subtracted if the + operator contains an equal amount of creation and annihilation + operators. (Defaults to True) + """ + validate_input(lr_isr=lr_isr) + + # obtain the maximum order through which all the spaces are expanded + # in the secular matrix + if lr_isr == "left": + m = self.l_m + else: + assert lr_isr == "right" + m = self.r_m + max_orders = m.max_ptorder_spaces(adc_order) + + res = S.Zero + for space, max_order in max_orders.items(): + if order is None: + orders_to_gen = list(range(max_order + 1)) + else: + # the space is not expanded through the desired order + if max_order < order: + continue + orders_to_gen = [order] + + for o in orders_to_gen: + res += self.trans_moment_space( + order=o, space=space, n_create=n_create, + n_annihilate=n_annihilate, lr_isr=lr_isr, + subtract_gs=subtract_gs + ) + assert isinstance(res, Expr) + return res diff --git a/build/lib/adcgen/reduce_expr.py b/build/lib/adcgen/reduce_expr.py new file mode 100644 index 0000000..6d301cc --- /dev/null +++ b/build/lib/adcgen/reduce_expr.py @@ -0,0 +1,330 @@ +from collections.abc import Sequence +from collections import defaultdict +import itertools +import time + +from sympy import S + +from .eri_orbenergy import EriOrbenergy +from .expression import ExprContainer, TermContainer +from .indices import Index +from .logger import logger +from .symmetry import Permutation + + +def reduce_expr(expr: ExprContainer) -> ExprContainer: + """ + Fully expands all available intermediates in an expression such that the + expression only exists of orbital energies and electron repulsion + integrals. The expanded expression is then simplified to collect as much + terms as possible. + The implementation assumes a real orbital basis. 
+ """ + assert isinstance(expr, ExprContainer) + if not expr.real: + raise NotImplementedError("Intermediates only implemented for a real " + "orbital basis.") + expr = expr.expand() + + # check if we have anything to do + if expr.inner.is_number: + return expr + + logger.info("".join( + ['\n', '#'*80, '\n', ' '*25, "REDUCING EXPRESSION\n", '#'*80, '\n'] + )) + + # 1) Insert the definitions of all defined intermediates in the expr + # and reduce the number of terms by factoring the ERI in each term. + start = time.perf_counter() + logger.info("Expanding intermediates... ") + expanded_expr: list[ExprContainer] = [] + for term_i, term in enumerate(expr.terms): + logger.info( + "#"*80 + f"\nExpanding term {term_i+1} of {len(expr)}: {term}... ") + term = term.expand_intermediates() + assert isinstance(term, ExprContainer) + term = term.expand() + logger.info(f"into {len(term)} terms.\nCollecting terms.... ") + term = factor_eri_parts(term) + logger.info('-'*80) + for j, equal_eri in enumerate(term): + # minimize the contracted indices + # each term in eri should hold exactly the same indices + # -> build substitutions once and apply to the whole expr + sub = equal_eri.terms[0].substitute_contracted( + apply_substitutions=False + ) + assert isinstance(sub, list) + sub_equal_eri = equal_eri.subs(sub) + # ensure that we are not creating a complete mess + if sub_equal_eri.inner is S.Zero and equal_eri.inner is not S.Zero: + raise ValueError(f"Invalid substitutions {sub} for " + f"{equal_eri}") + term[j] = sub_equal_eri + logger.info(f"\n{j+1}: {EriOrbenergy(sub_equal_eri.terms[0]).eri}") + logger.info("-"*80 + f"\nFound {len(term)} different ERI Structures") + expanded_expr.extend(term) + del expr + # 2) Now try to factor the whole expression + # Only necessary to consider the first term of each of the expressions + # in the list (they all have same ERI) + # -> build new term list and try to factor ERI + Denominator + # -> simplify the orbital energy fraction in the 
resulting terms + logger.info("\nExpanding and ERI factoring took " + f"{time.perf_counter() - start:.2f}s\n") + logger.info("".join(['#'*80, "\n", '#'*80])) + start = time.perf_counter() + logger.info("\nSumming up all terms...\n" + "#"*80) + unique_terms = [unique_expr.terms[0] for unique_expr in expanded_expr] + logger.info("Factoring ERI...") + unique_compatible_eri = find_compatible_eri_parts(unique_terms) + n = 1 + n_eri_denom = 0 + factored = 0 + # - factor eri again + for i, compatible_eri_subs in unique_compatible_eri.items(): + temp = expanded_expr[i] + eri = EriOrbenergy(expanded_expr[i].terms[0]).eri + logger.info("\n" + "#"*80) + logger.info(f"ERI {n} of {len(unique_compatible_eri)}: {eri}") + n += 1 + for other_i, sub in compatible_eri_subs.items(): + temp += expanded_expr[other_i].subs(sub) + + # collected all terms with equal ERI -> factor denominators + eri_sym = eri.symmetry(only_contracted=True) + logger.info("\nFactoring Denominators...") + for j, term in enumerate(factor_denom(temp, eri_sym=eri_sym)): + term = term.factor() + if len(term) != 1: + raise RuntimeError("Expected the sub expression to have " + "identical Denoms and ERI, which should " + "allow factorization to a single term:\n" + f"{term}") + # symmetrize the numerator and cancel the orbital energy fraction + term = EriOrbenergy(term) + logger.info("-"*80 + f"\nERI/Denom {j}: {term}\n") + logger.info("Permuting numerator... 
") + term = term.permute_num(eri_sym=eri_sym) + logger.info(f"Term now reads:\n{term}\n") + logger.info("Cancel orbital energy fraction...") + term = term.cancel_orb_energy_frac() + logger.info("Done.") + + if not all(EriOrbenergy(t).num.inner.is_number + for t in term.terms): + logger.warning("\nNUMERATOR NOT CANCELLED COMPLETELY:") + for t in term.terms: + logger.warning(EriOrbenergy(t)) + + factored += term + n_eri_denom += 1 + del expanded_expr # not up to date anymore + assert isinstance(factored, ExprContainer) + logger.info("#"*80 + + "\n\nFactorizing and cancelling the orbital energy fractions " + f"in {n_eri_denom} terms took " + f"{time.perf_counter() - start:.2f}s.\n" + f"Expression consists now of {len(factored)} terms.") + + # 3) Since we modified some denominators by canceling the orbital energy + # fractions, try to factor eri and denominator again + logger.info("#"*80 + "\n\nFactoring again...") + result = 0 + for term in itertools.chain.from_iterable( + factor_denom(sub_expr) for sub_expr in factor_eri_parts(factored) + ): + # factor the resulting term again, because we can have something like + # 2/(4*a + 4*b) * X - 1/(2 * (a + b)) * X + result += term.factor() + assert isinstance(result, ExprContainer) + logger.info(f"Done. {len(result)} terms remaining.\n\n" + "#"*80) + return result + + +def factor_eri_parts(expr: ExprContainer) -> list[ExprContainer]: + """ + Finds compatible remainder (eri) parts of an expression and collects + the terms in subexpressions. + + Returns + list[ExprContainer] + List of subexpressions, where each subexpression contains terms with + equal eri parts. 
+ """ + + if len(expr) == 1: # trivial case + return [expr] + + terms = expr.terms + ret: list[ExprContainer] = [] + for i, compatible_eri_subs in find_compatible_eri_parts(terms).items(): + temp = ExprContainer(terms[i].inner, **expr.assumptions) + for other_i, sub in compatible_eri_subs.items(): + temp += terms[other_i].subs(sub) + ret.append(temp) + return ret + + +def find_compatible_eri_parts( + term_list: Sequence[TermContainer] + ) -> dict[int, dict[int, list[tuple[Index, Index]]]]: + """ + Determines the necessary index substitutions to make the remainder (eri) + parts of terms equal to each other - so they can be factored easily. + Does not modify the terms, but returns a dict that connects the index of + the terms with a substitution list. + """ + from .simplify import find_compatible_terms + + if len(term_list) == 1: # trivial: only a single eri + return {0: {}} + + # dont use EriOrbenergy class, but rather only do whats necessary to + # extract the eri part of the terms + eri_parts: list[TermContainer] = [] + for term in term_list: + assumptions = term.assumptions + assumptions["target_idx"] = term.target + eris = ExprContainer(1, **assumptions) + for o in term.objects: + if not o.inner.is_number and not o.contains_only_orb_energies: + eris *= o + assert len(eris) == 1 + eri_parts.append(eris.terms[0]) + return find_compatible_terms(eri_parts) + + +def factor_denom(expr: ExprContainer, + eri_sym: dict[tuple[Permutation, ...], int] | None = None + ) -> list[ExprContainer]: + """ + Finds compatible orbital energy denominators in an expression with the + restriction that the necessary index permutations do not modify the + remainder (eri) part of the terms. + + Parameters + ---------- + expr : ExprContainer + Expression to find compatible denominators in. + eri_sym : dict, optional + The symmetry of the eri part of the terms. Warning: if provided, all + terms in the expression are assumed to have the same eri symmetry! 
+ + Returns + list[ExprContainer] + List of subexpressions, where each subexpression contains terms with + equal orbital energy denominators. + """ + + if len(expr) == 1: # trivial case: single term + return [expr] + + terms: tuple[TermContainer, ...] = expr.terms + compatible_denoms = find_compatible_denom(terms, eri_sym=eri_sym) + ret: list[ExprContainer] = [] + for i, compatible_denom_perms in compatible_denoms.items(): + temp = ExprContainer(terms[i].inner, **expr.assumptions) + for other_i, perms in compatible_denom_perms.items(): + temp += terms[other_i].permute(*perms) + ret.append(temp) + return ret + + +def find_compatible_denom( + terms: Sequence[TermContainer], + eri_sym: dict[tuple[Permutation, ...], int] | None = None + ) -> dict[int, dict[int, tuple[Permutation, ...]]]: + """ + Determines the necessary index substitutions to make the orbital energy + denominators of the terms equal to each other - so they can be factored + easily. Only permutations that do not change the remainder (eri) part of + the terms are considered. + Does not modify the terms but returns a dict that connects the index of + the terms with a substitution list. + + Parameters + ---------- + terms : Sequence[TermContainer] + List of terms to find compatible orbital energy denominators. + eri_sym : dict, optional + The symmetry of the eri part of the terms. Warning: if provided, all + terms are assumed to have the same eri symmetry! 
+ """ + if len(terms) == 1: # trivial case: single term + return {0: {}} + + terms_imported: list[EriOrbenergy] = [ + EriOrbenergy(term).canonicalize_sign(only_denom=True) + for term in terms + ] + + # split the terms according to length and and number of denominator + # brackets + filtered_terms = defaultdict(list) + for term_i, term in enumerate(terms_imported): + filtered_terms[term.denom_description()].append(term_i) + + ret: dict[int, dict[int, tuple[Permutation, ...]]] = {} + matched: set[int] = set() + permutations: dict[int, tuple[tuple[Permutation, ...], ...]] = {} + for term_idx_list in filtered_terms.values(): + # check which denominators are already equal + identical_denom: dict[int, list[int]] = {} + for i, term_i in enumerate(term_idx_list): + if term_i in matched: + continue + term: EriOrbenergy = terms_imported[term_i] + identical_denom[term_i] = [] + for other_i in range(i+1, len(term_idx_list)): + other_term_i = term_idx_list[other_i] + if other_term_i in matched: + continue + other_term = terms_imported[other_term_i] + if term.denom.inner == other_term.denom.inner: + identical_denom[term_i].append(other_term_i) + matched.add(other_term_i) + + if len(identical_denom) == 1: # all denoms are equal + term_i, matches = identical_denom.popitem() + ret[term_i] = {other_term_i: tuple() for other_term_i in matches} + continue + + # try to match more denominators by applying index permutations that + # satisfy: P_pq ERI = +- ERI AND P_pq Denom != +- Denom + identical_denom_list = list(identical_denom.items()) + del identical_denom + for i, (term_i, matches) in enumerate(identical_denom_list): + if term_i in matched: + continue + ret[term_i] = {} + for other_term_i in matches: # add all identical denominators + ret[term_i][other_term_i] = tuple() + + denom = terms_imported[term_i].denom.inner + for other_i in range(i+1, len(identical_denom_list)): + other_term_i, other_matches = identical_denom_list[other_i] + if other_term_i in matched: + continue + + 
other_term: EriOrbenergy = terms_imported[other_term_i] + other_denom: ExprContainer = other_term.denom + + # find all valid permutations + if other_term_i not in permutations: + permutations[other_term_i] = tuple( + perms for perms, factor in + other_term.denom_eri_sym(eri_sym=eri_sym, + only_contracted=True).items() + if factor is None + ) + for perms in permutations[other_term_i]: + # found a permutation! + if denom == other_denom.copy().permute(*perms).inner: + ret[term_i][other_term_i] = perms + for match in other_matches: + ret[term_i][match] = perms + matched.add(other_term_i) + break + return ret diff --git a/build/lib/adcgen/resolution_of_identity.py b/build/lib/adcgen/resolution_of_identity.py new file mode 100644 index 0000000..08061e9 --- /dev/null +++ b/build/lib/adcgen/resolution_of_identity.py @@ -0,0 +1,71 @@ +from .expression import ExprContainer +from .sympy_objects import SymmetricTensor +from .tensor_names import tensor_names +from .indices import Indices + + +def apply_resolution_of_identity(expr: ExprContainer, + symmetric: bool = True) -> ExprContainer: + """ + Applies the Resolution of Identity approximation (RI, sometimes also + called density fitting, DF) to an expression. This implies that every + spatial ERI is replaced by its factorised form. Two types of factorisation + are supported: symmetric and asymmetric. In the symmetric decomposition, + a spatial ERI is approximated as: + + (pq | rs) ~ B^P_{pq} B^P_{rs} + B^P_{pq} = (P | Q)^{-1/2} (Q | pq) + + This decomposition is the default. In the asymmetric factorisation, the + same spatial ERI is approximated as: + + (pq | rs) ~ C^P_{pq} (P | rs) + C^P_{pq} = (P | Q)^{-1} (Q | pq) + + Note that the RI approximation is only meaningful on spatial ERIs. + Therefore, this routine will crash and exit if the given expression has + not been spin-integrated before. All RI indices receive an alpha spin + by default. + + Args: + expr : ExprContainer + The expression to which the RI approximation is applied.
+ symmetric : bool, optional + If true, the symmetric factorisation variant is employed. + If false, the asymmetric factorisation variant is employed instead. + """ + + resolved_expr = 0 + + # We iterate over all terms in the expression and apply RI individually + for term in expr.terms: + # Check if the term is spin-integrated + assert ("n" not in "".join([o.spin for o in term.objects])) + # Check that no antisymmetric ERIs remain + assert (tensor_names.eri not in + ",".join([o.name for o in term.objects])) + idx_cls = Indices() + + for object in term.objects: + # Replace spatial ERIs + if object.name == tensor_names.coulomb: + # Extract indices + lower = object.idx[0:2] + upper = object.idx[2:4] + ri_idx = idx_cls.get_generic_indices(ri_a=1)[("ri", "a")] + + if symmetric: + # v_pqrs = B^P_pq B^P_rs + ri_expr = (SymmetricTensor(tensor_names.ri_sym, + (ri_idx,), lower) + * SymmetricTensor(tensor_names.ri_sym, + (ri_idx,), upper)) + else: + ri_expr = (SymmetricTensor(tensor_names.ri_asym_eri, + (ri_idx,), upper) + * SymmetricTensor(tensor_names.ri_asym_factor, + (ri_idx,), lower)) + term.subs(object, ri_expr) + + resolved_expr += term + return resolved_expr diff --git a/build/lib/adcgen/rules.py b/build/lib/adcgen/rules.py new file mode 100644 index 0000000..e2f22c5 --- /dev/null +++ b/build/lib/adcgen/rules.py @@ -0,0 +1,65 @@ +from collections.abc import Sequence +from typing import Any + +from .expression import ExprContainer + + +class Rules: + """ + Rules to apply to expressions. + + Parameters + ---------- + forbidden_tensor_blocks : dict[str, Sequence[str]], optional + Tensor blocks to remove from an expression, i.e., only allow + a certain subset of blocks in the expression. A dictionary of the form + {tensor_name: [block1, block2, ...]} + is expected. 
+ """ + + def __init__( + self, + forbidden_tensor_blocks: dict[str, Sequence[str]] | None = None): + if forbidden_tensor_blocks is None: + forbidden_tensor_blocks = {} + self._forbidden_blocks: dict[str, Sequence[str]] = ( + forbidden_tensor_blocks + ) + + def apply(self, expr: ExprContainer) -> ExprContainer: + """Applies the rules to the provided expression.""" + assert isinstance(expr, ExprContainer) + if self.is_empty: # nothing to do + return expr + + res = ExprContainer(0, **expr.assumptions) + for term in expr.terms: + # remove the forbidden blocks of tensors + if any(obj.name in self._forbidden_blocks + and obj.space in self._forbidden_blocks[obj.name] + for obj in term.objects): + continue + res += term + return res + + @property + def is_empty(self) -> bool: + return not bool(self._forbidden_blocks) + + def __eq__(self, other: "Rules | Any") -> bool: + if not isinstance(other, Rules): + return False + + empty, other_empty = self.is_empty, other.is_empty + if empty and other_empty: # both are empty + return True + elif empty or other_empty: # only self or other is empty + return False + + # both not empty -> compare forbidden blocks (keys and values) + if self._forbidden_blocks.keys() != other._forbidden_blocks.keys(): + return False + if any(sorted(v) != sorted(other._forbidden_blocks[k]) + for k, v in self._forbidden_blocks.items()): + return False + return True diff --git a/build/lib/adcgen/secular_matrix.py b/build/lib/adcgen/secular_matrix.py new file mode 100644 index 0000000..d374db1 --- /dev/null +++ b/build/lib/adcgen/secular_matrix.py @@ -0,0 +1,436 @@ +from collections.abc import Sequence +from math import factorial + +from sympy import Add, Expr, Mul, S, sqrt + +from .expression import ExprContainer +from .func import gen_term_orders, wicks, evaluate_deltas +from .groundstate import GroundState +from .indices import ( + repeated_indices, Indices, generic_indices_from_space, n_ov_from_space +) +from .intermediate_states import 
IntermediateStates +from .misc import Inputerror, cached_member, transform_to_tuple, validate_input +from .operators import Operators +from .rules import Rules +from .simplify import simplify + + +class SecularMatrix: + """ + Constructs expressions for the ADC secular matrix M. + + Parameters + ---------- + isr : IntermediateStates + The intermediate states the secular matrix is represented in. + """ + def __init__(self, isr: IntermediateStates): + assert isinstance(isr, IntermediateStates) + self.isr: IntermediateStates = isr + self.gs: GroundState = isr.gs + self.h: Operators = isr.gs.h + self.indices: Indices = Indices() + + def hamiltonian(self, order: int, subtract_gs: bool + ) -> tuple[Expr, Rules | None]: + """Constructs the n'th-order shifted Hamiltonian operator.""" + if order == 0: + h, rules = self.h.h0 + elif order == 1: + h, rules = self.h.h1 + else: + assert order > 0 + h, rules = S.Zero, None + if subtract_gs: + return Add(h, -self.gs.energy(order)), rules + else: + return h, rules + + @cached_member + def precursor_matrix_block(self, order: int, block: Sequence[str], + indices: Sequence[str], + subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th order contribution to a secular matrix block in + the basis of the precursor states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The block of the secular matrix, e.g. "ph,pphh" for the + 1p-1h/2p-2h coupling block. + indices : Sequence[str] + The indices of the matrix block. + subtract_gs : bool, optional + Whether ground state contrubitions should be subtracted + (default: True). 
+ """ + + block = transform_to_tuple(block) + indices = transform_to_tuple(indices) + validate_input(order=order, block=block, indices=indices) + if len(indices) != 2: + raise Inputerror("Precursor matrix requires two index strings.") + + if repeated_indices(indices[0], indices[1]): + raise Inputerror("Found repeating index in bra and ket.") + bra_space, ket_space = block + bra_idx, ket_idx = indices + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + # 1) iterate through all combinations of norm_factor*M^# + for (norm_order, matrix_order) in orders: + norm = self.gs.norm_factor(norm_order) + if norm is S.Zero: + continue + # 2) construct M^# for a given norm_factor + # the overall order is split between the norm factor and M^# + orders_M = gen_term_orders( + order=matrix_order, term_length=3, min_order=0 + ) + matrix = S.Zero + for (bra_order, op_order, ket_order) in orders_M: + operator, rules = self.hamiltonian(op_order, subtract_gs) + if operator == 0: + continue + itmd = Mul( + self.isr.precursor(order=bra_order, space=bra_space, + braket='bra', indices=bra_idx), + operator, + self.isr.precursor(order=ket_order, space=ket_space, + braket='ket', indices=ket_idx) + ) + itmd = wicks(itmd, simplify_kronecker_deltas=True, rules=rules) + matrix += itmd + # evaluate_deltas should not be necessary here, because norm only + # contains contracted indices + res += (norm * matrix).expand() + return simplify(ExprContainer(res)).inner + + @cached_member + def isr_matrix_block(self, order: int, block: Sequence[str], + indices: Sequence[str], + subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th order contribution to a secular matrix block in + the basis of the intermediate states. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The block of the secular matrix, e.g. "ph,pphh" for the + 1p-1h/2p-2h coupling block. + indices : Sequence[str] + The indices of the matrix block. 
+ subtract_gs : bool, optional + Whether ground state contrubitions should be subtracted + (default: True). + """ + + block = transform_to_tuple(block) + indices = transform_to_tuple(indices) + validate_input(order=order, block=block, indices=indices) + if len(indices) != 2: + raise Inputerror("ISR matrix requires 2 index strings.") + + if repeated_indices(indices[0], indices[1]): + raise Inputerror("Found a repeating index in bra and ket.") + bra_space, ket_space = block + bra_idx, ket_idx = indices + + orders = gen_term_orders(order=order, term_length=2, min_order=0) + res = S.Zero + # 1) iterate through all combinations of norm_factor*M + for (norm_order, matrix_order) in orders: + norm = self.gs.norm_factor(norm_order) + if norm is S.Zero: + continue + # 2) construct M for a given norm_factor + # the overall order is split between the norm_factor and M + orders_M = gen_term_orders( + order=matrix_order, term_length=3, min_order=0 + ) + matrix = S.Zero + for (bra_order, op_order, ket_order) in orders_M: + operator, rules = self.hamiltonian(op_order, subtract_gs) + if operator == 0: + continue + itmd = Mul( + self.isr.intermediate_state(order=bra_order, + space=bra_space, + braket='bra', + indices=bra_idx), + operator, + self.isr.intermediate_state(order=ket_order, + space=ket_space, + braket='ket', + indices=ket_idx) + ) + itmd = wicks(itmd, simplify_kronecker_deltas=True, rules=rules) + matrix += itmd + # evaluate deltas should not be necessary here + res += (norm * matrix).expand() + return simplify(ExprContainer(res)).inner + + @cached_member + def mvp_block_order(self, order: int, space: str, block: Sequence[str], + indices: str, subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th-order contribution of a secular matrix block to + the matrix vector product + r_{I} = M_{I,J} Y_(J). + + Parameters + ---------- + order : int + The perturbation theoretical order. 
+ space : str + The excitation space of the result vector of the matrix vector + product, e.g., "ph" if the contribution to the 1p-1h MVP + is constructed. + block : Sequence[str] + The block of the secular matrix, e.g. "ph,pphh" for the + 1p-1h/2p-2h coupling block. + indices : str + The indices of the result vector r of the matrix vector product. + subtract_gs : bool, optional + Whether ground state contributions should be subtracted + (default: True). + """ + space_tpl = transform_to_tuple(space) + block = transform_to_tuple(block) + indices_tpl = transform_to_tuple(indices) + validate_input(order=order, space=space_tpl, block=block, + indices=indices_tpl) + if len(indices_tpl) != 1: + raise Inputerror(f"Invalid index input for MVP: {indices}") + space = space_tpl[0] + indices = indices_tpl[0] + del space_tpl, indices_tpl + if space != block[0]: + raise Inputerror(f"The desired MVP space {space} has to match " + f"the bra space of the secular matrix block: " + f"{block}.") + + # generate additional indices for the ket state of the secular matrix + idx: str = "".join( + s.name for s in generic_indices_from_space(block[1]) + ) + + # construct the secular matrix block + m = self.isr_matrix_block( + order=order, block=block, indices=(indices, idx), + subtract_gs=subtract_gs + ) + + # generate the amplitude vector + y = self.isr.amplitude_vector(indices=idx, lr="right") + + # Lifting index restrictions leads to two prefactors + # p = 1/sqrt(n_o! * n_v!), in order to keep the amplitude vector and + # the resulting mvp vector normalized! + # Note, that n_o and n_v might differ for both amplitudes, leading to + # generally different prefactors p. + # In order to keep both vectors normalized they are each multiplied by + # a factor p, i.e., a factor p is 'hidden' in both vectors. 
+ + # - To keep the equality r = M * Y we also have to multiply the right + # hand side of the equation with p if we multiply r with p + n_ov = n_ov_from_space(space) + prefactor_mvp = S.One / sqrt( + factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + + # - lifting the sum restrictions leads to a prefactor of p ** 2. + # However, p is hidden inside the amplitude vector -> only p present + # in the MVP equations + n_ov = n_ov_from_space(block[1]) + prefactor_ampl = S.One / sqrt( + factorial(n_ov["occ"]) * factorial(n_ov["virt"]) + ) + + return evaluate_deltas( + (prefactor_mvp * prefactor_ampl * m * y).expand() + ) + + @cached_member + def mvp(self, adc_order: int, space: str, indices: str, + order: int | None = None, subtract_gs: bool = True) -> Expr: + """ + Constructs the matrix vector product + r_{I} = sum_{J} M_{I,J} Y_{J} + for a given excitation space considering all available ADC(n) + secular matrix blocks. + + Parameters + ---------- + adc_order : int + The perturbation theoretical order the ADC(n) scheme. + space : str + The excitation space of the result vector of the matrix vector + product, e.g., "ph" for the 1p-1h MVP. + order : int, optional + Only consider contributions of the provided order, e.g., + only the zeroth order contributions of all + ADC(n) secular matrix blocks that contribute to the desired + MVP (default: None). + subtract_gs : bool, optional + If set, ground state contributions are subtracted (default: True). 
+ """ + # validate the input parameters + space_tpl = transform_to_tuple(space) + indices_tpl = transform_to_tuple(indices) + validate_input( + adc_order=adc_order, space=space_tpl, indices=indices_tpl + ) + if order is not None: + validate_input(order=order) + if len(indices_tpl) != 1: + raise Inputerror(f"Invalid indices for MVP: {indices}") + space, indices = space_tpl[0], indices_tpl[0] + del space_tpl, indices_tpl + # check that the space is valid for the current adc variant + if not self.isr.validate_space(space): + raise Inputerror(f"The space {space} is not valid for the given " + f"adc variant {self.isr.variant}.") + # and that the space is present at the desired adc_order + if space not in self.max_ptorder_spaces(adc_order): + raise Inputerror(f"The space {space} is not present in " + f"{self.isr.variant}-ADC({adc_order})") + + # add up all blocks that contribute to the given mvp + mvp = S.Zero + for block, max_order in self.block_order(adc_order).items(): + if space != block[0] or (order is not None and max_order < order): + continue + if order is None: # compute all contributions of the block + for o in range(max_order + 1): + mvp += self.mvp_block_order( + order=o, space=space, block=block, indices=indices, + subtract_gs=subtract_gs + ) + else: # only compute contributions of the specified order + mvp += self.mvp_block_order( + order=order, space=space, block=block, indices=indices, + subtract_gs=subtract_gs + ) + assert isinstance(mvp, Expr) + return mvp + + @cached_member + def expectation_value_block_order(self, order: int, + block: Sequence[str], + subtract_gs: bool = True) -> Expr: + """ + Constructs the n'th-order contribution of a secular matrix block + to the energy expectation value. + + Parameters + ---------- + order : int + The perturbation theoretical order. + block : Sequence[str] + The block of the secular matrix. + subtract_gs : bool, optional + If set, ground state contributions are subtracted (default: True). 
+ """ + block = transform_to_tuple(block) + validate_input(order=order, block=block) + + # generate indices for the mvp + mvp_idx: str = "".join( + s.name for s in generic_indices_from_space(block[0]) + ) + # compute the MVP + mvp = self.mvp_block_order( + order, space=block[0], block=block, indices=mvp_idx, + subtract_gs=subtract_gs + ) + # generate the left amplitude vector + left = self.isr.amplitude_vector(mvp_idx, lr='left') + # call simplify -> symmetry of left amplitude vector might reduce + # the number of terms + # prefactors: I think there is no need for any further prefactors + # E = 1/sqrt(l) * 1/sqrt(r) sum_I,J X_I M_I,J Y_J + # -> already included in the mvp function + return simplify(ExprContainer(Mul(left, mvp))).inner + + @cached_member + def expectation_value(self, adc_order: int, order: int | None = None, + subtract_gs: bool = True) -> Expr: + """ + Constructs the ADC(n) energy expectation value considering all + available secular matrix blocks. + + Parameters + ---------- + adc_order : int + The perturbation theoretical order of the ADC(n) scheme. + order : int, optional + Only consider contributions of the provided order, e.g., + only the zeroth order contributions of all + ADC(n) secular matrix (default: None). + subtract_gs : bool, optional + If set, ground state contributions are subtracted (default: True). + """ + expec = S.Zero + for block, max_order in self.block_order(adc_order).items(): + # is the mvp expanded through the desired order? + # e.g. 
ADC(4) S -> 4 // D -> 3 // T -> 2 + if order is not None and max_order < order: + continue + if order is None: # compute all contributions of the block + for o in range(max_order + 1): + expec += self.expectation_value_block_order( + order=o, block=block, subtract_gs=subtract_gs + ) + else: # only compute contributions of the specified order + expec += self.expectation_value_block_order( + order=order, block=block, subtract_gs=subtract_gs + ) + # it should not be possible to simplify any further here, because left + # and right amplitude vector have different names + assert isinstance(expec, Expr) + return expec + + def max_ptorder_spaces(self, order: int) -> dict[str, int]: + """ + Returns the maximum perturbation theoretical order of all excitation + spaces in the ADC(n) matrix. + """ + + space = self.isr.min_space[0] + ret: dict[str, int] = {space: order} + for i in range(1, order//2 + 1): + space = f"p{space}h" + ret[space] = order - i + return ret + + def block_order(self, order: int) -> dict[tuple[str, str], int]: + """ + Returns the perturbation theoretical orders through which all blocks + are expanded in the ADC(n) secular matrix. 
+ """ + from itertools import product + + max_orders = self.max_ptorder_spaces(order) + spaces = sorted(max_orders, key=lambda sp: len(sp)) + min_space = self.isr.min_space[0] + ret: dict[tuple[str, str], int] = {} + for block in product(spaces, spaces): + s1, s2 = block + # diagonal + if s1 == s2: + ret[block] = order - (len(s1) - len(min_space)) + # off diagonal + else: + dif = abs(len(s1) - len(s2)) // 2 + diag = order - (len(min(block)) - len(min_space)) + ret[block] = diag - dif + return ret diff --git a/build/lib/adcgen/simplify.py b/build/lib/adcgen/simplify.py new file mode 100644 index 0000000..9196858 --- /dev/null +++ b/build/lib/adcgen/simplify.py @@ -0,0 +1,765 @@ +from collections.abc import Sequence +from collections import Counter, defaultdict +import itertools + +from sympy import Add, Expr, Rational, Pow, S, sqrt + +from . import func +from .expression import ExprContainer, TermContainer, ObjectContainer +from .indices import ( + get_symbols, order_substitutions, Index, get_lowest_avail_indices, + minimize_tensor_indices, _is_index_tuple +) +from .misc import Inputerror +from .sympy_objects import ( + KroneckerDelta, Amplitude, AntiSymmetricTensor, NonSymmetricTensor +) +from .tensor_names import is_adc_amplitude, is_t_amplitude + + +def filter_tensor(expr: ExprContainer, t_strings: Sequence[str], + strict: str = 'low', + ignore_amplitudes: bool = True) -> ExprContainer: + """ + Filter an expression keeping only terms that contain the desired tensors. + + Parameters + ---------- + t_strings : Sequence[str] + List containing the desired tensor names. + struct : str, optional + 3 possible options: + - 'high': return all terms that ONLY contain the desired tensors the + requested amount of times, e.g., ['V', 'V'] returns only + terms that contain not other tensors than 'V*V' + Setting ignore_amplitudes, ignores all not requested + t and ADC ampltiudes amplitudes. 
+ - 'medium': return all terms that contain the desired tensors the + requested amount, but other tensors may additionally be + present in the term. E.g. ['V', 'V'] also returns terms + that contain 'V*V*x', where x may be any amount of + arbitrary other tensors. + - 'low': return all terms that contain all of the requested tensors, + e.g., ['V', 'V'] returns all terms that contain 'V' at least + once. + + Returns + Expr + The filtered expression. + """ + + def check_term(term: TermContainer) -> bool: + available = [] + for obj in term.objects: + name = obj.name + if name is None: + continue + exp = obj.exponent + assert exp.is_Integer + available.extend(name for _ in range(int(exp))) + # True if all requested tensors are in the term + if strict == 'low': + return all(t in available for t in set(t_strings)) + # True if all requested Tensors occur the correct amount of times + elif strict == 'medium': + available = Counter(available) + desired = Counter(t_strings) + return desired.items() <= available.items() + # True if only the requested Tensors are in the term in the correct + # amount + elif strict == 'high': + if ignore_amplitudes: + requested_amplitudes = [ + name for name in t_strings + if is_adc_amplitude(name) or is_t_amplitude(name) + ] + ignored_amplitudes = { + name for name in available if + (is_adc_amplitude(name) or is_t_amplitude(name)) + and name not in requested_amplitudes + } + available = Counter([t for t in available + if t not in ignored_amplitudes]) + else: + available = Counter(available) + desired = Counter(t_strings) + return desired == available + raise ValueError(f"invalid value for strict {strict}") + + if not all(isinstance(t, str) for t in t_strings): + raise Inputerror("Tensor names need to be provided as str.") + if strict not in ['low', 'medium', 'high']: + raise Inputerror(f"{strict} is not a valid option for strict. 
Valid" + "options are 'low', 'medium' or 'high'.") + assert isinstance(expr, ExprContainer) + + expr = expr.expand() + filtered = Add(*( + term.inner for term in expr.terms if check_term(term) + )) + return ExprContainer(filtered, **expr.assumptions) + + +def find_compatible_terms(terms: Sequence[TermContainer] + ) -> dict[int, dict[int, list[tuple[Index, Index]]]]: + """ + Determines the substitutions of contracted needed to map terms onto each + other. + + Parameters + ---------- + terms: Sequence[Term] + The list of terms to compare and map onto each other. + + Returns + ------- + dict + Nested dictionary containing the indices of terms and the substitution + dict to map the terms onto each other, e.g., the substitutions to + map term j onto term i are stored as + {i: {j: substitutions}}. + If it was not possible to find a match for term_i, the inner dictionary + will be empty {i: {}}. + """ + + def compare_terms( + pattern: dict[tuple[str, str], dict[Index, list[str]]], + other_pattern: dict[tuple[str, str], dict[Index, list[str]]], + target: tuple[Index, ...], term: TermContainer, + other_term: TermContainer) -> None | list[tuple[Index, Index]]: + # function to compare two terms that are compatible, i.e., have the + # same amount of indices in each space, the same amount and type of + # objects and the same target indices + sub_list: list[dict[Index, Index]] = [] + for ov, idx_pattern in pattern.items(): + # only compare indices that belong to the same space + other_idx_pattern = other_pattern.get(ov, None) + # the other space is not available in the other term + # -> they cant match + if other_idx_pattern is None: + return None + # list to hold the substitution dictionaries of the current space + ov_sub_list: list[dict[Index, Index]] = [] + + for idx, pat in idx_pattern.items(): + # find all possible matches for the current idx + # if its a target idx -> only allow mapping on other target idx + is_target = idx in target + # list to collect all possible 
matches + matching_idx: list[Index] = [] + for other_idx, other_pat in other_idx_pattern.items(): + other_is_target = other_idx in target + # only 1 index is a target index -> cant map + # or both are different target indices + # -> cant map because we cant substitute target indices + if is_target != other_is_target or \ + (is_target and other_is_target and + idx is not other_idx): + continue + # the pattern of both indices is identical + # -> possible match + if pat == other_pat: + matching_idx.append(other_idx) + # could not find a match for idx -> no need to check further + if not matching_idx: + break + + if not ov_sub_list: # initialize the subdicts + ov_sub_list.extend({s: idx} for s in matching_idx) + else: # already initialized -> add when possible + new_ov_sub_list: list[dict[Index, Index]] = [] + for sub, other_idx in \ + itertools.product(ov_sub_list, matching_idx): + # other_idx is already mapped onto another idx + if other_idx in sub: + continue + # copy the sub_dict to avoid inplace modification + extended_sub = sub.copy() + extended_sub[other_idx] = idx + new_ov_sub_list.append(extended_sub) + ov_sub_list = new_ov_sub_list + if not ov_sub_list: # did not find any valid combination + # will not be able to construct complete sub dicts + # say we matched idx1 to some indices and then obtain + # no valid sub dicts after matching idx2 + # -> can only obtain sub dicts that do not contain idx1 + # and idx2 -> they can not be valid + # -> terms can not match! + return None + # Done with comparing the indices of a space + # -> check the result and create total substitution dicts + + # remove incomplete sub lists + # This might not be necessary anymore + ov_sub_list = [ + sub for sub in ov_sub_list + if sub.keys() == other_idx_pattern.keys() + ] + + if not ov_sub_list: # did not find a single complete sub dict + return None + + # initialize the final substitution dicts + if not sub_list: + sub_list.extend(ov_sub_list) + else: # combine the sub dicts. 
different spaces can not overlap + sub_list = [ + other_sp_sub | sub for other_sp_sub, sub in + itertools.product(sub_list, ov_sub_list) + ] + + # test all sub dicts to identify the correct one (if one exists) + for sub in sub_list: + sub = order_substitutions(sub) + sub_other_term = other_term.inner.subs(sub) + assert isinstance(sub_other_term, Expr) + # sub is not valid for other term: evaluates to 0 due to + # some antisymmetry e.g. t_ijcd -> t_ijcc = 0 + if sub_other_term is S.Zero and other_term.inner is not S.Zero: + continue + # diff (or sum) is a single term (no Add obj) + # can either sum up to 0 or to a single term with a different pref + # -> check for type of result and not for result value + if not isinstance(Add(term.inner, -sub_other_term), Add): + return sub + return None # no valid sub dict -> return None + + def repeating_idx_sp(idx_list: list[tuple[str, set[Index], set[Index]]]): + repeating_idx = [] + for idx1, idx2 in itertools.combinations(idx_list, 2): + descr1, descr2 = idx1[0], idx2[0] + for i1, i2 in itertools.product(idx1[1:], idx2[1:]): + repeated = i1 & i2 + if len(repeated) > 1: + repeated = "".join(sorted( + s.space[0] + s.spin for s in repeated + )) + repeating_idx.append((repeated, *sorted([descr1, descr2]))) + return tuple(sorted(repeating_idx)) + + if not all(isinstance(term, TermContainer) for term in terms): + raise Inputerror("Expected terms as a list of term Containers.") + + # prefilter terms according to + # - number of objects, excluding prefactor + # - type, name, space, spin, obj target indices and exponent of objects + # - the space of repeating indices subsets (2, 3, ...) 
that repeat on + # on multiple objects together in a common index subspace (upper/lower) + # - number of indices in each space + # - the target indices + filtered_terms: defaultdict[tuple, list[int]] = defaultdict(list) + term_pattern: list[dict[tuple[str, str], dict[Index, list[str]]]] = [] + term_target: list[tuple[Index, ...]] = [] + for term_i, term in enumerate(terms): + # target indices + target = term.target + term_target.append(target) + # pattern + pattern = term.pattern() + term_pattern.append(pattern) + # obj name, space, exponent, obj_target_indices, repeating_indices + descriptions: list[str] = [] + tensor_idx_list: list[tuple[str, set[Index], set[Index]]] = [] + length = 0 + for o in term.objects: + base = o.base + if (descr := o.description()) == 'prefactor': + continue + elif isinstance(base, AntiSymmetricTensor): + upper, lower = base.upper, base.lower + assert _is_index_tuple(upper) and _is_index_tuple(lower) + tensor_idx_list.append( + (descr, set(upper), set(lower)) + ) + elif isinstance(base, (KroneckerDelta, NonSymmetricTensor)): + tensor_idx_list.append((descr, set(o.idx), set())) + length += 1 + descriptions.append(descr) + pattern_key = tuple(sorted( + (sp, len(idx_pat)) for sp, idx_pat in pattern.items() + )) + key = (length, tuple(sorted(descriptions)), + repeating_idx_sp(tensor_idx_list), pattern_key, target) + filtered_terms[key].append(term_i) + + compatible_terms: dict[int, dict[int, list[tuple[Index, Index]]]] = {} + for term_idx_list in filtered_terms.values(): + # set to keep track of the already mapped terms + matched: set[int] = set() + for i, term_i in enumerate(term_idx_list): + if term_i in matched: # term already mapped + continue + + compatible_terms[term_i] = {} + + # data of the current term + term = terms[term_i] + target = term_target[term_i] + pattern = term_pattern[term_i] + + for other_i in range(i+1, len(term_idx_list)): + other_term_i = term_idx_list[other_i] + if other_term_i in matched: # term already mapped + 
continue + + sub = compare_terms( + pattern, term_pattern[other_term_i], + target, term, terms[other_term_i] + ) + # was possible to map the terms onto each other! + if sub is not None: + compatible_terms[term_i][other_term_i] = sub + matched.add(other_term_i) + return compatible_terms + + +def simplify(expr: ExprContainer) -> ExprContainer: + """ + Simplify an expression by permuting contracted indices. Thereby, terms + are mapped onto each other reducing the number of terms. + Currently this does not work for denominators of the form (a + b + ...). + However, this restriction can often be bypassed by using symbolic + denominators, i.e., using a tensor of the correct symmetry to represent the + denominator. Alternatively, the functions found in 'reduce_expr' are + capable of handling orbital energy denominators. + + Parameters + ---------- + expr : ExprContainer + The expression to simplify + + Returns + ------- + ExprContainer + The simplified expression. + """ + assert isinstance(expr, ExprContainer) + expr = expr.expand() + if len(expr) == 1: # trivial: only a single term + return expr + # create terms and try to find compatible terms that may be + # simplified by substituting indices + terms = expr.terms + equal_terms = find_compatible_terms(terms) + # substitute the indices in other_n and keep n as is + res = ExprContainer(0, **expr.assumptions) + for n, matches in equal_terms.items(): + res += terms[n] + for other_n, sub in matches.items(): + res += terms[other_n].subs(sub) + return res + + +def simplify_unitary(expr: ExprContainer, t_name: str, + evaluate_deltas: bool = False) -> ExprContainer: + """ + Simplifies an expression that contains unitary tensors by exploiting + U_pq * U_pr * Remainder = delta_qr * Remainder, + where the Remainder does not contain the index p. + + Parameters + ---------- + expr : Expr + The expression to simplify. + t_name : str + Name of the unitary tensor. 
+ evaluate_deltas: bool, optional + If this is set, the generated KroneckerDeltas will be evaluated + before returning. + + Returns + ------- + Expr + The simplified expression. + """ + + def simplify_term_unitary(term: TermContainer) -> TermContainer: + objects = term.objects + # collect the indices of all unitary tensors in the term + unitary_tensors: list[int] = [] + for i, obj in enumerate(objects): + if obj.name == t_name: + exp = obj.exponent + assert exp.is_Integer + unitary_tensors.extend(i for _ in range(int(exp))) + + # only implemented for 2 dimensional unitary tensors + if any(len(objects[i].idx) != 2 for i in unitary_tensors): + raise NotImplementedError("Did only implement the case of 2D " + f"unitary tensors. Found {t_name} in " + f"{term}") + + # TODO: if we have a AntiSymmetricTensor as unitary tensor + # -> what kind of bra ket symmetry is possible? + # throw an error if it is set to +-1? + + # need at least 2 unitary tensors + if len(unitary_tensors) < 2: + return term + + # find the target indices + target = term.target + idx_counter = Counter(term.idx) + + # iterate over all pairs and look for matching contracted indices + # that do only occur on the two unitary tensors we want to simplify + for (i1, i2) in itertools.combinations(unitary_tensors, 2): + idx1 = objects[i1].idx + idx2 = objects[i2].idx + # U_pq U_pr = delta_qr + if idx1[0] == idx2[0] and idx1[0] not in target and \ + idx_counter[idx1[0]] == 2: + delta = KroneckerDelta(idx1[1], idx2[1]) + # U_qp U_rp = delta_qr + elif idx1[1] == idx2[1] and idx1[1] not in target and \ + idx_counter[idx1[1]] == 2: + delta = KroneckerDelta(idx1[0], idx2[0]) + else: # no matching indices + continue + + # lower the exponent of the 2 unitary tensors and + # add the created delta to the term + new_term = ExprContainer(delta, **term.assumptions) + if i1 == i2: + base, exponent = objects[i1].base_and_exponent + assert exponent.is_Integer + new_term *= Pow(base, int(exponent) - 2) + else: + b1, exponent1 = 
objects[i1].base_and_exponent + b2, exponent2 = objects[i2].base_and_exponent + assert exponent1.is_Integer and exponent2.is_Integer + new_term *= Pow(b1, int(exponent1) - 1) + new_term *= Pow(b2, int(exponent2) - 1) + + # add remaining objects + for i, o in enumerate(objects): + if i == i1 or i == i2: + continue + else: + new_term *= o + return simplify_term_unitary(new_term.terms[0]) + # could not find simplification -> return + return term + + assert isinstance(expr, ExprContainer) + + res = ExprContainer(0, **expr.assumptions) + for term in expr.terms: + res += simplify_term_unitary(term) + + # evaluate the generated deltas if requested + if evaluate_deltas: + res = ExprContainer(func.evaluate_deltas(res.inner), **res.assumptions) + return res + + +def remove_tensor(expr: ExprContainer, t_name: str + ) -> dict[tuple[str, ...], ExprContainer]: + """ + Removes a tensor from each term of an expression by undoing the contraction + of the remaining term with the tensor. The resulting expression is split + according to the blocks of the removed tensor. Note that only canonical + tensor blocks are considered, because the non-canonical blocks can be + generated from the canonical ones, e.g., removing a symmetric matrix d_{pq} + from an expression can only result in expressions for the 'oo', 'ov' and + 'vv' blocks, since d_{ai} = d_{ia}. + The symmetry of the removed tensor is taken into account, such that the + original expression can be restored if all block expressions are + contracted with the corresponding tensor blocks again. + Note that for ADC-Amplitudes a special prefactor is used. + + Parameters + ---------- + expr : ExprContainer + The expression where the tensor should be removed. + t_name : str + Name of the tensor that should be removed. + + Returns + ------- + dict[tuple[str, ...], ExprContainer] + key: Tuple of removed tensor blocks + value: Part of the original expression that contained the corresponding + blocks. 
If contracted with the tensor blocks again, a part of + the original expression is recovered. + """ + + def remove(term: TermContainer, tensor: ObjectContainer, + target_indices: dict[tuple[str, str], set[str]] + ) -> ExprContainer: + # - get the tensor indices + indices: Sequence[Index] = list(tensor.idx) + # - split the indices that are in the remaining term according + # to their space and spin to gather information about used indices + used_indices: dict[tuple[str, str], set[str]] = {} + for s in set(s for s, _ in term._idx_counter): + if (idx_key := s.space_and_spin) not in used_indices: + used_indices[idx_key] = set() + used_indices[idx_key].add(s.name) + # - check if the tensor is holding target indices. + # have to introduce a KroneckerDelta for each target index to avoid + # loosing indices in the term and replace the target indices on the + # tensor by new, unused indices: + # f_bc * Y^ac_ij -> delta_ik * delta_jl * delta_ad * f_bc * Y^dc_kl + + # get all target indices on the tensor, split according to their space + # and spin + tensor_target_indices: dict[tuple[str, str], list[Index]] = {} + for s in indices: + idx_key = s.space_and_spin + if s.name in target_indices.get(idx_key, []): + if idx_key not in tensor_target_indices: + tensor_target_indices[idx_key] = [] + if s not in tensor_target_indices[idx_key]: + tensor_target_indices[idx_key].append(s) + # - add the tensor indices to the term_indices to collect all + # not available indices + for s in indices: + if (idx_key := s.space_and_spin) not in used_indices: + used_indices[idx_key] = set() + used_indices[idx_key].add(s.name) + + if tensor_target_indices: + term_with_deltas = ExprContainer(term.inner, **term.assumptions) + for idx_key, idx_list in tensor_target_indices.items(): + if idx_key not in used_indices: + used_indices[idx_key] = set() + space, spin = idx_key + additional_indices = get_lowest_avail_indices( + len(idx_list), used_indices[idx_key], space + ) + # add the new indices to the 
unavailable indices + used_indices[idx_key].update(additional_indices) + # transform them from string to Dummies + if spin: + spins = spin * len(idx_list) + else: + spins = None + additional_indices = get_symbols(additional_indices, spins) + + sub = { + s: new_s for s, new_s in zip(idx_list, additional_indices) + } + # create a delta for each index and attach to the term + # and replace the index in tensor indices + for s, new_s in sub.items(): + term_with_deltas *= KroneckerDelta(s, new_s) + indices = [sub.get(s, s) for s in indices] + assert len(term_with_deltas) == 1 + term = term_with_deltas.terms[0] + del term_with_deltas + # - check for repeating indices: + # introduce a delta in the term for each repeating index + # e.g. d_iiij -> d_iklj // term <- delta_ik * delta_il + # Problem: this might introduce unstable deltas... + repeating_indices = {} + for s, n in Counter(indices).items(): + if n > 1: + if (idx_key := s.space_and_spin) not in repeating_indices: + repeating_indices[idx_key] = [] + repeating_indices[idx_key].extend(s for _ in range(n-1)) + if repeating_indices: + indices_i: dict[Index, list[int]] = {} + for i, s in enumerate(indices): + if s not in indices_i: + indices_i[s] = [] + indices_i[s].append(i) + # - iterate through the repeating indices and generate a new + # index for each repeating index. Use the repeating and the + # new index to create a KroneckerDelta. On AntiSymmetricTensors + # indices can at most twice, once in upper and once in lower. 
+ # On NonSymmetricTensors no such limit exists -> implement for + # an arbitrary amount of repetitions + term_without_repeating = ExprContainer( + term.inner, **term.assumptions + ) + for idx_key, idx_list in repeating_indices.items(): + space, spin = idx_key + additional_indices = get_lowest_avail_indices( + len(idx_list), used_indices.get(idx_key, []), space + ) + if spin: + spins = spin * len(idx_list) + else: + spins = None + additional_indices = get_symbols(additional_indices, spins) + for s, new_s in zip(idx_list, additional_indices): + term_without_repeating *= KroneckerDelta(s, new_s) + # substitute the second occurence of s in tensor indices + indices[indices_i[s].pop(1)] = new_s + # no repeating indices left + assert max(Counter(indices).values()) == 1 + assert len(term_without_repeating) == 1 + term = term_without_repeating.terms[0] + del term_without_repeating + # - minimize the tensor indices by permuting contracted indices. + # Ensure indices occur in ascending order: kijab -> ijkab. 
+ # target indices are excluded from this procedure: + # with target indices i, a: kijab -> jikab + indices, perms = minimize_tensor_indices(indices, target_indices) + # - apply the index permuations for minimizig the indices + # also to the term + res_term = term.permute(*perms) + del term + assert res_term.inner is not S.Zero + # - build a new tensor that holds the minimized indices + # further minimization might be possible taking the tensor + # symmetry into account, because we did not touch target indices: + # jikab -> d^jik_ab = - d^ijk_ab + raw_tensor = tensor.inner + if isinstance(raw_tensor, AntiSymmetricTensor): + bra_ket_sym = raw_tensor.bra_ket_sym + if isinstance(raw_tensor, Amplitude): # indices = lower, upper + n_l = len(raw_tensor.lower) + upper, lower = indices[n_l:], indices[:n_l] + else: # symtensor / antisymtensor, indices = upper, lower + n_u = len(raw_tensor.upper) + upper, lower = indices[:n_u], indices[n_u:] + res_tensor = ExprContainer(raw_tensor.__class__( + raw_tensor.name, upper, lower, bra_ket_sym + )).terms[0] + elif isinstance(raw_tensor, NonSymmetricTensor): + bra_ket_sym = None + res_tensor = ExprContainer( + NonSymmetricTensor(raw_tensor.name, indices) + ).terms[0] + else: + raise TypeError(f"Unknown tensor type {type(tensor.inner)}") + del raw_tensor + del tensor + # if we got a -1 -> move to the term + res_term *= res_tensor.prefactor + assert isinstance(res_term, ExprContainer) + # PREFACTOR: + # - For a contraction d^ij_ab we obtain an additional prefactor of 1/4 + # in the term, for d^ij_ka it is 1/2, or 1/4 for d^ij_kl + # -> it depends on the symmetry of the tensor we want to remove + # factor = n_perms + 1 + # -> need to remove it from the term: multiply by the term by the + # inverse factor (4 for d^ij_ab) + # - Additionally we need to ensure that the resulting expression + # preserves symmetry that was included in the input expression + # through the tensor we want to remove + # -> apply the tensor symmetry to the term + 
# d^ij_ab * X -> 1/4 (X - P_ij X - P_ab X + P_ij P_ab X) + # -> this leads to another factor of 1/(n_perms + 1) + # - For usual tensors both factors cancel each other exactly: + # (n_perms + 1) / (n_perms + 1) = 1 + # -> don't change the prefactor and just symmetrize the term + # - If the tensor has additionaly bra ket symmetry: + # swapping bra and ket will either result in an identical + # tensor block (diagonal block) + # or will give a non canonical block which is folded into + # the canonical block we are treating currently + # - diagonal block: multiply the term by 1/2 to keep the result + # normalized... we will get twice as many terms from applying + # the tensor symmetry as without bra ket symmetry + # -> the factor from lifting the index restrictions remains + # constant, while the factor for the symmetrisation is + # multiplied by 2: + # (n_perms + 1) / [2 (n_perms + 1)] = 1/2 + # - non-diagonal block: bra ket swap gives a non canonical block + # which can be folded into the canonical block: + # f_ia + f_ai = 2 f_ia + # However we only want to treat canonical tensor blocks. + # Therefore, we need to "remove" the contributions from the + # non-canonical blocks by multiplying with 1/2 + # -> if we have bra ket symmetry introduce a factor 1/2 + if bra_ket_sym is not None and bra_ket_sym is not S.Zero: + res_term *= Rational(1, 2) + assert isinstance(res_term, ExprContainer) + # - For ADC amplitudes we only have to multiply the term by + # sqrt(n_perms + 1), because the other part of the factor + # is hidden inside the amplitude vector to keep the vector + # norm constant when lifting index restrictions + # -> we obtain an overall factor of + # sqrt(n_perms + 1) / (n_perms + 1) = 1 / sqrt(n_perms + 1) + tensor_sym = res_tensor.symmetry() + if is_adc_amplitude(t_name): # are we removing an ADC amplitude? 
+ if bra_ket_sym is not S.Zero: + raise ValueError("ADC amplitude vectors should have " + "no bra ket symmetry.") + res_term *= S.One / sqrt(len(tensor_sym) + 1) + assert isinstance(res_term, ExprContainer) + # - add the tensor indices to the target indices of the term + # but only if it is not possible to determine them with the einstein + # sum convention -> only if target indices have been set manually + if res_term.provided_target_idx is not None: + res_term.set_target_idx(res_term.provided_target_idx + indices) + # - apply the symmetry of the removed tensor to the term + symmetrized_term = res_term.copy() + for perms, sym_factor in tensor_sym.items(): + symmetrized_term += res_term.copy().permute(*perms) * sym_factor + # - reduce the number of terms as much as possible + return simplify(symmetrized_term) + + def process_term(term: TermContainer, t_name: str + ) -> dict[tuple[str, ...], ExprContainer | TermContainer]: + # print(f"\nProcessing term {term}") + # collect all occurences of the desired tensor + tensors: list[ObjectContainer] = [] + remaining_term = ExprContainer(1, **term.assumptions) + for obj in term.objects: + if obj.name == t_name: + tensors.append(obj) # we take care of the exponent later! + else: + remaining_term *= obj + if not tensors: # could not find the tensor + return {("none",): term} + # extract all the target indices and split according to their space + target_indices: dict[tuple[str, str], set[str]] = {} + for s in term.target: + if (idx_key := s.space_and_spin) not in target_indices: + target_indices[idx_key] = set() + target_indices[idx_key].add(s.name) + # remove the first occurence of the tensor + # and add all the remaining occurences back to the term + for remaining_t in tensors[1:]: + remaining_term *= remaining_t + # the tensor might have an exponent that we need to take care of! 
+ tensor = tensors[0] + exponent = tensor.exponent + # I am not 100% sure atm how to remove tensors with exponents != 1 + # so wait for an actual example to come up and implement it then. + if exponent != 1: + raise NotImplementedError("Did not implement the case of removing " + f"tensors with exponents != 1: {t_name} " + f"in {term}") + assert len(remaining_term) == 1 + remaining_term = remove( + remaining_term.terms[0], tensor, target_indices + ) + # determine the space/block of the removed tensor + # used as key in the returned dict + spin = tensor.spin + if all(c == "n" for c in spin): + t_block = [tensor.space] + else: + t_block = [f"{tensor.space}_{spin}"] + # print(t_block, remaining_term) + if len(tensors) == 1: # only a single occurence no need to recurse + return {tuple(t_block): remaining_term} + else: # more than one occurence of the tensor + # iterate through the terms that already have the first occurence + # removed and recurse for each term + ret = {} + for t in remaining_term.terms: + # add the blocks to the already removed block + contribution = process_term(t, t_name) + for blocks, contrib in contribution.items(): + key = tuple(sorted(t_block + list(blocks))) + if key not in ret: + ret[key] = 0 + ret[key] += contrib + return ret + + assert isinstance(expr, ExprContainer) + assert isinstance(t_name, str) + # expr sorted by tensor block + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + for key, contrib in process_term(term, t_name).items(): + if key not in ret: + ret[key] = ExprContainer(0, **contrib.assumptions) + ret[key] += contrib + return ret diff --git a/build/lib/adcgen/sort_expr.py b/build/lib/adcgen/sort_expr.py new file mode 100644 index 0000000..2be1992 --- /dev/null +++ b/build/lib/adcgen/sort_expr.py @@ -0,0 +1,382 @@ +from collections import defaultdict +import itertools + +from sympy import Add, S + +from .eri_orbenergy import EriOrbenergy +from .expression import ExprContainer, TermContainer +from .indices 
import get_symbols, sort_idx_canonical +from .misc import Inputerror +from .simplify import simplify +from .symmetry import Permutation +from .sympy_objects import AntiSymmetricTensor, KroneckerDelta, SymmetricTensor + + +def by_delta_types(expr: ExprContainer + ) -> dict[tuple[str, ...], ExprContainer]: + """Sort the terms in an expression according to their space and spin.""" + assert isinstance(expr, ExprContainer) + expr = expr.expand() + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + d_blocks = [] + for delta in term.objects: + if not isinstance(delta.base, KroneckerDelta): + continue + spin = delta.spin + if all(c == "n" for c in spin): # no indices with spin + block = delta.space + else: + block = f"{delta.space}_{spin}" + exp = delta.exponent + assert exp.is_Integer + d_blocks.extend(block for _ in range(int(exp))) + d_blocks = tuple(sorted(d_blocks)) + if not d_blocks: + d_blocks = ('none',) + if d_blocks not in ret: + ret[d_blocks] = ExprContainer(0, **term.assumptions) + ret[d_blocks] += term + return ret + + +def by_delta_indices(expr: ExprContainer + ) -> dict[tuple[str, ...], ExprContainer]: + """ + Sort the terms in an expression according to the names and spin of indices + on the KroneckerDeltas in each term. + """ + assert isinstance(expr, ExprContainer) + expr = expr.expand() + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + d_idx = tuple(sorted( + "".join(str(s) for s in o.idx) for o in term.objects + if isinstance(o.base, KroneckerDelta) + for _ in range(int(o.exponent)) + )) + if not d_idx: + d_idx = ('none',) + if d_idx not in ret: + ret[d_idx] = ExprContainer(0, **term.assumptions) + ret[d_idx] += term + return ret + + +def by_tensor_block(expr: ExprContainer, t_name: str + ) -> dict[tuple[str, ...], ExprContainer]: + """ + Sort the terms in an expression according to the blocks of a tensor. 
+ """ + assert isinstance(t_name, str) + assert isinstance(expr, ExprContainer) + expr = expr.expand() + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + t_blocks = [] + for tensor in term.objects: + if tensor.name != t_name: + continue + spin = tensor.spin + if all(c == "n" for c in spin): + block = tensor.space + else: + block = f"{tensor.space}_{spin}" + exp = tensor.exponent + assert exp.is_Integer + t_blocks.extend(block for _ in range(int(exp))) + t_blocks = tuple(sorted(t_blocks)) + if not t_blocks: + t_blocks = ("none",) + if t_blocks not in ret: + ret[t_blocks] = ExprContainer(0, **term.assumptions) + ret[t_blocks] += term + return ret + + +def by_tensor_target_block(expr: ExprContainer, t_name: str + ) -> dict[tuple[str, ...], ExprContainer]: + """ + Sort the terms in an expression according to the type of target indices on + the specified tensor, e.g. f_cc Y_ij^ac, where i, j and a are target + indices: + -> if sorting according to the indices on Y: (oov,); + if sorting acording to the indices on f: (none,). 
+ """ + assert isinstance(t_name, str) + assert isinstance(expr, ExprContainer) + expr = expr.expand() + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + key = [] + target = term.target + for tensor in term.objects: + if tensor.name == t_name: + # indices are in canonical order + tensor_target = [s for s in tensor.idx if s in target] + if not tensor_target: # no target indices on the tensor + key.append("none") + continue + tensor_target_block = "".join( + s.space[0] for s in tensor_target + ) + if any(s.spin for s in tensor_target): # spin is defined + spin = "".join( + s.spin if s.spin else "n" for s in tensor_target + ) + tensor_target_block += f"_{spin}" + key.append(tensor_target_block) + key = tuple(sorted(key)) # in case of multiple occurences + if not key: # did not find a single occurence of the tensor + key = (f'no_{t_name}',) + if key not in ret: + ret[key] = ExprContainer(0, **term.assumptions) + ret[key] += term + return ret + + +def by_tensor_target_indices(expr: ExprContainer, t_name: str + ) -> dict[tuple[str, ...], ExprContainer]: + """ + Sort the terms in an expression according to the names of target indices on + the specified tensor. 
+ """ + assert isinstance(t_name, str) + assert isinstance(expr, ExprContainer) + expr = expr.expand() + ret: dict[tuple[str, ...], ExprContainer] = {} + for term in expr.terms: + key = [] + target = term.target + for obj in term.objects: + if obj.name == t_name: + # indices are in canonical order + obj_target_idx = "".join( + [s.name for s in obj.idx if s in target] + ) + if not obj_target_idx: + obj_target_idx = "none" + key.append(obj_target_idx) + key = tuple(sorted(key)) # in case the tensor occurs more than once + if not key: # tensor did not occur in the term + key = (f"no_{t_name}",) + if key not in ret: + ret[key] = ExprContainer(0, **term.assumptions) + ret[key] += term + return ret + + +def exploit_perm_sym( + expr: ExprContainer, target_indices: str | None = None, + target_spin: str | None = None, bra_ket_sym: int = 0, + antisymmetric_result_tensor: bool = True + ) -> dict[tuple[tuple[tuple[Permutation, ...], int], ...], ExprContainer]: # noqa E501 + """ + Reduces the number of terms in an expression by exploiting the symmetry: + by applying permutations of target indices it might be poossible to map + terms onto each other reducing the overall number of terms. + + Parameters + ---------- + expr : Expr + The expression to probe for symmetry. + target_indices : str | None, optional + The names of target indices of the expression. Bra and ket indices + should be separated by a ',' to lower the amount of permutations the + expression has to be probed for, e.g., to differentiate 'ia,jb' + from 'ij,ab'. If not provided, the function will try to determine the + target indices automatically and probe for the complete symmetry found + for these indices. + target_spin : str | None , optional + The spin of the target indices, e.g., 'aabb' to indicate that the + first 2 target indices have alpha spin, while number 3 and 4 have + beta spin. If not given, target indices without spin will be used. 
+ bra_ket_sym : int, optional + Defines the bra-ket symmetry of the result tensor of the expression. + Only considered if the names of target indices are separated by a ','. + antisymmetric_result_tensor : bool, optional + If set, the result tensor will be treated as AntiSymmetricTensor + d_{ij}^{ab} = - d_{ji}^{ab}. Otherwise, a SymmetricTensor will be used + to mimic the symmetry of the result tensor, i.e., + d_{ij}^{ab} = d_{ji}^{ab}. (default: True) + + Returns + ------- + dict + The remaining terms sorted by the found permutations. + key: The permutations. + value: The part of the expression to which the permutations have to be + applied in order to recover the original expression. + """ + from .reduce_expr import factor_eri_parts, factor_denom + + def simplify_terms_with_denom(sub_expr: ExprContainer): + factored = itertools.chain.from_iterable( + factor_denom(sub_e) for sub_e in factor_eri_parts(sub_expr) + ) + ret = ExprContainer(0, **sub_expr.assumptions) + for term in factored: + ret += term.factor() + return ret + + assert isinstance(expr, ExprContainer) + if expr.inner.is_number: + return {tuple(): expr} + expr.expand() + terms: tuple[TermContainer, ...] 
= expr.terms + + # check that each term in the expr contains the same target indices + ref_target = terms[0].target + if not expr.provided_target_idx and \ + any(term.target != ref_target for term in terms): + raise Inputerror("Each term in the expression needs to contain the " + "same target indices.") + + # if target indices have been provided + # -> check that they match with the found target indices + if target_indices is not None: + # split in upper/lower indices if possible + if "," in target_indices: + upper, lower = target_indices.split(",") + else: + if bra_ket_sym: + raise Inputerror("Target indices need to be separated by a " + "',' to indicate where to split them in " + "upper and lower indices if the target tensor" + "has bra-ket-symmetry.") + upper, lower = target_indices, "" + # treat the spin + if target_spin is not None: + if "," in target_spin: + upper_spin, lower_spin = target_spin.split(",") + else: + upper_spin = target_spin[:len(upper)] + lower_spin = target_spin[len(upper):] + if len(upper) != len(upper_spin) or len(lower) != len(lower_spin): + raise Inputerror(f"The target indices {target_indices} are " + " not compatible with the provided spin " + f"{target_spin}.") + else: + upper_spin, lower_spin = None, None + + upper = get_symbols(upper, upper_spin) + lower = get_symbols(lower, lower_spin) + sorted_provided_target = tuple(sorted( + upper + lower, key=sort_idx_canonical + )) + if sorted_provided_target != ref_target: + raise Inputerror(f"The provided target indices {target_indices} " + "are not equal to the target indices found in " + f"the expr: {ref_target}.") + else: # just use the found target indices + # if no target indices have been provided all indices are in upper + # -> bra ket sym is irrelevant + upper, lower = ref_target, tuple() + bra_ket_sym = 0 + # build a tensor holding the target indices and determine its symmetry + if antisymmetric_result_tensor: + tensor = AntiSymmetricTensor("x", upper, lower, bra_ket_sym) + else: + 
tensor = SymmetricTensor("x", upper, lower, bra_ket_sym) + symmetry = ExprContainer(tensor).terms[0].symmetry() + + # prefilter the terms according to the contained objects (name, space, exp) + # and if a denominator is present -> number and length of the brackets + filtered_terms: defaultdict[tuple, list[int]] = defaultdict(list) + has_denom: list[bool] = [] + for term_i, term in enumerate(terms): + term_splitted = EriOrbenergy(term) + has_denom.append(not term_splitted.denom.inner.is_number) + eri_descr: tuple[str, ...] = tuple(sorted( + o.description(target_idx=None) + for o in term_splitted.eri.objects + )) + idx_space = "".join(sorted( + s.space[0] + s.spin for s in term_splitted.eri.contracted + )) + key = (eri_descr, term_splitted.denom_description(), idx_space) + filtered_terms[key].append(term_i) + + ret: dict[tuple[tuple[tuple[Permutation, ...], int], ...], ExprContainer] = {} # noqa E501 + removed_terms: set[int] = set() + for term_idx_list in filtered_terms.values(): + # term is unique -> nothing to compare with + # can not map this term onto any other terms + if len(term_idx_list) == 1: + if tuple() not in ret: + ret[tuple()] = ExprContainer(0, **expr.assumptions) + ret[tuple()] += terms[term_idx_list[0]] + continue + + # decide which function to use for comparing the terms + terms_have_denom = has_denom[term_idx_list[0]] + assert all( + terms_have_denom == has_denom[term_i] for term_i in term_idx_list + ) + if terms_have_denom: + simplify_terms = simplify_terms_with_denom + else: + simplify_terms = simplify + + # first loop over terms!! + # Otherwise it is not garuanteed that all matches for a term can + # be found: consider 4 terms with ia, ja, ib and jb + # we want to find: P_ab, P_ij and P_ijP_ab for ia (or any other term) + # if we first loop over perms, e.g., P_ab we may find + # ia -> ib, ja -> jb for instance. 
+ # -> we will not be able to find the full symmetry of the terms + for term_i in term_idx_list: + if term_i in removed_terms: + continue + term = terms[term_i] + found_sym: list[tuple[tuple[Permutation, ...], int]] = [] + for perms, factor in symmetry.items(): + # apply the permutations to the current term + perm_term = term.permute(*perms) + # permutations are not valid for the current term + if perm_term.inner is S.Zero and term.inner is not S.Zero: + continue + # check if the permutations did change the term + # if the term is still the same (up to the sign) continue + # thereby only looking for the desired symmetry + if factor == -1: + # looking for antisym: P_pq X = - X -> P_pq X + X = 0? + if Add(perm_term.inner, term.inner) is S.Zero: + continue + elif factor == 1: + # looking for sym: P_pq X = + X -> P_pq X - X = 0? + if Add(perm_term.inner, -term.inner) is S.Zero: + continue + else: + raise ValueError(f"Invalid sym factor {factor}.") + # perm term != term -> compare to other terms + for other_term_i in term_idx_list: + if term_i == other_term_i or other_term_i in removed_terms: + continue + # compare the terms: again only look for the desired + # symmetry + if factor == -1: + # looking for antisymmetry: X - X' + # P_pq X + (-X') = 0 | P_pq X = +X' + simplified = ( + simplify_terms(perm_term + terms[other_term_i]) + ) + else: # factor == 1 + # looking for symmetry: X + (X') + # P_pq X - X' = 0 | P_pq X = +X' + simplified = ( + simplify_terms(perm_term - terms[other_term_i]) + ) + # could not map the terms onto each other + if simplified.inner is not S.Zero: + continue + # mapped the terms onto each other + removed_terms.add(other_term_i) + found_sym.append((perms, factor)) + break + # use the found symmetry as dict key + found_sym_tpl = tuple(found_sym) + if found_sym_tpl not in ret: + ret[found_sym_tpl] = ExprContainer(0, **expr.assumptions) + ret[found_sym_tpl] += term + return ret diff --git a/build/lib/adcgen/spatial_orbitals.py 
b/build/lib/adcgen/spatial_orbitals.py new file mode 100644 index 0000000..732df87 --- /dev/null +++ b/build/lib/adcgen/spatial_orbitals.py @@ -0,0 +1,443 @@ +from collections import Counter +from itertools import product +from typing import Sequence + +from .expression import ExprContainer +from .logger import logger +from .misc import Inputerror +from .indices import ( + Index, get_symbols, order_substitutions, sort_idx_canonical, + _is_str_sequence +) +from .simplify import simplify + + +def transform_to_spatial_orbitals(expr: ExprContainer, target_idx: str, + target_spin: str, + restricted: bool = False, + expand_eri: bool = True) -> ExprContainer: + """ + Transforms an expression to a spatial orbital basis by integrating over + the spin of the spin orbitals, i.e., a spin is attached to all indices. + Furthermore, the antisymmetric ERI's are replaced by the in this context + more commonly used coulomb integrals in chemist notation. + Target indices of the expression are updated if necessary. + + Parameters + ---------- + expr : ExprContainer + Expression to express in terms of spatial orbitals. + target_idx : str + The names of target indices of the expression. Needs to be provided, + because the target indices in the expression are stored in canonical + order, which might not be correct. + target_spin : str + The spin of the target indices, e.g., 'aa' for 2 alpha orbitals. + restricted : bool, optional + Whether a restricted reference (equal alpha and beta orbitals) + should be assumed. In case of a restricted reference, only alpha + orbitals will be present in the returned expression. + (default: False) + expand_eri : bool, optional + If set, the antisymmetric ERI (in physicist notation) are expanded + to coulomb integrals using chemist notation + = - = (pr|qs) - (ps|qr), + where by default a SymmetricTensor 'v' is used to represent the + coulomb integrals. 
+ """ + + # perform the integration first, since the intermediates are defined + # in terms of the antisymmetric ERI + expr = integrate_spin(expr, target_idx, target_spin) + if expand_eri: + expr.expand_antisym_eri().expand() + if not restricted: + return expr + # in the restricted case we can replace all beta orbitals by the + # corresponding alpha orbitals. + # It should be fine to keep the name and only adjust the spin of the + # indices: + # - in the input expression we only have spin orbitals + # - during the integration we generate multiple terms mapping each index + # to a spin + # -> the names are still unique, i.e., at this point each term might only + # hold an index of a certain name with either alpha or beta spin but + # not both of them simultaneously + restricted_expr: ExprContainer = ExprContainer(0, **expr.assumptions) + if expr.provided_target_idx is not None: + # update the target indices + restricted_target = get_symbols(target_idx, "a" * len(target_spin)) + restricted_expr.set_target_idx(restricted_target) + for term in expr.terms: + idx = set(term.idx) + beta_idx = [i for i in idx if i.spin == "b"] + if not beta_idx: + restricted_expr += term.inner + continue + new_idx = get_symbols([i.name for i in beta_idx], "a"*len(beta_idx)) + sub: dict[Index, Index] = {} + for old, new in zip(beta_idx, new_idx): + # conststruct the alpha index + if new in idx: + raise RuntimeError("It is not safe to replace the beta index " + f"{old} with the corresponding alpha index," + " because the index with alpha spin is " + f"already used in the term: {term}.") + sub[old] = new + restricted_expr += term.inner.subs(order_substitutions(sub)) + assert isinstance(restricted_expr, ExprContainer) + return restricted_expr + + +def integrate_spin(expr: ExprContainer, target_idx: str, + target_spin: str) -> ExprContainer: + """ + Integrates over the spin of the spin orbitals to transform an expression + to a spatial orbital basis, i.e, a spin is attached to all indices. 
+ Target indices in the expression will be updated if necessary. + + Parameters + ---------- + expr : ExprContainer + Expression where the spin is integrated. + target_idx : str + Names of target indices of the expression. + target_spin : str + Spin of target indices of the expression. + """ + assert isinstance(expr, ExprContainer) + # - validate the target indices and target spin + target_symbols = get_symbols(target_idx) + if len(target_symbols) != len(target_spin): + raise Inputerror(f"Spin {target_spin} and indices {target_symbols} are" + " not compatible.") + target_idx_spins: dict[Index, str] = {} + for idx, spin in zip(target_symbols, target_spin): + if idx in target_idx_spins and target_idx_spins[idx] != spin: + raise ValueError(f"The index {idx} can not be assigned to alpha " + "and beta spin simultaneously.") + target_idx_spins[idx] = spin + # - sort the target indices to validate that the terms have the correct + # target indices and build the target spins + sorted_target = tuple(sorted(target_idx_spins, key=sort_idx_canonical)) + target_spins = [target_idx_spins[idx] for idx in sorted_target] + del target_idx_spins + # - generate the new target indices of the resulting expression to set + # them if needed + result_target = get_symbols([s.name for s in target_symbols], target_spin) + + result: ExprContainer = ExprContainer(0, **expr.assumptions) + if expr.provided_target_idx is not None: + result.set_target_idx(result_target) + + for term in expr.terms: + logger.debug(f"Integrating spin in term {term}") + # - ensure that the term has matching target indices + term_target = term.target + if term_target != sorted_target: + raise ValueError(f"Target indices {term_target} of term {term} " + "don't match the desired target indices " + f"{target_symbols}") + # - ensure that no index in the term is holding a spin + if any(s.spin for s in term.idx): + raise ValueError("The function assumes that the input expression " + "is expressed in terms of spin orbitals. 
Found " + f"a spatial orbital in term {term}.") + # we have no indices (the term is a number) we don't have anything + # to do + if not term.idx: + logger.debug(f"Result = {term}") + result += term.inner + continue + # - build a list of indices and base map for the spins of the indices + # starting with the target indices + term_contracted = term.contracted + term_indices = (*term_target, *term_contracted) + assert all(v == 1 for v in Counter(term_indices).values()) + base_spins: list[str | None] = [spin for spin in target_spins] + base_spins.extend(None for _ in range(len(term_contracted))) + # - for each object in the term: go through the allowed spin_blocks and + # try to add them to the base spins (target spins) in order to form a + # valid variants where all indices are assigned to a spin. + spin_variants: list[list[str | None]] = [base_spins] + term_vanishes: bool = False + for obj in term.objects: + allowed_blocks = obj.allowed_spin_blocks + # hit a Polynom, Prefactor or unknown tensor + if allowed_blocks is None: + continue + # we have some allowed blocks to check + # -> try to form valid combinations assigning all indices to a spin + indices: tuple[int, ...] 
= tuple( + term_indices.index(idx) for idx in obj.idx + ) + old_spin_variants = spin_variants.copy() + spin_variants.clear() + for block in allowed_blocks: + # - ensure that the block is valid: a index can not be + # assigned to alpha and beta at the same time + addition: list[str | None] = [ + None for _ in range(len(term_indices)) + ] + for spin, idx in zip(block, indices): + if addition[idx] is not None and addition[idx] != spin: + raise ValueError("Found invalid allowed spin block " + f"{block} for {obj}.") + addition[idx] = spin + # check for contracdictions with the target_spin and skip the + # block if this is the case + if any(sp1 != sp2 for sp1, sp2 in + zip(target_spins, addition[:len(term_target)]) + if sp2 is not None): + continue + # iterate over the existing variants and try to add the + # addition + for old_variant in old_spin_variants: + # check for any contradiction + if any(sp1 != sp2 for sp1, sp2 in + zip(old_variant, addition) + if sp1 is not None and sp2 is not None): + continue + # add the addition to the old variant + combination = [sp1 if sp2 is None else sp2 + for sp1, sp2 in zip(old_variant, addition)] + # we only need unique variants -> remove duplicates + if any(comb == combination for comb in spin_variants): + continue + spin_variants.append(combination) + # we could not find a single valid combination for the given + # object -> the term has to vanish + if not spin_variants: + term_vanishes = True + break + if term_vanishes: + logger.debug("Result = 0") + continue + # collect the result in a separate expression such that we can call + # simplify before adding the contribution to the result + contribution: ExprContainer = ExprContainer(0, **expr.assumptions) + if expr.provided_target_idx is not None: # if necessary update target + contribution.set_target_idx(result_target) + # - iterate over the unique combinations, replace the spin orbitals + # by the corresponding spatial orbitals (assign a spin to the + # indices) and add the 
corresponding terms to the result. + # Thereby, ensure that all indices have a spin assigned and + # try to assign a spin for not yet assigned indices: + # since all variants are initialized with the target spins + # set, only contracted indices can not be assigned + # -> generate a variant for alpha and beta since both are allowed + for spin_var in spin_variants: + missing_contracted = [ + idx for idx, spin in enumerate(spin_var) + if idx >= len(target_spin) and spin is None + ] + # construct variants for missing contracted indices assuming that + # alpha and beta spin is allowed. + if missing_contracted: + variants: list[list[str | None]] = [] + for spins in product("ab", repeat=len(missing_contracted)): + complete_variant = spin_var.copy() + for spin, idx in zip(spins, missing_contracted): + complete_variant[idx] = spin + variants.append(complete_variant) + else: + variants: list[list[str | None]] = [spin_var] + # go through the variants and perform the actual substitutions + for variant in variants: + # ensure that we indeed assigned all spins + assert _is_str_sequence(variant) + + new_indices = get_symbols( + indices=[s.name for s in term_indices], + spins="".join(variant) + ) + sub = { + old: new for old, new in zip(term_indices, new_indices) + } + contrib = term.inner.subs(order_substitutions(sub)) + logger.debug(f"Found contribution {contrib}") + contribution += contrib + # TODO: if we simplify the result it will throw an error for any + # polynoms or denominators. Should we skip the simplification altough + # we currently don't treat polynoms correctly in this function + # since their allowed_spin_blocks are not considered. + assert isinstance(contribution, ExprContainer) + result += simplify(contribution) + return result + + +def allowed_spin_blocks(expr: ExprContainer, + target_idx: Sequence[str]) -> tuple[str, ...]: + """ + Determines the allowed spin blocks of an expression. 
Thereby, it is assumed + that the allowed spin blocks of tensors in the expression are either known + or can be determined on the fly, i.e., this only works for closed + expressions. + + Parameters + ---------- + expr : ExprContainer + The expression to check. + target_idx : Sequence[str] + The target indices of the expression. + """ + + assert isinstance(expr, ExprContainer) + + target_symbols = get_symbols(target_idx) + sorted_target = tuple(sorted(target_symbols, key=sort_idx_canonical)) + + # - determine all possible spin blocks + spin_blocks: list[str] = [ + "".join(b) for b in product("ab", repeat=len(target_symbols)) + ] + spin_blocks_to_check: list[int] = [i for i in range(len(spin_blocks))] + + allowed_blocks: set[str] = set() + for term in expr.terms: + # - ensure that the term has matching target indices + if term.target != sorted_target: + raise ValueError(f"Target indices {term.target} of {term} dont " + "match the provided target indices " + f"{target_symbols}") + # - extract the allowed blocks for all tensors and initialize + # index maps to relate indices to a spin + term_idx_maps: list[tuple[list[dict[Index, str]], int]] = [] + for obj in term.objects: + allowed_object_blocks = obj.allowed_spin_blocks + # hit a Polynom, Prefactor or unknown tensor + if allowed_object_blocks is None: + continue + obj_indices = obj.idx + n_target = len([ + idx for idx in obj_indices if idx in target_symbols + ]) + object_idx_maps: list[dict[Index, str]] = [] + for block in allowed_object_blocks: + idx_map = {} + for spin, idx in zip(block, obj_indices): + if idx in idx_map and idx_map[idx] != spin: + raise ValueError("Found invalid allowed spin block " + f"{block} for {obj}.") + idx_map[idx] = spin + object_idx_maps.append(idx_map) + term_idx_maps.append((object_idx_maps, n_target)) + # - sort the allowed_tensor_blocks such that tensors with a high + # number of target indices are preferred + term_idx_maps = sorted(term_idx_maps, + key=lambda tpl: tpl[1], 
reverse=True) + + term_indices = set(term.idx) + blocks_to_remove: set[int] = set() + for block_i in spin_blocks_to_check: + block = spin_blocks[block_i] + if block in allowed_blocks: + blocks_to_remove.add(block_i) + continue + valid_block = True + + # - assign the target indices to a spin + target_spin: dict[Index, str] = {} + for spin, idx in zip(block, target_symbols): + # in case we have target indices iiab only spin blocks + # aaxx or bbxx are valid + if idx in target_spin and target_spin[idx] != spin: + valid_block = False + break + target_spin[idx] = spin + if not valid_block: + continue + + # - remove all object spin blocks that are in contradiction to the + # current spin block + relevant_term_spin_idx_maps: list[list[dict[str, set[Index]]]] = [] + for tensor_idx_maps, _ in term_idx_maps: + relevant_object_spin_idx_maps: list[dict[str, set[Index]]] = [] + for idx_map in tensor_idx_maps: + # are all target idx compatible with the block? + if any(spin != idx_map[t_idx] + for t_idx, spin in target_spin.items() + if t_idx in idx_map): + continue + spin_idx_map: dict[str, set[Index]] = { + "a": set(), "b": set() + } + for idx, spin in idx_map.items(): + spin_idx_map[spin].add(idx) + relevant_object_spin_idx_maps.append(spin_idx_map) + # the object has not a single allowed spin block that is + # compatible to the currently probed block + if not relevant_object_spin_idx_maps: + valid_block = False + break + relevant_term_spin_idx_maps.append( + relevant_object_spin_idx_maps + ) + # at least 1 object has no compatible allowed spin block + # -> the current term can not contribute to the current block + if not valid_block: + continue + + # - try to find a valid combination of the remaining spin blocks + spin_idx_map: dict[str, set[Index]] = {"a": set(), "b": set()} + if not _has_valid_combination(relevant_term_spin_idx_maps, 0, + spin_idx_map): + continue + # - verify the result: + # ensure that all indices are assigned + # the target indices have the desired spin 
def _has_valid_combination(tensor_idx_maps: "list[list[dict[str, set[Index]]]]",
                           current_pos: int,
                           variant: "dict[str, set[Index]]") -> bool:
    """
    Tries to recursively assign all indices to a spin without introducing
    contradictions. Returns immediately when all indices could be assigned
    successfully. On success, 'variant' holds the complete assignment; on
    failure it is restored to its input state (backtracking).
    """
    is_last_position = (current_pos + 1 == len(tensor_idx_maps))
    for candidate in tensor_idx_maps[current_pos]:
        # a candidate block clashes with the current assignment if some
        # index would have to carry alpha and beta spin at the same time
        if candidate["a"] & variant["b"]:
            continue
        if candidate["b"] & variant["a"]:
            continue
        # remember which indices are genuinely new so this step can be
        # undone if the recursion runs into a dead end
        new_alpha = candidate["a"] - variant["a"]
        new_beta = candidate["b"] - variant["b"]
        variant["a"] |= candidate["a"]
        variant["b"] |= candidate["b"]
        if is_last_position:  # all objects handled -> we are done!!
            return True
        # recurse further and try to complete the assignment
        if _has_valid_combination(tensor_idx_maps, current_pos + 1, variant):
            return True
        # dead end -> revert only the indices added in this step
        variant["a"] -= new_alpha
        variant["b"] -= new_beta
    # no candidate of the current object could be added to the variant
    return False
P_ia * P_ij * P_ab * P_pq + # the spaces o and v are linked -> the order of the first 3 + # permutations has to be maintained, while P_pq can be moved + # to any arbitrary place + splitted = cls.split_in_separable_parts(args) + sorted_args = [val for _, val in sorted(splitted.items())] + return super().__new__(cls, itertools.chain.from_iterable(sorted_args)) + + @staticmethod + def split_in_separable_parts(permutations: Sequence[Permutation] + ) -> dict[str, list[Permutation]]: + """ + Splits the permutations in subsets that can be treated independently + of each other. + """ + + # split the permutations according to their index space + # and identify spaces that are linked to each other through at least + # 1 permutation + perm_spaces: list[set[str]] = [] + links: list[set[str]] = [] + for perm in permutations: + p, q = perm + space: set[str] = set((p.space[0] + p.spin, q.space[0] + q.spin)) + perm_spaces.append(space) + + if len(space) > 1: # identify linking permutations + if space not in links: + links.append(space) + + if len(links) == 0: # no links, all spaces separated + linked_spaces: list[set[str]] = [] + elif len(links) == 1: # exactly 2 spaces are linked + linked_spaces: list[set[str]] = links + else: # more than 2 spaces linked: either ov, ox or ov, xy + treated: set[int] = set() + linked_spaces: list[set[str]] = [] + for i, linked_sp in enumerate(links): + if i in treated: + continue + linked = linked_sp.copy() + for other_i in range(i+1, len(links)): + if other_i in treated: + continue + if linked_sp & links[other_i]: + linked.update(links[other_i]) + treated.add(other_i) + linked_spaces.append(linked) + + # sort them in groups that can be treated independently + ret: dict[str, list[Permutation]] = {} + for perm, space in zip(permutations, perm_spaces): + # if the current space is linked to other spaces + # -> replace the space by the linked space + for linked_sp in linked_spaces: + if any(sp in linked_sp for sp in space): + space = linked_sp + break 
+ space_str = "".join(sorted(space)) + if space_str not in ret: + ret[space_str] = [] + ret[space_str].append(perm) + return ret + + +class LazyTermMap: + """ + Establishes a term map for an expression that contains information about + terms that can be mapped onto each other when permuting target indices of + the expression. + """ + + def __init__(self, expr: ExprContainer): + self._expr = expr + # init all term container objects + self._terms: tuple[TermContainer, ...] = expr.terms + # {(perms, factor): {i: other_i}} + self._term_map: \ + dict[tuple[tuple[Permutation, ...], int], dict[int, int]] = {} + + def evaluate(self, antisymmetric_result_tensor: bool = True + ) -> dict: + """ + Fully evaluates the term map of the expression by probing all + possible permutations of target indices. + Due to an ambiguous definition of the symmetry by means of products of + permutation operators + (ijk -> kij can be obtained by applying P_{ij}P_{ik} or P_{ik}P_{jk}) + it might still be possible to encounter unevaluated entries at a + later point. + + Parameters + ---------- + antisymmetric_result_tensor: bool, optional + The result tensor is either represented by an AntiSymmetricTensor + (True) or by a SymmetricTensor (False). (default: True) + """ + from .sympy_objects import AntiSymmetricTensor, SymmetricTensor + + # if we put all indices in lower bra-ket sym is not important + if antisymmetric_result_tensor: + tensor = AntiSymmetricTensor("x", tuple(), self.target_indices) + else: + tensor = SymmetricTensor("x", tuple(), self.target_indices) + tensor = ExprContainer(tensor).terms[0] + for sym in tensor.symmetry().items(): + self[sym] + return self._term_map + + def __getitem__(self, symmetry: tuple[tuple[Permutation, ...], int] + ) -> dict[int, int]: + """ + Checks whether a given symmetry as already been evaluated and probes + the expression for the symmetry if this is not the case. 
+ + Parameters + ---------- + symmetry : tuple + A tuple containing the permutations and the corresponding factor: + +1 to probe for symmetry (+ P_{pq}P_{rs}...) and + -1 to probe for antisymmetry (- P_{pq}P_{rs}...). + """ + # did we already compute the map for the desired symmetry? + if symmetry in self._term_map: + return self._term_map[symmetry] + # split the permutations according to their index space. + # invert the permutations in possible space combinations + # and check if we computed any of the partially or fully inverted + # symmetries + permutations, factor = symmetry + splitted = list( + PermutationProduct.split_in_separable_parts(permutations).items() + ) + # also check the sorted version before inverting + if not isinstance(permutations, PermutationProduct): + permutations = tuple(itertools.chain.from_iterable( + [val for _, val in sorted(splitted)] + )) + sym = (permutations, factor) + if sym in self._term_map: + return self._term_map[sym] + + invertable_subsets: list[int] = [ + i for i, (_, perms) in enumerate(splitted) if len(perms) > 1 + ] + for n_inverts in range(1, len(invertable_subsets)+1): + for to_invert in \ + itertools.combinations(invertable_subsets, n_inverts): + inv_perms: list[tuple[str, list[Permutation]]] = [] + for i, val in enumerate(splitted): + if i in to_invert: # invert the order of the permutations + inv_perms.append((val[0], val[1][::-1])) + else: + inv_perms.append(val) + inv_perms_tpl = tuple(itertools.chain.from_iterable( + [val for _, val in sorted(inv_perms)] + )) + # check if the inverted variant has been already computed + sym = (inv_perms_tpl, factor) + if sym in self._term_map: + return self._term_map[sym] + # could not find any variant in the term_map + # -> probe the expression for the original variant + assert isinstance(permutations, PermutationProduct) + return self.probe_symmetry(permutations, factor) + + @cached_property + def target_indices(self) -> tuple[Index, ...]: + """Returns the target indices of the 
expression.""" + + if self._expr.provided_target_idx is not None: + return self._expr.provided_target_idx + + # determine the target indices of each term and ensure all terms hold + # the same target indices + target = self._terms[0].target + if any(term.target != target for term in self._terms): + raise NotImplementedError("Can only create a term map for an " + "expression where each term is holding " + "the same target indices.") + return target + + @cached_member + def _prescan_terms(self) -> tuple[tuple[bool, list[int]], ...]: + """ + Prescan the terms of the expression collecting compatible terms that + might be mapped onto each other. + + Returns + ------- + tuple[bool, list] + First entry: Indicates whether the corresopnding terms have an + orbital energy denominator. + Second entry: The terms by their index. + """ + from .eri_orbenergy import EriOrbenergy + + filtered_terms = defaultdict(list) + for term_i, term in enumerate(self._terms): + # split the term in pref, orbital energy frac and remainder + term = EriOrbenergy(term) + # get the description of all objects in the remainder (eri) part + # don't include target indices in the description since thats + # what we want to probe the expr for (contracted permutations + # can be simplified, which is assumed to have happened before.) + eri_descriptions: tuple[str, ...] = tuple(sorted( + o.description(target_idx=None) + for o in term.eri.objects + )) + # space of contracted indices + idx_space = "".join(sorted( + s.space[0] + s.spin for s in term.eri.contracted + )) + # the number and length of brackets in the denominator + key = (eri_descriptions, term.denom_description(), idx_space) + filtered_terms[key].append(term_i) + # rearrange the term idx lists so the information whether they + # contain a denominator is directly available + # Also remove lists with a single entry... 
cant map them onto + # anything else anyway + return tuple( + (False, term_i_list) if key[1] is None else (True, term_i_list) + for key, term_i_list in filtered_terms.items() + if len(term_i_list) > 1 + ) + + def probe_symmetry(self, permutations: PermutationProduct, + sym_factor: int) -> dict: + """ + Probes which terms in the expression can be mapped onto each other + by applying the given symmetry. + + Parameters + ---------- + permutations : PermutationProduct + A prdocut of permutations of target indices of the expression. + sym_factor : int + Possible values: + +1 -> probe for symmetry (Term + P_{pq}P_{rs}... Term) + -1 -> probe for antisymmetry (Term - P_{pq}P_{rs}... Term) + + Returns + ------- + dict + Contains the index of terms which, when the provided permutations + are applied, become equal to other non-permuted terms. + key: The index of the permuted term. + value: The index of the term it can be mapped onto. + """ + from .reduce_expr import factor_eri_parts, factor_denom + from .simplify import simplify + + def simplify_with_denom(expr: ExprContainer) -> ExprContainer: + if expr.inner.is_number: # trivial + return expr + + factored = itertools.chain.from_iterable( + factor_denom(sub_e) for sub_e in factor_eri_parts(expr) + ) + ret = ExprContainer(0, **expr.assumptions) + for term in factored: + ret += term.factor() + return ret + + if sym_factor not in [1, -1]: + raise Inputerror(f"Invalid symmetry factor {sym_factor}. +-1 " + "is valid.") + + # check that the given permutations only contain target indices + target_indices = self.target_indices + if any(s not in target_indices + for s in itertools.chain.from_iterable(permutations)): + raise NotImplementedError("Found non target index in " + f"{permutations}. 
Target indices are " + f"{target_indices}.") + + map_contribution: dict[int, int] = {} + for has_denom, term_i_list in self._prescan_terms(): + # go through the terms and filter out terms that are symmetric or + # antisymmetric with respect to the given symmetry + relevant_terms: list[tuple[int, ExprContainer]] = [] + for term_i in term_i_list: + term: TermContainer = self._terms[term_i] + perm_term: ExprContainer = term.permute(*permutations) + # check that the permutations are valid + if perm_term.inner is S.Zero and term.inner is not S.Zero: + continue + # only look for the desired symmetry which is defined by + # sym_factor + if sym_factor == -1: # looking for antisym: P_pq X != -X + if Add(perm_term.inner, term.inner) is not S.Zero: + relevant_terms.append((term_i, perm_term)) + else: # looking for sym: P_pq X != X + if Add(perm_term.inner, -term.inner) is not S.Zero: + relevant_terms.append((term_i, perm_term)) + # choose a function for simplifying the sum/difference of 2 terms + # it might be neccessary to permute contracted indices to + # achieve equality of the 2 terms + simplify_terms = simplify_with_denom if has_denom else simplify + # now compare all relevant terms with each other + for term_i, perm_term in relevant_terms: + for other_term_i, _ in relevant_terms: + if term_i == other_term_i: # dont compare to itself + continue + # looking for antisym: X - (P_pq X) = X - X' + # P_pq X + (- X') = 0 + if sym_factor == -1: + sum = simplify_terms( + perm_term + self._terms[other_term_i] + ) + # looking for sym: X + (P_pq X) = X + X' + # P_pq X - X' = 0 + else: # +1 + sum = simplify_terms( + perm_term - self._terms[other_term_i] + ) + # was it possible to map the terms onto each other? 
+ if sum.inner is S.Zero: + map_contribution[term_i] = other_term_i + # can break the loop: if we are assuming that the + # expression is completely simplified, it will not + # be possible to find another match for term_i + # (otherwise 2 other_term_i would have to be identical) + break + self._term_map[(tuple(permutations), sym_factor)] = map_contribution + return map_contribution diff --git a/build/lib/adcgen/sympy_objects.py b/build/lib/adcgen/sympy_objects.py new file mode 100644 index 0000000..abc8162 --- /dev/null +++ b/build/lib/adcgen/sympy_objects.py @@ -0,0 +1,399 @@ +from collections.abc import Sequence + +from sympy.physics.secondquant import ( + _sort_anticommuting_fermions, ViolationOfPauliPrinciple +) +from sympy.core.logic import fuzzy_not +from sympy.core.function import DefinedFunction +from sympy import sympify, Tuple, Symbol, S, Number, Expr + +from .misc import Inputerror +from .indices import Index, _is_index_tuple, sort_idx_canonical + + +class SymbolicTensor(Expr): + """Base class for symbolic tensors.""" + + is_commutative = True + + @property + def symbol(self) -> Symbol: + """Returns the symbol of the tensor.""" + symbol = self.args[0] + assert isinstance(symbol, Symbol) + return symbol + + @property + def name(self) -> str: + """Returns the name of the tensor.""" + return self.symbol.name + + @property + def idx(self) -> tuple[Index, ...]: + """Returns all indices of the tensor.""" + raise NotImplementedError("'idx' not implemented on " + f"{self.__class__.__name__}") + + +class AntiSymmetricTensor(SymbolicTensor): + """ + Represents antisymmetric tensors + d^{pq}_{rs} = - d^{qp}_{rs} = - d^{pq}_{sr} = d^{qp}_{sr}. + Based on the implementation in 'sympy.physics.secondquant'. + + Parameters + ---------- + name : str | Symbol + The name of the tensor. + upper : Sequence[Index] | Tuple + The upper indices of the tensor. + lower : Sequence[Index] | Tuple + The lower indices of the tensor. 
+ bra_ket_sym : int | Number, optional + The bra-ket symmetry of the tensor: + - 0 no bra-ket-symmetry (d^{i}_{j} != d^{j}_{i}) + - 1 bra-ket symmetry (d^{i}_{j} = d^{j}_{i}) + - -1 bra-ket antisymmetry (d^{i}_{j} = - d^{j}_{i}) + (default: 0) + """ + + def __new__(cls, name: str | Symbol, upper: Sequence[Index] | Tuple, + lower: Sequence[Index] | Tuple, bra_ket_sym: int | Number = 0): + # sort the upper and lower indices + try: + upper_sorted, sign_u = _sort_anticommuting_fermions( + upper, key=sort_idx_canonical + ) + lower_sorted, sign_l = _sort_anticommuting_fermions( + lower, key=sort_idx_canonical + ) + except ViolationOfPauliPrinciple: + return S.Zero + # additionally account for the bra ket symmetry + # add the Index check for subs to work correctly + bra_ket_sym_imported = sympify(bra_ket_sym) + if bra_ket_sym_imported is not S.Zero and \ + all(isinstance(s, Index) for s in upper_sorted+lower_sorted): + if bra_ket_sym_imported not in [S.One, S.NegativeOne]: + raise Inputerror("Invalid bra ket symmetry given " + f"{bra_ket_sym}. 
Valid are 0, 1 or -1.") + if cls._need_bra_ket_swap(upper_sorted, lower_sorted): + upper_sorted, lower_sorted = lower_sorted, upper_sorted # swap + if bra_ket_sym_imported is S.NegativeOne: # add another -1 + sign_u += 1 + # import all quantities + name_imported = sympify(name) + upper_imported = Tuple(*upper_sorted) + lower_imported = Tuple(*lower_sorted) + # attach -1 if necessary + if (sign_u + sign_l) % 2: + return - super().__new__( + cls, name_imported, upper_imported, lower_imported, + bra_ket_sym_imported + ) + else: + return super().__new__( + cls, name_imported, upper_imported, lower_imported, + bra_ket_sym_imported + ) + + @classmethod + def _need_bra_ket_swap(cls, upper: Sequence, lower: Sequence) -> bool: + if len(upper) != len(lower): + raise NotImplementedError("Bra Ket symmetry only implemented " + "for tensors with an equal amount " + "of upper and lower indices.") + # Build the sort key for each index and collect the first, second, ... + # entries of the keys + # -> Compare each component of the sort keys individually and abort + # if it is clear, that we need or don't need to swap + # Assumes that upper indices should have the smaller keys. 
+ upper_sort_keys = (sort_idx_canonical(s) for s in upper) + lower_sort_keys = (sort_idx_canonical(s) for s in lower) + for upper_keys, lower_keys in \ + zip(zip(*upper_sort_keys), zip(*lower_sort_keys)): + if lower_keys < upper_keys: + return True + elif upper_keys < lower_keys: + return False + return False + + def _latex(self, printer) -> str: + upper = self.upper.args + lower = self.lower.args + assert _is_index_tuple(upper) and _is_index_tuple(lower) + return "{%s^{%s}_{%s}}" % ( + self.symbol, + "".join(i._latex(printer) for i in upper), + "".join(i._latex(printer) for i in lower) + ) + + def __str__(self): + return f"{self.symbol}({self.upper},{self.lower})" + + @property + def upper(self) -> Tuple: + """Returns the upper indices of the tensor.""" + upper = self.args[1] + assert isinstance(upper, Tuple) + return upper + + @property + def lower(self) -> Tuple: + """Returns the lower indices of the tensor.""" + lower = self.args[2] + assert isinstance(lower, Tuple) + return lower + + @property + def bra_ket_sym(self) -> Number: + """Returns the bra-ket symmetry of the tensor.""" + braketsym = self.args[3] + assert isinstance(braketsym, Number) + return braketsym + + def add_bra_ket_sym(self, bra_ket_sym: int | Number + ) -> 'AntiSymmetricTensor': + """ + Adds bra-ket symmetry to the tensor if none has been set yet. + + Parameters + ---------- + bra_ket_sym : int + The bra-ket symmetry to set (0, 1 and -1 are valid.) + """ + + if bra_ket_sym == self.bra_ket_sym: + return self + elif self.bra_ket_sym is S.Zero: + return self.__class__(self.symbol, self.upper, self.lower, + bra_ket_sym) + else: + raise Inputerror("bra ket symmetry already set. The original " + "indices are no longer available. Can not apply " + "any other bra ket sym.") + + @property + def idx(self) -> tuple[Index, ...]: + """ + Returns all indices of the tensor. The upper indices are listed before + the lower indices. 
class Amplitude(AntiSymmetricTensor):
    """
    Represents antisymmetric amplitudes. In contrast to the parent class,
    the lower indices are considered before the upper indices.
    """

    @property
    def idx(self) -> "tuple[Index, ...]":
        """
        Returns all indices of the amplitude: the lower indices are
        listed before the upper indices.
        """
        indices = (*self.lower.args, *self.upper.args)
        assert _is_index_tuple(indices)
        return indices
Valid are 0, 1 or -1.") + if cls._need_bra_ket_swap(upper, lower): + upper, lower = lower, upper # swap + if bra_ket_sym_imported is S.NegativeOne: + negative_sign = True + # import all quantities to sympy + name_imported = sympify(name) + upper_imported, lower_imported = Tuple(*upper), Tuple(*lower) + # attach -1 if necessary + if negative_sign: + return - super(AntiSymmetricTensor, cls).__new__( + cls, name_imported, upper_imported, lower_imported, + bra_ket_sym_imported + ) + else: + return super(AntiSymmetricTensor, cls).__new__( + cls, name_imported, upper_imported, lower_imported, + bra_ket_sym_imported + ) + + +class NonSymmetricTensor(SymbolicTensor): + """ + Represents tensors that do not have any symmetry. + + Parameters + ---------- + name : str | Symbol + The name of the tensor. + indices : Sequence[Index] | Tuple + The indices of the tensor. + """ + + def __new__(cls, name: str | Symbol, indices: Sequence[Index] | Tuple): + symbol_imported = sympify(name) + indices_imported = Tuple(*indices) + return super().__new__(cls, symbol_imported, indices_imported) + + def _latex(self, printer) -> str: + indices = self.indices.args + assert _is_index_tuple(indices) + return "{%s_{%s}}" % ( + self.symbol, + "".join(i._latex(printer) for i in indices) + ) + + def __str__(self): + return "%s%s" % self.args + + @property + def indices(self) -> Tuple: + """Returns the indices of the tensor.""" + indices = self.args[1] + assert isinstance(indices, Tuple) + return indices + + @property + def idx(self) -> tuple[Index, ...]: + """Returns the indices of the tensor.""" + idx = self.args[1].args + assert _is_index_tuple(idx) + return idx + + +class KroneckerDelta(DefinedFunction): + """ + Represents a Kronecker delta. + Based on the implementation in 'sympy.functions.special.tensor_functions'. + """ + + @classmethod + def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] + """ + Evaluates the KroneckerDelta. Adapted from sympy to also cover Spin. 
+ """ + # This is needed for subs with simultaneous=True + if not isinstance(i, Index) or not isinstance(j, Index): + return None + + diff = i - j + if diff.is_zero: + return S.One + elif fuzzy_not(diff.is_zero): + return S.Zero + + spi, spj = i.space[0], j.space[0] + valid_spaces = ["o", "v", "g", "c", "r"] + assert spi in valid_spaces and spj in valid_spaces + if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo + return S.Zero + spi, spj = i.spin, j.spin + assert spi in ["", "a", "b"] and spj in ["", "a", "b"] + if spi and spj and spi != spj: # delta_ab / delta_ba + return S.Zero + # sort the indices of the delta + if i != min(i, j, key=sort_idx_canonical): + return cls(j, i) + return None + + def _eval_power(self, exp) -> Expr: # type: ignore[override] + # we don't want exponents > 1 on deltas! + if exp.is_positive: + return self + elif exp.is_negative and exp is not S.NegativeOne: + return S.One / self + + def _latex(self, printer) -> str: + return ( + "\\delta_{" + " ".join(s._latex(printer) for s in self.idx) + "}" + ) + + @property + def idx(self) -> tuple[Index, Index]: + """Returns the indices of the Kronecker delta.""" + idx = self.args + assert _is_index_tuple(idx) and len(idx) == 2 + return idx + + @property + def preferred_and_killable(self) -> tuple[Index, Index] | None: + """ + Returns the preferred (first) and killable (second) index of the + kronecker delta. The preferred index contains at least as much + information as the killable index. Therefore, 'evaluate_deltas' + will always try to keep the preferred index in the expression. 
+ """ + i, j = self.args + assert isinstance(i, Index) and isinstance(j, Index) + space1, spin1 = i.space[0], i.spin + space2, spin2 = j.space[0], j.spin + # ensure we have no unexpected space and spin + assert ( + space1 in ["o", "v", "g", "c", "r"] + and space2 in ["o", "v", "g", "c", "r"] + ) + assert spin1 in ["", "a", "b"] and spin2 in ["", "a", "b"] + + if spin1 == spin2: # nn / aa / bb -> equal spin information + # oo / vv / cc / gg / og / vg / cg / rr + # RI indices will always end up here + if space1 == space2 or space2 == "g": + return (i, j) + else: # go / gv / gc + return (j, i) + elif spin2: # na / nb -> 2 holds more spin information + # oo / vv / cc / gg / go / gv / gc + if space1 == space2 or space1 == "g": + return (j, i) + else: # og / vg / cg -> 1 holds more space information + return None + else: # an / bn -> 1 holds more spin information + # oo / vv / cc / gg / og / vg / cg + if space1 == space2 or space2 == "g": + return (i, j) + else: # go / gv / gc -> 2 holds more space information + return None + + @property + def indices_contain_equal_information(self) -> bool: + """Whether both indices contain the same amount of information.""" + i, j = self.args + assert isinstance(i, Index) and isinstance(j, Index) + return i.space == j.space and i.spin == j.spin diff --git a/build/lib/adcgen/tensor_names.json b/build/lib/adcgen/tensor_names.json new file mode 100644 index 0000000..57309ad --- /dev/null +++ b/build/lib/adcgen/tensor_names.json @@ -0,0 +1,15 @@ +{ + "eri": "V", + "coulomb": "v", + "fock": "f", + "operator": "d", + "gs_amplitude": "t", + "gs_density": "p", + "left_adc_amplitude": "X", + "right_adc_amplitude": "Y", + "orb_energy": "e", + "sym_orb_denom": "D", + "ri_sym": "B", + "ri_asym_factor": "C", + "ri_asym_eri": "W" +} \ No newline at end of file diff --git a/build/lib/adcgen/tensor_names.py b/build/lib/adcgen/tensor_names.py new file mode 100644 index 0000000..c9302cc --- /dev/null +++ b/build/lib/adcgen/tensor_names.py @@ -0,0 +1,225 
@@ +from dataclasses import dataclass, fields +from pathlib import Path +from typing import TYPE_CHECKING +import json + +from sympy import Symbol + +from .misc import Singleton + +if TYPE_CHECKING: + from .expression import ExprContainer + + +_config_file = "tensor_names.json" + + +# NOTE: Currently it is only possible to modify the values through +# 'tensor_names.json'. It is not possible to adjust them by modifying the +# class attributes. This is less flexible but ensures that throughout +# a run of the program consistent names are used. Especially, +# due to the caching weird behaviour might be possible +# if names are changed in the middle of a run. +@dataclass(slots=True, frozen=True) +class TensorNames(metaclass=Singleton): + """ + Singleton class that is used to define the names of tensors + used throughout the code. The names can be changed by modifying + 'tensor_names.json'. By default, the following names are used, where + the attributes storing the names are given in brackets: + - antisymmetric ERI in physicist notation (eri): V + - Coulomb integrals in chemist notation (coulomb): v + - The fock matrix (fock): f + - The arbitrary N-particle operator matrix (operator): d + - Ground state amplitudes (gs_amplitude): t + Additionally, an integer representing the perturbation theoretical order + and/or 'cc' to represent complex conjugate amplitudes are appended + to the name. + - The ground state density matrix (gs_density): p + Additionally, an integer representing the perturbation theoretical order + is appended to the name. 
+ - ADC amplitudes belonging to the bra (left) state (left_adc_amplitude): X + - ADC amplitudes belonging to the ket (right) state + (right_adc_amplitude): Y + - Orbital energies (orb_energy): e + - Symbolic orbital energy denominators [(e_i - e_a)^-1 -> D^{i}_{a}] + (sym_orb_denom): D + """ + eri: str = "V" + coulomb: str = "v" + ri_sym: str = "B" + ri_asym_factor: str = "C" + ri_asym_eri: str = "W" + fock: str = "f" + operator: str = "d" + gs_amplitude: str = "t" + gs_density: str = "p" + left_adc_amplitude: str = "X" + right_adc_amplitude: str = "Y" + orb_energy: str = "e" + sym_orb_denom: str = "D" + + @staticmethod + def _from_config() -> 'TensorNames': + """ + Construct the TensorNames instance with values from the config file + 'tensor_names.json'. + """ + config_file = Path(__file__).parent.resolve() / _config_file + tensor_names: dict[str, str] = json.load(open(config_file, "r")) + return TensorNames(**tensor_names) + + @staticmethod + def defaults() -> dict[str, str]: + """Returns the default values of all fields.""" + ret = {} + for field in fields(TensorNames): + val = field.default + assert isinstance(val, str) + ret[field.name] = val + return ret + + def map_default_name(self, name: str) -> str: + """ + Takes a tensor name, checks whether it corresponds to any of the + default names and returns the currently used name. 
+ """ + # split the name in base and extension for t-amplitudes and + # ground state densities + if (split_name := _split_default_t_amplitude(name)) is not None: + _, ext = split_name + return self.gs_amplitude + ext + elif (split_name := _split_default_gs_density(name)) is not None: + _, ext = split_name + return self.gs_density + ext + + for field in fields(self): + if field.default == name: + return getattr(self, field.name) + return name # found no matching default name -> return input + + def rename_tensors(self, expr: "ExprContainer") -> "ExprContainer": + """ + Renames all tensors in the expression from their default names to the + currently configured names. Note that only the name of the tensors + is changed, while their type (Amplitude, AntiSymmetricTensor, ...) + remains the same. + + Parameters + ---------- + expr: Expr + The expression using the default tensor names. + """ + from .expression import ExprContainer + + assert isinstance(expr, ExprContainer) + for field in fields(self): + default = field.default + assert isinstance(default, str) + new = getattr(self, field.name) + if default == new: # nothing to do + continue + # find the necessary substitutions + if field.name == "gs_amplitude": # special case for t_amplitudes + subs: list[tuple[str, str]] = [] + for sym in expr.inner.atoms(Symbol): + assert isinstance(sym, Symbol) + split_name = _split_default_t_amplitude(sym.name) + if split_name is None: + continue + subs.append((sym.name, new + split_name[1])) + elif field.name == "gs_density": # and for gs densities + subs = [] + for sym in expr.inner.atoms(Symbol): + assert isinstance(sym, Symbol) + split_name = _split_default_gs_density(sym.name) + if split_name is None: + continue + subs.append((sym.name, new + split_name[1])) + else: + subs = [(default, new)] + + for old, new in subs: + expr.rename_tensor(old, new) + return expr + + +# init the TensorNames instance and overwrite the defaults with +# values from the config file +tensor_names = 
TensorNames._from_config() + + +def is_t_amplitude(name: str) -> bool: + """ + Checks whether the tensor name belongs to a ground state amplitude. + Possible patterns for names are (assuming the default gs_amplitude name t): + - t + - tcc (complex conjugate amplitude) + - tn (where n is any positive integer) + - tncc + """ + base, order = split_t_amplitude_name(name) + order = order.replace("c", "") + if order: + return base == tensor_names.gs_amplitude and order.isnumeric() + else: + return base == tensor_names.gs_amplitude + + +def split_t_amplitude_name(name: str) -> tuple[str, str]: + """ + Split the name of a ground state amplitude in base and extension, e.g., + 't3cc' -> ('t', '3cc'). + """ + n = len(tensor_names.gs_amplitude) + return name[:n], name[n:] + + +def is_adc_amplitude(name: str) -> bool: + """Checks whether the tensor name belongs to an ADC amplitude.""" + return (name == tensor_names.left_adc_amplitude or + name == tensor_names.right_adc_amplitude) + + +def is_gs_density(name: str) -> bool: + """ + Checks whether the tensor name belongs to the ground state density matrix + """ + base, order = split_gs_density_name(name) + if order: + return base == tensor_names.gs_density and order.isnumeric() + else: + return base == tensor_names.gs_density + + +def split_gs_density_name(name: str) -> tuple[str, str]: + """Splits the name of a ground state density matrix in base and order.""" + n = len(tensor_names.gs_density) + return name[:n], name[n:] + + +def _split_default_t_amplitude(name: str) -> tuple[str, str] | None: + """ + Checks whether the name belongs to a t amplitude with the default name + and returns the base and extension of the name. + """ + default = tensor_names.defaults()["gs_amplitude"] + base, ext = name[:len(default)], name[len(default):] + if base != default: + return None + order = ext.replace("c", "") + if order and not order.isnumeric(): + return None + return base, ext + + +def _split_default_gs_density(name: str) -> tuple[str, str] | 
None: + """ + Checks whether the name belongs to a ground state density with the default + name and returns the base and extension of the name. + """ + default = tensor_names.defaults()["gs_density"] + base, order = name[:len(default)], name[len(default):] + if base != default or (order and not order.isnumeric()): + return None + return base, order diff --git a/tests/contraction_test.py b/tests/contraction_test.py index f67f8b1..f3986de 100644 --- a/tests/contraction_test.py +++ b/tests/contraction_test.py @@ -42,15 +42,15 @@ def test_scaling(self): target_indices = (i, j) contr = Contraction(indices, names, target_indices) scaling = contr.scaling - comp = ScalingComponent(3, 0, 0, 3, 0) - mem = ScalingComponent(2, 0, 0, 2, 0) + comp = ScalingComponent(3, 0, 0, 3, 0, 0) + mem = ScalingComponent(2, 0, 0, 2, 0, 0) assert scaling.computational == comp assert scaling.memory == mem assert scaling == Scaling(comp, mem) def test_sizes(self): # test the automatic evaluation of the size of the general space - sizes = {"occ": 1, "virt": 2, "core": 3} + sizes = {"occ": 1, "virt": 2, "core": 3, "ri": 0} res = Sizes.from_dict(sizes) sizes["general"] = 6 assert sizes == asdict(res) @@ -59,17 +59,20 @@ def test_sizes(self): assert sizes == asdict(res) def test_evalute_costs(self): - sizes = {"occ": 3, "virt": 5, "core": 2, "general": 7} + sizes = {"occ": 3, "virt": 5, "core": 2, "general": 7, "ri": 8} sizes = Sizes.from_dict(sizes) - comp = ScalingComponent(42, 1, 2, 3, 4) - mem = ScalingComponent(42, 4, 3, 2, 1) + comp = ScalingComponent(42, 1, 2, 3, 4, 5) + mem = ScalingComponent(42, 5, 4, 3, 2, 1) scaling = Scaling(comp, mem) - assert comp.evaluate_costs(sizes) == 75600 - assert mem.evaluate_costs(sizes) == 5402250 - assert scaling.evaluate_costs(sizes) == (75600, 5402250) + print(comp.evaluate_costs(sizes)) + print(mem.evaluate_costs(sizes)) + print(scaling.evaluate_costs(sizes)) + assert comp.evaluate_costs(sizes) == 2477260800 + assert mem.evaluate_costs(sizes) == 9075780000 
+ assert scaling.evaluate_costs(sizes) == (2477260800, 9075780000) # ensure that zero sized spaces are ignored - sizes = {"occ": 3, "virt": 5, "core": 0} # general == 8 + sizes = {"occ": 3, "virt": 5, "core": 0, "ri": 0} # general == 8 sizes = Sizes.from_dict(sizes) assert comp.evaluate_costs(sizes) == 5400 - comp = ScalingComponent(42, 0, 1, 2, 3) + comp = ScalingComponent(42, 0, 1, 2, 3, 0) assert comp.evaluate_costs(sizes) == 45 diff --git a/tests/indices_test.py b/tests/indices_test.py index 40827e0..7468b37 100644 --- a/tests/indices_test.py +++ b/tests/indices_test.py @@ -8,10 +8,15 @@ def test_get_indices(self): assert idx.get_indices("ijk") == idx.get_indices("ijk") assert idx.get_indices("ijk", "aba") == idx.get_indices("ijk", "aba") assert idx.get_indices("a", "a") != idx.get_indices("a", "b") + assert idx.get_indices("PQ") == idx.get_indices("PQ") res = idx.get_indices("Ij", "ba") I, j = res[("core", "b")].pop(), res[("occ", "a")].pop() assert I.space == "core" and I.spin == "b" assert j.space == "occ" and j.spin == "a" + res = idx.get_indices("Pa") + P, a = res[("ri", "")].pop(), res[("virt", "")].pop() + assert P.space == "ri" and P.spin == "" + assert a.space == "virt" and a.spin == "" def test_get_generic_indices(self): # ensure that generic indices don't overlap diff --git a/tests/reference_data/generate_data.py b/tests/reference_data/generate_data.py index fefae77..8e5c77e 100644 --- a/tests/reference_data/generate_data.py +++ b/tests/reference_data/generate_data.py @@ -10,6 +10,8 @@ from adcgen.secular_matrix import SecularMatrix from adcgen.simplify import simplify, remove_tensor from adcgen.tensor_names import tensor_names +from adcgen.resolution_of_identity import apply_resolution_of_identity +from adcgen.spatial_orbitals import transform_to_spatial_orbitals from adcgen import sort_expr as sort import itertools @@ -360,6 +362,49 @@ def gen_adc_properties_trans_moment(self): dump["real_transition_dm"][block] = str(expr) write_json(results, 
outfile) + def gen_ri_gs_energies(self): + results: dict = {} + outfile = "ri_gs_energy.json" + + variations = itertools.product(['mp', 're'], [0, 1, 2], ['r', 'u'], + ['sym', 'asym']) + + for variant, order, restriction, symmetry in variations: + if variant not in results: + results[variant] = {} + if order not in results[variant]: + results[variant][order] = {} + if restriction not in results[variant][order]: + results[variant][order][restriction] = {} + gs = self.gs[variant] + gs_energy = ExprContainer(gs.energy(order), real=True) + restricted = restriction == 'r' + symmetric = symmetry == 'sym' + gs_energy = transform_to_spatial_orbitals(gs_energy, '', '', + restricted=restricted) + gs_energy = apply_resolution_of_identity(gs_energy, symmetric) + gs_energy.substitute_contracted() + results[variant][order][restriction][symmetry] = str(gs_energy) + write_json(results, outfile) + + def gen_spatial_gs_energies(self): + outfile = "spatial_gs_energy.json" + + results: dict = {} + for variant in ['mp', 're']: + results[variant] = {} + gs = self.gs[variant] + for order in [0, 1, 2]: + results[variant][order] = {} + for restriction in ['r', 'u']: + energy = ExprContainer(gs.energy(order), real=True) + restr = restriction == 'r' + energy = transform_to_spatial_orbitals(energy, '', '', + restricted=restr) + energy.substitute_contracted() + results[variant][order][restriction] = str(energy) + write_json(results, outfile) + def write_json(data, filename): json.dump(data, open(filename, 'w'), indent=2) diff --git a/tests/reference_data/gs_energy.json b/tests/reference_data/gs_energy.json index aa6e000..f348233 100644 --- a/tests/reference_data/gs_energy.json +++ b/tests/reference_data/gs_energy.json @@ -2,11 +2,11 @@ "mp": { "0": "{f^{i}_{i}}", "1": "- \\frac{{V^{ij}_{ij}}}{2}", - "2": "- \\frac{{V^{ij}_{ab}} {t1^{ab}_{ij}}}{4}" + "2": "- \\frac{{t1^{ab}_{ij}} {V^{ij}_{ab}}}{4}" }, "re": { "0": "- \\frac{{V^{ij}_{ij}}}{2} + {f^{i}_{i}}", "1": "0", - "2": "- 
\\frac{{V^{ij}_{ab}} {t1^{ab}_{ij}}}{4}" + "2": "- \\frac{{t1^{ab}_{ij}} {V^{ij}_{ab}}}{4}" } } \ No newline at end of file diff --git a/tests/reference_data/isr_precursor.json b/tests/reference_data/isr_precursor.json index 2dfbd4f..c0466f7 100644 --- a/tests/reference_data/isr_precursor.json +++ b/tests/reference_data/isr_precursor.json @@ -6,12 +6,12 @@ "ket": "\\left\\{{a^\\dagger_{a}} a_{i}\\right\\}" }, "1": { - "bra": "- \\frac{{t1cc^{a3b3}_{i3j3}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a3} a_{b3} {a^\\dagger_{i3}} {a^\\dagger_{j3}}\\right\\}}{4}", - "ket": "\\frac{{t1^{a4b4}_{j4k4}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a4}} {a^\\dagger_{b4}} a_{j4} a_{k4}\\right\\}}{4}" + "bra": "- \\frac{{t1cc^{b210c210}_{k240l240}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{b210} a_{c210} {a^\\dagger_{k240}} {a^\\dagger_{l240}}\\right\\}}{4}", + "ket": "\\frac{{t1^{g377h377}_{k432l432}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g377}} {a^\\dagger_{h377}} a_{k432} a_{l432}\\right\\}}{4}" }, "2": { - "bra": "- {t2^{a}_{i}} + {t2cc^{a5}_{k5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} {a^\\dagger_{k5}}\\right\\} - \\frac{{t2cc^{a5b5}_{k5l5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} {a^\\dagger_{k5}} {a^\\dagger_{l5}}\\right\\}}{4} - \\frac{{t2cc^{a5b5c5}_{k5l5m5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} a_{c5} {a^\\dagger_{k5}} {a^\\dagger_{l5}} {a^\\dagger_{m5}}\\right\\}}{36} - \\frac{{t2cc^{a5b5c5d5}_{k5l5m5n5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} a_{c5} a_{d5} {a^\\dagger_{k5}} {a^\\dagger_{l5}} {a^\\dagger_{m5}} {a^\\dagger_{n5}}\\right\\}}{576}", - "ket": "{t2^{e9}_{l10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} a_{l10}\\right\\} + \\frac{{t2^{e9f9}_{l10m10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} {a^\\dagger_{f9}} a_{l10} a_{m10}\\right\\}}{4} - 
\\frac{{t2^{e9f9g9}_{l10m10n10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} {a^\\dagger_{f9}} {a^\\dagger_{g9}} a_{l10} a_{m10} a_{n10}\\right\\}}{36} + \\frac{{t2^{e9f9g9h9}_{l10m10n10o10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} {a^\\dagger_{f9}} {a^\\dagger_{g9}} {a^\\dagger_{h9}} a_{l10} a_{m10} a_{n10} a_{o10}\\right\\}}{576} - {t2cc^{a}_{i}}" + "bra": "- {t2^{a}_{i}} + {t2cc^{d242}_{j277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} {a^\\dagger_{j277}}\\right\\} - \\frac{{t2cc^{d242e242}_{j277k277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} {a^\\dagger_{j277}} {a^\\dagger_{k277}}\\right\\}}{4} - \\frac{{t2cc^{d242e242f242}_{j277k277l277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} a_{f242} {a^\\dagger_{j277}} {a^\\dagger_{k277}} {a^\\dagger_{l277}}\\right\\}}{36} - \\frac{{t2cc^{d242e242f242g242}_{j277k277l277m277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} a_{f242} a_{g242} {a^\\dagger_{j277}} {a^\\dagger_{k277}} {a^\\dagger_{l277}} {a^\\dagger_{m277}}\\right\\}}{576}", + "ket": "{t2^{g378}_{l433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g378}} a_{l433}\\right\\} + \\frac{{t2^{g378h378}_{l433m433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433}\\right\\}}{4} - \\frac{{t2^{g378h378a379}_{l433m433n433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a379}} {a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433} a_{n433}\\right\\}}{36} + \\frac{{t2^{g378h378a379b379}_{l433m433n433o433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a379}} {a^\\dagger_{b379}} {a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433} a_{n433} a_{o433}\\right\\}}{576} - {t2cc^{a}_{i}}" } }, "pphh": { @@ -20,12 +20,12 @@ "ket": "\\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\}" }, "1": { - "bra": 
"- {t1^{ab}_{ij}} + \\frac{{t1cc^{g14h14}_{l16m16}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g14} a_{h14} {a^\\dagger_{l16}} {a^\\dagger_{m16}}\\right\\}}{4}", - "ket": "\\frac{{t1^{b24c24}_{j27k27}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b24}} {a^\\dagger_{c24}} a_{j27} a_{k27}\\right\\}}{4} - {t1cc^{ab}_{ij}}" + "bra": "- {t1^{ab}_{ij}} + \\frac{{t1cc^{a384b384}_{l439m439}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a384} a_{b384} {a^\\dagger_{l439}} {a^\\dagger_{m439}}\\right\\}}{4}", + "ket": "\\frac{{t1^{d393e393}_{j450k450}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{d393}} {a^\\dagger_{e393}} a_{j450} a_{k450}\\right\\}}{4} - {t1cc^{ab}_{ij}}" }, "2": { - "bra": "- {t2cc^{e33}_{o37}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} {a^\\dagger_{o37}}\\right\\} - \\frac{{t2cc^{e33f33}_{o37i38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} {a^\\dagger_{i38}} {a^\\dagger_{o37}}\\right\\}}{4} + \\frac{{t2cc^{e33f33g33}_{o37i38j38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} a_{g33} {a^\\dagger_{i38}} {a^\\dagger_{j38}} {a^\\dagger_{o37}}\\right\\}}{36} - \\frac{{t2cc^{e33f33g33h33}_{o37i38j38k38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} a_{g33} a_{h33} {a^\\dagger_{i38}} {a^\\dagger_{j38}} {a^\\dagger_{k38}} {a^\\dagger_{o37}}\\right\\}}{576} - \\left(\\frac{{t1^{ab}_{ij}} {t1cc^{g34h34}_{k39l39}} \\left\\{a_{g34} a_{h34} {a^\\dagger_{k39}} {a^\\dagger_{l39}}\\right\\}}{4} + {t2^{ab}_{ij}}\\right) - \\left({t2^{a}_{i}} \\left\\{a_{b} {a^\\dagger_{j}}\\right\\} - {t2^{a}_{j}} \\left\\{a_{b} {a^\\dagger_{i}}\\right\\} - {t2^{b}_{i}} \\left\\{a_{a} {a^\\dagger_{j}}\\right\\} + {t2^{b}_{j}} \\left\\{a_{a} 
{a^\\dagger_{i}}\\right\\}\\right)", - "ket": "{t2^{h84}_{m96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{h84}} a_{m96}\\right\\} - \\frac{{t2^{h84a85}_{m96n96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{h84}} a_{m96} a_{n96}\\right\\}}{4} - \\frac{{t2^{h84a85b85}_{m96n96o96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{b85}} {a^\\dagger_{h84}} a_{m96} a_{n96} a_{o96}\\right\\}}{36} + \\frac{{t2^{h84a85b85c85}_{m96n96o96i97}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{b85}} {a^\\dagger_{c85}} {a^\\dagger_{h84}} a_{i97} a_{m96} a_{n96} a_{o96}\\right\\}}{576} - \\left(\\frac{{t1^{b87c87}_{j99k99}} {t1cc^{ab}_{ij}} \\left\\{{a^\\dagger_{b87}} {a^\\dagger_{c87}} a_{j99} a_{k99}\\right\\}}{4} + {t2cc^{ab}_{ij}}\\right) - \\left(- {t2cc^{a}_{i}} \\left\\{{a^\\dagger_{b}} a_{j}\\right\\} + {t2cc^{a}_{j}} \\left\\{{a^\\dagger_{b}} a_{i}\\right\\} + {t2cc^{b}_{i}} \\left\\{{a^\\dagger_{a}} a_{j}\\right\\} - {t2cc^{b}_{j}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\}\\right)" + "bra": "- {t2cc^{g402}_{o460}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g402} {a^\\dagger_{o460}}\\right\\} - \\frac{{t2cc^{g402h402}_{o460i461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{o460}}\\right\\}}{4} + \\frac{{t2cc^{g402h402a403}_{o460i461j461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a403} a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{j461}} {a^\\dagger_{o460}}\\right\\}}{36} - \\frac{{t2cc^{g402h402a403b403}_{o460i461j461k461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a403} a_{b403} a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{j461}} {a^\\dagger_{k461}} 
{a^\\dagger_{o460}}\\right\\}}{576} - \\left(\\frac{{t1^{ab}_{ij}} {t1cc^{a404b404}_{k462l462}} \\left\\{a_{a404} a_{b404} {a^\\dagger_{k462}} {a^\\dagger_{l462}}\\right\\}}{4} + {t2^{ab}_{ij}}\\right) - \\left({t2^{a}_{i}} \\left\\{a_{b} {a^\\dagger_{j}}\\right\\} - {t2^{a}_{j}} \\left\\{a_{b} {a^\\dagger_{i}}\\right\\} - {t2^{b}_{i}} \\left\\{a_{a} {a^\\dagger_{j}}\\right\\} + {t2^{b}_{j}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\}\\right)", + "ket": "{t2^{b454}_{m519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} a_{m519}\\right\\} + \\frac{{t2^{b454c454}_{m519n519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} a_{m519} a_{n519}\\right\\}}{4} - \\frac{{t2^{b454c454d454}_{m519n519o519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} {a^\\dagger_{d454}} a_{m519} a_{n519} a_{o519}\\right\\}}{36} - \\frac{{t2^{b454c454d454e454}_{m519n519o519i520}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} {a^\\dagger_{d454}} {a^\\dagger_{e454}} a_{i520} a_{m519} a_{n519} a_{o519}\\right\\}}{576} - \\left(\\frac{{t1^{d456e456}_{j522k522}} {t1cc^{ab}_{ij}} \\left\\{{a^\\dagger_{d456}} {a^\\dagger_{e456}} a_{j522} a_{k522}\\right\\}}{4} + {t2cc^{ab}_{ij}}\\right) - \\left(- {t2cc^{a}_{i}} \\left\\{{a^\\dagger_{b}} a_{j}\\right\\} + {t2cc^{a}_{j}} \\left\\{{a^\\dagger_{b}} a_{i}\\right\\} + {t2cc^{b}_{i}} \\left\\{{a^\\dagger_{a}} a_{j}\\right\\} - {t2cc^{b}_{j}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\}\\right)" } } } diff --git a/tests/reference_data/isr_precursor_overlap.json b/tests/reference_data/isr_precursor_overlap.json index 7775a74..50e823e 100644 --- a/tests/reference_data/isr_precursor_overlap.json +++ b/tests/reference_data/isr_precursor_overlap.json @@ -3,7 +3,7 @@ "ph,ph": { "0": "\\delta_{a b} \\delta_{i j}", "1": "0", 
- "2": "- \\frac{\\delta_{a b} {t1^{a3a4}_{ii3}} {t1cc^{a3a4}_{ji3}}}{2} - \\frac{\\delta_{i j} {t1^{aa3}_{i3j3}} {t1cc^{ba3}_{i3j3}}}{2} + {t1^{aa3}_{ii3}} {t1cc^{ba3}_{ji3}}" + "2": "- \\frac{\\delta_{a b} {t1^{e205f205}_{im234}} {t1cc^{e205f205}_{jm234}}}{2} - \\frac{\\delta_{i j} {t1^{ae205}_{m234n234}} {t1cc^{be205}_{m234n234}}}{2} + {t1^{ae205}_{im234}} {t1cc^{be205}_{jm234}}" } } } \ No newline at end of file diff --git a/tests/reference_data/properties_expectation_value.json b/tests/reference_data/properties_expectation_value.json index 5bc8e06..3075bcd 100644 --- a/tests/reference_data/properties_expectation_value.json +++ b/tests/reference_data/properties_expectation_value.json @@ -18,8 +18,8 @@ } }, "2": { - "expectation_value": "- {X^{a}_{i}} {Y^{a}_{j}} {d^{b}_{i}} {t2cc^{b}_{j}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{c}_{d}} {t1^{bd}_{ik}} {t1cc^{bc}_{jk}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{b}} {t2^{b}_{i}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{i}} - \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{l}} {t1^{bc}_{ik}} {t1cc^{bc}_{kl}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{k}_{i}} {t1^{bc}_{kl}} {t1cc^{bc}_{jl}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{l}_{k}} {t1^{bc}_{il}} {t1cc^{bc}_{jk}}}{2} + {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{c}} {t1^{cd}_{jk}} {t1cc^{bd}_{jk}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{j}} {t2cc^{b}_{j}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{c}_{b}} {t1^{ad}_{jk}} {t1cc^{cd}_{jk}}}{4} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{c}_{d}} {t1^{ad}_{jk}} {t1cc^{bc}_{jk}}}{2} - {X^{a}_{i}} {Y^{b}_{i}} {d^{j}_{b}} {t2^{a}_{j}} - {X^{a}_{i}} {Y^{b}_{i}} {d^{l}_{j}} {t1^{ac}_{kl}} {t1cc^{bc}_{jk}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{a}_{c}} {t1^{cd}_{ik}} {t1cc^{bd}_{jk}}}{2} + {X^{a}_{i}} {Y^{b}_{j}} {d^{c}_{d}} {t1^{ad}_{ik}} {t1cc^{bc}_{jk}} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{d}_{b}} {t1^{ac}_{ik}} {t1cc^{cd}_{jk}}}{2} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{j}_{l}} {t1^{ac}_{ik}} {t1cc^{bc}_{kl}}}{2} - \\frac{{X^{a}_{i}} 
{Y^{b}_{j}} {d^{k}_{i}} {t1^{ac}_{kl}} {t1cc^{bc}_{jl}}}{2} - {X^{a}_{i}} {Y^{b}_{j}} {d^{l}_{k}} {t1^{ac}_{il}} {t1cc^{bc}_{jk}} + 2 {X^{a}_{i}} {Y^{ab}_{ij}} {d^{c}_{k}} {t1cc^{bc}_{jk}} - 2 {X^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {Y^{ab}_{jk}} {d^{c}_{i}} {t1cc^{bc}_{jk}} + {X^{a}_{i}} {Y^{bc}_{ij}} {d^{a}_{k}} {t1cc^{bc}_{jk}} + 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{b}_{i}} - 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{k}_{c}} {t1^{bc}_{ik}} - {X^{ab}_{ij}} {Y^{b}_{k}} {d^{k}_{c}} {t1^{ac}_{ij}} - {X^{ab}_{ij}} {Y^{c}_{j}} {d^{k}_{c}} {t1^{ab}_{ik}} + 2 {X^{ab}_{ij}} {Y^{ab}_{jk}} {d^{k}_{i}} + 2 {X^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", - "real_symmetric_state_expectation_value": "- {Y^{a}_{i}} {Y^{a}_{j}} {d^{c}_{d}} {t1^{bc}_{jk}} {t1^{bd}_{ik}} - 2 {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{b}} {t2^{b}_{j}} - {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{j}} - \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {d^{j}_{l}} {t1^{bc}_{ik}} {t1^{bc}_{kl}}}{2} + \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {d^{k}_{l}} {t1^{bc}_{il}} {t1^{bc}_{jk}}}{2} + {Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{c}} {t1^{bd}_{jk}} {t1^{cd}_{jk}}}{2} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {d^{c}_{d}} {t1^{ad}_{jk}} {t1^{bc}_{jk}}}{2} - 2 {Y^{a}_{i}} {Y^{b}_{i}} {d^{j}_{a}} {t2^{b}_{j}} - {Y^{a}_{i}} {Y^{b}_{i}} {d^{j}_{l}} {t1^{ac}_{kl}} {t1^{bc}_{jk}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{a}_{c}} {t1^{bd}_{jk}} {t1^{cd}_{ik}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{c}_{d}} {t1^{ad}_{ik}} {t1^{bc}_{jk}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{j}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{kl}} - {Y^{a}_{i}} {Y^{b}_{j}} {d^{k}_{l}} {t1^{ac}_{il}} {t1^{bc}_{jk}} - 4 {Y^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + 2 {Y^{a}_{i}} {Y^{ab}_{jk}} {d^{i}_{c}} {t1^{bc}_{jk}} + 2 {Y^{a}_{i}} {Y^{bc}_{ij}} {d^{k}_{a}} {t1^{bc}_{jk}} - 4 {Y^{a}_{j}} {Y^{ab}_{ij}} {d^{k}_{c}} {t1^{bc}_{ik}} + 2 {Y^{ab}_{ij}} {Y^{ab}_{jk}} {d^{i}_{k}} + 2 {Y^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", + "expectation_value": "- \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{ik}} {t1cc^{bc}_{kl}} 
{d^{j}_{l}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{il}} {t1cc^{bc}_{jk}} {d^{l}_{k}}}{2} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{kl}} {t1cc^{bc}_{jl}} {d^{k}_{i}}}{4} - {X^{a}_{i}} {Y^{a}_{j}} {t1^{bd}_{ik}} {t1cc^{bc}_{jk}} {d^{c}_{d}} - {X^{a}_{i}} {Y^{a}_{j}} {t2^{b}_{i}} {d^{j}_{b}} - {X^{a}_{i}} {Y^{a}_{j}} {t2cc^{b}_{j}} {d^{b}_{i}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{i}} + \\frac{{X^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{jk}} {t1cc^{cd}_{jk}} {d^{d}_{b}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1cc^{bc}_{jk}} {d^{l}_{j}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {t1^{ad}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{d}}}{2} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {t1^{cd}_{jk}} {t1cc^{bd}_{jk}} {d^{a}_{c}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {t2^{a}_{j}} {d^{j}_{b}} - {X^{a}_{i}} {Y^{b}_{i}} {t2cc^{b}_{j}} {d^{a}_{j}} + {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1cc^{bc}_{kl}} {d^{j}_{l}}}{2} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1cc^{cd}_{jk}} {d^{d}_{b}}}{2} - {X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{il}} {t1cc^{bc}_{jk}} {d^{l}_{k}} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{kl}} {t1cc^{bc}_{jl}} {d^{k}_{i}}}{2} + {X^{a}_{i}} {Y^{b}_{j}} {t1^{ad}_{ik}} {t1cc^{bc}_{jk}} {d^{c}_{d}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{cd}_{ik}} {t1cc^{bd}_{jk}} {d^{a}_{c}}}{2} + 2 {X^{a}_{i}} {Y^{ab}_{ij}} {t1cc^{bc}_{jk}} {d^{c}_{k}} - 2 {X^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {Y^{ab}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{i}} + {X^{a}_{i}} {Y^{bc}_{ij}} {t1cc^{bc}_{jk}} {d^{a}_{k}} - 2 {X^{ab}_{ij}} {Y^{a}_{j}} {t1^{bc}_{ik}} {d^{k}_{c}} + 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{b}_{i}} - {X^{ab}_{ij}} {Y^{b}_{k}} {t1^{ac}_{ij}} {d^{k}_{c}} - {X^{ab}_{ij}} {Y^{c}_{j}} {t1^{ab}_{ik}} {d^{k}_{c}} + 2 {X^{ab}_{ij}} {Y^{ab}_{jk}} {d^{k}_{i}} + 2 {X^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", + "real_symmetric_state_expectation_value": "- \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{ik}} {t1^{bc}_{kl}} {d^{j}_{l}}}{2} + \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{il}} 
{t1^{bc}_{jk}} {d^{k}_{l}}}{2} - {Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{jk}} {t1^{bd}_{ik}} {d^{c}_{d}} - 2 {Y^{a}_{i}} {Y^{a}_{j}} {t2^{b}_{i}} {d^{j}_{b}} - {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{j}} - {Y^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1^{bc}_{jk}} {d^{j}_{l}} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {t1^{ad}_{jk}} {t1^{bc}_{jk}} {d^{c}_{d}}}{2} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {t1^{bd}_{jk}} {t1^{cd}_{jk}} {d^{a}_{c}}}{2} - 2 {Y^{a}_{i}} {Y^{b}_{i}} {t2^{a}_{j}} {d^{j}_{b}} + {Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1^{bc}_{kl}} {d^{j}_{l}} - {Y^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{il}} {t1^{bc}_{jk}} {d^{k}_{l}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{ad}_{ik}} {t1^{bc}_{jk}} {d^{c}_{d}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{bd}_{jk}} {t1^{cd}_{ik}} {d^{a}_{c}} - 4 {Y^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + 2 {Y^{a}_{i}} {Y^{ab}_{jk}} {t1^{bc}_{jk}} {d^{i}_{c}} + 2 {Y^{a}_{i}} {Y^{bc}_{ij}} {t1^{bc}_{jk}} {d^{k}_{a}} - 4 {Y^{a}_{j}} {Y^{ab}_{ij}} {t1^{bc}_{ik}} {d^{k}_{c}} + 2 {Y^{ab}_{ij}} {Y^{ab}_{jk}} {d^{i}_{k}} + 2 {Y^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", "real_symmetric_state_dm": { "vv": "{Y^{a}_{i}} {Y^{b}_{i}} - \\frac{{Y^{a}_{i}} {Y^{c}_{i}} {p2^{b}_{c}}}{2} + \\frac{{Y^{a}_{i}} {Y^{c}_{j}} {t1^{bd}_{ik}} {t1^{cd}_{jk}}}{2} - \\frac{{Y^{b}_{i}} {Y^{c}_{i}} {p2^{a}_{c}}}{2} + \\frac{{Y^{b}_{i}} {Y^{c}_{j}} {t1^{ad}_{ik}} {t1^{cd}_{jk}}}{2} - {Y^{c}_{i}} {Y^{c}_{j}} {t1^{ad}_{ik}} {t1^{bd}_{jk}} - \\frac{{Y^{c}_{i}} {Y^{d}_{i}} {t1^{ac}_{jk}} {t1^{bd}_{jk}}}{2} + {Y^{c}_{i}} {Y^{d}_{j}} {t1^{ac}_{ik}} {t1^{bd}_{jk}} + 2 {Y^{ac}_{ij}} {Y^{bc}_{ij}}", "oo": "- {Y^{a}_{i}} {Y^{a}_{j}} - \\frac{{Y^{a}_{i}} {Y^{a}_{k}} {p2^{j}_{k}}}{2} - \\frac{{Y^{a}_{j}} {Y^{a}_{k}} {p2^{i}_{k}}}{2} + \\frac{{Y^{a}_{k}} {Y^{a}_{l}} {t1^{bc}_{ik}} {t1^{bc}_{jl}}}{2} - {Y^{a}_{k}} {Y^{b}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{jl}} + \\frac{{Y^{a}_{l}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1^{bc}_{jk}}}{2} + \\frac{{Y^{a}_{l}} {Y^{b}_{j}} {t1^{ac}_{kl}} {t1^{bc}_{ik}}}{2} + {Y^{a}_{l}} 
{Y^{b}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{jk}} - 2 {Y^{ab}_{ik}} {Y^{ab}_{jk}}", diff --git a/tests/reference_data/properties_trans_moment.json b/tests/reference_data/properties_trans_moment.json index 2ee7a6f..a4e7329 100644 --- a/tests/reference_data/properties_trans_moment.json +++ b/tests/reference_data/properties_trans_moment.json @@ -9,16 +9,16 @@ } }, "1": { - "expectation_value": "{X^{a}_{i}} {d^{a}_{i}} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}}", - "real_expectation_value": "{X^{a}_{i}} {d^{a}_{i}} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}}", + "expectation_value": "- {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}}", + "real_expectation_value": "- {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}}", "real_transition_dm": { "vo": "{X^{a}_{i}}", "ov": "- {X^{b}_{j}} {t1^{ab}_{ij}}" } }, "2": { - "expectation_value": "{X^{a}_{i}} {d^{a}_{b}} {t2^{b}_{i}} + {X^{a}_{i}} {d^{a}_{i}} + \\frac{{X^{a}_{i}} {d^{a}_{k}} {t1^{bc}_{ij}} {t1cc^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{i}} {t1^{ab}_{jk}} {t1cc^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{k}} {t1^{ab}_{ij}} {t1cc^{bc}_{jk}}}{2} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{b}} {t2^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{i}} {t2^{a}_{j}} - {X^{ab}_{ij}} {d^{a}_{c}} {t1^{bc}_{ij}} - {X^{ab}_{ij}} {d^{k}_{j}} {t1^{ab}_{ik}}", - "real_expectation_value": "{X^{a}_{i}} {d^{a}_{b}} {t2^{b}_{i}} + {X^{a}_{i}} {d^{a}_{i}} + \\frac{{X^{a}_{i}} {d^{a}_{k}} {t1^{bc}_{ij}} {t1^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{i}} {t1^{ab}_{jk}} {t1^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{k}} {t1^{ab}_{ij}} {t1^{bc}_{jk}}}{2} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{b}} {t2^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{i}} {t2^{a}_{j}} - {X^{ab}_{ij}} {d^{a}_{c}} {t1^{bc}_{ij}} - {X^{ab}_{ij}} {d^{k}_{j}} {t1^{ab}_{ik}}", + "expectation_value": "\\frac{{X^{a}_{i}} {t1^{ab}_{ij}} {t1cc^{bc}_{jk}} {d^{c}_{k}}}{2} - {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + \\frac{{X^{a}_{i}} 
{t1^{ab}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{i}}}{4} + \\frac{{X^{a}_{i}} {t1^{bc}_{ij}} {t1cc^{bc}_{jk}} {d^{a}_{k}}}{4} - {X^{a}_{i}} {t2^{a}_{j}} {d^{j}_{i}} + {X^{a}_{i}} {t2^{b}_{i}} {d^{a}_{b}} - {X^{a}_{i}} {t2^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}} - {X^{ab}_{ij}} {t1^{ab}_{ik}} {d^{k}_{j}} - {X^{ab}_{ij}} {t1^{bc}_{ij}} {d^{a}_{c}}", + "real_expectation_value": "\\frac{{X^{a}_{i}} {t1^{ab}_{ij}} {t1^{bc}_{jk}} {d^{c}_{k}}}{2} - {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + \\frac{{X^{a}_{i}} {t1^{ab}_{jk}} {t1^{bc}_{jk}} {d^{c}_{i}}}{4} + \\frac{{X^{a}_{i}} {t1^{bc}_{ij}} {t1^{bc}_{jk}} {d^{a}_{k}}}{4} - {X^{a}_{i}} {t2^{a}_{j}} {d^{j}_{i}} + {X^{a}_{i}} {t2^{b}_{i}} {d^{a}_{b}} - {X^{a}_{i}} {t2^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}} - {X^{ab}_{ij}} {t1^{ab}_{ik}} {d^{k}_{j}} - {X^{ab}_{ij}} {t1^{bc}_{ij}} {d^{a}_{c}}", "real_transition_dm": { "vo": "{X^{a}_{i}} + \\frac{{X^{a}_{j}} {p2^{i}_{j}}}{2} - \\frac{{X^{b}_{i}} {p2^{a}_{b}}}{2} + \\frac{{X^{c}_{k}} {t1^{ab}_{ij}} {t1^{bc}_{jk}}}{2}", "vv": "{X^{a}_{i}} {t2^{b}_{i}} + {X^{ac}_{ij}} {t1^{bc}_{ij}}", diff --git a/tests/reference_data/ri_gs_energy.json b/tests/reference_data/ri_gs_energy.json new file mode 100644 index 0000000..c69bc94 --- /dev/null +++ b/tests/reference_data/ri_gs_energy.json @@ -0,0 +1,66 @@ +{ + "mp": { + "0": { + "r": { + "sym": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}}", + "asym": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}}" + }, + "u": { + "sym": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}}", + "asym": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}}" + } + }, + "1": { + "r": { + "sym": "- 2 {B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {B^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}^{2}", + "asym": "- 2 {C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {C^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}" + }, + "u": { + "sym": "- 
{B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{B^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}^{2}}{2} - \\frac{{B^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}j_{\\beta}}}}{2} + \\frac{{B^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}}^{2}}{2}", + "asym": "- {C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{C^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}}{2} - \\frac{{C^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}j_{\\beta}}}}{2} + \\frac{{C^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}} {G^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}}}{2}" + } + }, + "2": { + "r": { + "sym": "- \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "asym": "- \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}" + }, + "u": { + "sym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} 
{B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", + "asym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" + } + } + }, + "re": { + "0": { + "r": { + "sym": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}} - 2 {B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {B^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}^{2}", + "asym": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}} - 2 {C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {C^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}" + }, + "u": { + "sym": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}} - {B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{B^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{B^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}^{2}}{2} - \\frac{{B^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}j_{\\beta}}}}{2} + 
\\frac{{B^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}}^{2}}{2}", + "asym": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}} - {C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{C^{P_{\\alpha}}_{i_{\\alpha}i_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{C^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}}{2} - \\frac{{C^{P_{\\alpha}}_{i_{\\beta}i_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}j_{\\beta}}}}{2} + \\frac{{C^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}} {G^{P_{\\alpha}}_{i_{\\beta}j_{\\beta}}}}{2}" + } + }, + "1": { + "r": { + "sym": "0", + "asym": "0" + }, + "u": { + "sym": "0", + "asym": "0" + } + }, + "2": { + "r": { + "sym": "- \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "asym": "- \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}" + }, + "u": { + "sym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + 
\\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", + "asym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" + } + } + } +} \ No newline at end of file diff --git a/tests/reference_data/secular_matrix.json b/tests/reference_data/secular_matrix.json index caf79c4..2faadcf 100644 --- a/tests/reference_data/secular_matrix.json +++ b/tests/reference_data/secular_matrix.json @@ -44,8 +44,8 @@ "vv": "- \\frac{\\delta_{a b} {t1^{cd}_{ik}} {t2eri3^{jk}_{cd}}}{8} + \\frac{\\delta_{a b} {t1^{cd}_{ik}} {t2eri4_{jkdc}}}{2} - \\frac{\\delta_{a b} {t1^{cd}_{jk}} {t2eri3^{ik}_{cd}}}{8} + \\frac{\\delta_{a b} {t1^{cd}_{jk}} {t2eri4_{ikdc}}}{2} - \\delta_{a b} {t2^{c}_{k}} {V^{ik}_{jc}} - \\delta_{a b} {t2^{c}_{k}} {V^{jk}_{ic}} + \\frac{\\delta_{a b} {t2^{cd}_{ik}} {V^{jk}_{cd}}}{4} + \\frac{\\delta_{a b} {t2^{cd}_{jk}} {V^{ik}_{cd}}}{4} - \\delta_{a b} {V^{ic}_{jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{il}_{jk}} {p2^{k}_{l}}" }, "real_factored_cvs": { - "none": "\\frac{{t1^{ac}_{kl}} {t1^{bd}_{kl}} {V^{Ic}_{Jd}}}{2} - {t2^{a}_{k}} {V^{Ib}_{kJ}} - {t2^{b}_{k}} {V^{Ja}_{kI}} + \\frac{{V^{Ib}_{Jc}} {p2^{a}_{c}}}{2} + \\frac{{V^{Ic}_{Ja}} {p2^{b}_{c}}}{2} - {V^{kJ}_{lI}} {t2sq^{ka}_{lb}}", - "vv": "\\delta_{a b} 
{t2^{c}_{k}} {V^{Ic}_{kJ}} + \\delta_{a b} {t2^{c}_{k}} {V^{Jc}_{kI}} - \\delta_{a b} {V^{Ic}_{Jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{kJ}_{lI}} {p2^{k}_{l}}", + "none": "\\frac{{t1^{ac}_{kl}} {t1^{bd}_{kl}} {V^{Ic}_{Jd}}}{2} - {t2^{a}_{k}} {V^{kJ}_{Ib}} - {t2^{b}_{k}} {V^{kI}_{Ja}} + \\frac{{V^{Ib}_{Jc}} {p2^{a}_{c}}}{2} + \\frac{{V^{Ic}_{Ja}} {p2^{b}_{c}}}{2} - {V^{kJ}_{lI}} {t2sq^{ka}_{lb}}", + "vv": "\\delta_{a b} {t2^{c}_{k}} {V^{kI}_{Jc}} + \\delta_{a b} {t2^{c}_{k}} {V^{kJ}_{Ic}} - \\delta_{a b} {V^{Ic}_{Jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{kJ}_{lI}} {p2^{k}_{l}}", "cc": "- \\frac{\\delta_{I J} {t1^{ac}_{kl}} {t2eri5^{kl}_{bc}}}{8} + \\frac{\\delta_{I J} {t1^{ac}_{kl}} {t2eri4_{klcb}}}{2} - \\frac{\\delta_{I J} {t1^{bc}_{kl}} {t2eri5^{kl}_{ac}}}{8} + \\frac{\\delta_{I J} {t1^{bc}_{kl}} {t2eri4_{klca}}}{2} - \\delta_{I J} {t2^{c}_{k}} {V^{ka}_{bc}} - \\delta_{I J} {t2^{c}_{k}} {V^{kb}_{ac}} + \\frac{\\delta_{I J} {t2^{ac}_{kl}} {V^{kl}_{bc}}}{4} + \\frac{\\delta_{I J} {t2^{bc}_{kl}} {V^{kl}_{ac}}}{4} + \\delta_{I J} {V^{ad}_{bc}} {p2^{c}_{d}} + \\delta_{I J} {V^{ka}_{lb}} {p2^{k}_{l}}" } } diff --git a/tests/reference_data/spatial_gs_energy.json b/tests/reference_data/spatial_gs_energy.json new file mode 100644 index 0000000..6fecefa --- /dev/null +++ b/tests/reference_data/spatial_gs_energy.json @@ -0,0 +1,30 @@ +{ + "mp": { + "0": { + "r": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}}", + "u": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}}" + }, + "1": { + "r": "- 2 {v^{i_{\\alpha}i_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {v^{i_{\\alpha}j_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}", + "u": "- {v^{i_{\\alpha}i_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{v^{i_{\\alpha}i_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{v^{i_{\\alpha}j_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}}{2} - \\frac{{v^{i_{\\beta}i_{\\beta}}_{j_{\\beta}j_{\\beta}}}}{2} + \\frac{{v^{i_{\\beta}j_{\\beta}}_{i_{\\beta}j_{\\beta}}}}{2}" + }, + "2": { + "r": "- 
\\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "u": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" + } + }, + "re": { + "0": { + "r": "2 {f^{i_{\\alpha}}_{i_{\\alpha}}} - 2 {v^{i_{\\alpha}i_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}} + {v^{i_{\\alpha}j_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}", + "u": "{f^{i_{\\alpha}}_{i_{\\alpha}}} + {f^{i_{\\beta}}_{i_{\\beta}}} - {v^{i_{\\alpha}i_{\\alpha}}_{i_{\\beta}i_{\\beta}}} - \\frac{{v^{i_{\\alpha}i_{\\alpha}}_{j_{\\alpha}j_{\\alpha}}}}{2} + \\frac{{v^{i_{\\alpha}j_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}}}{2} - \\frac{{v^{i_{\\beta}i_{\\beta}}_{j_{\\beta}j_{\\beta}}}}{2} + \\frac{{v^{i_{\\beta}j_{\\beta}}_{i_{\\beta}j_{\\beta}}}}{2}" + }, + "1": { + "r": "0", + "u": "0" + }, + "2": { + "r": "- \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "u": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + 
\\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" + } + } +} \ No newline at end of file diff --git a/tests/resolution_of_identity_test.py b/tests/resolution_of_identity_test.py new file mode 100644 index 0000000..3995035 --- /dev/null +++ b/tests/resolution_of_identity_test.py @@ -0,0 +1,32 @@ +from adcgen.spatial_orbitals import transform_to_spatial_orbitals +from adcgen.resolution_of_identity import apply_resolution_of_identity +from adcgen.simplify import simplify +from adcgen.expression import ExprContainer + +from sympy import S + +import pytest + + +class TestResolutionOfIdentity(): + + @pytest.mark.parametrize('variant', ['mp', 're']) + @pytest.mark.parametrize('order', [0, 1, 2, 3]) + @pytest.mark.parametrize('restriction', ['r', 'u']) + @pytest.mark.parametrize('symmetry', ['sym', 'asym']) + def test_ri_gs_energy(self, variant, order, restriction, symmetry, + cls_instances, reference_data): + # load the reference data + ref = reference_data['ri_gs_energy'][variant][order] + ref = ref[restriction][symmetry].inner + # transform restriction and symmetry to bool + restricted = restriction == 'r' + symmetric = symmetry == 'sym' + # compute the energy + e = cls_instances[variant]['gs'].energy(order) + expr = ExprContainer(e, real=True) + + sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) + ri_expr = apply_resolution_of_identity(sp_expr, symmetric) + + assert simplify(ri_expr - ref).substitute_contracted().inner is S.Zero From 104923ea5667833c560aaa4031d5ec74005275f0 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 12:34:31 +0200 Subject: [PATCH 02/26] Changed gitignore to include build folder --- .gitignore | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 3557f10..d2f425b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ __pycache__/ .DS_STORE .vscode -*.egg-info \ No newline at end of file +*.egg-info +build/ From 1933098e1ab1c2363fcfe44c88dd6886d976d657 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 12:34:55 +0200 Subject: [PATCH 03/26] Removed build folder --- build/lib/adcgen/__init__.py | 51 - build/lib/adcgen/core_valence_separation.py | 421 ---- build/lib/adcgen/derivative.py | 103 - build/lib/adcgen/eri_orbenergy.py | 615 ------ build/lib/adcgen/expression/__init__.py | 12 - build/lib/adcgen/expression/container.py | 228 --- build/lib/adcgen/expression/expr_container.py | 555 ----- .../expression/normal_ordered_container.py | 199 -- .../lib/adcgen/expression/object_container.py | 879 -------- .../adcgen/expression/polynom_container.py | 282 --- build/lib/adcgen/expression/term_container.py | 770 ------- build/lib/adcgen/factor_intermediates.py | 1813 ----------------- build/lib/adcgen/func.py | 540 ----- build/lib/adcgen/generate_code/__init__.py | 8 - build/lib/adcgen/generate_code/config.json | 8 - build/lib/adcgen/generate_code/contraction.py | 250 --- .../lib/adcgen/generate_code/generate_code.py | 390 ---- .../generate_code/optimize_contractions.py | 329 --- build/lib/adcgen/groundstate.py | 476 ----- build/lib/adcgen/indices.py | 527 ----- build/lib/adcgen/intermediate_states.py | 598 ------ build/lib/adcgen/intermediates.py | 1663 --------------- build/lib/adcgen/logger.py | 63 - build/lib/adcgen/logger_config.json | 40 - build/lib/adcgen/misc.py | 116 -- build/lib/adcgen/operators.py | 213 -- build/lib/adcgen/properties.py | 423 ---- build/lib/adcgen/reduce_expr.py | 330 --- build/lib/adcgen/resolution_of_identity.py | 71 - build/lib/adcgen/rules.py | 65 - build/lib/adcgen/secular_matrix.py | 436 ---- build/lib/adcgen/simplify.py | 765 ------- build/lib/adcgen/sort_expr.py | 382 ---- 
build/lib/adcgen/spatial_orbitals.py | 443 ---- build/lib/adcgen/symmetry.py | 368 ---- build/lib/adcgen/sympy_objects.py | 399 ---- build/lib/adcgen/tensor_names.json | 15 - build/lib/adcgen/tensor_names.py | 225 -- 38 files changed, 15071 deletions(-) delete mode 100644 build/lib/adcgen/__init__.py delete mode 100644 build/lib/adcgen/core_valence_separation.py delete mode 100644 build/lib/adcgen/derivative.py delete mode 100644 build/lib/adcgen/eri_orbenergy.py delete mode 100644 build/lib/adcgen/expression/__init__.py delete mode 100644 build/lib/adcgen/expression/container.py delete mode 100644 build/lib/adcgen/expression/expr_container.py delete mode 100644 build/lib/adcgen/expression/normal_ordered_container.py delete mode 100644 build/lib/adcgen/expression/object_container.py delete mode 100644 build/lib/adcgen/expression/polynom_container.py delete mode 100644 build/lib/adcgen/expression/term_container.py delete mode 100644 build/lib/adcgen/factor_intermediates.py delete mode 100644 build/lib/adcgen/func.py delete mode 100644 build/lib/adcgen/generate_code/__init__.py delete mode 100644 build/lib/adcgen/generate_code/config.json delete mode 100644 build/lib/adcgen/generate_code/contraction.py delete mode 100644 build/lib/adcgen/generate_code/generate_code.py delete mode 100644 build/lib/adcgen/generate_code/optimize_contractions.py delete mode 100644 build/lib/adcgen/groundstate.py delete mode 100644 build/lib/adcgen/indices.py delete mode 100644 build/lib/adcgen/intermediate_states.py delete mode 100644 build/lib/adcgen/intermediates.py delete mode 100644 build/lib/adcgen/logger.py delete mode 100644 build/lib/adcgen/logger_config.json delete mode 100644 build/lib/adcgen/misc.py delete mode 100644 build/lib/adcgen/operators.py delete mode 100644 build/lib/adcgen/properties.py delete mode 100644 build/lib/adcgen/reduce_expr.py delete mode 100644 build/lib/adcgen/resolution_of_identity.py delete mode 100644 build/lib/adcgen/rules.py delete mode 100644 
build/lib/adcgen/secular_matrix.py delete mode 100644 build/lib/adcgen/simplify.py delete mode 100644 build/lib/adcgen/sort_expr.py delete mode 100644 build/lib/adcgen/spatial_orbitals.py delete mode 100644 build/lib/adcgen/symmetry.py delete mode 100644 build/lib/adcgen/sympy_objects.py delete mode 100644 build/lib/adcgen/tensor_names.json delete mode 100644 build/lib/adcgen/tensor_names.py diff --git a/build/lib/adcgen/__init__.py b/build/lib/adcgen/__init__.py deleted file mode 100644 index e62ee16..0000000 --- a/build/lib/adcgen/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -from .core_valence_separation import apply_cvs_approximation -from .derivative import derivative -from .eri_orbenergy import EriOrbenergy -from .expression import ExprContainer -from .factor_intermediates import factor_intermediates -from .func import import_from_sympy_latex, evaluate_deltas, wicks -from .generate_code import (generate_code, optimize_contractions, Contraction, - unoptimized_contraction) -from .groundstate import GroundState -from .indices import Indices, get_symbols -from .intermediate_states import IntermediateStates -from .intermediates import Intermediates -from .logger import set_log_level, _config_logger -from .operators import Operators -from .properties import Properties -from .reduce_expr import reduce_expr -from .secular_matrix import SecularMatrix -from .simplify import simplify, simplify_unitary, remove_tensor -from .spatial_orbitals import transform_to_spatial_orbitals -from .sympy_objects import (AntiSymmetricTensor, SymmetricTensor, Amplitude, - NonSymmetricTensor, KroneckerDelta, SymbolicTensor) -from .tensor_names import tensor_names -from .resolution_of_identity import apply_resolution_of_identity -from . 
import sort_expr as sort - - -__all__ = ["AntiSymmetricTensor", "SymmetricTensor", "NonSymmetricTensor", - "Amplitude", "SymbolicTensor", "KroneckerDelta", - "Operators", "GroundState", "IntermediateStates", - "SecularMatrix", "Properties", - "Indices", "get_symbols", - "ExprContainer", "EriOrbenergy", "import_from_sympy_latex", - "evaluate_deltas", "wicks", - "simplify", "simplify_unitary", "remove_tensor", - "derivative", - "Intermediates", "reduce_expr", "factor_intermediates", - "sort", - "transform_to_spatial_orbitals", - "apply_resolution_of_identity" - "apply_cvs_approximation", - "generate_code", "optimize_contractions", - "unoptimized_contraction", "Contraction", - "set_log_level", - "tensor_names"] - -__authors__ = ["Jonas Leitner", "Linus Dittmer"] -__version__ = "0.0.4" - - -# load the logger configuration and apply it -_config_logger() diff --git a/build/lib/adcgen/core_valence_separation.py b/build/lib/adcgen/core_valence_separation.py deleted file mode 100644 index 4e60305..0000000 --- a/build/lib/adcgen/core_valence_separation.py +++ /dev/null @@ -1,421 +0,0 @@ -from collections.abc import Callable, Sequence -import itertools - -from sympy.physics.secondquant import FermionicOperator -from sympy import S - -from .expression import ( - ExprContainer, TermContainer, ObjectContainer, - NormalOrderedContainer, PolynomContainer -) -from .indices import Index, get_symbols, sort_idx_canonical -from .logger import logger -from .misc import Inputerror -from .sympy_objects import SymbolicTensor, KroneckerDelta -from .tensor_names import tensor_names, is_t_amplitude - - -def apply_cvs_approximation( - expr: ExprContainer, core_indices: str, spin: str | None = None, - cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None - ) -> ExprContainer: - """ - Apply the core-valence approximation to the given expression by - splitting the occupied space into core and valence space. - Furthermore certain ERI/Coulomb blocks are assumed to vanish. 
- - Parameters - ---------- - expr: Expr - Expression the CVS approximation should be applied to. - core_indices: str - The names of the core target indices to introduce assuming we currently - have occupied target indices with matching names in the expression, - e.g., "IJ" will transform the occupied target indices "ij" to the core - target indices "IJ". - spin: str | None, optional - The spin of the core indices, e.g., "aa" for two core indices with - alpha spin. - cvs_approximation : callable, optional - Callable that takes an ObjectContainer instance and a space string - (e.g. 'covv'). It returns a bool indicating whether the block of the - object described by the space string is valid within the CVS - approximation, i.e., whether the block is neglected or not. - By default, the "is_allowed_cvs_block" function is used, which applies - the CVS approximation as described in 10.1063/1.453424 and as - implemented in adcman/adcc. - """ - # NOTE: Index substitutions have to be performed for all indices - # simultaneously, to avoid creating an intermediate delta_cx that is then - # further substituted to a delta_cc for instance. However, the delta_cx - # will be evalauted to zero upon creation and therefore some terms might - # vanish by accident. - assert isinstance(expr, ExprContainer) - # get the target indices of the expression - terms: tuple[TermContainer, ...] = expr.terms - target_indices: tuple[Index, ...] 
= terms[0].target - assert all(term.target == target_indices for term in terms) - # build the substitution dict for the occupied target indices - target_subs = build_target_substitutions( - target_indices, core_indices, spin=spin - ) - result = ExprContainer(0, **expr.assumptions) - for term in terms: - result += expand_contracted_indices( - term, target_subs=target_subs, - cvs_approximation=cvs_approximation - ) - # update the set target indices if necessary - if result.provided_target_idx is not None: - result_target = tuple(target_subs.get(s, s) for s in target_indices) - result.set_target_idx(result_target) - return result - - -def build_target_substitutions(target_indices: tuple[Index, ...], - core_indices: str, - spin: str | None = None) -> dict[Index, Index]: - """ - Determines the necessary index substitutions to introduce the desired - core indices as target indices. - - Parameters - ---------- - target_indices: tuple[Index] - The target indices in which to substitute the core indices. - core_indices: str - Names of the core indices to introduce. - spin: str | None, optional - Spin of the core indices to introduce, e.g., "aa" for two core indices - with alpha spin. - """ - core_symbols: list[Index] = get_symbols(core_indices, spin) - # ensure that the provided core indices are valid - if not all(idx.space == "core" for idx in core_symbols): - raise Inputerror(f"The provided core indices {core_symbols} are no " - "valid core indices, i.e., they do not belong to the" - " core space.") - # for each occupied target index build the corresponding core index - occupied_target_indices: tuple[Index, ...] 
= tuple( - idx for idx in target_indices if idx.space == "occ" - ) - occ_target_as_core = get_core_indices(occupied_target_indices) - # build the substitution dict for the occupied target indices - return {occ: core for occ, core in - zip(occupied_target_indices, occ_target_as_core) - if core in core_symbols} - - -def expand_contracted_indices( - term: TermContainer, target_subs: dict[Index, Index], - cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None - ) -> ExprContainer: - """ - Expands the contracted occupied indices in the given term into core - and valence indices. Note that valence indices are denoted as occupied - in the result. - - Parameters - ---------- - term: TermContainer - Term in which to expand the occupied contracted indices - target_subs: dict[Index, Index] - The substitution dict containing the necessary occ -> core - substitutions for the target indices. Will not be modified in this - function! - cvs_approximation : callable, optional - Callable that takes an ObjectContainer instance and a space string - (e.g. 'covv'). It returns a bool indicating whether the block of the - object described by the space string is valid within the CVS - approximation, i.e., whether the block is neglected or not. - By default, the "is_allowed_cvs_block" function is used, which applies - the CVS approximation as described in 10.1063/1.453424 and as - implemented in adcman/adcc. - """ - if not term.idx: # term is a number -> nothing to do - return ExprContainer(term.inner, **term.assumptions) - - if cvs_approximation is None: - cvs_approximation = is_allowed_cvs_block - # get the contracted occupied indices - # and build the corresponding core indices - contracted: tuple[Index, ...] = term.contracted - occupied_contracted: tuple[Index, ...] 
= tuple( - idx for idx in contracted if idx.space == "occ" - ) - core_contracted = get_core_indices(occupied_contracted) - result = ExprContainer(0, **term.assumptions) - # go through all variants of valence and core indices - for variant in itertools.product("oc", repeat=len(occupied_contracted)): - # finish the substitution dict - subs = target_subs.copy() - for space, occ, core in \ - zip(variant, occupied_contracted, core_contracted): - if space != "c": - continue - # check for contradictions in the full substitutions dict - if occ in subs and subs[occ] is not core: - raise RuntimeError("Found contradiction in substitution dict. " - f"The occ index {occ} can not be mapped " - f"onto {subs[occ]} and {core} at the " - "same time.") - subs[occ] = core - # go through the objects and check if there is a block that is - # neglected within the CVS approximation - is_valid_variant = True - for obj in term.objects: - cvs_block = "".join( - subs.get(idx, idx).space[0] for idx in obj.idx - ) - if not cvs_approximation(obj, cvs_block): - is_valid_variant = False - break - if not is_valid_variant: # variant generates a neglected block - continue - # apply the substitutions to the term. This has to happen - # simultaneously in order to avoid intermediates delta_cx which - # evaluate to zero. - sub_term = term.subs(subs, simultaneous=True) - result += sub_term - return result - - -def allowed_cvs_blocks( - expr: ExprContainer, target_idx: Sequence[str] | Sequence[Index], - spin: str | None = None, - cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None - ) -> tuple[str, ...]: - """ - Determines all allowed blocks for the given expression - within the CVS approximation by expanding the occupied indices into - core and valence indices. - - Parameters - ---------- - expr: Expr - The expression in which the allowed cvs blocks should be determined. - target_idx: Sequence[str] | Sequence[Index] - The target indices of the expression. 
- cvs_approximation : callable, optional - Callable that takes an ObjectContainer instance and a space string - (e.g. 'covv'). It returns a bool indicating whether the block of the - object described by the space string is valid within the CVS - approximation, i.e., whether the block is neglected or not. - By default, the "is_allowed_cvs_block" function is used, which applies - the CVS approximation as described in 10.1063/1.453424 and as - implemented in adcman/adcc. - """ - target_symbols: list[Index] = get_symbols(target_idx, spin) - sorted_target: tuple[Index, ...] = tuple( - sorted(target_symbols, key=sort_idx_canonical) - ) - # identify all occupied target indices - # and build the corresponding core indices - occupied_target: list[Index] = [ - idx for idx in target_symbols if idx.space == "occ" - ] - core_target: list[Index] = get_core_indices(occupied_target) - # determine the possible cvs variants (part of the block string) - cvs_variants: tuple[tuple[str, ...], ...] = tuple( - itertools.product("oc", repeat=len(occupied_target)) - ) - cvs_variants_to_check: list[int] = [i for i in range(len(cvs_variants))] - allowed_blocks: list[str] = [] - # go through all terms and check each for the invalid cvs blocks - for term in expr.terms: - if term.target != sorted_target: - raise ValueError(f"Target indices {term.target} of {term} dont " - "match the provided target indices " - f"{target_symbols}") - - variants_to_remove: set[int] = set() - for variant_i in cvs_variants_to_check: - variant = cvs_variants[variant_i] - # build the target index occ -> core substitution dict - target_subs = {occ: core for space, occ, core in - zip(variant, occupied_target, core_target) - if space == "c"} - # expand the occupied contracted indices - sub_term = expand_contracted_indices( - term, target_subs=target_subs, - cvs_approximation=cvs_approximation - ) - # invalid substitutions -> invalid variant - if sub_term.inner is S.Zero: - continue - # build the full block string - 
variant = list(reversed(variant)) - block = "".join( - idx.space[0] if idx.space != "occ" else variant.pop() - for idx in target_symbols - ) - assert not variant - allowed_blocks.append(block) - variants_to_remove.add(variant_i) - cvs_variants_to_check = [i for i in cvs_variants_to_check - if i not in variants_to_remove] - return tuple(allowed_blocks) - - -def allow_all_cvs_blocks(obj: ObjectContainer, cvs_block: str) -> bool: - _ = obj, cvs_block - return True - - -def is_allowed_cvs_block(obj: ObjectContainer, cvs_block: str) -> bool: - """ - Whether the object is allowed within the CVS approximation. - """ - from .intermediates import Intermediates, RegisteredIntermediate - - if not obj.idx: # prefactor or symbol - return True - # skip Polynoms for now. - # The MP orbital energy denoms should not be important - if isinstance(obj, PolynomContainer): - return True - elif isinstance(obj, NormalOrderedContainer): - return all( - is_allowed_cvs_block(o, b) for o, b in zip(obj.objects, cvs_block) - ) - - sympy_obj = obj.base - if isinstance(sympy_obj, SymbolicTensor): - name = sympy_obj.name - if name == tensor_names.eri: - return is_allowed_cvs_eri_block(cvs_block) - elif name == tensor_names.coulomb: - return is_allowed_cvs_coulomb_block(cvs_block) - elif is_t_amplitude(name): - return is_allowed_cvs_t_amplitude_block(cvs_block) - elif name == tensor_names.fock: - return is_allowed_cvs_fock_block(cvs_block) - elif isinstance(sympy_obj, KroneckerDelta): - return is_allowed_cvs_delta_block(cvs_block) - elif isinstance(sympy_obj, FermionicOperator): - return True - - # check if the obj is a known intermediate - longname = obj.longname(use_default_names=True) - assert longname is not None - itmd = Intermediates().available.get(longname, None) - if itmd is None: - # the object is no intermediate - # assume that all blocks are valid in this case - logger.warning( - f"Could not determine whether {obj} is valid within the CVS " - "approximation." 
- ) - return True - # the object is a known intermediate: - # expand the intermediate, and determine the allowed spin blocks - assert isinstance(itmd, RegisteredIntermediate) - return cvs_block in itmd.allowed_cvs_blocks(is_allowed_cvs_block) - - -def is_allowed_cvs_coulomb_block(coulomb_block: str) -> bool: - """ - Whether the given Coulomb integral (in chemist notation) block - is allowed within the CVS approximation - """ - # NOTE: according to 10.1063/1.453424 (from 1987) coulomb integrals with - # 1 and 3 core indices vanish. Furthermore, the Coulomb integrals - # , , , - # vanish, i.e., all integrals co/cv vanish. - # However, in a later paper 10.1063/1.1418437 (from 2001) the integrals - # , , , - # = (co|co), (cv|cv), (oc|oc), (vc|vc) - # only vanish when arising from different core-level occupations (DCO), - # i.e., when they appear in matrix blocks that we are neglecting anyway. - # In the current implementation in adcman/adcc those blocks are assumed - # to vanish following the earlier paper. - # The current implementation follows the implementation in adcman/adcc. - assert len(coulomb_block) == 4 - assert "g" not in coulomb_block # no general indices - if "c" in coulomb_block and (coulomb_block[:2].count("c") == 1 or - coulomb_block[2:].count("c") == 1): - return False - return True - - -def is_allowed_cvs_eri_block(eri_block: str) -> bool: - """ - Whether the given anti-symmetric ERI block (in physicist notation) - is allowed within the CVS approximation. 
- """ - assert len(eri_block) == 4 - assert "g" not in eri_block # no general indices - n_core = eri_block.count("c") - if n_core == 1 or n_core == 3: - return False - # additionally, the blocks ccxx and xxcc are not allowed - # (see comment in is_allowed_cvs_coulomb_block) - elif n_core == 2 and (eri_block[:2] == "cc" or eri_block[2:] == "cc"): - return False - return True - - -def is_allowed_cvs_fock_block(fock_block: str) -> bool: - """ - Whether the given Fock matrix block is allowed within the CVS - approximation. - """ - assert len(fock_block) == 2 - assert "g" not in fock_block # no general indices - if fock_block.count("c") == 1: # f_cx / f_xc - return False - return True # f_cc / f_xx - - -def is_allowed_cvs_t_amplitude_block(amplitude_block: str) -> bool: - """ - Whether the given block of a ground state t-amplitude is valid within - the CVS approximation - """ - # t-amplitudes seem to follow the rule that only the valence space - # has to be considered, i.e., all core orbitals can simply - # be neglected. - # t2_1: oovv t1_2: ov t2_2: oovv t3_2: ooovvv t4_2: oooovvvv - assert not len(amplitude_block) % 2 - assert all(sp == "v" for sp in amplitude_block[len(amplitude_block)//2:]) - if amplitude_block.count("c"): - return False - assert all(sp == "o" for sp in amplitude_block[:len(amplitude_block)//2]) - return True - - -def is_allowed_cvs_delta_block(delta_block: str) -> bool: - """ - Whether the given delta block is allowed within the CVS approximation. - """ - assert len(delta_block) == 2 - assert "g" not in delta_block # no general indices - return delta_block[0] == delta_block[1] - - -def get_core_indices(occupied_indices: Sequence[Index]) -> list[Index]: - """ - Builds core indices for the given occupied indices, i.e., - I for the occupied index i. 
- """ - assert all(idx.space == "occ" for idx in occupied_indices) - names = [] - spins = [] - for idx in occupied_indices: - names.append(idx.name.upper()) - spins.append(idx.spin) - return get_symbols(names, spins) - - -def get_occ_indices(core_indices: Sequence[Index]) -> list[Index]: - """ - Builds the occupied/valence indices for the given core indices, i.e., - i for the core index I. - """ - assert all(idx.space == "core" for idx in core_indices) - names = [] - spins = [] - for idx in core_indices: - names.append(idx.name.lower()) - spins.append(idx.spin) - return get_symbols(names, spins) diff --git a/build/lib/adcgen/derivative.py b/build/lib/adcgen/derivative.py deleted file mode 100644 index a296ed9..0000000 --- a/build/lib/adcgen/derivative.py +++ /dev/null @@ -1,103 +0,0 @@ -from sympy import Mul, Rational, S, diff - -from .expression import ExprContainer, ObjectContainer -from .indices import minimize_tensor_indices, Index -from .sympy_objects import SymbolicTensor - - -def derivative(expr: ExprContainer, t_string: str - ) -> dict[tuple[str, str], ExprContainer]: - """Computes the derivative of an expression with respect to a tensor. - The derivative is separated block whise, e.g, terms that contribute to - the derivative w.r.t. the oooo ERI block are separated from terms that - contribute to the ooov block. - Assumptions of the input expression are NOT updated or modified. - The derivative is NOT simplified.""" - assert isinstance(t_string, str) - assert isinstance(expr, ExprContainer) - expr = expr.expand() - # create some Dummy Symbol. Replace the tensor with the Symbol and - # compute the derivative with respect to the Symbol. Afterwards - # resubstitute the Tensor for the Dummy Symbol. 
- x = Index('x') - - derivative = {} - for term in expr.terms: - assumptions = term.assumptions - objects = term.objects - # - find all occurences of the desired tensor - tensor_obj: list[ObjectContainer] = [] - remaining_obj = ExprContainer(1, **term.assumptions) - for obj in objects: - if obj.name == t_string: - tensor_obj.append(obj) - else: - remaining_obj *= obj - - # - extract the names of target indices of the term - target_names_by_space: dict[tuple[str, str], set[str]] = {} - for s in term.target: - if (key := s.space_and_spin) not in target_names_by_space: - target_names_by_space[key] = set() - target_names_by_space[key].add(s.name) - - # 2) go through the tensor_obj list and compute the derivative - # for all found occurences one after another (product rule) - for i, obj in enumerate(tensor_obj): - # - extract the exponent of the tensor - exponent = obj.exponent - # - rebuild the term without the current occurence of the - # tensor obj - deriv_contrib = remaining_obj.copy() - for other_i, other_obj in enumerate(tensor_obj): - if i != other_i: - deriv_contrib *= other_obj - # - minimize the indices of the removed tensor - _, perms = minimize_tensor_indices(obj.idx, target_names_by_space) - # - apply the permutations to the remaining term - deriv_contrib = deriv_contrib.permute(*perms) - if deriv_contrib.inner is S.Zero: - raise RuntimeError(f"Mnimization permutations {perms} let " - f"the remaining term {deriv_contrib} " - "vanish.") - # - Apply the permutations to the object. Might introduce - # a prefactor of -1 that we need to move to the deriv_contrib. - # Also the indices might be further minimized due to the - # symmetry of the tensor obj - obj = obj.permute(*perms).terms[0] - if (factor := obj.prefactor) < S.Zero: - deriv_contrib *= factor - # - Apply the symmetry of the removed tensor to the remaining - # term to ensure that the result has the correct symmetry. - # Also replace the removed tensor by a Dummy Variable x. 
- # This allows to compute the symbolic derivative with diff. - tensor_sym = obj.symmetry() - deriv_contrib *= Rational(1, len(tensor_sym) + 1) - symmetrized_deriv_contrib = S.Zero - symmetrized_deriv_contrib += Mul(deriv_contrib.inner, x**exponent) - for perms, factor in tensor_sym.items(): - symmetrized_deriv_contrib += Mul( - deriv_contrib.copy().permute(*perms).inner, - factor, - x**exponent - ) - # - compute the derivative with respect to x - symmetrized_deriv_contrib = diff(symmetrized_deriv_contrib, x) - # - replace x by the removed tensor (due to diff the exponent - # is lowered by 1) - obj = [ - o for o in obj.objects if isinstance(o.base, SymbolicTensor) - ] - assert len(obj) == 1 - obj = obj[0] - symmetrized_deriv_contrib = ( - symmetrized_deriv_contrib.subs(x, obj.base) - ) - # - sort the derivative according to the space of the minimal - # tensor indices - # -> sort the derivative block whise. - key = (obj.space, obj.spin) - if key not in derivative: - derivative[key] = ExprContainer(0, **assumptions) - derivative[key] += symmetrized_deriv_contrib - return derivative diff --git a/build/lib/adcgen/eri_orbenergy.py b/build/lib/adcgen/eri_orbenergy.py deleted file mode 100644 index c6e8360..0000000 --- a/build/lib/adcgen/eri_orbenergy.py +++ /dev/null @@ -1,615 +0,0 @@ -from collections.abc import Sequence -from collections import Counter -from typing import TypeGuard - -from sympy import Add, Basic, Expr, Mul, Pow, Rational, S, nsimplify - -from .expression import ( - ExprContainer, ObjectContainer, PolynomContainer, TermContainer -) -from .logger import logger -from .misc import Inputerror -from .symmetry import Permutation -from .sympy_objects import SymmetricTensor -from .tensor_names import tensor_names - - -class EriOrbenergy: - """ - Splits a single term into an orbital energy fraction, a prefactor and a - remainder. - - Parameters - ---------- - term : TermContainer | ExprContainer - The term to split. 
- """ - - def __init__(self, term: TermContainer | ExprContainer) -> None: - # ensure the input consists of a single term either as term or expr - if not isinstance(term, TermContainer) or not len(term) == 1: - Inputerror("Expected a single term as input.") - # factor the term to ensure all prefactors are in the numerator - factored: ExprContainer = term.factor() # returns an expr - - # split the term in num, denom and eri - splitted = factored.terms[0].split_orb_energy() - # validate the denominator: has to be of the form: (a+b)(c+d) or (a+b) - self._denom: ExprContainer = splitted['denom'] - self._validate_denom() - - # validate eri: has to consist of a single term - eri = splitted['remainder'] - if len(eri) != 1: - raise Inputerror("Remainder/ERI part should consist of a single " - f"term. Got {eri} from term {splitted}.") - self._eri: TermContainer = eri.terms[0] - - # numerator can essentially be anything: a or a+b - # extract the prefactor with the smallest abs value from the numerator - # NOTE: this is not mandatory. It is also possible to just use - # term.prefactor as pref. Then we might have prefactors < 1 - # in the numerator. Should not be important except for - # canceling the orbital energy fraction. 
- # But if we keep it like it is, we should have a more clear - # definition of the prefactor (only the sign might be ambiguous - # +0.5 vs -0.5) - self._pref: Expr = min( # type: ignore - [t.prefactor for t in splitted["num"].terms], - key=abs # type: ignore - ) - - # only possiblity to extract 0 should be if the numerator is 0 - if self._pref is S.Zero: - if not splitted['num'].inner.is_number: - raise NotImplementedError(f"Extracted pref {self._pref} from " - "unexpected numerator " - f"{splitted['num']}") - self._num: ExprContainer = splitted['num'] - elif self._pref is S.One: # nothing to factor - self._num: ExprContainer = splitted['num'] - else: - # we can factor a number and remove it afterwards from the term - # the result of the division needs to be converted to rational - # again! - self._num: ExprContainer = factor_and_remove_number( - splitted['num'], self._pref - ) - # ensure that the numerator is what we expect - self._validate_num() - - def __str__(self): - return f"{self.pref} * [{self.num}] / [{self.denom}] * {self.eri}" - - def _validate_denom(self) -> None: - """ - Ensures that the denominator only consists of brackets of the form - (e_a + e_b - ...)(...). 
- """ - # only objects that contain only e tensors with a single idx can - # occur in the denominator - - if self._denom.inner.is_number: # denom is a number -> has to be 1 - if self._denom.inner is not S.One: - raise Inputerror(f"Invalid denominator {self._denom}") - else: - # check that each bracket consists of terms that each contain - # a single epsilon and possibly a prefactor of -1 - for bracket in self.denom_brackets: - if not isinstance(bracket, (ExprContainer, PolynomContainer)): - raise TypeError(f"Invalid bracket {bracket} in " - f"{self._denom}.") - for term in bracket.terms: - n_orb_energy = 0 - for o in term.objects: - if o.is_orbital_energy and o.exponent == 1: - n_orb_energy += 1 - # denominator has to contain prefactors +- 1 - # prefactors need to be +-1 for cancelling to work - elif o.inner.is_number and o.inner is S.NegativeOne: - continue - else: - raise Inputerror(f"Invalid bracket {bracket} in " - f"{self._denom}.") - if n_orb_energy != 1: - raise Inputerror(f"Invalid bracket {bracket} in " - f"{self._denom}.") - - def _validate_num(self) -> None: - """ - Ensures that the numerator is of the form (e_a + e_b - ...) only - allowing prefactors +-1. - """ - # numerator can only contain terms that consist of e tensors with a - # single index and prefactors - # checking that each term only contains a single tensor with exponent 1 - # ensures that each term only holds a single index - - if self._num.inner.is_number: # is a number -> 1 or 0 possible - if self._num.inner not in [S.One, S.Zero]: - raise Inputerror(f"Invalid numerator {self._num}.") - else: # an expr object (a + b + ...) 
- for term in self._num.terms: - n_orb_energy = 0 - for o in term.objects: - if o.is_orbital_energy and o.exponent == 1: - n_orb_energy += 1 - elif o.inner.is_number: # any prefactors allowed - continue - else: - raise Inputerror(f"Invalid object {o} in {self._num}.") - if n_orb_energy != 1: - raise Inputerror(f"Invalid term {term} in numerator " - f"{self._num}.") - - @property - def denom(self) -> ExprContainer: - """Returns the denominator of the orbital energy fraction.""" - return self._denom - - @property - def eri(self) -> TermContainer: - """Returns the remainder of the term.""" - return self._eri - - @property - def num(self) -> ExprContainer: - """Returns the numerator of the orbital energy fraction.""" - return self._num - - @property - def pref(self) -> Expr: # sympy rational - """Returns the prefactor of the term.""" - return self._pref - - @property - def denom_brackets(self - ) -> tuple[ExprContainer] | tuple[PolynomContainer, ...]: # noqa E501 - """Returns a tuple containing the brackets of the denominator.""" - if len(self.denom) != 1 or self.denom.inner.is_number: - return (self.denom,) - else: # denom consists of brackets - brackets = self.denom.terms[0].objects - assert _is_polynom_tuple(brackets) - return brackets - - def copy(self) -> "EriOrbenergy": - return EriOrbenergy(self.expr) - - @property - def expr(self) -> ExprContainer: - """Rebuild the original term.""" - return self.num * self.eri / self.denom * self.pref - - def denom_description(self) -> str | None: - """ - Returns a string that describes the denominator containing the - number of brackets, as well as the length and exponent of each - bracket. 
- """ - if self.denom.inner.is_number: - return None - - brackets = self.denom_brackets - bracket_data = [] - for bk in brackets: - exponent = S.One if isinstance(bk, ExprContainer) else bk.exponent - assert isinstance(exponent, Expr) - bracket_data.append(f"{len(bk)}-{exponent}") - # reverse sorting -> longest braket will be listed first - bracket_data = "_".join(sorted(bracket_data, reverse=True)) - return f"{len(brackets)}_{bracket_data}" - - def cancel_denom_brackets(self, braket_idx_list: Sequence[int] - ) -> ExprContainer: - """ - Cancels brackets by their index in the denominator lowering the - exponent by 1 or removing the bracket completely if an exponent - of 0 is reached. If an index is listed n times the exponent - will be lowered by n. - The original denominator is not modified. - """ - denom: list[ExprContainer | PolynomContainer | None | Expr] = list( - self.denom_brackets - ) - for idx, n in Counter(braket_idx_list).items(): - braket = denom[idx] - assert braket is not None and not isinstance(braket, Expr) - if isinstance(braket, ExprContainer): - exponent = S.One - base = braket.inner - else: - base, exponent = braket.base_and_exponent - assert exponent.is_Integer - if (new_exp := int(exponent) - n) == 0: - denom[idx] = None - else: - denom[idx] = Pow(base, new_exp) - new_denom = Mul(*( - bk if isinstance(bk, Expr) else bk.inner for bk in denom - if bk is not None - )) - return ExprContainer(new_denom, **self.denom.assumptions) - - def cancel_eri_objects(self, obj_idx_list: Sequence[int]) -> ExprContainer: - """ - Cancels objects in the remainder (eri) part according to their index - lowering their exponent by 1 for each time the objects index is - provided. If a final exponent of 0 is reached, the object is removed - from the remainder entirely. - The original remainder is not changed. 
- """ - objects: list[ObjectContainer | None | Expr] = list(self.eri.objects) - for idx, n in Counter(obj_idx_list).items(): - obj = objects[idx] - assert obj is not None and not isinstance(obj, Expr) - base, exponent = obj.base_and_exponent - assert exponent.is_Integer - if (new_exp := int(exponent) - n) == 0: - objects[idx] = None - else: - objects[idx] = Pow(base, new_exp) - new_eri = Mul(*( - obj if isinstance(obj, Basic) else obj.inner for obj in objects - if obj is not None - )) - return ExprContainer(new_eri, **self.eri.assumptions) - - def denom_eri_sym(self, - eri_sym: dict[tuple[Permutation, ...], int] | None = None, # noqa E501 - **kwargs) -> dict[tuple[Permutation, ...], int]: - """ - Apply the symmetry of the remainder (eri) part to the denominator - identifying the common symmetry of both parts of the term. - - Parameters - ---------- - eri_sym : dict, optional - The symmetry of the remainder (eri) part of the term. - If not provided it will be determined on the fly. - **kwargs : dict, optional - Additional arguments that are forwarded to the 'Term.symmetry' - method to determine the symmetry of the remainder on the fly. 
- """ - # if the denominator is a number -> just return symmetry of eri part - if self.denom.inner.is_number: - return self.eri.symmetry(**kwargs) if eri_sym is None else eri_sym - - if eri_sym is None: - # if the eri part is just a number all possible permutations of the - # denom would be required with their symmetry - if not self.eri.idx: - raise NotImplementedError("Symmetry of an expr (the " - "denominator) not implemented") - eri_sym = self.eri.symmetry(**kwargs) - - ret = {} - denom = self.denom.inner - for perms, factor in eri_sym.items(): - perm_denom = self.denom.copy().permute(*perms).inner - # permutations are not valid for the denominator - if perm_denom is S.Zero and denom is not S.Zero: - continue - - if Add(denom, -perm_denom) is S.Zero: - ret[perms] = factor # P_pq Denom = Denom -> +1 - elif Add(denom, perm_denom) is S.Zero: - ret[perms] = factor * -1 # P_pq Denom = -Denom -> -1 - else: # permutation changes the denominator - ret[perms] = None - return ret - - def permute_num(self, - eri_sym: dict[tuple[Permutation, ...], int] | None = None - ) -> "EriOrbenergy": - """ - Symmetrize the orbital energy numerator by applying the common symmetry - of the remainder (eri) part and the orbital energy denominator - - only considering contracted indices! - to the numerator keeping the - result normalized. - For instance, a numerator (e_i - e_a) may be expanded to - 1/2 (e_i + e_j - e_a - e_b) by applying the permutation P_{ij}P_{ab}. - The new prefactor is automatically extracted from the - new numerator and added to the existing prefactor. - The class instance is modified in place. 
- """ - # if the numerator is a number no permutation will do anything useful - if self.num.inner.is_number: - return self - # apply all permutations to the numerator that satisfy - # P_pq ERI = a * ERI and P_pq Denom = b * Denom - # with a, b in [-1, +1] and a*b = 1 - permutations = [ - (perms, factor) for perms, factor in - self.denom_eri_sym(eri_sym=eri_sym, only_contracted=True).items() - if factor is not None - ] - num = self.num.copy() - for perms, factor in permutations: - num += self.num.copy().permute(*perms) * factor - num = num * Rational(1, len(permutations) + 1) - assert isinstance(num, ExprContainer) - num.expand() - # this possibly introduced prefactors in the numerator again - # -> extract the smallest prefactor and shift to self.pref - additional_pref = min( # type: ignore - [t.prefactor for t in num.terms], key=abs # type: ignore - ) - self._pref *= additional_pref - if additional_pref is S.Zero: # permuted num = 0 - if not num.inner.is_number: - raise ValueError("Only expected to obtain 0 as pref" - "from a 0 numerator. Got " - f"{additional_pref} from {num}.") - self._num = num - elif additional_pref is S.One: # nothing to factor - self._num = num - else: - self._num = factor_and_remove_number(num, additional_pref) - self._validate_num() - return self - - def canonicalize_sign(self, only_denom: bool = False) -> "EriOrbenergy": - """ - Adjusts the sign of orbital energies in the numerator and denominator: - virtual orbital energies are subtracted, while occupied orbital - energies are added. The possible factor of -1 is extracted to the - prefactor. - Modifies the class instance in place. - - Parameters - ---------- - only_denom : bool, optional - If set, only the signs in the denominator will be adjusted - (default: False). - """ - - def adjust_sign(expr: ExprContainer | PolynomContainer) -> bool: - # function that extracts the sign of the occupied and virtual - # indices in a term. 
- - signs = {} - for term in expr.terms: - idx = term.idx - if len(idx) != 1: - raise RuntimeError("Expected a bracket to consist of " - "epsilons that each hold a single index" - f". Found: {term} in {expr}.") - ov = idx[0].space[0] - if ov not in signs: - signs[ov] = [] - signs[ov].append(term.sign) - - # map that connects sign and space - desired_sign = {"o": "plus", "v": "minus"} - - # adjust sign if necessary - change_sign = [] - for ov, sign in signs.items(): - # first check that all o/v terms have the same sign - if not all(pm == sign[0] for pm in sign): - raise RuntimeError(f"Ambiguous signs of the {ov} indices " - f"in {expr} in\n{self}") - if ov not in desired_sign: - raise NotImplementedError("No desired sign defined for " - "orbital energies of the space " - f"{ov}.") - if sign[0] != desired_sign[ov]: - change_sign.append(True) - if change_sign: - if len(change_sign) != len(signs): - raise RuntimeError(f"Apparently not all {signs.keys()} " - "spaces require a sign change in " - f"{expr}.") - return True - else: - return False - - # numerator - if not only_denom and not self.num.inner.is_number and \ - adjust_sign(self.num): - self._pref *= S.NegativeOne - self._num *= S.NegativeOne - assert isinstance(self._pref, Expr) - assert isinstance(self._num, ExprContainer) - - # denominator - if not self.denom.inner.is_number: - denom = S.One - for bracket in self.denom_brackets: - if adjust_sign(bracket): - if isinstance(bracket, ExprContainer): - exponent = S.One - base = bracket.inner - else: - base, exponent = bracket.base_and_exponent - assert exponent.is_Integer - if int(exponent) % 2: - self._pref *= S.NegativeOne - bracket = ExprContainer( - Pow(S.NegativeOne*base, exponent), - **bracket.assumptions - ) - denom *= bracket - assert isinstance(denom, ExprContainer) - self._denom = denom - return self - - def cancel_orb_energy_frac(self) -> ExprContainer: - """ - Cancel the orbital energy fraction. 
Thereby, long denominator brackets - or brackets with rare indices are priorized. - """ - def multiply(expr_list: list[ExprContainer | PolynomContainer] - ) -> Expr | ExprContainer: - res = S.One - assert isinstance(res, Expr) - for term in expr_list: - res *= term - return res - - def cancel(num: ExprContainer, - denom: list[ExprContainer | PolynomContainer], - pref: Expr) -> ExprContainer: - num = num.copy() # avoid in place modification - cancelled_result = None - for bracket_i, bracket in enumerate(denom): - bracket_indices = bracket.idx - - # get the prefactors of all orbital energies that occur in the - # bracket that we currently want to remove - relevant_prefs = [term.prefactor for term in num.terms - if term.idx[0] in bracket_indices] - # do all indices that occur in the bracket also occur in the - # numerator? - if len(relevant_prefs) != len(bracket_indices): - continue - - # find the smallest relevant prefactor and factor the prefactor - # -> this ensures that at least one of the relevant orbital - # energies has a prefactor of 1 - # -> at least 1 of the orbital energies will not be present - # in the new numerator - # -> can only cancel each bracket at most 1 time - # -> no need to recurse just iterate through the list - min_pref = min(relevant_prefs, key=abs) # type: ignore - # the sign in the numerator has been fixed before entering this - # function -> dont change it! - if min_pref < 0: - min_pref *= -1 - - if min_pref is not S.One: - pref *= min_pref - num = factor_and_remove_number(num, min_pref) - - # all orbital energies that also occur in the bracket now - # have at least a prefactor of 1 - # others might have a pref < 1 - # construct the new numerator by subtracting the content - # of the bracket from the numerator. 
This works, because - # - all relevant orbital energies in the numerator have a - # prefactor of at least 1 and the signs in the numerator and - # in the bracket match - # - all orbital energies in the numerator have an exponent - # of 1 - if isinstance(bracket, ExprContainer): - exponent = S.One - assert isinstance(exponent, Expr) - base = bracket.inner - else: # polynom - base, exponent = bracket.base_and_exponent - logger.info(f"Cancelling: {ExprContainer(base)}") - num -= base - # build the new denominator -> lower bracket exponent by 1 - if exponent == 1: - new_denom = denom[:bracket_i] + denom[bracket_i+1:] - else: - new_denom = denom[:] - new_denom[bracket_i] = ExprContainer( - Pow(base, exponent-S.One), **bracket.assumptions - ) - # result <- 1/new_denom + new_num/denom - if cancelled_result is None: - cancelled_result = S.Zero - cancelled_result += pref * self.eri / multiply(new_denom) - # check if we have something left to cancel - if num.inner.is_number: - if num.inner is not S.Zero: - cancelled_result += \ - pref * self.eri * num / multiply(denom) - break - # return just the term if it was not possible to successfully - # cancel any bracket - assert (cancelled_result is None or - isinstance(cancelled_result, ExprContainer)) - return self.expr if cancelled_result is None else cancelled_result - - # fix the sign of the orbital energies in numerator and denominator: - # occupied orb energies are added, while virtual ones are subtracted - self.canonicalize_sign() - - # do we have something to do? 
- if self.num.inner.is_number or self.denom.inner.is_number: - return self.expr - - # sort the brackets in the denominator: - # - length of the braket: tiples > doubles - # - rarity of the contained indices: prioritize brackets with target - # indices - denom_indices = Counter(self.denom.idx) - - def bracket_sort_key(bracket: ExprContainer | PolynomContainer): - bracket_indices = bracket.idx - rarest_idx = min(bracket_indices, key=lambda s: denom_indices[s]) - return (-len(bracket), - denom_indices[rarest_idx], - sum(denom_indices[s] for s in bracket_indices)) - denom = sorted(self.denom_brackets, key=bracket_sort_key) - - return cancel(self.num, denom, self.pref) - - def symbolic_denominator(self) -> ExprContainer: - """ - Replaces the explicit orbital energy denominator with a tensor - of the correct symmetry (a SymmetricTensor with bra-ket antisymmetry): - (e_i + e_j - e_a - e_b) -> D^{ij}_{ab}. - """ - if self.denom.inner.is_number: # denom is a number -> nothing to do - return self.denom - - symbolic_denom = ExprContainer(1, **self.denom.assumptions) - has_symbolic_denom = False - for bracket in self.denom_brackets: - signs = {'-': set(), '+': set()} - for term in bracket.terms: - idx = term.idx - if len(idx) != 1: - raise RuntimeError("Expected a denominator bracket to " - "consists of orbital energies that each" - " hold a single index. 
" - f"Found: {term} in {bracket}.") - pref = term.prefactor - if pref is S.One: - signs['+'].add(idx[0]) - elif pref is S.NegativeOne: - signs['-'].add(idx[0]) - else: - raise RuntimeError(f"Found invalid prefactor {pref} in " - f"denominator bracket {bracket}.") - if signs['+'] & signs['-']: - raise RuntimeError(f"Found index that is added and " - f"subtracted in a denominator: {bracket}.") - has_symbolic_denom = True - exponent = ( - S.One if isinstance(bracket, ExprContainer) - else bracket.exponent - ) - symbolic_denom *= Pow(SymmetricTensor( - tensor_names.sym_orb_denom, tuple(signs['+']), - tuple(signs['-']), -1 - ), exponent) - if has_symbolic_denom: - symbolic_denom.antisym_tensors = ( - symbolic_denom.antisym_tensors + (tensor_names.sym_orb_denom,) - ) - return symbolic_denom - - -def factor_and_remove_number(expr: ExprContainer, number) -> ExprContainer: - """ - Factors the given number in the expression and removes it afterwards by - dividing through the number. The operations are performed in place! 
- """ - expr.factor(num=number) - expr /= number - expr.doit() - expr._inner = nsimplify(expr.inner, rational=True) - return expr - - -####################### -# Usefull type guards # -####################### -def _is_polynom_tuple(sequence: tuple - ) -> TypeGuard[tuple[PolynomContainer, ...]]: - return all(isinstance(item, PolynomContainer) for item in sequence) diff --git a/build/lib/adcgen/expression/__init__.py b/build/lib/adcgen/expression/__init__.py deleted file mode 100644 index d88cab0..0000000 --- a/build/lib/adcgen/expression/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .expr_container import ExprContainer -from .normal_ordered_container import NormalOrderedContainer -from .object_container import ObjectContainer -from .polynom_container import PolynomContainer -from .term_container import TermContainer - - -__all__ = [ - "ExprContainer", "NormalOrderedContainer", - "PolynomContainer", - "ObjectContainer", "TermContainer" -] diff --git a/build/lib/adcgen/expression/container.py b/build/lib/adcgen/expression/container.py deleted file mode 100644 index 37f0c67..0000000 --- a/build/lib/adcgen/expression/container.py +++ /dev/null @@ -1,228 +0,0 @@ -from collections.abc import Iterable -from typing import Any, TYPE_CHECKING - -from sympy import Expr, latex, sympify - -from ..indices import Index, order_substitutions - -# imports only required for type checking (avoid circular imports) -if TYPE_CHECKING: - from .expr_container import ExprContainer - - -class Container: - """ - Base class for all container classes that wrap a native - sympy object. - - Parameters - ---------- - inner: Expr | Container | Any - The algebraic expression to wrap, e.g., a sympy.Add or sympy.Mul object - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. - target_idx: Iterable[Index] | None, optional - Target indices of the expression. By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - - def __init__(self, inner: "Expr | Container | Any", - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | None = None) -> None: - # possibly extract or import the expression to wrap - if isinstance(inner, Container): - inner = inner.inner - if not isinstance(inner, Expr): - inner = sympify(inner) - assert isinstance(inner, Expr) - self._inner: Expr = inner - # set the assumptions - self._real: bool = real - - if isinstance(sym_tensors, str): - sym_tensors = (sym_tensors,) - elif not isinstance(sym_tensors, tuple): - sym_tensors = tuple(sym_tensors) - self._sym_tensors: tuple[str, ...] = sym_tensors - - if isinstance(antisym_tensors, str): - antisym_tensors = (antisym_tensors,) - elif not isinstance(antisym_tensors, tuple): - antisym_tensors = tuple(antisym_tensors) - self._antisym_tensors: tuple[str, ...] = antisym_tensors - if target_idx is not None and not isinstance(target_idx, tuple): - target_idx = tuple(target_idx) - self._target_idx: tuple[Index, ...] 
| None = target_idx - - def __str__(self) -> str: - return latex(self.inner) - - @property - def assumptions(self) -> dict[str, Any]: - return { - "real": self.real, - "sym_tensors": self.sym_tensors, - "antisym_tensors": self.antisym_tensors, - "target_idx": self.provided_target_idx, - } - - @property - def real(self) -> bool: - return self._real - - @property - def sym_tensors(self) -> tuple[str, ...]: - return self._sym_tensors - - @property - def antisym_tensors(self) -> tuple[str, ...]: - return self._antisym_tensors - - @property - def provided_target_idx(self) -> tuple[Index, ...] | None: - return self._target_idx - - @property - def inner(self) -> Expr: - return self._inner - - def permute(self, *perms: tuple[Index, Index]) -> "ExprContainer": - """ - Permute indices by applying permutation operators P_pq. - - Parameters - ---------- - *perms : tuple[Index, Index] - Permutations to apply to the wrapped object. Permutations are - applied one after another in the order they are provided. 
- """ - sub = {} - for p, q in perms: - addition = {p: q, q: p} - for old, new in sub.items(): - if new is p: - sub[old] = q - del addition[p] - elif new is q: - sub[old] = p - del addition[q] - if addition: - sub.update(addition) - return self.subs(order_substitutions(sub)) - - ################################ - # Forwards some calls to inner # - ################################ - def expand(self) -> "ExprContainer": - """ - Forwards the expand call to inner and wraps the result in a new - Container - """ - from .expr_container import ExprContainer - return ExprContainer(inner=self.inner.expand(), **self.assumptions) - - def doit(self, *args, **kwargs) -> "ExprContainer": - """ - Forwards the doit call to inner and wraps the result in a new Container - """ - from .expr_container import ExprContainer - return ExprContainer( - inner=self.inner.doit(*args, **kwargs), **self.assumptions - ) - - def subs(self, *args, **kwargs) -> "ExprContainer": - """ - Forwards the subs call to inner and wraps the result in a new Container - """ - from .expr_container import ExprContainer - return ExprContainer( - inner=self.inner.subs(*args, **kwargs), **self.assumptions - ) - - ############# - # Operators # - ############# - def __add__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. 
Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - return ExprContainer(self.inner + other, **self.assumptions) - - def __iadd__(self, other: Any) -> "ExprContainer": - return self.__add__(other) - - def __radd__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - # other: some sympy stuff or some number - return ExprContainer(other + self.inner, **self.assumptions) - - def __sub__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - return ExprContainer(self.inner - other, **self.assumptions) - - def __isub__(self, other: Any) -> "ExprContainer": - return self.__sub__(other) - - def __rsub__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - # other: some sympy stuff or some number - return ExprContainer(other - self.inner, **self.assumptions) - - def __mul__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - return ExprContainer(self.inner * other, **self.assumptions) - - def __imul__(self, other: Any) -> "ExprContainer": - return self.__mul__(other) - - def __rmul__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - # other: some sympy stuff or some number - return ExprContainer(other * self.inner, **self.assumptions) - - def __truediv__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. 
Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - return ExprContainer(self.inner / other, **self.assumptions) - - def __itruediv__(self, other: Any) -> "ExprContainer": - return self.__truediv__(other) - - def __rtruediv__(self, other: Any) -> "ExprContainer": - from .expr_container import ExprContainer - # other: some sympy stuff or some number - return ExprContainer(other / self.inner, **self.assumptions) diff --git a/build/lib/adcgen/expression/expr_container.py b/build/lib/adcgen/expression/expr_container.py deleted file mode 100644 index 29504e9..0000000 --- a/build/lib/adcgen/expression/expr_container.py +++ /dev/null @@ -1,555 +0,0 @@ -from collections.abc import Iterable, Sequence -from typing import Any - -from sympy import Add, Basic, Expr, Mul, Pow, S, Symbol, factor, nsimplify - -from ..indices import ( - Index, get_symbols, sort_idx_canonical, - _is_str_sequence, _is_index_sequence -) -from ..tensor_names import tensor_names -from .container import Container -from .term_container import TermContainer - - -class ExprContainer(Container): - """ - Wraps an arbitrary algebraic expression. - - Parameters - ---------- - inner: - The algebraic expression to wrap, e.g., a sympy.Add or sympy.Mul object - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. - target_idx: Iterable[Index] | None, optional - Target indices of the expression. 
By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - def __init__(self, inner: Expr | Container | Any, - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | Iterable[str] | None = None - ) -> None: - # import target index strings - if target_idx is not None: - if isinstance(target_idx, str) or isinstance(target_idx, Sequence): - target_idx = get_symbols(target_idx) - else: - target_tpl = tuple(target_idx) - assert (_is_str_sequence(target_tpl) or - _is_index_sequence(target_tpl)) - target_idx = get_symbols(target_tpl) - del target_tpl - # set the class attributes and import the inner expression - super().__init__( - inner=inner, real=real, sym_tensors=sym_tensors, - antisym_tensors=antisym_tensors, target_idx=target_idx - ) - # ensure that sym_tensor and antisym_tensor are immutable tuples and - # remove duplicates - self._sym_tensors = tuple(sorted(set(self._sym_tensors))) - self._antisym_tensors = tuple(sorted(set(self._antisym_tensors))) - # Now apply the given assumptions: this only happens in this class - # store target indices as sorted tuple - if self._target_idx is not None: - self.set_target_idx(self._target_idx) - # applying the tensor symmetry has a certain overlap with - # make_real: make_real will try to add ERI and Fock matrix - # to sym_tensor and apply the tensor symmetry (but only - # if the tensors were not already marked as symmetric). - # Therefore, it makes sense to manually add them here - # to avoid applying the tensor symmetry twice. - if self._sym_tensors or self._antisym_tensors: - if self._real: - self._sym_tensors = tuple(sorted(set( - self._sym_tensors + (tensor_names.fock, tensor_names.eri) - ))) - self._apply_tensor_braket_sym() - if self._real: - self.make_real(force=True) - - def __len__(self) -> int: - # ExprContainer(0) also has length 1! 
- if isinstance(self._inner, Add): - return len(self._inner.args) - else: - return 1 - - def copy(self) -> "ExprContainer": - """ - Creates a new container with the same expression and assumptions. - The wrapped expression will not be copied. - """ - return ExprContainer(self.inner, **self.assumptions) - - @property - def terms(self) -> tuple[TermContainer, ...]: - """ - Returns all terms of the expression, where a term might be a single - tensor 'a' or a product of the form 'a * b * c'. - """ - kwargs = self.assumptions - if isinstance(self._inner, Add): - return tuple( - TermContainer(inner=term, **kwargs) - for term in self._inner.args - ) - else: - return (TermContainer(inner=self._inner, **kwargs),) - - @property - def idx(self) -> tuple[Index, ...]: - """ - Returns all indices that occur in the expression. Indices that occur - multiple times will be listed multiple times. - """ - idx = [s for t in self.terms for s in t.idx] - return tuple(sorted(idx, key=sort_idx_canonical)) - - ############################### - # setters for the assumptions # - ############################### - def set_target_idx(self, target_idx: Sequence[str] | Sequence[Index] | None - ) -> None: - """ - Set the target indices of the expression. Only necessary if the - Einstein sum contension is not sufficient to determine them - automatically. - """ - if target_idx is None: - self._target_idx = target_idx - else: # import the indices - self._target_idx = tuple( - sorted(set(get_symbols(target_idx)), key=sort_idx_canonical) - ) - - @Container.sym_tensors.setter - def sym_tensors(self, tensors: Iterable[str]) -> None: - """ - Add bra-ket-symmetry to tensors according to their name. - Note that it is only formally possible to remove tensors from - sym_tensors, because the original state of a tensor is lost when the - bra-ket-symmetry is applied, i.e., after bra-ket-symmetry was added to - a tensor d^{p}_{q} it is not knwon whether it's original state was - d^{q}_{p} or d^{p}_{q}. 
- """ - if isinstance(tensors, str): - tensors = {tensors, } - else: - assert all(isinstance(t, str) for t in tensors) - tensors = set(tensors) - - if self.real: - tensors.update([tensor_names.fock, tensor_names.eri]) - tensors = tuple(sorted(tensors)) - if tensors != self._sym_tensors: - self._sym_tensors = tensors - self._apply_tensor_braket_sym() - - @Container.antisym_tensors.setter - def antisym_tensors(self, tensors: Iterable[str]) -> None: - """ - Add bra-ket-antisymmetry to tensors according to their name. - Note that it is only formally possible to remove tensors from - sym_tensors, because the original state of a tensor is lost when the - bra-ket-symmetry is applied, i.e., after bra-ket-antisymmetry was - added to a tensor d^{p}_{q} it is not knwon whether it's original - state was d^{q}_{p} or d^{p}_{q}. - """ - if isinstance(tensors, str): - tensors = (tensors,) - else: - assert all(isinstance(t, str) for t in tensors) - tensors = tuple(sorted(set(tensors))) - - if tensors != self._antisym_tensors: - self._antisym_tensors = tensors - self._apply_tensor_braket_sym() - - ################################################# - # methods that modify the expression (in place) # - ################################################# - def _apply_tensor_braket_sym(self) -> "ExprContainer": - """ - Adds the bra-ket symmetry and antisymmetry defined in - sym_tensors and antisym_tensors to the tensor objects - in the expression. - """ - if self.inner.is_number: - return self - # actually do something - res = S.Zero - for term in self.terms: - res += term._apply_tensor_braket_sym(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - return self - - def make_real(self, force: bool = False) -> "ExprContainer": - """ - Represent the expression in a real orbital basis. - - names of complex conjugate t-amplitudes, for instance t1cc -> t1 - - adds bra-ket-symmetry to the fock matrix and the ERI. 
- - Parameters - ---------- - force: bool, optional - If set the function will also run also if 'real' is already set. - (default: False) - """ - if (self.real and not force): - return self - # actually so something: first adjust the tensor symmetry - self._real = True - sym_tensors = self._sym_tensors - if tensor_names.fock not in sym_tensors or \ - tensor_names.eri not in sym_tensors: - self._sym_tensors = tuple(sorted(set( - sym_tensors + (tensor_names.fock, tensor_names.eri) - ))) - self._apply_tensor_braket_sym() - if self.inner.is_number: - return self - # and then adjust the tensor names - res = S.Zero - for term in self.terms: - res += term.make_real(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - return self - - def block_diagonalize_fock(self) -> "ExprContainer": - """ - Block diagonalize the Fock matrix, i.e. all terms that contain off - diagonal Fock matrix blocks (f_ov/f_vo) are set to 0. - """ - self.expand() - res = S.Zero - for term in self.terms: - res += term.block_diagonalize_fock(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - return self - - def diagonalize_fock(self) -> "ExprContainer": - """ - Represent the expression in the canonical orbital basis, where the - fock matrix is diagonal. Because it is not possible to - determine the target indices in the resulting expression according - to the Einstein sum convention, the current target indices will - be set manually in the resulting expression. 
- """ - # expand to get rid of polynoms as much as possible - self.expand() - diag = S.Zero - for term in self.terms: - contrib = term.diagonalize_fock(wrap_result=True) - assert isinstance(contrib, ExprContainer) - diag += contrib - assert isinstance(diag, ExprContainer) - self._inner = diag.inner - self._target_idx = diag.provided_target_idx - return self - - def rename_tensor(self, current: str, new: str) -> 'ExprContainer': - """Changes the name of a tensor from current to new.""" - assert isinstance(current, str) and isinstance(new, str) - - renamed = S.Zero - for term in self.terms: - renamed += term.rename_tensor(current, new, wrap_result=False) - assert isinstance(renamed, Expr) - self._inner = renamed - return self - - def expand_antisym_eri(self) -> 'ExprContainer': - """ - Expands the antisymmetric ERI using chemists notation - = (pr|qs) - (ps|qr). - ERI's in chemists notation are by default denoted as 'v'. - Currently this only works for real orbitals, i.e., - for symmetric ERI's = .""" - res = S.Zero - for term in self.terms: - res += term.expand_antisym_eri(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - # only update the assumptions if there was an eri to expand - if Symbol(tensor_names.coulomb) in self.inner.atoms(Symbol): - self._sym_tensors = tuple(sorted(set( - self._sym_tensors + (tensor_names.coulomb,) - ))) - return self - - def use_explicit_denominators(self) -> 'ExprContainer': - """ - Switch to an explicit representation of orbital energy denominators by - replacing all symbolic denominators by their explicit counter part, - i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. 
- """ - res = S.Zero - for term in self.terms: - res += term.use_explicit_denominators(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - # remove the symbolic denom from the assumptions if necessary - if tensor_names.sym_orb_denom in self._antisym_tensors: - self._antisym_tensors = tuple( - t for t in self._antisym_tensors - if t != tensor_names.sym_orb_denom - ) - return self - - def substitute_contracted(self) -> 'ExprContainer': - """ - Tries to substitute all contracted indices with pretty indices, i.e. - i, j, k instad of i3, n4, o42 etc. - """ - self.expand() - res = S.Zero - for term in self.terms: - contrib = term.substitute_contracted(wrap_result=False) - assert isinstance(contrib, Expr) - res += contrib - self._inner = res - return self - - def substitute_with_generic(self) -> 'ExprContainer': - """ - Subsitutes all contracted indices with new, unused generic indices. - """ - self.expand() - res = S.Zero - for term in self.terms: - res += term.substitute_with_generic(wrap_result=False) - assert isinstance(res, Expr) - self._inner = res - return self - - def factor(self, num=None) -> 'ExprContainer': - """ - Tries to factors the expression. Note: this only works for simple cases - - Parameters - ---------- - num : optional - Number to factor in the expression. - """ - - if num is None: - res = factor(self.inner) - else: - num = nsimplify(num, rational=True) - factored = map( - lambda t: Mul(nsimplify(Pow(num, -1), rational=True), t.inner), - self.terms - ) - res = Mul(num, Add(*factored), evaluate=False) - assert isinstance(res, Expr) - self._inner = res - return self - - def expand_intermediates(self, fully_expand: bool = True - ) -> 'ExprContainer': - """ - Expand the known intermediates in the expression. 
- - Parameters - ---------- - fully_expand: bool, optional - True (default): The intermediates are recursively expanded - into orbital energies and ERI (if possible) - False: The intermediates are only expanded once, e.g., n'th - order MP t-amplitudes are expressed by means of (n-1)'th order - MP t-amplitudes and ERI. - """ - # TODO: only expand specific intermediates - # need to adjust the target indices -> not necessarily possible to - # determine them after expanding intermediates - expanded = S.Zero - for t in self.terms: - expanded += t.expand_intermediates(fully_expand=fully_expand) - assert isinstance(expanded, ExprContainer) - self._inner = expanded.inner - self.set_target_idx(expanded.provided_target_idx) - return self - - def use_symbolic_denominators(self) -> "ExprContainer": - """ - Replace all orbital energy denominators in the expression by tensors, - e.g., (e_a + e_b - e_i - e_j)^{-1} will be replaced by D^{ab}_{ij}, - where D is a SymmetricTensor. - """ - symbolic_denom = S.Zero - has_symbolic_denom = False - for term in self.terms: - term = term.use_symbolic_denominators() - symbolic_denom += term.inner - if tensor_names.sym_orb_denom in term.antisym_tensors: - has_symbolic_denom = True - # the symbolic denominators have additional antisymmetry - # for bra ket swaps - # -> this is the only possible change in the assumptions - # -> only set if we replaced a denominator in the expr - assert isinstance(symbolic_denom, Expr) - self._inner = symbolic_denom - if has_symbolic_denom: - self._antisym_tensors = tuple(sorted(set( - self._antisym_tensors + (tensor_names.sym_orb_denom,) - ))) - return self - - ########################################################### - # Overwrite parent class methods for inplace modification # - ########################################################### - def expand(self): - """ - Forwards the expand call to inner replacing the wrapped - expression - """ - res = self._inner.expand() - assert isinstance(res, Expr) - 
self._inner = res - return self - - def doit(self, *args, **kwargs): - """ - Forwards the doit call to inner replacing the wrapped - expression - """ - res = self._inner.doit(*args, **kwargs) - assert isinstance(res, Expr) - self._inner = res - return self - - def subs(self, *args, **kwargs): - """ - Forwards the subs call to inner replacing the wrapped - expression - """ - res = self._inner.subs(*args, **kwargs) - assert isinstance(res, Expr) - self._inner = res - return self - - ####################################### - # Operators for in-place modification # - ####################################### - def __iadd__(self, other: Any) -> "ExprContainer": - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - elif isinstance(other, Basic): - # Apply the assumptions to the sympy object - other = ExprContainer(other, **self.assumptions).inner - res = self.inner + other - assert isinstance(res, Expr) - self._inner = res - return self - - def __isub__(self, other: Any) -> "ExprContainer": - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - elif isinstance(other, Basic): - # Apply the assumptions to the sympy object - other = ExprContainer(other, **self.assumptions).inner - res = self.inner - other - assert isinstance(res, Expr) - self._inner = res - return self - - def __imul__(self, other: Any) -> "ExprContainer": - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. 
Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - elif isinstance(other, Basic): - # Apply the assumptions to the sympy object - other = ExprContainer(other, **self.assumptions).inner - res = self.inner * other - assert isinstance(res, Expr) - self._inner = res - return self - - def __itruediv__(self, other: Any) -> "ExprContainer": - if isinstance(other, Container): - if self.assumptions != other.assumptions: - raise TypeError("Assumptions need to be equal. Got: " - f"{self.assumptions} and {other.assumptions}") - other = other.inner - elif isinstance(other, Basic): - other = ExprContainer(other, **self.assumptions).inner - res = self.inner / other - assert isinstance(res, Expr) - self._inner = res - return self - - def to_latex_str(self, terms_per_line: int | None = None, - only_pull_out_pref: bool = False, - spin_as_overbar: bool = False) -> str: - """ - Transforms the expression to a latex string. - - Parameters - ---------- - terms_per_line: int, optional - Returns the expression using the syntax from an 'align' - environment with the provided number of terms per line. - only_pull_out_pref: bool, optional - Use the 'latex' printout from sympy, while prefactors are printed - in front of each term. This avoids long fractions with a huge - number of tensors in the numerator and only a factor in the - denominator. - spin_as_overbar: bool, optional - Instead of printing the spin of an index as suffix (idxname_spin) - use an overbar for beta spin and no indication for alpha. Because - alpha indices and indices without spin are not distinguishable - anymore, this only works if all indices have a spin set (the - expression is completely represented in spatial orbitals). 
- """ - tex_terms = [ - term.to_latex_str(only_pull_out_pref, spin_as_overbar) - for term in self.terms - ] - # remove '+' in the first term - if tex_terms[0].lstrip().startswith("+"): - tex_terms[0] = tex_terms[0].replace('+', '', 1).lstrip() - # just the raw output without linebreaks - if terms_per_line is None: - return " ".join(tex_terms) - assert isinstance(terms_per_line, int) - # only print x terms per line in an align environment - # create the string of all but the last line - tex_string = "" - for i in range(0, len(tex_terms) - terms_per_line, terms_per_line): - tex_string += ( - "& " + " ".join(tex_terms[i:i+terms_per_line]) + - " \\nonumber\\\\\n" - ) - # add the last line. Could ommit this if the equation is not supposed - # to have a number. - if len(tex_terms) % terms_per_line: - remaining = len(tex_terms) % terms_per_line - else: - remaining = terms_per_line - tex_string += "& " + " ".join(tex_terms[-remaining:]) - return tex_string diff --git a/build/lib/adcgen/expression/normal_ordered_container.py b/build/lib/adcgen/expression/normal_ordered_container.py deleted file mode 100644 index 4989bc5..0000000 --- a/build/lib/adcgen/expression/normal_ordered_container.py +++ /dev/null @@ -1,199 +0,0 @@ -from collections.abc import Iterable, Sequence -from functools import cached_property -from typing import Any -import itertools - -from sympy.physics.secondquant import F, Fd, NO -from sympy import Expr, Mul - -from ..indices import Index -from ..misc import cached_member -from .container import Container -from .object_container import ObjectContainer - - -class NormalOrderedContainer(ObjectContainer): - """ - Wrapper for a normal ordered operator string. - - Parameters - ---------- - inner: - The NO object to wrap - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. - target_idx: Iterable[Index] | None, optional - Target indices of the expression. By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - def __init__(self, inner: Expr | Container | Any, - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | None = None) -> None: - # call init from ObjectContainers parent class - super(ObjectContainer, self).__init__( - inner=inner, real=real, sym_tensors=sym_tensors, - antisym_tensors=antisym_tensors, target_idx=target_idx - ) - assert isinstance(self._inner, NO) - - def __len__(self) -> int: - return len(self._extract_operators.args) - - #################################### - # Some helpers for accessing inner # - #################################### - @property - def _extract_operators(self) -> Expr: - operators = self._inner.args[0] - assert isinstance(operators, Mul) - return operators - - @cached_property - def objects(self) -> tuple["ObjectContainer", ...]: - return tuple( - ObjectContainer(op, **self.assumptions) - for op in self._extract_operators.args - ) - - @property - def exponent(self) -> Expr: - # actually sympy should throw an error if a NO object contains a Pow - # obj or anything else than a*b*c - exp = set(o.exponent for o in self.objects) - if len(exp) == 1: - return exp.pop() - else: - raise NotImplementedError( - 'Exponent only implemented for NO objects, where all ' - f'operators share the same exponent. 
{self}' - ) - - @cached_property - def idx(self) -> tuple[Index, ...]: - """ - Indices of the normal ordered operator string. Indices that appear - multiple times will be listed multiple times. - """ - objects = self.objects - exp = self.exponent - assert exp.is_Integer - exp = int(exp) - ret = tuple(s for o in objects for s in o.idx for _ in range(exp)) - if len(objects) != len(ret): - raise NotImplementedError('Expected a NO object only to contain' - "second quantized operators with an " - f"exponent of 1. {self}") - return ret - - ################################################ - # compute additional properties for the object # - ################################################ - @property - def type_as_str(self) -> str: - return 'NormalOrdered' - - @cached_member - def description(self, target_idx: Sequence[Index] | None = None, - include_exponent: bool = True) -> str: - """ - Generates a string that describes the operators. - - Parameters - ---------- - target_idx: Sequence[Index] | None, optional - The target indices of the term the operators are a part of. - If given, the explicit names of target indices will be - included in the description. - include_exponent: bool, optional - If set the exponent of the object will be included in the - description. 
(default: True) - """ - # exponent has to be 1 for all contained operators - assert self.exponent == 1 - _ = include_exponent - - obj_contribs = [] - for o in self.objects: - # add either index space or target idx name - idx = o.idx - assert len(idx) == 1 - idx = idx[0] - if target_idx is not None and idx in target_idx: - op_str = f"{idx.name}_{idx.space[0]}{idx.spin}" - else: - op_str = idx.space[0] + idx.spin - # add a plus for creation operators - base = o.base - if isinstance(base, Fd): - op_str += '+' - elif not isinstance(base, F): # has to be annihilate here - raise TypeError("Unexpected content for " - f"NormalOrderedContainer: {o}, {type(o)}.") - obj_contribs.append(op_str) - return f"{self.type_as_str}-{'-'.join(sorted(obj_contribs))}" - - @cached_member - def crude_pos(self, target_idx: Sequence[Index] | None = None, - include_exponent: bool = True) -> dict[Index, list[str]]: - """ - Returns the 'crude' position of the indices in the operator string. - - Parameters - ---------- - target_idx: Sequence[Index] | None, optional - The target indices of the term the operators are a part of. - If given, the names of target indices will be included in - the positions. - include_exponent: bool, optional - If set the exponent of the object will be considered in the - positions. (default: True) - """ - - descr = self.description( - target_idx=target_idx, include_exponent=include_exponent - ) - ret = {} - for o in self.objects: - o_descr = o.description( - target_idx=target_idx, include_exponent=include_exponent - ) - idx = o.idx - assert len(idx) == 1 - idx = idx[0] - if idx not in ret: - ret[idx] = [] - ret[idx].append(f"{descr}_{o_descr}") - return ret - - @property - def allowed_spin_blocks(self) -> tuple[str, ...]: - """ - Returns the valid spin blocks of the operator string. 
- """ - allowed_blocks = [] - for obj in self.objects: - blocks = obj.allowed_spin_blocks - if blocks is not None: - allowed_blocks.append(blocks) - return tuple("".join(b) for b in itertools.product(*allowed_blocks)) - - def to_latex_str(self, only_pull_out_pref: bool = False, - spin_as_overbar: bool = False) -> str: - """Returns a latex string for the object.""" - return " ".join( - o.to_latex_str(only_pull_out_pref, spin_as_overbar) - for o in self.objects - ) diff --git a/build/lib/adcgen/expression/object_container.py b/build/lib/adcgen/expression/object_container.py deleted file mode 100644 index 46e6799..0000000 --- a/build/lib/adcgen/expression/object_container.py +++ /dev/null @@ -1,879 +0,0 @@ -from collections.abc import Iterable -from functools import cached_property -from typing import Any, Sequence, TYPE_CHECKING -import itertools - -from sympy.physics.secondquant import F, Fd, FermionicOperator, NO -from sympy import Add, Expr, Mul, Number, Pow, S, Symbol, latex, sympify - -from ..indices import Index, _is_index_tuple -from ..logger import logger -from ..misc import cached_member -from ..sympy_objects import ( - Amplitude, AntiSymmetricTensor, KroneckerDelta, NonSymmetricTensor, - SymbolicTensor, SymmetricTensor -) -from ..tensor_names import ( - is_adc_amplitude, is_t_amplitude, is_gs_density, split_gs_density_name, - split_t_amplitude_name, tensor_names -) -from .container import Container -# imports only required for type checking (avoid circular imports) -if TYPE_CHECKING: - from .expr_container import ExprContainer - - -class ObjectContainer(Container): - """ - Wrapper for a single object, e.g., a tensor that is part of a term. - - Parameters - ---------- - inner: - The object to wrap, e.g., an AntiSymmetricTensor - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. 
Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. - target_idx: Iterable[Index] | None, optional - Target indices of the expression. By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - def __init__(self, inner: Expr | Container | Any, - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | None = None) -> None: - super().__init__( - inner=inner, real=real, sym_tensors=sym_tensors, - antisym_tensors=antisym_tensors, target_idx=target_idx - ) - # we can not wrap an Add object: should be wrapped by ExprContainer - # we can not wrap an Mul object: should be wrapped by TermContainer - # we can not wrap an NO object: should be wrapped by - # NormalOrderedContainer - # we can not wrap a polynom: should be wrapped by PolynomContainer - # But everything else should be fine (single objects) - assert not isinstance(self._inner, (Add, Mul, NO)) - if isinstance(self._inner, Pow): # polynom - assert not isinstance(self._inner.args[0], Add) - - #################################### - # Some helpers for accessing inner # - #################################### - @property - def base(self) -> Expr: - """ - Returns the base of an :py:class:`Pow` object (base^exp) - if we have - a Pow object. Otherwise the object itself is returned. - """ - if isinstance(self.inner, Pow): - return self.inner.args[0] - else: - return self.inner - - @property - def exponent(self) -> Expr: - """ - Returns the exponent of an :py:class:`Pow` object (base^exp). 
- """ - if isinstance(self.inner, Pow): - return self.inner.args[1] - else: - return sympify(1) - - @property - def base_and_exponent(self) -> tuple[Expr, Expr]: - """Return base and exponent of the object.""" - base = self.inner - if isinstance(base, Pow): - return base.args - else: - return base, sympify(1) - - @property - def name(self) -> str | None: - """Extract the name of tensor objects.""" - base = self.base - if isinstance(base, SymbolicTensor): - return base.name - return None - - @property - def is_t_amplitude(self) -> bool: - """Whether the object is a ground state t-amplitude.""" - name = self.name - return False if name is None else is_t_amplitude(name) - - @property - def is_gs_density(self) -> bool: - """Check whether the object is a ground state density tensor.""" - name = self.name - return False if name is None else is_gs_density(name) - - @property - def is_orbital_energy(self) -> bool: - """Whether the object is a orbital energy tensor.""" - # all orb energies should be nonsym_tensors actually - return self.name == tensor_names.orb_energy and len(self.idx) == 1 - - @property - def contains_only_orb_energies(self) -> bool: - """Whether the object is a orbital energy tensor.""" - # To have a common interface with e.g. 
Polynoms - return self.is_orbital_energy - - @cached_property - def idx(self) -> tuple[Index, ...]: - """Return the indices of the object.""" - if self.inner.is_number: # prefactor - return tuple() - obj = self.base - # Antisym-, Sym-, Nonsymtensor, Amplitude, Kroneckerdelta - if isinstance(obj, (SymbolicTensor, KroneckerDelta)): - return obj.idx - elif isinstance(obj, FermionicOperator): # F and Fd - idx = obj.args - assert _is_index_tuple(idx) - return idx - elif isinstance(obj, Symbol): # a symbol without indices - return tuple() - else: - raise TypeError("Can not determine the indices for an obj of type" - f"{type(obj)}: {self}.") - - @property - def space(self) -> str: - """Returns the index space (tensor block) of the object.""" - return "".join(s.space[0] for s in self.idx) - - @property - def spin(self) -> str: - """Returns the spin block of the current object.""" - return "".join(s.spin if s.spin else "n" for s in self.idx) - - ################################################ - # compute additional properties for the object # - ################################################ - @property - def type_as_str(self) -> str: - """Returns a string that describes the type of the object.""" - if self.inner.is_number: - return "prefactor" - obj = self.base - if isinstance(obj, Amplitude): - return "amplitude" - elif isinstance(obj, SymmetricTensor): - return "symtensor" - elif isinstance(obj, AntiSymmetricTensor): - return "antisymtensor" - elif isinstance(obj, NonSymmetricTensor): - return "nonsymtensor" - elif isinstance(obj, KroneckerDelta): - return "delta" - elif isinstance(obj, F): - return "annihilate" - elif isinstance(obj, Fd): - return "create" - elif isinstance(obj, Symbol): - return "symbol" - else: - raise TypeError(f"Unknown object {self} of type {type(obj)}.") - - def longname(self, use_default_names: bool = False) -> str | None: - """ - Returns a more exhaustive name of the object. Used for intermediates - and transformation to code. 
- - Parameters - ---------- - use_default_names: bool, optional - If set, the default names are used to generate the longname. - This is necessary to e.g., map a tensor name to an intermediate - name, since they are defined using the default names. - (default: False) - """ - if any(s.spin for s in self.idx): - logger.warning("Longname only covers the space of indices. The " - "spin is omitted.") - name = None - base = self.base - if isinstance(base, SymbolicTensor): - name = base.name - # t-amplitudes - if is_t_amplitude(name): - assert isinstance(base, Amplitude) - if len(base.upper) != len(base.lower): - raise RuntimeError("Number of upper and lower indices not " - f"equal for t-amplitude {self}.") - base_name, ext = split_t_amplitude_name(name) - if use_default_names: - base_name = tensor_names.defaults().get("gs_amplitude") - assert base_name is not None - if ext: - name = f"{base_name}{len(base.upper)}_{ext}" - else: # name for t-amplitudes without a order - name = f"{base_name}{len(base.upper)}" - elif is_adc_amplitude(name): # adc amplitudes - assert isinstance(base, Amplitude) - # need to determine the excitation space as int - space = self.space - assert all(sp in ["o", "v", "c"] for sp in space) - n_o = space.count("o") + space.count("c") - n_v = space.count("v") - if n_o == n_v: # pp-ADC - n = n_o # p-h -> 1 // 2p-2h -> 2 etc. - else: # ip-/ea-/dip-/dea-ADC - n = min([n_o, n_v]) + 1 # h -> 1 / 2h -> 1 / p-2h -> 2... 
- lr = "l" if name == tensor_names.left_adc_amplitude else 'r' - name = f"u{lr}{n}" - elif is_gs_density(name): # mp densities - assert isinstance(base, AntiSymmetricTensor) - if len(base.upper) != len(base.lower): - raise RuntimeError("Number of upper and lower indices not " - f"equal for mp density {self}.") - base_name, ext = split_gs_density_name(name) - if use_default_names: - base_name = tensor_names.defaults().get("gs_density") - assert base_name is not None - if ext: - name = f"{base_name}0_{ext}_{self.space}" - else: # name for gs-dentity without a order - name = f"{base_name}0_{self.space}" - elif name.startswith('t2eri'): # t2eri - name = f"t2eri_{name[5:]}" - elif name == 't2sq': - pass - else: # arbitrary other tensor - name += f"_{self.space}" - elif isinstance(base, KroneckerDelta): # deltas -> d_oo / d_vv - name = f"d_{self.space}" - return name - - @cached_property - def order(self) -> int: - """ - Returns the perturbation theoretical order of the object (tensor). - """ - from ..intermediates import Intermediates - - if isinstance(self.base, SymbolicTensor): - name = self.name - assert name is not None - if name == tensor_names.eri: # eri - return 1 - elif is_t_amplitude(name): - _, ext = split_t_amplitude_name(name) - return int(ext.replace('c', '')) - elif is_gs_density(name): - # we might have p / p2 / p3 / ... - _, ext = split_gs_density_name(name) - if ext: - return int(ext) - # all intermediates - longname = self.longname(True) - assert longname is not None - itmd_cls = Intermediates().available.get(longname, None) - if itmd_cls is not None: - return itmd_cls.order - return 0 - - @cached_member - def description(self, target_idx: Sequence[Index] | None = None, - include_exponent: bool = True) -> str: - """ - Generates a string that describes the object. - - Parameters - ---------- - target_idx: Sequence[Index] | None, optional - The target indices of the term the object is a part of. 
- If given, the explicit names of target indices will be - included in the description. - include_exponent: bool, optional - If set the exponent of the object will be included in the - description. (default: True) - """ - - descr = [self.type_as_str] - if descr[0] in ["prefactor", "symbol"]: - return descr[0] - - if descr[0] in ["antisymtensor", "amplitude", "symtensor"]: - base, exponent = self.base_and_exponent - assert isinstance(base, AntiSymmetricTensor) - # - space separated in upper and lower part - upper, lower = base.upper, base.lower - assert _is_index_tuple(upper) and _is_index_tuple(lower) - data_u = "".join(s.space[0] + s.spin for s in upper) - data_l = "".join(s.space[0] + s.spin for s in lower) - descr.append(f"{base.name}-{data_u}-{data_l}") - # names of target indices, also separated in upper and lower part - # indices in upper and lower have been sorted upon tensor creation! - if target_idx is not None: - target_u = "".join(s.name for s in upper if s in target_idx) - target_l = "".join(s.name for s in lower if s in target_idx) - if target_l or target_u: # we have at least 1 target idx - if base.bra_ket_sym is S.Zero: # no bra ket symmetry - if not target_u: - target_u = "none" - if not target_l: - target_l = "none" - descr.append(f"{target_u}-{target_l}") - else: # bra ket sym or antisym - # target indices in both spaces - if target_u and target_l: - descr.extend(sorted([target_u, target_l])) - else: # only in 1 space at least 1 target idx - descr.append(target_u + target_l) - if include_exponent: # add exponent to description - descr.append(str(exponent)) - elif descr[0] == "nonsymtensor": - data = "".join(s.space[0] + s.spin for s in self.idx) - descr.append(f"{self.name}-{data}") - if target_idx is not None: - target_str = "".join( - s.name + str(i) for i, s in - enumerate(self.idx) if s in target_idx - ) - if target_str: - descr.append(target_str) - if include_exponent: - descr.append(str(self.exponent)) - elif descr[0] in ["delta", 
"annihilate", "create"]: - data = "".join(s.space[0] + s.spin for s in self.idx) - descr.append(data) - if target_idx is not None: - target_str = "".join( - s.name for s in self.idx if s in target_idx - ) - if target_str: - descr.append(target_str) - if include_exponent: - descr.append(str(self.exponent)) - else: - raise ValueError(f"Unknown object {self} of type {descr[0]}") - return "-".join(descr) - - @cached_member - def crude_pos(self, target_idx: Sequence[Index] | None = None, - include_exponent: bool = True) -> dict[Index, list[str]]: - """ - Returns the 'crude' position of the indices in the object. - (e.g. only if they are located in bra/ket, not the exact position). - - Parameters - ---------- - target_idx: Sequence[Index] | None, optional - The target indices of the term the object is a part of. - If given, the names of target indices will be included in - the positions. - include_exponent: bool, optional - If set the exponent of the object will be considered in the - positions. 
(default: True) - """ - if not self.idx: # just a number (prefactor or symbol) - return {} - - ret = {} - description = self.description( - include_exponent=include_exponent, target_idx=target_idx - ) - obj = self.base - # antisym-, symtensor and amplitude - if isinstance(obj, AntiSymmetricTensor): - for uplo, idx_tpl in (("u", obj.upper), ("l", obj.lower)): - assert _is_index_tuple(idx_tpl) - for s in idx_tpl: - # space (upper/lower) in which the tensor occurs - pos = [description] - if obj.bra_ket_sym is S.Zero: - pos.append(uplo) - # space (occ/virt) of neighbour indices - neighbours = [i for i in idx_tpl if i is not s] - if neighbours: - neighbour_data = "".join( - i.space[0] + i.spin for i in neighbours - ) - pos.append(neighbour_data) - # names of neighbour target indices - if target_idx is not None: - neighbour_target = [ - i.name for i in neighbours if i in target_idx - ] - if neighbour_target: - pos.append("".join(neighbour_target)) - if s not in ret: - ret[s] = [] - ret[s].append("-".join(pos)) - elif isinstance(obj, NonSymmetricTensor): # nonsymtensor - # target idx position is already in the description - idx = self.idx - for i, s in enumerate(idx): - if s not in ret: - ret[s] = [] - ret[s].append(f"{description}_{i}") - # delta, create, annihilate - elif isinstance(obj, (KroneckerDelta, F, Fd)): - for s in self.idx: - if s not in ret: - ret[s] = [] - ret[s].append(description) - else: - raise ValueError(f"Unknown object {self} of type {type(obj)}") - return ret - - @property - def allowed_spin_blocks(self) -> tuple[str, ...] | None: - """ - Returns the valid spin blocks of the object. 
- """ - from ..intermediates import Intermediates, RegisteredIntermediate - - # prefactor or symbol have no indices -> no allowed spin blocks - if not self.idx: - return None - - obj = self.base - # antisym-, sym-, nonsymtensor and amplitude - if isinstance(obj, SymbolicTensor): - name = obj.name - if name == tensor_names.eri: # hardcode the ERI spin blocks - return ("aaaa", "abab", "abba", "baab", "baba", "bbbb") - # t-amplitudes: all spin conserving spin blocks are allowed, i.e., - # all blocks with the same amount of alpha and beta indices - # in upper and lower - elif is_t_amplitude(name): - idx = obj.idx - assert not len(idx) % 2 - n = len(idx)//2 - return tuple(sorted([ - "".join(block) for block in - itertools.product("ab", repeat=len(idx)) - if block[:n].count("a") == block[n:].count("a") - ])) - elif name == tensor_names.coulomb: # ERI in chemist notation - return ("aaaa", "aabb", "bbaa", "bbbb") - elif isinstance(obj, KroneckerDelta): # delta - # spins have to be equal - return ("aa", "bb") - elif isinstance(obj, FermionicOperator): # create / annihilate - # both spins allowed! - return ("a", "b") - # the known allowed spin blocks of eri, t-amplitudes and deltas - # may be used to generate the spin blocks of other intermediates - longname = self.longname(True) - assert longname is not None - itmd = Intermediates().available.get(longname, None) - if itmd is None: - logger.warning( - f"Could not determine valid spin blocks for {self}." 
- ) - return None - assert isinstance(itmd, RegisteredIntermediate) - return itmd.allowed_spin_blocks - - def to_latex_str(self, only_pull_out_pref: bool = False, - spin_as_overbar: bool = False) -> str: - """Returns a latex string for the object.""" - - def format_indices(indices) -> str: - if spin_as_overbar: - spins = [s.spin for s in indices] - if any(spins) and not all(spins): - raise ValueError("All indices have to have a spin " - "assigned in order to differentiate " - "indices without spin from indices with " - f"alpha spin: {self}") - return "".join( - f"\\overline{{{i.name}}}" if s == "b" else i.name - for i, s in zip(indices, spins) - ) - else: - return "".join(latex(i) for i in indices) - - if only_pull_out_pref: # use sympy latex print - return self.__str__() - - name = self.name - obj, exp = self.base_and_exponent - if isinstance(obj, SymbolicTensor): - assert name is not None - special_tensors = { - tensor_names.eri: ( # antisym ERI physicist - lambda up, lo: f"\\langle {up}\\vert\\vert {lo}\\rangle" - ), - tensor_names.fock: ( # fock matrix - lambda up, lo: f"{tensor_names.fock}_{{{up}{lo}}}" - ), - # coulomb integral chemist notation - tensor_names.coulomb: lambda up, lo: f"({up}\\vert {lo})", - # 2e3c integral in asymmetric RI - tensor_names.ri_asym_eri: lambda up, lo: f"({up}\\vert {lo})", - # orbital energy - tensor_names.orb_energy: lambda _, lo: f"\\varepsilon_{{{lo}}}" - } - # convert the indices to string - if isinstance(obj, AntiSymmetricTensor): - upper = format_indices(obj.upper) - lower = format_indices(obj.lower) - elif isinstance(obj, NonSymmetricTensor): - upper, lower = None, format_indices(obj.indices) - else: - raise TypeError(f"Unknown tensor object {obj} of type " - f"{type(obj)}") - - if name in special_tensors: - tex_str = special_tensors[name](upper, lower) - else: - order_str = None - if is_t_amplitude(name): # mp t-amplitudes - base_name, ext = split_t_amplitude_name(name) - if "c" in ext: - order_str = f"({ext.replace('c', 
'')})\\ast" - else: - order_str = f"({ext})" - order_str = f"}}^{{{order_str}}}" - name = f"{{{base_name}" - elif is_gs_density(name): # mp densities - _, ext = split_gs_density_name(name) - order_str = f"}}^{{({ext})}}" - name = "{\\rho" - - tex_str = name - if upper is not None: - tex_str += f"^{{{upper}}}" - tex_str += f"_{{{lower}}}" - - # append pt order for amplitude and mp densities - if order_str is not None: - tex_str += order_str - elif isinstance(obj, KroneckerDelta): - tex_str = f"\\delta_{{{format_indices(obj.idx)}}}" - elif isinstance(obj, F): # annihilate - tex_str = f"a_{{{format_indices(obj.args)}}}" - elif isinstance(obj, Fd): # create - tex_str = f"a^\\dagger_{{{format_indices(obj.args)}}}" - else: - return self.__str__() - - if exp != 1: - # special case for ERI and coulomb - if name in [tensor_names.eri, tensor_names.coulomb]: - tex_str += f"^{{{exp}}}" - else: - tex_str = f"\\bigl({tex_str}\\bigr)^{{{exp}}}" - return tex_str - - ################################### - # methods manipulating the object # - ################################### - def _apply_tensor_braket_sym(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Applies the bra-ket symmetry defined in sym_tensors and antisym_tensors - to the current object. If wrap_result is set, the new object will be - wrapped by :py:class:`ExprContainer`. 
- """ - from .expr_container import ExprContainer - - obj = self.inner - base, exponent = self.base_and_exponent - # antisymtensor, symtensor or amplitude - if isinstance(base, AntiSymmetricTensor): - name = base.name - braketsym: None | Number = None - if name in self.sym_tensors and base.bra_ket_sym is not S.One: - braketsym = S.One - elif name in self.antisym_tensors and \ - base.bra_ket_sym is not S.NegativeOne: - braketsym = S.NegativeOne - if braketsym is not None: - obj = Pow( - base.add_bra_ket_sym(braketsym), - exponent - ) - if wrap_result: - obj = ExprContainer(inner=obj, **self.assumptions) - return obj - - def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr": - """ - Represent the object in a real orbital basis by renaming the - complex conjugate t-amplitudes, for instance 't1cc' -> 't1'. - - Parameters - ---------- - wrap_result: bool, optional - If set the result will be wrapped with - :py:class:`ExprContainer`. Otherwise the unwrapped - object is returned. (default: True) - """ - from .expr_container import ExprContainer - - real_obj = self.inner - if self.is_t_amplitude: - old = self.name - assert old is not None - base_name, ext = split_t_amplitude_name(old) - new = f"{base_name}{ext.replace('c', '')}" - if old != new: # only rename when name changes - base, exponent = self.base_and_exponent - assert isinstance(base, Amplitude) - real_obj = Pow( - Amplitude(new, base.upper, base.lower, base.bra_ket_sym), - exponent - ) - if wrap_result: - kwargs = self.assumptions - kwargs["real"] = True - real_obj = ExprContainer(real_obj, **kwargs) - return real_obj - - def block_diagonalize_fock(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Block diagonalize the Fock matrix, i.e. if the object is part of an - off-diagonal fock matrix block, it is set to 0. - - Parameters - ---------- - wrap_result: bool, optional - If this is set the result will be wrapped with an - :py:class:`ExprContainer`. 
(default: True) - """ - from .expr_container import ExprContainer - - bl_diag = self.inner - if self.name == tensor_names.fock: - sp1, sp2 = self.space - if sp1 != sp2: - bl_diag = S.Zero - if wrap_result: - bl_diag = ExprContainer(bl_diag, **self.assumptions) - return bl_diag - - def diagonalize_fock(self, target: Sequence[Index], - wrap_result: bool = False - ) -> tuple["ExprContainer | Expr", dict[Index, Index]]: # noqa E501 - """ - Diagonalize the fock matrix, i.e., if the object is a fock matrix - element it is replaced by an orbital energy - but only if no - information is lost. - If the result is wrapped, the target indices will be set in the - resulting expression, because it might not be possible to - determine them according to the einstein sum convention - (f_ij X_j -> e_i X_i). - """ - from ..func import evaluate_deltas - - def pack_result(diag, sub, target): - if wrap_result: - assumptions = self.assumptions - assumptions["target_idx"] = target - diag = Expr(diag, **assumptions) - return diag, sub - - if self.name != tensor_names.fock: # no fock matrix - return pack_result(self.inner, {}, target) - # build a delta with the fock indices - p, q = self.idx - delta = KroneckerDelta(p, q) - if delta is S.Zero: # off diagonal block - assert isinstance(delta, Number) - return pack_result(delta, {}, target) - elif delta is S.One: - # diagonal fock element: if we evaluate it, we might loose a - # contracted index. 
- return pack_result(self.inner, {}, target) - # try to evaluate the delta - result = evaluate_deltas(Mul(self.inner, delta), target_idx=target) - if isinstance(result, Mul): # could not evaluate - return pack_result(self.inner, {}, target) - # check which of the indices survived - remaining_idx = result.atoms(Index) - assert len(remaining_idx) == 1 # only one of the indices can survive - remaining_idx = remaining_idx.pop() - # dict holding the necessary index substitution - sub = {} - if p is remaining_idx: # p survived - sub[q] = p - else: # q surived - assert q is remaining_idx - sub[p] = q - diag = Pow( - NonSymmetricTensor(tensor_names.orb_energy, (remaining_idx,)), - self.exponent - ) - return pack_result(diag, sub, target) - - def rename_tensor(self, current: str, new: str, - wrap_result: bool = True) -> "ExprContainer | Expr": - """ - Renames a tensor object with name 'current' to 'new'. If wrap_result - is set, the result will be wrapped with an :py:class:`ExprContainer`. - """ - from .expr_container import ExprContainer - - obj = self.inner - base, exponent = self.base_and_exponent - if isinstance(base, SymbolicTensor) and base.name == current: - if isinstance(base, AntiSymmetricTensor): - # antisym, amplitude, symmetric - base = base.__class__( - new, base.upper, base.lower, base.bra_ket_sym - ) - elif isinstance(base, NonSymmetricTensor): - # nonsymmetric - base = base.__class__(new, base.indices) - else: - raise TypeError(f"Unknown tensor type {type(base)}.") - obj = Pow(base, exponent) - if wrap_result: - obj = ExprContainer(obj, **self.assumptions) - return obj - - def expand_antisym_eri(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Expands the antisymmetric ERI using chemists notation - = (pr|qs) - (ps|qr). - ERI's in chemists notation are by default denoted as 'v'. - Currently this only works for real orbitals, i.e., - for symmetric ERI's = . 
- """ - from .expr_container import ExprContainer - - expanded_coulomb = False - res = self.inner - base, exponent = self.base_and_exponent - if isinstance(base, AntiSymmetricTensor) and \ - base.name == tensor_names.eri: - # ensure that the eri is Symmetric. Otherwise we would introduce - # additional unwanted symmetry in the result - if base.bra_ket_sym != 1: - raise NotImplementedError("Can only expand antisymmetric ERI " - "with bra-ket symmetry " - "(real orbitals).") - p, q, r, s = self.idx # - res = S.Zero - if p.spin == r.spin and q.spin == s.spin: - res += SymmetricTensor(tensor_names.coulomb, (p, r), (q, s), 1) - expanded_coulomb = True - if p.spin == s.spin and q.spin == r.spin: - res -= SymmetricTensor(tensor_names.coulomb, (p, s), (q, r), 1) - expanded_coulomb = True - res = Pow(res, exponent) - - if wrap_result: - kwargs = self.assumptions - if expanded_coulomb: - kwargs["sym_tensors"] += (tensor_names.coulomb,) - res = ExprContainer(res, **kwargs) - return res - - def expand_intermediates(self, target: Sequence[Index], - wrap_result: bool = True, - fully_expand: bool = True - ) -> "ExprContainer | Expr": - """ - Expand the object if it is a known intermediate. - - Parameters - ---------- - target: tuple[Index] - The target indices of the term the object is a part of. - wrap_result: bool, optional - If set, the result will be wrapped with an - :py:class:`ExprContainer`. Note that the target indices will - be set in the resturned container, since the einstein - sum convention is often not valid after intermediate - expansion. (default: True) - fully_expand: bool, optional - True (default): The intermediate is recursively expanded - into orbital energies and ERI (if possible) - False: The intermediate is only expanded once, e.g., n'th - order MP t-amplitudes are expressed by means of (n-1)'th order - MP t-amplitudes and ERI. 
- """ - from ..intermediates import Intermediates, RegisteredIntermediate - from .expr_container import ExprContainer - - # intermediates only defined for tensors - if not isinstance(self.base, SymbolicTensor): - ret = self.inner - if wrap_result: - assumptions = self.assumptions - assumptions["target_idx"] = target - ret = ExprContainer(ret, **assumptions) - return ret - - longname = self.longname(use_default_names=True) - assert longname is not None - itmd = Intermediates().available.get(longname, None) - expanded = self.inner - if itmd is not None: - assert isinstance(itmd, RegisteredIntermediate) - # Use a for loop to obtain different contracted itmd indices - # for each x in: x * x * ... - expanded = S.One - exponent = self.exponent - assert exponent.is_Integer - for _ in range(abs(int(exponent))): - expanded *= itmd.expand_itmd( - indices=self.idx, wrap_result=False, - fully_expand=fully_expand - ) - if exponent < S.Zero: - expanded = Pow(expanded, -1) - if wrap_result: - assumptions = self.assumptions - assumptions["target_idx"] = target - ret = ExprContainer(expanded, **assumptions) - return expanded - - def use_explicit_denominators(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Switch to an explicit representation of orbital energy denominators by - replacing all symbolic denominators by their explicit counter part, - i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}.+ - """ - from .expr_container import ExprContainer - - explicit_denom = self.inner - if self.name == tensor_names.sym_orb_denom: - tensor, exponent = self.base_and_exponent - assert isinstance(tensor, AntiSymmetricTensor) - # upper indices are added, lower indices subtracted - explicit_denom = S.Zero - for s in tensor.upper: - assert isinstance(s, Index) - explicit_denom += NonSymmetricTensor( - tensor_names.orb_energy, (s,) - ) - for s in tensor.lower: - assert isinstance(s, Index) - explicit_denom -= NonSymmetricTensor( - tensor_names.orb_energy, (s,) - ) - 
explicit_denom = Pow(explicit_denom, -exponent) - if wrap_result: - assumptions = self.assumptions - # remove the symbolic denom from the assumptions if necessary - if tensor_names.sym_orb_denom in self.antisym_tensors: - assumptions["antisym_tensors"] = tuple( - n for n in assumptions["antisym_tensors"] - if n != tensor_names.sym_orb_denom - ) - explicit_denom = ExprContainer(explicit_denom, **assumptions) - return explicit_denom diff --git a/build/lib/adcgen/expression/polynom_container.py b/build/lib/adcgen/expression/polynom_container.py deleted file mode 100644 index ac6610d..0000000 --- a/build/lib/adcgen/expression/polynom_container.py +++ /dev/null @@ -1,282 +0,0 @@ -from collections.abc import Iterable, Sequence -from functools import cached_property -from typing import Any, TYPE_CHECKING - -from sympy import Add, Expr, Pow, Symbol, S - -from ..indices import Index, sort_idx_canonical -from ..tensor_names import tensor_names -from .container import Container -from .object_container import ObjectContainer - -# imports only required for type checking (avoid circular imports) -if TYPE_CHECKING: - from .term_container import TermContainer - from .expr_container import ExprContainer - - -class PolynomContainer(ObjectContainer): - """ - Wrapper for a polynom of the form (a + b + ...)^x - - Parameters - ---------- - inner: - The polynom to wrap - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. 
- target_idx: Iterable[Index] | None, optional - Target indices of the expression. By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - def __init__(self, inner: Expr | Container | Any, - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | None = None) -> None: - # call init from ObjectContainers parent class - super(ObjectContainer, self).__init__( - inner=inner, real=real, sym_tensors=sym_tensors, - antisym_tensors=antisym_tensors, target_idx=target_idx - ) - if isinstance(self._inner, Pow): - assert isinstance(self._inner.args[0], Add) - else: - # (a + b + ...)^1 (orbital energy denominator) - assert isinstance(self._inner, Add) - - def __len__(self) -> int: - return len(self.base.args) - - @cached_property - def terms(self) -> "tuple[TermContainer, ...]": - from .term_container import TermContainer - - return tuple( - TermContainer(inner=term, **self.assumptions) - for term in self.base.args - ) - - ################################################# - # compute additional properties for the Polynom # - ################################################# - @property - def type_as_str(self) -> str: - return 'polynom' - - @cached_property - def idx(self) -> tuple[Index, ...]: - """ - Returns all indices that occur in the polynom. Indices that occur - multiple times will be listed multiple times. 
- """ - idx = [s for t in self.terms for s in t.idx] - return tuple(sorted(idx, key=sort_idx_canonical)) - - @cached_property - def order(self): - raise NotImplementedError("Order not implemented for polynoms: " - f"{self}") - - def crude_pos(self, *args, **kwargs): - _, _ = args, kwargs - raise NotImplementedError("crude_pos for determining index positions " - f"not implemented for polynoms: {self}") - - def description(self, *args, **kwargs): - _, _ = args, kwargs - raise NotImplementedError("description not implemented for polynoms:", - f"{self}") - - @property - def allowed_spin_blocks(self) -> None: - # allowed spin blocks not available for Polynoms - return None - - @property - def contains_only_orb_energies(self) -> bool: - """Whether the poylnom only contains orbital energy tensors.""" - return all(term.contains_only_orb_energies for term in self.terms) - - #################################### - # methods manipulating the polynom # - #################################### - def _apply_tensor_braket_sym(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Applies the tensor bra-ket symmetry defined in sym_tensors and - antisym_tensors to all tensors in the polynom. If wrap_result is set, - the new term will be wrapped by :py:class:`ExprContainer`. - """ - from .expr_container import ExprContainer - - with_sym = S.Zero - for term in self.terms: - with_sym += term._apply_tensor_braket_sym(wrap_result=False) - assert isinstance(with_sym, Expr) - with_sym = Pow(with_sym, self.exponent) - - if wrap_result: - with_sym = ExprContainer(inner=with_sym, **self.assumptions) - return with_sym - - def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr": - """ - Represent the polynom in a real orbital basis. - - names of complex conjugate t-amplitudes, for instance t1cc -> t1 - - adds bra-ket-symmetry to the fock matrix and the ERI. 
- - Parameters - ---------- - wrap_result : bool, optional - If set the result will be wrapped with an - :py:class:`ExprContainer`. (default: True) - """ - from .expr_container import ExprContainer - - real = S.Zero - for term in self.terms: - real += term.make_real(wrap_result=False) - assert isinstance(real, Expr) - real = Pow(real, self.exponent) - - if wrap_result: - assumptions = self.assumptions - assumptions["real"] = True - real = ExprContainer(inner=real, **assumptions) - return real - - def block_diagonalize_fock(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Block diagonalize the fock matrix in the polynom by removing terms - that contain elements of off-diagonal blocks. - """ - from .expr_container import ExprContainer - - bl_diag = S.Zero - for term in self.terms: - bl_diag += term.block_diagonalize_fock(wrap_result=False) - assert isinstance(bl_diag, Expr) - bl_diag = Pow(bl_diag, self.exponent) - - if wrap_result: - bl_diag = ExprContainer(inner=bl_diag, **self.assumptions) - return bl_diag - - def diagonalize_fock(self, target: Sequence[Index], - wrap_result: bool = True - ): - _, _ = target, wrap_result - raise NotImplementedError("Fock matrix diagonalization not implemented" - f" for polynoms: {self}") - - def rename_tensor(self, current: str, new: str, - wrap_result: bool = True - ) -> "ExprContainer | Expr": - """Rename a tensor from current to new.""" - from .expr_container import ExprContainer - - renamed = S.Zero - for term in self.terms: - renamed += term.rename_tensor(current, new, wrap_result=False) - assert isinstance(renamed, Expr) - renamed = Pow(renamed, self.exponent) - - if wrap_result: - renamed = ExprContainer(inner=renamed, **self.assumptions) - return renamed - - def expand_antisym_eri(self, wrap_result: bool = True): - """ - Expands the antisymmetric ERI using chemists notation - = (pr|qs) - (ps|qr). - ERI's in chemists notation are by default denoted as 'v'. 
- Currently this only works for real orbitals, i.e., - for symmetric ERI's = . - """ - from .expr_container import ExprContainer - - expanded = S.Zero - for term in self.terms: - expanded += term.expand_antisym_eri(wrap_result=False) - assert isinstance(expanded, Expr) - expanded = Pow(expanded, self.exponent) - - if wrap_result: - assumptions = self.assumptions - # add the coulomb tensor to sym_tensors if necessary - if Symbol(tensor_names.coulomb) in expanded.atoms(Symbol): - assumptions["sym_tensors"] += (tensor_names.coulomb,) - expanded = ExprContainer(inner=expanded, **assumptions) - return expanded - - def expand_intermediates(self, target: Sequence[Index], - wrap_result: bool = True, - fully_expand: bool = True - ) -> "ExprContainer | Expr": - """Expands all known intermediates in the polynom.""" - from .expr_container import ExprContainer - - expanded = S.Zero - for term in self.terms: - expanded += term.expand_intermediates( - target, wrap_result=False, fully_expand=fully_expand - ) - assert isinstance(expanded, Expr) - expanded = Pow(expanded, self.exponent) - - if wrap_result: - assumptions = self.assumptions - assumptions["target_idx"] = target - return ExprContainer(expanded, **assumptions) - return expanded - - def use_explicit_denominators(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Switch to an explicit representation of orbital energy denominators by - replacing all symbolic denominators by their explicit counter part, - i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. 
- """ - from .expr_container import ExprContainer - - explicit_denom = S.Zero - for term in self.terms: - explicit_denom += term.use_explicit_denominators(wrap_result=False) - assert isinstance(explicit_denom, Expr) - explicit_denom = Pow(explicit_denom, self.exponent) - - if wrap_result: - assumptions = self.assumptions - if tensor_names.sym_orb_denom in self.antisym_tensors: - assumptions["antisym_tensors"] = tuple( - n for n in assumptions["antisym_tensors"] - if n != tensor_names.sym_orb_denom - ) - explicit_denom = ExprContainer(inner=explicit_denom, **assumptions) - return explicit_denom - - def to_latex_str(self, only_pull_out_pref: bool = False, - spin_as_overbar: bool = False) -> str: - """Returns a latex string for the polynom.""" - tex_str = " ".join( - term.to_latex_str(only_pull_out_pref=only_pull_out_pref, - spin_as_overbar=spin_as_overbar) - for term in self.terms - ) - tex_str = f"\\bigl({tex_str}\\bigr)" - if self.exponent != 1: - tex_str += f"^{{{self.exponent}}}" - return tex_str diff --git a/build/lib/adcgen/expression/term_container.py b/build/lib/adcgen/expression/term_container.py deleted file mode 100644 index 6a385a4..0000000 --- a/build/lib/adcgen/expression/term_container.py +++ /dev/null @@ -1,770 +0,0 @@ -from collections.abc import Iterable -from collections import Counter -from functools import cached_property -from typing import Any, TYPE_CHECKING, Sequence - -from sympy import Add, Expr, Mul, Pow, S, Symbol, factor, latex, nsimplify -from sympy.physics.secondquant import NO - -from ..indices import ( - Index, Indices, get_lowest_avail_indices, get_symbols, order_substitutions, - sort_idx_canonical -) -from ..misc import Inputerror, cached_member -from ..sympy_objects import NonSymmetricTensor -from ..tensor_names import tensor_names -from .container import Container -from .normal_ordered_container import NormalOrderedContainer -from .polynom_container import PolynomContainer -from .object_container import ObjectContainer - -# 
imports only required for type checking (avoid circular imports) -if TYPE_CHECKING: - from ..symmetry import Permutation - from .expr_container import ExprContainer - - -class TermContainer(Container): - """ - Wrapper for a single term of the form a * b * c. - - Parameters - ---------- - inner: - The algebraic term to wrap, e.g., a sympy.Mul object - real : bool, optional - Whether the expression is represented in a real orbital basis. - sym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-symmetry, i.e., - d^{pq}_{rs} = d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional symmetry if they are not aware - of it yet. - antisym_tensors: Iterable[str] | None, optional - Names of tensors with bra-ket-antisymmetry, i.e., - d^{pq}_{rs} = - d^{rs}_{pq}. Adjusts the corresponding tensors to - correctly represent this additional antisymmetry if they are not - aware of it yet. - target_idx: Iterable[Index] | None, optional - Target indices of the expression. By default the Einstein sum - convention will be used to identify target and contracted indices, - which is not always sufficient. - """ - - def __init__(self, inner: Expr | Container | Any, - real: bool = False, - sym_tensors: Iterable[str] = tuple(), - antisym_tensors: Iterable[str] = tuple(), - target_idx: Iterable[Index] | None = None) -> None: - super().__init__( - inner=inner, real=real, sym_tensors=sym_tensors, - antisym_tensors=antisym_tensors, target_idx=target_idx - ) - # we can not wrap an Add object: should be wrapped by ExprContainer - # But everything else should be fine (Mul or single objects) - assert not isinstance(self._inner, Add) - - def __len__(self) -> int: - if isinstance(self.inner, Mul): - return len(self.inner.args) - else: - return 1 - - @cached_property - def objects(self) -> tuple[ObjectContainer, ...]: - """ - Returns all objects the term contains, e.g. tensors. 
- """ - def dispatch(obj, kwargs) -> ObjectContainer: - if isinstance(obj, NO): - return NormalOrderedContainer(inner=obj, **kwargs) - elif (isinstance(obj, Pow) and isinstance(obj.args[0], Add)) or \ - isinstance(obj, Add): - return PolynomContainer(inner=obj, **kwargs) - else: - return ObjectContainer(inner=obj, **kwargs) - - kwargs = self.assumptions - if isinstance(self.inner, Mul): - return tuple( - dispatch(obj, kwargs) - for obj in self.inner.args - ) - else: - return (dispatch(self.inner, kwargs),) - - ############################################### - # methods that compute additional information # - ############################################### - @cached_property - def order(self) -> int: - return sum( - obj.order for obj in self.objects - if not isinstance(obj, PolynomContainer) - ) - - @cached_property - def prefactor(self) -> Expr: - """Returns the (numeric) prefactor of the term.""" - return nsimplify( - Mul(*(o.inner for o in self.objects if o.inner.is_number)), - rational=True - ) - - @property - def sign(self) -> str: - """Returns the sign of the term.""" - return "minus" if self.prefactor < S.Zero else "plus" - - @property - def contracted(self) -> tuple[Index, ...]: - """ - Returns all contracted indices of the term. If no target indices - have been provided to the parent expression, the Einstein sum - convention will be applied. - """ - # target indices have been provided -> no need to count indices - if (target := self.provided_target_idx) is not None: - return tuple(s for s, _ in self._idx_counter if s not in target) - else: # count indices to determine target and contracted indices - return tuple(s for s, n in self._idx_counter if n) - - @property - def target(self) -> tuple[Index, ...]: - """ - Returns all target indices of the term. If no target indices have been - provided to the parent expression, the Einstein sum convention will - be applied. 
- """ - if (target := self.provided_target_idx) is not None: - return target - else: - return tuple(s for s, n in self._idx_counter if not n) - - @cached_property - def idx(self) -> tuple[Index, ...]: - """ - Returns all indices that occur in the term. Indices that occur multiple - times will be listed multiple times. - """ - return tuple(s for s, n in self._idx_counter for _ in range(n + 1)) - - @cached_property - def _idx_counter(self) -> tuple[tuple[Index, int], ...]: - idx: dict[Index, int] = {} - for o in self.objects: - if o.inner.is_number: - continue - n = abs(o.exponent) # abs value for denominators - assert n.is_Integer - n = int(n) - for s in o.idx: - if s in idx: - idx[s] += n - else: # start counting at 0 - idx[s] = n - 1 - return tuple(sorted( - idx.items(), key=lambda itms: sort_idx_canonical(itms[0]) - )) - - @cached_member - def pattern(self, include_target_idx: bool = True, - include_exponent: bool = True - ) -> dict[tuple[str, str], dict[Index, list[str]]]: - """ - Determins the pattern of the indices in the term. This is a (kind of) - readable string hash for each index that is based upon the positions - the index appears and the coupling of the objects. - - Parameters - ---------- - include_target_idx: bool, optional - If set, the explicit names of target indices are included to make - the pattern more precise. Should be set if the target indices - are not allowed to be renamed. 
(default: True) - include_exponent: bool, optional - If set, the exponents of the objects are included in the pattern - (default: True) - """ - - target_idx = self.target if include_target_idx else None - coupl = self.coupling( - include_target_idx=include_target_idx, - include_exponent=include_exponent - ) - pattern: dict[tuple[str, str], dict[Index, list[str]]] = {} - for i, o in enumerate(self.objects): - positions = o.crude_pos(target_idx=target_idx, - include_exponent=include_exponent) - c = f"_{'_'.join(sorted(coupl[i]))}" if i in coupl else None - for s, pos in positions.items(): - key = s.space_and_spin - if key not in pattern: - pattern[key] = {} - if s not in pattern[key]: - pattern[key][s] = [] - if c is None: - pattern[key][s].extend(p for p in pos) - else: - pattern[key][s].extend(p + c for p in pos) - # sort pattern to allow for direct comparison - for ov, idx_pat in pattern.items(): - for s, pat in idx_pat.items(): - pattern[ov][s] = sorted(pat) - return pattern - - @cached_member - def coupling(self, include_target_idx: bool = True, - include_exponent: bool = True) -> dict[int, list[str]]: - """ - Returns the coupling between the objects in the term, where two objects - are coupled when they share common indices. Only the coupling of non - unique objects is returned, i.e., the coupling of e.g. a t2_1 amplitude - is only returned if there is another one in the same term. - """ - # - collect all the couplings (e.g. if a index s occurs at two tensors - # t and V: the crude_pos of s at t will be extended by the crude_pos - # of s at V. And vice versa for V.) 
- objects = self.objects - target_idx = self.target if include_target_idx else None - descriptions = [ - o.description(include_exponent=include_exponent, - target_idx=target_idx) - for o in objects - ] - descr_counter = Counter(descriptions) - positions = [ - o.crude_pos(include_exponent=include_exponent, - target_idx=target_idx) - for o in objects - ] - coupling = {} - for i, (descr, idx_pos) in enumerate(zip(descriptions, positions)): - # if the tensor is unique in the term -> no coupling necessary - if descr_counter[descr] < 2: - continue - for other_i, other_idx_pos in enumerate(positions): - if i == other_i: - continue - matches = [idx for idx in idx_pos if idx in other_idx_pos] - if not matches: - continue - if i not in coupling: - coupling[i] = [] - coupling[i].extend( - [p for s in matches for p in other_idx_pos[s]] - ) - return coupling - - @cached_member - def symmetry(self, only_contracted: bool = False, - only_target: bool = False - ) -> "dict[tuple[Permutation, ...], int]": - """ - Determines the symmetry of the term with respect to index permutations. - By default all indices of the term are considered. However, by setting - either only_contracted or only_target the indices may be restricted to - the respective subset of indices. 
- """ - from itertools import combinations, permutations, chain, product - from math import factorial - from ..indices import split_idx_string - from ..symmetry import Permutation, PermutationProduct - - def permute_str(string, *perms): - string = split_idx_string(string) - for perm in perms: - p, q = [s.name for s in perm] - sub = {p: q, q: p} - string = [sub.get(s, s) for s in string] - return "".join(string) - - def get_perms(*space_perms): - for perms in chain.from_iterable(space_perms): - yield perms - if len(space_perms) > 1: # form the product - for perm_tpl in product(*space_perms): - yield PermutationProduct(*chain.from_iterable(perm_tpl)) - - if only_contracted and only_target: - raise Inputerror("Can not set only_contracted and only_target " - "simultaneously.") - if self.inner.is_number or isinstance(self.inner, NonSymmetricTensor): - return {} # in both cases we can't find any symmetry - - if only_contracted: - indices = self.contracted - elif only_target: - indices = self.target - else: - indices = self.idx - - if len(indices) < 2: # not enough indices for any permutations - return {} - - # split in occ and virt indices to only generate P_oo, P_vv and P_gg. - # Similarly, the spin has to be the same! 
- sorted_idx = {} - for s in indices: - if (key := s.space_and_spin) not in sorted_idx: - sorted_idx[key] = [] - sorted_idx[key].append(s) - - space_perms: list[list] = [] # find all permutations within a space - for idx_list in sorted_idx.values(): - if len(idx_list) < 2: - continue - max_n_perms = factorial(len(idx_list)) - # generate idx string that will also be permuted to avoid - # redundant permutations - idx_string = "".join([s.name for s in idx_list]) - permuted_str = [idx_string] - # form all index pairs - all permutations operators - pairs = [Permutation(*pair) for pair in combinations(idx_list, 2)] - # form all combinations of permutation operators - combs = chain.from_iterable( - permutations(pairs, n) for n in range(1, len(idx_list)) - ) - # remove redundant combinations - temp = [] - for perms in combs: - if len(permuted_str) == max_n_perms: - break # did find enough permutations - perm_str = permute_str(idx_string, *perms) - if perm_str in permuted_str: # is the perm redundant? 
- continue - permuted_str.append(perm_str) - temp.append(perms) - space_perms.append(temp) - # now apply all found perms to the term and determine the symmetry - # -> add/subtract permuted versions of the term and see if we get 0 - symmetry: dict[tuple, int] = {} - original_term = self.inner - for perms in get_perms(*space_perms): - permuted = self.permute(*perms).inner - if Add(original_term, permuted) is S.Zero: - symmetry[perms] = -1 - elif Add(original_term, -permuted) is S.Zero: - symmetry[perms] = +1 - return symmetry - - @property - def contains_only_orb_energies(self) -> bool: - """Whether the term only contains orbital energies.""" - return all( - o.contains_only_orb_energies for o in self.objects - if not o.inner.is_number - ) - - ############################### - # method that modify the term # - ############################### - def _apply_tensor_braket_sym(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Applies the tensor bra-ket symmetry defined in sym_tensors and - antisym_tensors to all tensors in the term. If wrap_result is set, - the new term will be wrapped by :py:class:`ExprContainer`. - """ - from .expr_container import ExprContainer - - term = S.One - for obj in self.objects: - term *= obj._apply_tensor_braket_sym(wrap_result=False) - if wrap_result: - term = ExprContainer(inner=term, **self.assumptions) - return term - - def make_real(self, wrap_result: bool = True) -> "ExprContainer | Expr": - """ - Represent the tern in a real orbital basis. - - names of complex conjugate t-amplitudes, for instance t1cc -> t1 - - adds bra-ket-symmetry to the fock matrix and the ERI. - - Parameters - ---------- - wrap_result: bool, optional - If this is set the result will be wrapped in an - :py:class:`ExprContainer`. Otherwhise that unwrapped object - is returned. 
(default: True) - """ - from .expr_container import ExprContainer - - real_term = S.One - for obj in self.objects: - real_term *= obj.make_real(wrap_result=False) - - if wrap_result: - kwargs = self.assumptions - kwargs["real"] = True - real_term = ExprContainer(inner=real_term, **kwargs) - return real_term - - def block_diagonalize_fock(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Block diagonalize the Fock matrix, i.e. if the term contains a off - diagonal Fock matrix block (f_ov/f_vo) it is set to 0. - - Parameters - ---------- - wrap_result: bool, optional - If this is set the result will be wrapped with an - :py:class:`ExprContainer`. - """ - bl_diag = S.One - for obj in self.objects: - bl_diag *= obj.block_diagonalize_fock(wrap_result=False) - - if wrap_result: - bl_diag = ExprContainer(bl_diag, **self.assumptions) - return bl_diag - - def diagonalize_fock(self, target: Sequence[Index] | None = None, - wrap_result: bool = True, - apply_substitutions: bool = True - ) -> "ExprContainer | Expr | tuple[ExprContainer | Expr, dict[Index, Index]]": # noqa E501 - """ - Represent the term in the canonical orbital basis, where the - Fock matrix is diagonal. Because it is not possible to - determine the target indices in the resulting term according - to the Einstein sum convention, the current target indices will - be set manually in the resulting term. - - Parameters - ---------- - target: Sequence[Index] | None - The target indices of a potential parent term. - wrap_result: bool, optional - If this is set the result will be wrapped with an - :py:class:`ExprContainer`. - apply_substitutions: bool, optional - If set the index substitutions will be applied to the result. - Otherwhise the substitutions will be returned in addition to the - expression (without applying them). - In both cases fock matrix elements will be replaced by orbital - energie elements, e.g., f_ij will be replaced by e_i. 
- """ - from .expr_container import ExprContainer - - if target is None: - target = self.target - - sub: dict[Index, Index] = {} - diag = S.One - for o in self.objects: - diag_obj, sub_obj = o.diagonalize_fock(target, wrap_result=False) - diag *= diag_obj - if any(k in sub and sub[k] != v for k, v in sub_obj.items()): - raise NotImplementedError("Did not implement the case of " - "multiple fock matrix elements with " - f"intersecting indices: {self}") - sub.update(sub_obj) - - if wrap_result: - kwargs = self.assumptions - kwargs["target_idx"] = target - diag = ExprContainer(diag, **kwargs) - if apply_substitutions: - return diag.subs(order_substitutions(sub)) - else: - return diag, sub - - def substitute_contracted(self, wrap_result: bool = True, - apply_substitutions: bool = True - ) -> "ExprContainer | Expr | list[tuple[Index, Index]]": # noqa E501 - """ - Replace the contracted indices in the term with the lowest available - (non-target) indices. This is done for each space and spin - independently, i.e., - i_{\\alpha} j_{\\beta} -> i_{\\alpha} i_{\\beta} - assuming both indices are contracted indices and - i_{\\alpha} i_{\\beta} are not used as target indices. - - Parameters - ---------- - wrap_result: bool, optional - If set the result will be wrapped in an - :py:class:`ExprContainer`. (default: True) - apply_substitutions: bool, optional - If set the substitutions will be applied to the - term and the new expression is returned. Otherwise, - the index substitutions will be returned without - applying them to the expression. (default: True) - """ - from .expr_container import ExprContainer - - # - determine the target and contracted indices - # and split them according to their space - # Don't use atoms to obtain the contracted indices! Atoms is a set - # and therefore not sorted -> will produce a random result. 
- contracted = {} - for s in self.contracted: - if (key := s.space_and_spin) not in contracted: - contracted[key] = [] - contracted[key].append(s) - used = {} - for s in set(self.target): - if (key := s.space_and_spin) not in used: - used[key] = set() - used[key].add(s.name) - - # - generate new indices the contracted will be replaced with - # and build a substitution dictionary - # Don't filter out indices that will not change! - sub = {} - for (space, spin), idx_list in contracted.items(): - new_idx = get_lowest_avail_indices( - len(idx_list), used.get((space, spin), []), space - ) - if spin: - new_idx = get_symbols(new_idx, spin * len(idx_list)) - else: - new_idx = get_symbols(new_idx) - sub.update({o: n for o, n in zip(idx_list, new_idx)}) - # - apply substitutions while ensuring the substitutions are - # performed in the correct order - sub = order_substitutions(sub) - - if not apply_substitutions: # only build and return the sub_list - return sub - - substituted = self.inner.subs(sub) - assert isinstance(substituted, Expr) - # ensure that the substitutions are valid - if substituted is S.Zero and self.inner is not S.Zero: - raise ValueError(f"Invalid substitutions {sub} for {self}.") - - if wrap_result: - substituted = ExprContainer(substituted, **self.assumptions) - return substituted - - def substitute_with_generic(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Replace the contracted indices in the term with new, unused generic - indices. 
- """ - from .expr_container import ExprContainer - # sort the contracted indices according to their space and spin - contracted: dict[tuple[str, str], list[Index]] = {} - for idx in self.contracted: - if (key := idx.space_and_spin) not in contracted: - contracted[key] = [] - contracted[key].append(idx) - # generate new generic indices - kwargs = {f"{space}_{spin}" if spin else space: len(indices) - for (space, spin), indices in contracted.items()} - generic = Indices().get_generic_indices(**kwargs) - # build the subs dict - subs: dict[Index, Index] = {} - for key, old_indices in contracted.items(): - new_indices = generic[key] - subs.update({ - idx: new_idx for idx, new_idx in zip(old_indices, new_indices) - }) - # substitute the indices - substituted = self.inner.subs(order_substitutions(subs)) - assert isinstance(substituted, Expr) - # ensure that the substitutions are valid - if substituted is S.Zero and self.inner is not S.Zero: - raise ValueError(f"Invalid substitutions {subs} for {self}.") - - if wrap_result: - substituted = ExprContainer(substituted, **self.assumptions) - return substituted - - def rename_tensor(self, current: str, new: str, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Rename tensors in a terms. - - Parameters - ---------- - wrap_result: bool, optional - If this is set the result will be wrapped with an - :py:class:`ExprContainer`. (default: True) - """ - from .expr_container import ExprContainer - - renamed = S.One - for obj in self.objects: - renamed *= obj.rename_tensor( - current=current, new=new, wrap_result=False - ) - - if wrap_result: - renamed = ExprContainer(renamed, **self.assumptions) - return renamed - - def expand_antisym_eri(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Expands the antisymmetric ERI using chemists notation - = (pr|qs) - (ps|qr). - ERI's in chemists notation are by default denoted as 'v'. - Currently this only works for real orbitals, i.e., for - symmetric ERI's = . 
- """ - from .expr_container import ExprContainer - - expanded = S.One - for obj in self.objects: - expanded *= obj.expand_antisym_eri(wrap_result=False) - - if wrap_result: - assumptions = self.assumptions - if Symbol(tensor_names.coulomb) in expanded.atoms(Symbol): - assumptions["sym_tensors"] += (tensor_names.coulomb,) - expanded = ExprContainer(expanded, **assumptions) - return expanded - - def expand_intermediates(self, target: Sequence[Index] | None = None, - wrap_result: bool = True, - fully_expand: bool = True - ) -> "ExprContainer | Expr": - """ - Expands all known intermediates in the term. - - Parameters - ---------- - target: tuple[Index] | None, optional - The target indices of the term. Determined automatically if not - given. Since it might not be possible to determine the - target indices in the resulting expression (e.g. after - expanding MP t-amplitudes) the target indices will be - set in the expression. - wrap_result: bool, optional - If set the result is wrapped in an - :py:class:`ExprContainer`. (default: True) - fully_expand: bool, optional - True (default): The intermediates are recursively expanded - into orbital energies and ERI (if possible) - False: The intermediates are only expanded once, e.g., n'th - order MP t-amplitudes are expressed by means of (n-1)'th order - MP t-amplitudes and ERI. - """ - from .expr_container import ExprContainer - - if target is None: - target = self.target - - expanded = S.One - for obj in self.objects: - expanded *= obj.expand_intermediates( - target, wrap_result=False, fully_expand=fully_expand - ) - - if wrap_result: - assumptions = self.assumptions - assumptions["target_idx"] = target - expanded = ExprContainer(expanded, **assumptions) - return expanded - - def factor(self) -> "ExprContainer": - """ - Tries to factor the term. 
- """ - from .expr_container import ExprContainer - - return ExprContainer( - inner=factor(self.inner), **self.assumptions - ) - - def use_explicit_denominators(self, wrap_result: bool = True - ) -> "ExprContainer | Expr": - """ - Switch to an explicit representation of orbital energy denominators by - replacing all symbolic denominators by their explicit counter part, - i.e., D^{ij}_{ab} -> (e_i + e_j - e_a - e_b)^{-1}. - """ - from .expr_container import ExprContainer - - explicit_denom = S.One - for obj in self.objects: - explicit_denom *= obj.use_explicit_denominators(wrap_result=False) - - if wrap_result: - assumptions = self.assumptions - # remove the tensor from the assumptions - if tensor_names.sym_orb_denom in self.antisym_tensors: - assumptions["antisym_tensors"] = tuple( - n for n in assumptions["antisym_tensors"] - if n != tensor_names.sym_orb_denom - ) - explicit_denom = ExprContainer(explicit_denom, **assumptions) - return explicit_denom - - def split_orb_energy(self) -> "dict[str, ExprContainer]": - """ - Splits the term in a orbital energy fraction and a remainder, e.g. - (e_i + e_j) / (e_i + e_j - e_a - e_b) * (tensor1 * tensor2). - To this end all polynoms that only contain orbital energy tensors - ('e' by default) are collected to form the numerator and denominator, - while the rest of the term is collected in the remainder. - Prefactors are collected in the numerator. 
- """ - from .expr_container import ExprContainer - - assumptions = self.assumptions - assumptions['target_idx'] = self.target - ret = {"num": ExprContainer(1, **assumptions), - 'denom': ExprContainer(1, **assumptions), - 'remainder': ExprContainer(1, **assumptions)} - for o in self.objects: - base, exponent = o.base_and_exponent - if o.inner.is_number: - key = "num" - elif o.contains_only_orb_energies: - key = "denom" if exponent < S.Zero else "num" - else: - key = 'remainder' - ret[key] *= Pow(base, abs(exponent)) - return ret - - def use_symbolic_denominators(self) -> "ExprContainer": - """ - Replace all orbital energy denominators in the expression by tensors, - e.g., (e_a + e_b - e_i - e_j)^{-1} will be replaced by D^{ab}_{ij}, - where D is a SymmetricTensor.""" - from ..eri_orbenergy import EriOrbenergy - - term = EriOrbenergy(self) - symbolic_denom = term.symbolic_denominator() - # symbolic denom might additionaly have D set as antisym tensor - return symbolic_denom * term.pref * term.num.inner * term.eri.inner - - def to_latex_str(self, only_pull_out_pref: bool = False, - spin_as_overbar: bool = False): - """ - Transforms the term to a latex string. - - Parameters - ---------- - only_pull_out_pref: bool, optional - Use the 'latex' printout from sympy, while prefactors are printed - in front of each term. This avoids long fractions with a huge - number of tensors in the numerator and only a factor in the - denominator. - spin_as_overbar: bool, optional - Instead of printing the spin of an index as suffix (name_spin) - use an overbar for beta spin and no indication for alpha. Because - alpha indices and indices without spin are not distinguishable - anymore, this only works if all indices have a spin set (the - expression is completely represented in spatial orbitals). 
- """ - # - sign and prefactor - pref = self.prefactor - tex_str = "+ " if pref >= S.Zero else "- " - # term only consists of a number (only pref) - if self.inner.is_number: - return tex_str + f"{latex(abs(pref))}" - # avoid printing +- 1 prefactors - if pref not in [+1, -1]: - tex_str += f"{latex(abs(pref))} " - - # - latex strings for the remaining objects - tex_str += " ".join([ - o.to_latex_str(only_pull_out_pref, spin_as_overbar) - for o in self.objects if not o.inner.is_number - ]) - return tex_str diff --git a/build/lib/adcgen/factor_intermediates.py b/build/lib/adcgen/factor_intermediates.py deleted file mode 100644 index df3c195..0000000 --- a/build/lib/adcgen/factor_intermediates.py +++ /dev/null @@ -1,1813 +0,0 @@ -from collections.abc import Iterable, Sequence, Generator -from collections import Counter -from functools import cached_property -from typing import Any, TYPE_CHECKING, TypeGuard -import itertools - -from sympy import Add, Expr, Mul, Rational, S, sympify - -from .eri_orbenergy import EriOrbenergy -from .expression import ExprContainer, TermContainer, ObjectContainer -from .indices import ( - Index, - order_substitutions, get_symbols, minimize_tensor_indices -) -from .logger import logger -from .symmetry import LazyTermMap, Permutation -from .sympy_objects import AntiSymmetricTensor, SymbolicTensor - -if TYPE_CHECKING: - from .intermediates import RegisteredIntermediate - - -def factor_intermediates(expr: ExprContainer, - types_or_names: Sequence[str] | None = None, - max_order: int | None = None, - allow_repeated_itmd_indices: bool = False - ) -> ExprContainer: - """ - Factors the intermediates defined in 'intermediates.py' in an expression. - Note that the implementation assumes that a real orbital basis is used. - - Parameters - ---------- - expr : Expr - Expression in which to factor intermediates. - types_or_names : Sequence[str], optional - The types or names of the intermediates to factor. 
If not given, the - function tries to factor all available intermediates. - max_order : int, optional - The maximum perturbation theoretical order of intermediates to - consider. - allow_repeated_itmd_indices: bool, optional - If set, the factorization of intermediates of the form I_iij are - allowed, i.e., indices on the intermediate may appear more than once. - This corresponds to either a partial trace or a diagonal element of - the intermediate. Note that this does not consistently work for - "long" intermediates (at least 2 terms), because the number of terms - might be reduced which is not correctly handled currently. - """ - from .intermediates import Intermediates, RegisteredIntermediate - from time import perf_counter - - assert isinstance(expr, ExprContainer) - if expr.inner.is_number: # nothing to factor - return expr - - # get all intermediates that are about to be factored in the expr - itmd = Intermediates() - if types_or_names is not None: - if isinstance(types_or_names, str): - itmd_to_factor: dict[str, RegisteredIntermediate] = getattr( - itmd, types_or_names - ) - else: # list / tuple / set of strings - itmd_to_factor: dict[str, RegisteredIntermediate] = {} - for t_or_n in types_or_names: - if not isinstance(t_or_n, str): - raise TypeError("Intermediate types/names to factor have " - "to be provided as str or list of strings." 
- f"Got {t_or_n} of type {type(t_or_n)}.") - itmd_to_factor |= getattr(itmd, t_or_n) - else: - itmd_to_factor: dict[str, RegisteredIntermediate] = itmd.available - - if max_order is not None: - itmd_to_factor = {n: itmd_cls for n, itmd_cls in itmd_to_factor.items() - if itmd_cls.order <= max_order} - - logger.info("".join([ - "\n\n", "#"*80, "\n", " "*25, "INTERMEDIATE FACTORIZATION\n", "#"*80, - "\n\n", - f"Trying to factor intermediates in expr of length {len(expr)}\n" - ])) - for i, term in enumerate(expr.terms): - logger.info(f"{i+1}: {EriOrbenergy(term)}\n") - logger.info('#'*80) - # try to factor all requested intermediates - factored: list[str] = [] - for name, itmd_cls in itmd_to_factor.items(): - logger.info("".join(["\n", ' '*25, f"Factoring {name}\n\n", '#'*80])) - start = perf_counter() - expr = itmd_cls.factor_itmd( - expr, factored_itmds=factored, max_order=max_order, - allow_repeated_itmd_indices=allow_repeated_itmd_indices - ) - factored.append(name) - logger.info("".join([ - "\n", "-"*80, "\n" - f"Done in {perf_counter()-start:.2f}s. {len(expr)} terms remain", - "\n", "-"*80, "\n" - ])) - for i, term in enumerate(expr.terms): - logger.info( - f"{i+1: >{len(str(len(expr)+1))}}: {EriOrbenergy(term)}\n" - ) - logger.info("#"*80) - logger.info("".join(["\n\n", '#'*80, "\n", " "*25, - "INTERMEDIATE FACTORIZATION FINISHED\n", '#'*80])) - # make the result pretty by minimizing contracted indices: - # some contracted indices might be hidden inside some intermediates. 
- # -> ensure that the remaining ones are the lowest available - expr = expr.substitute_contracted() - logger.info(f"\n{len(expr)} terms in the final result:") - width = len(str(len(expr)+1)) - for i, term in enumerate(expr.terms): - logger.info(f"{i+1: >{width}}: {EriOrbenergy(term)}") - return expr - - -def _factor_long_intermediate(expr: ExprContainer, - itmd: Sequence[EriOrbenergy], - itmd_data: Sequence["FactorizationTermData"], - itmd_term_map: LazyTermMap, - itmd_cls: "RegisteredIntermediate", - allow_repeated_itmd_indices: bool = False - ) -> ExprContainer: - """ - Factores a long intermediate - an intermediate that consists of more - than one term - in an expression. - - Parameters - ---------- - expr : Expr - The expression to factor the intermediate in. - itmd : list[EriOrbenergy] - The expression of the intermediate to factor splitted into terms - and separating orbital energy fractions. - itmd_data : tuple[FactorizationTermData] - Data for each term in the intermediate to map the itmd term onto - subparts of terms in the expression. - itmd_term_map : LazyTermMap - Provides information about the mapping of terms in the intermediate - if the target indices of the intermediate are permuted. - idmd_cls - The class instance of the intermediate to factor. - allow_repeated_itmd_indices: bool, optional - If set, the factorization of intermediates of the form I_iij are - allowed, i.e., indices on the intermediate may appear more than - once. This does not consistently work for "long" intermediates - (at least 2 terms), because the number of terms might be reduced which - is not correctly handled currently. - """ - - if expr.inner.is_number: - return expr - - # does any itmd term has a denominator? - itmd_has_denom: bool = any( - term_data.denom_bracket_lengths is not None for term_data in itmd_data - ) - itmd_length: int = len(itmd) - # get the default symbols of the intermediate - itmd_default_symbols: tuple[Index, ...] 
= tuple( - get_symbols(itmd_cls.default_idx) - ) - - terms: tuple[TermContainer, ...] = expr.terms - - # class that manages the found itmd variants - intermediate_variants: LongItmdVariants = LongItmdVariants(itmd_length) - for term_i, term in enumerate(terms): - term = EriOrbenergy(term).canonicalize_sign() - # prescan: check that the term holds the correct tensors and - # denominator brackets - term_data = FactorizationTermData(term) - # description of all objects in the eri part, exponent implicitly - # included - obj_descr = term_data.eri_obj_descriptions - if itmd_has_denom: - bracket_lengths = term_data.denom_bracket_lengths - else: - bracket_lengths = None - - # compare to all of the itmd terms -> only try to map on a subset of - # intermediate terms later - possible_matches: list[int] = [] - for itmd_i, itmd_term_data in enumerate(itmd_data): - # do all tensors in the eri part occur at least as often as - # in the intermediate - if any(obj_descr[descr] < n for descr, n in - itmd_term_data.eri_obj_descriptions.items()): - continue - # itmd_term has a denominator? 
- itmd_bracket_lengths = itmd_term_data.denom_bracket_lengths - if itmd_bracket_lengths is not None: - if bracket_lengths is None: # term has no denom -> cant match - continue - else: # term also has a denominator - # ensure that bracket of the correct length are available - if any(bracket_lengths[length] < n for length, n in - itmd_bracket_lengths.items()): - continue - possible_matches.append(itmd_i) - if not possible_matches: # did not find any possible matches - continue - - # extract the target idx names of the term - target_idx_by_space: dict[tuple[str, str], set[str]] = {} - for s in term.eri.target: - if (key := s.space_and_spin) not in target_idx_by_space: - target_idx_by_space[key] = set() - target_idx_by_space[key].add(s.name) - - # go through all possible matches - for itmd_i in possible_matches: - # - compare and obtain data (sub_dict, obj indices, factor) - # that makes the itmd_term equal to the defined sub part - # of the term. - variants = _compare_terms( - term, itmd[itmd_i], term_data=term_data, - itmd_term_data=itmd_data[itmd_i] - ) - if variants is None: # was not possible to map the terms - continue - - # The term_map allows to spread a term assignement to multiple - # terms taking the symmetry of the remainder into account, e.g., - # for the t2_2 amplitudes: - # t2_2 <- (1-P_ij)(1-P_ab) X - # - Depending on the symmetry of the remainder these 4 terms - # might occur as 4, 2 or 1 terms in the expression to factor: - # Rem * (1-P_ij)(1-P_ab) X -> 4 * Rem * X - # (if Rem has ij and ab antisymmetry) - # - If a term with such a remainder is matched with one of the 4 - # 4 terms he will automatically also be matched with the other - # 3 terms using the term_map for the intermediate. 
- # NOTE: it is not possible to exploit this to reduce the workload - # by exploiting the fact that the current term has already - # been matched to a itmd_term through the term map, because - # more complicated permutations do not provide a back and - # forth relation ship between terms: - # P_ij P_ik A(ijk) -> B(kij) - # P_ij P_ik B(kij) -> C(jki) - # comparing the current term to A can also provide a match - # with B through the term_map. - # comparing the current term to B however can provide a match - # with C! - # Therefore, the comparison with B can not be skipped, even - # if remainder and itmd_indices are identical to a previously - # found variant that matched to A and B! - # What can be done: for matching term 1 -> itmd_term A - # due to the symmetry of tensors one probably obtains multiple - # variants for the same itmd_indices and remainder that only - # differ in contracted indices. - # -> for each itmd_indices only consider one variant for each - # remainder - - # get the contracted indices of the itmd term - itmd_contracted_symbols = tuple( - s for s in set(itmd[itmd_i].expr.idx) - if s not in itmd_default_symbols - ) - # {itmd_indices: [remainder]} - found_remainders: dict[tuple[Index, ...], list[ExprContainer]] = {} - for variant_data in variants: # go through all valid variants - # - extract the remainder of the term (objects, excluding - # prefactors that will remain if the current variant is - # used to factor the itmd) - - remainder = _get_remainder( - term, variant_data['eri_i'], variant_data['denom_i'] - ) - - # - obtain the indices of the intermediate - itmd_indices: tuple[Index, ...] = tuple( - variant_data['sub'].get(s, s) for s in itmd_default_symbols - ) - - # - ensure that we have no repeated indices on the itmd - if not allow_repeated_itmd_indices and itmd_indices and \ - any(c != 1 for c in Counter(itmd_indices).values()): - continue - - # - check that none of the contracted itmd indices appears - # in the remainder! 
- # error or continue? probably better have a look if the error - # thrown and decide then - contracted_itmd_indices: tuple[Index, ...] = tuple( - variant_data['sub'].get(s, s) - for s in itmd_contracted_symbols - ) - remainder_indices: set[Index] = set(remainder.idx) - if any(s in remainder_indices - for s in contracted_itmd_indices): - raise RuntimeError("Invalid contracted itmd indices " - f"{contracted_itmd_indices} found " - "that also appear in the remainder:\n" - f"{remainder}") - - # - minimize the indices of the intermediate to ensure that - # the same indices are used in each term of the long itmd - # (use the lowest non target indices) - itmd_indices, minimization_perms = minimize_tensor_indices( - itmd_indices, target_idx_by_space - ) - - # - apply the substitutions to the remainder - remainder = remainder.permute(*minimization_perms) - # if this ever triggers probably switch to a continue - assert remainder.inner is not S.Zero - - # - Further minimize the tensor indices taking the tensor - # symmetry of the itmd into account by building a tensor - # using the minimized tensor indices - # -> returns the tensor with completely minimized indices - # and possibly a factor of -1 - tensor_obj = itmd_cls.tensor( - indices=itmd_indices, wrap_result=True - ) - assert isinstance(tensor_obj, ExprContainer) - tensor_obj = tensor_obj.terms[0] - if len(tensor_obj) > 2: - raise ValueError("Expected the term to be at most of " - f"length 2. Got: {tensor_obj}.") - for obj in tensor_obj.objects: - if isinstance(obj.base, SymbolicTensor): - itmd_indices = obj.idx - elif obj.inner.is_number: - variant_data['factor'] *= obj.inner - else: - raise TypeError("Only expected tensor and prefactor." 
- f"Found {obj} in {tensor_obj}") - - # check if we already found another variant that gives the - # same itmd_indices and remainder (an identical result that - # only differs in contracted itmd_indices) - if itmd_indices not in found_remainders: - found_remainders[itmd_indices] = [] - if any(_compare_remainder(remainder, found_rem, itmd_indices) - is not None - for found_rem in found_remainders[itmd_indices]): - continue # go to the next variant - else: - found_remainders[itmd_indices].append(remainder) - - # - check if the current itmd_term can be mapped onto other - # itmd terms - matching_itmd_terms = _map_on_other_terms( - itmd_i, remainder, itmd_term_map, itmd_indices, - itmd_default_symbols - ) - - # - calculate the final prefactor of the remainder if the - # current variant is applied for factorization - # keep the term normalized if spreading to multiple terms! - # (factor is +-1) - prefactor: Expr = ( - term.pref * variant_data['factor'] * - Rational(1, len(matching_itmd_terms)) / itmd[itmd_i].pref - ) - - # - compute the factor that the term should have if we want - # to factor the current variant with a prefactor of 1 - # (required for factoring mixed prefactors) - unit_factorization_pref: Expr = ( - itmd[itmd_i].pref * variant_data['factor'] - * len(matching_itmd_terms) - ) - - # - add the match to the pool where intermediate variants - # are build from - intermediate_variants.add( - term_i=term_i, itmd_indices=itmd_indices, - matching_itmd_terms=matching_itmd_terms, - remainder=remainder, prefactor=prefactor, - unit_factorization_pref=unit_factorization_pref - ) - logger.debug("\nMATCHED INTERMEDIATE TERMS:") - logger.debug(intermediate_variants) - - result: ExprContainer = ExprContainer(0, **expr.assumptions) - # keep track which terms have already been factored - factored_terms: set[int] = set() - factored_successfully: bool = False - - # first try to factor all complete intermediate variants - result, successful = _factor_complete( - result, terms, 
itmd_cls, factored_terms, intermediate_variants - ) - factored_successfully |= successful - - # go again through the remaining itmd variants and try to build more - # complete variants by allowing mixed prefactors, i.e., - # add a term that belongs to a variant with prefactor 1 - # to the nearly complete variant with prefactor 2. To compensate - # for this, additional terms are added to the result. - result, factored_mixed_pref_successfully = _factor_mixed_prefactors( - result, terms, itmd_cls, factored_terms, intermediate_variants - ) - factored_successfully |= factored_mixed_pref_successfully - - # TODO: - # go again through the remaining itmds and see if we can factor another - # intermediate by filling up some terms, e.g. if we found 5 out of 6 terms - # it still makes sense to factor the itmd - - # add all terms that were not involved in itmd_factorization to the result - for term_i, term in enumerate(terms): - if term_i not in factored_terms: - factored_terms.add(term_i) - result += term.inner - assert len(factored_terms) == len(terms) - - # if we factored the itmd successfully it might be necessary to adjust - # sym_tensors or antisym_tensors of the returned expression - if factored_successfully: - tensor = itmd_cls.tensor(wrap_result=False) - if isinstance(tensor, AntiSymmetricTensor): - name = tensor.name - if tensor.bra_ket_sym is S.One and \ - name not in (sym_tensors := result.sym_tensors): - result.sym_tensors = sym_tensors + (name,) - elif tensor.bra_ket_sym is S.NegativeOne and \ - name not in (antisym_t := result.antisym_tensors): - result.antisym_tensors = antisym_t + (name,) - return result - - -def _factor_short_intermediate(expr: ExprContainer, itmd: EriOrbenergy, - itmd_data: "FactorizationTermData", - itmd_cls: "RegisteredIntermediate", - allow_repeated_itmd_indices: bool = False - ) -> ExprContainer: - """ - Factors a short intermediate - an intermediate that consits only of one - term - in an expression. 
- - Parameters - ---------- - expr : ExprContainer - The expression to factor the intermediate in. - itmd : EriOrbenergy - The expression of the intermediate (a single term). - itmd_data : FactorizationTermData - Data of the intermediate term to map it onto subparts of terms in the - expression. - itmd_cls: RegisteredIntermediate - The class instance of the intermediate to factor. - allow_repeated_itmd_indices: bool, optional - If set, the factorization of intermediates of the form I_iij are - allowed, i.e., indices on the intermediate may appear more than - once. This corresponds to either a partial trace or a diagonal - element of the intermediate. - """ - - if expr.inner.is_number: - return expr - - # get the default symbols of the intermediate - itmd_default_symbols: tuple[Index, ...] = tuple( - get_symbols(itmd_cls.default_idx) - ) - # and the itmd contracted indices - itmd_contracted_symbols: tuple[Index, ...] = tuple( - s for s in set(itmd.expr.idx) if s not in itmd_default_symbols - ) - - terms: tuple[TermContainer, ...] 
= expr.terms - - factored: ExprContainer = ExprContainer(0, **expr.assumptions) - # factored expression that is returned - factored_sucessfully: bool = False # bool to indicate whether we factored - for term in terms: - term = EriOrbenergy(term).canonicalize_sign() - data = FactorizationTermData(term) - # check if the current term and the itmd are compatible: - # - check if all necessary objects occur in the eri part - obj_descr = data.eri_obj_descriptions - if any(obj_descr[descr] < n for descr, n in - itmd_data.eri_obj_descriptions.items()): - factored += term.expr.inner - continue - # - check if brackets of the correct length occur in the denominator - if itmd_data.denom_bracket_lengths is not None: # itmd has a denom - bracket_lengths = data.denom_bracket_lengths - if bracket_lengths is None: # term has no denom - factored += term.expr.inner - continue - else: # term also has a denom - if any(bracket_lengths[length] < n for length, n in - itmd_data.denom_bracket_lengths.items()): - factored += term.expr.inner - continue - # ok, the term seems to be a possible match -> try to factor - - # compare the term and the itmd term - variants = _compare_terms(term, itmd, data, itmd_data) - - if variants is None: - factored += term.expr.inner - continue - - # filter out variants, where repeated indices appear on the - # intermediate to factor - if not allow_repeated_itmd_indices: - variants = [ - var for var in variants - if all(c == 1 for c in - Counter(var["sub"].get(s, s) - for s in itmd_default_symbols).values()) - ] - if not variants: - factored += term.expr.inner - continue - - # choose the variant with the lowest overlap to other variants - # - find all unique obj indices (eri and denom) - # - and determine all itmd_indices - unique_obj_i: \ - dict[tuple[tuple[int, ...], tuple[int, ...]], list[int]] = {} - for var_idx, var in enumerate(variants): - key = (tuple(sorted(set(var["eri_i"]))), - tuple(sorted(set(var["denom_i"])))) - if key not in unique_obj_i: - 
unique_obj_i[key] = [] - unique_obj_i[key].append(var_idx) - - if len(unique_obj_i) == 1: # always the same objects in each variant - _, rel_variant_indices = unique_obj_i.popitem() - min_overlap: list[int] = [] - del unique_obj_i - else: - # multiple different objects -> try to find the one with the - # lowest overlap to the other variants (so that we can possibly - # factor the itmd more than once) - unique_obj_i_list = list(unique_obj_i.items()) - del unique_obj_i - overlaps: list[list[int]] = [] - for i, (key, _) in enumerate(unique_obj_i_list): - eri_i, denom_i = set(key[0]), set(key[1]) - # determine the intersection of the objects - overlaps.append(sorted([ - len(eri_i & set(other_key[0])) + - len(denom_i & set(other_key[1])) - for other_i, (other_key, _) in enumerate(unique_obj_i_list) - if i != other_i - ])) - # get the idx of the unique_obj_i with minimal intersections, - # get the variant_data of the first element in the variant_idx_list - min_overlap: list[int] = min(overlaps) - # collect all variant indices that have this overlap - rel_variant_indices = [] - for overlap, (_, var_idx_list) in zip(overlaps, unique_obj_i_list): - if overlap == min_overlap: - rel_variant_indices.extend(var_idx_list) - del overlaps - del unique_obj_i_list - # choose the variant with the minimal itmd_indices - variant_data = min( - [variants[var_idx] for var_idx in rel_variant_indices], - key=lambda var: [var["sub"].get(s, s).name for s in - itmd_default_symbols] - ) - - # now start with factoring - # - extract the remainder that survives the factorization (excluding - # the prefactor) - remainder: ExprContainer = _get_remainder( - term, variant_data["eri_i"], variant_data["denom_i"] - ) - # - find the itmd indices: - # for short itmds it is not necessary to minimize the itmd indices - # just use whatever is found - itmd_indices: tuple[Index, ...] = tuple( - variant_data["sub"].get(s, s) for s in itmd_default_symbols - ) - - contracted_itmd_indices: tuple[Index, ...] 
= tuple( - variant_data["sub"].get(s, s) for s in itmd_contracted_symbols - ) - remainder_indices = set(remainder.idx) - if any(s in remainder_indices for s in contracted_itmd_indices): - raise RuntimeError("Invalid contracted itmd indices " - f"{contracted_itmd_indices} found that also " - f"appear in the remainder:\n{remainder}") - - # - determine the prefactor of the factored term - pref = term.pref * variant_data["factor"] / itmd.pref - # - check if it is possible to factor the itmd another time: - # should be possible if there is a 0 in the min_overlap list: - # -> Currently factoring a variant that has 0 overlap with another - # variant - # -> It should be possible to factor the intermediate in the - # remainder again! - if 0 in min_overlap: - # factor again and ensure that the factored result has the - # the current assumptions - remainder = ExprContainer( - _factor_short_intermediate(remainder, itmd, itmd_data, - itmd_cls).inner, - **remainder.assumptions - ) - # - build the new term including the itmd - factored_term = _build_factored_term(remainder, pref, itmd_cls, - itmd_indices) - - factored_sucessfully = True - logger.info(f"\nFactoring {itmd_cls.name} in:\n{term}\n" - f"result:\n{EriOrbenergy(factored_term)}") - factored += factored_term.inner - # if we factored the itmd sucessfully it might be necessary to add - # the itmd tensor to the sym or antisym tensors - if factored_sucessfully: - tensor = itmd_cls.tensor(wrap_result=False) - if isinstance(tensor, AntiSymmetricTensor): - name = tensor.name - if tensor.bra_ket_sym is S.One and \ - name not in (sym_tensors := factored.sym_tensors): - factored.sym_tensors = sym_tensors + (name,) - elif tensor.bra_ket_sym is S.NegativeOne and \ - name not in (antisym_t := factored.antisym_tensors): - factored.antisym_tensors = antisym_t + (name,) - return factored - - -def _factor_complete(result: ExprContainer, - terms: Sequence[TermContainer], - itmd_cls: "RegisteredIntermediate", - factored_terms: set[int], - 
intermediate_variants: 'LongItmdVariants' - ) -> tuple[ExprContainer, bool]: - """ - Factors all found complete intermediate variants of a long intermediate - in an expression, i.e., variants where for all terms a match with the same - prefactor could be found meaning that nothing has to be added to the - expression to factor the intermediate. - - Parameters - ---------- - result : ExprContainer - The resulting expression where newly factored terms are added. - term : Sequence[TermContainer] - The original expression where the intermediate should be factored - split into terms. - itmd_cls : RegisteredIntermediate - The class instance of the intermediate to factor. - factored_terms : set[int] - Terms which were already involved in the factorization of an - intermediate variant. - intermediate_variants : LongItmdVariants - The intermediate variants found in the expression. - - Returns - ------- - tuple[ExprContainer, bool] - The result expression - with only the factored terms added - and a bool - indicating whether a complete intermediate could be factored. - """ - factored_successfully: bool = False - for itmd_indices, remainders in intermediate_variants.items(): - for rem in remainders: - complete_variant = intermediate_variants.get_complete_variant( - itmd_indices, rem - ) - while complete_variant is not None: - # Found a complete intermediate with matching prefactors!! 
- pref, term_list = complete_variant - - logger.info(f"\nFactoring {itmd_cls.name} in terms:") - for term_i in term_list: - logger.info(EriOrbenergy(terms[term_i])) - - new_term = _build_factored_term( - rem, pref, itmd_cls, itmd_indices - ) - logger.info(f"result:\n{EriOrbenergy(new_term)}") - result += new_term.inner - - # remove the used terms from the pool of available terms - # and add the terms to the already factored terms - intermediate_variants.remove_used_terms(term_list) - factored_terms.update(term_list) - factored_successfully = True - - # try to find the next complete variant - complete_variant = intermediate_variants.get_complete_variant( - itmd_indices, rem - ) - # remove empty itmd_indices and remainders - if factored_successfully: - intermediate_variants.clean_empty() - - return result, factored_successfully - - -def _factor_mixed_prefactors(result: ExprContainer, - terms: Sequence[TermContainer], - itmd_cls: "RegisteredIntermediate", - factored_terms: set[int], - intermediate_variants: "LongItmdVariants" - ) -> tuple[ExprContainer, bool]: - """ - Factors intermediate variants where all terms were found, though with - different prefactors, i.e., the intermediate might be factored by - adding one or more of the original terms to the result expression to - compensate: - z = a + 2b + c + d - a + b + c + d = z - b - - Parameters - ---------- - result : ExprContainer - The result expression where newly factored terms are added. - terms : Sequence[TermContainer] - The original expression where the intermediate should be factored - split into terms. - itmd_cls: RegisteredIntermediate - The class instance of the intermediate to factor. - factored_terms : set[int] - Terms which were already involved in the factorization of an - intermediate variant. - intermediate_variants : LongItmdVariants - The found intermediate variants. 
- - Returns - ------- - tuple[Expr, bool] - The result expression - with only the factored terms added - and a bool - indicating whether an interemdiate with mixed prefactors was factored. - """ - factored_successfully = False - for itmd_indices, remainders in intermediate_variants.items(): - for rem in remainders: - mixed_variant = intermediate_variants.get_mixed_pref_variant( - itmd_indices, rem - ) - while mixed_variant is not None: - prefs, term_list, unit_factors, pref_counter = mixed_variant - - # determine the most common prefactor and which terms needs - # to be added (have a different prefactor) - most_common_pref = max(pref_counter.items(), - key=lambda tpl: tpl[1])[0] - terms_to_add: dict[int, Expr] = {} - for p, term_i in zip(prefs, term_list): - if p == most_common_pref or term_i in terms_to_add: - continue - terms_to_add[term_i] = p - - # for all terms that don't have the most common prefactor: - # determine the 'extension' that needs to be added to the - # result to factor the intermediate using the most common pref - logger.info("\nAdding terms:") - for term_i, p in terms_to_add.items(): - desired_pref = Mul(most_common_pref, unit_factors[term_i]) - term = EriOrbenergy(terms[term_i]).canonicalize_sign() - extension_pref = Add(term.pref, -desired_pref) - term = term.num * extension_pref * term.eri / term.denom - logger.info(EriOrbenergy(term)) - result += term.inner - - logger.info(f"\nFactoring {itmd_cls.name} with mixed " - "prefactors in:") - for term_i in term_list: - logger.info(EriOrbenergy(terms[term_i])) - - new_term = _build_factored_term( - rem, most_common_pref, itmd_cls, itmd_indices - ) - logger.info(f"result:\n{EriOrbenergy(new_term)}") - result += new_term.inner - - # remove the used terms from the pool of available terms - # and add the terms to the already factored terms - intermediate_variants.remove_used_terms(term_list) - factored_terms.update(term_list) - factored_successfully = True - - # try to find the next mixed intermediate 
- mixed_variant = intermediate_variants.get_mixed_pref_variant( - itmd_indices, rem - ) - # remove empty itmd_indices and remainders - if factored_successfully: - intermediate_variants.clean_empty() - - return result, factored_successfully - - -def _build_factored_term(remainder: ExprContainer, pref: Expr, - itmd_cls: "RegisteredIntermediate", - itmd_indices: tuple[Index, ...] - ) -> ExprContainer: - """Builds the factored term.""" - tensor = itmd_cls.tensor(indices=itmd_indices, wrap_result=False) - # if the itmd_indices are completely minimized, we should always - # get a tensor (and no Mul object) - assert isinstance(tensor, SymbolicTensor) - # resolve the Zero placeholder for residuals - if tensor.name == "Zero": - return ExprContainer(0, **remainder.assumptions) - return remainder * pref * tensor - - -def _get_remainder(term: EriOrbenergy, obj_i: Sequence[int], - denom_i: Sequence[int]) -> ExprContainer: - """ - Builds the remaining part of the provided term that survives the - factorization of the itmd, excluding the prefactor! - Note that the returned remainder can still hold a prefactor of -1, - because sympy is not maintaining the canonical sign in the orbtial energy - fraction. - """ - eri: ExprContainer = term.cancel_eri_objects(obj_i) - denom: ExprContainer = term.cancel_denom_brackets(denom_i) - rem = term.num * eri / denom - # explicitly set the target indices, because the remainder not necessarily - # has to contain all of them. - if rem.provided_target_idx is None: # no target indices set - rem.set_target_idx(term.eri.target) - return rem - - -def _map_on_other_terms(itmd_i: int, remainder: ExprContainer, - itmd_term_map: LazyTermMap, - itmd_indices: tuple[Index, ...], - itmd_default_idx: tuple[Index, ...]) -> set[int]: - """ - Checks on which other itmd_terms the current itmd_term can be mapped if - the symmetry of the remainder is taken into account. The set of - intermediate terms (by index) is returned. 
- """ - from .symmetry import Permutation, PermutationProduct - - # find the itmd indices that are no target indices of the overall term - # -> those are available for permutations - target_indices = remainder.terms[0].target - idx_to_permute = {s for s in itmd_indices if s not in target_indices} - # copy the remainder and set the previously determined - # indices as target indices - rem: ExprContainer = remainder.copy() - rem.set_target_idx(tuple(idx_to_permute)) - # create a substitution dict to map the minimal indices to the - # default indices of the intermediate - minimal_to_default = {o: n for o, n in zip(itmd_indices, itmd_default_idx)} - # iterate over the subset of remainder symmetry that only involves - # non-target intermediate indices - matching_itmd_terms: set[int] = {itmd_i} - for perms, perm_factor in rem.terms[0].symmetry(only_target=True).items(): - # translate the permutations to the default indices - perms = PermutationProduct(*( - Permutation(minimal_to_default[p], minimal_to_default[q]) - for p, q in perms - )) - # look up the translated symmetry in the term map - term_map = itmd_term_map[(perms, perm_factor)] - if itmd_i in term_map: - matching_itmd_terms.add(term_map[itmd_i]) - return matching_itmd_terms - - -def _compare_eri_parts( - term: EriOrbenergy, itmd_term: EriOrbenergy, - term_data: "FactorizationTermData | None" = None, - itmd_term_data: 'FactorizationTermData | None' = None - ) -> list[tuple[list[int], dict[Index, Index], list[tuple[Index, Index]], int]] | None: # noqa E501 - """ - Compares the ERI parts of two terms. Determines - - the objects (by index) in the term on which the objects in the itmd term - can be mapped, i.e., the tensors that have to be removed from the term - if the intermediate is factored. - - the necessary index substitutions to bring the ERI part of the itmd - term into the form found in the expression term. - - the additional factor (+-1) that needs to be introduced after applying - the index substitutions. 
- """ - - # the eri part of the term to factor has to be at least as long as the - # eri part of the itmd (prefactors are separated!) - if len(itmd_term.eri) > len(term.eri): - return None - - objects: tuple[ObjectContainer, ...] = term.eri.objects - itmd_objects: tuple[ObjectContainer, ...] = itmd_term.eri.objects - - # generate term_data if not provided - if term_data is None: - term_data = FactorizationTermData(term) - # generate itmd_data if not provided - if itmd_term_data is None: - itmd_term_data = FactorizationTermData(itmd_term) - - relevant_itmd_data = zip(enumerate(itmd_term_data.eri_pattern), - itmd_term_data.eri_obj_indices, - itmd_term_data.eri_obj_symmetry) - - # compare all objects in the eri parts - variants: list[tuple[list[int], dict[Index, Index], int]] = [] - for (itmd_i, (itmd_descr, itmd_coupl)), itmd_indices, itmd_obj_sym in \ - relevant_itmd_data: - itmd_obj_exponent = itmd_objects[itmd_i].exponent - assert itmd_obj_exponent.is_Integer - - relevant_data = zip(enumerate(term_data.eri_pattern), - term_data.eri_obj_indices) - # list to collect all obj that can match the itmd_obj - # with their corresponding sub variants - itmd_obj_matches: list[tuple[list[int], dict[Index, Index], int]] = [] - for (i, (descr, coupl)), indices in relevant_data: - # tensors have same name and space? - # is the coupling of the itmd_obj a subset of the obj coupling? 
- if descr != itmd_descr or any(coupl[c] < n for c, n in - itmd_coupl.items()): - continue - # collect the obj index n-times to indicate how often the - # object has to be cancelled (possibly multiple times depending - # on the exponent of the itmd_obj) - to_cancel: list[int] = [i for _ in range(int(itmd_obj_exponent))] - # create all possibilites to map the indices onto each other - # by taking the symmetry of the itmd_obj into account - # store them as tuple: (obj_indices, sub, factor) - itmd_obj_matches.append((to_cancel, - dict(zip(itmd_indices, indices)), - 1)) - for perms, factor in itmd_obj_sym.items(): - perm_itmd_indices = itmd_indices - for p, q in perms: - sub = {p: q, q: p} - perm_itmd_indices = [sub.get(s, s) for s in - perm_itmd_indices] - itmd_obj_matches.append((to_cancel, - dict(zip(perm_itmd_indices, indices)), - factor)) - # was not possible to map the itmd_obj onto any obj in the term - # -> terms can not match - if not itmd_obj_matches: - return None - - if not variants: # initialize variants - variants.extend(itmd_obj_matches) - else: # try to add the mapping of the current itmd_obj - extended_variants: \ - list[tuple[list[int], dict[Index, Index], int]] = [] - for (i_list, sub, factor), (new_i_list, new_sub, new_factor) in \ - itertools.product(variants, itmd_obj_matches): - # was the obj already mapped onto another itmd_obj? - # do we have a contradiction in the sub_dicts? 
- # -> a index in the itmd can only be mapped onto 1 index - # in the term simultaneously - if new_i_list[0] not in i_list and all( - o not in sub or sub[o] is n - for o, n in new_sub.items()): - extended_variants.append((i_list + new_i_list, - sub | new_sub, # OR combine dict - factor * new_factor)) - if not extended_variants: # no valid combinations -> cant match - return None - variants = extended_variants - # validate the found variants to map the terms onto each other - valid: list[tuple[list[int], dict[Index, Index], list[tuple[Index, Index]], int]] = [] # noqa E501 - for i_list, sub_dict, factor in variants: - i_set: set[int] = set(i_list) - # did we find a match for all itmd_objects? - if len(i_set) != len(itmd_objects): - continue - # extract the objects of the term - relevant_obj: Expr = Mul(*(objects[i].inner for i in i_set)) - # apply the substitutions to the itmd_term, remove the prefactor - # (the substitutions might introduce a factor of -1 that we don't need) - # and check if the substituted itmd_term is identical to the subset - # of objects - sub_list = order_substitutions(sub_dict) - sub_itmd_eri = itmd_term.eri.subs(sub_list) - - if sub_itmd_eri.inner is S.Zero: # invalid substitution list - continue - pref = sub_itmd_eri.terms[0].prefactor # +-1 - - if Add(relevant_obj, -Mul(sub_itmd_eri.inner, pref)) is S.Zero: - valid.append((i_list, sub_dict, sub_list, factor)) - return valid if valid else None - - -def _compare_terms(term: EriOrbenergy, itmd_term: EriOrbenergy, - term_data: "FactorizationTermData | None" = None, - itmd_term_data: "FactorizationTermData | None" = None - ) -> None | list[dict[str, Any]]: - """ - Compares two terms and determines - - the index of objects in the eri and denominator part of the term that - need to be removed in the term to factor the interemdiate. - - the necessary index substitutions to bring the itmd term into the form - found in the expression. 
- - the additional factor (+-1) that needs to be introduced after applying - the index substitutions. - Note: orbital energy numerators are currently not treated! - """ - - eri_variants = _compare_eri_parts( - term, itmd_term, term_data, itmd_term_data - ) - - if eri_variants is None: - return None - - # itmd_term has no denominator -> stop here - if itmd_term.denom.inner.is_number: - return [{'eri_i': eri_i, 'denom_i': [], - 'sub': sub_dict, 'sub_list': sub_list, 'factor': factor} - for eri_i, sub_dict, sub_list, factor in eri_variants] - - # term and itmd_term should have a denominator at this point - # -> extract the brackets - brackets = term.denom_brackets - itmd_brackets = itmd_term.denom_brackets - # extract the lengths of all brakets - bracket_lengths: list[int] = [len(bk) for bk in brackets] - # prescan the brackets according to their length to avoid unnecessary - # substitutions - compatible_brackets: dict[int, list[int]] = {} - for itmd_denom_i, itmd_bk in enumerate(itmd_brackets): - itmd_bk_length = len(itmd_bk) - matching_brackets = [denom_i for denom_i, bk_length - in enumerate(bracket_lengths) - if bk_length == itmd_bk_length] - if not matching_brackets: # could not find a match for a itmd bracket - return None - compatible_brackets[itmd_denom_i] = matching_brackets - - # check which of the found substitutions are also valid for the denominator - variants: list[dict[str, Any]] = [] - for eri_i, sub_dict, sub_list, factor in eri_variants: - # can only map each bracket onto 1 itmd bracket - # otherwise something should be wrong - denom_matches: list[int] = [] - for itmd_denom_i, denom_idx_list in compatible_brackets.items(): - itmd_bk = itmd_brackets[itmd_denom_i] - # extract base and exponent of the bracket - if isinstance(itmd_bk, ExprContainer): - itmd_bk_exponent = S.One - itmd_bk = itmd_bk.inner - else: # polynom -> Pow object - itmd_bk, itmd_bk_exponent = itmd_bk.base_and_exponent - assert itmd_bk_exponent.is_Integer - - # apply the substitutions 
to the base of the bracket - sub_itmd_bk = itmd_bk.subs(sub_list) - if sub_itmd_bk is S.Zero: # invalid substitution list - continue - - # try to find a match in the subset of brackets of equal length - for denom_i in denom_idx_list: - if denom_i in denom_matches: # denom bk is already assigned - continue - bk = brackets[denom_i] - # extract the base of the bracket - bk = bk.inner if isinstance(bk, ExprContainer) else bk.base - if Add(sub_itmd_bk, -bk) is S.Zero: # brackets are equal? - denom_matches.extend( - denom_i for _ in range(int(itmd_bk_exponent)) - ) - break - # did not run into the break: - # -> could not find a match for the itmd_bracket - # -> directly skip to next eri_variant - else: - break - # did we find a match for all itmd brackets? - if len(set(denom_matches)) == len(itmd_brackets): - variants.append({'eri_i': eri_i, 'denom_i': denom_matches, - 'sub': sub_dict, 'sub_list': sub_list, - 'factor': factor}) - return variants if variants else None - - -def _compare_remainder(remainder: ExprContainer, ref_remainder: ExprContainer, - itmd_indices: tuple[Index, ...]) -> int | None: - """ - Compares two remainders and tries to map remainder onto ref_remainder. - - Returns - int | None - None if the remainder can not be mapped onto each other. - The factor (+-1) that is necessary to achieve equality for both - remainder. - """ - from .reduce_expr import factor_eri_parts, factor_denom - - # if we have a number as remainder, it should be +-1 - if remainder.inner is S.Zero or ref_remainder.inner is S.Zero: - raise ValueError("It should not be possible for a remainder to " - "be equal to 0.") - - # in addition to the target indices, the itmd_indices have to be fixed too. 
- # -> set both indices sets as target indices of the expressions - fixed_indices = remainder.terms[0].target - assert fixed_indices == ref_remainder.terms[0].target - fixed_indices += itmd_indices - - # create a copy of the expressions to keep the assumptions of the original - # expressions valid (assumptions should hold the correct target indices) - remainder, ref_remainder = remainder.copy(), ref_remainder.copy() - remainder.set_target_idx(fixed_indices) - ref_remainder.set_target_idx(fixed_indices) - - # TODO: we have a different situation in this function, because not all - # contracted indices have to occur in the eri part of the remainder: - # eri indices: jkln. Additionally we have m in the denominator. - # the function will only map n->m but not m->n, because it does - # not occur in the eri part. This might mess up the denominator - # or numerator of the term completely! - # -> can neither use find_compatible_terms nor compare_terms!! - # I think in a usual run this should only occur if previously some - # intermediate was not found correctly, because for t-amplitudes all - # removed indices either only occur in the eri part or occur in eri and - # denom. But if we did not find some t-amplitude and have some denominator - # left, this problem might occur if a denom idx is a contracted index - # in the eri part of the itmd. - # -> but then we can not factor the itmd anyway, because the contracted - # idx in the eri part and the denom have to be identical - # -> need to be solved at another point - - difference = remainder - ref_remainder - if len(difference) == 1: # already identical -> 0 or added to 1 term - return 1 if difference.inner is S.Zero else -1 - # check if the eri parts of both remainders can be mapped onto each other - factored = factor_eri_parts(difference) - if len(factored) > 1: # eri parts not compatible - return None - - # check if the denominators are compatible too. 
- factored = factor_denom(factored[0]) - if len(factored) > 1: # denominators are not compatible - return None - return 1 if factored[0].inner is S.Zero else -1 - - -class LongItmdVariants( - dict[tuple[Index, ...], dict[ExprContainer, dict[tuple[int, ...], list[tuple[int, Expr, Expr]]]]] # noqa E501 - ): - """ - Class to manage the variants of long intermediates. - - Parameters - ---------- - n_itmd_terms : int - The number of terms in the long intermediate. - """ - - def __init__(self, n_itmd_terms: int, *args, **kwargs): - self.n_itmd_terms: int = n_itmd_terms - # The number of terms we require to share a common prefactor - # for mixed prefactor intermediates - self.n_common_pref_terms: int = (0.6 * self.n_itmd_terms).__ceil__() - super().__init__(*args, **kwargs) - - def add(self, term_i: int, itmd_indices: tuple[Index, ...], - remainder: ExprContainer, matching_itmd_terms: Iterable[int], - prefactor: Expr, unit_factorization_pref: Expr) -> None: - """ - Add a matching term-itmd_term pair to the pool for building - intermediate variants. - - Parameters - ---------- - term_i : int - The index of the term. - itmd_indices : tuple[Index] - The indices of the factored interemdiate. - remainder : ExprContainer - Remaining objects of the term after factoring the intermediate. - matching_itmd_terms : Iterable[int] - Index of itmd terms the term can be mapped. - prefactor: Expr - The prefactor of the resulting term after factoring the itmd. - unit_factorization_pref: Expr - The factor that the current term would need if the intermediate - would be factored with a prefactor of 1. 
- """ - # trivial separation by itmd_indices (the indices of the itmd we - # try to factor with the current variant) - if itmd_indices not in self: - self[itmd_indices] = {} - - matching_itmd_terms = tuple(sorted(matching_itmd_terms)) - is_new_remainder = True - for rem, found_matches in self[itmd_indices].items(): - # next we can separate the variants by the remainder they will - # create when the variant is factored - factor = _compare_remainder(remainder=remainder, ref_remainder=rem, - itmd_indices=itmd_indices) - if factor is None: # remainder did not match - continue - - is_new_remainder = False - # possibly we got another -1 from matching the remainder - prefactor *= sympify(factor) - - # next, we can separate them according to the itmd_positions - # so we can later build intermediate variants more efficient - if matching_itmd_terms not in found_matches: - found_matches[matching_itmd_terms] = [] - # It is possible to obtain entries that have the same - # term_i and pref, but differ in the sign of the unit factor - # this is probably a result of the permutation symmetry - # of some intermediates - # -> only add the term if term_i and pref have not been found yet - is_dublicate = any( - (term_i == other_term_i and prefactor == other_pref - and abs(unit_factorization_pref) == abs(other_unit_factor)) - for other_term_i, other_pref, other_unit_factor in - found_matches[matching_itmd_terms] - ) - if not is_dublicate: - found_matches[matching_itmd_terms].append( - (term_i, prefactor, unit_factorization_pref) - ) - break - if is_new_remainder: - self[itmd_indices][remainder] = {} - self[itmd_indices][remainder][matching_itmd_terms] = [ - (term_i, prefactor, unit_factorization_pref) - ] - - def get_complete_variant(self, itmd_indices: tuple[Index, ...], - remainder: ExprContainer - ) -> None | tuple[Expr, list[int]]: - """ - Returns prefactor and terms (by index) of a complete intermediate - variant for the given itmd_indices and remainder. 
- Only variants that are complete and share a common prefactor are - considered here. - If no variant can be found None is returned. - """ - - def sort_matches(pool: list[tuple[tuple[int, ...], set[int]]] - ) -> list[tuple[tuple[int, ...], list[int]]]: - term_i_counter: dict[int, dict[int, int]] = {} - for positions, matches in pool: - for term_i in matches: - if term_i not in term_i_counter: - term_i_counter[term_i] = {} - for p in positions: - if p not in term_i_counter[term_i]: - term_i_counter[term_i][p] = 0 - term_i_counter[term_i][p] += 1 - term_i_counter_evaluated: dict[int, tuple[int, int]] = { - term_i: (len(positions), sum(positions.values())) - for term_i, positions in term_i_counter.items() - } - del term_i_counter - return [ - (pos, sorted(matches, - key=lambda m: term_i_counter_evaluated[m])) - for pos, matches in pool] - - # itmd_indices and or remainder not found - if itmd_indices not in self or \ - remainder not in self[itmd_indices]: - return None - pool = self[itmd_indices][remainder] - if not pool: # empty pool: already factored everything - return None - - # construct base variants that are likely to form complete variants - for pref, term_list in self._complete_base_variants(pool): - # filter the pool: - # - remove all already occupied positions - # - remove all matches of already used terms - # - remove all matches that have a different prefactor - relevant_pool: dict[tuple[int, ...], set[int]] = {} - for positions, matches in pool.items(): - if any(term_list[p] is not None for p in positions): - continue - relevant_matches = { - term_i for term_i, other_pref, _ in matches - if other_pref == pref and term_i not in term_list - } - if relevant_matches: - relevant_pool[positions] = relevant_matches - if not relevant_pool: # nothing relevant left - continue - - # sort the pool: - # - start with the positions with the lowest number of matches - # - prioritize rare indices - relevant_pool_sorted = sorted( - relevant_pool.items(), key=lambda kv: 
len(kv[1]) - ) - del relevant_pool - relevant_pool_sorted = sort_matches(relevant_pool_sorted) - - # set up masks to avoid creating copies of the pool - pos_mask = [True for _ in relevant_pool_sorted] - match_masks = [[True for _ in matches] - for _, matches in relevant_pool_sorted] - # try to complete the base variant from the relevant pool - success = self._build_complete_variant( - term_list, relevant_pool_sorted, pos_mask, match_masks - ) - if success: - assert _is_int_list(term_list) - return pref, term_list - # continue with the next base variant - # loop completed -> no complete variant found - return None - - def _complete_base_variants( - self, pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] - ) -> Generator[tuple[Expr, list[int | None]], None, None]: - """Iterator over the base variants for complete intermediates.""" - - def sort_matches( - pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]], - matches_to_sort: list[tuple[int, Expr, Expr]] - ) -> list[tuple[int, Expr, Expr]]: - term_i_counter: dict[int, dict[int, int]] = {} - pref_available_pos: dict[Expr, list[bool]] = {} - for positions, matches in pool.items(): - for term_i, pref, _, in matches: - if term_i not in term_i_counter: - term_i_counter[term_i] = {} - if pref not in pref_available_pos: - pref_available_pos[pref] = [False for _ in - range(self.n_itmd_terms)] - for p in positions: - if p not in term_i_counter[term_i]: - term_i_counter[term_i][p] = 0 - term_i_counter[term_i][p] += 1 - pref_available_pos[pref][p] = True - term_i_counter_evaluated = {term_i: ( - len(positions), - sum(positions.values()) - ) for term_i, positions in term_i_counter.items()} - # remove prefactors where not all positions are available - matches_to_sort = [m for m in matches_to_sort - if all(pref_available_pos[m[1]])] - return sorted( - matches_to_sort, key=lambda m: term_i_counter_evaluated[m[0]] - ) - - # find the position with the lowest number of matches - pos, matches = min(pool.items(), key=lambda 
kv: len(kv[1])) - # sort the matches so that rare term_i are covered first - # and remove prefactors where not all positions are available - if len(matches) > 1: - matches = sort_matches(pool, matches) - - # ensure we only ever try once per term_i and pref combination - prev_tried: dict[Expr, set[int]] = {} - for term_i, pref, _ in matches: - if pref not in prev_tried: - prev_tried[pref] = set() - if term_i in prev_tried[pref]: - continue - prev_tried[pref].add(term_i) - - yield (pref, - [term_i if i in pos else None - for i in range(self.n_itmd_terms)]) - - def _build_complete_variant(self, term_list: list[int | None], - pool: list[tuple[tuple[int, ...], list[int]]], - pos_mask: list[bool], - match_masks: list[list[bool]]) -> bool: - """ - Recursively builds the complete variant from the pool of matches. - """ - # check if the variant can be completed with the available - # positions - unique_positions = {p for pos, _ in itertools.compress(pool, pos_mask) - for p in pos} - n_missing_terms = term_list.count(None) - if n_missing_terms > len(unique_positions): - return False - - for i, (positions, matches) in \ - itertools.compress(enumerate(pool), pos_mask): - # update the mask: - # mask the positions that will be filled in the following loop - pos_mask[i] = False - - # since all positions have to be available we can already - # predict here whether we will be able to complete the variant - completed = (n_missing_terms == len(positions)) - for term_i in itertools.compress(matches, match_masks[i]): - # don't copy the term_list. Instead revert the changes - # before continue the iteration - for p in positions: - term_list[p] = term_i - - if completed: # check if we completed the variant - return True - - # update the mask: - # mask all positions that intersect with the filled - # positions and mask term_i as not available - # for now just store the mask changes, but we can - # also recompute the changes to revert them. 
- masked_pos: list[int] = [] - masked_matches: list[tuple[int, int]] = [] - for other_i, (pos, other_matches) in \ - itertools.compress(enumerate(pool), pos_mask): - if any(p in positions for p in pos): - # no need to update the match mask here - pos_mask[other_i] = False - masked_pos.append(other_i) - continue - for j, other_term_i in \ - itertools.compress(enumerate(other_matches), - match_masks[other_i]): - if term_i == other_term_i: - match_masks[other_i][j] = False - masked_matches.append((other_i, j)) - if not any(match_masks[other_i]): - pos_mask[other_i] = False - masked_pos.append(other_i) - - # recurse and try to complete the variant - success = self._build_complete_variant( - term_list, pool, pos_mask, match_masks - ) - if success: # found complete variant - return True - - # revert the mask changes - for other_i in masked_pos: - pos_mask[other_i] = True - for other_i, j in masked_matches: - match_masks[other_i][j] = True - - # revert the changes to term_list and continue the loop - for p in positions: - term_list[p] = None - # unmask the position - pos_mask[i] = True - return False - - def get_mixed_pref_variant( - self, itmd_indices: tuple[Index, ...], remainder: ExprContainer - ) -> None | tuple[list[Expr], list[int], dict[int, Expr], Counter[Expr]]: # noqa E501 - """ - Returns a complete variant allowing mixed prefactors for the given - itmd_indices and remainder. Only variants where at leas 60% of the - terms share a common prefactor are considered. 
- """ - def _is_pool( - sequence: list - ) -> TypeGuard[list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]]]: # noqa E501 - # the alternative return type would have a length of 3 - return all(len(tpl) == 2 for tpl in sequence) - - # itmd_indices or remainder not available - if itmd_indices not in self or \ - remainder not in self[itmd_indices]: - return None - pool = self[itmd_indices][remainder] - if not pool: # empty pool: already factored all term_i - return None - - for (prefs, term_list, unit_factors) in \ - self._mixed_pref_base_variants(pool): - # filter the pool by removing all positions that are already - # occupied. Addtionally, remove all term_i that are already - # in use. - relevant_pool: \ - dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] = {} - for positions, matches in pool.items(): - if any(term_list[p] is not None for p in positions): - continue - relevant_matches = [ - data for data in matches if data[0] not in term_list - ] - if relevant_matches: - relevant_pool[positions] = relevant_matches - if not relevant_pool: - continue - - # sort the pool to start with the position with the lowest amount - # of valid matches and prioritize rare term_i and common prefactors - relevant_pool_sorted = sorted( - relevant_pool.items(), key=lambda kv: len(kv[1]) - ) - del relevant_pool - relevant_pool_sorted = self._sort_mixed_pref_matches( - relevant_pool_sorted - ) - assert _is_pool(relevant_pool_sorted) - - # set up masks for position and matches to avoid copying data - pos_mask: list[bool] = [True for _ in relevant_pool_sorted] - match_masks: list[list[bool]] = [ - [True for _ in matches] - for _, matches in relevant_pool_sorted - ] - pref_counter = Counter([p for p in prefs if p is not None]) - # try to complete the base variant using the relevant pool - success = self._complete_mixed_variant( - term_list, prefs, unit_factors, relevant_pool_sorted, - pref_counter, pos_mask, match_masks - ) - if success: - assert _is_int_list(term_list) and 
_is_expr_list(prefs) - return prefs, term_list, unit_factors, pref_counter - # else continue with the next base variant - # loop completed -> no mixed variant found - return None - - def _mixed_pref_base_variants( - self, pool: dict[tuple[int, ...], list[tuple[int, Expr, Expr]]] - ) -> Generator[tuple[list[Expr | None], list[int | None], dict[int, Expr]], None, None]: # noqa E501 - """ - Iterator over the base variants for intermediates with mixed prefactors - """ - def _is_long_itmd_data_list( - sequence: list) -> TypeGuard[list[tuple[int, Expr, Expr]]]: - return all( - len(tpl) == 3 and isinstance(tpl[0], int) - and isinstance(tpl[1], Expr) and isinstance(tpl[2], Expr) - for tpl in sequence - ) - # find the positions with the lowest number of matches - pos, matches = min(pool.items(), key=lambda kv: len(kv[1])) - # sort the matches so that - # rare indices and common prefactors are preferred - if len(matches) > 1: - matches = self._sort_mixed_pref_matches( - tuple(pool.items()), matches - ) - assert _is_long_itmd_data_list(matches) - - # filter out matches that have the same term_i and pref - prev_tried: dict[Expr, set[int]] = {} - for term_i, pref, unit_factor in matches: - if pref not in prev_tried: - prev_tried[pref] = set() - if term_i in prev_tried[pref]: - continue - prev_tried[pref].add(term_i) - - yield ( - [pref if i in pos else None for i in range(self.n_itmd_terms)], - [term_i if i in pos else None - for i in range(self.n_itmd_terms)], - {term_i: unit_factor} - ) - - def _sort_mixed_pref_matches( - self, - pool: Sequence[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]], # noqa E501 - matches_to_sort: list[tuple[int, Expr, Expr]] | None = None - ) -> list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]] | \ - list[tuple[int, Expr, Expr]]: - """ - Sorts all matches in the pool so that rare term_i and common - prefactors are preferred. 
If an additional match list is provided - instead this match list will be sorted instead of all matches in - the pool. - """ - term_i_counter: dict[int, dict[int, int]] = {} - pref_counter: dict[Expr, dict[int, int]] = {} - for positions, matches in pool: - for term_i, pref, _ in matches: - if term_i not in term_i_counter: - term_i_counter[term_i] = {} - if pref not in pref_counter: - pref_counter[pref] = {} - for pos in positions: - if pos not in term_i_counter[term_i]: - term_i_counter[term_i][pos] = 0 - term_i_counter[term_i][pos] += 1 - if pos not in pref_counter[pref]: - pref_counter[pref][pos] = 0 - pref_counter[pref][pos] += 1 - term_i_counter_evaluated: dict[int, tuple[int, int]] = { - term_i: (len(positions), sum(positions.values())) - for term_i, positions in term_i_counter.items() - } - del term_i_counter - pref_counter_evaluated: dict[Expr, tuple[int, int]] = { - pref: (-len(positions), -sum(positions.values())) - for pref, positions in pref_counter.items() - } - del pref_counter - - if matches_to_sort is None: - return [ - (pos, sorted(matches, - key=lambda m: (*term_i_counter_evaluated[m[0]], - *pref_counter_evaluated[m[1]]))) - for pos, matches in pool - ] - else: - return sorted( - matches_to_sort, - key=lambda m: (*term_i_counter_evaluated[m[0]], - *pref_counter_evaluated[m[1]]) - ) - - def _complete_mixed_variant( - self, term_list: list[int | None], prefactors: list[Expr | None], - unit_factors: dict[int, Expr], - pool: list[tuple[tuple[int, ...], list[tuple[int, Expr, Expr]]]], - pref_counter: Counter[Expr], pos_mask: list[bool], - match_masks: list[list[bool]]) -> bool: - """ - Recursively builds the complete mixed prefactor variant - Only variants where at least 60% of the terms share a common prefactor - are accepted. 
- """ - # check if the variant can be completed with the available - # positions - unique_positions = {p for pos, _ in itertools.compress(pool, pos_mask) - for p in pos} - n_missing_terms = term_list.count(None) - if n_missing_terms > len(unique_positions): - return False - - for i, (positions, matches) in \ - itertools.compress(enumerate(pool), pos_mask): - # update the poositions mask - pos_mask[i] = False - - completed = (n_missing_terms == len(positions)) - for term_i, pref, unit_factor in \ - itertools.compress(matches, match_masks[i]): - # if we add the match: will we still be able to - # create a valid variant that hast at least 60% common - # prefactor? - pref_counter[pref] += len(positions) - - max_terms_common_pref = max(pref_counter.values()) + \ - n_missing_terms - len(positions) - if max_terms_common_pref < self.n_common_pref_terms: - # we will not be able to complete the variant - # with the current addition - pref_counter[pref] -= len(positions) - continue - - # add the current match to the variant - for p in positions: - term_list[p] = term_i - prefactors[p] = pref - unit_factors[term_i] = unit_factor - - if completed and max(pref_counter.values()) >= \ - self.n_common_pref_terms: - return True - - # update the mask: - # - mask any position that intersects with the the added - # positions - # - mask all otherm matches of term_i - masked_pos: list[int] = [] - masked_matches: list[tuple[int, int]] = [] - for other_i, (pos, other_matches) in \ - itertools.compress(enumerate(pool), pos_mask): - if any(p in positions for p in pos): - pos_mask[other_i] = False - masked_pos.append(other_i) - continue - for j, (other_term_i, _, _) in \ - itertools.compress(enumerate(other_matches), - match_masks[other_i]): - if term_i == other_term_i: - match_masks[other_i][j] = False - masked_matches.append((other_i, j)) - if not any(match_masks[other_i]): - pos_mask[other_i] = False - masked_pos.append(other_i) - - # recurse and try to complete the variant - success = 
self._complete_mixed_variant( - term_list, prefactors, unit_factors, pool, pref_counter, - pos_mask, match_masks - ) - if success: - return True - - # revert the mask changes - for other_i in masked_pos: - pos_mask[other_i] = True - for other_i, j in masked_matches: - match_masks[other_i][j] = True - - # undo the changes to the variant - for p in positions: - term_list[p] = None - prefactors[p] = None - del unit_factors[term_i] - - # undo the prefcounter changes - pref_counter[pref] -= len(positions) - - # unmaks the position - pos_mask[i] = True - return False - - def remove_used_terms(self, used_terms: list[int]) -> None: - """ - Removes the provided terms from the pool, so they can not - be used to build further variants. - """ - for remainders in self.values(): - for positions in remainders.values(): - empty_pos: list[tuple[int, ...]] = [] - for pos, matches in positions.items(): - to_delete = [i for i, m in enumerate(matches) - if m[0] in used_terms] - # need to remove element with highest index first! - for i in sorted(to_delete, reverse=True): - del matches[i] - if not matches: # removed all matches for the position - empty_pos.append(pos) - for pos in empty_pos: - del positions[pos] - - def clean_empty(self) -> None: - """Removes all empty entries in the nested dictionary.""" - empty_indices: list[tuple[Index, ...]] = [] - for itmd_indices, remainders in self.items(): - empty_rem = [rem for rem, positions in remainders.items() - if not positions] - for rem in empty_rem: - del remainders[rem] - if not remainders: - empty_indices.append(itmd_indices) - for itmd_indices in empty_indices: - del self[itmd_indices] - - -class FactorizationTermData: - """ - Class that extracts some data needed for the intermediate factorization. - - Parameters - ---------- - term : EriOrbenergy - The term to extract data from. 
- """ - - def __init__(self, term: EriOrbenergy): - self._term: EriOrbenergy = term - - @cached_property - def eri_pattern(self) -> tuple[tuple[str, Counter[str]], ...]: - """ - Returns the pattern of the eri part of the term. In contrast to the - pattern used in simplify, the pattern is determined for each object - as tuple that consists of the object description and the - coupling of the object. - """ - coupling: dict[int, list[str]] = self._term.eri.coupling( - include_exponent=False, include_target_idx=False - ) - return tuple( - (obj.description(include_exponent=False, target_idx=None), - Counter(coupling.get(i, []))) - for i, obj in enumerate(self._term.eri.objects) - ) - - @cached_property - def eri_obj_indices(self) -> tuple[tuple[Index, ...], ...]: - """Indices hold by each of the objects in the eri part.""" - return tuple(obj.idx for obj in self._term.eri.objects) - - @cached_property - def eri_obj_symmetry(self - ) -> tuple[dict[tuple[Permutation, ...], int], ...]: - """Symmetry of all objects in the eri part.""" - return tuple( - TermContainer(obj, **obj.assumptions).symmetry() - for obj in self._term.eri.objects - ) - - @cached_property - def eri_obj_descriptions(self) -> Counter[str]: - """ - Counts how often each description occurs in the eri part. - Exponent of the objects is included implicitly by incrementing - the description counter. - """ - descr = [] - for obj in self._term.eri.objects: - exp = obj.exponent - assert exp.is_Integer - descr.extend( - obj.description(target_idx=None, include_exponent=False) - for _ in range(int(exp)) - ) - return Counter(descr) - - @cached_property - def denom_bracket_lengths(self) -> None | Counter[int]: - """ - Determine the length of all brackets in the orbital energy - denominator and count how often each length occurs in the denominator. 
- """ - if self._term.denom.inner.is_number: - return None - else: - return Counter(len(bk) for bk in self._term.denom_brackets) - - -def _is_int_list(sequence: list) -> TypeGuard[list[int]]: - return all(isinstance(item, int) for item in sequence) - - -def _is_expr_list(sequence: list) -> TypeGuard[list[Expr]]: - return all(isinstance(item, Expr) for item in sequence) - - -_ = _factor_long_intermediate diff --git a/build/lib/adcgen/func.py b/build/lib/adcgen/func.py deleted file mode 100644 index ae8e5ad..0000000 --- a/build/lib/adcgen/func.py +++ /dev/null @@ -1,540 +0,0 @@ -from collections.abc import Sequence -import itertools - -from sympy.physics.secondquant import ( - F, Fd, FermionicOperator, NO -) -from sympy import S, Add, Expr, Mul, Pow, sqrt, Symbol, sympify - -from .expression import ExprContainer -from .misc import Inputerror -from .rules import Rules -from .indices import Index, Indices, get_symbols, split_idx_string -from .sympy_objects import ( - KroneckerDelta, NonSymmetricTensor, AntiSymmetricTensor, SymmetricTensor, - Amplitude -) -from .tensor_names import is_adc_amplitude, is_t_amplitude, tensor_names - - -def gen_term_orders(order: int, term_length: int, min_order: int - ) -> list[tuple[int, ...]]: - """ - Generate all combinations of orders that contribute to the n'th-order - contribution of a term of the given length - (a * b * c * ...)^{(n)}, - where a, b and c are each subject of a perturbation expansion. - - Parameters - ---------- - order : int - The perturbation theoretical order n. - term_length : int - The number of objects in the term. - min_order : int - The minimum perturbation theoretical order of the objects in the - term to consider. For instance, 2 if the first and zeroth order - contributions are not relevant, because they vanish or are considered - separately. 
- """ - - if not all(isinstance(n, int) and n >= 0 - for n in [order, term_length, min_order]): - raise Inputerror("Order, term_length and min_order need to be " - "non-negative integers.") - - orders = (o for o in range(min_order, order + 1)) - combinations = itertools.product(orders, repeat=term_length) - return [comb for comb in combinations if sum(comb) == order] - - -def import_from_sympy_latex(expr_string: str, - convert_default_names: bool = False - ) -> ExprContainer: - """ - Imports an expression from a string created by the 'sympy.latex' function. - - Parameters - ---------- - convert_default_names : bool, optional - If set, all default tensor names found in the expression to import - will be converted to the currently configured names. - - Returns - ------- - ExprContainer - The imported expression in a 'Expr' container. Note that no assumptions - (sym_tensors or antisym_tensors) have been applied yet. - """ - - def import_indices(indices: str) -> list[Index]: - # split at the end of each index with a spin label - # -> n1n2n3_{spin} - idx: list[Index] = [] - for sub_part in indices.split("}"): - if not sub_part: # skip empty string - continue - if "_{\\" in sub_part: # the last index has a spin label - names, spin = sub_part.split("_{\\") - if spin not in ["alpha", "beta"]: - raise RuntimeError(f"Found invalid spin on Index: {spin}. 
" - f"Input: {indices}") - names = split_idx_string(names) - idx.extend(get_symbols(names[:-1])) - idx.extend(get_symbols(names[-1], spin[0])) - else: # no index has a spin label - idx.extend(get_symbols(sub_part)) - return idx - - def import_tensor(tensor: str) -> Expr: - # split the tensor in base and exponent - stack: list[str] = [] - separator: int | None = None - for i, c in enumerate(tensor): - if c == "{": - stack.append(c) - elif c == "}": - assert stack.pop() == "{" - elif not stack and c == "^": - separator = i - break - if separator is None: - exponent = 1 - else: - exponent = tensor[separator+1:] - exponent = int(exponent.lstrip("{").rstrip("}")) - tensor = tensor[:separator] - # done with processing the exponent - # -> deal with the tensor. remove 1 layer of curly brackets and - # afterwards split the tensor string into its components - if tensor[0] == "{": - tensor = tensor[1:] - if tensor[-1] == "}": - tensor = tensor[:-1] - stack.clear() - components: list[str] = [] - temp: list[str] = [] - for i, c in enumerate(tensor): - if c == "{": - stack.append(c) - elif c == "}": - assert stack.pop() == "{" - elif not stack and c in ["^", "_"]: - components.append("".join(temp)) - temp.clear() - continue - temp.append(c) - if temp: - components.append("".join(temp)) - name, indices = components[0], components[1:] - # if desired map the default tensor names to their currently - # configured name - # -> this allows expressions with the default names to - # be imported and mapped to the current configuration, correctly - # recognizing Amplitudes and SymmetricTensors. 
- if convert_default_names: - name = tensor_names.map_default_name(name) - - # remove 1 layer of brackets from all indices - for i, idx in enumerate(indices): - if idx[0] == "{": - idx = idx[1:] - if idx[-1] == "}": - idx = idx[:-1] - indices[i] = idx - - if len(indices) == 0: # no indices -> a symbol - base: Expr = Symbol(name) - elif name == "a": # create / annihilate - if len(indices) == 2 and indices[0] == "\\dagger": - base: Expr = Fd(*import_indices(indices[1])) - elif len(indices) == 1: - base: Expr = F(*import_indices(indices[0])) - else: - raise RuntimeError("Unknown second quantized operator: ", - tensor) - elif len(indices) == 2: # antisym-/symtensor or amplitude - upper = import_indices(indices[0]) - lower = import_indices(indices[1]) - # ADC-Amplitude or t-amplitudes - if is_adc_amplitude(name) or is_t_amplitude(name): - base: Expr = Amplitude(name, upper, lower) - elif name == tensor_names.coulomb: # eri in chemist notation - base: Expr = SymmetricTensor(name, upper, lower) - else: - base: Expr = AntiSymmetricTensor(name, upper, lower) - elif len(indices) == 1: # nonsymtensor - base: Expr = NonSymmetricTensor(name, import_indices(indices[0])) - else: - raise RuntimeError(f"Unknown tensor object: {tensor}") - assert isinstance(base, Expr) - return Pow(base, exponent) - - def import_obj(obj_str: str) -> Expr: - # import an individial object - if obj_str.isnumeric(): # prefactor - return sympify(int(obj_str)) - elif obj_str.startswith("\\sqrt{"): # sqrt{x} prefactor - return sqrt(int(obj_str[:-1].replace("\\sqrt{", "", 1))) - elif obj_str.startswith("\\delta_"): # KroneckerDelta - idx = obj_str[:-1].replace("\\delta_{", "", 1).split() - idx = import_indices("".join(idx)) - if len(idx) != 2: - raise RuntimeError(f"Invalid indices for delta: {idx}.") - ret = KroneckerDelta(*idx) - assert isinstance(ret, Expr) - return ret - elif obj_str.startswith("\\left("): # braket - # need to take care of exponent of the braket! 
- base, exponent = obj_str.rsplit('\\right)', 1) - if exponent: # exponent != "" -> ^{x} -> exponent != 1 - exponent = int(exponent[:-1].lstrip('^{')) - else: - exponent = 1 - obj_str = base.replace("\\left(", "", 1) - obj = import_from_sympy_latex( - obj_str, convert_default_names=convert_default_names - ) - return Pow(obj.inner, exponent) - elif obj_str.startswith("\\left\\{"): # NO - no, unexpected_stuff = obj_str.rsplit("\\right\\}", 1) - if unexpected_stuff: - raise NotImplementedError(f"Unexpected NO object: {obj_str}.") - obj_str = no.replace("\\left\\{", "", 1) - obj = import_from_sympy_latex( - obj_str, convert_default_names=convert_default_names - ) - return NO(obj.inner) - else: # tensor or creation/annihilation operator or symbol - return import_tensor(obj_str) - - def split_terms(expr_string: str) -> list[str]: - stack: list[str] = [] - terms: list[str] = [] - - term_start_idx = 0 - for i, char in enumerate(expr_string): - if char in ['{', '(']: - stack.append(char) - elif char == '}': - assert stack.pop() == '{' - elif char == ')': - assert stack.pop() == '(' - elif char in ['+', '-'] and not stack and i != term_start_idx: - terms.append(expr_string[term_start_idx:i]) - term_start_idx = i - terms.append(expr_string[term_start_idx:]) # append last term - return terms - - def import_term(term_string: str) -> Expr: - from sympy import Mul - - stack: list[str] = [] - objects: list[str] = [] - - obj_start_idx = 0 - for i, char in enumerate(term_string): - if char in ['{', '(']: - stack.append(char) - elif char == '}': - assert stack.pop() == '{' - elif char == ')': - assert stack.pop() == '(' - # in case we have a denom of the form: - # 2a+2b+4c and not 2 * (a+b+2c) - elif char in ['+', '-'] and not stack: - return import_from_sympy_latex( - term_string, convert_default_names=convert_default_names - ).inner - elif char == " " and not stack and i != obj_start_idx: - objects.append(term_string[obj_start_idx:i]) - obj_start_idx = i + 1 - 
objects.append(term_string[obj_start_idx:]) # last object - return Mul(*(import_obj(o) for o in objects)) - - expr_string = expr_string.strip() - if not expr_string: - return ExprContainer(0) - - terms = split_terms(expr_string) - if terms[0][0] not in ['+', '-']: - terms[0] = '+ ' + terms[0] - - sympy_expr = S.Zero - for term in terms: - sign = term[0] # extract the sign of the term - if sign not in ['+', '-']: - raise ValueError(f"Found invalid sign {sign} in term {term}") - term = term[1:].strip() - - sympy_term = S.NegativeOne if sign == '-' else S.One - assert isinstance(sympy_term, Expr) - - if term.startswith("\\frac"): # fraction - # remove frac layout and split: \\frac{...}{...} - num, denom = term[:-1].replace("\\frac{", "", 1).split("}{") - else: # no denominator - num, denom = term, None - - sympy_term = Mul(sympy_term, import_term(num)) - assert isinstance(sympy_term, Expr) - if denom is not None: - sympy_term = Mul(sympy_term, S.One/import_term(denom)) - sympy_expr += sympy_term - assert isinstance(sympy_expr, Expr) - return ExprContainer(sympy_expr) - - -def evaluate_deltas( - expr: Expr, - target_idx: Sequence[str] | Index | Sequence[Index] | None = None - ) -> Expr: - """ - Evaluates the KroneckerDeltas in an expression. - The function only removes contracted indices from the expression and - ensures that no information is lost if an index is removed. - Adapted from the implementation in 'sympy.physics.secondquant'. - Note that KroneckerDeltas in a Polynom (a*b + c*d)^n will not be evaluated. - However, in most cases the expression can simply be expanded before - calling this function. - - Parameters - ---------- - expr: Expr - Expression containing the KroneckerDeltas to evaluate. This function - expects a plain object from sympy (Add/Mul/...) and no container class. 
- target_idx : Sequence[str] | Sequence[Index] | None, optional - Optionally, target indices can be provided if they can not be - determined from the expression using the Einstein sum convention. - """ - assert isinstance(expr, Expr) - - if isinstance(expr, Add): - return Add(*( - evaluate_deltas(arg, target_idx) for arg in expr.args - )) - elif isinstance(expr, Mul): - if target_idx is None: - # for determining the target indices it is sufficient to use - # atoms, which lists every index only once per object, i.e., - # (f_ii).atoms(Index) -> i. - # We are only interested in indices on deltas - # -> it is sufficient to know that an index occurs on another - # object. (twice on the same delta is not possible) - deltas: list[KroneckerDelta] = [] - indices: dict[Index, int] = {} - for obj in expr.args: - for s in obj.atoms(Index): - if s in indices: - indices[s] += 1 - else: - indices[s] = 0 - if isinstance(obj, KroneckerDelta): - deltas.append(obj) - # extract the target indices and use them in next recursion - # so they only need to be determined once - target_idx = [s for s, n in indices.items() if not n] - else: - # find all occurrences of kronecker delta - deltas = [d for d in expr.args if isinstance(d, KroneckerDelta)] - target_idx = get_symbols(target_idx) - - for d in deltas: - # determine the killable and preferred index - # in the case we have delta_{i p_alpha} we want to keep i_alpha - # -> a new index is required. But for now just don't evaluate - # these deltas - idx = d.preferred_and_killable - if idx is None: # delta_{i p_alpha} - continue - preferred, killable = idx - # try to remove killable - if killable not in target_idx: - res = expr.subs(killable, preferred) - assert isinstance(res, Expr) - expr = res - if len(deltas) > 1: - return evaluate_deltas(expr, target_idx) - # try to remove preferred. 
- # But only if no information is lost if doing so - # -> killable has to be of length 1 - elif preferred not in target_idx \ - and d.indices_contain_equal_information: - res = expr.subs(preferred, killable) - assert isinstance(res, Expr) - expr = res - if len(deltas) > 1: - return evaluate_deltas(expr, target_idx) - return expr - else: - return expr - - -def wicks(expr: Expr, rules: Rules | None = None, - simplify_kronecker_deltas: bool = False) -> Expr: - """ - Evaluates Wicks theorem in the provided expression only returning fully - contracted contributions. - Adapted from the implementation in 'sympy.physics.secondquant'. - - Parameters - ---------- - expr: Expr - Expression containing the second quantized operator strings to - evaluate. This function expects plain sympy objects (Add/Mul/...) - and no container class. - rules : Rules, optional - Rules that are applied to the result before returning, e.g., in the - context of RE not all tensor blocks might be allowed in the result. - simplify_kronecker_deltas : bool, optional - If set, the KroneckerDeltas generated through the contractions - will be evaluated before returning. 
- """ - assert isinstance(expr, Expr) - # normal ordered operator string has to evaluate to zero - # and a single second quantized operator can not be contracted - if isinstance(expr, (NO, FermionicOperator)): - return S.Zero - - # break up any NO-objects, and evaluate commutators - expr = expr.doit(wicks=True).expand() - assert isinstance(expr, Expr) - - if isinstance(expr, Add): - return Add(*( - wicks(term, rules=rules, - simplify_kronecker_deltas=simplify_kronecker_deltas) - for term in expr.args - )) - elif not isinstance(expr, Mul): - # nether Add, Mul, NO, F, Fd -> maybe a number or tensor - return expr - # -> we have a Mul object - # we don't want to mess around with commuting part of Mul - # so we factorize it out before starting recursion - c_part: list[Expr] = [] - op_string: list[FermionicOperator] = [] - for factor in expr.args: - if factor.is_commutative: - c_part.append(factor) - else: - assert isinstance(factor, FermionicOperator) - op_string.append(factor) - - if (n := len(op_string)) == 0: # no operators - result = expr - elif n == 1: # a single operator - return S.Zero - else: # at least 2 operators - result = _contract_operator_string(op_string) - result = Mul(*c_part, result).expand() - assert isinstance(result, Expr) - if simplify_kronecker_deltas: - result = evaluate_deltas(result) - - # apply rules to the result - if rules is None: - return result - assert isinstance(rules, Rules) - return rules.apply(ExprContainer(result)).inner - - -def _contract_operator_string(op_string: list[FermionicOperator]) -> Expr: - """ - Contracts the operator string only returning fully contracted - contritbutions. - Adapted from 'sympy.physics.secondquant'. 
- """ - # check that we can get a fully contracted contribution - if not _has_fully_contracted_contribution(op_string): - return S.Zero - - result = [] - for i in range(1, len(op_string)): - c = _contraction(op_string[0], op_string[i]) - if c is S.Zero: - continue - if not i % 2: # introduce -1 for swapping operators - c *= S.NegativeOne - - if len(op_string) - 2 > 0: # at least one operator left - # remove the contracted operators from the string and recurse - remaining = op_string[1:i] + op_string[i+1:] - result.append(Mul(c, _contract_operator_string(remaining))) - else: # no operators left - result.append(c) - return Add(*result) - - -def _contraction(p: FermionicOperator, q: FermionicOperator) -> Expr: - """ - Evaluates the contraction between two sqcond quantized fermionic - operators. - Adapted from 'sympy.physics.secondquant'. - """ - assert isinstance(p, FermionicOperator) - assert isinstance(q, FermionicOperator) - - p_idx, q_idx = p.args[0], q.args[0] - assert isinstance(p_idx, Index) and isinstance(q_idx, Index) - if p_idx.spin or q_idx.spin: - raise NotImplementedError("Contraction not implemented for indices " - "with spin.") - # get the space and ensure we have no unexpected space - space_p, space_q = p_idx.space[0], q_idx.space[0] - assert space_p in ["o", "v", "g"] and space_q in ["o", "v", "g"] - - if isinstance(p, F) and isinstance(q, Fd): - if space_p == "o" or space_q == "o": - res = S.Zero - elif space_p == "v" or space_q == "v": - res = KroneckerDelta(p_idx, q_idx) - else: - res = Mul( - KroneckerDelta(p_idx, q_idx), - KroneckerDelta(q_idx, Index('a', above_fermi=True)) - ) - elif isinstance(p, Fd) and isinstance(q, F): - if space_p == "v" or space_q == "v": - res = S.Zero - elif space_p == "o" or space_q == "o": - res = KroneckerDelta(p_idx, q_idx) - else: - res = Mul( - KroneckerDelta(p_idx, q_idx), - KroneckerDelta(q_idx, Index('i', below_fermi=True)) - ) - else: # vanish if 2xAnnihilator or 2xCreator - res = S.Zero - assert 
isinstance(res, Expr) - return res - - -def _has_fully_contracted_contribution(op_string: list[FermionicOperator] - ) -> bool: - """ - Takes a list of second quantized operators and checks whether a - non-vanishing fully contracted contribution can exist. - """ - if len(op_string) % 2: # odd number of operators - return False - # count the number of creation and annihilation operators per space - create = {space: 0 for space in Indices.base} - annihilate = {space: 0 for space in Indices.base} - for op in op_string: - if isinstance(op, Fd): - counter = create - else: - counter = annihilate - idx = op.args[0] - assert isinstance(idx, Index) - counter[idx.space] += 1 - # check that we have a matching amount of creation and annihilation - # operators - for space, n_create in create.items(): - if space == "general": - continue - n_annihilate = annihilate[space] + annihilate["general"] - if n_create - n_annihilate > 0: - return False - return True diff --git a/build/lib/adcgen/generate_code/__init__.py b/build/lib/adcgen/generate_code/__init__.py deleted file mode 100644 index 0533c13..0000000 --- a/build/lib/adcgen/generate_code/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .contraction import Contraction -from .generate_code import generate_code -from .optimize_contractions import ( - optimize_contractions, unoptimized_contraction -) - -__all__ = ["Contraction", "generate_code", "optimize_contractions", - "unoptimized_contraction"] diff --git a/build/lib/adcgen/generate_code/config.json b/build/lib/adcgen/generate_code/config.json deleted file mode 100644 index 23780fa..0000000 --- a/build/lib/adcgen/generate_code/config.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "sizes": { - "core": 5, - "occ": 20, - "virt": 200, - "ri": 250 - } -} \ No newline at end of file diff --git a/build/lib/adcgen/generate_code/contraction.py b/build/lib/adcgen/generate_code/contraction.py deleted file mode 100644 index 2119288..0000000 --- a/build/lib/adcgen/generate_code/contraction.py +++ 
/dev/null @@ -1,250 +0,0 @@ -from collections.abc import Sequence -from collections import Counter -from dataclasses import dataclass, fields -from pathlib import Path -from typing import Any -import itertools -import json - -from ..expression import TermContainer -from ..indices import Index, Indices, sort_idx_canonical - - -_config_file = "config.json" - - -@dataclass(frozen=True, slots=True) -class Sizes: - """ - Explicit sizes for each of the spaces (occ, virt, ...). - Used to estimate the costs of a contraction. - """ - core: int = 0 - occ: int = 0 - virt: int = 0 - general: int = 0 - ri: int = 0 - - @staticmethod - def from_dict(input: dict[str, int]) -> "Sizes": - """ - Construct an instance from dictionary. The size of the "general" space - is evaluated on the fly as sum of the sizes of the other spaces - if not provided. - """ - if "general" not in input: - input["general"] = sum(input.values()) - return Sizes(**input) - - @staticmethod - def from_config() -> "Sizes": - """ - Construct an instance using the values in the config file - (by default: "config.json"). The size of the "general" space is - evaluated on the fly as sum of the sizes of the other spaces if not - present in the config file. - """ - config_file = Path(__file__).parent.resolve() / _config_file - sizes: dict[str, int] | None = ( - json.load(open(config_file, "r")).get("sizes", None) - ) - if sizes is None: - raise KeyError(f"Invalid config file {config_file}. " - "Missing key 'sizes'.") - return Sizes.from_dict(sizes) - - -class Contraction: - """ - Represents a single contration of n objects. - - Parameters - ---------- - indices: tuple[tuple[Index]] - The indices of the contracted tensors. - names: tuple[str] - The names of the contracted tensors. - term_target_indices: tuple[Index] - The target indices of the term the contraction belongs to. 
- """ - # use counter that essentially counts how many class instances have - # been created - # -> unique id for every instance - # -> easy to differentiate and identify individual instances - _base_name = "contraction" - _instance_counter = itertools.count(0, 1) - - # fallback sizes to estimate the costs of a contraction - _sizes = Sizes.from_config() - - def __init__(self, indices: Sequence[tuple[Index, ...]], - names: Sequence[str], - term_target_indices: Sequence[Index]) -> None: - if not isinstance(indices, tuple): - indices = tuple(indices) - if isinstance(names, str): - names = (names,) - elif not isinstance(names, tuple): - names = tuple(names) - - self.indices: tuple[tuple[Index, ...], ...] = indices - self.names: tuple[str, ...] = names - self.contracted: tuple[Index, ...] = tuple() - self.target: tuple[Index, ...] = tuple() - self.scaling: Scaling - self.id: int = next(self._instance_counter) - self.contraction_name: str = f"{self._base_name}_{self.id}" - self._determine_contracted_and_target(term_target_indices) - self._determine_scaling() - - def __str__(self): - return (f"Contraction(indices={self.indices}, names={self.names}, " - f"contracted={self.contracted}, target={self.target}, " - f"scaling={self.scaling}), id={self.id}, " - f"contraction_name={self.contraction_name})") - - def __repr__(self): - return self.__str__() - - def _determine_contracted_and_target(self, - term_target_indices: Sequence[Index] - ) -> None: - """ - Determines and sets the contracted and target indices on the - contraction using the provided target indices of the term - the contraction is a part of. In case the target indices of the - contraction contain the same indices as the target indices of the - term, the target indices of the term will be used instead. 
- """ - contracted, target = self._split_contracted_and_target( - self.indices, term_target_indices - ) - # sort the indices canonical - contracted = sorted(contracted, key=sort_idx_canonical) - target = sorted(target, key=sort_idx_canonical) - # if the contraction is an outer contraction, we have to use the - # provided target indices as target indices since their order - # might be different from the canonical order. - if sorted(term_target_indices, key=sort_idx_canonical) == target: - target = term_target_indices - self.contracted = tuple(contracted) - self.target = tuple(target) - - @staticmethod - def _split_contracted_and_target(indices: Sequence[tuple[Index, ...]], - term_target_indices: Sequence[Index] - ) -> tuple[list[Index], list[Index]]: - """ - Splits the given indices in contracted and target indices using - the provided target indices of the term the contraction is a - part of. - """ - idx_counter = Counter(itertools.chain.from_iterable(indices)) - contracted: list[Index] = [] - target: list[Index] = [] - for idx, count in idx_counter.items(): - if count == 1 or idx in term_target_indices: - target.append(idx) - else: - contracted.append(idx) - return contracted, target - - def _determine_scaling(self) -> None: - """Determine the computational and memory scaling of the contraction""" - contracted_by_space = Counter(idx.space for idx in self.contracted) - target_by_space = Counter(idx.space for idx in self.target) - # computational scaling - componentwise = { - space: contracted_by_space[space] + target_by_space[space] - for space in Indices.base - } - comp_scaling = ScalingComponent(total=sum(componentwise.values()), - **componentwise) - # memory scaling - componentwise = { - space: target_by_space[space] for space in Indices.base - } - mem_scaling = ScalingComponent(total=len(self.target), - **componentwise) - # overall scaling - self.scaling = Scaling(computational=comp_scaling, memory=mem_scaling) - - def evaluate_costs(self, sizes: Sizes | None = 
None - ) -> tuple[int, int]: - """ - Estimate the costs of the contraction. Returns a tuple containing - the flop count and the memory foot print of the result tensor. - - Parameters - ---------- - sizes: dict[str, int] | Sizes | None, optional - The sizes of the individual spaces used to estimate the - computational costs and the memory footprint of the contraction. - """ - if sizes is None: - sizes = self._sizes - return self.scaling.evaluate_costs(sizes) - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, Contraction): - return False - return (self.indices == other.indices and - self.names == other.names and - self.contracted == other.contracted and - self.target == other.target and self.scaling == other.scaling) - - @staticmethod - def is_contraction(name: str) -> bool: - return name.startswith(Contraction._base_name) - - -def term_memory_requirements(term: TermContainer) -> "ScalingComponent": - """Determines the maximum memory requirements for the given term.""" - mem_scaling: list[ScalingComponent] = [] - for obj in term.objects: - space = obj.space - scaling = {"total": len(space)} - for field in fields(ScalingComponent): - if field.name == "total": - continue - scaling[field.name] = space.count(field.name[0]) - mem_scaling.append(ScalingComponent(**scaling)) - return max(mem_scaling) - - -@dataclass(frozen=True, slots=True, order=True) -class Scaling: - computational: "ScalingComponent" - memory: "ScalingComponent" - - def evaluate_costs(self, sizes: Sizes) -> tuple[int, int]: - """ - Estimate the computational costs and the memory footprint using the - provided sizes for the spaces. 
- """ - return (self.computational.evaluate_costs(sizes), - self.memory.evaluate_costs(sizes)) - - -@dataclass(frozen=True, slots=True, order=True) -class ScalingComponent: - total: int - general: int - virt: int - occ: int - core: int - ri: int - - def evaluate_costs(self, sizes: Sizes) -> int: - """ - Estimate the costs of the component using the provided sizes for the - spaces. - """ - costs = 1 - for field in fields(sizes): - base = getattr(sizes, field.name) - power = getattr(self, field.name, None) - assert power is not None - if base: - costs *= base ** power - return costs diff --git a/build/lib/adcgen/generate_code/generate_code.py b/build/lib/adcgen/generate_code/generate_code.py deleted file mode 100644 index f2bc25d..0000000 --- a/build/lib/adcgen/generate_code/generate_code.py +++ /dev/null @@ -1,390 +0,0 @@ -from collections.abc import Sequence -from collections import Counter - -from sympy import Expr, Symbol, Rational, Pow, Mul, S - -from ..expression import ExprContainer, TermContainer -from ..indices import Index, Indices -from ..logger import logger -from ..sort_expr import exploit_perm_sym -from ..symmetry import Permutation -from ..tensor_names import tensor_names - -from .contraction import Contraction, term_memory_requirements -from .optimize_contractions import ( - optimize_contractions, unoptimized_contraction -) - - -def generate_code(expr: ExprContainer, target_indices: str, - target_spin: str | None = None, - bra_ket_sym: int = 0, - antisymmetric_result_tensor: bool = True, - backend: str = "einsum", max_itmd_dim: int | None = None, - max_n_simultaneous_contracted: int | None = None, - optimize_contraction_scheme: bool = True, - space_dims: dict[str, int] | None = None) -> str: - """ - Generates contractions for a given expression using either 'einsum' - (Python) or 'libtensor' (C++) syntax. - - Parameters - ---------- - expr: ExprContainer - The expression to generate contractions for. - target_indices: str - String of target indices. 
A ',' might be inserted to indicate where - the indices are split in upper and lower indices of the result tensor, - e.g., 'ia,jb' for 'r^{ia}_{jb}'. - target_spin: str | None, optional - The spin of the target indices, e.g., 'aabb' to indicate that the - first 2 target indices have alpha spin, while number 3 and 4 have - beta spin. If not given, target indices without spin will be used. - bra_ket_sym: int, optional - The bra-ket symmetry of the result tensor. (default: 0, i.e., - no bra-ket symmetry) - antisymmetric_result_tensor: bool, optional - If set, teh result tensor will be treated as AntiSymmetricTensor - d_{ij}^{ab} = - d_{ji}^{ab}. Otherwise, a SymmetricTensor will be used - to mimic the symmetry of the result tensor, i.e., - d_{ij}^{ab} = d_{ji}^{ab}. (default: True) - backend: str, optional - The backend for which to generate contractions. (default: einsum) - max_itmd_dim: int | None, optional - Upper bound for the dimensionality of intermediate results, that - may be generated if the contractions are optimized. - max_n_simultaneous_contracted: int | None, optional - The maximum number of objects allowed to be contracted - simultaneously in a single contraction. (default: None) - optimize_contraction_scheme: bool, optional - If set, we try to find the contractions with the lowest arithmetic - and memory scaling, i.e., if possible only 2 tensors are contracted - simultaneously. (default: True) - space_dims: dict[str, int] | None, optional - The sizes of the spaces (occ, virt, ...) used to estimate the cost of - contractions. If not provided, the sizes from "config.json" will be - used. 
- """ - assert isinstance(expr, ExprContainer) - # try to reduce the number of terms by exploiting permutational symmetry - expr_with_perm_sym = exploit_perm_sym( - expr=expr, target_indices=target_indices, target_spin=target_spin, - bra_ket_sym=bra_ket_sym, - antisymmetric_result_tensor=antisymmetric_result_tensor - ) - # remove the bra-ket separator in target indices and target spin - if "," in target_indices: - target_indices = target_indices.replace(",", "") - if target_spin is not None and "," in target_spin: - target_spin = target_spin.replace(",", "") - - code = [] - for perm_symmetry, sub_expr in expr_with_perm_sym.items(): - perm_str = format_perm_symmetry(perm_symmetry) - - # generate the contrations for each of the terms - contraction_code = [] - for term in sub_expr.terms: - prefactor = format_prefactor(term, backend) - - if not term.idx: # term is just a prefactor - contraction_code.append(prefactor) - continue - if len({idx.spin for idx in term.idx}) > 1: - logger.warning("Found more than one spin in the indices of " - f"term {term}. Indices with different spin " - "might not be distinguishable in the " - "generated contractions, because only the name " - "of the indices is considered.") - - # generate the contractions for the term - if optimize_contraction_scheme: - contractions = optimize_contractions( - term=term, target_indices=target_indices, - target_spin=target_spin, max_itmd_dim=max_itmd_dim, - space_dims=space_dims, - max_n_simultaneous_contracted=max_n_simultaneous_contracted - ) - else: - contractions = unoptimized_contraction( - term=term, target_indices=target_indices, - target_spin=target_spin - ) - # build a comment describing the scaling of the contraction - # scheme - scaling_comment = format_scaling_comment( - term=term, contractions=contractions, backend=backend - ) - # identify inner and outer contractions. 
- # They are sorted in the way they need to be executed - # -> contraction can only be used in a later contraction - inner: list[Contraction] = [] - outer: list[Contraction] = [] - for i, contr in enumerate(contractions): - if any(contr.contraction_name in other_contr.names - for other_contr in contractions[i+1:]): - inner.append(contr) - else: - outer.append(contr) - # currently, there has to be only 1 outer contraction (the last - # contraction), because even if an inner contraction gives a - # number, the contraction is still kept in the pool of objects, - # i.e., contractions might contain objects without indices! - contraction_cache: dict[str, str] = {} - for contr in inner: - contr_str = format_contraction(contr, contraction_cache, - backend=backend) - contraction_cache[contr.contraction_name] = contr_str - assert len(outer) == 1 - contr_str = format_contraction(outer[0], contraction_cache, - backend=backend) - contraction_code.append( - f"{prefactor} * {contr_str} {scaling_comment}" - ) - contraction_code = '\n'.join(contraction_code) - code.append( - "The scaling comment is given as: [comp_scaling] / [mem_scaling]\n" - f"Apply {perm_str} to:\n{contraction_code}" - ) - return "\n\n".join(code) - - -def format_contraction(contraction: Contraction, - contraction_cache: dict[str, str], - backend: str) -> str: - """ - Builds a backend specific string for the given contraction. 
- """ - # split the objects in tensors and factors - # and transform the indices of the tensors to string - tensors: list[str] = [] - factors: list[str] = [] - idx_str: list[str] = [] - for name, indices in zip(contraction.names, contraction.indices): - # check the cache for the contraction string of the inner contraction - if Contraction.is_contraction(name): - name = contraction_cache.get(name, None) - if name is None: - raise KeyError("Could not find contraction string for inner " - f"contraction {contraction}.") - # we have a tensor that we need to treat depening on the backend - elif backend == "einsum": # translate eri and fock matrix - name = translate_adcc_names(name, indices) - elif backend == "libtensor": - # we can not form a partial trace in libtensor - contracted_obj_indices = [ - idx for idx in indices if idx in contraction.contracted - ] - if any(n > 1 for _, n in Counter(contracted_obj_indices).items()): - raise NotImplementedError( - "Libtensor can not handle a partial trace, i.e., a trace " - f"with a tensor as result. 
Found {indices} on tensor " - f"{name} of contraction\n{contraction}" - ) - # translate eri and t2eri - name = translate_libadc_names(name, indices) - name = f"{name}({'|'.join(idx.name for idx in indices)})" - - if indices: # we have a tensor - tensors.append(name) - # build a string for the indices - idx_str.append("".join(idx.name for idx in indices)) - else: # we have a factor without indices - factors.append(name) - # also transform the target indices to string - target = "".join(idx.name for idx in contraction.target) - - if backend == "einsum": - return format_einsum_contraction(tensors=tensors, factors=factors, - indices=idx_str, target=target) - elif backend == "libtensor": - return format_libtensor_contraction(tensors=tensors, factors=factors, - target=target, - contracted=contraction.contracted) - else: - raise NotImplementedError("Contraction not implemented for backend " - f"{backend}.") - - -def format_einsum_contraction(tensors: list[str], factors: list[str], - indices: list[str], target: str) -> str: - """ - Builds a contraction string for the given contraction using Python - numpy einsum syntax. - """ - - components = [*factors] - # special case: single tensor with the correct target indices - # -> no einsum needed - if len(tensors) == 1 and indices[0] == target: - components.append(tensors[0]) - elif tensors: # we need a einsum: reorder or contraction or outer - contr_str = f"\"{','.join(indices)}->{target}\"" - components.append( - f"einsum({contr_str}, {', '.join(tensors)})" - ) - return " * ".join(components) - - -def format_libtensor_contraction(tensors: list[str], factors: list[str], - target: str, contracted: Sequence[Index] - ) -> str: - """ - Builds a contraction string for the given contraction using libtensor - C++ syntax. 
- """ - - components = [*factors] - if len(tensors) == 1: # single tensor - assert not contracted # trace - components.append(tensors[0]) - elif len(tensors) > 1: # multipe tensors - # hyper-contraction only implemented for 3 tensors i think - if contracted and target: # contract - components.append( - f"contract({'|'.join(s.name for s in contracted)}, " - f"{', '.join(tensors)})" - ) - elif not contracted and target: # outer product - components.extend(tensors) - elif contracted and not target: # inner product - components.append(f"dot_product({', '.join(tensors)})") - else: - raise NotImplementedError("No target and contracted indices in " - f"contraction of {tensors} and " - f"{factors}.") - return " * ".join(components) - - -def translate_adcc_names(name: str, indices: Sequence[Index]) -> str: - """Translates tensor names specifically for adcc.""" - if name.startswith(tensor_names.eri): - space = "".join(s.space[0] for s in indices) - return f"hf.{space}" - elif name.startswith(tensor_names.fock): - space = "".join(s.space[0] for s in indices) - return f"hf.f{space}" - return name - - -def translate_libadc_names(name: str, indices: Sequence[Index]) -> str: - if name.startswith(tensor_names.eri): - space = "".join(s.space[0] for s in indices) - return f"i_{space}" - elif name.startswith("t2eri"): - _, n = name.split("_") - return f"pi{n}" - return name - - -def format_scaling_comment(term: TermContainer, - contractions: list[Contraction], - backend: str) -> str: - """ - Builds a backend specific comment describing the scaling of the - contraction scheme. 
- """ - max_comp_scaling = max(contr.scaling.computational - for contr in contractions) - max_mem_scaling = max(contr.scaling.memory for contr in contractions) - max_mem_scaling = max(max_mem_scaling, term_memory_requirements(term)) - comp = [f"N^{max_comp_scaling.total}: "] - mem = [f"N^{max_mem_scaling.total}: "] - for space in Indices.base: - if (n := getattr(max_comp_scaling, space)): - comp.append(f"{space[0].capitalize()}^{n}") - if (n := getattr(max_mem_scaling, space)): - mem.append(f"{space[0].capitalize()}^{n}") - if backend == "einsum": - comment_token = "#" - elif backend == "libtensor": - comment_token = "//" - else: - raise NotImplementedError("Comment token not implemented for backend " - f"{backend}.") - return f"{comment_token} {''.join(comp)} / {''.join(mem)}" - - -def format_prefactor(term: TermContainer, backend: str) -> str: - """Formats the prefactor for Python (einsum) or C++ (libtensor).""" - # extract number and symbolic prefactor - number_pref = term.prefactor - symbol_pref = " * ".join( - [obj.base.name for obj in term.objects if isinstance(obj.base, Symbol) - for _ in range(int(obj.exponent))] - ) - # extract the sign - if number_pref < S.Zero: - sign = "-" - number_pref *= S.NegativeOne - else: - sign = "+" - # format the number prefactor (depends on the backend) - if backend == "einsum": # python - number_pref = _format_python_prefactor(number_pref) - elif backend == "libtensor": # C++ - number_pref = _format_cpp_prefactor(number_pref) - else: - raise NotImplementedError(f"Prefactor for backend {backend} not " - "implemented.") - # combine the contributions - if symbol_pref: - return f"{sign} {number_pref} * {symbol_pref}" - else: - return f"{sign} {number_pref}" - - -def _format_python_prefactor(prefactor: Expr) -> str: - """Formats a prefactor using Python syntax.""" - - if prefactor == int(prefactor): # natural number - return str(prefactor) - elif prefactor in [Rational(1, 2), Rational(1, 4)]: # simple Rational - return 
str(float(prefactor)) - elif isinstance(prefactor, Rational): # more complex rational - return f"{prefactor.p} / {prefactor.q}" - elif isinstance(prefactor, Pow) and prefactor.args[1] == Rational(1, 2): - return f"sqrt({prefactor.args[0]})" - elif isinstance(prefactor, Mul): - return " * ".join( - _format_python_prefactor(pref) for pref in prefactor.args - ) - raise NotImplementedError( - f"Formatting of prefactor {prefactor}, {type(prefactor)} " - "not implemented." - ) - - -def _format_cpp_prefactor(prefactor: Expr) -> str: - """Formats a prefactor using C++ syntax.""" - - if prefactor == int(prefactor) or \ - prefactor in [Rational(1, 2), Rational(1, 4)]: - return str(float(prefactor)) - elif isinstance(prefactor, Rational): - return f"{float(prefactor.p)} / {float(prefactor.q)}" - elif isinstance(prefactor, Pow) and prefactor.args[1] == Rational(1, 2): - return f"constants::sq{prefactor.args[0]}" - elif isinstance(prefactor, Mul): - return " * ".join( - _format_cpp_prefactor(pref) for pref in prefactor.args - ) - raise NotImplementedError( - f"Formatting of prefactor {prefactor}, {type(prefactor)} " - "not implemented." 
- ) - - -def format_perm_symmetry( - perm_symmetry: tuple[tuple[tuple[Permutation, ...], int], ...]) -> str: - """Formats the permutational symmetry.""" - perm_sym = ["1"] - for permutations, factor in perm_symmetry: - assert factor in [1, -1] - contrib = ["+ "] if factor == 1 else ["- "] - for perm in permutations: - contrib.append(str(perm)) - perm_sym.append("".join(contrib)) - if len(perm_sym) == 1: - return "1" - return f"({' '.join(perm_sym)})" diff --git a/build/lib/adcgen/generate_code/optimize_contractions.py b/build/lib/adcgen/generate_code/optimize_contractions.py deleted file mode 100644 index f90f23e..0000000 --- a/build/lib/adcgen/generate_code/optimize_contractions.py +++ /dev/null @@ -1,329 +0,0 @@ -from collections.abc import Sequence -from typing import Generator -import itertools - -from sympy import Symbol, S - -from ..expression import TermContainer -from ..indices import get_symbols, Index -from ..sympy_objects import SymbolicTensor, KroneckerDelta -from .contraction import Contraction, Sizes - - -def optimize_contractions(term: TermContainer, - target_indices: str | None = None, - target_spin: str | None = None, - max_itmd_dim: int | None = None, - max_n_simultaneous_contracted: int | None = None, - space_dims: dict[str, int] | None = None - ) -> list[Contraction]: - """ - Find the optimal contraction scheme with the lowest computational - and memory scaling for a given term. Thereby, the computational scaling - is prioritized over the memory scaling. - - Parameters - ---------- - term: TermContainer - Find the optimal contraction scheme for this term. - target_indices: str | None, optional - The target indices of the term. If not given, the canonical target - indices of the term according to the Einstein sum convention - will be used. For instance, 2 occupied and 2 virtual - indices will always be in the order 'ijab'. Therefore, target indices - have to be provided if the result tensor has indices 'iajb'. 
- target_spin: str | None, optional - The spin of the target indices, e.g., "aabb" for - alpha, alpha, beta, beta. If not given, target indices without spin - will be used. - max_itmd_dim: int | None, optional - Upper bound for the dimensionality of intermediates created by - inner contractions if the contractions are nested, i.e., - the dimensionality of the result of contr2 and contr3 is restricted in - "contr1(contr2(contr3(...)))". - max_n_simultaneous_contracted: int | None, optional - The maximum number of objects allowed to be contracted - simultaneously in a single contraction. (default: None) - space_dims: dict[str, int] | None, optional - The sizes of the spaces (occ, virt, ...) used to estimate the cost of - contractions. If not provided, the sizes from "config.json" will be - used. - """ - # - import (or extract) the target indices - if target_indices is None: - target_symbols = term.target - else: - target_symbols = tuple(get_symbols(target_indices, target_spin)) - # - import the space sizes/dims - if isinstance(space_dims, dict): - space_sizes = Sizes.from_dict(space_dims) - else: - assert space_dims is None - space_sizes = None - # - extract the relevant part (tensors and deltas) of the term - relevant_obj_names: list[str] = [] - relevant_obj_indices: list[tuple[Index, ...]] = [] - for obj in term.objects: - base, exp = obj.base_and_exponent - if obj.inner.is_number: # skip number prefactor - continue - elif exp < S.Zero: - raise NotImplementedError(f"Found object {obj} with exponent " - f"{exp} < 0. 
Contractions not " - "implemented for divisions.") - elif isinstance(base, Symbol): # skip symbolic prefactor - continue - elif not isinstance(base, (SymbolicTensor, KroneckerDelta)): - raise NotImplementedError("Contractions can only be optimized for " - "tensors and KroneckerDeltas.") - name, indices = obj.longname(), obj.idx - assert name is not None - assert exp.is_Integer - relevant_obj_names.extend(name for _ in range(int(exp))) - relevant_obj_indices.extend(indices for _ in range(int(exp))) - assert len(relevant_obj_names) == len(relevant_obj_indices) - - if not relevant_obj_names: # no tensors or deltas in the term - return [] - elif len(relevant_obj_names) == 1: - # trivial: only a single tensor/delta with exponent 1 - # - resorting of indices - # - trace - return [Contraction( - indices=relevant_obj_indices, names=relevant_obj_names, - term_target_indices=target_symbols - )] - # lazily find the contraction schemes - contraction_schemes = _optimize_contractions( - relevant_obj_names=tuple(relevant_obj_names), - relevant_obj_indices=tuple(relevant_obj_indices), - target_indices=target_symbols, max_itmd_dim=max_itmd_dim, - max_n_simultaneous_contracted=max_n_simultaneous_contracted - ) - # go through all schemes and find the one with the lowest scaling by - # considering the: - # 1) Computational scaling (flop count) - # 2) Memory scaling (number of elements to store) - optimal_scaling = None - optimal_scheme = None - for scheme in contraction_schemes: - # determine the costs for the current contraction scheme - arithmetic = 0 - memory = 0 - for contr in scheme: - nflops, mem = contr.evaluate_costs(space_sizes) - arithmetic += nflops - memory += mem - scaling = (arithmetic, memory) - if optimal_scaling is None or scaling < optimal_scaling: - optimal_scheme = scheme - optimal_scaling = scaling - # the generator is empty, i.e., we could not find any contraction scheme - if optimal_scheme is None: - raise RuntimeError("Could not find a valid contraction scheme 
for " - f"term {term} while restricting the maximum " - f"dimensionality of intermediates to " - f"{max_itmd_dim} and allowing simultaneous " - f"contractions of {max_n_simultaneous_contracted} " - "objects.") - return optimal_scheme - - -def _optimize_contractions(relevant_obj_names: Sequence[str], - relevant_obj_indices: Sequence[tuple[Index, ...]], - target_indices: Sequence[Index], - max_itmd_dim: int | None = None, - max_n_simultaneous_contracted: int | None = None, - ) -> Generator[list[Contraction], None, None]: - """ - Find the optimal contractions for the given relevant objects of a term. - """ - assert len(relevant_obj_indices) == len(relevant_obj_names) - if len(relevant_obj_names) < 2: - raise ValueError("Need at least 2 objects to define a contraction.") - - # split the relevant objects into subgroups that share contracted indices - # and therefore should be contracted simultaneously - connected_groups = _group_objects( - obj_indices=relevant_obj_indices, target_indices=target_indices, - max_group_size=max_n_simultaneous_contracted - ) - - for group in connected_groups: - contr_indices = tuple(relevant_obj_indices[pos] for pos in group) - contr_names = tuple(relevant_obj_names[pos] for pos in group) - contraction = Contraction(indices=contr_indices, names=contr_names, - term_target_indices=target_indices) - # if the contraction is not an outer contraction we have to check - # the dimensionality of the intermediate tensor - if max_itmd_dim is not None and \ - contraction.target != target_indices and \ - len(contraction.target) > max_itmd_dim: - continue - # remove the contracted names and indices - remaining_pos = [pos for pos in range(len(relevant_obj_names)) - if pos not in group] - remaining_names = (contraction.contraction_name, - *(relevant_obj_names[pos] for pos in remaining_pos)) - remaining_indices = (contraction.target, *(relevant_obj_indices[pos] - for pos in remaining_pos)) - # there are no objects left to contract -> we are done - if 
len(remaining_names) == 1: - yield [contraction] - continue - # recurse to build further contractions - completed_schemes = _optimize_contractions( - relevant_obj_names=remaining_names, - relevant_obj_indices=remaining_indices, - target_indices=target_indices, max_itmd_dim=max_itmd_dim, - max_n_simultaneous_contracted=max_n_simultaneous_contracted - ) - for contraction_scheme in completed_schemes: - contraction_scheme.insert(0, contraction) - yield contraction_scheme - - -def _group_objects(obj_indices: Sequence[tuple[Index, ...]], - target_indices: Sequence[Index], - max_group_size: int | None = None - ) -> tuple[tuple[int, ...], ...]: - """ - Split the provided relevant objects into subgroups that share common - contracted indices. Thereby, a group can at most contain 'max_group_size' - objects. By default, all objects are allowed to be in one group. - """ - # NOTE: the algorithm currently maximizes the number of contracted - # indices, i.e., a contraction runs over all common contracted - # indices. While this is fine in most cases, it might be benefitial - # to not contract all possible indices simultaneously - # in certain cases, since this leads to an increased group size: - # 0 1 2 - # d_ijk d_ij d_jl - # 0 and 1 share i and j. A contraction running only over i can be - # performed for the pair (0, 1). However, if the contraction runs - # runs over i and j, we have to consider the triple (0, 1, 2). 
- - # sanity checks for input - assert len(obj_indices) > 1 # we need at least 2 objects - if max_group_size is None: - max_group_size = len(obj_indices) - assert max_group_size > 1 # group size has to be at least 2 - - # track on which objects the indices appear - idx_occurences: dict[Index, list[int]] = {} - for pos, indices in enumerate(obj_indices): - for idx in indices: - if idx not in idx_occurences: - idx_occurences[idx] = [] - idx_occurences[idx].append(pos) - - # store grouped objects and isolated objects (outer products) - # for the groups we are using a dict, since it by default returns - # keys in the order they were inserted. A set would need to be sorted - # before returning to produce consistent results. - groups: dict[tuple[int, ...], None] = {} - outer_products: list[tuple[int, int]] = [] - # iterate over all pairs of objects (index tuples) - for (pos1, indices1), (pos2, indices2) in \ - itertools.combinations(enumerate(obj_indices), 2): - # check if the objects have any common contracted indices - # -> outer products can be treated as pair - contracted, _ = Contraction._split_contracted_and_target( - (indices1, indices2), target_indices - ) - if not contracted: - outer_products.append((pos1, pos2)) - continue - # get all the objects any of the contracted indices appears - positions = {pos for idx in contracted for pos in idx_occurences[idx]} - # group too large - if len(positions) > max_group_size: - continue - # avoid duplication: 0, 1 and 2 are connected by a common index - # -> the pair 0,1 and 0,2 will both give the triple 0,1,2 - # which will then grow in the same way independent of the starting - # pair. - key = tuple(sorted(positions)) - if key in groups: - continue - # store the minimal group - groups[key] = None - - # self-consistently update the contracted indices and the positions - # This corresponds to maximizing the group size. - # However, it is unclear if growing the group leads to a better - # scaling contraction. 
Therefore, also store smaller groups - while True: - # update the contracted indices - new_contracted, _ = Contraction._split_contracted_and_target( - [obj_indices[pos] for pos in positions], target_indices - ) - # no new contracted indices pulled in -> we are done - if contracted == new_contracted: - break - # update the positions - new_positions = { - pos for idx in new_contracted for pos in idx_occurences[idx] - } - # no new positions or the extended group is too large - if new_positions == positions or \ - len(new_positions) > max_group_size: - break - # store the current group before trying to further - # increase the size - groups[tuple(sorted(new_positions))] = None - contracted = new_contracted - positions = new_positions - return (*groups.keys(), *outer_products) - - -def unoptimized_contraction(term: TermContainer, - target_indices: str | None = None, - target_spin: str | None = None - ) -> list[Contraction]: - """ - Determines the unoptimized contraction for the given term, i.e., - a simultaneous hyper-contraction of all tensors and deltas. - - Parameters - ---------- - term: TermContainer - Build an unoptimized contraction for the given term. - target_indices: str | None, optional - The target indices of the term. If not given, the canonical target - indices of the term according to the Einstein sum convention - will be used. - target_sin: str | None, optional - The spin of the target indices, e.g., "aabb" for - alpha, alpha, beta, beta. If not given, target indices without spin - will be used. 
- """ - # - import (or extract) the target indices - if target_indices is None: - target_symbols = term.target - else: - target_symbols = tuple(get_symbols(target_indices, target_spin)) - # extract the relevant part of the term - relevant_obj_names: list[str] = [] - relevant_obj_indices: list[tuple[Index, ...]] = [] - for obj in term.objects: - base, exp = obj.base_and_exponent - if obj.inner.is_number: # skip number prefactor - continue - elif exp < S.Zero: - raise NotImplementedError(f"Found object {obj} with exponent " - f"{exp} < 0. Contractions not " - "implemented for divisions.") - elif isinstance(base, Symbol): # skip symbolic prefactor - continue - elif not isinstance(base, (SymbolicTensor, KroneckerDelta)): - raise NotImplementedError("Contractions only implemented for " - "tensors and KroneckerDeltas.") - name, indices = obj.longname(), obj.idx - assert name is not None - assert exp.is_Integer - relevant_obj_names.extend(name for _ in range(int(exp))) - relevant_obj_indices.extend(indices for _ in range(int(exp))) - assert len(relevant_obj_indices) == len(relevant_obj_names) - return [Contraction(indices=relevant_obj_indices, names=relevant_obj_names, - term_target_indices=target_symbols)] diff --git a/build/lib/adcgen/groundstate.py b/build/lib/adcgen/groundstate.py deleted file mode 100644 index 4ee5128..0000000 --- a/build/lib/adcgen/groundstate.py +++ /dev/null @@ -1,476 +0,0 @@ -from math import factorial - -from sympy.physics.secondquant import NO, Dagger -from sympy import Expr, Mul, Rational, S, latex - -from .expression import ExprContainer -from .func import gen_term_orders, wicks -from .indices import Indices, n_ov_from_space -from .logger import logger -from .misc import cached_member, Inputerror, validate_input -from .operators import Operators -from .simplify import simplify -from .sympy_objects import Amplitude -from .tensor_names import tensor_names - - -class GroundState: - """ - Constructs ground state expressions using 
Rayleigh-Schrödinger - perturbation theory. - - Parameters - ---------- - hamiltonian : Operators - An Operators instance to request the partitioned Hamiltonian and - other Operators. - first_order_singles : bool, optional - If set, the first order wavefunction will contain single amplitudes. - (Defaults to False) - """ - def __init__(self, hamiltonian: Operators, - first_order_singles: bool = False): - assert isinstance(hamiltonian, Operators) - self.indices: Indices = Indices() - self.h: Operators = hamiltonian - self.singles: bool = first_order_singles - - @cached_member - def energy(self, order: int) -> Expr: - """ - Constructs an expression for the n'th-order ground state energy - contribution. - - Parameters - ---------- - order : int - The perturbation theoretical order. - """ - # NOTE: this function assumes a block diagonal H0 - # in the general case we have to include <0|H0|n> - - validate_input(order=order) - - h, rules = self.h.h0 if order == 0 else self.h.h1 - bra = self.psi(order=0, braket="bra") - ket = self.psi(order=0, braket='ket') if order == 0 else \ - self.psi(order=order-1, braket='ket') - e = Mul(bra, h, ket) - e = wicks(e, simplify_kronecker_deltas=True, rules=rules) - # option 1: return the not simplified energy -> will give a lot more - # terms later on - # option 2: simplify the energy expression and replace the indices with - # new, generic indices - # guess option 2 is nicer, because energy is more readable and shorter - e = simplify(ExprContainer(e)).substitute_with_generic() - logger.debug(f"E^({order}) = {e}") - return e.inner - - def psi(self, order: int, braket: str) -> Expr: - """ - Constructs the n'th-order ground state wavefunction without inserting - definitions of the respective ground state amplitudes. - - Parameters - ---------- - order : int - The perturbation theoretical order. - braket: str - Possible values: 'bra', 'ket'. Defines whether a bra or ket - wavefunction is constructed. 
- """ - # Can't cache ground state wave function! - # Leads to an error for terms of the form: - # |1><2|1>... the two |1> need to have different indices!! - # |1><1|2>... |1> and |2> can't share indices - # -> Therefore, each time a gs wavefunction is requested new indices - # need to be used. - # But one can still use overlapping indices within a wavefunction - # e.g. singles: ia, doubles ijab, triples ijkabc - - validate_input(order=order, braket=braket) - - # catch 0th order wavefunction - if order == 0: - logger.debug(f"gs({order}) {braket} = 1") - return S.One - - # generalized gs wavefunction generation - tensor_name = f"{tensor_names.gs_amplitude}{order}" - if braket == "bra": - tensor_name += "cc" - idx = self.indices.get_generic_indices(occ=2*order, virt=2*order) - virtual = idx[("virt", "")] - occupied = idx[("occ", "")] - psi = S.Zero - for excitation in range(1, order * 2 + 1): - # skip singles for the first order wavefunction if - # they are not requested - if order == 1 and not self.singles and excitation == 1: - continue - # build tensor - virt: list = virtual[:excitation] - occ: list = occupied[:excitation] - t = Amplitude(tensor_name, virt, occ) - # build operators - operators = self.h.excitation_operator( - creation=virt, annihilation=occ, reverse_annihilation=True - ) - if braket == "bra": - operators = Dagger(operators) - # prefactor for lifting index restrictions - prefactor = Rational(1, factorial(excitation) ** 2) - # For signs: Decided to subtract all Doubles to stay consistent - # with existing implementations of the amplitudes. - # The remaining amplitudes (S/T/Q...) are added! 
- # (denominator for Triples: i+j+k-a-b-c - # Doubles: a+b-i-j) - if excitation == 2: # doubles - psi -= prefactor * t * NO(operators) - else: - psi += prefactor * t * NO(operators) - assert isinstance(psi, Expr) - logger.debug(f"gs({order}) {braket} = {latex(psi)}") - return psi - - def amplitude(self, order: int, space: str, indices: str) -> Expr: - """ - Constructs the n'th-order expression for the ground state t-amplitudes. - - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space, e.g., 'ph' or 'pphh' for singly or doubly - excited configurations. - indices : str - The indices the t-amplitude. - """ - variant = self.h._variant - if variant == 'mp': - return self.mp_amplitude(order, space, indices) - elif variant == 're': - return self.amplitude_residual(order, space, indices) - else: - raise NotImplementedError("Amplitudes not implemented for " - f"{self.h._variant}") - - @cached_member - def mp_amplitude(self, order: int, space: str, indices: str) -> Expr: - """ - Constructs the closed n'th-order expression for the MP t-amplitudes. - - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space, e.g., 'ph' or 'pphh' for singly or doubly - excited configurations. - indices : str - The indices of the constructed t-amplitude. 
- """ - from .intermediates import orb_energy - - validate_input(order=order, space=space, indices=indices) - - n_ov = n_ov_from_space(space) - if n_ov["occ"] != n_ov["virt"]: - raise Inputerror("Invalid space string for a MP t-amplitude: " - f"{space}.") - # if the space is not present at the requested order return 0 - if n_ov["occ"] > 2 * order: - return S.Zero - - idx = self.indices.get_indices(indices) - lower = idx.get(("occ", ""), []) - upper = idx.get(("virt", ""), []) - if n_ov["occ"] != len(lower) or n_ov["virt"] != len(upper): - raise Inputerror(f"Provided indices {indices} are not adequate for" - f" space {space}.") - - # build the denominator - if len(lower) == 2: # doubles amplitude: a+b-i-j - occ_factor = S.NegativeOne - virt_factor = S.One - else: # any other amplitude: i-a // i+j+k-a-b-c // ... - occ_factor = S.One - virt_factor = S.NegativeOne - - denom = S.Zero - for s in lower: - denom += occ_factor * orb_energy(s) - for s in upper: - denom += virt_factor * orb_energy(s) - - # build the bra state: - h1, rules = self.h.h1 - contrib = Mul(bra, h1, self.psi(order-1, "ket")) - numerator += wicks( - contrib, simplify_kronecker_deltas=True, rules=rules - ) - # subtract: - sum_{m=1} E_0^(m) * t_k^(n-m) - terms = gen_term_orders(order=order, term_length=2, min_order=1) - for o1, o2 in terms: - # check if a t-amplitude of order o2 exists with special - # treatment of the first order singles amplitude - if (n_ov["occ"] > 2 * o2) or \ - (n_ov["occ"] == 1 and o2 == 1 and not self.singles): - continue - name = f"{tensor_names.gs_amplitude}{o2}" - contrib = Mul( - self.energy(o1), Amplitude(name, upper, lower) - ).expand() - if n_ov["occ"] == 2: # doubles... special sign - numerator += contrib - else: - numerator -= contrib - res = numerator / denom - assert isinstance(res, Expr) - return res - - @cached_member - def amplitude_residual(self, order: int, space: str, indices: str) -> Expr: - """ - Constructs the n'th-order residual for ground state amplitudes. 
- - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space, e.g., 'ph' or 'pphh' for singly or doubly - excited configurations. - indices : str - The indices of the constructed t-amplitude. - """ - # + - sum_{m=0}^n E^{(m)} t_k^{(n-m)} = 0 - - # NOTE: Currently the implementation is general and should work for - # arbitrary 0th order Hamiltonians. - # Performance can be improved if the block structure - # of the RE hamiltonian is taken into account before evaluting - # wicks theorem! (Currently its done afterwards) - - # validate the input - validate_input(order=order, space=space, indices=indices) - n_ov = n_ov_from_space(space) - if n_ov["occ"] != n_ov["virt"]: - raise Inputerror(f"Invalid space for a RE t-amplitude: {space}.") - if n_ov["occ"] > 2 * order: # space not present at the order - return S.Zero - - # get the target indices and validate - idx = self.indices.get_indices(indices) - occupied = idx.get(("occ", ""), []) - virtual = idx.get(("virt", ""), []) - if n_ov["occ"] != len(occupied) or n_ov["virt"] != len(virtual): - raise Inputerror(f"Indices {indices} are not valid for space " - f"{space}.") - - # - build + ) - h0, rule = self.h.h0 - term = Mul(bra, h0, self.psi(order, 'ket')) - res += wicks(term, rules=rule, simplify_kronecker_deltas=True) - - h1, rule = self.h.h1 - term = Mul(bra, h1, self.psi(order - 1, 'ket')) - res += wicks(term, rules=rule, simplify_kronecker_deltas=True) - - # - subtract sum_{m=0}^n E^{(m)} t_k^{(n-m)} - for e_order, t_order in gen_term_orders(order, 2, 0): - # check if a t amplitude of order t_order exists - # special treatment of first order singles - if n_ov["occ"] > 2 * t_order or \ - (n_ov["occ"] == 1 and t_order == 1 and not self.singles): - continue - name = f"{tensor_names.gs_amplitude}{t_order}" - contrib = Mul( - self.energy(e_order), Amplitude(name, virtual, occupied) - ).expand() - if n_ov["occ"] == 2: # doubles -> different sign! 
- res += contrib - else: - res -= contrib - assert isinstance(res, Expr) - return res - - def overlap(self, order: int) -> Expr: - """ - Computes the n'th-order contribution to the ground state overlap - matrix. - - Parameters - ---------- - order : int - The perturbation theoretical order. - """ - validate_input(order=order) - - # catch zeroth order - if order == 0: - return S.One - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - for term in orders: - # each wfn is requested only once -> no need to precompute and - # cache - i1 = Mul( - self.psi(order=term[0], braket='bra'), - self.psi(order=term[1], braket='ket') - ) - res += wicks(i1, simplify_kronecker_deltas=True) - # simplify the result by permuting contracted indices - res = simplify(ExprContainer(res)) - logger.debug(f"gs S^({order}) = {res}") - return res.inner - - @cached_member - def expectation_value(self, order: int, n_particles: int) -> Expr: - """ - Constructs the n'th-order contribution to the expectation value for - the given operator. - - Parameters - ---------- - order : int - The perturbation theoretical order. - n_particles : int - The number of creation and annihilation operators in the operator - string. - """ - validate_input(order=order) - # - import all mp wavefunctions. It should be possible here, because - # it is not possible to obtain a term |1>*x*|1>. 
- wfn = {} - for o in range(order + 1): - wfn[o] = {} - for bk in ["bra", "ket"]: - wfn[o][bk] = self.psi(order=o, braket=bk) - - # better to generate twice orders for length 2 than once for length 3 - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - # get the operator - op, rules = self.h.operator(n_create=n_particles, - n_annihilate=n_particles) - # iterate over all norm*d combinations of n'th order - for norm_term in orders: - norm = self.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # compute d for a given norm factor - orders_d = gen_term_orders( - order=norm_term[1], term_length=2, min_order=0 - ) - d = S.Zero - for term in orders_d: - i1 = wfn[term[0]]['bra'] * op * wfn[term[1]]['ket'] - d += wicks(i1, simplify_kronecker_deltas=True, rules=rules) - res += (norm * d).expand() - return simplify(ExprContainer(res)).inner - - def norm_factor(self, order: int) -> Expr: - """ - Constructs the n'th-order contribution of the factor - that corrects the the norm of the ground state wavefunction: - 1 - sum_i S^(i) + (sum_i S^(i))^2 - ... - which is the result of a taylor expansion of a^2 - S = a^2 sum_{i=0} S^{(i)} = 1 -> a^2 = [sum_{i=0} S^{(i)}]^{(-1)}. - - Parameters - ---------- - order : int - The perturbation theoretical order. - """ - # This can not be cached! - # in case there is something like a(2)*a(2)*x - # do the two a(2) need to be different? 
- # all a(n) only consist of t-amplitudes and all indices are - # contracted - # a(2) = 0.25*t_d^(2) - # a(2)*a(2) = 1/16 * t_d^(2) * t_d'^(2) - # -> no caching allowed - # Then it is also not possible to cache the overlap matrix - validate_input(order=order) - - taylor_expansion = self.expand_norm_factor(order=order, min_order=2) - norm_factor = S.Zero - for pref, termlist in taylor_expansion: - for term in termlist: - i1 = pref - for o in term: - i1 = Mul(i1, self.overlap(o)) - if i1 is S.Zero: - break - norm_factor += i1.expand() - assert isinstance(norm_factor, Expr) - logger.debug(f"norm_factor^({order}): {latex(norm_factor)}") - return norm_factor - - def expand_norm_factor(self, order, min_order=2 - ) -> list[tuple[Expr, list[tuple[int, ...]]]]: - """ - Constructs the taylor expansion of the n'th-order contribution to the - normalization factor a - f = (1 + x)^(-1), - where x is defined as x = sum_i S^{(i)}. - - Parameters - ---------- - order : int - The perturbation theoretical order. - min_order : int, optional - The lowest order non-vanishing contribution of the overlap matrix S - excluding the zeroth order contribution which is assumed to have - a value of 1. - - Returns - ------- - list - Iterable containing tuples of prefactors and perturbation - theoretical orders, for instance with a min_order of 2 the - 5'th order contribution reads - [(-1, [(5,)]), (1, [(2, 3), (3, 2)])]. - """ - from sympy import symbols, diff, nsimplify - - validate_input(order=order, min_order=min_order) - if min_order == 0: - raise Inputerror("A minimum order of 0 does not make sense here.") - - # below min_order all orders of the overlap matrix should be 0. 
- # only the zeroth order contribution should be 1 - # -> obtain 0 or 1 from the overlap function -> handled automatically - if order < min_order: - return [(S.One, [(order,)])] - - x = symbols('x') - f = (1 + x) ** -1.0 - ret: list[tuple[Expr, list[tuple[int, ...]]]] = [] - for exp in range(1, order//min_order + 1): - f = diff(f, x) - pref = nsimplify( - f.subs(x, 0) * S.One / factorial(exp), rational=True - ) - orders = gen_term_orders( - order=order, term_length=exp, min_order=min_order - ) - ret.append((pref, orders)) - return ret diff --git a/build/lib/adcgen/indices.py b/build/lib/adcgen/indices.py deleted file mode 100644 index dd1b34e..0000000 --- a/build/lib/adcgen/indices.py +++ /dev/null @@ -1,527 +0,0 @@ -from collections.abc import Sequence, Collection, Mapping -from typing import Any, TypeGuard, TYPE_CHECKING - -from sympy import Dummy, Tuple - -from .misc import Inputerror, Singleton - -if TYPE_CHECKING: - from .symmetry import Permutation - - -class Index(Dummy): - """ - Represents an Index. Wrapper implementation around the 'sympy.Dummy' - class, which means Index("x") != Index("x"). - Important assumptions: - - below_fermi: The index represents an occupied orbital. - - above_fermi: The index represents a virtual orbital. - - core: The index represents a core orbital. - - alpha: The index represents an alpha (spatial) orbital. - - beta: The index represents a beta (spatial) orbital. - """ - - @property - def spin(self) -> str: - """ - The spin of the index. An empty string is returned if no spin is - defined. - """ - if self.assumptions0.get("alpha"): - return "a" - elif self.assumptions0.get("beta"): - return "b" - else: - return "" - - @property - def space(self) -> str: - """ - The space to which the index belongs (core/occupied/virtual/general). 
- """ - if self.assumptions0.get("below_fermi"): - return "occ" - elif self.assumptions0.get("above_fermi"): - return "virt" - elif self.assumptions0.get("core"): - return "core" - elif self.assumptions0.get("ri"): - return "ri" - else: - return "general" - - @property - def space_and_spin(self) -> tuple[str, str]: - """Returns space and spin of the Index.""" - return self.space, self.spin - - def __str__(self): - spin = self.spin - return f"{self.name}_{spin}" if spin else self.name - - def __repr__(self) -> str: - return self.__str__() - - def _sympystr(self, printer): - _ = printer - return self.__str__() - - def _latex(self, printer) -> str: - _ = printer - ret = self.name - if (spin := self.spin): - spin = "alpha" if spin == "a" else "beta" - ret += "_{\\" + spin + "}" - return ret - - -class Indices(metaclass=Singleton): - """ - Manages the indices used thoughout the package and ensures that - only a single class instance exists for each index. - This is necessary because the 'equality' operator is essentially replaced - by the 'is' operator for the indices: Index("x") != Index("x"). - """ - # the valid spaces with their corresponding associated index names - base = { - "occ": "ijklmno", "virt": "abcdefgh", "general": "pqrstuvw", - "core": "IJKLMNO", "ri": "PQRSTUVWXYZ" - } - # the valid spins - spins = ("", "a", "b") - # the generation of generic indices starts with e.g., "i3" for occupied - # indices. Therefore, the indices "i", "i1" and "i2" are only available - # through a specific request to get_indices - _initial_counter = 3 - - def __init__(self) -> None: - # dict that holds all symbols that have been created previously. - # structure: {space: {spin: {name: symbol}}} - self._symbols: dict[str, dict[str, dict[str, Index]]] = {} - # dict that holds the automatically generated generic index names - # (strings) that have not been used yet. 
- # structure: {space: {spin: [names]}} - self._generic_indices: dict[str, dict[str, list[str]]] = {} - # dict holding the counter (the number appended to the index names) - # for the generation of generic indices. - self._counter: dict[str, dict[str, int]] = {} - # initialize the data structures - for space in self.base: - self._symbols[space] = {} - self._generic_indices[space] = {} - self._counter[space] = {} - for spin in self.spins: - self._symbols[space][spin] = {} - self._generic_indices[space][spin] = [] - self._counter[space][spin] = self._initial_counter - - def is_cached_index(self, index: Index) -> bool: - """ - Whether an index was generated with the 'Indices' class and is thus - cached in the class. - """ - cached_symbol = ( - self._symbols[index.space][index.spin].get(index.name, None) - ) - return cached_symbol is index - - def _gen_generic_idx(self, space: str, spin: str = ""): - """ - Generated the next 'generation' of generic indices, i.e. extends - _generic_indices by incrementing the _counter attached to the index - base. - """ - # generate the not used indices of the new generation - counter = str(self._counter[space][spin]) - used_names = self._symbols[space][spin] - new_idx = [idx + counter for idx in self.base[space] - if idx + counter not in used_names] - # extend the available generic indices and increment the counter - self._generic_indices[space][spin].extend(new_idx) - self._counter[space][spin] += 1 - - def get_indices(self, indices: Sequence[str], - spins: Sequence[str] | None = None - ) -> dict[tuple[str, str], list[Index]]: - """ - Obtain the Indices for the provided string of names. - - Parameters - ---------- - indices : Sequence[str] - The names of the indices as a single string that is split - automatically as "ij21kl3" -> i, j21, k, l3. - They can also provided as list/tuple of index names. 
- spins : Sequence[str] | None, optional - The spins of the indices as a single string, e.g., "aaba" - to obtain four indices with spin: alpha, alpha, beta, alpha. - Since no spin is represented by the empty string, it is only - possible to obtain indices with and without spin when the spins - are provided as list/tuple. - - Returns - ------- - dict - key: tuple containing the space and spin of the indices. - value: list containing the indices in the order the indices are - provided in the input. - """ - # split the string of indices - if isinstance(indices, str): - indices = split_idx_string(indices) - if spins is None: - spins = ["" for _ in range(len(indices))] - if len(indices) != len(spins): - raise Inputerror(f"Indices {indices} and spins {spins} do not " - "match.") - - ret: dict[tuple[str, str], list[Index]] = {} - for idx, spin in zip(indices, spins): - space = index_space(idx) - key = (space, spin) - if key not in ret: - ret[key] = [] - # check the cache for the index - symbol = self._symbols[space][spin].get(idx, None) - if symbol is not None: - ret[key].append(symbol) - continue - # not found in cache - # -> create new symbol and cache it - symbol = self._new_symbol(idx, space, spin) - self._symbols[space][spin][idx] = symbol - ret[key].append(symbol) - # -> also remove it from the available generic indices - try: - self._generic_indices[space][spin].remove(idx) - except ValueError: - continue - return ret - - def get_generic_indices(self, **kwargs - ) -> dict[tuple[str, str], list[Index]]: - """ - Request indices with unique names that have not been used in the - current run of the program. Easy way to ensure that contracted indices - do not appear anywhere else in a term. - Indices can be requested using the syntax "{space}_{spin}", - where spin is optional. For instance, occupied indices without - spin can be obtained with "occ=5", or "occ_a=5" for occupied indices - with alpha spin. 
- - Returns - ------- - dict - key: tuple of space and spin of the indices. - value: list containing the indices. - """ - - ret = {} - for key, n in kwargs.items(): - if n == 0: - continue - key = key.split("_") - if len(key) == 2: - space, spin = key - elif len(key) == 1: - space, spin = key[0], "" - else: - raise Inputerror(f"{'_'.join(key)} is not valid for " - "requesting generic indices.") - # generate generic index names until we have enough - while n > len(self._generic_indices[space][spin]): - self._gen_generic_idx(space, spin) - # get the indices - idx = self._generic_indices[space][spin][:n] - spins = tuple(spin for _ in range(n)) - ret.update(self.get_indices(idx, spins)) - return ret - - def _new_symbol(self, name: str, space: str, spin: str) -> Index: - """Creates a new symbol with the defined name, space and spin.""" - assumptions = {} - if space == "occ": - assumptions["below_fermi"] = True - elif space == "virt": - assumptions["above_fermi"] = True - elif space == "core": - assumptions["core"] = True - elif space == "ri": - assumptions["ri"] = True - elif space != "general": - raise ValueError(f"Invalid space {space}") - if spin: - if spin == "a": - assumptions["alpha"] = True - elif spin == "b": - assumptions["beta"] = True - else: - raise ValueError(f"Invalid spin {spin}.") - return Index(name, **assumptions) - - -def index_space(idx: str) -> str: - """Returns the space an index belongs to (occ/virt/general).""" - for sp, idx_string in Indices.base.items(): - if idx[0] in idx_string: - return sp - raise Inputerror(f"Could not assign the index {idx} to a space.") - - -def sort_idx_canonical(idx: Index | Any): - """Use as sort key to to bring indices in canonical order.""" - if isinstance(idx, Index): - # - also add the hash here for wicks, where multiple i are around - # - we have to map the spaces onto numbers, since in adcman and adcc - # the ordering o < c < v is used for the definition of canonical blocks - space_keys = {"g": 0, "o": 1, "c": 2, 
"v": 3, "r": 4} - return (space_keys[idx.space[0]], - idx.spin, - int(idx.name[1:]) if idx.name[1:] else 0, - idx.name[0], - hash(idx)) - else: # necessary for subs to work correctly with simultaneous=True - return (-1, "", 0, str(idx), hash(idx)) - - -def split_idx_string(str_tosplit: str) -> list[str]: - """ - Splits an index string of the form 'ij12a3b' in a list ['i','j12','a3','b'] - """ - splitted = [] - temp = [] - for i, idx in enumerate(str_tosplit): - temp.append(idx) - try: - if str_tosplit[i+1].isdigit(): - continue - else: - splitted.append("".join(temp)) - temp.clear() - except IndexError: - splitted.append("".join(temp)) - return splitted - - -def n_ov_from_space(space_str: str): - """ - Number of required occupied and virtual indices required for the given - exictation space, e.g., "pph" -> 2 virtual and 1 occupied index. - """ - return {"occ": space_str.count("h"), "virt": space_str.count("p")} - - -def generic_indices_from_space(space_str: str) -> list[Index]: - """ - Constructs generic indices from a given space string (e.g. 'pphh'). - Thereby, occupied indices are listed before virtual indices! - """ - generic_idx = Indices().get_generic_indices(**n_ov_from_space(space_str)) - assert len(generic_idx) <= 2 # only occ and virt - occ = generic_idx.get(("occ", ""), []) - occ.extend(generic_idx.get(("virt", ""), [])) - occ.extend(generic_idx.get(("core", ""), [])) - occ.extend(generic_idx.get(("ri", ""), [])) - return occ - - -def repeated_indices(idx_a: str, idx_b: str) -> bool: - """Checks whether both index strings share an index.""" - split_a = split_idx_string(idx_a) - split_b = split_idx_string(idx_b) - return any(i in split_b for i in split_a) - - -def get_lowest_avail_indices(n: int, used: Collection[str], space: str - ) -> list[str]: - """ - Return the names of the n lowest available indices belonging to the desired - space. - - Parameters - ---------- - n : int - The number of available indices. 
- used : Collection[str] - The names of the indices that are already in use. - space : str - The space (occ/virt/general) to which the indices belong. - """ - # generate idx pool to pick the lowest indices from - base = Indices.base[space] - idx = list(base) - required = len(used) + n # the number of indices present in the term - suffix = 1 - while len(idx) < required: - idx.extend(s + str(suffix) for s in base) - suffix += 1 - # remove the already used indices (that are not available anymore) - # and return the first n elements of the resulting list - return [s for s in idx if s not in used][:n] - - -def get_symbols(indices: Sequence[str] | Index | Sequence[Index], - spins: Sequence[str] | None = None) -> list[Index]: - """ - Wrapper around the Indices class to initialize 'Index' instances with the - provided names and spin. - - Parameters - ---------- - indices : Index | Sequence[str] | Sequence[Index] - The names of the indices to generate. If they are already instances - of the 'Index' class we do nothing. - spins : Sequence[str] | None, optional - The spin of the indices, e.g., "aab" to obtain 3 indices with - alpha, alpha and beta spin. 
- """ - - if not indices: # empty string/list - return [] - elif isinstance(indices, Index): # a single symbol is not iterable - return [indices] - elif _is_index_sequence(indices): - return indices if isinstance(indices, list) else list(indices) - # we actually have to do something - # -> prepare indices and spin - if isinstance(indices, str): - indices = split_idx_string(indices) - if spins is None: - spins = ["" for _ in range(len(indices))] - # at this point we should have a list/tuple of strings - # construct the indices - assert _is_str_sequence(indices) - symbols = Indices().get_indices(indices, spins) - # and return them in the order they were provided in the input - for val in symbols.values(): - val.reverse() - ret = [symbols[(index_space(idx), spin)].pop() - for idx, spin in zip(indices, spins)] - assert not any(symbols.values()) # ensure we consumed all indices - return ret - - -def order_substitutions(subsdict: dict[Index, Index] - ) -> list[tuple[Index, Index]]: - """ - Order substitutions such that only a minial amount of intermediate - indices is required when the substitutions are executed one after another - and the usage of the 'simultaneous=True' option of sympys 'subs' method. - Adapted from the 'substitute_dummies' function defined in - 'sympy.physics.secondquant'. - """ - - subs = [] - final_subs = [] - for o, n in subsdict.items(): - if o is n: # indices are identical -> nothing to do - continue - # the new index is substituted by another index - if (other_n := subsdict.get(n, None)) is not None: - if other_n in subsdict: - # i -> j / j -> i - # temporary variable is needed - p = Index('p') - subs.append((o, p)) - final_subs.append((p, n)) - else: - # i -> j / j -> k - # in this case it is sufficient to do the i -> j substitution - # after the j -> k substitution, but before temporary variables - # are resubstituted again. 
- final_subs.insert(0, (o, n)) - else: - subs.append((o, n)) - subs.extend(final_subs) - return subs - - -def minimize_tensor_indices( - tensor_indices: Sequence[Index], - target_idx_names: Mapping[tuple[str, str], Collection[str]] - ) -> "tuple[tuple[Index, ...], tuple[Permutation, ...]]": - """ - Minimizes the indices on a tensor using the lowest available indices that - are no target indices. - - Parameters - ---------- - tensor_indices : Sequence[Index] - The indices of the tensor. - target_idx : dict[tuple[str, str], Collection[str]] - The names of target indices sorted by their space and spin with - key = (space, spin). - - Returns - ------- - tuple - The minimized indices and the list of index permutations that was - applied to reach this minimized state. - """ - from .symmetry import Permutation, PermutationProduct - - for target in target_idx_names.values(): - if not all(isinstance(s, str) for s in target): - raise TypeError("Target indices need to be provided as string.") - - tensor_idx: list[Index] = list(tensor_indices) - n_unique_indices: int = len(set(tensor_idx)) - minimal_indices: dict[tuple[str, str], list[Index]] = {} - permutations: list[Permutation] = [] - minimized = set() - for s in tensor_idx: - if s in minimized: - continue - idx_key = s.space_and_spin - # target indices of the corresponding space - space_target = target_idx_names.get(idx_key, []) - # index is a target idx -> keep as is - if s.name in space_target: - minimized.add(s) - continue - # generate minimal indices for the corresponding space and spin - if idx_key not in minimal_indices: - space, spin = idx_key - min_names = get_lowest_avail_indices(n_unique_indices, - space_target, space) - if spin: - spins = spin * n_unique_indices - else: - spins = None - min_symbols = get_symbols(min_names, spins) - min_symbols.reverse() - minimal_indices[idx_key] = min_symbols - - # get the lowest available index for the corresponding space - min_s = minimal_indices[idx_key].pop() - 
minimized.add(min_s) - if s is min_s: # s is already the lowest available index - continue - # found a lower index - # -> permute tensor indices and append permutation to permutations - # list - perm = {s: min_s, min_s: s} - for i, other_s in enumerate(tensor_idx): - tensor_idx[i] = perm.get(other_s, other_s) - permutations.append(Permutation(s, min_s)) - return tuple(tensor_idx), PermutationProduct(*permutations) - - -################################################ -# Some TypeGuards to make the type checker happy -############################################### -def _is_index_sequence(sequence: Sequence) -> TypeGuard[Sequence[Index]]: - return all(isinstance(s, Index) for s in sequence) - - -def _is_index_tuple(sequence: tuple | Tuple) -> TypeGuard[tuple[Index, ...]]: - return all(isinstance(s, Index) for s in sequence) - - -def _is_str_sequence(sequence: Sequence) -> TypeGuard[Sequence[str]]: - return ( - isinstance(sequence, str) or all(isinstance(s, str) for s in sequence) - ) - - -_ = _is_index_tuple diff --git a/build/lib/adcgen/intermediate_states.py b/build/lib/adcgen/intermediate_states.py deleted file mode 100644 index 2580ac6..0000000 --- a/build/lib/adcgen/intermediate_states.py +++ /dev/null @@ -1,598 +0,0 @@ -from collections.abc import Sequence -from math import factorial - -from sympy.physics.secondquant import NO, Dagger -from sympy import Expr, Mul, Rational, S, latex, nsimplify, diff, symbols - -from .expression import ExprContainer -from .func import gen_term_orders, wicks, evaluate_deltas -from .groundstate import GroundState -from .indices import ( - n_ov_from_space, repeated_indices, Indices, generic_indices_from_space -) -from .logger import logger -from .misc import cached_member, Inputerror, transform_to_tuple, validate_input -from .simplify import simplify -from .sympy_objects import Amplitude -from .tensor_names import tensor_names - - -class IntermediateStates: - """ - Class for constructing epxressions for Precursor or Intermediate 
states. - - Parameters - ---------- - mp : GroundState - Representation of the underlying ground state. Used to generate - ground state related expressions. - variant : str, optional - The ADC variant for which Intermediates are constructed, e.g., - 'pp', 'ip' or 'ea' for PP-, IP- or EA-ADC expressions, respectively - (default: 'pp'). - """ - def __init__(self, mp: GroundState, variant: str = "pp"): - assert isinstance(mp, GroundState) - self.gs: GroundState = mp - self.indices: Indices = Indices() - - variants: dict[str, tuple[str, ...]] = { - "pp": ("ph", "hp"), - "ea": ("p",), - "ip": ("h",), - "dip": ("hh",), - "dea": ("pp",), - } - if variant not in variants.keys(): - raise Inputerror(f"The ADC variant {variant} is not valid. " - "Supported variants are " - f"{list(variants.keys())}.") - self.variant: str = variant - self.min_space: tuple[str, ...] = variants[variant] - - @cached_member - def precursor(self, order: int, space: str, braket: str, indices: str - ) -> Expr: - """ - Constructs expressions for precursor states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space of the desired precursor state, e.g., 'ph' or - 'pphh' for singly or doubly excited precursor states. - braket : str - Defines whether a bra or ket precursor state is constructed. - indices : str - The indices of the precursor state. 
- """ - - # check input parameters - indices_tpl = transform_to_tuple(indices) - validate_input(order=order, space=space, braket=braket, - indices=indices_tpl) - if len(indices_tpl) != 1: - raise Inputerror(f"{indices} are not valid for constructing a " - "precursor state.") - indices = indices_tpl[0] - del indices_tpl - # check that the space is valid for the given ADC variant - if not self.validate_space(space): - raise Inputerror(f"{space} is not a valid space for " - f"{self.variant} ADC.") - - # get the target symbols of the precursor state - idx = self.indices.get_indices(indices) - # check compatibility of indices and space - if idx.get(("general", "")): - raise Inputerror(f"The provided indices {indices} include a " - "general index.") - n_ov = n_ov_from_space(space) - occupied = idx.get(("occ", ""), []) - virtual = idx.get(("virt", ""), []) - if len(occupied) != n_ov["occ"] or len(virtual) != n_ov["virt"]: - raise Inputerror(f"The indices {indices} and the space {space} " - "are not compatible.") - del n_ov # prevent accidentally usage below - - # in contrast to the gs, here the operators are ordered as - # abij instead of abji in order to stay consistent with the - # ADC literature. - operators = self.gs.h.excitation_operator( - creation=virtual, annihilation=occupied, reverse_annihilation=False - ) - if braket == "bra": - operators = Dagger(operators) - - res = S.Zero - - # leading term: - # no need to differentiate bra/ket here, because - # operators * mp = mp * operators (there is always an equal number of - # p/h operators in mp that needs to be moved to the other side. - # Will always give +.) - max_gs = self.gs.psi(order=order, braket=braket) - res += Mul(NO(operators), max_gs).expand() - - # get all terms of a*b of the desired order (ground state norm) - orders = gen_term_orders(order=order, term_length=2, min_order=0) - - # orthogonalise with respect to the ground state for pp ADC. - # checked up to 4th order! 
- if self.variant == "pp": - # import all ground state wave functions that may not appear twice - # in |a>, i.e. all of: order > int(order/2) - gs_psi: dict[str, dict[int, Expr]] = {'bra': {}, 'ket': {}} - gs_psi[braket][order] = max_gs - for o in range(order//2 + 1, order+1): - if not gs_psi['bra'].get(o): - gs_psi['bra'][o] = self.gs.psi(order=o, braket='bra') - if not gs_psi['ket'].get(o): - gs_psi['ket'][o] = self.gs.psi(order=o, braket='ket') - - def get_gs_wfn(o, bk): return gs_psi[bk][o] if o > order//2 else \ - self.gs.psi(order=o, braket=bk) - - # 1) iterate through all combinations of norm_factor*projector - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # 2) construct the projector for a given norm_factor - # the overall order is split between the norm_factor and the - # projector - orders_projection = gen_term_orders( - order=norm_term[1], term_length=3, min_order=0 - ) - projection = S.Zero - for term in orders_projection: - # |Y> <-- -|X> - if braket == "ket": - i1 = Mul( - get_gs_wfn(term[1], 'bra'), NO(operators), - get_gs_wfn(term[2], 'ket') - ) - state = get_gs_wfn(term[0], 'ket') - # - n_ov = n_ov_from_space(lower_space) - prefactor = Rational( - 1, factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - del n_ov - - # orthogonalise with respsect to the lower excited ISR state - # 1) iterate through all combinations of norm_factor*projector - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # 2) construct the projector for a given norm factor - # the overall order is split between he norm_factor and the - # projector - orders_projection = gen_term_orders( - norm_term[1], term_length=3, min_order=0 - ) - projection = S.Zero - for term in orders_projection: - # |Y#> <-- -|X> - if braket == "ket": - i1 = Mul( - self.intermediate_state(order=term[1], - space=lower_space, - braket="bra", - indices=idx_isr), - NO(operators), - 
self.gs.psi(order=term[2], braket="ket") - ) - state = self.intermediate_state( - order=term[0], space=lower_space, braket="ket", - indices=idx_isr - ) - # Expr: - """ - Constructs expressions for elements of the overlap matrix of the - precursor states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The block of the overlap matrix, e.g., 'ph,ph' for an element of - the 1p-1h/1p-1h block. - indices : Sequence[str] - The indices of the overlap matrix element, e.g., 'ia,jb' for - S_{ia,jb}. - """ - - # no need to do more validation here -> will be done in precursor - block = transform_to_tuple(block) - indices = transform_to_tuple(indices) - validate_input(order=order, block=block, indices=indices) - if len(indices) != 2: - raise Inputerror("2 index strings required for an overlap matrix " - f"block. Got: {indices}.") - - if repeated_indices(indices[0], indices[1]): - raise Inputerror("Repeated index found in indices of precursor " - f"overlap matrix: {indices}.") - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - - res = S.Zero - # 1) iterate through all combinations of norm_factor*S - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # 2) construct S for a given norm factor - # the overall order is split between the norm_factor and S - orders_overlap = gen_term_orders( - order=norm_term[1], term_length=2, min_order=0 - ) - overlap = S.Zero - for term in orders_overlap: - i1 = Mul( - self.precursor(order=term[0], space=block[0], - braket="bra", indices=indices[0]), - self.precursor(order=term[1], space=block[1], - braket="ket", indices=indices[1]) - ) - i1 = wicks(i1, simplify_kronecker_deltas=True) - overlap += i1 - res += (norm * overlap).expand() - # It should be valid to simplifiy the result by permuting contracted - # indices before returning -> should lower the overall size of the - # final expression - res = 
simplify(ExprContainer(res)) - logger.debug(f"overlap {block} S_{indices}^({order}) = {res}") - return res.inner - - @cached_member - def s_root(self, order: int, block: Sequence[str], - indices: Sequence[str]) -> Expr: - """ - Constructs expression for elements of the inverse square root of the - precursor overlap matrix (S^{-0.5})_{I,J} by expanding - S^{-0.5} in a Taylor series. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The desired matrix block, e.g., 'ph,pphh' for an element of the - 1p-1h/2p-2h block. - indices : Sequence[str] - The indices of the matrix element, e.g., 'ia,jkcd' for - (S^{-0.5})_{ia,jkcd}. - """ - - block = transform_to_tuple(block) - indices = transform_to_tuple(indices) - validate_input(order=order, block=block, indices=indices) - if len(indices) != 2: - raise Inputerror("2 index strings required for a block of the " - "inverse suqare root of the overlap matrix. " - f"Got: {indices}.") - if repeated_indices(indices[0], indices[1]): - raise Inputerror(f"Repeated index found in indices {indices}.") - if block[0] != block[1]: - raise NotImplementedError("Off diagonal blocks of the overlap " - "matrix should be 0 by definition. 
" - "Simply don't know how to handle the " - "index generation needed in this case.") - - taylor_expansion = self.expand_S_taylor(order, min_order=2) - # create an index list: first and last element are the two provided - # idx strings - idx: list[str] = list(indices) - # create more indices: exponent-1 or len(taylor_expansion)-1 indices - # - x*x 1 additional index 'pair' is required: I,I' = I,I'' * I'',I' - # - x^3: I,I' = I,I'' * I'',I''' * I''',I' - for _ in range(len(taylor_expansion) - 1): - new_idx: str = "".join( - s.name for s in generic_indices_from_space(block[0]) - ) - idx.insert(-1, new_idx) - # iterate over exponents and terms, starting with the lowest exponent - res = S.Zero - for pref, termlist in taylor_expansion: - # all terms in the list should have the same length, i.e. - # all originate from x*x or x^3 etc. - for term in termlist: - relevant_idx = idx[:len(term)] + [idx[-1]] - i1 = S.One * pref - for o in term: - i1 *= self.overlap_precursor( - order=o, block=block, - indices=tuple(relevant_idx[:2]) - ) - del relevant_idx[0] - if i1 is S.Zero: - break - assert ( - len(relevant_idx) == 1 and - relevant_idx[0] == indices[1] - ) - # in squared or higher terms S*S*... delta evaluation might - # be necessary - res += evaluate_deltas(i1.expand()) - assert isinstance(res, Expr) - logger.debug( - f"{block} S_root_{indices}^({order}) = {latex(res)}" - ) - return res - - @cached_member - def intermediate_state(self, order: int, space: str, braket: str, - indices: str) -> Expr: - """ - Constructs expressions for intermediate states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space of the desired intermediate state, e.g., - 'ph' and 'pphh' for singly and doubly excited intermediate states. - braket : str - Defines whether a bra or ket intermediate state is constructed. - indices : str - The indices of the intermediate state. - """ - indices_tpl: tuple[str, ...] 
= transform_to_tuple(indices) - validate_input(order=order, space=space, braket=braket, - indices=indices_tpl) - if len(indices_tpl) != 1: - raise Inputerror(f"{indices} are not valid for " - "constructing an intermediate state.") - indices = indices_tpl[0] - del indices_tpl - - # generate additional indices for the precursor state - idx_pre: str = "".join( - s.name for s in generic_indices_from_space(space) - ) - - n_ov = n_ov_from_space(space) - prefactor = Rational( - 1, factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - del n_ov - - # sandwich the IS and precursor indices together - s_indices = { - 'bra': ",".join([indices, idx_pre]), - 'ket': ",".join([idx_pre, indices]) - } - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - for term in orders: - i1 = Mul( - prefactor, - self.s_root(order=term[0], block=(space, space), - indices=s_indices[braket]), - self.precursor(order=term[1], space=space, braket=braket, - indices=idx_pre) - ) - res += evaluate_deltas(i1.expand()) - assert isinstance(res, Expr) - logger.debug(f"{space} ISR_({indices}^({order}) {braket} = " - f"{latex(res)}") - return res - - @cached_member - def overlap_isr(self, order: int, block: Sequence[str], - indices: Sequence[str]) -> Expr: - """ - Computes a block of the overlap matrix in the basis of intermediate - states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The desired matrix block. - indices : Sequence[str] - The indices of the matrix element. - """ - - block = transform_to_tuple(block) - indices = transform_to_tuple(indices) - validate_input(order=order, block=block, indices=indices) - if len(indices) != 2: - raise Inputerror("Constructing a ISR overlap matrix block requires" - f" 2 index strings. 
Provided: {indices}.") - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - # 1) iterate through all combinations of norm_factor*S - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # 2) construct S for a given norm factor - # the overall order is split between he norm_factor and S - orders_overlap = gen_term_orders( - order=norm_term[1], term_length=2, min_order=0 - ) - overlap = S.Zero - for term in orders_overlap: - i1 = Mul( - self.intermediate_state(order=term[0], space=block[0], - braket="bra", - indices=indices[0]), - self.intermediate_state(order=term[1], space=block[1], - braket="ket", - indices=indices[1]) - ) - i1 = wicks(i1, simplify_kronecker_deltas=True) - overlap += i1 - res += (norm * overlap).expand() - assert isinstance(res, Expr) - logger.debug(f"ISR overlap {block} S_{indices}^({order}) = " - f"{latex(res)}") - return res - - @cached_member - def amplitude_vector(self, indices: str, lr: str = "right") -> Expr: - """ - Constructs an amplitude vector with the provided indices. - - Parameters - ---------- - indices : str - The indices of the amplitude vector. - lr : str, optional - Whether a left (X) or right (Y) amplitude vector is constructed - (default: 'right'). - """ - - validate_input(indices=indices, lr=lr) - - idx = self.indices.get_indices(indices) - occ = idx.get(("occ", ""), []) - virt = idx.get(("virt", ""), []) - - name = getattr(tensor_names, f"{lr}_adc_amplitude") - return Amplitude(name, virt, occ) - - def expand_S_taylor(self, order: int, min_order: int = 2 - ) -> list[tuple[Expr, list[tuple[int, ...]]]]: - """ - Performs a Taylor expansion of the inverse square root of the - overlap matrix - S^{0.5} = (1 + x)^{-0.5} with x = sum_{n=1} S^(n) - returning all n'th-order contributions. - - Parameters - ---------- - order : int - The perturbation theoretical order. 
- min_order : int, optional - The lowest order at which the overlap matrix S has a non-vanishing - caontribution excluding the zeroth order contribution - (default: 2). - - Returns - ------- - list - Iterable containing tuples of prefactors and perturbation - theoretical orders, for instance, with a min_order of 2 the - 5'th order contributions read - [(-1/2, [(5,)]), (3/8, [(2, 3), (3, 2)])]. - """ - validate_input(order=order, min_order=min_order) - if min_order == 0: - raise Inputerror("A minimum order of 0 does not make sense here.") - - # below min_order all orders - except the zeroth order contribution - - # should be zero. Should be handled automatically if the corresponding - # orders are forwarded to the overlap method. - if order < min_order: - return [(S.One, [(order,)])] - - x = symbols('x') - f = (1 + x) ** -0.5 - ret: list[tuple[Expr, list[tuple[int, ...]]]] = [] - for exp in range(1, order//min_order + 1): - f = diff(f, x) - pref = nsimplify( - f.subs(x, 0) * S.One / factorial(exp), rational=True - ) - orders = gen_term_orders( - order=order, term_length=exp, min_order=min_order - ) - assert isinstance(pref, Expr) - ret.append((pref, orders)) - return ret - - def _generate_lower_spaces(self, space_str: str) -> list[str]: - """ - Generates all strings of lower excited configurations for a given - excitation space. - - Parameters - ---------- - space_str : str - The space for which to construct lower excitation spaces, e.g., - ['ph'] for 'pphh'. - """ - lower_spaces: list[str] = [] - for _ in range(min(space_str.count('p'), space_str.count('h'))): - space_str = space_str.replace('p', '', 1).replace('h', '', 1) - if not space_str: - break - lower_spaces.append(space_str) - return lower_spaces - - def validate_space(self, space_str: str) -> bool: - """ - Checks whether the given space is valid for the current ADC variant. - - Parameters - ---------- - space_str : str - The excitation space to validate. 
- """ - - if space_str in self.min_space: - return True - - lower_spaces = self._generate_lower_spaces(space_str) - return any(sp in self.min_space for sp in lower_spaces) diff --git a/build/lib/adcgen/intermediates.py b/build/lib/adcgen/intermediates.py deleted file mode 100644 index 2022f18..0000000 --- a/build/lib/adcgen/intermediates.py +++ /dev/null @@ -1,1663 +0,0 @@ -from collections.abc import Sequence, Callable -from collections import Counter -from dataclasses import dataclass -from functools import cached_property -import itertools - -from sympy import Add, Expr, Min, Mul, Pow, Rational, S - -from .expression import ExprContainer, ObjectContainer -from .core_valence_separation import allowed_cvs_blocks -from .indices import ( - Indices, Index, - get_symbols, order_substitutions, sort_idx_canonical, -) -from .logger import logger -from .misc import Inputerror, Singleton, cached_member -from .eri_orbenergy import EriOrbenergy -from .sympy_objects import NonSymmetricTensor, AntiSymmetricTensor, Amplitude -from .symmetry import LazyTermMap, Permutation -from .spatial_orbitals import allowed_spin_blocks -from .tensor_names import tensor_names - - -@dataclass(frozen=True, slots=True) -class ItmdExpr: - expr: Expr - target: tuple[Index, ...] - contracted: tuple[Index, ...] | None - - -class Intermediates(metaclass=Singleton): - """ - Manages all defined intermediates. - New intermediates can be defined by inheriting from - 'RegisteredIntermediate'. - """ - - def __init__(self): - self._registered: dict[str, dict[str, RegisteredIntermediate]] = ( - RegisteredIntermediate()._registry - ) - self._available: dict[str, RegisteredIntermediate] = { - name: obj for objects in self._registered.values() - for name, obj in objects.items() - } - - @property - def available(self) -> dict[str, "RegisteredIntermediate"]: - """ - Returns all available intermediates using their name as dict key. 
- """ - return self._available - - @property - def types(self) -> list[str]: - """Returns all available types of intermediates.""" - return list(self._registered.keys()) - - def __getattr__(self, attr: str) -> dict[str, "RegisteredIntermediate"]: - if attr in self._registered: # is the attr an intermediate type? - return self._registered[attr] - elif attr in self._available: # is the attr an intermediate name? - return {attr: self._available[attr]} - else: - raise AttributeError(f"{self} has no attribute {attr}. " - f"The intermediate types: {self.types} " - "and the intermediate names: " - f"{list(self.available.keys())} are " - "available.") - - -class RegisteredIntermediate: - """ - Base class for defined intermediates. - New intermediates can be added by inheriting from this class and require: - - an itmd type '_itmd_type' - - an perturbation theoretical order '_order' - - names of default indices '_default_idx' - - a method that fully expands the itmd into orbital energies and ERI - '_build_expanded_itmd' - - a method that returns the itmd tensor '_build_tensor' - """ - _registry: dict[str, dict[str, "RegisteredIntermediate"]] = {} - _itmd_type: str | None = None - _order: int | None = None - _default_idx: tuple[str, ...] 
| None = None - - def __init_subclass__(cls) -> None: - itmd_type = cls._itmd_type - assert itmd_type is not None - if itmd_type not in cls._registry: - cls._registry[itmd_type] = {} - if (name := cls.__name__) not in cls._registry[itmd_type]: - cls._registry[itmd_type][name] = cls() - - @property - def name(self) -> str: - """Name of the intermediate (the class name).""" - return type(self).__name__ - - @property - def order(self) -> int: - """Perturbation theoretical order of the intermediate.""" - if not hasattr(self, "_order") or self._order is None: - raise AttributeError(f"No order defined for {self.name}.") - return self._order - - @property - def default_idx(self) -> tuple[str, ...]: - """Names of the default indices of the intermediate.""" - if not hasattr(self, "_default_idx") or self._default_idx is None: - raise AttributeError(f"No default indices defined for {self.name}") - return self._default_idx - - @property - def itmd_type(self) -> str: - """The type of the intermediate.""" - if not hasattr(self, "_itmd_type") or self._itmd_type is None: - raise AttributeError(f"No itmd_type defined for {self.name}.") - return self._itmd_type - - def validate_indices(self, - indices: Sequence[str] | Sequence[Index] | None = None - ) -> list[Index]: - """ - Ensures that the indices are valid for the intermediate and - transforms them to 'Index' instances. 
- """ - if indices is None: # no need to validate the default indices - return get_symbols(self.default_idx) - - indices = get_symbols(indices) - default = get_symbols(self.default_idx) - if len(indices) != len(default): - raise Inputerror("Wrong number of indices for the itmd " - f"{self.name}.") - elif any(s.space != d.space for s, d in zip(indices, default)): - raise Inputerror(f"The indices {indices} are not valid for the " - f"itmd {self.name}") - return indices - - def expand_itmd(self, - indices: Sequence[str] | Sequence[Index] | None = None, - wrap_result: bool = True, fully_expand: bool = True - ) -> Expr | ExprContainer: - """ - Expands the intermediate into orbital energies and ERI. - - Parameters - ---------- - indices : Sequence[str] | Sequence[Index], optional - The names of the indices of the intermediate. By default the - default indices (defined on the itmd class) will be used. - wrap_result : bool, optional - Whether to wrap the result in an - :py:class:ExprContainer. (default: True) - fully_expand : bool, optional - True (default): The returned intermediate is recursively fully - expanded into orbital energies and ERI (if possible). - False: Returns a more readable version which is not recusively - expanded, e.g., n't-order MP t-amplitudes are expressed by - means of (n-1)'th-order MP t-amplitudes. - """ - # check that the provided indices are fine for the itmd - indices = self.validate_indices(indices) - # currently all intermediates are only implemented for spin orbitals, - # because the intermediate definition depends on the spin, i.e., - # we would need either multiple definitions per intermediate or - # incorporate the spin in the intermediate names. - if any(idx.spin for idx in indices): - raise NotImplementedError( - "Intermediates not implemented for indices with spin " - "(spatial orbitals)." 
- ) - - # build a cached base version of the intermediate where we can just - # substitute indices in - expanded_itmd = self._build_expanded_itmd(fully_expand) - - # build the substitution dict - subs: dict[Index, Index] = {} - # map target indices onto each other - if (base_target := expanded_itmd.target) is not None: - subs.update({o: n for o, n in zip(base_target, indices)}) - # map contracted indices onto each other (replace them by generic idx) - if (base_contracted := expanded_itmd.contracted) is not None: - spaces = [s.space_and_spin for s in base_contracted] - kwargs = Counter( - f"{sp}_{spin}" if spin else sp for sp, spin in spaces - ) - contracted = Indices().get_generic_indices(**kwargs) - for new in contracted.values(): - new.reverse() - for old, sp in zip(base_contracted, spaces): - subs[old] = contracted[sp].pop() - if any(li for li in contracted.values()): - raise RuntimeError("Generated more contracted indices than " - f"necessary. {contracted} are left.") - - # do some extra work with the substitutions to avoid using the - # simultantous=True option for subs (very slow) - itmd = expanded_itmd.expr.subs(order_substitutions(subs)) - assert isinstance(itmd, Expr) - - if itmd is S.Zero and expanded_itmd.expr is not S.Zero: - raise ValueError(f"The substitutions {subs} are not valid for " - f"{expanded_itmd.expr}.") - - if wrap_result: - itmd = ExprContainer(itmd, target_idx=indices) - return itmd - - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - """ - Expand the intermediate using the default indices. - """ - _ = fully_expand - raise NotImplementedError("Build expanded intermediate not implemented" - f" on {self.name}") - - def tensor(self, indices: Sequence[str] | Sequence[Index] | None = None, - wrap_result: bool = True): - """ - Returns the itmd tensor. - - Parameters - ---------- - indices : str, optional - The names of the indices of the intermediate. 
By default the - default indices (defined on the itmd class) will be used. - wrap_result : bool, optional - Whether to wrap the result in an - :py:class:ExprContainer. (default: True) - """ - # check that the provided indices are sufficient for the itmd - indices = self.validate_indices(indices) - - # build the tensor object - tensor = self._build_tensor(indices=indices) - if wrap_result: - kwargs = {} - if isinstance(tensor, AntiSymmetricTensor): - if tensor.bra_ket_sym is S.One: # bra ket symmetry - kwargs["sym_tensors"] = (tensor.name,) - elif tensor.bra_ket_sym is S.NegativeOne: # bra ket anisym - kwargs["antisym_tensors"] = (tensor.name,) - return ExprContainer(tensor, **kwargs) - else: - return tensor - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - """ - Build the tensor representing the intermediate using the given indices. - """ - _ = indices - raise NotImplementedError("Build tensor not implemented on " - f"{self.name}") - - @cached_property - def tensor_symmetry(self) -> dict[tuple[Permutation, ...], int]: - """ - Determines the symmetry of the itmd tensor object using the - default indices, e.g., ijk/abc triples symmetry for t3_2. - """ - tensor = self.tensor(wrap_result=True) - assert isinstance(tensor, ExprContainer) and len(tensor) == 1 - return tensor.terms[0].symmetry() - - @cached_property - def allowed_spin_blocks(self) -> tuple[str, ...]: - """Determines all non-zero spin block of the intermediate.""" - - target_idx = self.default_idx - itmd = self.expand_itmd( - indices=target_idx, wrap_result=True, fully_expand=False - ) - assert isinstance(itmd, ExprContainer) - return allowed_spin_blocks(itmd.expand(), target_idx) - - @cached_member - def allowed_cvs_blocks( - self, - cvs_approximation: Callable[[ObjectContainer, str], bool] | None = None # noqa E501 - ) -> tuple[str, ...]: - """ - Splits the occupied orbitals in core and valence orbitals and - determines the valid blocks if the CVS approximation is applied. 
- - Parameters - ---------- - cvs_approximation : callable, optional - Callable that takes an expr_container.Obj instance and a space - string (e.g. 'covv'). It returns a bool indicating whether the - block of the object described by the space string is valid within - the CVS approximation, i.e., whether the block is neglected or not. - By default, the "is_allowed_cvs_block" function is used, - which applies the CVS approximation as described in - 10.1063/1.453424 and as implemented in adcman/adcc. - """ - target_idx = self.default_idx - itmd = self.expand_itmd( - indices=target_idx, wrap_result=True, fully_expand=False - ) - assert isinstance(itmd, ExprContainer) - return allowed_cvs_blocks( - itmd.expand(), target_idx, cvs_approximation=cvs_approximation - ) - - @cached_member - def itmd_term_map(self, factored_itmds: Sequence[str] = tuple() - ) -> LazyTermMap: - """ - Returns a map that lazily determines permutations of target indices - that map terms in the intermediate definition onto each other. - - Parameters - ---------- - factored_itmds : Sequence[str], optional - Names of other intermediates to factor in the fully expanded - definition of the current intermediate which (if factorization is - successful) changes the form of the intermediate. - By default the fully expanded version will be used. - """ - # - load the appropriate version of the intermediate - itmd = self._prepare_itmd(factored_itmds) - return LazyTermMap(itmd) - - @cached_member - def _prepare_itmd(self, factored_itmds: Sequence[str] = tuple() - ) -> ExprContainer: - """" - Generates a variant of the intermediate with default indices and - simplifies it as much as possible. - - Parameters - ---------- - factored_itmds : tuple[str], optional - Names of other intermediates to factor in the fully expanded - definition of the current intermediate. By default the fully - expanded version will be used. 
- """ - from .reduce_expr import factor_eri_parts, factor_denom - - # In a usual run we only need 1 variant of an intermediate: - # a b c d e - # a b c d - # a b c - # a b - # a - # For example, always the version of b where a is factorized - # -> for b this function will always be called with a as factored_itmds - # -> caching decorator is sufficient... no need to additionally - # cache the simplified base version - - # build the base version of the itmd and simplify it - # - factor eri and denominator - itmd = self.expand_itmd(wrap_result=True, fully_expand=True) - assert isinstance(itmd, ExprContainer) - itmd.expand().make_real() - reduced = itertools.chain.from_iterable( - factor_denom(sub_expr) for sub_expr in factor_eri_parts(itmd) - ) - itmd = ExprContainer(0, **itmd.assumptions) - for term in reduced: - itmd += term.factor() - - logger.info("".join([ - "\n", "-"*80, "\n", - f"Preparing Intermediate: Factoring {factored_itmds}" - ])) - - if factored_itmds: - available = Intermediates().available - # iterate through factored_itmds and factor them one after another - # in the simplified base itmd - for i, it in enumerate(factored_itmds): - logger.info("\n".join([ - "-"*80, f"Factoring {it} in {self.name}:" - ])) - itmd = available[it].factor_itmd( - itmd, factored_itmds=factored_itmds[:i], - max_order=self.order - ) - logger.info("".join([ - "\n", "-"*80, "\n", - f"Done with factoring {factored_itmds} in {self.name}", "\n", - "-"*80 - ])) - return itmd - - def factor_itmd(self, expr: ExprContainer, - factored_itmds: Sequence[str] = tuple(), - max_order: int | None = None, - allow_repeated_itmd_indices: bool = False - ) -> ExprContainer: - """ - Factors the intermediate in an expression assuming a real orbital - basis. - - Parameters - ---------- - expr : Expr - Expression in which to factor intermediates. - factored_itmds : Sequence[str], optional - Names of other intermediates that have already been factored in - the expression. 
It is necessary to factor those intermediates in - the current intermediate definition as well, because the - definition might change. By default the fully expanded version - of the intermediate will be used. - max_order : int, optional - The maximum perturbation theoretical order of intermediates - to consider. - allow_repeated_itmd_indices: bool, optional - If set, the factorization of intermediates of the form I_iij are - allowed, i.e., indices on the intermediate may appear more than - once. This corresponds to either a partial trace or a diagonal - element of the intermediate. Note that this does not consistently - work for "long" intermediates (at least 2 terms), because the - number of terms might be reduced which is not correctly handled - currently. - """ - - from .factor_intermediates import ( - _factor_long_intermediate, _factor_short_intermediate, - FactorizationTermData - ) - - assert isinstance(expr, ExprContainer) - if not expr.real: - raise NotImplementedError("Intermediates only implemented for " - "a real orbital basis.") - - # ensure that the previously factored intermediates - # are provided as tuple -> can use them as dict key - if isinstance(factored_itmds, str): - factored_itmds = (factored_itmds,) - elif not isinstance(factored_itmds, tuple): - factored_itmds = tuple(factored_itmds) - - # can not factor if the expr is just a number or the intermediate - # has already been factored or the order of the pt order of the - # intermediate is to high. - # also it does not make sense to factor t4_2 again, because of the - # used factorized form. 
- if expr.inner.is_number or self.name in factored_itmds or \ - self.name == 't4_2' or \ - (max_order is not None and max_order < self.order): - return expr - - expr = expr.expand() - terms = expr.terms - - # if want to factor a t_amplitude - # -> terms to consider need to have a denominator - # Also the pt order of the term needs to be high enough for the - # current intermediate - if self.itmd_type == 't_amplitude' and self.name != 't4_2': - term_is_relevant = [ - term.order >= self.order and - any(o.exponent < S.Zero and o.contains_only_orb_energies - for o in term.objects) - for term in terms - ] - else: - term_is_relevant = [term.order >= self.order for term in terms] - # no term has a denominator or a sufficient pt order - # -> can't factor the itmd - if not any(term_is_relevant): - return expr - - # determine the maximum pt order present in the expr (order is cached) - max_order = max(term.order for term in terms) - - # build a new expr that only contains the relevant terms - remainder = S.Zero - to_factor = ExprContainer(0, **expr.assumptions) - for term, is_relevant in zip(terms, term_is_relevant): - if is_relevant: - to_factor += term - else: - remainder += term.inner - - # - prepare the itmd for factorization and extract data to speed - # up the later comparison - itmd_expr = self._prepare_itmd(factored_itmds=factored_itmds) - itmd: tuple[EriOrbenergy, ...] = tuple( - EriOrbenergy(term).canonicalize_sign() for term in itmd_expr.terms - ) - itmd_data: tuple[FactorizationTermData, ...] 
= tuple( - FactorizationTermData(term) for term in itmd - ) - - # factor the intermediate in the expr - if len(itmd) == 1: # short intermediate that consists of a single term - factored = _factor_short_intermediate( - to_factor, itmd[0], itmd_data[0], self, - allow_repeated_itmd_indices=allow_repeated_itmd_indices - ) - factored += remainder - else: # long intermediate that consists of multiple terms - itmd_term_map = self.itmd_term_map(factored_itmds) - for _ in range(max_order // self.order): - to_factor = _factor_long_intermediate( - to_factor, itmd, itmd_data, itmd_term_map, self, - allow_repeated_itmd_indices=allow_repeated_itmd_indices - ) - factored = to_factor + remainder - return factored - - -# ----------------------------------------------------------------------------- -# INTERMEDIATE DEFINITIONS: - - -class t2_1(RegisteredIntermediate): - """First order MP doubles amplitude.""" - _itmd_type = 't_amplitude' # type has to be a class variable - _order = 1 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - _ = fully_expand - # build a basic version of the intermediate using minimal indices - # 'like on paper' - i, j, a, b = get_symbols(self.default_idx) - denom = Add( - orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) - ) - ampl = eri((a, b, i, j)) * S.One / denom - assert isinstance(ampl, Expr) - return ItmdExpr(ampl, (i, j, a, b), None) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - # guess its not worth caching here. Maybe if used a lot. - # build the tensor - return Amplitude( - f"{tensor_names.gs_amplitude}1", indices[2:], indices[:2] - ) - - def factor_itmd(self, expr: ExprContainer, - factored_itmds: Sequence[str] | None = None, - max_order: int | None = None, - allow_repeated_itmd_indices: bool = False - ) -> ExprContainer: - """ - Factors the t2_1 intermediate in an expression assuming a real - orbital basis. 
- """ - _ = allow_repeated_itmd_indices - assert isinstance(expr, ExprContainer) - if not expr.real: - raise NotImplementedError("Intermediates only implemented for a " - "real orbital basis.") - # do we have something to factor? did we already factor the itmd? - if expr.inner.is_number or \ - (factored_itmds and self.name in factored_itmds): - return expr - - # no need to determine max order for a first order intermediate - if max_order is not None and max_order < self.order: - return expr - - # prepare the itmd and extract information - t2 = self.expand_itmd(wrap_result=True, fully_expand=True) - assert isinstance(t2, ExprContainer) - t2.make_real() - t2 = EriOrbenergy(t2).canonicalize_sign() - t2_eri: ObjectContainer = t2.eri.objects[0] - t2_eri_descr: str = t2_eri.description(include_exponent=False, - target_idx=None) - t2_denom = t2.denom.inner - t2_eri_idx: tuple[Index, ...] = t2_eri.idx - - expr = expr.expand() - - factored = ExprContainer(0, **expr.assumptions) - for term in expr.terms: - term = EriOrbenergy(term) # split the term - - if term.denom.inner.is_number: # term needs to have a denominator - factored += term.expr.inner - continue - term = term.canonicalize_sign() # fix the sign of the denominator - - brackets = term.denom_brackets - removed_brackets: set[int] = set() - factored_term = ExprContainer(1, **expr.assumptions) - eri_obj_to_remove: list[int] = [] - denom_brackets_to_remove: list[int] = [] - for eri_idx, eri in enumerate(term.eri.objects): - # - compare the eri objects (check if we have a oovv eri) - # coupling is not relevant for t2_1 (only a single object) - descr: str = eri.description( - include_exponent=False, target_idx=None - ) - if descr != t2_eri_descr: - continue - # repeated indices on t2_1 make no sense since - # t^aa_ij = / (2a - i - j) = 0 - # due to the permutational antisymmetry of V - assert all(c == 1 for c in Counter(eri.idx).values()) - # - have a correct eri -> zip indices together and substitute - # the itmd 
denominator - sub = order_substitutions(dict(zip(t2_eri_idx, eri.idx))) - sub_t2_denom = t2_denom.subs(sub) - # consider the exponent! - # ^2 may be factored twice - eri_exp = eri.exponent - # - check if we find a matching denominator - for bk_idx, bk in enumerate(brackets): - # was the braket already removed? - if bk_idx in removed_brackets: - continue - if isinstance(bk, ExprContainer): - bk_exponent = S.One - bk = bk.inner - else: - bk, bk_exponent = bk.base_and_exponent - # found matching bracket in denominator - if bk == sub_t2_denom: - # can possibly factor multiple times, depending - # on the exponent of the eri and the denominator - min_exp = Min(eri_exp, bk_exponent) - # are we removing the bracket completely? - if min_exp == bk_exponent: - removed_brackets.add(bk_idx) - # found matching eri and denominator - # replace eri and bracket by a t2_1 tensor - assert min_exp.is_Integer - denom_brackets_to_remove.extend( - bk_idx for _ in range(int(min_exp)) - ) - eri_obj_to_remove.extend( - eri_idx for _ in range(int(min_exp)) - ) - # can simply use the indices of the eri as target - # indices for the tensor - amplitude = self.tensor( - indices=eri.idx, wrap_result=False - ) - assert isinstance(amplitude, Expr) - factored_term *= Pow( - amplitude / t2.pref, - min_exp - ) - # - remove the matched eri and denominator objects - denom = term.cancel_denom_brackets(denom_brackets_to_remove) - eri = term.cancel_eri_objects(eri_obj_to_remove) - # - collect the remaining objects in the term and add to result - factored_term *= term.pref * eri * term.num / denom - logger.info(f"\nFactoring {self.name} in:\n{term}\nresult:\n" - f"{EriOrbenergy(factored_term)}") - factored += factored_term - return factored - - -class t1_2(RegisteredIntermediate): - """Second order MP singles amplitude.""" - _itmd_type = "t_amplitude" - _order = 2 - _default_idx = ("i", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - # target_indices - i, a 
= get_symbols(self.default_idx) - # additional contracted indices - j, k, b, c = get_symbols('jkbc') - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the amplitude - denom = Add(orb_energy(i), -orb_energy(a)) - term1 = (Rational(1, 2) * - t2(indices=(i, j, b, c), wrap_result=False) * - eri([j, a, b, c])) - term2 = (Rational(1, 2) * - t2(indices=(j, k, a, b), wrap_result=False) * - eri([j, k, i, b])) - return ItmdExpr(term1/denom + term2/denom, (i, a), (j, k, b, c)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}2", (indices[1],), (indices[0],) - ) - - -class t2_2(RegisteredIntermediate): - """Second order MP doubles amplitude.""" - _itmd_type = "t_amplitude" - _order = 2 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, a, b = get_symbols(self.default_idx) - # generate additional contracted indices (2o / 2v) - k, l, c, d = get_symbols('klcd') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the t2_2 amplitude - denom = Add( - orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) - ) - itmd = S.Zero - # - 0.5 t2eri_3 - itmd += (- Rational(1, 2) * eri((i, j, k, l)) * - t2(indices=(k, l, a, b), wrap_result=False)) - # - 0.5 t2eri_5 - itmd += (- Rational(1, 2) * eri((a, b, c, d)) * - t2(indices=(i, j, c, d), wrap_result=False)) - # + (1 - P_ij) (1 - P_ab) P_ij t2eri_4 - ampl = t2(indices=(i, k, a, c), wrap_result=True) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((k, b, j, c)) - itmd += Add( - base.inner, -base.copy().permute((i, j)).inner, - -base.copy().permute((a, b)).inner, - base.copy().permute((i, j), (a, b)).inner - ) - return ItmdExpr(itmd * S.One / denom, (i, j, a, b), (k, l, c, d)) - - def 
_build_tensor(self, indices: Sequence[Index]) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}2", indices[2:], indices[:2] - ) - - -class t3_2(RegisteredIntermediate): - """Second order MP triples amplitude.""" - _itmd_type = "t_amplitude" - _order = 2 - _default_idx = ("i", "j", "k", "a", "b", "c") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, k, a, b, c = get_symbols(self.default_idx) - # generate additional contracted indices (1o / 1v) - l, d = get_symbols('ld') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the t3_2 amplitude - denom = Add( - orb_energy(i), orb_energy(j), orb_energy(k), - -orb_energy(a), -orb_energy(b), -orb_energy(c) - ) - itmd = S.Zero - # (1 - P_ik - P_jk) (1 - P_ab - P_ac) t_ij^ad - ampl = t2(indices=(i, j, a, d), wrap_result=True) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((k, d, b, c)) - itmd += Add( - base.inner, - -base.copy().permute((i, k)).inner, - -base.copy().permute((j, k)).inner, - -base.copy().permute((a, b)).inner, - -base.copy().permute((a, c)).inner, - base.copy().permute((i, k), (a, b)).inner, - base.copy().permute((i, k), (a, c)).inner, - base.copy().permute((j, k), (a, b)).inner, - base.copy().permute((j, k), (a, c)).inner - ) - # (1 - P_ij - P_ik) (1 - P_ac - P_bc) t_il^ab - ampl = t2(indices=(i, l, a, b), wrap_result=True) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((j, k, l, c)) - itmd += Add( - base.inner, - -base.copy().permute((i, j)).inner, - -base.copy().permute((i, k)).inner, - -base.copy().permute((a, c)).inner, - -base.copy().permute((b, c)).inner, - base.copy().permute((i, j), (a, c)).inner, - base.copy().permute((i, j), (b, c)).inner, - base.copy().permute((i, k), (a, c)).inner, - base.copy().permute((i, k), (b, c)).inner - ) - return ItmdExpr(itmd/denom, (i, j, k, a, b, c), (l, d)) - - def 
_build_tensor(self, indices) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}2", indices[3:], indices[:3] - ) - - -class t4_2(RegisteredIntermediate): - """ - Second order MP quadruple amplitudes in a factorized form that avoids - the construction of the quadruples denominator. - """ - _itmd_type = "t_amplitude" - _order = 2 - _default_idx = ("i", "j", "k", "l", "a", "b", "c", "d") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, k, l, a, b, c, d = get_symbols(self.default_idx) - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the t4_2 amplitude - # (1 - P_ac - P_ad - P_bc - P_bd + P_ac P_bd) (1 - P_jk - P_jl) - # t_ij^ab t_kl^cd - ampl = t2(indices=(i, j, a, b)) - assert isinstance(ampl, ExprContainer) - base = ampl * t2(indices=(k, l, c, d), wrap_result=False) - v_permutations = {tuple(tuple()): 1, ((a, c),): -1, ((a, d),): -1, - ((b, c),): -1, ((b, d),): -1, ((a, c), (b, d)): +1} - o_permutations = {tuple(tuple()): 1, ((j, k),): -1, ((j, l),): -1} - t4 = S.Zero - for (o_perms, o_factor), (v_perms, v_factor) in \ - itertools.product(o_permutations.items(), - v_permutations.items()): - perms = o_perms + v_perms - t4 += Mul(o_factor, v_factor, base.copy().permute(*perms).inner) - return ItmdExpr(t4, (i, j, k, l, a, b, c, d), None) - - def _build_tensor(self, indices) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}2", indices[4:], indices[:4] - ) - - -class t1_3(RegisteredIntermediate): - """Third order MP single amplitude.""" - _itmd_type = "t_amplitude" - _order = 3 - _default_idx = ("i", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a = get_symbols('ia') - # generate additional contracted indices (2o / 2v) - j, k, b, c = get_symbols('jkbc') - # other intermediate class instances - t1 = self._registry['t_amplitude']['t1_2'] - t2 = 
self._registry['t_amplitude']['t2_2'] - t3 = self._registry['t_amplitude']['t3_2'] - if fully_expand: - t1 = t1.expand_itmd - t2 = t2.expand_itmd - t3 = t3.expand_itmd - else: - t1 = t1.tensor - t2 = t2.tensor - t3 = t3.tensor - # build the amplitude - denom = Add(orb_energy(i), -orb_energy(a)) - itmd = (Rational(1, 2) * eri([j, a, b, c]) * - t2(indices=(i, j, b, c), wrap_result=False)) - itmd += (Rational(1, 2) * eri([j, k, i, b]) * - t2(indices=(j, k, a, b), wrap_result=False)) - amplitude = t1(indices=(j, b), wrap_result=False) - assert isinstance(amplitude, Expr) - itmd -= amplitude * eri([i, b, j, a]) - itmd += (Rational(1, 4) * eri([j, k, b, c]) * - t3(indices=(i, j, k, a, b, c), wrap_result=False)) - # need to keep track of all contracted indices... also contracted - # indices within each of the second order t-amplitudes - # -> substitute_contracted indices to minimize the number of contracted - # indices - target = (i, a) - if fully_expand: - itmd = ExprContainer(itmd, target_idx=target) - itmd = itmd.substitute_contracted().inner - contracted = tuple(sorted( - [s for s in itmd.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = (j, k, b, c) - return ItmdExpr(itmd * S.One / denom, target, contracted) - - def _build_tensor(self, indices) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}3", (indices[1],), (indices[0],)) - - -class t2_3(RegisteredIntermediate): - """Third order MP double amplitude.""" - _itmd_type = "t_amplitude" - _order = 3 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, a, b = get_symbols(self.default_idx) - # generate additional contracted indices (2o / 2v) - k, l, c, d = get_symbols('klcd') - # other intermediate class instances - _t2_1 = self._registry['t_amplitude']['t2_1'] - t1 = self._registry['t_amplitude']['t1_2'] - t2 = self._registry['t_amplitude']['t2_2'] - t3 = 
self._registry['t_amplitude']['t3_2'] - t4 = self._registry['t_amplitude']['t4_2'] - if fully_expand: - _t2_1 = _t2_1.expand_itmd - t1 = t1.expand_itmd - t2 = t2.expand_itmd - t3 = t3.expand_itmd - t4 = t4.expand_itmd - else: - _t2_1 = _t2_1.tensor - t1 = t1.tensor - t2 = t2.tensor - t3 = t3.tensor - t4 = t4.tensor - # build the amplitude - denom = Add( - orb_energy(a), orb_energy(b), -orb_energy(i), -orb_energy(j) - ) - itmd = S.Zero - # +(1-P_ij) * t^c_j(2) - ampl = t1(indices=(j, c)) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((i, c, a, b)) - itmd += Add(base.inner, -base.permute((i, j)).inner) - # +(1-P_ab) * t^b_k(2) - ampl = t1(indices=(k, b)) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((i, j, k, a)) - itmd += Add(base.inner, -base.permute((a, b)).inner) - # - 0.5 * t^cd_ij(2) - itmd -= (Rational(1, 2) * eri((a, b, c, d)) * - t2(indices=(i, j, c, d), wrap_result=False)) - # - 0.5 * t^ab_kl(2) - itmd -= (Rational(1, 2) * eri((i, j, k, l)) * - t2(indices=(k, l, a, b), wrap_result=False)) - # + (1-P_ij)*(1-P_ab) * t^ac_ik(2) - ampl = t2(indices=(i, k, a, c)) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((j, c, k, b)) - itmd += Add( - base.inner, - -base.copy().permute((i, j)).inner, - -base.copy().permute((a, b)).inner, - base.copy().permute((i, j), (a, b)).inner - ) - # + 0.5 * (1-P_ab) * t^bcd_ijk(2) - ampl = t3(indices=(i, j, k, b, c, d)) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((k, a, c, d)) - itmd += (Rational(1, 2) * base.inner - - Rational(1, 2) * base.copy().permute((a, b)).inner) - # + 0.5 * (1-P_ij) t^abc_jkl(2) - ampl = t3(indices=(j, k, l, a, b, c)) - assert isinstance(ampl, ExprContainer) - base = ampl * eri((k, l, i, c)) - itmd += (Rational(1, 2) * base.inner - - Rational(1, 2) * base.copy().permute((i, j)).inner) - # + 0.25 t^abcd_ijkl(2) - itmd += (Rational(1, 4) * eri((k, l, c, d)) * - t4(indices=(i, j, k, l, a, b, c, d), wrap_result=False)) - # - 0.25 t^ab_ij(1) t^kl_cd(1) - 
itmd -= (Rational(1, 4) * eri((k, l, c, d)) * - _t2_1(indices=(i, j, a, b), wrap_result=False) * - _t2_1(indices=(k, l, c, d), wrap_result=False)) - # minimize the number of contracted indices - target = (i, j, a, b) - if fully_expand: - itmd = ExprContainer(itmd, target_idx=target) - itmd = itmd.substitute_contracted().inner - contracted = tuple(sorted( - [s for s in itmd.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = (k, l, c, d) - return ItmdExpr(itmd * S.One / denom, target, contracted) - - def _build_tensor(self, indices) -> Expr: - return Amplitude( - f"{tensor_names.gs_amplitude}3", indices[2:], indices[:2] - ) - - -class t2_1_re_residual(RegisteredIntermediate): - """ - Residual of the first order RE doubles amplitudes. - """ - _itmd_type = "re_residual" - _order = 2 # according to MP the maximum order of the residual is 2 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - # re intermediates can not be fully expanded, but add the bool - # anyway for a consistent interface - _ = fully_expand - i, j, a, b = get_symbols(self.default_idx) - # additional contracted indices - k, l, c, d = get_symbols('klcd') - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_1'] - - itmd = S.Zero - - # (1 - P_ij)(1 - P_ab) t_jk^bc - ampl = t2.tensor(indices=[j, k, b, c]) - assert isinstance(ampl, ExprContainer) - base = ampl * eri([i, c, k, a]) - itmd += Add( - base.inner, - -base.copy().permute((i, j)).inner, - -base.copy().permute((a, b)).inner, - base.copy().permute((i, j), (a, b)).inner - ) - # (1 - P_ab) f_ac t_ij^bc - ampl = t2.tensor(indices=[i, j, b, c]) - assert isinstance(ampl, ExprContainer) - base = ampl * fock([a, c]) - itmd += Add(base.inner, -base.copy().permute((a, b)).inner) - # (1 - P_ij) f_jk t_ik^ab - ampl = t2.tensor(indices=[i, k, a, b]) - assert isinstance(ampl, ExprContainer) - base = ampl * fock([j, k]) - itmd += 
Add(base.inner, -base.copy().permute((i, j)).inner) - # - 0.5 * t_ij^cd - itmd -= (Rational(1, 2) * eri((a, b, c, d)) * - t2.tensor(indices=(i, j, c, d), wrap_result=False)) - # -0.5 * t_kl^ab - itmd -= (Rational(1, 2) * eri((i, j, k, l)) * - t2.tensor(indices=(k, l, a, b), wrap_result=False)) - # + - itmd += eri((i, j, a, b)) - target = (i, j, a, b) - contracted = (k, l, c, d) - return ItmdExpr(itmd, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - # placeholder for 0, will be replaced in factor_intermediate - return AntiSymmetricTensor("Zero", indices[:2], indices[2:]) - - -class t1_2_re_residual(RegisteredIntermediate): - """ - Residual of the second order RE singles amplitudes. - """ - _itmd_type = "re_residual" - _order = 3 # according to MP the maximum order of the residual is 3 - _default_idx = ("i", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True): - _ = fully_expand - i, a = get_symbols(self.default_idx) - # additional contracted indices - j, k, b, c = get_symbols('jkbc') - - # t amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - ts2 = self._registry['t_amplitude']['t1_2'] - - # - {V^{ib}_{ja}} {t2^{b}_{j}} - itmd = ( - -eri([i, b, j, a]) * ts2.tensor(indices=[j, b], wrap_result=False) - ) - # + {f^{a}_{b}} {t2^{b}_{i}} - itmd += ( - fock([a, b]) * ts2.tensor(indices=[i, b], wrap_result=False) - ) - # - {f^{i}_{j}} {t2^{a}_{j}} - itmd -= ( - fock([i, j]) * ts2.tensor(indices=[j, a], wrap_result=False) - ) - # + \frac{{V^{ja}_{bc}} {t1^{bc}_{ij}}}{2} - itmd += (Rational(1, 2) * eri([j, a, b, c]) - * t2.tensor(indices=[i, j, b, c], wrap_result=False)) - # + \frac{{V^{jk}_{ib}} {t1^{ab}_{jk}}}{2} - itmd += (Rational(1, 2) * eri([j, k, i, b]) - * t2.tensor(indices=[j, k, a, b], wrap_result=False)) - # - {f^{j}_{b}} {t1^{ab}_{ij}} - itmd -= ( - fock([j, b]) * t2.tensor(indices=[i, j, a, b], wrap_result=False) - ) - target = (i, a) - contracted = (j, k, b, c) - return ItmdExpr(itmd, 
target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - # placeholder for 0, will be replaced in factor_intermediate - return AntiSymmetricTensor("Zero", (indices[0],), (indices[1],)) - - -class t2_2_re_residual(RegisteredIntermediate): - """ - Residual of the second order RE doubles amplitudes. - """ - _itmd_type = "re_residual" - _order = 3 # according to MP the maximum order of the residual is 3 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - _ = fully_expand - i, j, a, b = get_symbols(self.default_idx) - # additional contracted indices - k, l, c, d = get_symbols('klcd') - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_2'] - - itmd = S.Zero - - # (1 - P_ij)(1 - P_ab) t_jk^bc - ampl = t2.tensor(indices=[j, k, b, c]) - assert isinstance(ampl, ExprContainer) - base = ampl * eri([i, c, k, a]) - itmd += Add( - base.inner, - -base.copy().permute((i, j)).inner, - -base.copy().permute((a, b)).inner, - base.copy().permute((i, j), (a, b)).inner - ) - # (1 - P_ab) f_ac t_ij^bc - ampl = t2.tensor(indices=[i, j, b, c]) - assert isinstance(ampl, ExprContainer) - base = ampl * fock([a, c]) - itmd += Add( - base.inner, -base.copy().permute((a, b)).inner - ) - # (1 - P_ij) f_jk t_ik^ab - ampl = t2.tensor(indices=[i, k, a, b]) - assert isinstance(ampl, ExprContainer) - base = ampl * fock([j, k]) - itmd += Add( - base.inner, -base.copy().permute((i, j)).inner - ) - # - 0.5 * t_ij^cd - itmd -= (Rational(1, 2) * eri((a, b, c, d)) * - t2.tensor(indices=(i, j, c, d), wrap_result=False)) - # -0.5 * t_kl^ab - itmd -= (Rational(1, 2) * eri((i, j, k, l)) * - t2.tensor(indices=(k, l, a, b), wrap_result=False)) - target = (i, j, a, b) - contracted = (k, l, c, d) - return ItmdExpr(itmd, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - # placeholder for 0, will be replaced in factor_intermediate - return 
AntiSymmetricTensor("Zero", indices[:2], indices[2:]) - - -class p0_2_oo(RegisteredIntermediate): - """ - Second order contribution to the occupied occupied block of the MP - one-particle density matrix. - """ - _itmd_type = "mp_density" - _order = 2 - _default_idx = ("i", "j") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j = get_symbols(self.default_idx) - # additional contracted indices (1o / 2v) - k, a, b = get_symbols('kab') - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the density - p0 = (- Rational(1, 2) * - t2(indices=(i, k, a, b), wrap_result=False) * - t2(indices=(j, k, a, b), wrap_result=False)) - return ItmdExpr(p0, (i, j), (k, a, b)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor( - f"{tensor_names.gs_density}2", (indices[0],), (indices[1],), 1 - ) - - -class p0_2_vv(RegisteredIntermediate): - """ - Second order contribution to the virtual virtual block of the MP - one-particle density matrix. - """ - _itmd_type = "mp_density" - _order = 2 - _default_idx = ("a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - a, b = get_symbols(self.default_idx) - # additional contracted indices (2o / 1v) - i, j, c = get_symbols('ijc') - # t2_1 class instance - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the density - p0 = (Rational(1, 2) * - t2(indices=(i, j, a, c), wrap_result=False) * - t2(indices=(i, j, b, c), wrap_result=False)) - return ItmdExpr(p0, (a, b), (i, j, c)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor( - f"{tensor_names.gs_density}2", (indices[0],), (indices[1],), 1) - - -class p0_3_oo(RegisteredIntermediate): - """ - Third order contribution to the occupied occupied block of the MP - one-particle density matrix. 
- """ - _itmd_type = "mp_density" - _order = 3 - _default_idx = ("i", "j") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j = get_symbols(self.default_idx) - # generate additional contracted indices (1o / 2v) - k, a, b = get_symbols('kab') - # t amplitude cls - t2 = self._registry['t_amplitude']['t2_1'] - td2 = self._registry['t_amplitude']['t2_2'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - td2 = td2.expand_itmd if fully_expand else td2.tensor - # build the density - p0 = (- Rational(1, 2) * - t2(indices=(i, k, a, b), wrap_result=False) * - td2(indices=(j, k, a, b), wrap_result=False)) - p0 += p0.subs({i: j, j: i}, simultaneous=True) - - target = (i, j) - if fully_expand: - p0 = ExprContainer( - p0, target_idx=target - ).substitute_contracted().inner - contracted = tuple(sorted( - [s for s in p0.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = (k, a, b) - return ItmdExpr(p0, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor( - f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) - - -class p0_3_ov(RegisteredIntermediate): - """ - Third order contribution to the occupied virtual block of the MP - one-particle density matrix. 
- """ - _itmd_type = "mp_density" - _order = 3 - _default_idx = ("i", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a = get_symbols(self.default_idx) - # generate additional contracted indices (2o / 2v) - j, k, b, c = get_symbols('jkbc') - # t_amplitude cls instances - t2 = self._registry['t_amplitude']['t2_1'] - ts2 = self._registry['t_amplitude']['t1_2'] - tt2 = self._registry['t_amplitude']['t3_2'] - ts3 = self._registry['t_amplitude']['t1_3'] - if fully_expand: - t2 = t2.expand_itmd - ts2 = ts2.expand_itmd - tt2 = tt2.expand_itmd - ts3 = ts3.expand_itmd - else: - t2 = t2.tensor - ts2 = ts2.tensor - tt2 = tt2.tensor - ts3 = ts3.tensor - p0 = S.Zero - # build the density - # - t^ab_ij(1) t^b_j(2) - p0 += ( - S.NegativeOne * t2(indices=(i, j, a, b), wrap_result=False) * - ts2(indices=(j, b), wrap_result=False) - ) - # - 0.25 * t^bc_jk(1) t^abc_ijk(2) - p0 -= (Rational(1, 4) * - t2(indices=(j, k, b, c), wrap_result=False) * - tt2(indices=(i, j, k, a, b, c), wrap_result=False)) - # + t^a_i(3) - p0 += ts3(indices=(i, a), wrap_result=False) - - target = (i, a) - if fully_expand: - p0 = ExprContainer( - p0, target_idx=target - ).substitute_contracted().inner - contracted = tuple(sorted( - [s for s in p0.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = (j, k, b, c) - assert isinstance(p0, Expr) - return ItmdExpr(p0, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor( - f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) - - -class p0_3_vv(RegisteredIntermediate): - """ - Third order contribution to the virtual virtual block of the MP - one-particle density matrix. 
- """ - _itmd_type = "mp_density" - _order = 3 - _default_idx = ("a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - a, b = get_symbols(self.default_idx) - # additional contracted indices (2o / 1v) - i, j, c = get_symbols('ijc') - # t_amplitude cls instances - t2 = self._registry['t_amplitude']['t2_1'] - td2 = self._registry['t_amplitude']['t2_2'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - td2 = td2.expand_itmd if fully_expand else td2.tensor - # build the density - p0 = (Rational(1, 2) * - t2(indices=(i, j, a, c), wrap_result=False) * - td2(indices=(i, j, b, c), wrap_result=False)) - p0 += p0.subs({a: b, b: a}, simultaneous=True) - - target = (a, b) - if fully_expand: - p0 = ExprContainer( - p0, target_idx=target - ).substitute_contracted().inner - contracted = tuple(sorted( - [s for s in p0.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = (i, j, c) - return ItmdExpr(p0, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor( - f"{tensor_names.gs_density}3", (indices[0],), (indices[1],), 1) - - -class t2eri_1(RegisteredIntermediate): - """t2eri1 in adcc / pi1 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "k", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, k, a = get_symbols(self.default_idx) - # generate additional contracted indices (2v) - b, c = get_symbols('bc') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(i, j, b, c), wrap_result=False) * - eri((k, a, b, c)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, j, k, a), (b, c)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eri1', indices[:2], 
indices[2:]) - - -class t2eri_2(RegisteredIntermediate): - """t2eri2 in adcc / pi2 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "k", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, k, a = get_symbols(self.default_idx) - # generate additional contracted indices (1o / 1v) - b, l = get_symbols('bl') # noqa E741 - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(i, l, a, b), wrap_result=False) * - eri((l, k, j, b)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, j, k, a), (b, l)) - - def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: - return NonSymmetricTensor('t2eri2', indices) - - -class t2eri_3(RegisteredIntermediate): - """t2eri3 in adcc / pi3 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, a, b = get_symbols(self.default_idx) - # generate additional contracted indices (2o) - k, l = get_symbols('kl') # noqa E741 - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(k, l, a, b), wrap_result=False) * - eri((i, j, k, l)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, j, a, b), (k, l)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eri3', indices[:2], indices[2:]) - - -class t2eri_4(RegisteredIntermediate): - """t2eri4 in adcc / pi4 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, a, b = get_symbols(self.default_idx) - # generate additional contracted 
indices (1o / 1v) - k, c = get_symbols('kc') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(j, k, a, c), wrap_result=False) * - eri((k, b, i, c)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, j, a, b), (k, c)) - - def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: - return NonSymmetricTensor('t2eri4', indices) - - -class t2eri_5(RegisteredIntermediate): - """t2eri5 in adcc / pi5 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "a", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, a, b = get_symbols(self.default_idx) - # generate additional contracted indices (2v) - c, d = get_symbols('cd') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(i, j, c, d), wrap_result=False) * - eri((a, b, c, d)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, j, a, b), (c, d)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eri5', indices[:2], indices[2:]) - - -class t2eri_6(RegisteredIntermediate): - """t2eri6 in adcc / pi6 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "a", "b", "c") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a, b, c = get_symbols(self.default_idx) - # generate additional contracted indices (2o) - j, k = get_symbols('jk') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(j, k, b, c), wrap_result=False) * - eri((j, k, i, a)) - ) - assert 
isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, a, b, c), (j, k)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eri6', indices[:2], indices[2:]) - - -class t2eri_7(RegisteredIntermediate): - """t2eri7 in adcc / pi7 in libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "a", "b", "c") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a, b, c = get_symbols(self.default_idx) - # generate additional contracted indices (1o / 1v) - j, d = get_symbols('jd') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - t2eri = ( - t2(indices=(i, j, b, d), wrap_result=False) * - eri((j, c, a, d)) - ) - assert isinstance(t2eri, Expr) - return ItmdExpr(t2eri, (i, a, b, c), (j, d)) - - def _build_tensor(self, indices: Sequence[Index]) -> NonSymmetricTensor: - return NonSymmetricTensor('t2eri7', indices) - - -class t2eri_A(RegisteredIntermediate): - """pia intermediate in libadc""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "j", "k", "a") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, j, k, a = get_symbols(self.default_idx) - # t2eri cls instances for generating the itmd - pi1 = self._registry['misc']['t2eri_1'] - pi2 = self._registry['misc']['t2eri_2'] - pi1 = pi1.expand_itmd if fully_expand else pi1.tensor - pi2 = pi2.expand_itmd if fully_expand else pi2.tensor - # build the itmd - pia = ( - Rational(1, 2) * pi1(indices=(i, j, k, a), wrap_result=False) - + pi2(indices=(i, j, k, a), wrap_result=False) - + S.NegativeOne * pi2(indices=(j, i, k, a), wrap_result=False) - ) - target = (i, j, k, a) - if fully_expand: - pia = ExprContainer( - pia, target_idx=target - ).substitute_contracted().inner - contracted = tuple(sorted( - [s for s in pia.atoms(Index) if s not in target], - 
key=sort_idx_canonical - )) - else: - contracted = tuple() - return ItmdExpr(pia, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eriA', indices[:2], indices[2:]) - - -class t2eri_B(RegisteredIntermediate): - """pib intermediate in libadc""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "a", "b", "c") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a, b, c = get_symbols(self.default_idx) - # t2eri cls instances for generating the itmd - pi6 = self._registry['misc']['t2eri_6'] - pi7 = self._registry['misc']['t2eri_7'] - pi6 = pi6.expand_itmd if fully_expand else pi6.tensor - pi7 = pi7.expand_itmd if fully_expand else pi7.tensor - # build the itmd - pib = (-Rational(1, 2) * pi6(indices=(i, a, b, c), wrap_result=False) - + pi7(indices=(i, a, b, c), wrap_result=False) - + S.NegativeOne * pi7(indices=(i, a, c, b), wrap_result=False)) - target = (i, a, b, c) - if fully_expand: - pib = ExprContainer( - pib, target_idx=target - ).substitute_contracted().inner - contracted = tuple(sorted( - [s for s in pib.atoms(Index) if s not in target], - key=sort_idx_canonical - )) - else: - contracted = tuple() - return ItmdExpr(pib, target, contracted) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2eriB', indices[:2], indices[2:]) - - -class t2sq(RegisteredIntermediate): - """t2sq intermediate from adcc and libadc.""" - _itmd_type = "misc" - _order = 2 - _default_idx = ("i", "a", "j", "b") - - @cached_member - def _build_expanded_itmd(self, fully_expand: bool = True) -> ItmdExpr: - i, a, j, b = get_symbols(self.default_idx) - # generate additional contracted indices (1o / 1v) - c, k = get_symbols('ck') - # t2_1 class instance for generating t2_1 amplitudes - t2 = self._registry['t_amplitude']['t2_1'] - t2 = t2.expand_itmd if fully_expand else t2.tensor - # build the intermediate - itmd = ( - t2(indices=(i, k, a, 
c), wrap_result=False) * - t2(indices=(j, k, b, c), wrap_result=False) - ) - assert isinstance(itmd, Expr) - return ItmdExpr(itmd, (i, a, j, b), (k, c)) - - def _build_tensor(self, indices: Sequence[Index]) -> Expr: - return AntiSymmetricTensor('t2sq', indices[:2], indices[2:], 1) - - -def eri(idx: Sequence[str] | Sequence[Index]) -> Expr: - """ - Builds an antisymmetric electron repulsion integral. - Indices may be provided as list of sympy symbols or as string. - """ - idx = get_symbols(idx) - if len(idx) != 4: - raise Inputerror(f'4 indices required to build a ERI. Got: {idx}.') - return AntiSymmetricTensor(tensor_names.eri, idx[:2], idx[2:]) - - -def fock(idx: Sequence[Index] | Sequence[str]) -> Expr: - """ - Builds a fock matrix element. - Indices may be provided as list of sympy symbols or as string. - """ - idx = get_symbols(idx) - if len(idx) != 2: - raise Inputerror('2 indices required to build a Fock matrix element.' - f'Got: {idx}.') - return AntiSymmetricTensor(tensor_names.fock, idx[:1], idx[1:]) - - -def orb_energy(idx: Index | Sequence[str] | Sequence[Index] - ) -> NonSymmetricTensor: - """ - Builds an orbital energy. - Indices may be provided as list of sympy symbols or as string. - """ - idx = get_symbols(idx) - if len(idx) != 1: - raise Inputerror("1 index required to build a orbital energy. Got: " - f"{idx}.") - return NonSymmetricTensor(tensor_names.orb_energy, idx) diff --git a/build/lib/adcgen/logger.py b/build/lib/adcgen/logger.py deleted file mode 100644 index 8bf0a58..0000000 --- a/build/lib/adcgen/logger.py +++ /dev/null @@ -1,63 +0,0 @@ -from pathlib import Path -import os -import logging -import json - - -logger: logging.Logger = logging.getLogger("adcgen") -_config_file = "logger_config.json" - - -def set_log_level(level: str) -> None: - """Set the level of the adcgen logger.""" - logger.setLevel(level) - - -def _config_logger() -> None: - """ - Config the logger. 
- The path to a logging configuration JSON file can be provided through the - 'ADCGEN_LOG_CONFIG' environment variable. By default - 'logging_config.json' will be used. - The level of the adcgen logger can additionally be modified through the - 'ADCGEN_LOG_LEVEL' environment variable. The level will be set after - reading the config. - """ - import logging.config - - # load the configuration - config = os.environ.get("ADCGEN_LOG_CONFIG", None) - if config is None: - config = Path(__file__).parent.resolve() / _config_file - else: - config = Path(config).resolve() - if not config.exists: - raise FileNotFoundError(f"logging config file {config} does not exist") - config = json.load(open(config, "r")) - logging.config.dictConfig(config) - # set the print level - level = os.environ.get("ADCGEN_LOG_LEVEL", None) - if level is not None: - logger.setLevel(level) - - -class Formatter(logging.Formatter): - colors = { - "WARNING": "\033[93m", # yellow - "ERROR": "\033[91m", # red - "CRITICAL": "\033[95m" # pink - } - reset_color = "\033[0m" - - def format(self, record: logging.LogRecord) -> str: - # Color the log message - col = self.colors.get(record.levelname, None) - if col is not None: - record.msg = f"{col}{record.msg}{self.reset_color}" - return super().format(record) - - -class DropErrors(logging.Filter): - # Only keep debug and info messages - def filter(self, record: logging.LogRecord) -> bool: - return record.levelno < logging.WARNING diff --git a/build/lib/adcgen/logger_config.json b/build/lib/adcgen/logger_config.json deleted file mode 100644 index d563c13..0000000 --- a/build/lib/adcgen/logger_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "version": 1, - "disable_existing_loggers": false, - "formatters": { - "colored": { - "class": "adcgen.logger.Formatter" - } - }, - "filters": { - "droperrors": { - "()": "adcgen.logger.DropErrors" - } - }, - "handlers": { - "stdout": { - "class": "logging.StreamHandler", - "level": "DEBUG", - "filters": [ - "droperrors" - ], 
- "formatter": "colored", - "stream": "ext://sys.stdout" - }, - "stderr": { - "class": "logging.StreamHandler", - "level": "WARNING", - "formatter": "colored", - "stream": "ext://sys.stderr" - } - }, - "loggers": { - "adcgen": { - "level": "INFO", - "handlers": [ - "stdout", - "stderr" - ] - } - } -} \ No newline at end of file diff --git a/build/lib/adcgen/misc.py b/build/lib/adcgen/misc.py deleted file mode 100644 index 0916a04..0000000 --- a/build/lib/adcgen/misc.py +++ /dev/null @@ -1,116 +0,0 @@ -from collections.abc import Sequence -from functools import wraps -import inspect - - -class Inputerror(ValueError): - pass - - -def cached_member(function): - """ - Decorator for a class method that is called with at least one argument - or keyword argument. THe result is cached in the variable '_function_cache' - of the class instance. - """ - - fname = function.__name__ - - # create the signature of the wrapped function and check that we dont - # have any keyword only arguments in the wrapped function - func_sig = inspect.signature(function) - invalid_arg_types = (inspect.Parameter.KEYWORD_ONLY, - inspect.Parameter.VAR_KEYWORD) - if any(arg.kind in invalid_arg_types for arg in - func_sig.parameters.values()): - raise TypeError("Functions with keyword only arguments are not " - "supported by cached_member.") - - @wraps(function) - def wrapper(self, *args, **kwargs): - # - transform all arguments to positional arguments - # and add not provided default arguments - bound_args: inspect.BoundArguments = ( - func_sig.bind(self, *args, **kwargs) - ) - bound_args.apply_defaults() - assert len(bound_args.kwargs) == 0 - args = bound_args.args[1:] # remove self from the positional arguments - - try: # load/create the cache - fun_cache = self._function_cache[fname] - except AttributeError: - self._function_cache = {} - fun_cache = self._function_cache[fname] = {} - except KeyError: - fun_cache = self._function_cache[fname] = {} - - try: # try to load the data from the cache - 
return fun_cache[args] - except KeyError: - fun_cache[args] = result = function(self, *args) - return result - - return wrapper - - -def validate_input(**kwargs) -> None: - # order, min_order, adc_order, braket, space, lr: single input - # indices, block: 2 strings possible - validate = { - 'order': lambda o: isinstance(o, int) and o >= 0, # int - 'braket': lambda bk: bk in ['bra', 'ket'], # bra/ket - 'space': lambda sp: all(s in ['p', 'h'] for s in sp), - 'indices': lambda idx: all(isinstance(i, str) for i in idx), - 'min_order': lambda o: isinstance(o, int) and o >= 0, # int - 'lr': lambda lr: lr in ['left', 'right'], # left/right - 'block': lambda b: all(validate['space'](sp) for sp in b), - 'adc_order': lambda o: isinstance(o, int) and o >= 0, # int - 'lr_isr': lambda lr: lr in ['left', 'right'], # left/right - } - # braket, lr are exprected as str! - # order, min_order, adc_order are expected as int! - # space, block and indices as list/tuple or ',' separated string - for var, val in kwargs.items(): - if var == 'space': - tpl = transform_to_tuple(val) - if len(tpl) != 1: - raise Inputerror(f'Invalid input for {var}: {val}.') - val = tpl[0] - elif var == 'block': - tpl = transform_to_tuple(val) - if len(tpl) != 2: - raise Inputerror(f'Invalid input for {var}: {val}') - val = tpl - elif var == 'indices': - tpl = transform_to_tuple(val) - if len(tpl) not in [1, 2]: - raise Inputerror(f'Invalid indices input: {val}.') - val = tpl - if not validate[var](val): - raise Inputerror(f'Invalid input for {var}: {val}.') - - -def transform_to_tuple(input: Sequence[str]) -> tuple[str, ...]: - convertor = { - str: lambda x: tuple(i for i in x.split(",")), - list: lambda x: tuple(x), - tuple: lambda x: x - } - conversion = convertor.get(type(input)) - if not conversion: - raise Inputerror(f"{input} of type {type(input)} is not convertable " - "to tuple.") - return conversion(input) - - -class Singleton(type): - """Simple metaclass that implements the Singleton pattern on a 
class.""" - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = ( - super(Singleton, cls).__call__(*args, **kwargs) - ) - return cls._instances[cls] diff --git a/build/lib/adcgen/operators.py b/build/lib/adcgen/operators.py deleted file mode 100644 index 87b30e6..0000000 --- a/build/lib/adcgen/operators.py +++ /dev/null @@ -1,213 +0,0 @@ -from collections.abc import Sequence -from functools import cached_property -from typing import Any - -from sympy import Add, Expr, Rational, Mul, factorial, latex -from sympy.physics.secondquant import Fd, F - -from .misc import cached_member -from .indices import Indices, Index, get_symbols -from .rules import Rules -from .sympy_objects import AntiSymmetricTensor -from .logger import logger -from .tensor_names import tensor_names - - -class Operators: - """ - Constructs operators, like the zeroth and first order Hamiltonian or - arbitrary N-particle operators. - - Parameters - ---------- - variant : str, optional - Defines the partitioning of the Hamiltonian. 
- (default: the MP Hamiltonian) - """ - def __init__(self, variant: str = "mp"): - self._indices = Indices() - self._variant = variant - - @cached_property - def hamiltonian(self) -> Expr: - """Constructs the full electronic Hamiltonian.""" - h = Add(self.mp_h0()[0], self.mp_h1()[0]) - assert isinstance(h, Expr) - return h - - @cached_property - def h0(self) -> tuple[Expr, Rules | None]: - """Constructs the zeroth order Hamiltonian.""" - if self._variant == 'mp': - return self.mp_h0() - elif self._variant == 're': - return self.re_h0() - else: - raise NotImplementedError( - f"H0 not implemented for {self._variant}" - ) - - @cached_property - def h1(self) -> tuple[Expr, Rules | None]: - """Constructs the first order Hamiltonian.""" - if self._variant == 'mp': - return self.mp_h1() - elif self._variant == 're': - return self.re_h1() - else: - raise NotImplementedError( - f"H1 not implented for {self._variant}" - ) - - @cached_member - def operator(self, n_create: int, n_annihilate: int) -> tuple[Expr, None]: - """ - Constructs an arbitrary second quantized operator placing creation - operators to the left of annihilation operators. - - Parameters - ---------- - n_create : int - The number of creation operators. Placed left of the annihilation - operators. - n_annihilate : int - The number of annihilation operators. Placed right of the creation - operators. 
- """ - # generate general indices for the operator - idx = self._indices.get_generic_indices( - general=n_create + n_annihilate - ) - idx = idx[("general", "")] - create = idx[:n_create] - annihilate = idx[n_create:] - name = tensor_names.operator - - pref = Rational(1, Mul(factorial(n_create), factorial(n_annihilate))) - d = AntiSymmetricTensor(name, create, annihilate) - op = self.excitation_operator(creation=create, - annihilation=annihilate, - reverse_annihilation=True) - return pref * d * op, None - - def excitation_operator( - self, creation: Sequence[Index] | Sequence[str] | None = None, - annihilation: Sequence[Index] | Sequence[str] | None = None, - reverse_annihilation: bool = True - ) -> Expr: - """ - Creates an arbitrary string of second quantized excitation operators. - Operators are concatenated as [creation * annihilation] - - Parameters - ---------- - creation : Sequence[Index] | Sequence[str], optional - Each of the provided indices is placed on a creation operator. - Operators are concatenated in the provided order: - [i, j] -> Fd(i) * Fd(j). - annihilation : Sequence[Index] | Sequence[str], optional - Each of the provided indices is placed on a annihilation operator. - Operators are concatenated in the provided order: - [i, j] -> F(i) * F(j) - reverse_annihilation : bool, optional - If set, the order of the annihilation operators is reversed, i.e. - [i, j] -> F(j) * F(i) - (default: True). 
- """ - res = [] - if creation is not None: - res.extend(Fd(s) for s in get_symbols(creation)) - if annihilation is not None: - symbols = get_symbols(annihilation) - if reverse_annihilation: - symbols = reversed(symbols) - res.extend(F(s) for s in symbols) - expr = Mul(*res) - assert isinstance(expr, Expr) - return expr - - @staticmethod - def mp_h0() -> tuple[Expr, None]: - """Constructs the zeroth order MP-Hamiltonian.""" - idx_cls = Indices() - p, q = idx_cls.get_indices("pq")[("general", "")] - f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) - pq = Mul(Fd(p), F(q)) - h0 = Mul(f, pq) - assert isinstance(h0, Expr) - logger.debug(f"H0 = {latex(h0)}") - return h0, None - - @staticmethod - def mp_h1() -> tuple[Expr, None]: - """Constructs the first order MP-Hamiltonian.""" - idx_cls = Indices() - p, q, r, s = idx_cls.get_indices("pqrs")[("general", "")] - # get an occ index for 1 particle part of H1 - occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] - v1 = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) - pq = Mul(Fd(p), F(q)) - v2 = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) - pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) - h1 = Add(Mul(-v1, pq), Rational(1, 4) * v2 * pqsr) - assert isinstance(h1, Expr) - logger.debug(f"H1 = {latex(h1)}") - return h1, None - - @staticmethod - def re_h0() -> tuple[Expr, Rules]: - """Constructs the zeroth order RE-Hamiltonian.""" - idx_cls = Indices() - p, q, r, s = idx_cls.get_indices('pqrs')[("general", "")] - # get an occ index for 1 particle part of H0 - occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] - - f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) - piqi = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) - pqrs = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) - op_pq = Mul(Fd(p), F(q)) - op_pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) - - h0 = Add( - Mul(f, op_pq), -Mul(piqi, op_pq), Rational(1, 4) * pqrs * op_pqsr - ) - assert isinstance(h0, Expr) - logger.debug(f"H0 = 
{latex(h0)}") - # construct the rules for forbidden blocks in H0 - # we are not in a real orbital basis!! -> More canonical blocks - rules = Rules(forbidden_tensor_blocks={ - tensor_names.fock: ('ov', 'vo'), - tensor_names.eri: ('ooov', 'oovv', 'ovvv', 'ovoo', 'vvoo', 'vvov') - }) - return h0, rules - - @staticmethod - def re_h1() -> tuple[Expr, Rules]: - """Constructs the first order RE-Hamiltonian.""" - idx_cls = Indices() - p, q, r, s = idx_cls.get_indices('pqrs')[("general", "")] - # get an occ index for 1 particle part of H0 - occ = idx_cls.get_generic_indices(occ=1)[("occ", "")][0] - - f = AntiSymmetricTensor(tensor_names.fock, (p,), (q,)) - piqi = AntiSymmetricTensor(tensor_names.eri, (p, occ), (q, occ)) - pqrs = AntiSymmetricTensor(tensor_names.eri, (p, q), (r, s)) - op_pq = Mul(Fd(p), F(q)) - op_pqsr = Mul(Fd(p), Fd(q), F(s), F(r)) - - h1 = Add( - Mul(f, op_pq), -Mul(piqi, op_pq), Rational(1, 4) * pqrs * op_pqsr - ) - assert isinstance(h1, Expr) - logger.debug(f"H1 = {latex(h1)}") - # construct the rules for forbidden blocks in H1 - rules = Rules(forbidden_tensor_blocks={ - tensor_names.fock: ['oo', 'vv'], - tensor_names.eri: ['oooo', 'ovov', 'vvvv'] - }) - return h1, rules - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Operators): - return self._variant == other._variant - return False diff --git a/build/lib/adcgen/properties.py b/build/lib/adcgen/properties.py deleted file mode 100644 index 41ed670..0000000 --- a/build/lib/adcgen/properties.py +++ /dev/null @@ -1,423 +0,0 @@ -from collections.abc import Sequence -from math import factorial - -from sympy import Add, Expr, S, sqrt, sympify - -from .expression import ExprContainer -from .func import gen_term_orders, wicks -from .indices import n_ov_from_space, generic_indices_from_space -from .intermediate_states import IntermediateStates -from .misc import Inputerror, cached_member, transform_to_tuple, validate_input -from .rules import Rules -from .secular_matrix import SecularMatrix 
-from .simplify import simplify - - -class Properties: - """ - Constructs ISR property expressions. - - Parameters - ---------- - l_isr : IntermediateStates - The intermediate states used for the construction of properties. - r_isr : IntermediateStates, optional - Optionally, another IntermediateStates can be passed to the class - allowing for the construction of transition properties between - intermediate states of different ADC variants like PP- or IP-ADC - (default: l_isr). - """ - - def __init__(self, l_isr: IntermediateStates, - r_isr: IntermediateStates | None = None): - assert isinstance(l_isr, IntermediateStates) - assert ( - r_isr is None or isinstance(r_isr, IntermediateStates) - ) - self.l_isr: IntermediateStates = l_isr - self.r_isr: IntermediateStates = self.l_isr if r_isr is None else r_isr - self.l_m: SecularMatrix = SecularMatrix(l_isr) - self.r_m: SecularMatrix = ( - self.l_m if r_isr is None else SecularMatrix(r_isr) - ) - # Check if both ground states are compatible. Currently this only means - # to check that either None ot both have singles enabled. - self.gs = l_isr.gs - if self.gs.singles != self.r_isr.gs.singles: - raise Inputerror("Both ISR need to share the same GS, " - "i.e. neither or both have singles enabled.") - # also check that both isr use the same hamiltonian - if self.l_isr.gs.h != self.r_isr.gs.h: - raise Inputerror("The Operator of left and right isr has to be " - "equal") - self.h = l_isr.gs.h - - def operator(self, order: int, n_create: int, n_annihilate: int, - subtract_gs=True) -> tuple[Expr, Rules | None]: - """ - Constructs an arbitrary n'th-order operator. - - Parameters - ---------- - order : int - The perturbation theoretical order. - n_create : int - The number of creation operators. Placed left of the annihilation - operators. - n_annihilate : int - The number of annihilation operators. Placed right of the - creation operators. 
- subtract_gs : bool, optional - If set, the n'th-order ground state expectation value of the - corresponding operator is subtracted if the operator string - contains an equal amount of creation and annihilation operators - (otherwise the ground state contribution vanishes). - (Defaults to True) - """ - validate_input(order=order) - - if order == 0: - d, rules = self.h.operator( - n_create=n_create, n_annihilate=n_annihilate - ) - else: - d, rules = S.Zero, None - - if subtract_gs and n_create == n_annihilate: - e0 = self.gs.expectation_value(order=order, n_particles=n_create) - return Add(d, -e0), rules - else: - return d, rules - - @cached_member - def expec_block_contribution(self, order: int, block: Sequence[str], - n_particles: int = 1, - subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th order contribution of an individual block IJ to the - expectation value of the operator - d_{pq...} X_I ^(n) Y_J. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The block of the ADC matrix for which the expectation value - is generated, e.g., 'ph,pphh' for the 1p-1h/2p-2h block. - n_particles : int - The number of creation and annihilation operators in the operator - string. (Defaults to 1.) - subtract_gs : bool, optional - If set, the ground state expectation value of the corresponding - operator is subtracted from the result. 
(Defaults to True) - """ - - block = transform_to_tuple(block) - validate_input(order=order, block=block) - - # generate indices for the block and compute the prefactors for the - # contraction over the block space - left_idx: str = "".join( - s.name for s in generic_indices_from_space(block[0]) - ) - n_ov = n_ov_from_space(block[0]) - left_pref = S.One / sqrt( - factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - - right_idx: str = "".join( - s.name for s in generic_indices_from_space(block[1]) - ) - n_ov = n_ov_from_space(block[1]) - right_pref = S.One / sqrt( - factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - - # build the ADC amplitude vectors - left_ampl = self.l_isr.amplitude_vector(indices=left_idx, lr='left') - right_ampl = self.r_isr.amplitude_vector(indices=right_idx, lr='right') - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - # iterate over all norm*d combinations of n'th order - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # compute d for a given norm (the overall order is split inbetween - # both factors) - orders_d = gen_term_orders( - order=norm_term[1], term_length=3, min_order=0 - ) - expec = S.Zero - for term in orders_d: - op, rules = self.operator(order=term[1], - n_create=n_particles, - n_annihilate=n_particles, - subtract_gs=subtract_gs) - if op is S.Zero: - continue - i1 = (left_pref * right_pref * left_ampl * - self.l_isr.intermediate_state(order=term[0], - space=block[0], - braket='bra', - indices=left_idx) * - op * - self.r_isr.intermediate_state(order=term[2], - space=block[1], - braket='ket', - indices=right_idx) * - right_ampl) - expec += wicks(i1, simplify_kronecker_deltas=True, rules=rules) - res += (norm * expec).expand() - return simplify(ExprContainer(res)).inner - - @cached_member - def expectation_value(self, adc_order: int, n_particles: int = 1, - order: int | None = None, - subtract_gs: bool = True) -> Expr: - """ - 
Constructs the expectation value taking all blocks into account - that are available at the specified order of perturbation theory - in the ADC secular matrix - sum_IJ sum_pq... d_{pq...} X_I Y_J. - Note that also lower order contributions are considered, i.e., the - ADC(0) and ADC(1) expectation values are included in the ADC(2) - expectation value. - - Parameters - ---------- - adc_order : int - The perturbation theoretical order of the ADC scheme for which the - expectation value is generated. - n_particles : int - The number of creation and annihilation operators in the operator - string. (Defaults to 1) - order : int, optional - Only consider contributions of the specified order, e.g., - only the zeroth order contributions of all available blocks in - the ADC(n) matrix. - subtract_gs : bool, optional - If set, the ground state expectation value is subtracted from the - result. (Defaults to True) - """ - validate_input(adc_order=adc_order) - if order is not None: - validate_input(order=order) - # get all blocks that are present in the ADC(n) secular matrix and - # the order through which they are expanded. - left_blocks = self.l_m.block_order(adc_order) - left_blocks = sorted( - left_blocks.items(), - key=lambda tpl: (len(tpl[0][0]), len(tpl[0][1])) - ) - right_blocks = self.r_m.block_order(adc_order) - right_blocks = sorted( - right_blocks, key=lambda bl: (len(bl[0]), len(bl[1])) - ) - # iterate over the blocks, replacing the second space in each block - # with the corresponding space of block_2 from isr_2 - # This only works for python3.7 or newer, because it assumes that - # the two block dicts are in the same order -> which is only - # garuanteed from python3.7 - res = sympify(0) - for i, (l_block, max_order) in enumerate(left_blocks): - r_block = right_blocks[i] - # block is not expanded through the given order - if order is not None and max_order < order: - continue - # combine the two spaces to build the correct block with mixed - # ADC variant spaces. 
- block = (l_block[0], r_block[1]) - - if order is None: - orders_to_gen = list(range(max_order + 1)) - else: - orders_to_gen = [order] - - for o in orders_to_gen: - res += self.expec_block_contribution( - order=o, block=block, n_particles=n_particles, - subtract_gs=subtract_gs - ) - assert isinstance(res, Expr) - return res - - @cached_member - def trans_moment_space(self, order: int, space: str, - n_create: int | None = None, - n_annihilate: int | None = None, - lr_isr: str = 'left', - subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th-order contribution to the transition moment - for the desired excitation space and operator - d_pq... X_I ^(n). - - Parameters - ---------- - order : int - The perturbation theoretical order. - space : str - The excitation space, e.g., 'ph' or 'pphh' for singly or doubly - excited configurations, respectively. - n_create : int, optional - The number of creation operators in the operator string. - By default, the operator string with the lowest amount of - creation and annihilation operators is constructed for which in - general a non-zero result can be expected, e.g., 'ca' and 'a' - for PP- and IP-ADC, respectively. - n_annihilate : int, optional - The number of annihilation operators in the operator string. - By default, the operator string with the lowest amount of - creation and annihilation operators is constructed for which in - general a non-zero result can be expected, e.g., 'ca' and 'a' - for PP- and IP-ADC, respectively. - l_isr : str, optional - Controls whether the left or right 'IntermediateStates' instance - is used to construct the transition moment contribution. - (Defaults to 'left') - subtract_gs : bool, optional - If set, ground state contributions are subtracted if the - operator contains an equal amount of creation and annihilation - operators. 
(Defaults to True) - """ - # Subtraction of the ground state contribution is probably not - # necessary, because all terms cancel (at least in second order - # Singles PP-ADC). For all other ADC variants (IP/EA...) the ground - # state expectation value is Zero, because the number of creation and - # annihilation operators will never be equal. - # Give the option anyway, because I'm not sure whether it will be - # required at higher orders for PP-ADC - - validate_input(order=order, space=space, lr_isr=lr_isr) - - # - generate indices for the ISR state - n_ov = n_ov_from_space(space) - idx = "".join(s.name for s in generic_indices_from_space(space)) - - # - map lr on the correct intermediate_states instance - if lr_isr == "left": - isr = self.l_isr - else: - assert lr_isr == "right" - isr = self.r_isr - - # - if no operator string is given -> generate a default, i.e. - # 'a' for IP- / 'ca' for PP-ADC - if n_create is None and n_annihilate is None: - n_create = isr.min_space[0].count('p') - n_annihilate = isr.min_space[0].count('h') - elif n_create is None: - n_create = 0 - elif n_annihilate is None: - n_annihilate = 0 - assert isinstance(n_create, int) and isinstance(n_annihilate, int) - - # - generate amplitude vector and prefactor for the summation - ampl = isr.amplitude_vector(indices=idx, lr='left') - pref = S.One / sqrt(factorial(n_ov["occ"]) * factorial(n_ov["virt"])) - - # - import the gs wavefunction (possible here) - mp = {o: self.gs.psi(order=o, braket='ket') for o in range(order + 1)} - - # iterate over all norm*d combinations - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - for norm_term in orders: - norm = self.gs.norm_factor(norm_term[0]) - if norm is S.Zero: - continue - # compute d for a given norm factor - orders_d = gen_term_orders( - order=norm_term[1], term_length=3, min_order=0 - ) - trans_mom = S.Zero - for term in orders_d: - op, rules = self.operator( - order=term[1], n_create=n_create, - 
n_annihilate=n_annihilate, subtract_gs=subtract_gs - ) - if op is S.Zero: - continue - i1 = (pref * ampl * - isr.intermediate_state(order=term[0], space=space, - braket='bra', indices=idx) * - op * - mp[term[2]]) - trans_mom += wicks(i1, simplify_kronecker_deltas=True, - rules=rules) - res += (norm * trans_mom).expand() - return simplify(ExprContainer(res)).inner - - @cached_member - def trans_moment(self, adc_order: int, n_create: int | None = None, - n_annihilate: int | None = None, order: int | None = None, - lr_isr: str = 'left', subtract_gs: bool = True) -> Expr: - """ - Constructs the ADC(n) transition moment - sum_I sum_pq... d_pq... X_I - considering all available configurations. - Note that also lower order contributions are considered, i.e., - the ADC(0) and ADC(1) contributions are included in the ADC(2) - transition moments. - - Parameters - ---------- - adc_order : int - The perturbation theoretical order of the ADC scheme. - n_create : int, optional - The number of creation operators in the operator string. - By default, the operator string with the lowest amount of - creation and annihilation operators is constructed for which in - general a non-zero result can be expected, e.g., 'ca' and 'a' - for PP- and IP-ADC, respectively. - n_annihilate : int, optional - The number of annihilation operators in the operator string. - By default, the operator string with the lowest amount of - creation and annihilation operators is constructed for which in - general a non-zero result can be expected, e.g., 'ca' and 'a' - for PP- and IP-ADC, respectively. - order : int, optional - Only consider contributions of the specified order, e.g., - only the zeroth order contributions of all available configurations - in the ADC(n) matrix. - lr_isr : str, optional - Constrols whether the left or right 'IntermediateStates' instance - is used to construct the transition moment. 
- (Defaults to 'left') - subtract_gs : bool, optional - If set, the ground state contributions are subtracted if the - operator contains an equal amount of creation and annihilation - operators. (Defaults to True) - """ - validate_input(lr_isr=lr_isr) - - # obtain the maximum order through which all the spaces are expanded - # in the secular matrix - if lr_isr == "left": - m = self.l_m - else: - assert lr_isr == "right" - m = self.r_m - max_orders = m.max_ptorder_spaces(adc_order) - - res = S.Zero - for space, max_order in max_orders.items(): - if order is None: - orders_to_gen = list(range(max_order + 1)) - else: - # the space is not expanded through the desired order - if max_order < order: - continue - orders_to_gen = [order] - - for o in orders_to_gen: - res += self.trans_moment_space( - order=o, space=space, n_create=n_create, - n_annihilate=n_annihilate, lr_isr=lr_isr, - subtract_gs=subtract_gs - ) - assert isinstance(res, Expr) - return res diff --git a/build/lib/adcgen/reduce_expr.py b/build/lib/adcgen/reduce_expr.py deleted file mode 100644 index 6d301cc..0000000 --- a/build/lib/adcgen/reduce_expr.py +++ /dev/null @@ -1,330 +0,0 @@ -from collections.abc import Sequence -from collections import defaultdict -import itertools -import time - -from sympy import S - -from .eri_orbenergy import EriOrbenergy -from .expression import ExprContainer, TermContainer -from .indices import Index -from .logger import logger -from .symmetry import Permutation - - -def reduce_expr(expr: ExprContainer) -> ExprContainer: - """ - Fully expands all available intermediates in an expression such that the - expression only exists of orbital energies and electron repulsion - integrals. The expanded expression is then simplified to collect as much - terms as possible. - The implementation assumes a real orbital basis. 
- """ - assert isinstance(expr, ExprContainer) - if not expr.real: - raise NotImplementedError("Intermediates only implemented for a real " - "orbital basis.") - expr = expr.expand() - - # check if we have anything to do - if expr.inner.is_number: - return expr - - logger.info("".join( - ['\n', '#'*80, '\n', ' '*25, "REDUCING EXPRESSION\n", '#'*80, '\n'] - )) - - # 1) Insert the definitions of all defined intermediates in the expr - # and reduce the number of terms by factoring the ERI in each term. - start = time.perf_counter() - logger.info("Expanding intermediates... ") - expanded_expr: list[ExprContainer] = [] - for term_i, term in enumerate(expr.terms): - logger.info( - "#"*80 + f"\nExpanding term {term_i+1} of {len(expr)}: {term}... ") - term = term.expand_intermediates() - assert isinstance(term, ExprContainer) - term = term.expand() - logger.info(f"into {len(term)} terms.\nCollecting terms.... ") - term = factor_eri_parts(term) - logger.info('-'*80) - for j, equal_eri in enumerate(term): - # minimize the contracted indices - # each term in eri should hold exactly the same indices - # -> build substitutions once and apply to the whole expr - sub = equal_eri.terms[0].substitute_contracted( - apply_substitutions=False - ) - assert isinstance(sub, list) - sub_equal_eri = equal_eri.subs(sub) - # ensure that we are not creating a complete mess - if sub_equal_eri.inner is S.Zero and equal_eri.inner is not S.Zero: - raise ValueError(f"Invalid substitutions {sub} for " - f"{equal_eri}") - term[j] = sub_equal_eri - logger.info(f"\n{j+1}: {EriOrbenergy(sub_equal_eri.terms[0]).eri}") - logger.info("-"*80 + f"\nFound {len(term)} different ERI Structures") - expanded_expr.extend(term) - del expr - # 2) Now try to factor the whole expression - # Only necessary to consider the first term of each of the expressions - # in the list (they all have same ERI) - # -> build new term list and try to factor ERI + Denominator - # -> simplify the orbital energy fraction in the 
resulting terms - logger.info("\nExpanding and ERI factoring took " - f"{time.perf_counter() - start:.2f}s\n") - logger.info("".join(['#'*80, "\n", '#'*80])) - start = time.perf_counter() - logger.info("\nSumming up all terms...\n" + "#"*80) - unique_terms = [unique_expr.terms[0] for unique_expr in expanded_expr] - logger.info("Factoring ERI...") - unique_compatible_eri = find_compatible_eri_parts(unique_terms) - n = 1 - n_eri_denom = 0 - factored = 0 - # - factor eri again - for i, compatible_eri_subs in unique_compatible_eri.items(): - temp = expanded_expr[i] - eri = EriOrbenergy(expanded_expr[i].terms[0]).eri - logger.info("\n" + "#"*80) - logger.info(f"ERI {n} of {len(unique_compatible_eri)}: {eri}") - n += 1 - for other_i, sub in compatible_eri_subs.items(): - temp += expanded_expr[other_i].subs(sub) - - # collected all terms with equal ERI -> factor denominators - eri_sym = eri.symmetry(only_contracted=True) - logger.info("\nFactoring Denominators...") - for j, term in enumerate(factor_denom(temp, eri_sym=eri_sym)): - term = term.factor() - if len(term) != 1: - raise RuntimeError("Expected the sub expression to have " - "identical Denoms and ERI, which should " - "allow factorization to a single term:\n" - f"{term}") - # symmetrize the numerator and cancel the orbital energy fraction - term = EriOrbenergy(term) - logger.info("-"*80 + f"\nERI/Denom {j}: {term}\n") - logger.info("Permuting numerator... 
") - term = term.permute_num(eri_sym=eri_sym) - logger.info(f"Term now reads:\n{term}\n") - logger.info("Cancel orbital energy fraction...") - term = term.cancel_orb_energy_frac() - logger.info("Done.") - - if not all(EriOrbenergy(t).num.inner.is_number - for t in term.terms): - logger.warning("\nNUMERATOR NOT CANCELLED COMPLETELY:") - for t in term.terms: - logger.warning(EriOrbenergy(t)) - - factored += term - n_eri_denom += 1 - del expanded_expr # not up to date anymore - assert isinstance(factored, ExprContainer) - logger.info("#"*80 + - "\n\nFactorizing and cancelling the orbital energy fractions " - f"in {n_eri_denom} terms took " - f"{time.perf_counter() - start:.2f}s.\n" - f"Expression consists now of {len(factored)} terms.") - - # 3) Since we modified some denominators by canceling the orbital energy - # fractions, try to factor eri and denominator again - logger.info("#"*80 + "\n\nFactoring again...") - result = 0 - for term in itertools.chain.from_iterable( - factor_denom(sub_expr) for sub_expr in factor_eri_parts(factored) - ): - # factor the resulting term again, because we can have something like - # 2/(4*a + 4*b) * X - 1/(2 * (a + b)) * X - result += term.factor() - assert isinstance(result, ExprContainer) - logger.info(f"Done. {len(result)} terms remaining.\n\n" + "#"*80) - return result - - -def factor_eri_parts(expr: ExprContainer) -> list[ExprContainer]: - """ - Finds compatible remainder (eri) parts of an expression and collects - the terms in subexpressions. - - Returns - list[ExprContainer] - List of subexpressions, where each subexpression contains terms with - equal eri parts. 
- """ - - if len(expr) == 1: # trivial case - return [expr] - - terms = expr.terms - ret: list[ExprContainer] = [] - for i, compatible_eri_subs in find_compatible_eri_parts(terms).items(): - temp = ExprContainer(terms[i].inner, **expr.assumptions) - for other_i, sub in compatible_eri_subs.items(): - temp += terms[other_i].subs(sub) - ret.append(temp) - return ret - - -def find_compatible_eri_parts( - term_list: Sequence[TermContainer] - ) -> dict[int, dict[int, list[tuple[Index, Index]]]]: - """ - Determines the necessary index substitutions to make the remainder (eri) - parts of terms equal to each other - so they can be factored easily. - Does not modify the terms, but returns a dict that connects the index of - the terms with a substitution list. - """ - from .simplify import find_compatible_terms - - if len(term_list) == 1: # trivial: only a single eri - return {0: {}} - - # dont use EriOrbenergy class, but rather only do whats necessary to - # extract the eri part of the terms - eri_parts: list[TermContainer] = [] - for term in term_list: - assumptions = term.assumptions - assumptions["target_idx"] = term.target - eris = ExprContainer(1, **assumptions) - for o in term.objects: - if not o.inner.is_number and not o.contains_only_orb_energies: - eris *= o - assert len(eris) == 1 - eri_parts.append(eris.terms[0]) - return find_compatible_terms(eri_parts) - - -def factor_denom(expr: ExprContainer, - eri_sym: dict[tuple[Permutation, ...], int] | None = None - ) -> list[ExprContainer]: - """ - Finds compatible orbital energy denominators in an expression with the - restriction that the necessary index permutations do not modify the - remainder (eri) part of the terms. - - Parameters - ---------- - expr : ExprContainer - Expression to find compatible denominators in. - eri_sym : dict, optional - The symmetry of the eri part of the terms. Warning: if provided, all - terms in the expression are assumed to have the same eri symmetry! 
- - Returns - list[ExprContainer] - List of subexpressions, where each subexpression contains terms with - equal orbital energy denominators. - """ - - if len(expr) == 1: # trivial case: single term - return [expr] - - terms: tuple[TermContainer, ...] = expr.terms - compatible_denoms = find_compatible_denom(terms, eri_sym=eri_sym) - ret: list[ExprContainer] = [] - for i, compatible_denom_perms in compatible_denoms.items(): - temp = ExprContainer(terms[i].inner, **expr.assumptions) - for other_i, perms in compatible_denom_perms.items(): - temp += terms[other_i].permute(*perms) - ret.append(temp) - return ret - - -def find_compatible_denom( - terms: Sequence[TermContainer], - eri_sym: dict[tuple[Permutation, ...], int] | None = None - ) -> dict[int, dict[int, tuple[Permutation, ...]]]: - """ - Determines the necessary index substitutions to make the orbital energy - denominators of the terms equal to each other - so they can be factored - easily. Only permutations that do not change the remainder (eri) part of - the terms are considered. - Does not modify the terms but returns a dict that connects the index of - the terms with a substitution list. - - Parameters - ---------- - terms : Sequence[TermContainer] - List of terms to find compatible orbital energy denominators. - eri_sym : dict, optional - The symmetry of the eri part of the terms. Warning: if provided, all - terms are assumed to have the same eri symmetry! 
- """ - if len(terms) == 1: # trivial case: single term - return {0: {}} - - terms_imported: list[EriOrbenergy] = [ - EriOrbenergy(term).canonicalize_sign(only_denom=True) - for term in terms - ] - - # split the terms according to length and and number of denominator - # brackets - filtered_terms = defaultdict(list) - for term_i, term in enumerate(terms_imported): - filtered_terms[term.denom_description()].append(term_i) - - ret: dict[int, dict[int, tuple[Permutation, ...]]] = {} - matched: set[int] = set() - permutations: dict[int, tuple[tuple[Permutation, ...], ...]] = {} - for term_idx_list in filtered_terms.values(): - # check which denominators are already equal - identical_denom: dict[int, list[int]] = {} - for i, term_i in enumerate(term_idx_list): - if term_i in matched: - continue - term: EriOrbenergy = terms_imported[term_i] - identical_denom[term_i] = [] - for other_i in range(i+1, len(term_idx_list)): - other_term_i = term_idx_list[other_i] - if other_term_i in matched: - continue - other_term = terms_imported[other_term_i] - if term.denom.inner == other_term.denom.inner: - identical_denom[term_i].append(other_term_i) - matched.add(other_term_i) - - if len(identical_denom) == 1: # all denoms are equal - term_i, matches = identical_denom.popitem() - ret[term_i] = {other_term_i: tuple() for other_term_i in matches} - continue - - # try to match more denominators by applying index permutations that - # satisfy: P_pq ERI = +- ERI AND P_pq Denom != +- Denom - identical_denom_list = list(identical_denom.items()) - del identical_denom - for i, (term_i, matches) in enumerate(identical_denom_list): - if term_i in matched: - continue - ret[term_i] = {} - for other_term_i in matches: # add all identical denominators - ret[term_i][other_term_i] = tuple() - - denom = terms_imported[term_i].denom.inner - for other_i in range(i+1, len(identical_denom_list)): - other_term_i, other_matches = identical_denom_list[other_i] - if other_term_i in matched: - continue - - 
other_term: EriOrbenergy = terms_imported[other_term_i] - other_denom: ExprContainer = other_term.denom - - # find all valid permutations - if other_term_i not in permutations: - permutations[other_term_i] = tuple( - perms for perms, factor in - other_term.denom_eri_sym(eri_sym=eri_sym, - only_contracted=True).items() - if factor is None - ) - for perms in permutations[other_term_i]: - # found a permutation! - if denom == other_denom.copy().permute(*perms).inner: - ret[term_i][other_term_i] = perms - for match in other_matches: - ret[term_i][match] = perms - matched.add(other_term_i) - break - return ret diff --git a/build/lib/adcgen/resolution_of_identity.py b/build/lib/adcgen/resolution_of_identity.py deleted file mode 100644 index 08061e9..0000000 --- a/build/lib/adcgen/resolution_of_identity.py +++ /dev/null @@ -1,71 +0,0 @@ -from .expression import ExprContainer -from .sympy_objects import SymmetricTensor -from .tensor_names import tensor_names -from .indices import Indices - - -def apply_resolution_of_identity(expr: ExprContainer, - symmetric: bool = True) -> ExprContainer: - """ - Applies the Resolution of Identity approximation (RI, sometimes also - called density fitting, DF) to an expression. This implies that every - spatial ERI is replaced by its factorised form. Two types of factorisation - are supported: symmetric and asymmetric. In the symmetric decomposition, - a spatial ERI is approximated as: - - (pq | rs) ~ B^P_{pq} B^P_{rs} - B^P_{pq} = (P | Q)^{-1/2} (Q | pq) - - This decomposition is the default. In the asymmetric factorisation, the - same spatial ERI is approximated as: - - (pq | rs) ~ C^P_{pq} (P | rs) - C^P_{pq} = (P | Q)^{-1} (Q | pq) - - Note that the RI approximation is only meaningful on spatial ERIs. - Therefore, this routine will crash and exit if the given expression has - not been spin-integrated before. All RI indices receive an alpha spin - by default - - Args: - expr : ExprContainer - The expression to be spin-integrated. 
- symmetric : bool, optional - If true, the symmetric factorisation variant is employed. - If false, the asymmetric factorisation variant is employed instead. - """ - - resolved_expr = 0 - - # We iterate over all terms in the expression and apply RI individually - for term in expr.terms: - # Check if the term is spin-integrated - assert ("n" not in "".join([o.spin for o in term.objects])) - # Check that no antisymmetric ERIs remain - assert (tensor_names.eri not in - ",".join([o.name for o in term.objects])) - idx_cls = Indices() - - for object in term.objects: - # Replace spatial ERIs - if object.name == tensor_names.coulomb: - # Extract indices - lower = object.idx[0:2] - upper = object.idx[2:4] - ri_idx = idx_cls.get_generic_indices(ri_a=1)[("ri", "a")] - - if symmetric: - # v_pqrs = B^P_pq B^P_rs - ri_expr = (SymmetricTensor(tensor_names.ri_sym, - (ri_idx,), lower) - * SymmetricTensor(tensor_names.ri_sym, - (ri_idx,), upper)) - else: - ri_expr = (SymmetricTensor(tensor_names.ri_asym_eri, - (ri_idx,), upper) - * SymmetricTensor(tensor_names.ri_asym_factor, - (ri_idx,), lower)) - term.subs(object, ri_expr) - - resolved_expr += term - return resolved_expr diff --git a/build/lib/adcgen/rules.py b/build/lib/adcgen/rules.py deleted file mode 100644 index e2f22c5..0000000 --- a/build/lib/adcgen/rules.py +++ /dev/null @@ -1,65 +0,0 @@ -from collections.abc import Sequence -from typing import Any - -from .expression import ExprContainer - - -class Rules: - """ - Rules to apply to expressions. - - Parameters - ---------- - forbidden_tensor_blocks : dict[str, Sequence[str]], optional - Tensor blocks to remove from an expression, i.e., only allow - a certain subset of blocks in the expression. A dictionary of the form - {tensor_name: [block1, block2, ...]} - is expected. 
- """ - - def __init__( - self, - forbidden_tensor_blocks: dict[str, Sequence[str]] | None = None): - if forbidden_tensor_blocks is None: - forbidden_tensor_blocks = {} - self._forbidden_blocks: dict[str, Sequence[str]] = ( - forbidden_tensor_blocks - ) - - def apply(self, expr: ExprContainer) -> ExprContainer: - """Applies the rules to the provided expression.""" - assert isinstance(expr, ExprContainer) - if self.is_empty: # nothing to do - return expr - - res = ExprContainer(0, **expr.assumptions) - for term in expr.terms: - # remove the forbidden blocks of tensors - if any(obj.name in self._forbidden_blocks - and obj.space in self._forbidden_blocks[obj.name] - for obj in term.objects): - continue - res += term - return res - - @property - def is_empty(self) -> bool: - return not bool(self._forbidden_blocks) - - def __eq__(self, other: "Rules | Any") -> bool: - if not isinstance(other, Rules): - return False - - empty, other_empty = self.is_empty, other.is_empty - if empty and other_empty: # both are empty - return True - elif empty or other_empty: # only self or other is empty - return False - - # both not empty -> compare forbidden blocks (keys and values) - if self._forbidden_blocks.keys() != other._forbidden_blocks.keys(): - return False - if any(sorted(v) != sorted(other._forbidden_blocks[k]) - for k, v in self._forbidden_blocks.items()): - return False - return True diff --git a/build/lib/adcgen/secular_matrix.py b/build/lib/adcgen/secular_matrix.py deleted file mode 100644 index d374db1..0000000 --- a/build/lib/adcgen/secular_matrix.py +++ /dev/null @@ -1,436 +0,0 @@ -from collections.abc import Sequence -from math import factorial - -from sympy import Add, Expr, Mul, S, sqrt - -from .expression import ExprContainer -from .func import gen_term_orders, wicks, evaluate_deltas -from .groundstate import GroundState -from .indices import ( - repeated_indices, Indices, generic_indices_from_space, n_ov_from_space -) -from .intermediate_states import 
IntermediateStates -from .misc import Inputerror, cached_member, transform_to_tuple, validate_input -from .operators import Operators -from .rules import Rules -from .simplify import simplify - - -class SecularMatrix: - """ - Constructs expressions for the ADC secular matrix M. - - Parameters - ---------- - isr : IntermediateStates - The intermediate states the secular matrix is represented in. - """ - def __init__(self, isr: IntermediateStates): - assert isinstance(isr, IntermediateStates) - self.isr: IntermediateStates = isr - self.gs: GroundState = isr.gs - self.h: Operators = isr.gs.h - self.indices: Indices = Indices() - - def hamiltonian(self, order: int, subtract_gs: bool - ) -> tuple[Expr, Rules | None]: - """Constructs the n'th-order shifted Hamiltonian operator.""" - if order == 0: - h, rules = self.h.h0 - elif order == 1: - h, rules = self.h.h1 - else: - assert order > 0 - h, rules = S.Zero, None - if subtract_gs: - return Add(h, -self.gs.energy(order)), rules - else: - return h, rules - - @cached_member - def precursor_matrix_block(self, order: int, block: Sequence[str], - indices: Sequence[str], - subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th order contribution to a secular matrix block in - the basis of the precursor states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The block of the secular matrix, e.g. "ph,pphh" for the - 1p-1h/2p-2h coupling block. - indices : Sequence[str] - The indices of the matrix block. - subtract_gs : bool, optional - Whether ground state contrubitions should be subtracted - (default: True). 
- """ - - block = transform_to_tuple(block) - indices = transform_to_tuple(indices) - validate_input(order=order, block=block, indices=indices) - if len(indices) != 2: - raise Inputerror("Precursor matrix requires two index strings.") - - if repeated_indices(indices[0], indices[1]): - raise Inputerror("Found repeating index in bra and ket.") - bra_space, ket_space = block - bra_idx, ket_idx = indices - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - # 1) iterate through all combinations of norm_factor*M^# - for (norm_order, matrix_order) in orders: - norm = self.gs.norm_factor(norm_order) - if norm is S.Zero: - continue - # 2) construct M^# for a given norm_factor - # the overall order is split between the norm factor and M^# - orders_M = gen_term_orders( - order=matrix_order, term_length=3, min_order=0 - ) - matrix = S.Zero - for (bra_order, op_order, ket_order) in orders_M: - operator, rules = self.hamiltonian(op_order, subtract_gs) - if operator == 0: - continue - itmd = Mul( - self.isr.precursor(order=bra_order, space=bra_space, - braket='bra', indices=bra_idx), - operator, - self.isr.precursor(order=ket_order, space=ket_space, - braket='ket', indices=ket_idx) - ) - itmd = wicks(itmd, simplify_kronecker_deltas=True, rules=rules) - matrix += itmd - # evaluate_deltas should not be necessary here, because norm only - # contains contracted indices - res += (norm * matrix).expand() - return simplify(ExprContainer(res)).inner - - @cached_member - def isr_matrix_block(self, order: int, block: Sequence[str], - indices: Sequence[str], - subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th order contribution to a secular matrix block in - the basis of the intermediate states. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The block of the secular matrix, e.g. "ph,pphh" for the - 1p-1h/2p-2h coupling block. - indices : Sequence[str] - The indices of the matrix block. 
- subtract_gs : bool, optional - Whether ground state contrubitions should be subtracted - (default: True). - """ - - block = transform_to_tuple(block) - indices = transform_to_tuple(indices) - validate_input(order=order, block=block, indices=indices) - if len(indices) != 2: - raise Inputerror("ISR matrix requires 2 index strings.") - - if repeated_indices(indices[0], indices[1]): - raise Inputerror("Found a repeating index in bra and ket.") - bra_space, ket_space = block - bra_idx, ket_idx = indices - - orders = gen_term_orders(order=order, term_length=2, min_order=0) - res = S.Zero - # 1) iterate through all combinations of norm_factor*M - for (norm_order, matrix_order) in orders: - norm = self.gs.norm_factor(norm_order) - if norm is S.Zero: - continue - # 2) construct M for a given norm_factor - # the overall order is split between the norm_factor and M - orders_M = gen_term_orders( - order=matrix_order, term_length=3, min_order=0 - ) - matrix = S.Zero - for (bra_order, op_order, ket_order) in orders_M: - operator, rules = self.hamiltonian(op_order, subtract_gs) - if operator == 0: - continue - itmd = Mul( - self.isr.intermediate_state(order=bra_order, - space=bra_space, - braket='bra', - indices=bra_idx), - operator, - self.isr.intermediate_state(order=ket_order, - space=ket_space, - braket='ket', - indices=ket_idx) - ) - itmd = wicks(itmd, simplify_kronecker_deltas=True, rules=rules) - matrix += itmd - # evaluate deltas should not be necessary here - res += (norm * matrix).expand() - return simplify(ExprContainer(res)).inner - - @cached_member - def mvp_block_order(self, order: int, space: str, block: Sequence[str], - indices: str, subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th-order contribution of a secular matrix block to - the matrix vector product - r_{I} = M_{I,J} Y_(J). - - Parameters - ---------- - order : int - The perturbation theoretical order. 
- space : str - The excitation space of the result vector of the matrix vector - product, e.g., "ph" if the contribution to the 1p-1h MVP - is constructed. - block : Sequence[str] - The block of the secular matrix, e.g. "ph,pphh" for the - 1p-1h/2p-2h coupling block. - indices : str - The indices of the result vector r of the matrix vector product. - subtract_gs : bool, optional - Whether ground state contrubitions should be subtracted - (default: True). - """ - space_tpl = transform_to_tuple(space) - block = transform_to_tuple(block) - indices_tpl = transform_to_tuple(indices) - validate_input(order=order, space=space_tpl, block=block, - indices=indices_tpl) - if len(indices_tpl) != 1: - raise Inputerror(f"Invalid index input for MVP: {indices}") - space = space_tpl[0] - indices = indices_tpl[0] - del space_tpl, indices_tpl - if space != block[0]: - raise Inputerror(f"The desired MVP space {space} has to match " - f"the bra space of the secular matrix block: " - f"{block}.") - - # generate additional indices for the ket state of the secular matrix - idx: str = "".join( - s.name for s in generic_indices_from_space(block[1]) - ) - - # contruct the secular matrix block - m = self.isr_matrix_block( - order=order, block=block, indices=(indices, idx), - subtract_gs=subtract_gs - ) - - # generate the amplitude vector - y = self.isr.amplitude_vector(indices=idx, lr="right") - - # Lifting index restrictions leads to two prefactors - # p = 1/sqrt(n_o! * n_v!), in order to keep the amplitude vector and - # the resulting mvp vector normalized! - # Note, that n_o and n_v might differ for both amplitudes, leading to - # generally different prefactors p. - # In order to keep both vectors normalized they are each multiplied by - # a factor p, i.e., a factor p is 'hidden' in both vectors. 
- - # - To keep the equality r = M * Y we also have to multiply the right - # hand side of the equation with p if we multiply r with p - n_ov = n_ov_from_space(space) - prefactor_mvp = S.One / sqrt( - factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - - # - lifting the sum restrictions leads to a prefactor of p ** 2. - # However, p is hidden inside the amplitude vector -> only p present - # in the MVP equations - n_ov = n_ov_from_space(block[1]) - prefactor_ampl = S.One / sqrt( - factorial(n_ov["occ"]) * factorial(n_ov["virt"]) - ) - - return evaluate_deltas( - (prefactor_mvp * prefactor_ampl * m * y).expand() - ) - - @cached_member - def mvp(self, adc_order: int, space: str, indices: str, - order: int | None = None, subtract_gs: bool = True) -> Expr: - """ - Constructs the matrix vector product - r_{I} = sum_{J} M_{I,J} Y_{J} - for a given excitation space considering all available ADC(n) - secular matrix blocks. - - Parameters - ---------- - adc_order : int - The perturbation theoretical order the ADC(n) scheme. - space : str - The excitation space of the result vector of the matrix vector - product, e.g., "ph" for the 1p-1h MVP. - order : int, optional - Only consider contributions of the provided order, e.g., - only the zeroth order contributions of all - ADC(n) secular matrix blocks that contribute to the desired - MVP (default: None). - subtract_gs : bool, optional - If set, ground state contributions are subtracted (default: True). 
- """ - # validate the input parameters - space_tpl = transform_to_tuple(space) - indices_tpl = transform_to_tuple(indices) - validate_input( - adc_order=adc_order, space=space_tpl, indices=indices_tpl - ) - if order is not None: - validate_input(order=order) - if len(indices_tpl) != 1: - raise Inputerror(f"Invalid indices for MVP: {indices}") - space, indices = space_tpl[0], indices_tpl[0] - del space_tpl, indices_tpl - # check that the space is valid for the current adc variant - if not self.isr.validate_space(space): - raise Inputerror(f"The space {space} is not valid for the given " - f"adc variant {self.isr.variant}.") - # and that the space is present at the desired adc_order - if space not in self.max_ptorder_spaces(adc_order): - raise Inputerror(f"The space {space} is not present in " - f"{self.isr.variant}-ADC({adc_order})") - - # add up all blocks that contribute to the given mvp - mvp = S.Zero - for block, max_order in self.block_order(adc_order).items(): - if space != block[0] or (order is not None and max_order < order): - continue - if order is None: # compute all contributions of the block - for o in range(max_order + 1): - mvp += self.mvp_block_order( - order=o, space=space, block=block, indices=indices, - subtract_gs=subtract_gs - ) - else: # only compute contributions of the specified order - mvp += self.mvp_block_order( - order=order, space=space, block=block, indices=indices, - subtract_gs=subtract_gs - ) - assert isinstance(mvp, Expr) - return mvp - - @cached_member - def expectation_value_block_order(self, order: int, - block: Sequence[str], - subtract_gs: bool = True) -> Expr: - """ - Constructs the n'th-order contribution of a secular matrix block - to the energy expectation value. - - Parameters - ---------- - order : int - The perturbation theoretical order. - block : Sequence[str] - The block of the secular matrix. - subtract_gs : bool, optional - If set, ground state contributions are subtracted (default: True). 
- """ - block = transform_to_tuple(block) - validate_input(order=order, block=block) - - # generate indices for the mvp - mvp_idx: str = "".join( - s.name for s in generic_indices_from_space(block[0]) - ) - # compute the MVP - mvp = self.mvp_block_order( - order, space=block[0], block=block, indices=mvp_idx, - subtract_gs=subtract_gs - ) - # generate the left amplitude vector - left = self.isr.amplitude_vector(mvp_idx, lr='left') - # call simplify -> symmetry of left amplitude vector might reduce - # the number of terms - # prefactors: I think there is no need for any further prefactors - # E = 1/sqrt(l) * 1/sqrt(r) sum_I,J X_I M_I,J Y_J - # -> already included in the mvp function - return simplify(ExprContainer(Mul(left, mvp))).inner - - @cached_member - def expectation_value(self, adc_order: int, order: int | None = None, - subtract_gs: bool = True) -> Expr: - """ - Constructs the ADC(n) energy expectation value considering all - available secular matrix blocks. - - Parameters - ---------- - adc_order : int - The perturbation theoretical order of the ADC(n) scheme. - order : int, optional - Only consider contributions of the provided order, e.g., - only the zeroth order contributions of all - ADC(n) secular matrix (default: None). - subtract_gs : bool, optional - If set, ground state contributions are subtracted (default: True). - """ - expec = S.Zero - for block, max_order in self.block_order(adc_order).items(): - # is the mvp expanded through the desired order? - # e.g. 
ADC(4) S -> 4 // D -> 3 // T -> 2 - if order is not None and max_order < order: - continue - if order is None: # compute all contributions of the block - for o in range(max_order + 1): - expec += self.expectation_value_block_order( - order=o, block=block, subtract_gs=subtract_gs - ) - else: # only compute contibutions of the specified order - expec += self.expectation_value_block_order( - order=order, block=block, subtract_gs=subtract_gs - ) - # it should not be possible to simplify any further here, because left - # and right amplitude vector have different names - assert isinstance(expec, Expr) - return expec - - def max_ptorder_spaces(self, order: int) -> dict[str, int]: - """ - Returns the maximum perturbation theoretical order of all excitation - spaces in the ADC(n) matrix. - """ - - space = self.isr.min_space[0] - ret: dict[str, int] = {space: order} - for i in range(1, order//2 + 1): - space = f"p{space}h" - ret[space] = order - i - return ret - - def block_order(self, order: int) -> dict[tuple[str, str], int]: - """ - Returns the perturbation theoretical orders through which all blocks - are expanded in the ADC(n) secular matrix. 
- """ - from itertools import product - - max_orders = self.max_ptorder_spaces(order) - spaces = sorted(max_orders, key=lambda sp: len(sp)) - min_space = self.isr.min_space[0] - ret: dict[tuple[str, str], int] = {} - for block in product(spaces, spaces): - s1, s2 = block - # diagonal - if s1 == s2: - ret[block] = order - (len(s1) - len(min_space)) - # off diagonal - else: - dif = abs(len(s1) - len(s2)) // 2 - diag = order - (len(min(block)) - len(min_space)) - ret[block] = diag - dif - return ret diff --git a/build/lib/adcgen/simplify.py b/build/lib/adcgen/simplify.py deleted file mode 100644 index 9196858..0000000 --- a/build/lib/adcgen/simplify.py +++ /dev/null @@ -1,765 +0,0 @@ -from collections.abc import Sequence -from collections import Counter, defaultdict -import itertools - -from sympy import Add, Expr, Rational, Pow, S, sqrt - -from . import func -from .expression import ExprContainer, TermContainer, ObjectContainer -from .indices import ( - get_symbols, order_substitutions, Index, get_lowest_avail_indices, - minimize_tensor_indices, _is_index_tuple -) -from .misc import Inputerror -from .sympy_objects import ( - KroneckerDelta, Amplitude, AntiSymmetricTensor, NonSymmetricTensor -) -from .tensor_names import is_adc_amplitude, is_t_amplitude - - -def filter_tensor(expr: ExprContainer, t_strings: Sequence[str], - strict: str = 'low', - ignore_amplitudes: bool = True) -> ExprContainer: - """ - Filter an expression keeping only terms that contain the desired tensors. - - Parameters - ---------- - t_strings : Sequence[str] - List containing the desired tensor names. - struct : str, optional - 3 possible options: - - 'high': return all terms that ONLY contain the desired tensors the - requested amount of times, e.g., ['V', 'V'] returns only - terms that contain not other tensors than 'V*V' - Setting ignore_amplitudes, ignores all not requested - t and ADC ampltiudes amplitudes. 
- - 'medium': return all terms that contain the desired tensors the - requested amount, but other tensors may additionally be - present in the term. E.g. ['V', 'V'] also returns terms - that contain 'V*V*x', where x may be any amount of - arbitrary other tensors. - - 'low': return all terms that contain all of the requested tensors, - e.g., ['V', 'V'] returns all terms that contain 'V' at least - once. - - Returns - Expr - The filtered expression. - """ - - def check_term(term: TermContainer) -> bool: - available = [] - for obj in term.objects: - name = obj.name - if name is None: - continue - exp = obj.exponent - assert exp.is_Integer - available.extend(name for _ in range(int(exp))) - # True if all requested tensors are in the term - if strict == 'low': - return all(t in available for t in set(t_strings)) - # True if all requested Tensors occur the correct amount of times - elif strict == 'medium': - available = Counter(available) - desired = Counter(t_strings) - return desired.items() <= available.items() - # True if only the requested Tensors are in the term in the correct - # amount - elif strict == 'high': - if ignore_amplitudes: - requested_amplitudes = [ - name for name in t_strings - if is_adc_amplitude(name) or is_t_amplitude(name) - ] - ignored_amplitudes = { - name for name in available if - (is_adc_amplitude(name) or is_t_amplitude(name)) - and name not in requested_amplitudes - } - available = Counter([t for t in available - if t not in ignored_amplitudes]) - else: - available = Counter(available) - desired = Counter(t_strings) - return desired == available - raise ValueError(f"invalid value for strict {strict}") - - if not all(isinstance(t, str) for t in t_strings): - raise Inputerror("Tensor names need to be provided as str.") - if strict not in ['low', 'medium', 'high']: - raise Inputerror(f"{strict} is not a valid option for strict. 
Valid" - "options are 'low', 'medium' or 'high'.") - assert isinstance(expr, ExprContainer) - - expr = expr.expand() - filtered = Add(*( - term.inner for term in expr.terms if check_term(term) - )) - return ExprContainer(filtered, **expr.assumptions) - - -def find_compatible_terms(terms: Sequence[TermContainer] - ) -> dict[int, dict[int, list[tuple[Index, Index]]]]: - """ - Determines the substitutions of contracted needed to map terms onto each - other. - - Parameters - ---------- - terms: Sequence[Term] - The list of terms to compare and map onto each other. - - Returns - ------- - dict - Nested dictionary containing the indices of terms and the substitution - dict to map the terms onto each other, e.g., the substitutions to - map term j onto term i are stored as - {i: {j: substitutions}}. - If it was not possible to find a match for term_i, the inner dictionary - will be empty {i: {}}. - """ - - def compare_terms( - pattern: dict[tuple[str, str], dict[Index, list[str]]], - other_pattern: dict[tuple[str, str], dict[Index, list[str]]], - target: tuple[Index, ...], term: TermContainer, - other_term: TermContainer) -> None | list[tuple[Index, Index]]: - # function to compare two terms that are compatible, i.e., have the - # same amount of indices in each space, the same amount and type of - # objects and the same target indices - sub_list: list[dict[Index, Index]] = [] - for ov, idx_pattern in pattern.items(): - # only compare indices that belong to the same space - other_idx_pattern = other_pattern.get(ov, None) - # the other space is not available in the other term - # -> they cant match - if other_idx_pattern is None: - return None - # list to hold the substitution dictionaries of the current space - ov_sub_list: list[dict[Index, Index]] = [] - - for idx, pat in idx_pattern.items(): - # find all possible matches for the current idx - # if its a target idx -> only allow mapping on other target idx - is_target = idx in target - # list to collect all possible 
matches - matching_idx: list[Index] = [] - for other_idx, other_pat in other_idx_pattern.items(): - other_is_target = other_idx in target - # only 1 index is a target index -> cant map - # or both are different target indices - # -> cant map because we cant substitute target indices - if is_target != other_is_target or \ - (is_target and other_is_target and - idx is not other_idx): - continue - # the pattern of both indices is identical - # -> possible match - if pat == other_pat: - matching_idx.append(other_idx) - # could not find a match for idx -> no need to check further - if not matching_idx: - break - - if not ov_sub_list: # initialize the subdicts - ov_sub_list.extend({s: idx} for s in matching_idx) - else: # already initialized -> add when possible - new_ov_sub_list: list[dict[Index, Index]] = [] - for sub, other_idx in \ - itertools.product(ov_sub_list, matching_idx): - # other_idx is already mapped onto another idx - if other_idx in sub: - continue - # copy the sub_dict to avoid inplace modification - extended_sub = sub.copy() - extended_sub[other_idx] = idx - new_ov_sub_list.append(extended_sub) - ov_sub_list = new_ov_sub_list - if not ov_sub_list: # did not find any valid combination - # will not be able to construct complete sub dicts - # say we matched idx1 to some indices and then obtain - # no valid sub dicts after matching idx2 - # -> can only obtain sub dicts that do not contain idx1 - # and idx2 -> they can not be valid - # -> terms can not match! - return None - # Done with comparing the indices of a space - # -> check the result and create total substitution dicts - - # remove incomplete sub lists - # This might not be necessary anymore - ov_sub_list = [ - sub for sub in ov_sub_list - if sub.keys() == other_idx_pattern.keys() - ] - - if not ov_sub_list: # did not find a single complete sub dict - return None - - # initialize the final substitution dicts - if not sub_list: - sub_list.extend(ov_sub_list) - else: # combine the sub dicts. 
different spaces can not overlap - sub_list = [ - other_sp_sub | sub for other_sp_sub, sub in - itertools.product(sub_list, ov_sub_list) - ] - - # test all sub dicts to identify the correct one (if one exists) - for sub in sub_list: - sub = order_substitutions(sub) - sub_other_term = other_term.inner.subs(sub) - assert isinstance(sub_other_term, Expr) - # sub is not valid for other term: evaluates to 0 due to - # some antisymmetry e.g. t_ijcd -> t_ijcc = 0 - if sub_other_term is S.Zero and other_term.inner is not S.Zero: - continue - # diff (or sum) is a single term (no Add obj) - # can either sum up to 0 or to a single term with a different pref - # -> check for type of result and not for result value - if not isinstance(Add(term.inner, -sub_other_term), Add): - return sub - return None # no valid sub dict -> return None - - def repeating_idx_sp(idx_list: list[tuple[str, set[Index], set[Index]]]): - repeating_idx = [] - for idx1, idx2 in itertools.combinations(idx_list, 2): - descr1, descr2 = idx1[0], idx2[0] - for i1, i2 in itertools.product(idx1[1:], idx2[1:]): - repeated = i1 & i2 - if len(repeated) > 1: - repeated = "".join(sorted( - s.space[0] + s.spin for s in repeated - )) - repeating_idx.append((repeated, *sorted([descr1, descr2]))) - return tuple(sorted(repeating_idx)) - - if not all(isinstance(term, TermContainer) for term in terms): - raise Inputerror("Expected terms as a list of term Containers.") - - # prefilter terms according to - # - number of objects, excluding prefactor - # - type, name, space, spin, obj target indices and exponent of objects - # - the space of repeating indices subsets (2, 3, ...) 
that repeat on - # on multiple objects together in a common index subspace (upper/lower) - # - number of indices in each space - # - the target indices - filtered_terms: defaultdict[tuple, list[int]] = defaultdict(list) - term_pattern: list[dict[tuple[str, str], dict[Index, list[str]]]] = [] - term_target: list[tuple[Index, ...]] = [] - for term_i, term in enumerate(terms): - # target indices - target = term.target - term_target.append(target) - # pattern - pattern = term.pattern() - term_pattern.append(pattern) - # obj name, space, exponent, obj_target_indices, repeating_indices - descriptions: list[str] = [] - tensor_idx_list: list[tuple[str, set[Index], set[Index]]] = [] - length = 0 - for o in term.objects: - base = o.base - if (descr := o.description()) == 'prefactor': - continue - elif isinstance(base, AntiSymmetricTensor): - upper, lower = base.upper, base.lower - assert _is_index_tuple(upper) and _is_index_tuple(lower) - tensor_idx_list.append( - (descr, set(upper), set(lower)) - ) - elif isinstance(base, (KroneckerDelta, NonSymmetricTensor)): - tensor_idx_list.append((descr, set(o.idx), set())) - length += 1 - descriptions.append(descr) - pattern_key = tuple(sorted( - (sp, len(idx_pat)) for sp, idx_pat in pattern.items() - )) - key = (length, tuple(sorted(descriptions)), - repeating_idx_sp(tensor_idx_list), pattern_key, target) - filtered_terms[key].append(term_i) - - compatible_terms: dict[int, dict[int, list[tuple[Index, Index]]]] = {} - for term_idx_list in filtered_terms.values(): - # set to keep track of the already mapped terms - matched: set[int] = set() - for i, term_i in enumerate(term_idx_list): - if term_i in matched: # term already mapped - continue - - compatible_terms[term_i] = {} - - # data of the current term - term = terms[term_i] - target = term_target[term_i] - pattern = term_pattern[term_i] - - for other_i in range(i+1, len(term_idx_list)): - other_term_i = term_idx_list[other_i] - if other_term_i in matched: # term already mapped - 
continue - - sub = compare_terms( - pattern, term_pattern[other_term_i], - target, term, terms[other_term_i] - ) - # was possible to map the terms onto each other! - if sub is not None: - compatible_terms[term_i][other_term_i] = sub - matched.add(other_term_i) - return compatible_terms - - -def simplify(expr: ExprContainer) -> ExprContainer: - """ - Simplify an expression by permuting contracted indices. Thereby, terms - are mapped onto each other reducing the number of terms. - Currently this does not work for denominators of the form (a + b + ...). - However, this restriction can often be bypassed by using symbolic, - denominators, i.e., using a tensor of the correct symmetry to represent the - denominator. Alternatively, the functions found in 'reduce_expr' are - capable to handle orbital energy denominators. - - Parameters - ---------- - expr : ExprContainer - The expression to simplify - - Returns - ------- - ExprContainer - The simplified expression. - """ - assert isinstance(expr, ExprContainer) - expr = expr.expand() - if len(expr) == 1: # trivial: only a single term - return expr - # create terms and try to find comaptible terms that may be - # simplified by substituting indices - terms = expr.terms - equal_terms = find_compatible_terms(terms) - # substitue the indices in other_n and keep n as is - res = ExprContainer(0, **expr.assumptions) - for n, matches in equal_terms.items(): - res += terms[n] - for other_n, sub in matches.items(): - res += terms[other_n].subs(sub) - return res - - -def simplify_unitary(expr: ExprContainer, t_name: str, - evaluate_deltas: bool = False) -> ExprContainer: - """ - Simplifies an expression that contains unitary tensors by exploiting - U_pq * U_pr * Remainder = delta_qr * Remainder, - where the Remainder does not contain the index p. - - Parameters - ---------- - expr : Expr - The expression to simplify. - t_name : str - Name of the unitary tensor. 
- evaluate_deltas: bool, optional - If this is set, the generated KroneckerDeltas will be evaluated - before returning. - - Returns - ------- - Expr - The simplified expression. - """ - - def simplify_term_unitary(term: TermContainer) -> TermContainer: - objects = term.objects - # collect the indices of all unitary tensors in the term - unitary_tensors: list[int] = [] - for i, obj in enumerate(objects): - if obj.name == t_name: - exp = obj.exponent - assert exp.is_Integer - unitary_tensors.extend(i for _ in range(int(exp))) - - # only implemented for 2 dimensional unitary tensors - if any(len(objects[i].idx) != 2 for i in unitary_tensors): - raise NotImplementedError("Did only implement the case of 2D " - f"unitary tensors. Found {t_name} in " - f"{term}") - - # TODO: if we have a AntiSymmetricTensor as unitary tensor - # -> what kind of bra ket symmetry is possible? - # throw an error if it is set to +-1? - - # need at least 2 unitary tensors - if len(unitary_tensors) < 2: - return term - - # find the target indices - target = term.target - idx_counter = Counter(term.idx) - - # iterate over all pairs and look for matching contracted indices - # that do only occur on the two unitary tensors we want to simplify - for (i1, i2) in itertools.combinations(unitary_tensors, 2): - idx1 = objects[i1].idx - idx2 = objects[i2].idx - # U_pq U_pr = delta_qr - if idx1[0] == idx2[0] and idx1[0] not in target and \ - idx_counter[idx1[0]] == 2: - delta = KroneckerDelta(idx1[1], idx2[1]) - # U_qp U_rp = delta_qr - elif idx1[1] == idx2[1] and idx1[1] not in target and \ - idx_counter[idx1[1]] == 2: - delta = KroneckerDelta(idx1[0], idx2[0]) - else: # no matching indices - continue - - # lower the exponent of the 2 unitary tensors and - # add the created delta to the term - new_term = ExprContainer(delta, **term.assumptions) - if i1 == i2: - base, exponent = objects[i1].base_and_exponent - assert exponent.is_Integer - new_term *= Pow(base, int(exponent) - 2) - else: - b1, exponent1 = 
objects[i1].base_and_exponent - b2, exponent2 = objects[i2].base_and_exponent - assert exponent1.is_Integer and exponent2.is_Integer - new_term *= Pow(b1, int(exponent1) - 1) - new_term *= Pow(b2, int(exponent2) - 1) - - # add remaining objects - for i, o in enumerate(objects): - if i == i1 or i == i2: - continue - else: - new_term *= o - return simplify_term_unitary(new_term.terms[0]) - # could not find simplification -> return - return term - - assert isinstance(expr, ExprContainer) - - res = ExprContainer(0, **expr.assumptions) - for term in expr.terms: - res += simplify_term_unitary(term) - - # evaluate the generated deltas if requested - if evaluate_deltas: - res = ExprContainer(func.evaluate_deltas(res.inner), **res.assumptions) - return res - - -def remove_tensor(expr: ExprContainer, t_name: str - ) -> dict[tuple[str, ...], ExprContainer]: - """ - Removes a tensor from each term of an expression by undoing the contraction - of the remaining term with the tensor. The resulting expression is split - according to the blocks of the removed tensor. Note that only canonical - tensor blocks are considered, because the non-canonical blocks can be - generated from the canonical ones, e.g., removing a symmetric matrix d_{pq} - from an expression can only result in expressions for the 'oo', 'ov' and - 'vv' blocks, since d_{ai} = d_{ia}. - The symmetry of the removed tensor is taken into account, such that the - original expression can be restored if all block expressions are - contracted with the corresponding tensor blocks again. - Note that for ADC-Amplitudes a special prefactor is used. - - Parameters - ---------- - expr : ExprContainer - The expression where the tensor should be removed. - t_name : str - Name of the tensor that should be removed. - - Returns - ------- - dict[tuple[str, ...], ExprContainer] - key: Tuple of removed tensor blocks - value: Part of the original expression that contained the corresponding - blocks. 
If contracted with the tensor blocks again, a part of - the original expression is recovered. - """ - - def remove(term: TermContainer, tensor: ObjectContainer, - target_indices: dict[tuple[str, str], set[str]] - ) -> ExprContainer: - # - get the tensor indices - indices: Sequence[Index] = list(tensor.idx) - # - split the indices that are in the remaining term according - # to their space and spin to gather information about used indices - used_indices: dict[tuple[str, str], set[str]] = {} - for s in set(s for s, _ in term._idx_counter): - if (idx_key := s.space_and_spin) not in used_indices: - used_indices[idx_key] = set() - used_indices[idx_key].add(s.name) - # - check if the tensor is holding target indices. - # have to introduce a KroneckerDelta for each target index to avoid - # loosing indices in the term and replace the target indices on the - # tensor by new, unused indices: - # f_bc * Y^ac_ij -> delta_ik * delta_jl * delta_ad * f_bc * Y^dc_kl - - # get all target indices on the tensor, split according to their space - # and spin - tensor_target_indices: dict[tuple[str, str], list[Index]] = {} - for s in indices: - idx_key = s.space_and_spin - if s.name in target_indices.get(idx_key, []): - if idx_key not in tensor_target_indices: - tensor_target_indices[idx_key] = [] - if s not in tensor_target_indices[idx_key]: - tensor_target_indices[idx_key].append(s) - # - add the tensor indices to the term_indices to collect all - # not available indices - for s in indices: - if (idx_key := s.space_and_spin) not in used_indices: - used_indices[idx_key] = set() - used_indices[idx_key].add(s.name) - - if tensor_target_indices: - term_with_deltas = ExprContainer(term.inner, **term.assumptions) - for idx_key, idx_list in tensor_target_indices.items(): - if idx_key not in used_indices: - used_indices[idx_key] = set() - space, spin = idx_key - additional_indices = get_lowest_avail_indices( - len(idx_list), used_indices[idx_key], space - ) - # add the new indices to the 
unavailable indices - used_indices[idx_key].update(additional_indices) - # transform them from string to Dummies - if spin: - spins = spin * len(idx_list) - else: - spins = None - additional_indices = get_symbols(additional_indices, spins) - - sub = { - s: new_s for s, new_s in zip(idx_list, additional_indices) - } - # create a delta for each index and attach to the term - # and replace the index in tensor indices - for s, new_s in sub.items(): - term_with_deltas *= KroneckerDelta(s, new_s) - indices = [sub.get(s, s) for s in indices] - assert len(term_with_deltas) == 1 - term = term_with_deltas.terms[0] - del term_with_deltas - # - check for repeating indices: - # introduce a delta in the term for each repeating index - # e.g. d_iiij -> d_iklj // term <- delta_ik * delta_il - # Problem: this might introduce unstable deltas... - repeating_indices = {} - for s, n in Counter(indices).items(): - if n > 1: - if (idx_key := s.space_and_spin) not in repeating_indices: - repeating_indices[idx_key] = [] - repeating_indices[idx_key].extend(s for _ in range(n-1)) - if repeating_indices: - indices_i: dict[Index, list[int]] = {} - for i, s in enumerate(indices): - if s not in indices_i: - indices_i[s] = [] - indices_i[s].append(i) - # - iterate through the repeating indices and generate a new - # index for each repeating index. Use the repeating and the - # new index to create a KroneckerDelta. On AntiSymmetricTensors - # indices can at most twice, once in upper and once in lower. 
- # On NonSymmetricTensors no such limit exists -> implement for - # an arbitrary amount of repetitions - term_without_repeating = ExprContainer( - term.inner, **term.assumptions - ) - for idx_key, idx_list in repeating_indices.items(): - space, spin = idx_key - additional_indices = get_lowest_avail_indices( - len(idx_list), used_indices.get(idx_key, []), space - ) - if spin: - spins = spin * len(idx_list) - else: - spins = None - additional_indices = get_symbols(additional_indices, spins) - for s, new_s in zip(idx_list, additional_indices): - term_without_repeating *= KroneckerDelta(s, new_s) - # substitute the second occurence of s in tensor indices - indices[indices_i[s].pop(1)] = new_s - # no repeating indices left - assert max(Counter(indices).values()) == 1 - assert len(term_without_repeating) == 1 - term = term_without_repeating.terms[0] - del term_without_repeating - # - minimize the tensor indices by permuting contracted indices. - # Ensure indices occur in ascending order: kijab -> ijkab. 
- # target indices are excluded from this procedure: - # with target indices i, a: kijab -> jikab - indices, perms = minimize_tensor_indices(indices, target_indices) - # - apply the index permuations for minimizig the indices - # also to the term - res_term = term.permute(*perms) - del term - assert res_term.inner is not S.Zero - # - build a new tensor that holds the minimized indices - # further minimization might be possible taking the tensor - # symmetry into account, because we did not touch target indices: - # jikab -> d^jik_ab = - d^ijk_ab - raw_tensor = tensor.inner - if isinstance(raw_tensor, AntiSymmetricTensor): - bra_ket_sym = raw_tensor.bra_ket_sym - if isinstance(raw_tensor, Amplitude): # indices = lower, upper - n_l = len(raw_tensor.lower) - upper, lower = indices[n_l:], indices[:n_l] - else: # symtensor / antisymtensor, indices = upper, lower - n_u = len(raw_tensor.upper) - upper, lower = indices[:n_u], indices[n_u:] - res_tensor = ExprContainer(raw_tensor.__class__( - raw_tensor.name, upper, lower, bra_ket_sym - )).terms[0] - elif isinstance(raw_tensor, NonSymmetricTensor): - bra_ket_sym = None - res_tensor = ExprContainer( - NonSymmetricTensor(raw_tensor.name, indices) - ).terms[0] - else: - raise TypeError(f"Unknown tensor type {type(tensor.inner)}") - del raw_tensor - del tensor - # if we got a -1 -> move to the term - res_term *= res_tensor.prefactor - assert isinstance(res_term, ExprContainer) - # PREFACTOR: - # - For a contraction d^ij_ab we obtain an additional prefactor of 1/4 - # in the term, for d^ij_ka it is 1/2, or 1/4 for d^ij_kl - # -> it depends on the symmetry of the tensor we want to remove - # factor = n_perms + 1 - # -> need to remove it from the term: multiply by the term by the - # inverse factor (4 for d^ij_ab) - # - Additionally we need to ensure that the resulting expression - # preserves symmetry that was included in the input expression - # through the tensor we want to remove - # -> apply the tensor symmetry to the term - 
# d^ij_ab * X -> 1/4 (X - P_ij X - P_ab X + P_ij P_ab X) - # -> this leads to another factor of 1/(n_perms + 1) - # - For usual tensors both factors cancel each other exactly: - # (n_perms + 1) / (n_perms + 1) = 1 - # -> don't change the prefactor and just symmetrize the term - # - If the tensor has additionaly bra ket symmetry: - # swapping bra and ket will either result in an identical - # tensor block (diagonal block) - # or will give a non canonical block which is folded into - # the canonical block we are treating currently - # - diagonal block: multiply the term by 1/2 to keep the result - # normalized... we will get twice as many terms from applying - # the tensor symmetry as without bra ket symmetry - # -> the factor from lifting the index restrictions remains - # constant, while the factor for the symmetrisation is - # multiplied by 2: - # (n_perms + 1) / [2 (n_perms + 1)] = 1/2 - # - non-diagonal block: bra ket swap gives a non canonical block - # which can be folded into the canonical block: - # f_ia + f_ai = 2 f_ia - # However we only want to treat canonical tensor blocks. - # Therefore, we need to "remove" the contributions from the - # non-canonical blocks by multiplying with 1/2 - # -> if we have bra ket symmetry introduce a factor 1/2 - if bra_ket_sym is not None and bra_ket_sym is not S.Zero: - res_term *= Rational(1, 2) - assert isinstance(res_term, ExprContainer) - # - For ADC amplitudes we only have to multiply the term by - # sqrt(n_perms + 1), because the other part of the factor - # is hidden inside the amplitude vector to keep the vector - # norm constant when lifting index restrictions - # -> we obtain an overall factor of - # sqrt(n_perms + 1) / (n_perms + 1) = 1 / sqrt(n_perms + 1) - tensor_sym = res_tensor.symmetry() - if is_adc_amplitude(t_name): # are we removing an ADC amplitude? 
- if bra_ket_sym is not S.Zero: - raise ValueError("ADC amplitude vectors should have " - "no bra ket symmetry.") - res_term *= S.One / sqrt(len(tensor_sym) + 1) - assert isinstance(res_term, ExprContainer) - # - add the tensor indices to the target indices of the term - # but only if it is not possible to determine them with the einstein - # sum convention -> only if target indices have been set manually - if res_term.provided_target_idx is not None: - res_term.set_target_idx(res_term.provided_target_idx + indices) - # - apply the symmetry of the removed tensor to the term - symmetrized_term = res_term.copy() - for perms, sym_factor in tensor_sym.items(): - symmetrized_term += res_term.copy().permute(*perms) * sym_factor - # - reduce the number of terms as much as possible - return simplify(symmetrized_term) - - def process_term(term: TermContainer, t_name: str - ) -> dict[tuple[str, ...], ExprContainer | TermContainer]: - # print(f"\nProcessing term {term}") - # collect all occurences of the desired tensor - tensors: list[ObjectContainer] = [] - remaining_term = ExprContainer(1, **term.assumptions) - for obj in term.objects: - if obj.name == t_name: - tensors.append(obj) # we take care of the exponent later! - else: - remaining_term *= obj - if not tensors: # could not find the tensor - return {("none",): term} - # extract all the target indices and split according to their space - target_indices: dict[tuple[str, str], set[str]] = {} - for s in term.target: - if (idx_key := s.space_and_spin) not in target_indices: - target_indices[idx_key] = set() - target_indices[idx_key].add(s.name) - # remove the first occurence of the tensor - # and add all the remaining occurences back to the term - for remaining_t in tensors[1:]: - remaining_term *= remaining_t - # the tensor might have an exponent that we need to take care of! 
- tensor = tensors[0] - exponent = tensor.exponent - # I am not 100% sure atm how to remove tensors with exponents != 1 - # so wait for an actual example to come up and implement it then. - if exponent != 1: - raise NotImplementedError("Did not implement the case of removing " - f"tensors with exponents != 1: {t_name} " - f"in {term}") - assert len(remaining_term) == 1 - remaining_term = remove( - remaining_term.terms[0], tensor, target_indices - ) - # determine the space/block of the removed tensor - # used as key in the returned dict - spin = tensor.spin - if all(c == "n" for c in spin): - t_block = [tensor.space] - else: - t_block = [f"{tensor.space}_{spin}"] - # print(t_block, remaining_term) - if len(tensors) == 1: # only a single occurence no need to recurse - return {tuple(t_block): remaining_term} - else: # more than one occurence of the tensor - # iterate through the terms that already have the first occurence - # removed and recurse for each term - ret = {} - for t in remaining_term.terms: - # add the blocks to the already removed block - contribution = process_term(t, t_name) - for blocks, contrib in contribution.items(): - key = tuple(sorted(t_block + list(blocks))) - if key not in ret: - ret[key] = 0 - ret[key] += contrib - return ret - - assert isinstance(expr, ExprContainer) - assert isinstance(t_name, str) - # expr sorted by tensor block - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - for key, contrib in process_term(term, t_name).items(): - if key not in ret: - ret[key] = ExprContainer(0, **contrib.assumptions) - ret[key] += contrib - return ret diff --git a/build/lib/adcgen/sort_expr.py b/build/lib/adcgen/sort_expr.py deleted file mode 100644 index 2be1992..0000000 --- a/build/lib/adcgen/sort_expr.py +++ /dev/null @@ -1,382 +0,0 @@ -from collections import defaultdict -import itertools - -from sympy import Add, S - -from .eri_orbenergy import EriOrbenergy -from .expression import ExprContainer, TermContainer -from 
.indices import get_symbols, sort_idx_canonical -from .misc import Inputerror -from .simplify import simplify -from .symmetry import Permutation -from .sympy_objects import AntiSymmetricTensor, KroneckerDelta, SymmetricTensor - - -def by_delta_types(expr: ExprContainer - ) -> dict[tuple[str, ...], ExprContainer]: - """Sort the terms in an expression according to their space and spin.""" - assert isinstance(expr, ExprContainer) - expr = expr.expand() - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - d_blocks = [] - for delta in term.objects: - if not isinstance(delta.base, KroneckerDelta): - continue - spin = delta.spin - if all(c == "n" for c in spin): # no indices with spin - block = delta.space - else: - block = f"{delta.space}_{spin}" - exp = delta.exponent - assert exp.is_Integer - d_blocks.extend(block for _ in range(int(exp))) - d_blocks = tuple(sorted(d_blocks)) - if not d_blocks: - d_blocks = ('none',) - if d_blocks not in ret: - ret[d_blocks] = ExprContainer(0, **term.assumptions) - ret[d_blocks] += term - return ret - - -def by_delta_indices(expr: ExprContainer - ) -> dict[tuple[str, ...], ExprContainer]: - """ - Sort the terms in an expression according to the names and spin of indices - on the KroneckerDeltas in each term. - """ - assert isinstance(expr, ExprContainer) - expr = expr.expand() - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - d_idx = tuple(sorted( - "".join(str(s) for s in o.idx) for o in term.objects - if isinstance(o.base, KroneckerDelta) - for _ in range(int(o.exponent)) - )) - if not d_idx: - d_idx = ('none',) - if d_idx not in ret: - ret[d_idx] = ExprContainer(0, **term.assumptions) - ret[d_idx] += term - return ret - - -def by_tensor_block(expr: ExprContainer, t_name: str - ) -> dict[tuple[str, ...], ExprContainer]: - """ - Sort the terms in an expression according to the blocks of a tensor. 
- """ - assert isinstance(t_name, str) - assert isinstance(expr, ExprContainer) - expr = expr.expand() - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - t_blocks = [] - for tensor in term.objects: - if tensor.name != t_name: - continue - spin = tensor.spin - if all(c == "n" for c in spin): - block = tensor.space - else: - block = f"{tensor.space}_{spin}" - exp = tensor.exponent - assert exp.is_Integer - t_blocks.extend(block for _ in range(int(exp))) - t_blocks = tuple(sorted(t_blocks)) - if not t_blocks: - t_blocks = ("none",) - if t_blocks not in ret: - ret[t_blocks] = ExprContainer(0, **term.assumptions) - ret[t_blocks] += term - return ret - - -def by_tensor_target_block(expr: ExprContainer, t_name: str - ) -> dict[tuple[str, ...], ExprContainer]: - """ - Sort the terms in an expression according to the type of target indices on - the specified tensor, e.g. f_cc Y_ij^ac, where i, j and a are target - indices: - -> if sorting according to the indices on Y: (oov,); - if sorting acording to the indices on f: (none,). 
- """ - assert isinstance(t_name, str) - assert isinstance(expr, ExprContainer) - expr = expr.expand() - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - key = [] - target = term.target - for tensor in term.objects: - if tensor.name == t_name: - # indices are in canonical order - tensor_target = [s for s in tensor.idx if s in target] - if not tensor_target: # no target indices on the tensor - key.append("none") - continue - tensor_target_block = "".join( - s.space[0] for s in tensor_target - ) - if any(s.spin for s in tensor_target): # spin is defined - spin = "".join( - s.spin if s.spin else "n" for s in tensor_target - ) - tensor_target_block += f"_{spin}" - key.append(tensor_target_block) - key = tuple(sorted(key)) # in case of multiple occurences - if not key: # did not find a single occurence of the tensor - key = (f'no_{t_name}',) - if key not in ret: - ret[key] = ExprContainer(0, **term.assumptions) - ret[key] += term - return ret - - -def by_tensor_target_indices(expr: ExprContainer, t_name: str - ) -> dict[tuple[str, ...], ExprContainer]: - """ - Sort the terms in an expression according to the names of target indices on - the specified tensor. 
- """ - assert isinstance(t_name, str) - assert isinstance(expr, ExprContainer) - expr = expr.expand() - ret: dict[tuple[str, ...], ExprContainer] = {} - for term in expr.terms: - key = [] - target = term.target - for obj in term.objects: - if obj.name == t_name: - # indices are in canonical order - obj_target_idx = "".join( - [s.name for s in obj.idx if s in target] - ) - if not obj_target_idx: - obj_target_idx = "none" - key.append(obj_target_idx) - key = tuple(sorted(key)) # in case the tensor occurs more than once - if not key: # tensor did not occur in the term - key = (f"no_{t_name}",) - if key not in ret: - ret[key] = ExprContainer(0, **term.assumptions) - ret[key] += term - return ret - - -def exploit_perm_sym( - expr: ExprContainer, target_indices: str | None = None, - target_spin: str | None = None, bra_ket_sym: int = 0, - antisymmetric_result_tensor: bool = True - ) -> dict[tuple[tuple[tuple[Permutation, ...], int], ...], ExprContainer]: # noqa E501 - """ - Reduces the number of terms in an expression by exploiting the symmetry: - by applying permutations of target indices it might be poossible to map - terms onto each other reducing the overall number of terms. - - Parameters - ---------- - expr : Expr - The expression to probe for symmetry. - target_indices : str | None, optional - The names of target indices of the expression. Bra and ket indices - should be separated by a ',' to lower the amount of permutations the - expression has to be probed for, e.g., to differentiate 'ia,jb' - from 'ij,ab'. If not provided, the function will try to determine the - target indices automatically and probe for the complete symmetry found - for these indices. - target_spin : str | None , optional - The spin of the target indices, e.g., 'aabb' to indicate that the - first 2 target indices have alpha spin, while number 3 and 4 have - beta spin. If not given, target indices without spin will be used. 
- bra_ket_sym : int, optional - Defines the bra-ket symmetry of the result tensor of the expression. - Only considered if the names of target indices are separated by a ','. - antisymmetric_result_tensor : bool, optional - If set, the result tensor will be treated as AntiSymmetricTensor - d_{ij}^{ab} = - d_{ji}^{ab}. Otherwise, a SymmetricTensor will be used - to mimic the symmetry of the result tensor, i.e., - d_{ij}^{ab} = d_{ji}^{ab}. (default: True) - - Returns - ------- - dict - The remaining terms sorted by the found permutations. - key: The permutations. - value: The part of the expression to which the permutations have to be - applied in order to recover the original expression. - """ - from .reduce_expr import factor_eri_parts, factor_denom - - def simplify_terms_with_denom(sub_expr: ExprContainer): - factored = itertools.chain.from_iterable( - factor_denom(sub_e) for sub_e in factor_eri_parts(sub_expr) - ) - ret = ExprContainer(0, **sub_expr.assumptions) - for term in factored: - ret += term.factor() - return ret - - assert isinstance(expr, ExprContainer) - if expr.inner.is_number: - return {tuple(): expr} - expr.expand() - terms: tuple[TermContainer, ...] 
= expr.terms - - # check that each term in the expr contains the same target indices - ref_target = terms[0].target - if not expr.provided_target_idx and \ - any(term.target != ref_target for term in terms): - raise Inputerror("Each term in the expression needs to contain the " - "same target indices.") - - # if target indices have been provided - # -> check that they match with the found target indices - if target_indices is not None: - # split in upper/lower indices if possible - if "," in target_indices: - upper, lower = target_indices.split(",") - else: - if bra_ket_sym: - raise Inputerror("Target indices need to be separated by a " - "',' to indicate where to split them in " - "upper and lower indices if the target tensor" - "has bra-ket-symmetry.") - upper, lower = target_indices, "" - # treat the spin - if target_spin is not None: - if "," in target_spin: - upper_spin, lower_spin = target_spin.split(",") - else: - upper_spin = target_spin[:len(upper)] - lower_spin = target_spin[len(upper):] - if len(upper) != len(upper_spin) or len(lower) != len(lower_spin): - raise Inputerror(f"The target indices {target_indices} are " - " not compatible with the provided spin " - f"{target_spin}.") - else: - upper_spin, lower_spin = None, None - - upper = get_symbols(upper, upper_spin) - lower = get_symbols(lower, lower_spin) - sorted_provided_target = tuple(sorted( - upper + lower, key=sort_idx_canonical - )) - if sorted_provided_target != ref_target: - raise Inputerror(f"The provided target indices {target_indices} " - "are not equal to the target indices found in " - f"the expr: {ref_target}.") - else: # just use the found target indices - # if no target indices have been provided all indices are in upper - # -> bra ket sym is irrelevant - upper, lower = ref_target, tuple() - bra_ket_sym = 0 - # build a tensor holding the target indices and determine its symmetry - if antisymmetric_result_tensor: - tensor = AntiSymmetricTensor("x", upper, lower, bra_ket_sym) - else: - 
tensor = SymmetricTensor("x", upper, lower, bra_ket_sym) - symmetry = ExprContainer(tensor).terms[0].symmetry() - - # prefilter the terms according to the contained objects (name, space, exp) - # and if a denominator is present -> number and length of the brackets - filtered_terms: defaultdict[tuple, list[int]] = defaultdict(list) - has_denom: list[bool] = [] - for term_i, term in enumerate(terms): - term_splitted = EriOrbenergy(term) - has_denom.append(not term_splitted.denom.inner.is_number) - eri_descr: tuple[str, ...] = tuple(sorted( - o.description(target_idx=None) - for o in term_splitted.eri.objects - )) - idx_space = "".join(sorted( - s.space[0] + s.spin for s in term_splitted.eri.contracted - )) - key = (eri_descr, term_splitted.denom_description(), idx_space) - filtered_terms[key].append(term_i) - - ret: dict[tuple[tuple[tuple[Permutation, ...], int], ...], ExprContainer] = {} # noqa E501 - removed_terms: set[int] = set() - for term_idx_list in filtered_terms.values(): - # term is unique -> nothing to compare with - # can not map this term onto any other terms - if len(term_idx_list) == 1: - if tuple() not in ret: - ret[tuple()] = ExprContainer(0, **expr.assumptions) - ret[tuple()] += terms[term_idx_list[0]] - continue - - # decide which function to use for comparing the terms - terms_have_denom = has_denom[term_idx_list[0]] - assert all( - terms_have_denom == has_denom[term_i] for term_i in term_idx_list - ) - if terms_have_denom: - simplify_terms = simplify_terms_with_denom - else: - simplify_terms = simplify - - # first loop over terms!! - # Otherwise it is not garuanteed that all matches for a term can - # be found: consider 4 terms with ia, ja, ib and jb - # we want to find: P_ab, P_ij and P_ijP_ab for ia (or any other term) - # if we first loop over perms, e.g., P_ab we may find - # ia -> ib, ja -> jb for instance. 
- # -> we will not be able to find the full symmetry of the terms - for term_i in term_idx_list: - if term_i in removed_terms: - continue - term = terms[term_i] - found_sym: list[tuple[tuple[Permutation, ...], int]] = [] - for perms, factor in symmetry.items(): - # apply the permutations to the current term - perm_term = term.permute(*perms) - # permutations are not valid for the current term - if perm_term.inner is S.Zero and term.inner is not S.Zero: - continue - # check if the permutations did change the term - # if the term is still the same (up to the sign) continue - # thereby only looking for the desired symmetry - if factor == -1: - # looking for antisym: P_pq X = - X -> P_pq X + X = 0? - if Add(perm_term.inner, term.inner) is S.Zero: - continue - elif factor == 1: - # looking for sym: P_pq X = + X -> P_pq X - X = 0? - if Add(perm_term.inner, -term.inner) is S.Zero: - continue - else: - raise ValueError(f"Invalid sym factor {factor}.") - # perm term != term -> compare to other terms - for other_term_i in term_idx_list: - if term_i == other_term_i or other_term_i in removed_terms: - continue - # compare the terms: again only look for the desired - # symmetry - if factor == -1: - # looking for antisymmetry: X - X' - # P_pq X + (-X') = 0 | P_pq X = +X' - simplified = ( - simplify_terms(perm_term + terms[other_term_i]) - ) - else: # factor == 1 - # looking for symmetry: X + (X') - # P_pq X - X' = 0 | P_pq X = +X' - simplified = ( - simplify_terms(perm_term - terms[other_term_i]) - ) - # could not map the terms onto each other - if simplified.inner is not S.Zero: - continue - # mapped the terms onto each other - removed_terms.add(other_term_i) - found_sym.append((perms, factor)) - break - # use the found symmetry as dict key - found_sym_tpl = tuple(found_sym) - if found_sym_tpl not in ret: - ret[found_sym_tpl] = ExprContainer(0, **expr.assumptions) - ret[found_sym_tpl] += term - return ret diff --git a/build/lib/adcgen/spatial_orbitals.py 
b/build/lib/adcgen/spatial_orbitals.py deleted file mode 100644 index 732df87..0000000 --- a/build/lib/adcgen/spatial_orbitals.py +++ /dev/null @@ -1,443 +0,0 @@ -from collections import Counter -from itertools import product -from typing import Sequence - -from .expression import ExprContainer -from .logger import logger -from .misc import Inputerror -from .indices import ( - Index, get_symbols, order_substitutions, sort_idx_canonical, - _is_str_sequence -) -from .simplify import simplify - - -def transform_to_spatial_orbitals(expr: ExprContainer, target_idx: str, - target_spin: str, - restricted: bool = False, - expand_eri: bool = True) -> ExprContainer: - """ - Transforms an expression to a spatial orbital basis by integrating over - the spin of the spin orbitals, i.e., a spin is attached to all indices. - Furthermore, the antisymmetric ERI's are replaced by the in this context - more commonly used coulomb integrals in chemist notation. - Target indices of the expression are updated if necessary. - - Parameters - ---------- - expr : ExprContainer - Expression to express in terms of spatial orbitals. - target_idx : str - The names of target indices of the expression. Needs to be provided, - because the target indices in the expression are stored in canonical - order, which might not be correct. - target_spin : str - The spin of the target indices, e.g., 'aa' for 2 alpha orbitals. - restricted : bool, optional - Whether a restricted reference (equal alpha and beta orbitals) - should be assumed. In case of a restricted reference, only alpha - orbitals will be present in the returned expression. - (default: False) - expand_eri : bool, optional - If set, the antisymmetric ERI (in physicist notation) are expanded - to coulomb integrals using chemist notation - = - = (pr|qs) - (ps|qr), - where by default a SymmetricTensor 'v' is used to represent the - coulomb integrals. 
- """ - - # perform the integration first, since the intermediates are defined - # in terms of the antisymmetric ERI - expr = integrate_spin(expr, target_idx, target_spin) - if expand_eri: - expr.expand_antisym_eri().expand() - if not restricted: - return expr - # in the restricted case we can replace all beta orbitals by the - # corresponding alpha orbitals. - # It should be fine to keep the name and only adjust the spin of the - # indices: - # - in the input expression we only have spin orbitals - # - during the integration we generate multiple terms mapping each index - # to a spin - # -> the names are still unique, i.e., at this point each term might only - # hold an index of a certain name with either alpha or beta spin but - # not both of them simultaneously - restricted_expr: ExprContainer = ExprContainer(0, **expr.assumptions) - if expr.provided_target_idx is not None: - # update the target indices - restricted_target = get_symbols(target_idx, "a" * len(target_spin)) - restricted_expr.set_target_idx(restricted_target) - for term in expr.terms: - idx = set(term.idx) - beta_idx = [i for i in idx if i.spin == "b"] - if not beta_idx: - restricted_expr += term.inner - continue - new_idx = get_symbols([i.name for i in beta_idx], "a"*len(beta_idx)) - sub: dict[Index, Index] = {} - for old, new in zip(beta_idx, new_idx): - # conststruct the alpha index - if new in idx: - raise RuntimeError("It is not safe to replace the beta index " - f"{old} with the corresponding alpha index," - " because the index with alpha spin is " - f"already used in the term: {term}.") - sub[old] = new - restricted_expr += term.inner.subs(order_substitutions(sub)) - assert isinstance(restricted_expr, ExprContainer) - return restricted_expr - - -def integrate_spin(expr: ExprContainer, target_idx: str, - target_spin: str) -> ExprContainer: - """ - Integrates over the spin of the spin orbitals to transform an expression - to a spatial orbital basis, i.e, a spin is attached to all indices. 
- Target indices in the expression will be updated if necessary. - - Parameters - ---------- - expr : ExprContainer - Expression where the spin is integrated. - target_idx : str - Names of target indices of the expression. - target_spin : str - Spin of target indices of the expression. - """ - assert isinstance(expr, ExprContainer) - # - validate the target indices and target spin - target_symbols = get_symbols(target_idx) - if len(target_symbols) != len(target_spin): - raise Inputerror(f"Spin {target_spin} and indices {target_symbols} are" - " not compatible.") - target_idx_spins: dict[Index, str] = {} - for idx, spin in zip(target_symbols, target_spin): - if idx in target_idx_spins and target_idx_spins[idx] != spin: - raise ValueError(f"The index {idx} can not be assigned to alpha " - "and beta spin simultaneously.") - target_idx_spins[idx] = spin - # - sort the target indices to validate that the terms have the correct - # target indices and build the target spins - sorted_target = tuple(sorted(target_idx_spins, key=sort_idx_canonical)) - target_spins = [target_idx_spins[idx] for idx in sorted_target] - del target_idx_spins - # - generate the new target indices of the resulting expression to set - # them if needed - result_target = get_symbols([s.name for s in target_symbols], target_spin) - - result: ExprContainer = ExprContainer(0, **expr.assumptions) - if expr.provided_target_idx is not None: - result.set_target_idx(result_target) - - for term in expr.terms: - logger.debug(f"Integrating spin in term {term}") - # - ensure that the term has matching target indices - term_target = term.target - if term_target != sorted_target: - raise ValueError(f"Target indices {term_target} of term {term} " - "don't match the desired target indices " - f"{target_symbols}") - # - ensure that no index in the term is holding a spin - if any(s.spin for s in term.idx): - raise ValueError("The function assumes that the input expression " - "is expressed in terms of spin orbitals. 
Found " - f"a spatial orbital in term {term}.") - # we have no indices (the term is a number) we don't have anything - # to do - if not term.idx: - logger.debug(f"Result = {term}") - result += term.inner - continue - # - build a list of indices and base map for the spins of the indices - # starting with the target indices - term_contracted = term.contracted - term_indices = (*term_target, *term_contracted) - assert all(v == 1 for v in Counter(term_indices).values()) - base_spins: list[str | None] = [spin for spin in target_spins] - base_spins.extend(None for _ in range(len(term_contracted))) - # - for each object in the term: go through the allowed spin_blocks and - # try to add them to the base spins (target spins) in order to form a - # valid variants where all indices are assigned to a spin. - spin_variants: list[list[str | None]] = [base_spins] - term_vanishes: bool = False - for obj in term.objects: - allowed_blocks = obj.allowed_spin_blocks - # hit a Polynom, Prefactor or unknown tensor - if allowed_blocks is None: - continue - # we have some allowed blocks to check - # -> try to form valid combinations assigning all indices to a spin - indices: tuple[int, ...] 
= tuple( - term_indices.index(idx) for idx in obj.idx - ) - old_spin_variants = spin_variants.copy() - spin_variants.clear() - for block in allowed_blocks: - # - ensure that the block is valid: a index can not be - # assigned to alpha and beta at the same time - addition: list[str | None] = [ - None for _ in range(len(term_indices)) - ] - for spin, idx in zip(block, indices): - if addition[idx] is not None and addition[idx] != spin: - raise ValueError("Found invalid allowed spin block " - f"{block} for {obj}.") - addition[idx] = spin - # check for contracdictions with the target_spin and skip the - # block if this is the case - if any(sp1 != sp2 for sp1, sp2 in - zip(target_spins, addition[:len(term_target)]) - if sp2 is not None): - continue - # iterate over the existing variants and try to add the - # addition - for old_variant in old_spin_variants: - # check for any contradiction - if any(sp1 != sp2 for sp1, sp2 in - zip(old_variant, addition) - if sp1 is not None and sp2 is not None): - continue - # add the addition to the old variant - combination = [sp1 if sp2 is None else sp2 - for sp1, sp2 in zip(old_variant, addition)] - # we only need unique variants -> remove duplicates - if any(comb == combination for comb in spin_variants): - continue - spin_variants.append(combination) - # we could not find a single valid combination for the given - # object -> the term has to vanish - if not spin_variants: - term_vanishes = True - break - if term_vanishes: - logger.debug("Result = 0") - continue - # collect the result in a separate expression such that we can call - # simplify before adding the contribution to the result - contribution: ExprContainer = ExprContainer(0, **expr.assumptions) - if expr.provided_target_idx is not None: # if necessary update target - contribution.set_target_idx(result_target) - # - iterate over the unique combinations, replace the spin orbitals - # by the corresponding spatial orbitals (assign a spin to the - # indices) and add the 
corresponding terms to the result. - # Thereby, ensure that all indices have a spin assigned and - # try to assign a spin for not yet assigned indices: - # since all variants are initialized with the target spins - # set, only contracted indices can not be assigned - # -> generate a variant for alpha and beta since both are allowed - for spin_var in spin_variants: - missing_contracted = [ - idx for idx, spin in enumerate(spin_var) - if idx >= len(target_spin) and spin is None - ] - # construct variants for missing contracted indices assuming that - # alpha and beta spin is allowed. - if missing_contracted: - variants: list[list[str | None]] = [] - for spins in product("ab", repeat=len(missing_contracted)): - complete_variant = spin_var.copy() - for spin, idx in zip(spins, missing_contracted): - complete_variant[idx] = spin - variants.append(complete_variant) - else: - variants: list[list[str | None]] = [spin_var] - # go through the variants and perform the actual substitutions - for variant in variants: - # ensure that we indeed assigned all spins - assert _is_str_sequence(variant) - - new_indices = get_symbols( - indices=[s.name for s in term_indices], - spins="".join(variant) - ) - sub = { - old: new for old, new in zip(term_indices, new_indices) - } - contrib = term.inner.subs(order_substitutions(sub)) - logger.debug(f"Found contribution {contrib}") - contribution += contrib - # TODO: if we simplify the result it will throw an error for any - # polynoms or denominators. Should we skip the simplification altough - # we currently don't treat polynoms correctly in this function - # since their allowed_spin_blocks are not considered. - assert isinstance(contribution, ExprContainer) - result += simplify(contribution) - return result - - -def allowed_spin_blocks(expr: ExprContainer, - target_idx: Sequence[str]) -> tuple[str, ...]: - """ - Determines the allowed spin blocks of an expression. 
Thereby, it is assumed - that the allowed spin blocks of tensors in the expression are either known - or can be determined on the fly, i.e., this only works for closed - expressions. - - Parameters - ---------- - expr : ExprContainer - The expression to check. - target_idx : Sequence[str] - The target indices of the expression. - """ - - assert isinstance(expr, ExprContainer) - - target_symbols = get_symbols(target_idx) - sorted_target = tuple(sorted(target_symbols, key=sort_idx_canonical)) - - # - determine all possible spin blocks - spin_blocks: list[str] = [ - "".join(b) for b in product("ab", repeat=len(target_symbols)) - ] - spin_blocks_to_check: list[int] = [i for i in range(len(spin_blocks))] - - allowed_blocks: set[str] = set() - for term in expr.terms: - # - ensure that the term has matching target indices - if term.target != sorted_target: - raise ValueError(f"Target indices {term.target} of {term} dont " - "match the provided target indices " - f"{target_symbols}") - # - extract the allowed blocks for all tensors and initialize - # index maps to relate indices to a spin - term_idx_maps: list[tuple[list[dict[Index, str]], int]] = [] - for obj in term.objects: - allowed_object_blocks = obj.allowed_spin_blocks - # hit a Polynom, Prefactor or unknown tensor - if allowed_object_blocks is None: - continue - obj_indices = obj.idx - n_target = len([ - idx for idx in obj_indices if idx in target_symbols - ]) - object_idx_maps: list[dict[Index, str]] = [] - for block in allowed_object_blocks: - idx_map = {} - for spin, idx in zip(block, obj_indices): - if idx in idx_map and idx_map[idx] != spin: - raise ValueError("Found invalid allowed spin block " - f"{block} for {obj}.") - idx_map[idx] = spin - object_idx_maps.append(idx_map) - term_idx_maps.append((object_idx_maps, n_target)) - # - sort the allowed_tensor_blocks such that tensors with a high - # number of target indices are preferred - term_idx_maps = sorted(term_idx_maps, - key=lambda tpl: tpl[1], 
reverse=True) - - term_indices = set(term.idx) - blocks_to_remove: set[int] = set() - for block_i in spin_blocks_to_check: - block = spin_blocks[block_i] - if block in allowed_blocks: - blocks_to_remove.add(block_i) - continue - valid_block = True - - # - assign the target indices to a spin - target_spin: dict[Index, str] = {} - for spin, idx in zip(block, target_symbols): - # in case we have target indices iiab only spin blocks - # aaxx or bbxx are valid - if idx in target_spin and target_spin[idx] != spin: - valid_block = False - break - target_spin[idx] = spin - if not valid_block: - continue - - # - remove all object spin blocks that are in contradiction to the - # current spin block - relevant_term_spin_idx_maps: list[list[dict[str, set[Index]]]] = [] - for tensor_idx_maps, _ in term_idx_maps: - relevant_object_spin_idx_maps: list[dict[str, set[Index]]] = [] - for idx_map in tensor_idx_maps: - # are all target idx compatible with the block? - if any(spin != idx_map[t_idx] - for t_idx, spin in target_spin.items() - if t_idx in idx_map): - continue - spin_idx_map: dict[str, set[Index]] = { - "a": set(), "b": set() - } - for idx, spin in idx_map.items(): - spin_idx_map[spin].add(idx) - relevant_object_spin_idx_maps.append(spin_idx_map) - # the object has not a single allowed spin block that is - # compatible to the currently probed block - if not relevant_object_spin_idx_maps: - valid_block = False - break - relevant_term_spin_idx_maps.append( - relevant_object_spin_idx_maps - ) - # at least 1 object has no compatible allowed spin block - # -> the current term can not contribute to the current block - if not valid_block: - continue - - # - try to find a valid combination of the remaining spin blocks - spin_idx_map: dict[str, set[Index]] = {"a": set(), "b": set()} - if not _has_valid_combination(relevant_term_spin_idx_maps, 0, - spin_idx_map): - continue - # - verify the result: - # ensure that all indices are assigned - # the target indices have the desired spin 
- # there is no intersection between the different spins - if spin_idx_map["a"] & spin_idx_map["b"]: - raise RuntimeError("Indices are assigned to alpha and beta " - f"simultaneously in term {term}: ", - spin_idx_map) - if term_indices ^ (spin_idx_map["a"] | spin_idx_map["b"]): - raise RuntimeError("Not all indices were assigned to a spin: " - f"{term_indices} -> {spin_idx_map}") - if any(idx not in spin_idx_map[spin] - for idx, spin in target_spin.items()): - raise RuntimeError("Target index has wrong spin. Desired: " - f"{target_spin}. Found: {spin_idx_map}.") - # everything should be fine! - # also add the 'inverse' block to the allowed blocks - allowed_blocks.add(block) - allowed_blocks.add("".join("a" if spin == "b" else "b" - for spin in block)) - blocks_to_remove.add(block_i) - # blocks that have been found dont need to be checked again - spin_blocks_to_check = [i for i in spin_blocks_to_check - if i not in blocks_to_remove] - return tuple(sorted(allowed_blocks)) - - -def _has_valid_combination(tensor_idx_maps: list[list[dict[str, set[Index]]]], - current_pos: int, variant: dict[str, set[Index]] - ) -> bool: - """ - Tries to recursively assign all indices to a spin without introducing - contradictions. Returns immediately when all indices could be assigned - successfully. - """ - - for idx_map in tensor_idx_maps[current_pos]: - # look for any contradictions - if idx_map["a"] & variant["b"] or idx_map["b"] & variant["a"]: - continue - # compute the indices which are added to remove them later again - # if necessary - addition: dict[str, tuple[Index, ...]] = { - "a": tuple(idx for idx in idx_map["a"] if idx not in variant["a"]), - "b": tuple(idx for idx in idx_map["b"] if idx not in variant["b"]) - } - variant["a"].update(idx_map["a"]) - variant["b"].update(idx_map["b"]) - if len(tensor_idx_maps) == current_pos + 1: # we are done!! 
- return True - # recurse further and try to complete - if _has_valid_combination(tensor_idx_maps, current_pos+1, variant): - return True - # could not complete -> revert the addition and continue looping - variant["a"].difference_update(addition["a"]) - variant["b"].difference_update(addition["b"]) - # could not add anything to the variant - return False diff --git a/build/lib/adcgen/symmetry.py b/build/lib/adcgen/symmetry.py deleted file mode 100644 index 9f17fba..0000000 --- a/build/lib/adcgen/symmetry.py +++ /dev/null @@ -1,368 +0,0 @@ -from collections.abc import Sequence -from collections import defaultdict -from functools import cached_property -import itertools - -from sympy import Add, S - -from .expression import ExprContainer, TermContainer -from .indices import Index, sort_idx_canonical -from .misc import cached_member, Inputerror - - -class Permutation(tuple[Index, Index]): - """ - Represents a permutation operator P_{pq} that permutes the indices p and q. - """ - - def __new__(cls, p: Index, q: Index): - if sort_idx_canonical(p) < sort_idx_canonical(q): - args = (p, q) - else: - args = (q, p) - return super().__new__(cls, args) - - def __str__(self): - return f"P_{self[0].name}{self[1].name}" - - def __repr__(self): - return f"P_{self[0].name}{self[1].name}" - - -class PermutationProduct(tuple[Permutation, ...]): - """ - Represents a product of permutation operators P_{pq}P{rs}. - The permutations are sorted taking into account that it is only possible - to rearrange permutation operators if the indices belong to different - spaces, e.g., P_{ab}P_{ij} = P_{ij}P_{ab}. - """ - - def __new__(cls, *args: Permutation): - # identify spaces that are linked to each other - # the order of permutations within a linked group has to be maintained! - # e.g. 
P_ia * P_ij * P_ab * P_pq - # the spaces o and v are linked -> the order of the first 3 - # permutations has to be maintained, while P_pq can be moved - # to any arbitrary place - splitted = cls.split_in_separable_parts(args) - sorted_args = [val for _, val in sorted(splitted.items())] - return super().__new__(cls, itertools.chain.from_iterable(sorted_args)) - - @staticmethod - def split_in_separable_parts(permutations: Sequence[Permutation] - ) -> dict[str, list[Permutation]]: - """ - Splits the permutations in subsets that can be treated independently - of each other. - """ - - # split the permutations according to their index space - # and identify spaces that are linked to each other through at least - # 1 permutation - perm_spaces: list[set[str]] = [] - links: list[set[str]] = [] - for perm in permutations: - p, q = perm - space: set[str] = set((p.space[0] + p.spin, q.space[0] + q.spin)) - perm_spaces.append(space) - - if len(space) > 1: # identify linking permutations - if space not in links: - links.append(space) - - if len(links) == 0: # no links, all spaces separated - linked_spaces: list[set[str]] = [] - elif len(links) == 1: # exactly 2 spaces are linked - linked_spaces: list[set[str]] = links - else: # more than 2 spaces linked: either ov, ox or ov, xy - treated: set[int] = set() - linked_spaces: list[set[str]] = [] - for i, linked_sp in enumerate(links): - if i in treated: - continue - linked = linked_sp.copy() - for other_i in range(i+1, len(links)): - if other_i in treated: - continue - if linked_sp & links[other_i]: - linked.update(links[other_i]) - treated.add(other_i) - linked_spaces.append(linked) - - # sort them in groups that can be treated independently - ret: dict[str, list[Permutation]] = {} - for perm, space in zip(permutations, perm_spaces): - # if the current space is linked to other spaces - # -> replace the space by the linked space - for linked_sp in linked_spaces: - if any(sp in linked_sp for sp in space): - space = linked_sp - break 
- space_str = "".join(sorted(space)) - if space_str not in ret: - ret[space_str] = [] - ret[space_str].append(perm) - return ret - - -class LazyTermMap: - """ - Establishes a term map for an expression that contains information about - terms that can be mapped onto each other when permuting target indices of - the expression. - """ - - def __init__(self, expr: ExprContainer): - self._expr = expr - # init all term container objects - self._terms: tuple[TermContainer, ...] = expr.terms - # {(perms, factor): {i: other_i}} - self._term_map: \ - dict[tuple[tuple[Permutation, ...], int], dict[int, int]] = {} - - def evaluate(self, antisymmetric_result_tensor: bool = True - ) -> dict: - """ - Fully evaluates the term map of the expression by probing all - possible permutations of target indices. - Due to an ambiguous definition of the symmetry by means of products of - permutation operators - (ijk -> kij can be obtained by applying P_{ij}P_{ik} or P_{ik}P_{jk}) - it might still be possible to encounter unevaluated entries at a - later point. - - Parameters - ---------- - antisymmetric_result_tensor: bool, optional - The result tensor is either represented by an AntiSymmetricTensor - (True) or by a SymmetricTensor (False). (default: True) - """ - from .sympy_objects import AntiSymmetricTensor, SymmetricTensor - - # if we put all indices in lower bra-ket sym is not important - if antisymmetric_result_tensor: - tensor = AntiSymmetricTensor("x", tuple(), self.target_indices) - else: - tensor = SymmetricTensor("x", tuple(), self.target_indices) - tensor = ExprContainer(tensor).terms[0] - for sym in tensor.symmetry().items(): - self[sym] - return self._term_map - - def __getitem__(self, symmetry: tuple[tuple[Permutation, ...], int] - ) -> dict[int, int]: - """ - Checks whether a given symmetry as already been evaluated and probes - the expression for the symmetry if this is not the case. 
- - Parameters - ---------- - symmetry : tuple - A tuple containing the permutations and the corresponding factor: - +1 to probe for symmetry (+ P_{pq}P_{rs}...) and - -1 to probe for antisymmetry (- P_{pq}P_{rs}...). - """ - # did we already compute the map for the desired symmetry? - if symmetry in self._term_map: - return self._term_map[symmetry] - # split the permutations according to their index space. - # invert the permutations in possible space combinations - # and check if we computed any of the partially or fully inverted - # symmetries - permutations, factor = symmetry - splitted = list( - PermutationProduct.split_in_separable_parts(permutations).items() - ) - # also check the sorted version before inverting - if not isinstance(permutations, PermutationProduct): - permutations = tuple(itertools.chain.from_iterable( - [val for _, val in sorted(splitted)] - )) - sym = (permutations, factor) - if sym in self._term_map: - return self._term_map[sym] - - invertable_subsets: list[int] = [ - i for i, (_, perms) in enumerate(splitted) if len(perms) > 1 - ] - for n_inverts in range(1, len(invertable_subsets)+1): - for to_invert in \ - itertools.combinations(invertable_subsets, n_inverts): - inv_perms: list[tuple[str, list[Permutation]]] = [] - for i, val in enumerate(splitted): - if i in to_invert: # invert the order of the permutations - inv_perms.append((val[0], val[1][::-1])) - else: - inv_perms.append(val) - inv_perms_tpl = tuple(itertools.chain.from_iterable( - [val for _, val in sorted(inv_perms)] - )) - # check if the inverted variant has been already computed - sym = (inv_perms_tpl, factor) - if sym in self._term_map: - return self._term_map[sym] - # could not find any variant in the term_map - # -> probe the expression for the original variant - assert isinstance(permutations, PermutationProduct) - return self.probe_symmetry(permutations, factor) - - @cached_property - def target_indices(self) -> tuple[Index, ...]: - """Returns the target indices of the 
expression.""" - - if self._expr.provided_target_idx is not None: - return self._expr.provided_target_idx - - # determine the target indices of each term and ensure all terms hold - # the same target indices - target = self._terms[0].target - if any(term.target != target for term in self._terms): - raise NotImplementedError("Can only create a term map for an " - "expression where each term is holding " - "the same target indices.") - return target - - @cached_member - def _prescan_terms(self) -> tuple[tuple[bool, list[int]], ...]: - """ - Prescan the terms of the expression collecting compatible terms that - might be mapped onto each other. - - Returns - ------- - tuple[bool, list] - First entry: Indicates whether the corresopnding terms have an - orbital energy denominator. - Second entry: The terms by their index. - """ - from .eri_orbenergy import EriOrbenergy - - filtered_terms = defaultdict(list) - for term_i, term in enumerate(self._terms): - # split the term in pref, orbital energy frac and remainder - term = EriOrbenergy(term) - # get the description of all objects in the remainder (eri) part - # don't include target indices in the description since thats - # what we want to probe the expr for (contracted permutations - # can be simplified, which is assumed to have happened before.) - eri_descriptions: tuple[str, ...] = tuple(sorted( - o.description(target_idx=None) - for o in term.eri.objects - )) - # space of contracted indices - idx_space = "".join(sorted( - s.space[0] + s.spin for s in term.eri.contracted - )) - # the number and length of brackets in the denominator - key = (eri_descriptions, term.denom_description(), idx_space) - filtered_terms[key].append(term_i) - # rearrange the term idx lists so the information whether they - # contain a denominator is directly available - # Also remove lists with a single entry... 
cant map them onto - # anything else anyway - return tuple( - (False, term_i_list) if key[1] is None else (True, term_i_list) - for key, term_i_list in filtered_terms.items() - if len(term_i_list) > 1 - ) - - def probe_symmetry(self, permutations: PermutationProduct, - sym_factor: int) -> dict: - """ - Probes which terms in the expression can be mapped onto each other - by applying the given symmetry. - - Parameters - ---------- - permutations : PermutationProduct - A prdocut of permutations of target indices of the expression. - sym_factor : int - Possible values: - +1 -> probe for symmetry (Term + P_{pq}P_{rs}... Term) - -1 -> probe for antisymmetry (Term - P_{pq}P_{rs}... Term) - - Returns - ------- - dict - Contains the index of terms which, when the provided permutations - are applied, become equal to other non-permuted terms. - key: The index of the permuted term. - value: The index of the term it can be mapped onto. - """ - from .reduce_expr import factor_eri_parts, factor_denom - from .simplify import simplify - - def simplify_with_denom(expr: ExprContainer) -> ExprContainer: - if expr.inner.is_number: # trivial - return expr - - factored = itertools.chain.from_iterable( - factor_denom(sub_e) for sub_e in factor_eri_parts(expr) - ) - ret = ExprContainer(0, **expr.assumptions) - for term in factored: - ret += term.factor() - return ret - - if sym_factor not in [1, -1]: - raise Inputerror(f"Invalid symmetry factor {sym_factor}. +-1 " - "is valid.") - - # check that the given permutations only contain target indices - target_indices = self.target_indices - if any(s not in target_indices - for s in itertools.chain.from_iterable(permutations)): - raise NotImplementedError("Found non target index in " - f"{permutations}. 
Target indices are " - f"{target_indices}.") - - map_contribution: dict[int, int] = {} - for has_denom, term_i_list in self._prescan_terms(): - # go through the terms and filter out terms that are symmetric or - # antisymmetric with respect to the given symmetry - relevant_terms: list[tuple[int, ExprContainer]] = [] - for term_i in term_i_list: - term: TermContainer = self._terms[term_i] - perm_term: ExprContainer = term.permute(*permutations) - # check that the permutations are valid - if perm_term.inner is S.Zero and term.inner is not S.Zero: - continue - # only look for the desired symmetry which is defined by - # sym_factor - if sym_factor == -1: # looking for antisym: P_pq X != -X - if Add(perm_term.inner, term.inner) is not S.Zero: - relevant_terms.append((term_i, perm_term)) - else: # looking for sym: P_pq X != X - if Add(perm_term.inner, -term.inner) is not S.Zero: - relevant_terms.append((term_i, perm_term)) - # choose a function for simplifying the sum/difference of 2 terms - # it might be neccessary to permute contracted indices to - # achieve equality of the 2 terms - simplify_terms = simplify_with_denom if has_denom else simplify - # now compare all relevant terms with each other - for term_i, perm_term in relevant_terms: - for other_term_i, _ in relevant_terms: - if term_i == other_term_i: # dont compare to itself - continue - # looking for antisym: X - (P_pq X) = X - X' - # P_pq X + (- X') = 0 - if sym_factor == -1: - sum = simplify_terms( - perm_term + self._terms[other_term_i] - ) - # looking for sym: X + (P_pq X) = X + X' - # P_pq X - X' = 0 - else: # +1 - sum = simplify_terms( - perm_term - self._terms[other_term_i] - ) - # was it possible to map the terms onto each other? 
- if sum.inner is S.Zero: - map_contribution[term_i] = other_term_i - # can break the loop: if we are assuming that the - # expression is completely simplified, it will not - # be possible to find another match for term_i - # (otherwise 2 other_term_i would have to be identical) - break - self._term_map[(tuple(permutations), sym_factor)] = map_contribution - return map_contribution diff --git a/build/lib/adcgen/sympy_objects.py b/build/lib/adcgen/sympy_objects.py deleted file mode 100644 index abc8162..0000000 --- a/build/lib/adcgen/sympy_objects.py +++ /dev/null @@ -1,399 +0,0 @@ -from collections.abc import Sequence - -from sympy.physics.secondquant import ( - _sort_anticommuting_fermions, ViolationOfPauliPrinciple -) -from sympy.core.logic import fuzzy_not -from sympy.core.function import DefinedFunction -from sympy import sympify, Tuple, Symbol, S, Number, Expr - -from .misc import Inputerror -from .indices import Index, _is_index_tuple, sort_idx_canonical - - -class SymbolicTensor(Expr): - """Base class for symbolic tensors.""" - - is_commutative = True - - @property - def symbol(self) -> Symbol: - """Returns the symbol of the tensor.""" - symbol = self.args[0] - assert isinstance(symbol, Symbol) - return symbol - - @property - def name(self) -> str: - """Returns the name of the tensor.""" - return self.symbol.name - - @property - def idx(self) -> tuple[Index, ...]: - """Returns all indices of the tensor.""" - raise NotImplementedError("'idx' not implemented on " - f"{self.__class__.__name__}") - - -class AntiSymmetricTensor(SymbolicTensor): - """ - Represents antisymmetric tensors - d^{pq}_{rs} = - d^{qp}_{rs} = - d^{pq}_{sr} = d^{qp}_{sr}. - Based on the implementation in 'sympy.physics.secondquant'. - - Parameters - ---------- - name : str | Symbol - The name of the tensor. - upper : Sequence[Index] | Tuple - The upper indices of the tensor. - lower : Sequence[Index] | Tuple - The lower indices of the tensor. 
- bra_ket_sym : int | Number, optional - The bra-ket symmetry of the tensor: - - 0 no bra-ket-symmetry (d^{i}_{j} != d^{j}_{i}) - - 1 bra-ket symmetry (d^{i}_{j} = d^{j}_{i}) - - -1 bra-ket antisymmetry (d^{i}_{j} = - d^{j}_{i}) - (default: 0) - """ - - def __new__(cls, name: str | Symbol, upper: Sequence[Index] | Tuple, - lower: Sequence[Index] | Tuple, bra_ket_sym: int | Number = 0): - # sort the upper and lower indices - try: - upper_sorted, sign_u = _sort_anticommuting_fermions( - upper, key=sort_idx_canonical - ) - lower_sorted, sign_l = _sort_anticommuting_fermions( - lower, key=sort_idx_canonical - ) - except ViolationOfPauliPrinciple: - return S.Zero - # additionally account for the bra ket symmetry - # add the Index check for subs to work correctly - bra_ket_sym_imported = sympify(bra_ket_sym) - if bra_ket_sym_imported is not S.Zero and \ - all(isinstance(s, Index) for s in upper_sorted+lower_sorted): - if bra_ket_sym_imported not in [S.One, S.NegativeOne]: - raise Inputerror("Invalid bra ket symmetry given " - f"{bra_ket_sym}. 
Valid are 0, 1 or -1.") - if cls._need_bra_ket_swap(upper_sorted, lower_sorted): - upper_sorted, lower_sorted = lower_sorted, upper_sorted # swap - if bra_ket_sym_imported is S.NegativeOne: # add another -1 - sign_u += 1 - # import all quantities - name_imported = sympify(name) - upper_imported = Tuple(*upper_sorted) - lower_imported = Tuple(*lower_sorted) - # attach -1 if necessary - if (sign_u + sign_l) % 2: - return - super().__new__( - cls, name_imported, upper_imported, lower_imported, - bra_ket_sym_imported - ) - else: - return super().__new__( - cls, name_imported, upper_imported, lower_imported, - bra_ket_sym_imported - ) - - @classmethod - def _need_bra_ket_swap(cls, upper: Sequence, lower: Sequence) -> bool: - if len(upper) != len(lower): - raise NotImplementedError("Bra Ket symmetry only implemented " - "for tensors with an equal amount " - "of upper and lower indices.") - # Build the sort key for each index and collect the first, second, ... - # entries of the keys - # -> Compare each component of the sort keys individually and abort - # if it is clear, that we need or don't need to swap - # Assumes that upper indices should have the smaller keys. 
- upper_sort_keys = (sort_idx_canonical(s) for s in upper) - lower_sort_keys = (sort_idx_canonical(s) for s in lower) - for upper_keys, lower_keys in \ - zip(zip(*upper_sort_keys), zip(*lower_sort_keys)): - if lower_keys < upper_keys: - return True - elif upper_keys < lower_keys: - return False - return False - - def _latex(self, printer) -> str: - upper = self.upper.args - lower = self.lower.args - assert _is_index_tuple(upper) and _is_index_tuple(lower) - return "{%s^{%s}_{%s}}" % ( - self.symbol, - "".join(i._latex(printer) for i in upper), - "".join(i._latex(printer) for i in lower) - ) - - def __str__(self): - return f"{self.symbol}({self.upper},{self.lower})" - - @property - def upper(self) -> Tuple: - """Returns the upper indices of the tensor.""" - upper = self.args[1] - assert isinstance(upper, Tuple) - return upper - - @property - def lower(self) -> Tuple: - """Returns the lower indices of the tensor.""" - lower = self.args[2] - assert isinstance(lower, Tuple) - return lower - - @property - def bra_ket_sym(self) -> Number: - """Returns the bra-ket symmetry of the tensor.""" - braketsym = self.args[3] - assert isinstance(braketsym, Number) - return braketsym - - def add_bra_ket_sym(self, bra_ket_sym: int | Number - ) -> 'AntiSymmetricTensor': - """ - Adds bra-ket symmetry to the tensor if none has been set yet. - - Parameters - ---------- - bra_ket_sym : int - The bra-ket symmetry to set (0, 1 and -1 are valid.) - """ - - if bra_ket_sym == self.bra_ket_sym: - return self - elif self.bra_ket_sym is S.Zero: - return self.__class__(self.symbol, self.upper, self.lower, - bra_ket_sym) - else: - raise Inputerror("bra ket symmetry already set. The original " - "indices are no longer available. Can not apply " - "any other bra ket sym.") - - @property - def idx(self) -> tuple[Index, ...]: - """ - Returns all indices of the tensor. The upper indices are listed before - the lower indices. 
- """ - idx = self.upper.args + self.lower.args - assert _is_index_tuple(idx) - return idx - - -class Amplitude(AntiSymmetricTensor): - """ - Represents antisymmetric Amplitudes. - """ - - @property - def idx(self) -> tuple[Index, ...]: - """ - Returns all indices of the amplitude. The lower indices are - listed before the upper indices. - """ - idx = self.lower.args + self.upper.args - assert _is_index_tuple(idx) - return idx - - -class SymmetricTensor(AntiSymmetricTensor): - """ - Represents symmetric tensors - d^{pq}_{rs} = d^{qp}_{rs} = d^{pq}_{sr} = d^{qp}_{sr}. - - Parameters - ---------- - name : str | Symbol - The name of the tensor. - upper : Sequence[Index] - The upper indices of the tensor. - lower : Sequence[Index] - The lower indices of the tensor. - bra_ket_sym : int, optional - The bra-ket symmetry of the tensor: - - 0 no bra-ket-symmetry (d^{i}_{j} != d^{j}_{i}) - - 1 bra-ket symmetry (d^{i}_{j} = d^{j}_{i}) - - -1 bra-ket antisymmetry (d^{i}_{j} = - d^{j}_{i}) - (default: 0) - """ - - def __new__(cls, name: str | Symbol, upper: Sequence[Index], - lower: Sequence[Index], bra_ket_sym: int = 0): - # sort upper and lower. No need to track the number of swaps - upper = sorted(upper, key=sort_idx_canonical) - lower = sorted(lower, key=sort_idx_canonical) - # account for the bra ket symmetry - # add the Index check for subs to work correctly - negative_sign = False - bra_ket_sym_imported = sympify(bra_ket_sym) - if bra_ket_sym_imported is not S.Zero and \ - all(isinstance(s, Index) for s in upper+lower): - if bra_ket_sym_imported not in [S.One, S.NegativeOne]: - raise Inputerror("Invalid bra ket symmetry given " - f"{bra_ket_sym}. 
Valid are 0, 1 or -1.") - if cls._need_bra_ket_swap(upper, lower): - upper, lower = lower, upper # swap - if bra_ket_sym_imported is S.NegativeOne: - negative_sign = True - # import all quantities to sympy - name_imported = sympify(name) - upper_imported, lower_imported = Tuple(*upper), Tuple(*lower) - # attach -1 if necessary - if negative_sign: - return - super(AntiSymmetricTensor, cls).__new__( - cls, name_imported, upper_imported, lower_imported, - bra_ket_sym_imported - ) - else: - return super(AntiSymmetricTensor, cls).__new__( - cls, name_imported, upper_imported, lower_imported, - bra_ket_sym_imported - ) - - -class NonSymmetricTensor(SymbolicTensor): - """ - Represents tensors that do not have any symmetry. - - Parameters - ---------- - name : str | Symbol - The name of the tensor. - indices : Sequence[Index] | Tuple - The indices of the tensor. - """ - - def __new__(cls, name: str | Symbol, indices: Sequence[Index] | Tuple): - symbol_imported = sympify(name) - indices_imported = Tuple(*indices) - return super().__new__(cls, symbol_imported, indices_imported) - - def _latex(self, printer) -> str: - indices = self.indices.args - assert _is_index_tuple(indices) - return "{%s_{%s}}" % ( - self.symbol, - "".join(i._latex(printer) for i in indices) - ) - - def __str__(self): - return "%s%s" % self.args - - @property - def indices(self) -> Tuple: - """Returns the indices of the tensor.""" - indices = self.args[1] - assert isinstance(indices, Tuple) - return indices - - @property - def idx(self) -> tuple[Index, ...]: - """Returns the indices of the tensor.""" - idx = self.args[1].args - assert _is_index_tuple(idx) - return idx - - -class KroneckerDelta(DefinedFunction): - """ - Represents a Kronecker delta. - Based on the implementation in 'sympy.functions.special.tensor_functions'. - """ - - @classmethod - def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] - """ - Evaluates the KroneckerDelta. Adapted from sympy to also cover Spin. 
- """ - # This is needed for subs with simultaneous=True - if not isinstance(i, Index) or not isinstance(j, Index): - return None - - diff = i - j - if diff.is_zero: - return S.One - elif fuzzy_not(diff.is_zero): - return S.Zero - - spi, spj = i.space[0], j.space[0] - valid_spaces = ["o", "v", "g", "c", "r"] - assert spi in valid_spaces and spj in valid_spaces - if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo - return S.Zero - spi, spj = i.spin, j.spin - assert spi in ["", "a", "b"] and spj in ["", "a", "b"] - if spi and spj and spi != spj: # delta_ab / delta_ba - return S.Zero - # sort the indices of the delta - if i != min(i, j, key=sort_idx_canonical): - return cls(j, i) - return None - - def _eval_power(self, exp) -> Expr: # type: ignore[override] - # we don't want exponents > 1 on deltas! - if exp.is_positive: - return self - elif exp.is_negative and exp is not S.NegativeOne: - return S.One / self - - def _latex(self, printer) -> str: - return ( - "\\delta_{" + " ".join(s._latex(printer) for s in self.idx) + "}" - ) - - @property - def idx(self) -> tuple[Index, Index]: - """Returns the indices of the Kronecker delta.""" - idx = self.args - assert _is_index_tuple(idx) and len(idx) == 2 - return idx - - @property - def preferred_and_killable(self) -> tuple[Index, Index] | None: - """ - Returns the preferred (first) and killable (second) index of the - kronecker delta. The preferred index contains at least as much - information as the killable index. Therefore, 'evaluate_deltas' - will always try to keep the preferred index in the expression. 
- """ - i, j = self.args - assert isinstance(i, Index) and isinstance(j, Index) - space1, spin1 = i.space[0], i.spin - space2, spin2 = j.space[0], j.spin - # ensure we have no unexpected space and spin - assert ( - space1 in ["o", "v", "g", "c", "r"] - and space2 in ["o", "v", "g", "c", "r"] - ) - assert spin1 in ["", "a", "b"] and spin2 in ["", "a", "b"] - - if spin1 == spin2: # nn / aa / bb -> equal spin information - # oo / vv / cc / gg / og / vg / cg / rr - # RI indices will always end up here - if space1 == space2 or space2 == "g": - return (i, j) - else: # go / gv / gc - return (j, i) - elif spin2: # na / nb -> 2 holds more spin information - # oo / vv / cc / gg / go / gv / gc - if space1 == space2 or space1 == "g": - return (j, i) - else: # og / vg / cg -> 1 holds more space information - return None - else: # an / bn -> 1 holds more spin information - # oo / vv / cc / gg / og / vg / cg - if space1 == space2 or space2 == "g": - return (i, j) - else: # go / gv / gc -> 2 holds more space information - return None - - @property - def indices_contain_equal_information(self) -> bool: - """Whether both indices contain the same amount of information.""" - i, j = self.args - assert isinstance(i, Index) and isinstance(j, Index) - return i.space == j.space and i.spin == j.spin diff --git a/build/lib/adcgen/tensor_names.json b/build/lib/adcgen/tensor_names.json deleted file mode 100644 index 57309ad..0000000 --- a/build/lib/adcgen/tensor_names.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "eri": "V", - "coulomb": "v", - "fock": "f", - "operator": "d", - "gs_amplitude": "t", - "gs_density": "p", - "left_adc_amplitude": "X", - "right_adc_amplitude": "Y", - "orb_energy": "e", - "sym_orb_denom": "D", - "ri_sym": "B", - "ri_asym_factor": "C", - "ri_asym_eri": "W" -} \ No newline at end of file diff --git a/build/lib/adcgen/tensor_names.py b/build/lib/adcgen/tensor_names.py deleted file mode 100644 index c9302cc..0000000 --- a/build/lib/adcgen/tensor_names.py +++ /dev/null @@ 
-1,225 +0,0 @@ -from dataclasses import dataclass, fields -from pathlib import Path -from typing import TYPE_CHECKING -import json - -from sympy import Symbol - -from .misc import Singleton - -if TYPE_CHECKING: - from .expression import ExprContainer - - -_config_file = "tensor_names.json" - - -# NOTE: Currently it is only possible to modify the values through -# 'tensor_names.json'. It is not possible to adjust them by modifying the -# class attributes. This is less flexible but ensures that throughout -# a run of the program consistent names are used. Especially, -# due to the caching weird behaviour might be possible -# if names are changed in the middle of a run. -@dataclass(slots=True, frozen=True) -class TensorNames(metaclass=Singleton): - """ - Singleton class that that is used to define the names of tensors - used throughout the code. The names can be changed by modifying - 'tensor_names.json'. By default, the following names are used, where - the attributes storing the names are given in brackets: - - antisymmetric ERI in physicist notation (eri): V - - Coulomb integrals in chemist notation (coulomb): v - - The fock matrix (fock): f - - The arbitrary N-particle operator matrix (operator): d - - Ground state amplitudes (gs_amplitude): t - Additionally, an integer representing the perturbation theoretical order - and/or 'cc' to represent complex conjugate amplitudes are appended - to the name. - - The ground state density matrix (gs_density): p - Additionally, an integer representing the perturbation theoretical order - is appended to the name. 
- - ADC amplitudes belonging to the bra (left) state (left_adc_amplitude): X - - ADC amplitudes belonging to the ket (right) state - (right_adc_amplitude): X - - Orbital energies (orb_energy): e - - Symbolic orbital energy denominators [(e_i - e_a)^-1 -> D^{i}_{a}] - (sym_orb_denom): D - """ - eri: str = "V" - coulomb: str = "v" - ri_sym: str = "B" - ri_asym_factor: str = "C" - ri_asym_eri: str = "W" - fock: str = "f" - operator: str = "d" - gs_amplitude: str = "t" - gs_density: str = "p" - left_adc_amplitude: str = "X" - right_adc_amplitude: str = "Y" - orb_energy: str = "e" - sym_orb_denom: str = "D" - - @staticmethod - def _from_config() -> 'TensorNames': - """ - Construct the TensorNames instance with values from the config file - 'tensor_names.json'. - """ - config_file = Path(__file__).parent.resolve() / _config_file - tensor_names: dict[str, str] = json.load(open(config_file, "r")) - return TensorNames(**tensor_names) - - @staticmethod - def defaults() -> dict[str, str]: - """Returns the default values of all fields.""" - ret = {} - for field in fields(TensorNames): - val = field.default - assert isinstance(val, str) - ret[field.name] = val - return ret - - def map_default_name(self, name: str) -> str: - """ - Takes a tensor name, checks whether it corresponds to any of the - default names and returns the currently used name. 
- """ - # split the name in base and extension for t-amplitudes and - # ground state densities - if (split_name := _split_default_t_amplitude(name)) is not None: - _, ext = split_name - return self.gs_amplitude + ext - elif (split_name := _split_default_gs_density(name)) is not None: - _, ext = split_name - return self.gs_density + ext - - for field in fields(self): - if field.default == name: - return getattr(self, field.name) - return name # found not matching default name -> return input - - def rename_tensors(self, expr: "ExprContainer") -> "ExprContainer": - """ - Renames all tensors in the expression form their default names to the - currently configured names. Note that only the name of the tensors - is changed, while their type (Amplitude, AntiSymmetricTensor, ...) - remains the same. - - Parameters - ---------- - expr: Expr - The expression using the default tensor names. - """ - from .expression import ExprContainer - - assert isinstance(expr, ExprContainer) - for field in fields(self): - default = field.default - assert isinstance(default, str) - new = getattr(self, field.name) - if default == new: # nothing to do - continue - # find the necessary substitutions - if field.name == "gs_amplitude": # special case for t_amplitudes - subs: list[tuple[str, str]] = [] - for sym in expr.inner.atoms(Symbol): - assert isinstance(sym, Symbol) - split_name = _split_default_t_amplitude(sym.name) - if split_name is None: - continue - subs.append((sym.name, new + split_name[1])) - elif field.name == "gs_density": # and for gs densities - subs = [] - for sym in expr.inner.atoms(Symbol): - assert isinstance(sym, Symbol) - split_name = _split_default_gs_density(sym.name) - if split_name is None: - continue - subs.append((sym.name, new + split_name[1])) - else: - subs = [(default, new)] - - for old, new in subs: - expr.rename_tensor(old, new) - return expr - - -# init the TensorNames instance and overwrite the defaults with -# values from the config file -tensor_names = 
TensorNames._from_config() - - -def is_t_amplitude(name: str) -> bool: - """ - Checks whether the tensor name belongs to a ground state amplitude. - Possible patterns for names are (assuming the default gs_amplitude name t): - - t - - tcc (complex conjugate amplitude) - - tn (where n is any positive integer) - - tncc - """ - base, order = split_t_amplitude_name(name) - order = order.replace("c", "") - if order: - return base == tensor_names.gs_amplitude and order.isnumeric() - else: - return base == tensor_names.gs_amplitude - - -def split_t_amplitude_name(name: str) -> tuple[str, str]: - """ - Split the name of a ground state amplitude in base and extension, e.g., - 't3cc' -> ('t', '3cc'). - """ - n = len(tensor_names.gs_amplitude) - return name[:n], name[n:] - - -def is_adc_amplitude(name: str) -> bool: - """Checks whether the tensor name belongs to a ADC amplitude.""" - return (name == tensor_names.left_adc_amplitude or - name == tensor_names.right_adc_amplitude) - - -def is_gs_density(name: str) -> bool: - """ - Checks whether the tensor name belongs to the ground state density matrix - """ - base, order = split_gs_density_name(name) - if order: - return base == tensor_names.gs_density and order.isnumeric() - else: - return base == tensor_names.gs_density - - -def split_gs_density_name(name: str) -> tuple[str, str]: - """Splits the name of a ground state density matrix in base and order.""" - n = len(tensor_names.gs_density) - return name[:n], name[n:] - - -def _split_default_t_amplitude(name: str) -> tuple[str, str] | None: - """ - Checks whether the name belongs to a t amplitude with the default name - and return the base and extension of the name - """ - default = tensor_names.defaults()["gs_amplitude"] - base, ext = name[:len(default)], name[len(default):] - if base != default: - return None - order = ext.replace("c", "") - if order and not order.isnumeric(): - return None - return base, ext - - -def _split_default_gs_density(name: str) -> tuple[str, str] | 
None: - """ - Checks whether the name belongs to a ground stat density with the default - name and returns the base and extension of the name. - """ - default = tensor_names.defaults()["gs_density"] - base, order = name[:len(default)], name[len(default):] - if base != default or (order and not order.isnumeric()): - return None - return base, order From 6ec94924081dc8ed47076da17aed1a6d76a9795f Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 12:37:10 +0200 Subject: [PATCH 04/26] Updated gitignore for distribution --- .gitignore | 184 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 182 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index d2f425b..008f3e2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,185 @@ -__pycache__/ +# VS Code .DS_STORE .vscode -*.egg-info + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. 
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. 
Recommended for sensitive data +# refer to https://docs.cursor.com/context/ignore-files +.cursorignore +.cursorindexingignore From 9cb3ace794aac6993b9a2b32f8559f5b46dacc43 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 12:58:42 +0200 Subject: [PATCH 05/26] Changed CI to work for every branch --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a706246..c5f6c90 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -5,7 +5,7 @@ name: CI on: push: - branches: [ "master" ] + branches: [ "*" ] pull_request: branches: [ "master" ] From 966efaf6134ce059550a6cc61506a52f742f376f Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 13:02:31 +0200 Subject: [PATCH 06/26] Updated __init__ to include RI --- adcgen/__init__.py | 2 +- tests/contraction_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/adcgen/__init__.py b/adcgen/__init__.py index e62ee16..f2aad38 100644 --- a/adcgen/__init__.py +++ b/adcgen/__init__.py @@ -36,7 +36,7 @@ "Intermediates", "reduce_expr", "factor_intermediates", "sort", "transform_to_spatial_orbitals", - "apply_resolution_of_identity" + "apply_resolution_of_identity", "apply_cvs_approximation", "generate_code", "optimize_contractions", "unoptimized_contraction", "Contraction", diff --git a/tests/contraction_test.py b/tests/contraction_test.py index f3986de..ba76101 100644 --- a/tests/contraction_test.py +++ b/tests/contraction_test.py @@ -67,7 +67,7 @@ def test_evalute_costs(self): print(comp.evaluate_costs(sizes)) print(mem.evaluate_costs(sizes)) print(scaling.evaluate_costs(sizes)) - assert comp.evaluate_costs(sizes) == 2477260800 + assert comp.evaluate_costs(sizes) == 2477260800 assert mem.evaluate_costs(sizes) == 9075780000 assert scaling.evaluate_costs(sizes) == (2477260800, 9075780000) # ensure that zero sized spaces are ignored From 
020590e1aca2d086c7a537a9a774d4099b0d4360 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 20 May 2025 13:05:49 +0200 Subject: [PATCH 07/26] Included MP3 GS energy as test data --- tests/reference_data/generate_data.py | 2 +- tests/reference_data/ri_gs_energy.json | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/tests/reference_data/generate_data.py b/tests/reference_data/generate_data.py index 8e5c77e..0db7ff0 100644 --- a/tests/reference_data/generate_data.py +++ b/tests/reference_data/generate_data.py @@ -366,7 +366,7 @@ def gen_ri_gs_energies(self): results: dict = {} outfile = "ri_gs_energy.json" - variations = itertools.product(['mp', 're'], [0, 1, 2], ['r', 'u'], + variations = itertools.product(['mp', 're'], [0, 1, 2, 3], ['r', 'u'], ['sym', 'asym']) for variant, order, restriction, symmetry in variations: diff --git a/tests/reference_data/ri_gs_energy.json b/tests/reference_data/ri_gs_energy.json index c69bc94..bfa2d61 100644 --- a/tests/reference_data/ri_gs_energy.json +++ b/tests/reference_data/ri_gs_energy.json @@ -29,6 +29,16 @@ "sym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", "asym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} 
- \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" } + }, + "3": { + "r": { + "sym": "- \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "asym": "- \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}" + }, + "u": { + "sym": "- {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} 
{B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", + "asym": "- {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" + } } }, "re": { @@ -61,6 +71,16 @@ "sym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", "asym": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} 
{G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" } + }, + "3": { + "r": { + "sym": "2 {t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} - \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "asym": "2 {t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} - \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}" + }, + "u": { + "sym": "{t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} + {t2^{a_{\\beta}}_{i_{\\beta}}} {f^{i_{\\beta}}_{a_{\\beta}}} - {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {B^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {B^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {B^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {B^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} 
{B^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}", + "asym": "{t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} + {t2^{a_{\\beta}}_{i_{\\beta}}} {f^{i_{\\beta}}_{a_{\\beta}}} - {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}a_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {C^{P_{\\alpha}}_{i_{\\alpha}b_{\\alpha}}} {G^{P_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}a_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {C^{P_{\\alpha}}_{i_{\\beta}b_{\\beta}}} {G^{P_{\\alpha}}_{j_{\\beta}a_{\\beta}}}}{4}" + } } } } \ No newline at end of file From deeb57c7e2deddb5a31d49ca985cb1eb72f85e6c Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 12:19:07 +0200 Subject: [PATCH 08/26] Rewrote RI implementation in greater similarity to ERI expansion. 
Restored some test data and CI --- .github/workflows/ci.yaml | 2 +- .gitignore | 187 +----------------- adcgen/expression/expr_container.py | 20 ++ adcgen/expression/object_container.py | 55 ++++++ adcgen/expression/polynom_container.py | 34 ++++ adcgen/expression/term_container.py | 29 +++ adcgen/resolution_of_identity.py | 50 +---- adcgen/sympy_objects.py | 2 + tests/reference_data/gs_energy.json | 4 +- tests/reference_data/isr_precursor.json | 16 +- .../reference_data/isr_precursor_overlap.json | 2 +- .../properties_expectation_value.json | 4 +- .../properties_trans_moment.json | 8 +- tests/reference_data/secular_matrix.json | 4 +- 14 files changed, 174 insertions(+), 243 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c5f6c90..a706246 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -5,7 +5,7 @@ name: CI on: push: - branches: [ "*" ] + branches: [ "master" ] pull_request: branches: [ "master" ] diff --git a/.gitignore b/.gitignore index 008f3e2..2462954 100644 --- a/.gitignore +++ b/.gitignore @@ -1,185 +1,6 @@ -# VS Code -.DS_STORE -.vscode - -# Byte-compiled / optimized / DLL files __pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ .pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# UV -# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -#uv.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. 
-# https://pdm.fming.dev/latest/usage/project/#working-with-version-control -.pdm.toml -.pdm-python -.pdm-build/ - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ - -# Ruff stuff: -.ruff_cache/ - -# PyPI configuration file -.pypirc - -# Cursor -# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to -# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data -# refer to https://docs.cursor.com/context/ignore-files -.cursorignore -.cursorindexingignore +.DS_STORE +.vscode +*.egg-info +build/ \ No newline at end of file diff --git a/adcgen/expression/expr_container.py b/adcgen/expression/expr_container.py index 29504e9..bec6eac 100644 --- a/adcgen/expression/expr_container.py +++ b/adcgen/expression/expr_container.py @@ -275,6 +275,26 @@ def rename_tensor(self, current: str, new: str) -> 'ExprContainer': self._inner = renamed return self + def factorise_eri(self, factorisation: str = 'sym') -> 'ExprContainer': + """ + Factorises symmetric ERIs in chemist notation into RI format. 
+ This can be done both symmetrically and asymetrically + + Args: + factorisation : str, optional + Which mode of factorisation to use. Defaults to 'sym'. + + Returns: + ExprContainer: The factorised result + """ + res = S.Zero + for term in self.terms: + res += term.factorise_eri(factorisation=factorisation, + wrap_result=False) + assert isinstance(res, Expr) + self._inner = res + return self + def expand_antisym_eri(self) -> 'ExprContainer': """ Expands the antisymmetric ERI using chemists notation diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 0c54abf..80bc789 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -747,6 +747,61 @@ def rename_tensor(self, current: str, new: str, obj = ExprContainer(obj, **self.assumptions) return obj + def factorise_eri(self, factorisation: str = 'sym', + wrap_result: bool = True) -> "ExprContainer | Expr": + """ + Factorises symmetric ERIs in chemist notation into RI format. + This is done either symmetrically or asymmetrically + + Args: + factorisation : str, optional + Either 'sym' or 'asym'. Determines the type of factorisation. + Defaults to 'sym'. + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer. + Defaults to True. + + Returns: + ExprContainer | Expr: The factorised result + """ + from .expr_container import ExprContainer + + res = self.inner + base, exponent = self.base_and_exponent + if isinstance(base, SymmetricTensor) and \ + base.name == tensor_names.coulomb: + # ensure that the ERI is symmetric as an implicit check + # whether it is real + if base.bra_ket_sym != 1: + raise NotImplementedError("Can only apply RI approximation to " + "ERIs with bra-ket symmetry " + "(real orbitals).") + p, q, r, s = self.idx + res = S.One + if p.spin == q.spin and r.spin == s.spin: + assumptions = {"ri": True} + if p.spin: + # Check if RI is applied before or after + # spin integration. 
RI indices are always alpha + assumptions["alpha"] = True + for _ in range(exponent): + aux_idx = Index('P', **assumptions) + if factorisation == 'sym': + res *= SymmetricTensor(tensor_names.ri_sym, + (aux_idx,), (p, q), 0) + res *= SymmetricTensor(tensor_names.ri_sym, + (aux_idx,), (r, s), 0) + elif factorisation == 'asym': + res *= SymmetricTensor(tensor_names.ri_asym_factor, + (aux_idx,), (p, q), 0) + res *= SymmetricTensor(tensor_names.ri_asym_eri, + (aux_idx,), (r, s), 0) + + if wrap_result: + kwargs = self.assumptions + res = ExprContainer(res, **kwargs) + return res + def expand_antisym_eri(self, wrap_result: bool = True ) -> "ExprContainer | Expr": """ diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index ac6610d..63ce28b 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -198,6 +198,40 @@ def rename_tensor(self, current: str, new: str, renamed = ExprContainer(inner=renamed, **self.assumptions) return renamed + def factorise_eri(self, factorisation: str = 'sym', + wrap_result: bool = True) -> "Expr | ExprContainer": + """ + Fatorises the symmetric ERIs in chemist notation into an RI format. + Note that this expands the polynomial to account for the uniqueness + of each RI auxilliary index. + + Args: + factorisation : str, optional + Which type of factorisation to use (sym or asym). + Defaults to 'sym' + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer. + Defaults to True. 
+ + Returns: + Expr | ExprContainer: The fatorised result + """ + from .expr_container import ExprContainer + + factorised = S.One + for _ in range(self.exponent): + expanded = S.Zero + for term in self.terms: + expanded += term.factorise_eri(factorisation=factorisation, + wrap_result=False) + factorised *= expanded + assert isinstance(factorised, Expr) + + if wrap_result: + assumptions = self.assumptions + factorised = ExprContainer(inner=factorised, **assumptions) + return factorised + def expand_antisym_eri(self, wrap_result: bool = True): """ Expands the antisymmetric ERI using chemists notation diff --git a/adcgen/expression/term_container.py b/adcgen/expression/term_container.py index 6a385a4..81d12b9 100644 --- a/adcgen/expression/term_container.py +++ b/adcgen/expression/term_container.py @@ -596,6 +596,35 @@ def rename_tensor(self, current: str, new: str, wrap_result: bool = True renamed = ExprContainer(renamed, **self.assumptions) return renamed + def factorise_eri(self, factorisation: str = 'sym', + wrap_result: bool = True) -> "Expr | ExprContainer": + """ + Factorises symmetric ERIs in chemist notation into RI format. + This is done either symmetrically or asymmetrically + + Args: + factorisation : str, optional + Either 'sym' or 'asym'. Determines the type of factorisation. + Defaults to 'sym'. + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer. + Defaults to True. 
+ + Returns: + ExprContainer | Expr: The factorised result + """ + from .expr_container import ExprContainer + + factorised = S.One + for obj in self.objects: + factorised *= obj.factorise_eri(factorisation=factorisation, + wrap_result=False) + + if wrap_result: + assumptions = self.assumptions + factorised = ExprContainer(factorised, **assumptions) + return factorised + def expand_antisym_eri(self, wrap_result: bool = True ) -> "ExprContainer | Expr": """ diff --git a/adcgen/resolution_of_identity.py b/adcgen/resolution_of_identity.py index 3f0c4b7..108c1e6 100644 --- a/adcgen/resolution_of_identity.py +++ b/adcgen/resolution_of_identity.py @@ -1,7 +1,7 @@ from .expression import ExprContainer -from .sympy_objects import SymmetricTensor from .tensor_names import tensor_names -from .indices import Indices +from .misc import Inputerror +from sympy import Symbol def apply_resolution_of_identity(expr: ExprContainer, @@ -35,43 +35,13 @@ def apply_resolution_of_identity(expr: ExprContainer, If false, the asymmetric factorisation variant is employed instead. 
""" - resolved_expr = ExprContainer(0, **expr.assumptions) - idx_cls = Indices() + factorisation = 'asym' + if symmetric: + factorisation = 'sym' - # We iterate over all terms in the expression and apply RI individually - for term in expr.terms: - # Check if the term is spin-integrated - assert ("n" not in "".join([o.spin for o in term.objects])) - # Check that no antisymmetric ERIs remain - assert (tensor_names.eri not in - ",".join([str(o.name) for o in term.objects])) + # Check whether the expression contains antisymmetric ERIs + if Symbol(tensor_names.eri) in expr.inner.atoms(Symbol): + raise Inputerror('Resolution of Identity requires that the ERIs' + ' be expanded first.') - resolved_term = 1 - - for object in term.objects: - # Replace spatial ERIs - if object.name == tensor_names.coulomb: - # Extract indices - lower = object.idx[0:2] - upper = object.idx[2:4] - ri_idx = idx_cls.get_generic_indices(ri_a=1)[("ri", "a")] - - if symmetric: - # v_pqrs = B^P_pq B^P_rs - ri_expr = (SymmetricTensor(tensor_names.ri_sym, - ri_idx, tuple(lower)) - * SymmetricTensor(tensor_names.ri_sym, - ri_idx, tuple(upper))) - else: - # v_pqrs = C^P_pq W^P_rs - ri_expr = (SymmetricTensor(tensor_names.ri_asym_eri, - ri_idx, tuple(upper)) - * SymmetricTensor(tensor_names.ri_asym_factor, - ri_idx, tuple(lower))) - resolved_term *= ri_expr - else: - # Everything else is unaffected by RI - resolved_term *= object - - resolved_expr += resolved_term - return resolved_expr + return expr.factorise_eri(factorisation=factorisation) diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index abc8162..d5c83fd 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -230,6 +230,8 @@ def __new__(cls, name: str | Symbol, upper: Sequence[Index], # add the Index check for subs to work correctly negative_sign = False bra_ket_sym_imported = sympify(bra_ket_sym) + print(f"{name}: {bra_ket_sym}") + print(f"{upper}, {lower}") if bra_ket_sym_imported is not S.Zero and \ 
all(isinstance(s, Index) for s in upper+lower): if bra_ket_sym_imported not in [S.One, S.NegativeOne]: diff --git a/tests/reference_data/gs_energy.json b/tests/reference_data/gs_energy.json index f348233..aa6e000 100644 --- a/tests/reference_data/gs_energy.json +++ b/tests/reference_data/gs_energy.json @@ -2,11 +2,11 @@ "mp": { "0": "{f^{i}_{i}}", "1": "- \\frac{{V^{ij}_{ij}}}{2}", - "2": "- \\frac{{t1^{ab}_{ij}} {V^{ij}_{ab}}}{4}" + "2": "- \\frac{{V^{ij}_{ab}} {t1^{ab}_{ij}}}{4}" }, "re": { "0": "- \\frac{{V^{ij}_{ij}}}{2} + {f^{i}_{i}}", "1": "0", - "2": "- \\frac{{t1^{ab}_{ij}} {V^{ij}_{ab}}}{4}" + "2": "- \\frac{{V^{ij}_{ab}} {t1^{ab}_{ij}}}{4}" } } \ No newline at end of file diff --git a/tests/reference_data/isr_precursor.json b/tests/reference_data/isr_precursor.json index c0466f7..2dfbd4f 100644 --- a/tests/reference_data/isr_precursor.json +++ b/tests/reference_data/isr_precursor.json @@ -6,12 +6,12 @@ "ket": "\\left\\{{a^\\dagger_{a}} a_{i}\\right\\}" }, "1": { - "bra": "- \\frac{{t1cc^{b210c210}_{k240l240}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{b210} a_{c210} {a^\\dagger_{k240}} {a^\\dagger_{l240}}\\right\\}}{4}", - "ket": "\\frac{{t1^{g377h377}_{k432l432}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g377}} {a^\\dagger_{h377}} a_{k432} a_{l432}\\right\\}}{4}" + "bra": "- \\frac{{t1cc^{a3b3}_{i3j3}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a3} a_{b3} {a^\\dagger_{i3}} {a^\\dagger_{j3}}\\right\\}}{4}", + "ket": "\\frac{{t1^{a4b4}_{j4k4}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a4}} {a^\\dagger_{b4}} a_{j4} a_{k4}\\right\\}}{4}" }, "2": { - "bra": "- {t2^{a}_{i}} + {t2cc^{d242}_{j277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} {a^\\dagger_{j277}}\\right\\} - \\frac{{t2cc^{d242e242}_{j277k277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} {a^\\dagger_{j277}} {a^\\dagger_{k277}}\\right\\}}{4} - \\frac{{t2cc^{d242e242f242}_{j277k277l277}} 
\\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} a_{f242} {a^\\dagger_{j277}} {a^\\dagger_{k277}} {a^\\dagger_{l277}}\\right\\}}{36} - \\frac{{t2cc^{d242e242f242g242}_{j277k277l277m277}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{d242} a_{e242} a_{f242} a_{g242} {a^\\dagger_{j277}} {a^\\dagger_{k277}} {a^\\dagger_{l277}} {a^\\dagger_{m277}}\\right\\}}{576}", - "ket": "{t2^{g378}_{l433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g378}} a_{l433}\\right\\} + \\frac{{t2^{g378h378}_{l433m433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433}\\right\\}}{4} - \\frac{{t2^{g378h378a379}_{l433m433n433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a379}} {a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433} a_{n433}\\right\\}}{36} + \\frac{{t2^{g378h378a379b379}_{l433m433n433o433}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{a379}} {a^\\dagger_{b379}} {a^\\dagger_{g378}} {a^\\dagger_{h378}} a_{l433} a_{m433} a_{n433} a_{o433}\\right\\}}{576} - {t2cc^{a}_{i}}" + "bra": "- {t2^{a}_{i}} + {t2cc^{a5}_{k5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} {a^\\dagger_{k5}}\\right\\} - \\frac{{t2cc^{a5b5}_{k5l5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} {a^\\dagger_{k5}} {a^\\dagger_{l5}}\\right\\}}{4} - \\frac{{t2cc^{a5b5c5}_{k5l5m5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} a_{c5} {a^\\dagger_{k5}} {a^\\dagger_{l5}} {a^\\dagger_{m5}}\\right\\}}{36} - \\frac{{t2cc^{a5b5c5d5}_{k5l5m5n5}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\} \\left\\{a_{a5} a_{b5} a_{c5} a_{d5} {a^\\dagger_{k5}} {a^\\dagger_{l5}} {a^\\dagger_{m5}} {a^\\dagger_{n5}}\\right\\}}{576}", + "ket": "{t2^{e9}_{l10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} a_{l10}\\right\\} + \\frac{{t2^{e9f9}_{l10m10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} 
{a^\\dagger_{f9}} a_{l10} a_{m10}\\right\\}}{4} - \\frac{{t2^{e9f9g9}_{l10m10n10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} {a^\\dagger_{f9}} {a^\\dagger_{g9}} a_{l10} a_{m10} a_{n10}\\right\\}}{36} + \\frac{{t2^{e9f9g9h9}_{l10m10n10o10}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\} \\left\\{{a^\\dagger_{e9}} {a^\\dagger_{f9}} {a^\\dagger_{g9}} {a^\\dagger_{h9}} a_{l10} a_{m10} a_{n10} a_{o10}\\right\\}}{576} - {t2cc^{a}_{i}}" } }, "pphh": { @@ -20,12 +20,12 @@ "ket": "\\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\}" }, "1": { - "bra": "- {t1^{ab}_{ij}} + \\frac{{t1cc^{a384b384}_{l439m439}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a384} a_{b384} {a^\\dagger_{l439}} {a^\\dagger_{m439}}\\right\\}}{4}", - "ket": "\\frac{{t1^{d393e393}_{j450k450}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{d393}} {a^\\dagger_{e393}} a_{j450} a_{k450}\\right\\}}{4} - {t1cc^{ab}_{ij}}" + "bra": "- {t1^{ab}_{ij}} + \\frac{{t1cc^{g14h14}_{l16m16}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g14} a_{h14} {a^\\dagger_{l16}} {a^\\dagger_{m16}}\\right\\}}{4}", + "ket": "\\frac{{t1^{b24c24}_{j27k27}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b24}} {a^\\dagger_{c24}} a_{j27} a_{k27}\\right\\}}{4} - {t1cc^{ab}_{ij}}" }, "2": { - "bra": "- {t2cc^{g402}_{o460}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g402} {a^\\dagger_{o460}}\\right\\} - \\frac{{t2cc^{g402h402}_{o460i461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{o460}}\\right\\}}{4} + \\frac{{t2cc^{g402h402a403}_{o460i461j461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a403} a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{j461}} {a^\\dagger_{o460}}\\right\\}}{36} - 
\\frac{{t2cc^{g402h402a403b403}_{o460i461j461k461}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{a403} a_{b403} a_{g402} a_{h402} {a^\\dagger_{i461}} {a^\\dagger_{j461}} {a^\\dagger_{k461}} {a^\\dagger_{o460}}\\right\\}}{576} - \\left(\\frac{{t1^{ab}_{ij}} {t1cc^{a404b404}_{k462l462}} \\left\\{a_{a404} a_{b404} {a^\\dagger_{k462}} {a^\\dagger_{l462}}\\right\\}}{4} + {t2^{ab}_{ij}}\\right) - \\left({t2^{a}_{i}} \\left\\{a_{b} {a^\\dagger_{j}}\\right\\} - {t2^{a}_{j}} \\left\\{a_{b} {a^\\dagger_{i}}\\right\\} - {t2^{b}_{i}} \\left\\{a_{a} {a^\\dagger_{j}}\\right\\} + {t2^{b}_{j}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\}\\right)", - "ket": "{t2^{b454}_{m519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} a_{m519}\\right\\} + \\frac{{t2^{b454c454}_{m519n519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} a_{m519} a_{n519}\\right\\}}{4} - \\frac{{t2^{b454c454d454}_{m519n519o519}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} {a^\\dagger_{d454}} a_{m519} a_{n519} a_{o519}\\right\\}}{36} - \\frac{{t2^{b454c454d454e454}_{m519n519o519i520}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{b454}} {a^\\dagger_{c454}} {a^\\dagger_{d454}} {a^\\dagger_{e454}} a_{i520} a_{m519} a_{n519} a_{o519}\\right\\}}{576} - \\left(\\frac{{t1^{d456e456}_{j522k522}} {t1cc^{ab}_{ij}} \\left\\{{a^\\dagger_{d456}} {a^\\dagger_{e456}} a_{j522} a_{k522}\\right\\}}{4} + {t2cc^{ab}_{ij}}\\right) - \\left(- {t2cc^{a}_{i}} \\left\\{{a^\\dagger_{b}} a_{j}\\right\\} + {t2cc^{a}_{j}} \\left\\{{a^\\dagger_{b}} a_{i}\\right\\} + {t2cc^{b}_{i}} \\left\\{{a^\\dagger_{a}} a_{j}\\right\\} - {t2cc^{b}_{j}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\}\\right)" + "bra": "- {t2cc^{e33}_{o37}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} 
\\left\\{a_{e33} {a^\\dagger_{o37}}\\right\\} - \\frac{{t2cc^{e33f33}_{o37i38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} {a^\\dagger_{i38}} {a^\\dagger_{o37}}\\right\\}}{4} + \\frac{{t2cc^{e33f33g33}_{o37i38j38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} a_{g33} {a^\\dagger_{i38}} {a^\\dagger_{j38}} {a^\\dagger_{o37}}\\right\\}}{36} - \\frac{{t2cc^{e33f33g33h33}_{o37i38j38k38}} \\left\\{a_{a} a_{b} {a^\\dagger_{i}} {a^\\dagger_{j}}\\right\\} \\left\\{a_{e33} a_{f33} a_{g33} a_{h33} {a^\\dagger_{i38}} {a^\\dagger_{j38}} {a^\\dagger_{k38}} {a^\\dagger_{o37}}\\right\\}}{576} - \\left(\\frac{{t1^{ab}_{ij}} {t1cc^{g34h34}_{k39l39}} \\left\\{a_{g34} a_{h34} {a^\\dagger_{k39}} {a^\\dagger_{l39}}\\right\\}}{4} + {t2^{ab}_{ij}}\\right) - \\left({t2^{a}_{i}} \\left\\{a_{b} {a^\\dagger_{j}}\\right\\} - {t2^{a}_{j}} \\left\\{a_{b} {a^\\dagger_{i}}\\right\\} - {t2^{b}_{i}} \\left\\{a_{a} {a^\\dagger_{j}}\\right\\} + {t2^{b}_{j}} \\left\\{a_{a} {a^\\dagger_{i}}\\right\\}\\right)", + "ket": "{t2^{h84}_{m96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{h84}} a_{m96}\\right\\} - \\frac{{t2^{h84a85}_{m96n96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{h84}} a_{m96} a_{n96}\\right\\}}{4} - \\frac{{t2^{h84a85b85}_{m96n96o96}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{b85}} {a^\\dagger_{h84}} a_{m96} a_{n96} a_{o96}\\right\\}}{36} + \\frac{{t2^{h84a85b85c85}_{m96n96o96i97}} \\left\\{{a^\\dagger_{a}} {a^\\dagger_{b}} a_{i} a_{j}\\right\\} \\left\\{{a^\\dagger_{a85}} {a^\\dagger_{b85}} {a^\\dagger_{c85}} {a^\\dagger_{h84}} a_{i97} a_{m96} a_{n96} a_{o96}\\right\\}}{576} - \\left(\\frac{{t1^{b87c87}_{j99k99}} {t1cc^{ab}_{ij}} \\left\\{{a^\\dagger_{b87}} {a^\\dagger_{c87}} a_{j99} a_{k99}\\right\\}}{4} + 
{t2cc^{ab}_{ij}}\\right) - \\left(- {t2cc^{a}_{i}} \\left\\{{a^\\dagger_{b}} a_{j}\\right\\} + {t2cc^{a}_{j}} \\left\\{{a^\\dagger_{b}} a_{i}\\right\\} + {t2cc^{b}_{i}} \\left\\{{a^\\dagger_{a}} a_{j}\\right\\} - {t2cc^{b}_{j}} \\left\\{{a^\\dagger_{a}} a_{i}\\right\\}\\right)" } } } diff --git a/tests/reference_data/isr_precursor_overlap.json b/tests/reference_data/isr_precursor_overlap.json index 50e823e..7775a74 100644 --- a/tests/reference_data/isr_precursor_overlap.json +++ b/tests/reference_data/isr_precursor_overlap.json @@ -3,7 +3,7 @@ "ph,ph": { "0": "\\delta_{a b} \\delta_{i j}", "1": "0", - "2": "- \\frac{\\delta_{a b} {t1^{e205f205}_{im234}} {t1cc^{e205f205}_{jm234}}}{2} - \\frac{\\delta_{i j} {t1^{ae205}_{m234n234}} {t1cc^{be205}_{m234n234}}}{2} + {t1^{ae205}_{im234}} {t1cc^{be205}_{jm234}}" + "2": "- \\frac{\\delta_{a b} {t1^{a3a4}_{ii3}} {t1cc^{a3a4}_{ji3}}}{2} - \\frac{\\delta_{i j} {t1^{aa3}_{i3j3}} {t1cc^{ba3}_{i3j3}}}{2} + {t1^{aa3}_{ii3}} {t1cc^{ba3}_{ji3}}" } } } \ No newline at end of file diff --git a/tests/reference_data/properties_expectation_value.json b/tests/reference_data/properties_expectation_value.json index 3075bcd..5bc8e06 100644 --- a/tests/reference_data/properties_expectation_value.json +++ b/tests/reference_data/properties_expectation_value.json @@ -18,8 +18,8 @@ } }, "2": { - "expectation_value": "- \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{ik}} {t1cc^{bc}_{kl}} {d^{j}_{l}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{il}} {t1cc^{bc}_{jk}} {d^{l}_{k}}}{2} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{kl}} {t1cc^{bc}_{jl}} {d^{k}_{i}}}{4} - {X^{a}_{i}} {Y^{a}_{j}} {t1^{bd}_{ik}} {t1cc^{bc}_{jk}} {d^{c}_{d}} - {X^{a}_{i}} {Y^{a}_{j}} {t2^{b}_{i}} {d^{j}_{b}} - {X^{a}_{i}} {Y^{a}_{j}} {t2cc^{b}_{j}} {d^{b}_{i}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{i}} + \\frac{{X^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{jk}} {t1cc^{cd}_{jk}} {d^{d}_{b}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1cc^{bc}_{jk}} {d^{l}_{j}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} 
{t1^{ad}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{d}}}{2} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {t1^{cd}_{jk}} {t1cc^{bd}_{jk}} {d^{a}_{c}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {t2^{a}_{j}} {d^{j}_{b}} - {X^{a}_{i}} {Y^{b}_{i}} {t2cc^{b}_{j}} {d^{a}_{j}} + {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1cc^{bc}_{kl}} {d^{j}_{l}}}{2} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1cc^{cd}_{jk}} {d^{d}_{b}}}{2} - {X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{il}} {t1cc^{bc}_{jk}} {d^{l}_{k}} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{kl}} {t1cc^{bc}_{jl}} {d^{k}_{i}}}{2} + {X^{a}_{i}} {Y^{b}_{j}} {t1^{ad}_{ik}} {t1cc^{bc}_{jk}} {d^{c}_{d}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {t1^{cd}_{ik}} {t1cc^{bd}_{jk}} {d^{a}_{c}}}{2} + 2 {X^{a}_{i}} {Y^{ab}_{ij}} {t1cc^{bc}_{jk}} {d^{c}_{k}} - 2 {X^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {Y^{ab}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{i}} + {X^{a}_{i}} {Y^{bc}_{ij}} {t1cc^{bc}_{jk}} {d^{a}_{k}} - 2 {X^{ab}_{ij}} {Y^{a}_{j}} {t1^{bc}_{ik}} {d^{k}_{c}} + 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{b}_{i}} - {X^{ab}_{ij}} {Y^{b}_{k}} {t1^{ac}_{ij}} {d^{k}_{c}} - {X^{ab}_{ij}} {Y^{c}_{j}} {t1^{ab}_{ik}} {d^{k}_{c}} + 2 {X^{ab}_{ij}} {Y^{ab}_{jk}} {d^{k}_{i}} + 2 {X^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", - "real_symmetric_state_expectation_value": "- \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{ik}} {t1^{bc}_{kl}} {d^{j}_{l}}}{2} + \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{il}} {t1^{bc}_{jk}} {d^{k}_{l}}}{2} - {Y^{a}_{i}} {Y^{a}_{j}} {t1^{bc}_{jk}} {t1^{bd}_{ik}} {d^{c}_{d}} - 2 {Y^{a}_{i}} {Y^{a}_{j}} {t2^{b}_{i}} {d^{j}_{b}} - {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{j}} - {Y^{a}_{i}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1^{bc}_{jk}} {d^{j}_{l}} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {t1^{ad}_{jk}} {t1^{bc}_{jk}} {d^{c}_{d}}}{2} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {t1^{bd}_{jk}} {t1^{cd}_{jk}} {d^{a}_{c}}}{2} - 2 {Y^{a}_{i}} {Y^{b}_{i}} {t2^{a}_{j}} {d^{j}_{b}} + {Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{ik}} {t1^{bc}_{kl}} {d^{j}_{l}} - 
{Y^{a}_{i}} {Y^{b}_{j}} {t1^{ac}_{il}} {t1^{bc}_{jk}} {d^{k}_{l}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{ad}_{ik}} {t1^{bc}_{jk}} {d^{c}_{d}} + {Y^{a}_{i}} {Y^{b}_{j}} {t1^{bd}_{jk}} {t1^{cd}_{ik}} {d^{a}_{c}} - 4 {Y^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + 2 {Y^{a}_{i}} {Y^{ab}_{jk}} {t1^{bc}_{jk}} {d^{i}_{c}} + 2 {Y^{a}_{i}} {Y^{bc}_{ij}} {t1^{bc}_{jk}} {d^{k}_{a}} - 4 {Y^{a}_{j}} {Y^{ab}_{ij}} {t1^{bc}_{ik}} {d^{k}_{c}} + 2 {Y^{ab}_{ij}} {Y^{ab}_{jk}} {d^{i}_{k}} + 2 {Y^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", + "expectation_value": "- {X^{a}_{i}} {Y^{a}_{j}} {d^{b}_{i}} {t2cc^{b}_{j}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{c}_{d}} {t1^{bd}_{ik}} {t1cc^{bc}_{jk}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{b}} {t2^{b}_{i}} - {X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{i}} - \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{j}_{l}} {t1^{bc}_{ik}} {t1cc^{bc}_{kl}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{k}_{i}} {t1^{bc}_{kl}} {t1cc^{bc}_{jl}}}{4} + \\frac{{X^{a}_{i}} {Y^{a}_{j}} {d^{l}_{k}} {t1^{bc}_{il}} {t1cc^{bc}_{jk}}}{2} + {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{c}} {t1^{cd}_{jk}} {t1cc^{bd}_{jk}}}{4} - {X^{a}_{i}} {Y^{b}_{i}} {d^{a}_{j}} {t2cc^{b}_{j}} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{c}_{b}} {t1^{ad}_{jk}} {t1cc^{cd}_{jk}}}{4} - \\frac{{X^{a}_{i}} {Y^{b}_{i}} {d^{c}_{d}} {t1^{ad}_{jk}} {t1cc^{bc}_{jk}}}{2} - {X^{a}_{i}} {Y^{b}_{i}} {d^{j}_{b}} {t2^{a}_{j}} - {X^{a}_{i}} {Y^{b}_{i}} {d^{l}_{j}} {t1^{ac}_{kl}} {t1cc^{bc}_{jk}} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{a}_{c}} {t1^{cd}_{ik}} {t1cc^{bd}_{jk}}}{2} + {X^{a}_{i}} {Y^{b}_{j}} {d^{c}_{d}} {t1^{ad}_{ik}} {t1cc^{bc}_{jk}} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{d}_{b}} {t1^{ac}_{ik}} {t1cc^{cd}_{jk}}}{2} + \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{j}_{l}} {t1^{ac}_{ik}} {t1cc^{bc}_{kl}}}{2} - \\frac{{X^{a}_{i}} {Y^{b}_{j}} {d^{k}_{i}} {t1^{ac}_{kl}} {t1cc^{bc}_{jl}}}{2} - {X^{a}_{i}} {Y^{b}_{j}} {d^{l}_{k}} {t1^{ac}_{il}} {t1cc^{bc}_{jk}} + 2 {X^{a}_{i}} {Y^{ab}_{ij}} {d^{c}_{k}} {t1cc^{bc}_{jk}} - 2 {X^{a}_{i}} {Y^{ab}_{ij}} 
{d^{j}_{b}} + {X^{a}_{i}} {Y^{ab}_{jk}} {d^{c}_{i}} {t1cc^{bc}_{jk}} + {X^{a}_{i}} {Y^{bc}_{ij}} {d^{a}_{k}} {t1cc^{bc}_{jk}} + 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{b}_{i}} - 2 {X^{ab}_{ij}} {Y^{a}_{j}} {d^{k}_{c}} {t1^{bc}_{ik}} - {X^{ab}_{ij}} {Y^{b}_{k}} {d^{k}_{c}} {t1^{ac}_{ij}} - {X^{ab}_{ij}} {Y^{c}_{j}} {d^{k}_{c}} {t1^{ab}_{ik}} + 2 {X^{ab}_{ij}} {Y^{ab}_{jk}} {d^{k}_{i}} + 2 {X^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", + "real_symmetric_state_expectation_value": "- {Y^{a}_{i}} {Y^{a}_{j}} {d^{c}_{d}} {t1^{bc}_{jk}} {t1^{bd}_{ik}} - 2 {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{b}} {t2^{b}_{j}} - {Y^{a}_{i}} {Y^{a}_{j}} {d^{i}_{j}} - \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {d^{j}_{l}} {t1^{bc}_{ik}} {t1^{bc}_{kl}}}{2} + \\frac{{Y^{a}_{i}} {Y^{a}_{j}} {d^{k}_{l}} {t1^{bc}_{il}} {t1^{bc}_{jk}}}{2} + {Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{b}} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {d^{a}_{c}} {t1^{bd}_{jk}} {t1^{cd}_{jk}}}{2} - \\frac{{Y^{a}_{i}} {Y^{b}_{i}} {d^{c}_{d}} {t1^{ad}_{jk}} {t1^{bc}_{jk}}}{2} - 2 {Y^{a}_{i}} {Y^{b}_{i}} {d^{j}_{a}} {t2^{b}_{j}} - {Y^{a}_{i}} {Y^{b}_{i}} {d^{j}_{l}} {t1^{ac}_{kl}} {t1^{bc}_{jk}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{a}_{c}} {t1^{bd}_{jk}} {t1^{cd}_{ik}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{c}_{d}} {t1^{ad}_{ik}} {t1^{bc}_{jk}} + {Y^{a}_{i}} {Y^{b}_{j}} {d^{j}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{kl}} - {Y^{a}_{i}} {Y^{b}_{j}} {d^{k}_{l}} {t1^{ac}_{il}} {t1^{bc}_{jk}} - 4 {Y^{a}_{i}} {Y^{ab}_{ij}} {d^{j}_{b}} + 2 {Y^{a}_{i}} {Y^{ab}_{jk}} {d^{i}_{c}} {t1^{bc}_{jk}} + 2 {Y^{a}_{i}} {Y^{bc}_{ij}} {d^{k}_{a}} {t1^{bc}_{jk}} - 4 {Y^{a}_{j}} {Y^{ab}_{ij}} {d^{k}_{c}} {t1^{bc}_{ik}} + 2 {Y^{ab}_{ij}} {Y^{ab}_{jk}} {d^{i}_{k}} + 2 {Y^{ab}_{ij}} {Y^{ac}_{ij}} {d^{b}_{c}}", "real_symmetric_state_dm": { "vv": "{Y^{a}_{i}} {Y^{b}_{i}} - \\frac{{Y^{a}_{i}} {Y^{c}_{i}} {p2^{b}_{c}}}{2} + \\frac{{Y^{a}_{i}} {Y^{c}_{j}} {t1^{bd}_{ik}} {t1^{cd}_{jk}}}{2} - \\frac{{Y^{b}_{i}} {Y^{c}_{i}} {p2^{a}_{c}}}{2} + \\frac{{Y^{b}_{i}} {Y^{c}_{j}} {t1^{ad}_{ik}} {t1^{cd}_{jk}}}{2} - {Y^{c}_{i}} {Y^{c}_{j}} 
{t1^{ad}_{ik}} {t1^{bd}_{jk}} - \\frac{{Y^{c}_{i}} {Y^{d}_{i}} {t1^{ac}_{jk}} {t1^{bd}_{jk}}}{2} + {Y^{c}_{i}} {Y^{d}_{j}} {t1^{ac}_{ik}} {t1^{bd}_{jk}} + 2 {Y^{ac}_{ij}} {Y^{bc}_{ij}}", "oo": "- {Y^{a}_{i}} {Y^{a}_{j}} - \\frac{{Y^{a}_{i}} {Y^{a}_{k}} {p2^{j}_{k}}}{2} - \\frac{{Y^{a}_{j}} {Y^{a}_{k}} {p2^{i}_{k}}}{2} + \\frac{{Y^{a}_{k}} {Y^{a}_{l}} {t1^{bc}_{ik}} {t1^{bc}_{jl}}}{2} - {Y^{a}_{k}} {Y^{b}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{jl}} + \\frac{{Y^{a}_{l}} {Y^{b}_{i}} {t1^{ac}_{kl}} {t1^{bc}_{jk}}}{2} + \\frac{{Y^{a}_{l}} {Y^{b}_{j}} {t1^{ac}_{kl}} {t1^{bc}_{ik}}}{2} + {Y^{a}_{l}} {Y^{b}_{l}} {t1^{ac}_{ik}} {t1^{bc}_{jk}} - 2 {Y^{ab}_{ik}} {Y^{ab}_{jk}}", diff --git a/tests/reference_data/properties_trans_moment.json b/tests/reference_data/properties_trans_moment.json index a4e7329..2ee7a6f 100644 --- a/tests/reference_data/properties_trans_moment.json +++ b/tests/reference_data/properties_trans_moment.json @@ -9,16 +9,16 @@ } }, "1": { - "expectation_value": "- {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}}", - "real_expectation_value": "- {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}}", + "expectation_value": "{X^{a}_{i}} {d^{a}_{i}} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}}", + "real_expectation_value": "{X^{a}_{i}} {d^{a}_{i}} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}}", "real_transition_dm": { "vo": "{X^{a}_{i}}", "ov": "- {X^{b}_{j}} {t1^{ab}_{ij}}" } }, "2": { - "expectation_value": "\\frac{{X^{a}_{i}} {t1^{ab}_{ij}} {t1cc^{bc}_{jk}} {d^{c}_{k}}}{2} - {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + \\frac{{X^{a}_{i}} {t1^{ab}_{jk}} {t1cc^{bc}_{jk}} {d^{c}_{i}}}{4} + \\frac{{X^{a}_{i}} {t1^{bc}_{ij}} {t1cc^{bc}_{jk}} {d^{a}_{k}}}{4} - {X^{a}_{i}} {t2^{a}_{j}} {d^{j}_{i}} + {X^{a}_{i}} {t2^{b}_{i}} {d^{a}_{b}} - {X^{a}_{i}} {t2^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}} - {X^{ab}_{ij}} {t1^{ab}_{ik}} {d^{k}_{j}} - {X^{ab}_{ij}} {t1^{bc}_{ij}} {d^{a}_{c}}", - "real_expectation_value": "\\frac{{X^{a}_{i}} {t1^{ab}_{ij}} 
{t1^{bc}_{jk}} {d^{c}_{k}}}{2} - {X^{a}_{i}} {t1^{ab}_{ij}} {d^{j}_{b}} + \\frac{{X^{a}_{i}} {t1^{ab}_{jk}} {t1^{bc}_{jk}} {d^{c}_{i}}}{4} + \\frac{{X^{a}_{i}} {t1^{bc}_{ij}} {t1^{bc}_{jk}} {d^{a}_{k}}}{4} - {X^{a}_{i}} {t2^{a}_{j}} {d^{j}_{i}} + {X^{a}_{i}} {t2^{b}_{i}} {d^{a}_{b}} - {X^{a}_{i}} {t2^{ab}_{ij}} {d^{j}_{b}} + {X^{a}_{i}} {d^{a}_{i}} - {X^{ab}_{ij}} {t1^{ab}_{ik}} {d^{k}_{j}} - {X^{ab}_{ij}} {t1^{bc}_{ij}} {d^{a}_{c}}", + "expectation_value": "{X^{a}_{i}} {d^{a}_{b}} {t2^{b}_{i}} + {X^{a}_{i}} {d^{a}_{i}} + \\frac{{X^{a}_{i}} {d^{a}_{k}} {t1^{bc}_{ij}} {t1cc^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{i}} {t1^{ab}_{jk}} {t1cc^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{k}} {t1^{ab}_{ij}} {t1cc^{bc}_{jk}}}{2} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{b}} {t2^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{i}} {t2^{a}_{j}} - {X^{ab}_{ij}} {d^{a}_{c}} {t1^{bc}_{ij}} - {X^{ab}_{ij}} {d^{k}_{j}} {t1^{ab}_{ik}}", + "real_expectation_value": "{X^{a}_{i}} {d^{a}_{b}} {t2^{b}_{i}} + {X^{a}_{i}} {d^{a}_{i}} + \\frac{{X^{a}_{i}} {d^{a}_{k}} {t1^{bc}_{ij}} {t1^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{i}} {t1^{ab}_{jk}} {t1^{bc}_{jk}}}{4} + \\frac{{X^{a}_{i}} {d^{c}_{k}} {t1^{ab}_{ij}} {t1^{bc}_{jk}}}{2} - {X^{a}_{i}} {d^{j}_{b}} {t1^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{b}} {t2^{ab}_{ij}} - {X^{a}_{i}} {d^{j}_{i}} {t2^{a}_{j}} - {X^{ab}_{ij}} {d^{a}_{c}} {t1^{bc}_{ij}} - {X^{ab}_{ij}} {d^{k}_{j}} {t1^{ab}_{ik}}", "real_transition_dm": { "vo": "{X^{a}_{i}} + \\frac{{X^{a}_{j}} {p2^{i}_{j}}}{2} - \\frac{{X^{b}_{i}} {p2^{a}_{b}}}{2} + \\frac{{X^{c}_{k}} {t1^{ab}_{ij}} {t1^{bc}_{jk}}}{2}", "vv": "{X^{a}_{i}} {t2^{b}_{i}} + {X^{ac}_{ij}} {t1^{bc}_{ij}}", diff --git a/tests/reference_data/secular_matrix.json b/tests/reference_data/secular_matrix.json index 2faadcf..caf79c4 100644 --- a/tests/reference_data/secular_matrix.json +++ b/tests/reference_data/secular_matrix.json @@ -44,8 +44,8 @@ "vv": "- \\frac{\\delta_{a b} {t1^{cd}_{ik}} {t2eri3^{jk}_{cd}}}{8} + 
\\frac{\\delta_{a b} {t1^{cd}_{ik}} {t2eri4_{jkdc}}}{2} - \\frac{\\delta_{a b} {t1^{cd}_{jk}} {t2eri3^{ik}_{cd}}}{8} + \\frac{\\delta_{a b} {t1^{cd}_{jk}} {t2eri4_{ikdc}}}{2} - \\delta_{a b} {t2^{c}_{k}} {V^{ik}_{jc}} - \\delta_{a b} {t2^{c}_{k}} {V^{jk}_{ic}} + \\frac{\\delta_{a b} {t2^{cd}_{ik}} {V^{jk}_{cd}}}{4} + \\frac{\\delta_{a b} {t2^{cd}_{jk}} {V^{ik}_{cd}}}{4} - \\delta_{a b} {V^{ic}_{jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{il}_{jk}} {p2^{k}_{l}}" }, "real_factored_cvs": { - "none": "\\frac{{t1^{ac}_{kl}} {t1^{bd}_{kl}} {V^{Ic}_{Jd}}}{2} - {t2^{a}_{k}} {V^{kJ}_{Ib}} - {t2^{b}_{k}} {V^{kI}_{Ja}} + \\frac{{V^{Ib}_{Jc}} {p2^{a}_{c}}}{2} + \\frac{{V^{Ic}_{Ja}} {p2^{b}_{c}}}{2} - {V^{kJ}_{lI}} {t2sq^{ka}_{lb}}", - "vv": "\\delta_{a b} {t2^{c}_{k}} {V^{kI}_{Jc}} + \\delta_{a b} {t2^{c}_{k}} {V^{kJ}_{Ic}} - \\delta_{a b} {V^{Ic}_{Jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{kJ}_{lI}} {p2^{k}_{l}}", + "none": "\\frac{{t1^{ac}_{kl}} {t1^{bd}_{kl}} {V^{Ic}_{Jd}}}{2} - {t2^{a}_{k}} {V^{Ib}_{kJ}} - {t2^{b}_{k}} {V^{Ja}_{kI}} + \\frac{{V^{Ib}_{Jc}} {p2^{a}_{c}}}{2} + \\frac{{V^{Ic}_{Ja}} {p2^{b}_{c}}}{2} - {V^{kJ}_{lI}} {t2sq^{ka}_{lb}}", + "vv": "\\delta_{a b} {t2^{c}_{k}} {V^{Ic}_{kJ}} + \\delta_{a b} {t2^{c}_{k}} {V^{Jc}_{kI}} - \\delta_{a b} {V^{Ic}_{Jd}} {p2^{c}_{d}} - \\delta_{a b} {V^{kJ}_{lI}} {p2^{k}_{l}}", "cc": "- \\frac{\\delta_{I J} {t1^{ac}_{kl}} {t2eri5^{kl}_{bc}}}{8} + \\frac{\\delta_{I J} {t1^{ac}_{kl}} {t2eri4_{klcb}}}{2} - \\frac{\\delta_{I J} {t1^{bc}_{kl}} {t2eri5^{kl}_{ac}}}{8} + \\frac{\\delta_{I J} {t1^{bc}_{kl}} {t2eri4_{klca}}}{2} - \\delta_{I J} {t2^{c}_{k}} {V^{ka}_{bc}} - \\delta_{I J} {t2^{c}_{k}} {V^{kb}_{ac}} + \\frac{\\delta_{I J} {t2^{ac}_{kl}} {V^{kl}_{bc}}}{4} + \\frac{\\delta_{I J} {t2^{bc}_{kl}} {V^{kl}_{ac}}}{4} + \\delta_{I J} {V^{ad}_{bc}} {p2^{c}_{d}} + \\delta_{I J} {V^{ka}_{lb}} {p2^{k}_{l}}" } } From 92bdee019003700f473e05bf34d1d0093f69b80e Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 12:36:50 +0200 Subject: 
[PATCH 09/26] Change RI index name to aux and fixed a type checking bug where int != Expr --- adcgen/expression/object_container.py | 4 ++-- adcgen/expression/polynom_container.py | 2 +- adcgen/generate_code/config.json | 2 +- adcgen/generate_code/contraction.py | 4 ++-- adcgen/indices.py | 14 +++++++------- adcgen/sympy_objects.py | 2 -- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 80bc789..b106173 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -779,12 +779,12 @@ def factorise_eri(self, factorisation: str = 'sym', p, q, r, s = self.idx res = S.One if p.spin == q.spin and r.spin == s.spin: - assumptions = {"ri": True} + assumptions = {"aux": True} if p.spin: # Check if RI is applied before or after # spin integration. RI indices are always alpha assumptions["alpha"] = True - for _ in range(exponent): + for _ in range(int(exponent)): aux_idx = Index('P', **assumptions) if factorisation == 'sym': res *= SymmetricTensor(tensor_names.ri_sym, diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 63ce28b..046783d 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -219,7 +219,7 @@ def factorise_eri(self, factorisation: str = 'sym', from .expr_container import ExprContainer factorised = S.One - for _ in range(self.exponent): + for _ in range(int(self.exponent)): expanded = S.Zero for term in self.terms: expanded += term.factorise_eri(factorisation=factorisation, diff --git a/adcgen/generate_code/config.json b/adcgen/generate_code/config.json index 23780fa..0876473 100644 --- a/adcgen/generate_code/config.json +++ b/adcgen/generate_code/config.json @@ -3,6 +3,6 @@ "core": 5, "occ": 20, "virt": 200, - "ri": 250 + "aux": 250 } } \ No newline at end of file diff --git a/adcgen/generate_code/contraction.py 
b/adcgen/generate_code/contraction.py index 2119288..dbb6207 100644 --- a/adcgen/generate_code/contraction.py +++ b/adcgen/generate_code/contraction.py @@ -23,7 +23,7 @@ class Sizes: occ: int = 0 virt: int = 0 general: int = 0 - ri: int = 0 + aux: int = 0 @staticmethod def from_dict(input: dict[str, int]) -> "Sizes": @@ -233,7 +233,7 @@ class ScalingComponent: virt: int occ: int core: int - ri: int + aux: int def evaluate_costs(self, sizes: Sizes) -> int: """ diff --git a/adcgen/indices.py b/adcgen/indices.py index dd1b34e..c71dfe9 100644 --- a/adcgen/indices.py +++ b/adcgen/indices.py @@ -45,8 +45,8 @@ def space(self) -> str: return "virt" elif self.assumptions0.get("core"): return "core" - elif self.assumptions0.get("ri"): - return "ri" + elif self.assumptions0.get("aux"): + return "aux" else: return "general" @@ -85,7 +85,7 @@ class Indices(metaclass=Singleton): # the valid spaces with their corresponding associated index names base = { "occ": "ijklmno", "virt": "abcdefgh", "general": "pqrstuvw", - "core": "IJKLMNO", "ri": "PQRSTUVWXYZ" + "core": "IJKLMNO", "aux": "PQRST" } # the valid spins spins = ("", "a", "b") @@ -246,8 +246,8 @@ def _new_symbol(self, name: str, space: str, spin: str) -> Index: assumptions["above_fermi"] = True elif space == "core": assumptions["core"] = True - elif space == "ri": - assumptions["ri"] = True + elif space == "aux": + assumptions["aux"] = True elif space != "general": raise ValueError(f"Invalid space {space}") if spin: @@ -274,7 +274,7 @@ def sort_idx_canonical(idx: Index | Any): # - also add the hash here for wicks, where multiple i are around # - we have to map the spaces onto numbers, since in adcman and adcc # the ordering o < c < v is used for the definition of canonical blocks - space_keys = {"g": 0, "o": 1, "c": 2, "v": 3, "r": 4} + space_keys = {"g": 0, "o": 1, "c": 2, "v": 3, "a": 4} return (space_keys[idx.space[0]], idx.spin, int(idx.name[1:]) if idx.name[1:] else 0, @@ -321,7 +321,7 @@ def 
generic_indices_from_space(space_str: str) -> list[Index]: occ = generic_idx.get(("occ", ""), []) occ.extend(generic_idx.get(("virt", ""), [])) occ.extend(generic_idx.get(("core", ""), [])) - occ.extend(generic_idx.get(("ri", ""), [])) + occ.extend(generic_idx.get(("aux", ""), [])) return occ diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index d5c83fd..abc8162 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -230,8 +230,6 @@ def __new__(cls, name: str | Symbol, upper: Sequence[Index], # add the Index check for subs to work correctly negative_sign = False bra_ket_sym_imported = sympify(bra_ket_sym) - print(f"{name}: {bra_ket_sym}") - print(f"{upper}, {lower}") if bra_ket_sym_imported is not S.Zero and \ all(isinstance(s, Index) for s in upper+lower): if bra_ket_sym_imported not in [S.One, S.NegativeOne]: From 4df0ebbeef8ea1baeb59632a62228acf7a27da84 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 12:45:13 +0200 Subject: [PATCH 10/26] Also changed the name of the RI index in the test cases so they dont fail --- tests/contraction_test.py | 9 +++------ tests/indices_test.py | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/contraction_test.py b/tests/contraction_test.py index ba76101..b48aaca 100644 --- a/tests/contraction_test.py +++ b/tests/contraction_test.py @@ -50,7 +50,7 @@ def test_scaling(self): def test_sizes(self): # test the automatic evaluation of the size of the general space - sizes = {"occ": 1, "virt": 2, "core": 3, "ri": 0} + sizes = {"occ": 1, "virt": 2, "core": 3, "aux": 0} res = Sizes.from_dict(sizes) sizes["general"] = 6 assert sizes == asdict(res) @@ -59,19 +59,16 @@ def test_sizes(self): assert sizes == asdict(res) def test_evalute_costs(self): - sizes = {"occ": 3, "virt": 5, "core": 2, "general": 7, "ri": 8} + sizes = {"occ": 3, "virt": 5, "core": 2, "general": 7, "aux": 8} sizes = Sizes.from_dict(sizes) comp = ScalingComponent(42, 1, 2, 3, 4, 5) mem = 
ScalingComponent(42, 5, 4, 3, 2, 1) scaling = Scaling(comp, mem) - print(comp.evaluate_costs(sizes)) - print(mem.evaluate_costs(sizes)) - print(scaling.evaluate_costs(sizes)) assert comp.evaluate_costs(sizes) == 2477260800 assert mem.evaluate_costs(sizes) == 9075780000 assert scaling.evaluate_costs(sizes) == (2477260800, 9075780000) # ensure that zero sized spaces are ignored - sizes = {"occ": 3, "virt": 5, "core": 0, "ri": 0} # general == 8 + sizes = {"occ": 3, "virt": 5, "core": 0, "aux": 0} # general == 8 sizes = Sizes.from_dict(sizes) assert comp.evaluate_costs(sizes) == 5400 comp = ScalingComponent(42, 0, 1, 2, 3, 0) diff --git a/tests/indices_test.py b/tests/indices_test.py index 7468b37..dba0d84 100644 --- a/tests/indices_test.py +++ b/tests/indices_test.py @@ -14,8 +14,8 @@ def test_get_indices(self): assert I.space == "core" and I.spin == "b" assert j.space == "occ" and j.spin == "a" res = idx.get_indices("Pa") - P, a = res[("ri", "")].pop(), res[("virt", "")].pop() - assert P.space == "ri" and P.spin == "" + P, a = res[("aux", "")].pop(), res[("virt", "")].pop() + assert P.space == "aux" and P.spin == "" assert a.space == "virt" and a.spin == "" def test_get_generic_indices(self): From 3e74b55c76b4700e9e884c356123e65228440ef5 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 13:33:12 +0200 Subject: [PATCH 11/26] Renamed RI factorisation method and amended docstrings --- adcgen/expression/expr_container.py | 11 ++++++----- adcgen/expression/object_container.py | 10 +++++----- adcgen/expression/polynom_container.py | 10 +++++----- adcgen/expression/term_container.py | 12 ++++++------ adcgen/resolution_of_identity.py | 2 +- 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/adcgen/expression/expr_container.py b/adcgen/expression/expr_container.py index bec6eac..bf9f949 100644 --- a/adcgen/expression/expr_container.py +++ b/adcgen/expression/expr_container.py @@ -1,5 +1,5 @@ from collections.abc import Iterable, Sequence -from 
typing import Any +from typing import Any, Literal from sympy import Add, Basic, Expr, Mul, Pow, S, Symbol, factor, nsimplify @@ -275,22 +275,23 @@ def rename_tensor(self, current: str, new: str) -> 'ExprContainer': self._inner = renamed return self - def factorise_eri(self, factorisation: str = 'sym') -> 'ExprContainer': + def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym' + ) -> 'ExprContainer': """ Factorises symmetric ERIs in chemist notation into RI format. This can be done both symmetrically and asymetrically Args: factorisation : str, optional - Which mode of factorisation to use. Defaults to 'sym'. + Either 'sym' or 'asym'. Defaults to 'sym'. Returns: ExprContainer: The factorised result """ res = S.Zero for term in self.terms: - res += term.factorise_eri(factorisation=factorisation, - wrap_result=False) + res += term.expand_coulomb_ri(factorisation=factorisation, + wrap_result=False) assert isinstance(res, Expr) self._inner = res return self diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index b106173..f8472f6 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -1,6 +1,6 @@ from collections.abc import Iterable from functools import cached_property -from typing import Any, Sequence, TYPE_CHECKING +from typing import Any, Sequence, TYPE_CHECKING, Literal import itertools from sympy.physics.secondquant import F, Fd, FermionicOperator, NO @@ -747,11 +747,11 @@ def rename_tensor(self, current: str, new: str, obj = ExprContainer(obj, **self.assumptions) return obj - def factorise_eri(self, factorisation: str = 'sym', - wrap_result: bool = True) -> "ExprContainer | Expr": + def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + wrap_result: bool = True) -> "ExprContainer | Expr": """ - Factorises symmetric ERIs in chemist notation into RI format. 
- This is done either symmetrically or asymmetrically + Factorises Coulomb integrals into RI format. + This is done either symmetrically or asymmetrically. Args: factorisation : str, optional diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 046783d..7e41176 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -1,6 +1,6 @@ from collections.abc import Iterable, Sequence from functools import cached_property -from typing import Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING, Literal from sympy import Add, Expr, Pow, Symbol, S @@ -198,8 +198,8 @@ def rename_tensor(self, current: str, new: str, renamed = ExprContainer(inner=renamed, **self.assumptions) return renamed - def factorise_eri(self, factorisation: str = 'sym', - wrap_result: bool = True) -> "Expr | ExprContainer": + def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + wrap_result: bool = True) -> "Expr | ExprContainer": """ Fatorises the symmetric ERIs in chemist notation into an RI format. 
Note that this expands the polynomial to account for the uniqueness @@ -222,8 +222,8 @@ def factorise_eri(self, factorisation: str = 'sym', for _ in range(int(self.exponent)): expanded = S.Zero for term in self.terms: - expanded += term.factorise_eri(factorisation=factorisation, - wrap_result=False) + expanded += term.expand_coulomb_ri(factorisation=factorisation, + wrap_result=False) factorised *= expanded assert isinstance(factorised, Expr) diff --git a/adcgen/expression/term_container.py b/adcgen/expression/term_container.py index 81d12b9..05cdc40 100644 --- a/adcgen/expression/term_container.py +++ b/adcgen/expression/term_container.py @@ -1,7 +1,7 @@ from collections.abc import Iterable from collections import Counter from functools import cached_property -from typing import Any, TYPE_CHECKING, Sequence +from typing import Any, TYPE_CHECKING, Sequence, Literal from sympy import Add, Expr, Mul, Pow, S, Symbol, factor, latex, nsimplify from sympy.physics.secondquant import NO @@ -596,10 +596,10 @@ def rename_tensor(self, current: str, new: str, wrap_result: bool = True renamed = ExprContainer(renamed, **self.assumptions) return renamed - def factorise_eri(self, factorisation: str = 'sym', - wrap_result: bool = True) -> "Expr | ExprContainer": + def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + wrap_result: bool = True) -> "Expr | ExprContainer": """ - Factorises symmetric ERIs in chemist notation into RI format. + Factorises Coulomb integrals into RI format. 
This is done either symmetrically or asymmetrically Args: @@ -617,8 +617,8 @@ def factorise_eri(self, factorisation: str = 'sym', factorised = S.One for obj in self.objects: - factorised *= obj.factorise_eri(factorisation=factorisation, - wrap_result=False) + factorised *= obj.expand_coulomb_ri(factorisation=factorisation, + wrap_result=False) if wrap_result: assumptions = self.assumptions diff --git a/adcgen/resolution_of_identity.py b/adcgen/resolution_of_identity.py index 108c1e6..40d914a 100644 --- a/adcgen/resolution_of_identity.py +++ b/adcgen/resolution_of_identity.py @@ -44,4 +44,4 @@ def apply_resolution_of_identity(expr: ExprContainer, raise Inputerror('Resolution of Identity requires that the ERIs' ' be expanded first.') - return expr.factorise_eri(factorisation=factorisation) + return expr.expand_coulomb_ri(factorisation=factorisation) From ee96f9b453194849b0af4ee44f6d1e582341d132 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 13:50:45 +0200 Subject: [PATCH 12/26] Added input validation in object_container.expand_coulomb_ri --- adcgen/expression/object_container.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index f8472f6..9786cdb 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -766,6 +766,11 @@ def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', """ from .expr_container import ExprContainer + if factorisation not in ('sym', 'asym'): + raise NotImplementedError("Only symmetric (sym) and asymmetric " + "(asym) factorisation of the Coulomb " + "integral is implemented") + res = self.inner base, exponent = self.base_and_exponent if isinstance(base, SymmetricTensor) and \ From 0cde6189f81f920cd3e793c215f67114be187c45 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 14:31:56 +0200 Subject: [PATCH 13/26] Fixed Docstrings, miscellaneous bugs, added more spatial 
orb test data --- adcgen/expression/expr_container.py | 20 ++++---- adcgen/expression/object_container.py | 43 ++++++++++------ adcgen/expression/polynom_container.py | 43 ++++++++++------ adcgen/expression/term_container.py | 28 +++++------ adcgen/indices.py | 2 - adcgen/resolution_of_identity.py | 55 ++++++++++++++------- adcgen/sympy_objects.py | 2 +- adcgen/tensor_names.py | 3 ++ tests/reference_data/generate_data.py | 5 +- tests/reference_data/spatial_gs_energy.json | 8 +++ tests/resolution_of_identity_test.py | 3 +- tests/spatial_orbitals_test.py | 23 +++++++++ 12 files changed, 156 insertions(+), 79 deletions(-) diff --git a/adcgen/expression/expr_container.py b/adcgen/expression/expr_container.py index bf9f949..cd89bcb 100644 --- a/adcgen/expression/expr_container.py +++ b/adcgen/expression/expr_container.py @@ -1,5 +1,5 @@ from collections.abc import Iterable, Sequence -from typing import Any, Literal +from typing import Any from sympy import Add, Basic, Expr, Mul, Pow, S, Symbol, factor, nsimplify @@ -275,18 +275,20 @@ def rename_tensor(self, current: str, new: str) -> 'ExprContainer': self._inner = renamed return self - def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym' + def expand_coulomb_ri(self, factorisation: str = 'sym' ) -> 'ExprContainer': """ - Factorises symmetric ERIs in chemist notation into RI format. - This can be done both symmetrically and asymetrically + Expands the Coulomb operators (pq | rs) into RI format - Args: - factorisation : str, optional - Either 'sym' or 'asym'. Defaults to 'sym'. + Parameters + ---------- + factorisation : str, optional + The type of factorisation ('sym' or 'asym'), by default 'sym' - Returns: - ExprContainer: The factorised result + Returns + ------- + ExprContainer + The factorised expression. 
""" res = S.Zero for term in self.terms: diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 9786cdb..d253d80 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -1,6 +1,6 @@ from collections.abc import Iterable from functools import cached_property -from typing import Any, Sequence, TYPE_CHECKING, Literal +from typing import Any, Sequence, TYPE_CHECKING import itertools from sympy.physics.secondquant import F, Fd, FermionicOperator, NO @@ -747,22 +747,30 @@ def rename_tensor(self, current: str, new: str, obj = ExprContainer(obj, **self.assumptions) return obj - def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + def expand_coulomb_ri(self, factorisation: str = 'sym', wrap_result: bool = True) -> "ExprContainer | Expr": """ - Factorises Coulomb integrals into RI format. - This is done either symmetrically or asymmetrically. + Expands the Coulomb operators (pq | rs) into RI format - Args: - factorisation : str, optional - Either 'sym' or 'asym'. Determines the type of factorisation. - Defaults to 'sym'. - wrap_result : bool, optional - Whether to wrap the result in an ExprContainer. - Defaults to True. - - Returns: - ExprContainer | Expr: The factorised result + Parameters + ---------- + factorisation : str, optional + The type of factorisation ('sym' or 'asym'), by default 'sym' + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer, by default True + + Returns + ------- + ExprContainer | Expr + The factorised expression. + + Raises + ------ + NotImplementedError + If a factorisation that is not 'sym' or 'asym' is provided or if + the expression is not real. 
+ ValueError + If an invalide exponent is present """ from .expr_container import ExprContainer @@ -773,6 +781,13 @@ def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', res = self.inner base, exponent = self.base_and_exponent + if not exponent.is_integer: + raise ValueError("Exponent of Object is not an integer. " + f"Exponent: {exponent}") + elif int(exponent) < 0: + raise ValueError("RI decomposition is not meaningful for " + "reciprocal Coulomb operators") + if isinstance(base, SymmetricTensor) and \ base.name == tensor_names.coulomb: # ensure that the ERI is symmetric as an implicit check diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 7e41176..dff5768 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -1,6 +1,6 @@ from collections.abc import Iterable, Sequence from functools import cached_property -from typing import Any, TYPE_CHECKING, Literal +from typing import Any, TYPE_CHECKING from sympy import Add, Expr, Pow, Symbol, S @@ -198,26 +198,37 @@ def rename_tensor(self, current: str, new: str, renamed = ExprContainer(inner=renamed, **self.assumptions) return renamed - def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + def expand_coulomb_ri(self, factorisation: str = 'sym', wrap_result: bool = True) -> "Expr | ExprContainer": """ - Fatorises the symmetric ERIs in chemist notation into an RI format. - Note that this expands the polynomial to account for the uniqueness - of each RI auxilliary index. - - Args: - factorisation : str, optional - Which type of factorisation to use (sym or asym). - Defaults to 'sym' - wrap_result : bool, optional - Whether to wrap the result in an ExprContainer. - Defaults to True. 
- - Returns: - Expr | ExprContainer: The fatorised result + Expands the Coulomb operators (pq | rs) into RI format + + Parameters + ---------- + factorisation : str, optional + The type of factorisation ('sym' or 'asym'), by default 'sym' + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer, by default True + + Returns + ------- + ExprContainer | Expr + The factorised expression. + + Raises + ------ + ValueError + If an invalide exponent is present """ from .expr_container import ExprContainer + if not self.exponent.is_integer: + raise ValueError("Only Polynomials with integer exponents can " + "be factorised") + elif int(self.exponent) < 0: + raise ValueError("Decomposition of reciprocal Coulomb operators " + "with RI is not meaningful") + factorised = S.One for _ in range(int(self.exponent)): expanded = S.Zero diff --git a/adcgen/expression/term_container.py b/adcgen/expression/term_container.py index 05cdc40..f7180f5 100644 --- a/adcgen/expression/term_container.py +++ b/adcgen/expression/term_container.py @@ -1,7 +1,7 @@ from collections.abc import Iterable from collections import Counter from functools import cached_property -from typing import Any, TYPE_CHECKING, Sequence, Literal +from typing import Any, TYPE_CHECKING, Sequence from sympy import Add, Expr, Mul, Pow, S, Symbol, factor, latex, nsimplify from sympy.physics.secondquant import NO @@ -596,22 +596,22 @@ def rename_tensor(self, current: str, new: str, wrap_result: bool = True renamed = ExprContainer(renamed, **self.assumptions) return renamed - def expand_coulomb_ri(self, factorisation: Literal['sym', 'asym'] = 'sym', + def expand_coulomb_ri(self, factorisation: str = 'sym', wrap_result: bool = True) -> "Expr | ExprContainer": """ - Factorises Coulomb integrals into RI format. - This is done either symmetrically or asymmetrically + Expands the Coulomb operators (pq | rs) into RI format - Args: - factorisation : str, optional - Either 'sym' or 'asym'. 
Determines the type of factorisation. - Defaults to 'sym'. - wrap_result : bool, optional - Whether to wrap the result in an ExprContainer. - Defaults to True. - - Returns: - ExprContainer | Expr: The factorised result + Parameters + ---------- + factorisation : str, optional + The type of factorisation ('sym' or 'asym'), by default 'sym' + wrap_result : bool, optional + Whether to wrap the result in an ExprContainer, by default True + + Returns + ------- + ExprContainer | Expr + The factorised expression. """ from .expr_container import ExprContainer diff --git a/adcgen/indices.py b/adcgen/indices.py index c71dfe9..ab61638 100644 --- a/adcgen/indices.py +++ b/adcgen/indices.py @@ -320,8 +320,6 @@ def generic_indices_from_space(space_str: str) -> list[Index]: assert len(generic_idx) <= 2 # only occ and virt occ = generic_idx.get(("occ", ""), []) occ.extend(generic_idx.get(("virt", ""), [])) - occ.extend(generic_idx.get(("core", ""), [])) - occ.extend(generic_idx.get(("aux", ""), [])) return occ diff --git a/adcgen/resolution_of_identity.py b/adcgen/resolution_of_identity.py index 40d914a..8b205d9 100644 --- a/adcgen/resolution_of_identity.py +++ b/adcgen/resolution_of_identity.py @@ -5,39 +5,58 @@ def apply_resolution_of_identity(expr: ExprContainer, - symmetric: bool = True) -> ExprContainer: + factorisation: str = 'sym' + ) -> ExprContainer: """ Applies the Resolution of Identity approximation (RI, sometimes also called density fitting, DF) to an expression. This implies that every - spatial ERI is replaced by its factorised form. Two types of factorisation - are supported: symmetric and asymmetric. In the symmetric decomposition, - a spatial ERI is approximated as: + Coulomb operator is replaced by its factorised form. Two types of + factorisation are supported: symmetric and asymmetric. + In the symmetric decomposition, a Coulomb operator is approximated as: (pq | rs) ~ B^P_{pq} B^P_{rs} B^P_{pq} = (P | Q)^{-1/2} (Q | pq) This decomposition is the default. 
In the asymmetric factorisation, the - same spatial ERI is approximated as: + same Coulomb operator is approximated as: (pq | rs) ~ C^P_{pq} (P | rs) C^P_{pq} = (P | Q)^{-1} (Q | pq) - Note that the RI approximation is only meaningful on spatial ERIs. + Note that the RI approximation is only meaningful on Coulomb operator. Therefore, this routine will crash and exit if the given expression has - not been spin-integrated before. All RI indices receive an alpha spin - by default - - Args: - expr : ExprContainer - The expression to be spin-integrated. - symmetric : bool, optional - If true, the symmetric factorisation variant is employed. - If false, the asymmetric factorisation variant is employed instead. + not been expanded before. All RI indices receive an alpha spin + by default if the expression has been spin-integrated and no spin + otherwise. + + Parameters + ---------- + expr : ExprContainer + The expression to be factorised into RI format. + factorisation : str, optional + Which type of factorisation to use. + If 'sym', the symmetric factorisation variant is employed. + If 'asym', the asymmetric factorisation variant is employed + instead, by default 'sym' + + Returns + ------- + ExprContainer + The factorised expression + + Raises + ------ + Inputerror + If a factorisation other than 'sym' or 'asym' is provided + Inputerror + If the expression still contains antisymmetric ERIs. """ - factorisation = 'asym' - if symmetric: - factorisation = 'sym' + # Check if a valid factorisation is given + if factorisation not in ('sym', 'asym'): + raise Inputerror('Only symmetric (sym) and asymmetric (asym) ' + 'factorisation modes are supported. 
' + f'Received: {factorisation}') # Check whether the expression contains antisymmetric ERIs if Symbol(tensor_names.eri) in expr.inner.atoms(Symbol): diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index abc8162..995d63e 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -320,7 +320,7 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] return S.Zero spi, spj = i.space[0], j.space[0] - valid_spaces = ["o", "v", "g", "c", "r"] + valid_spaces = ["o", "v", "g", "c", "a"] assert spi in valid_spaces and spj in valid_spaces if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo return S.Zero diff --git a/adcgen/tensor_names.py b/adcgen/tensor_names.py index a5fe4c0..f5fd6db 100644 --- a/adcgen/tensor_names.py +++ b/adcgen/tensor_names.py @@ -29,6 +29,9 @@ class TensorNames(metaclass=Singleton): the attributes storing the names are given in brackets: - antisymmetric ERI in physicist notation (eri): V - Coulomb integrals in chemist notation (coulomb): v + - Symmetrically decomposed RI integrals (ri_sym): B + - The "factor" for an asymmetric RI integral (ri_asym_factor): C + - The pure 2e3c RI integral (ri_asym_eri): G - The fock matrix (fock): f - The arbitrary N-particle operator matrix (operator): d - Ground state amplitudes (gs_amplitude): t diff --git a/tests/reference_data/generate_data.py b/tests/reference_data/generate_data.py index 0db7ff0..a47b94d 100644 --- a/tests/reference_data/generate_data.py +++ b/tests/reference_data/generate_data.py @@ -379,10 +379,9 @@ def gen_ri_gs_energies(self): gs = self.gs[variant] gs_energy = ExprContainer(gs.energy(order), real=True) restricted = restriction == 'r' - symmetric = symmetry == 'sym' gs_energy = transform_to_spatial_orbitals(gs_energy, '', '', restricted=restricted) - gs_energy = apply_resolution_of_identity(gs_energy, symmetric) + gs_energy = apply_resolution_of_identity(gs_energy, symmetry) gs_energy.substitute_contracted() 
results[variant][order][restriction][symmetry] = str(gs_energy) write_json(results, outfile) @@ -394,7 +393,7 @@ def gen_spatial_gs_energies(self): for variant in ['mp', 're']: results[variant] = {} gs = self.gs[variant] - for order in [0, 1, 2]: + for order in [0, 1, 2, 3]: results[variant][order] = {} for restriction in ['r', 'u']: energy = ExprContainer(gs.energy(order), real=True) diff --git a/tests/reference_data/spatial_gs_energy.json b/tests/reference_data/spatial_gs_energy.json index 6fecefa..0f6ab50 100644 --- a/tests/reference_data/spatial_gs_energy.json +++ b/tests/reference_data/spatial_gs_energy.json @@ -11,6 +11,10 @@ "2": { "r": "- \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", "u": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" + }, + "3": { + "r": "- \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "u": "- {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + 
\\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" } }, "re": { @@ -25,6 +29,10 @@ "2": { "r": "- \\frac{3 {t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", "u": "- {t1^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t1^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t1^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" + }, + "3": { + "r": "2 {t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} - \\frac{3 {t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{2} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{2}", + "u": "{t2^{a_{\\alpha}}_{i_{\\alpha}}} {f^{i_{\\alpha}}_{a_{\\alpha}}} + {t2^{a_{\\beta}}_{i_{\\beta}}} {f^{i_{\\beta}}_{a_{\\beta}}} - {t2^{a_{\\alpha}a_{\\beta}}_{i_{\\alpha}i_{\\beta}}} {v^{i_{\\alpha}a_{\\alpha}}_{i_{\\beta}a_{\\beta}}} - \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} {v^{i_{\\alpha}a_{\\alpha}}_{j_{\\alpha}b_{\\alpha}}}}{4} + \\frac{{t2^{a_{\\alpha}b_{\\alpha}}_{i_{\\alpha}j_{\\alpha}}} 
{v^{i_{\\alpha}b_{\\alpha}}_{j_{\\alpha}a_{\\alpha}}}}{4} - \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}a_{\\beta}}_{j_{\\beta}b_{\\beta}}}}{4} + \\frac{{t2^{a_{\\beta}b_{\\beta}}_{i_{\\beta}j_{\\beta}}} {v^{i_{\\beta}b_{\\beta}}_{j_{\\beta}a_{\\beta}}}}{4}" } } } \ No newline at end of file diff --git a/tests/resolution_of_identity_test.py b/tests/resolution_of_identity_test.py index 3995035..5fc5703 100644 --- a/tests/resolution_of_identity_test.py +++ b/tests/resolution_of_identity_test.py @@ -21,12 +21,11 @@ def test_ri_gs_energy(self, variant, order, restriction, symmetry, ref = ref[restriction][symmetry].inner # transform restriction and symmetry to bool restricted = restriction == 'r' - symmetric = symmetry == 'sym' # compute the energy e = cls_instances[variant]['gs'].energy(order) expr = ExprContainer(e, real=True) sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) - ri_expr = apply_resolution_of_identity(sp_expr, symmetric) + ri_expr = apply_resolution_of_identity(sp_expr, symmetry) assert simplify(ri_expr - ref).substitute_contracted().inner is S.Zero diff --git a/tests/spatial_orbitals_test.py b/tests/spatial_orbitals_test.py index a885630..e512b46 100644 --- a/tests/spatial_orbitals_test.py +++ b/tests/spatial_orbitals_test.py @@ -14,6 +14,8 @@ from sympy import Add, S, Rational, sympify from sympy.physics.secondquant import F, Fd +import pytest + class TestExpandAntiSymEri: def test_no_eri(self): @@ -340,3 +342,24 @@ def test_t1_2(self): target = restricted.provided_target_idx assert target is not None assert set(target) == {i, a} + + +class TestSpatialGroundstateEnergy: + + @pytest.mark.parametrize('variant', ['mp', 're']) + @pytest.mark.parametrize('order', [0, 1, 2]) + @pytest.mark.parametrize('restriction', ['r', 'u']) + def test_spatial_gs_energy(self, variant, order, restriction, + cls_instances, reference_data): + # load the reference data + ref = 
reference_data['spatial_gs_energy'][variant][order][restriction] + # transform restriction to bool + restricted = restriction == 'r' + # compute the energy + e = cls_instances[variant]['gs'].energy(order) + expr = ExprContainer(e, real=True) + + sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) + + assert (simplify(sp_expr - ref.inner).substitute_contracted().inner + is S.Zero) From 180da52de10e952111e841416347e3dbdc655580 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 14:49:23 +0200 Subject: [PATCH 14/26] Homogenised is_Integer calls and test method names --- adcgen/expression/object_container.py | 2 +- adcgen/expression/polynom_container.py | 2 +- tests/reference_data/generate_data.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index d253d80..70e65d6 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -781,7 +781,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', res = self.inner base, exponent = self.base_and_exponent - if not exponent.is_integer: + if not exponent.is_Integer: raise ValueError("Exponent of Object is not an integer. 
" f"Exponent: {exponent}") elif int(exponent) < 0: diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index dff5768..d7f48ae 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -222,7 +222,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', """ from .expr_container import ExprContainer - if not self.exponent.is_integer: + if not self.exponent.is_Integer: raise ValueError("Only Polynomials with integer exponents can " "be factorised") elif int(self.exponent) < 0: diff --git a/tests/reference_data/generate_data.py b/tests/reference_data/generate_data.py index a47b94d..5267b1c 100644 --- a/tests/reference_data/generate_data.py +++ b/tests/reference_data/generate_data.py @@ -362,7 +362,7 @@ def gen_adc_properties_trans_moment(self): dump["real_transition_dm"][block] = str(expr) write_json(results, outfile) - def gen_ri_gs_energies(self): + def gen_ri_gs_energy(self): results: dict = {} outfile = "ri_gs_energy.json" @@ -386,7 +386,7 @@ def gen_ri_gs_energies(self): results[variant][order][restriction][symmetry] = str(gs_energy) write_json(results, outfile) - def gen_spatial_gs_energies(self): + def gen_spatial_gs_energy(self): outfile = "spatial_gs_energy.json" results: dict = {} From f17d5be39c9663ec1af57800e41ef2ff36256997 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 16:14:12 +0200 Subject: [PATCH 15/26] Fixed KroneckerDeltas and missing aux renaming --- adcgen/sympy_objects.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index 995d63e..9a4fc3f 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -322,6 +322,8 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] spi, spj = i.space[0], j.space[0] valid_spaces = ["o", "v", "g", "c", "a"] assert spi in valid_spaces and spj in valid_spaces + if (spi == "g" and spj == "a") or (spi == 
"a" or spj == "g"): + return S.Zero if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo return S.Zero spi, spj = i.spin, j.spin @@ -366,13 +368,13 @@ def preferred_and_killable(self) -> tuple[Index, Index] | None: space2, spin2 = j.space[0], j.spin # ensure we have no unexpected space and spin assert ( - space1 in ["o", "v", "g", "c", "r"] - and space2 in ["o", "v", "g", "c", "r"] + space1 in ["o", "v", "g", "c", "a"] + and space2 in ["o", "v", "g", "c", "a"] ) assert spin1 in ["", "a", "b"] and spin2 in ["", "a", "b"] if spin1 == spin2: # nn / aa / bb -> equal spin information - # oo / vv / cc / gg / og / vg / cg / rr + # oo / vv / cc / gg / og / vg / cg / aa # RI indices will always end up here if space1 == space2 or space2 == "g": return (i, j) From 8e5ae9af487777c4de27db0821b43cc3b9c48f2a Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 21 May 2025 16:34:06 +0200 Subject: [PATCH 16/26] Fixed Kronecker delta evaluation --- adcgen/sympy_objects.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index 9a4fc3f..8b1ddf4 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -322,7 +322,7 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] spi, spj = i.space[0], j.space[0] valid_spaces = ["o", "v", "g", "c", "a"] assert spi in valid_spaces and spj in valid_spaces - if (spi == "g" and spj == "a") or (spi == "a" or spj == "g"): + if (spi == "g" and spj == "a") or (spi == "a" and spj == "g"): return S.Zero if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo return S.Zero From 6409596a9fb9e1c68ea33f3e92b7c80f28df1234 Mon Sep 17 00:00:00 2001 From: LinusBDittmer <108735371+LinusBDittmer@users.noreply.github.com> Date: Thu, 22 May 2025 11:43:46 +0200 Subject: [PATCH 17/26] Update handling of Kronecker deltas Co-authored-by: jonasleitner <80265962+jonasleitner@users.noreply.github.com> --- adcgen/sympy_objects.py | 7 ++++++- 1 file 
changed, 6 insertions(+), 1 deletion(-) diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index 8b1ddf4..c9239dd 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -324,7 +324,12 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] assert spi in valid_spaces and spj in valid_spaces if (spi == "g" and spj == "a") or (spi == "a" and spj == "g"): return S.Zero - if spi != "g" and spj != "g" and spi != spj: # delta_ov / delta_vo + # delta_ov / delta_vo / ... + # delta_{aux general} / delta_{general aux} + if (spi != "g" and spj != "g" and spi != spj) or \ + (spi == "g" and spj == "a") or \ + (spi == "a" and spj == "g"): + return S.Zero return S.Zero spi, spj = i.spin, j.spin assert spi in ["", "a", "b"] and spj in ["", "a", "b"] From 2772e5272ae42b684e37dd9e54544dc642e221b6 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Thu, 22 May 2025 11:45:34 +0200 Subject: [PATCH 18/26] Fixed mistakes caused by autocommit --- adcgen/sympy_objects.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/adcgen/sympy_objects.py b/adcgen/sympy_objects.py index c9239dd..042f1c0 100644 --- a/adcgen/sympy_objects.py +++ b/adcgen/sympy_objects.py @@ -322,15 +322,11 @@ def eval(cls, i: Expr, j: Expr) -> Expr | None: # type: ignore[override] spi, spj = i.space[0], j.space[0] valid_spaces = ["o", "v", "g", "c", "a"] assert spi in valid_spaces and spj in valid_spaces - if (spi == "g" and spj == "a") or (spi == "a" and spj == "g"): - return S.Zero - # delta_ov / delta_vo / ... 
- # delta_{aux general} / delta_{general aux} + # delta_ov / delta_vo / delta_{aux general} / delta_{general aux} if (spi != "g" and spj != "g" and spi != spj) or \ (spi == "g" and spj == "a") or \ (spi == "a" and spj == "g"): return S.Zero - return S.Zero spi, spj = i.spin, j.spin assert spi in ["", "a", "b"] and spj in ["", "a", "b"] if spi and spj and spi != spj: # delta_ab / delta_ba From 981b9fbe252a7a27c4bdf8d61bbcf77f3c633f57 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Thu, 22 May 2025 18:51:01 +0200 Subject: [PATCH 19/26] Updated tests to new assumptions format --- tests/resolution_of_identity_test.py | 4 +++- tests/spatial_orbitals_test.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/resolution_of_identity_test.py b/tests/resolution_of_identity_test.py index 5fc5703..596e156 100644 --- a/tests/resolution_of_identity_test.py +++ b/tests/resolution_of_identity_test.py @@ -18,7 +18,7 @@ def test_ri_gs_energy(self, variant, order, restriction, symmetry, cls_instances, reference_data): # load the reference data ref = reference_data['ri_gs_energy'][variant][order] - ref = ref[restriction][symmetry].inner + ref = ref[restriction][symmetry] # transform restriction and symmetry to bool restricted = restriction == 'r' # compute the energy @@ -27,5 +27,7 @@ def test_ri_gs_energy(self, variant, order, restriction, symmetry, sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) ri_expr = apply_resolution_of_identity(sp_expr, symmetry) + ri_expr.make_real() + ref.make_real() assert simplify(ri_expr - ref).substitute_contracted().inner is S.Zero diff --git a/tests/spatial_orbitals_test.py b/tests/spatial_orbitals_test.py index c65a8af..dc55fd8 100644 --- a/tests/spatial_orbitals_test.py +++ b/tests/spatial_orbitals_test.py @@ -358,6 +358,8 @@ def test_spatial_gs_energy(self, variant, order, restriction, expr = ExprContainer(e, real=True) sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) + 
sp_expr.make_real() + ref.make_real() - assert (simplify(sp_expr - ref.inner).substitute_contracted().inner + assert (simplify(sp_expr - ref).substitute_contracted().inner is S.Zero) From f242c96179cf1a1d7cef45bde00bbb304ed4e79c Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Thu, 22 May 2025 18:58:19 +0200 Subject: [PATCH 20/26] Fixed additional test bugs --- tests/spatial_orbitals_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/spatial_orbitals_test.py b/tests/spatial_orbitals_test.py index dc55fd8..789380a 100644 --- a/tests/spatial_orbitals_test.py +++ b/tests/spatial_orbitals_test.py @@ -359,7 +359,9 @@ def test_spatial_gs_energy(self, variant, order, restriction, sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) sp_expr.make_real() + sp_expr.add_bra_ket_sym(braket_sym_tensors=tensor_names.coulomb) ref.make_real() + ref.add_bra_ket_sym(braket_sym_tensors=tensor_names.coulomb) assert (simplify(sp_expr - ref).substitute_contracted().inner is S.Zero) From 8ad40d82f4b68a8e3743ba6129755fce06850f4c Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Mon, 26 May 2025 20:19:13 +0200 Subject: [PATCH 21/26] Fixed bug with the handling of RI expressions in exponential ObjectContainers --- adcgen/expression/object_container.py | 22 ++++++++++++++++------ adcgen/expression/polynom_container.py | 26 ++++++++++++++++---------- tests/resolution_of_identity_test.py | 3 +-- tests/spatial_orbitals_test.py | 2 -- 4 files changed, 33 insertions(+), 20 deletions(-) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 7f9c32f..dbf342b 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -765,12 +765,9 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', res = self.inner base, exponent = self.base_and_exponent - if not exponent.is_Integer: + if not exponent.is_integer: raise ValueError("Exponent of Object is not an integer. 
" f"Exponent: {exponent}") - elif int(exponent) < 0: - raise ValueError("RI decomposition is not meaningful for " - "reciprocal Coulomb operators") if isinstance(base, SymmetricTensor) and \ base.name == tensor_names.coulomb: @@ -788,8 +785,20 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', # Check if RI is applied before or after # spin integration. RI indices are always alpha assumptions["alpha"] = True - for _ in range(int(exponent)): - aux_idx = Index('P', **assumptions) + if self.exponent >= S.Zero: + for _ in range(int(exponent)): + aux_idx = Index('P', **assumptions) + if factorisation == 'sym': + res *= SymmetricTensor(tensor_names.ri_sym, + (aux_idx,), (p, q), 0) + res *= SymmetricTensor(tensor_names.ri_sym, + (aux_idx,), (r, s), 0) + elif factorisation == 'asym': + res *= SymmetricTensor(tensor_names.ri_asym_factor, + (aux_idx,), (p, q), 0) + res *= SymmetricTensor(tensor_names.ri_asym_eri, + (aux_idx,), (r, s), 0) + else: if factorisation == 'sym': res *= SymmetricTensor(tensor_names.ri_sym, (aux_idx,), (p, q), 0) @@ -800,6 +809,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', (aux_idx,), (p, q), 0) res *= SymmetricTensor(tensor_names.ri_asym_eri, (aux_idx,), (r, s), 0) + res = Pow(res, self.exponent) if wrap_result: kwargs = self.assumptions diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 12a6a3c..37453f1 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -212,20 +212,26 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', """ from .expr_container import ExprContainer - if not self.exponent.is_Integer: + if not self.exponent.is_integer: raise ValueError("Only Polynomials with integer exponents can " "be factorised") - elif int(self.exponent) < 0: - raise ValueError("Decomposition of reciprocal Coulomb operators " - "with RI is not meaningful") factorised = S.One - for _ in range(int(self.exponent)): - expanded = S.Zero - for 
term in self.terms: - expanded += term.expand_coulomb_ri(factorisation=factorisation, - wrap_result=False) - factorised *= expanded + if self.exponent >= S.Zero: + for _ in range(int(self.exponent)): + expanded = S.Zero + for term in self.terms: + expanded += term.expand_coulomb_ri( + factorisation=factorisation, wrap_result=False + ) + factorised *= expanded + else: + expanded_list = [ + term.expand_coulomb_ri(factorisation=factorisation, + wrap_result=False) + for term in self.terms + ] + factorised = Pow(Add(*expanded_list), self.exponent) assert isinstance(factorised, Expr) if wrap_result: diff --git a/tests/resolution_of_identity_test.py b/tests/resolution_of_identity_test.py index 596e156..fb745d4 100644 --- a/tests/resolution_of_identity_test.py +++ b/tests/resolution_of_identity_test.py @@ -19,6 +19,7 @@ def test_ri_gs_energy(self, variant, order, restriction, symmetry, # load the reference data ref = reference_data['ri_gs_energy'][variant][order] ref = ref[restriction][symmetry] + ref.make_real() # transform restriction and symmetry to bool restricted = restriction == 'r' # compute the energy @@ -27,7 +28,5 @@ def test_ri_gs_energy(self, variant, order, restriction, symmetry, sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) ri_expr = apply_resolution_of_identity(sp_expr, symmetry) - ri_expr.make_real() - ref.make_real() assert simplify(ri_expr - ref).substitute_contracted().inner is S.Zero diff --git a/tests/spatial_orbitals_test.py b/tests/spatial_orbitals_test.py index 789380a..405f8e4 100644 --- a/tests/spatial_orbitals_test.py +++ b/tests/spatial_orbitals_test.py @@ -358,8 +358,6 @@ def test_spatial_gs_energy(self, variant, order, restriction, expr = ExprContainer(e, real=True) sp_expr = transform_to_spatial_orbitals(expr, '', '', restricted) - sp_expr.make_real() - sp_expr.add_bra_ket_sym(braket_sym_tensors=tensor_names.coulomb) ref.make_real() ref.add_bra_ket_sym(braket_sym_tensors=tensor_names.coulomb) From 
e8e0446f04b5c0b08dcd4c5b7a03935233713035 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Mon, 26 May 2025 20:23:28 +0200 Subject: [PATCH 22/26] Changes in the expand_intermediates function --- adcgen/expression/object_container.py | 4 ++-- adcgen/expression/polynom_container.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index dbf342b..4e4697d 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -765,7 +765,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', res = self.inner base, exponent = self.base_and_exponent - if not exponent.is_integer: + if not exponent.is_Integer: raise ValueError("Exponent of Object is not an integer. " f"Exponent: {exponent}") @@ -903,7 +903,7 @@ def expand_intermediates(self, target: Sequence[Index], fully_expand=fully_expand ) if exponent < S.Zero: - expanded = Pow(expanded, -1) + expanded = Pow(expanded, exponent) # apply assumptions to the expanded object if braket_sym_tensors or braket_antisym_tensors: expanded = ExprContainer(expanded).add_bra_ket_sym( diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 37453f1..0ddfcd3 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -212,7 +212,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', """ from .expr_container import ExprContainer - if not self.exponent.is_integer: + if not self.exponent.is_Integer: raise ValueError("Only Polynomials with integer exponents can " "be factorised") From deb632f82a0ae063495f0f12b881b024263af5bd Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Mon, 26 May 2025 21:11:01 +0200 Subject: [PATCH 23/26] Stricter typechecking --- adcgen/expression/object_container.py | 1 + adcgen/expression/polynom_container.py | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git 
a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index 4e4697d..a231aa0 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ -799,6 +799,7 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', res *= SymmetricTensor(tensor_names.ri_asym_eri, (aux_idx,), (r, s), 0) else: + aux_idx = Index('P', **assumptions) if factorisation == 'sym': res *= SymmetricTensor(tensor_names.ri_sym, (aux_idx,), (p, q), 0) diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 0ddfcd3..665ab0c 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -225,13 +225,16 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', factorisation=factorisation, wrap_result=False ) factorised *= expanded + assert isinstance(factorised, Expr) else: - expanded_list = [ - term.expand_coulomb_ri(factorisation=factorisation, - wrap_result=False) - for term in self.terms - ] - factorised = Pow(Add(*expanded_list), self.exponent) + expanded = S.Zero + for term in self.terms: + expanded += term.expand_coulomb_ri( + factorisation=factorisation, wrap_result=False + ) + assert isinstance(expanded, Expr) + factorised = Pow(expanded, self.exponent) + assert isinstance(factorised, Expr) if wrap_result: From afdb3d2abdf8d6bab7aa29d9692b9884136a57cc Mon Sep 17 00:00:00 2001 From: jonasleitner Date: Tue, 27 May 2025 07:58:48 +0200 Subject: [PATCH 24/26] update expand_coulomb_ri and expand_intermediates for negative exponents and distinct contracted indices --- adcgen/expression/object_container.py | 119 ++++++++++++++----------- adcgen/expression/polynom_container.py | 92 +++++++++---------- 2 files changed, 111 insertions(+), 100 deletions(-) diff --git a/adcgen/expression/object_container.py b/adcgen/expression/object_container.py index a231aa0..cb39958 100644 --- a/adcgen/expression/object_container.py +++ b/adcgen/expression/object_container.py @@ 
-747,71 +747,63 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', ------- ExprContainer | Expr The factorised expression. - - Raises - ------ - NotImplementedError - If a factorisation that is not 'sym' or 'asym' is provided or if - the expression is not real. - ValueError - If an invalide exponent is present """ from .expr_container import ExprContainer if factorisation not in ('sym', 'asym'): - raise NotImplementedError("Only symmetric (sym) and asymmetric " - "(asym) factorisation of the Coulomb " + raise NotImplementedError("Only symmetric ('sym') and asymmetric " + "('asym') factorisation of the Coulomb " "integral is implemented") res = self.inner base, exponent = self.base_and_exponent - if not exponent.is_Integer: - raise ValueError("Exponent of Object is not an integer. " - f"Exponent: {exponent}") - if isinstance(base, SymmetricTensor) and \ base.name == tensor_names.coulomb: - # ensure that the ERI is symmetric as an implicit check - # whether it is real if base.bra_ket_sym != 1: raise NotImplementedError("Can only apply RI approximation to " - "ERIs with bra-ket symmetry " - "(real orbitals).") - p, q, r, s = self.idx + "coulomb integrals with " + "bra-ket symmetry.") + # we dont expand negative exponents, because the result + # (ab)^-n will evaluate to a^-n b^-n, which is + # only correct if the product ab has no contracted + # indices + if not exponent.is_Integer or exponent < S.Zero: + raise NotImplementedError("Can only apply RI approximation to " + "coulomb integrals " + "with positive integer exponents. " + f"{self} has an invalid exponent.") + # setup the assumptions for the aux index: + # assign alpha spin if represented in spatial orbitals + idx = self.idx + has_spin = bool(idx[0].spin) + if any(bool(s) != has_spin for s in idx): + raise NotImplementedError(f"The coulomb integral {self} has " + "to be represented either in spatial" + " or spin orbitals. 
A mixture is not" + " valid.") + assumptions = {"aux": True} + if has_spin: + assumptions["alpha"] = True + # actually do the expansion + p, q, r, s = idx res = S.One - if p.spin == q.spin and r.spin == s.spin: - assumptions = {"aux": True} - if p.spin: - # Check if RI is applied before or after - # spin integration. RI indices are always alpha - assumptions["alpha"] = True - if self.exponent >= S.Zero: - for _ in range(int(exponent)): - aux_idx = Index('P', **assumptions) - if factorisation == 'sym': - res *= SymmetricTensor(tensor_names.ri_sym, - (aux_idx,), (p, q), 0) - res *= SymmetricTensor(tensor_names.ri_sym, - (aux_idx,), (r, s), 0) - elif factorisation == 'asym': - res *= SymmetricTensor(tensor_names.ri_asym_factor, - (aux_idx,), (p, q), 0) - res *= SymmetricTensor(tensor_names.ri_asym_eri, - (aux_idx,), (r, s), 0) + for _ in range(int(exponent)): # exponent has to be positive + aux_idx = Index("P", **assumptions) + if factorisation == "sym": + res *= SymmetricTensor( + tensor_names.ri_sym, (aux_idx,), (p, q), 0 + ) + res *= SymmetricTensor( + tensor_names.ri_sym, (aux_idx,), (r, s), 0 + ) else: - aux_idx = Index('P', **assumptions) - if factorisation == 'sym': - res *= SymmetricTensor(tensor_names.ri_sym, - (aux_idx,), (p, q), 0) - res *= SymmetricTensor(tensor_names.ri_sym, - (aux_idx,), (r, s), 0) - elif factorisation == 'asym': - res *= SymmetricTensor(tensor_names.ri_asym_factor, - (aux_idx,), (p, q), 0) - res *= SymmetricTensor(tensor_names.ri_asym_eri, - (aux_idx,), (r, s), 0) - res = Pow(res, self.exponent) - + assert factorisation == "asym" + res *= SymmetricTensor( + tensor_names.ri_asym_factor, (aux_idx,), (p, q), 0 + ) + res *= SymmetricTensor( + tensor_names.ri_asym_eri, (aux_idx,), (r, s), 0 + ) if wrap_result: kwargs = self.assumptions res = ExprContainer(res, **kwargs) @@ -875,6 +867,12 @@ def expand_intermediates(self, target: Sequence[Index], False: The intermediate is only expanded once, e.g., n'th order MP t-amplitudes are expressed by 
means of (n-1)'th order MP t-amplitudes and ERI. + braket_sym_tensors: Sequence[str], optional + Add bra-ket-symmetry to the given tensors of the expanded + expression (after expansion of the intermediates). + braket_antisym_tensors: Sequence[str], optional + Add bra-ket-antisymmetry to the given tensors of the expanded + expression (after expansion of the intermediates). """ from ..intermediates import Intermediates from .expr_container import ExprContainer @@ -893,18 +891,31 @@ def expand_intermediates(self, target: Sequence[Index], itmd = Intermediates().available.get(longname, None) expanded = self.inner if itmd is not None: + # for negative exponents we would have to ensure that the + # intermediate is a "long" intermediate that consists of + # multiple terms. Or if it consists of a single term + # that it does not have any contracted indices + # However, this can only be checked by calling ".expand()" + # on the contributions in the for loop below, which seems bad. + # A short intermediates will be expanded as + # X^-2 = (ab * cd)^-1 -> a^-1 b^-1 c^-1 d^-1 + # where the last step is not correct if the intermediate + # has contracted indices. + exponent = self.exponent + if not exponent.is_Integer or exponent < S.Zero: + raise NotImplementedError( + "Can only expand intermediates with positive " + f"integer exponents. {self} has an invalid exponent." + ) # Use a for loop to obtain different contracted itmd indices # for each x in: x * x * ... 
expanded = S.One - exponent = self.exponent assert exponent.is_Integer for _ in range(abs(int(exponent))): expanded *= itmd.expand_itmd( indices=self.idx, wrap_result=False, fully_expand=fully_expand ) - if exponent < S.Zero: - expanded = Pow(expanded, exponent) # apply assumptions to the expanded object if braket_sym_tensors or braket_antisym_tensors: expanded = ExprContainer(expanded).add_bra_ket_sym( diff --git a/adcgen/expression/polynom_container.py b/adcgen/expression/polynom_container.py index 665ab0c..b841241 100644 --- a/adcgen/expression/polynom_container.py +++ b/adcgen/expression/polynom_container.py @@ -2,7 +2,7 @@ from functools import cached_property from typing import Any, TYPE_CHECKING -from sympy import Add, Expr, Pow, S +from sympy import Add, Expr, Mul, Pow, S from ..indices import Index, sort_idx_canonical from .container import Container @@ -199,48 +199,34 @@ def expand_coulomb_ri(self, factorisation: str = 'sym', The type of factorisation ('sym' or 'asym'), by default 'sym' wrap_result : bool, optional Whether to wrap the result in an ExprContainer, by default True - - Returns - ------- - ExprContainer | Expr - The factorised expression. - - Raises - ------ - ValueError - If an invalide exponent is present """ from .expr_container import ExprContainer - if not self.exponent.is_Integer: - raise ValueError("Only Polynomials with integer exponents can " - "be factorised") - - factorised = S.One - if self.exponent >= S.Zero: - for _ in range(int(self.exponent)): - expanded = S.Zero - for term in self.terms: - expanded += term.expand_coulomb_ri( - factorisation=factorisation, wrap_result=False - ) - factorised *= expanded - assert isinstance(factorised, Expr) - else: - expanded = S.Zero + exponent = self.exponent + if not exponent.is_Integer: + raise ValueError("Can only apply RI approximation to Polynomials " + "with integer exponents. 
" + f"{self} has an invalid exponent.") + # use a for loop so the contracted aux indices for each x in + # x * x * ... = x^n are different. + expanded = S.One + for _ in range(abs(int(exponent))): + contrib = S.Zero for term in self.terms: - expanded += term.expand_coulomb_ri( + contrib += term.expand_coulomb_ri( factorisation=factorisation, wrap_result=False ) - assert isinstance(expanded, Expr) - factorised = Pow(expanded, self.exponent) - - assert isinstance(factorised, Expr) - + assert isinstance(contrib, Expr) + if exponent < S.Zero: + # a mul object would be simplified as + # (ab)^-1 -> a^-1 b^-1 + # which is only correct if a and b have no contracted indices. + assert not isinstance(contrib, Mul) + contrib = Pow(contrib, -1) + expanded *= contrib if wrap_result: - assumptions = self.assumptions - factorised = ExprContainer(inner=factorised, **assumptions) - return factorised + expanded = ExprContainer(inner=expanded, **self.assumptions) + return expanded def expand_antisym_eri(self, wrap_result: bool = True): """ @@ -271,16 +257,30 @@ def expand_intermediates(self, target: Sequence[Index], """Expands all known intermediates in the polynom.""" from .expr_container import ExprContainer - expanded = S.Zero - for term in self.terms: - expanded += term.expand_intermediates( - target, wrap_result=False, fully_expand=fully_expand, - braket_sym_tensors=braket_sym_tensors, - braket_antisym_tensors=braket_antisym_tensors - ) - assert isinstance(expanded, Expr) - expanded = Pow(expanded, self.exponent) - + exponent = self.exponent + if not exponent.is_Integer: + raise NotImplementedError("Can only expand intermediates for " + "polynoms with integer exponents." + f"{self} has an invalid exponent.") + # use a for loop so the contracted itmd indices for each x in + # x * x * ... = x^n are different. 
+ expanded = S.One + for _ in range(abs(int(exponent))): + contrib = S.Zero + for term in self.terms: + contrib += term.expand_intermediates( + target, wrap_result=False, fully_expand=fully_expand, + braket_sym_tensors=braket_sym_tensors, + braket_antisym_tensors=braket_antisym_tensors + ) + assert isinstance(contrib, Expr) + if exponent < S.Zero: + # a mul object would be simplified as + # (ab)^-1 -> a^-1 b^-1 + # which is only correct if a and b have no contracted indices. + assert not isinstance(contrib, Mul) + contrib = Pow(contrib, -1) + expanded *= contrib if wrap_result: assumptions = self.assumptions assumptions["target_idx"] = target From d909236aa4bf7209bc8da3c231f6b58ba7a93157 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Tue, 27 May 2025 15:07:59 +0200 Subject: [PATCH 25/26] Updated RI --- adcgen/resolution_of_identity.py | 13 ++++++++-- adcgen/tensor_names.py | 6 +++++ examples/resolution_of_identity.py | 41 ++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 examples/resolution_of_identity.py diff --git a/adcgen/resolution_of_identity.py b/adcgen/resolution_of_identity.py index 8b205d9..d38bb25 100644 --- a/adcgen/resolution_of_identity.py +++ b/adcgen/resolution_of_identity.py @@ -5,7 +5,8 @@ def apply_resolution_of_identity(expr: ExprContainer, - factorisation: str = 'sym' + factorisation: str = 'sym', + resolve_indices: bool = True ) -> ExprContainer: """ Applies the Resolution of Identity approximation (RI, sometimes also @@ -38,6 +39,10 @@ def apply_resolution_of_identity(expr: ExprContainer, If 'sym', the symmetric factorisation variant is employed. If 'asym', the asymmetric factorisation variant is employed instead, by default 'sym' + resolve_indices: bool, optional + Whether the indices should be resolved to unique indices + after applying the RI approximation. This is equivalent + to calling 'substitute_contracted()' afterwards. 
Returns ------- @@ -63,4 +68,8 @@ def apply_resolution_of_identity(expr: ExprContainer, raise Inputerror('Resolution of Identity requires that the ERIs' ' be expanded first.') - return expr.expand_coulomb_ri(factorisation=factorisation) + ri_expr = expr.expand_coulomb_ri(factorisation=factorisation) + if resolve_indices: + ri_expr.substitute_contracted() + + return ri_expr diff --git a/adcgen/tensor_names.py b/adcgen/tensor_names.py index f5fd6db..b04ed3e 100644 --- a/adcgen/tensor_names.py +++ b/adcgen/tensor_names.py @@ -30,8 +30,14 @@ class TensorNames(metaclass=Singleton): - antisymmetric ERI in physicist notation (eri): V - Coulomb integrals in chemist notation (coulomb): v - Symmetrically decomposed RI integrals (ri_sym): B + These are formally calculated by decomposing the symmetric + four center integrals: (pq | rs) = B^Q_pq B^Q_rs - The "factor" for an asymmetric RI integral (ri_asym_factor): C + This tensor is the dimensionless part of the asymmetric + resolution of identity decomposition: C^P_{pq} = (pq | Q) (Q | P)^{-1} - The pure 2e3c RI integral (ri_asym_eri): G + This tensor is combined with C^P_{pq} to reform the symmetric + four center integrals: C^P_{pq} G^P_{rs} = (pq | rs) - The fock matrix (fock): f - The arbitrary N-particle operator matrix (operator): d - Ground state amplitudes (gs_amplitude): t diff --git a/examples/resolution_of_identity.py b/examples/resolution_of_identity.py new file mode 100644 index 0000000..7460bd4 --- /dev/null +++ b/examples/resolution_of_identity.py @@ -0,0 +1,41 @@ +from adcgen import ( + Operators, GroundState, ExprContainer, transform_to_spatial_orbitals, + apply_resolution_of_identity +) + +# We first declare the Hamiltonian operator +op = Operators() + +# We exemplify this at the MP2 and MP3 energies +# For this, we first define the ground state +gs = GroundState(op) + +# Next, we can calculate the MP2 and MP3 energy +energy_mp2 = ExprContainer(gs.energy(2)) +energy_mp3 = ExprContainer(gs.energy(3)) + +# RI is 
only valid for real orbitals, wherefore we have to make these +# expressions real +energy_mp2.make_real() +energy_mp3.make_real() + +# These can now be spin-integrated +energy_mp2 = transform_to_spatial_orbitals(energy_mp2, '', '', + restricted=False) +energy_mp3 = transform_to_spatial_orbitals(energy_mp3, '', '', + restricted=False) + +# Lastly, we can apply the resolution of identity approximation +# We will decompose the MP2 energy symmetrically: +energy_mp2 = apply_resolution_of_identity(energy_mp2, factorisation='sym') +# And the MP3 energy asymetrically: +energy_mp3 = apply_resolution_of_identity(energy_mp3, factorisation='asym') + +# We can now print the result +print("RI-MP2 Energy:\n") +print(energy_mp2.to_latex_str(spin_as_overbar=True)) +print() + +print("RI-MP3 Energy:\n") +print(energy_mp3.to_latex_str(spin_as_overbar=True)) +print() From a795036d27b65bc7c9abb45ead0a51ba78a18490 Mon Sep 17 00:00:00 2001 From: LinusBDittmer Date: Wed, 28 May 2025 23:13:53 +0200 Subject: [PATCH 26/26] Fixed Contraction Size Calculation --- adcgen/generate_code/contraction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adcgen/generate_code/contraction.py b/adcgen/generate_code/contraction.py index dbb6207..4579518 100644 --- a/adcgen/generate_code/contraction.py +++ b/adcgen/generate_code/contraction.py @@ -33,7 +33,7 @@ def from_dict(input: dict[str, int]) -> "Sizes": if not provided. """ if "general" not in input: - input["general"] = sum(input.values()) + input["general"] = sum(v for k, v in input.items() if k != "aux") return Sizes(**input) @staticmethod