From 30f82adf3d319ab626dbb8b06f29fedf721b68cd Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 20 Dec 2018 14:15:36 +0100 Subject: [PATCH 01/25] First implementation of the general graph mapper Minor changes --- projectq/cengines/__init__.py | 1 + projectq/cengines/_graphmapper.py | 546 ++++++++++++++++++++++++++++++ 2 files changed, 547 insertions(+) create mode 100644 projectq/cengines/_graphmapper.py diff --git a/projectq/cengines/__init__.py b/projectq/cengines/__init__.py index 966159e78..90b7d95de 100755 --- a/projectq/cengines/__init__.py +++ b/projectq/cengines/__init__.py @@ -32,3 +32,4 @@ from ._tagremover import TagRemover from ._testengine import CompareEngine, DummyEngine from ._twodmapper import GridMapper +from ._graphmapper import GraphMapper diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py new file mode 100644 index 000000000..60e8f57d8 --- /dev/null +++ b/projectq/cengines/_graphmapper.py @@ -0,0 +1,546 @@ +# Copyright 2018 ProjectQ-Framework (wOAww.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Mapper for a quantum circuit to an arbitrary connected graph. + +Input: Quantum circuit with 1 and 2 qubit gates on n qubits. 
Gates are assumed + to be applied in parallel if they act on disjoint qubit(s) and any pair + of qubits can perform a 2 qubit gate (all-to-all connectivity) +Output: Quantum circuit in which qubits are placed in 2-D square grid in which + only nearest neighbour qubits can perform a 2 qubit gate. The mapper + uses Swap gates in order to move qubits next to each other. +""" +from copy import deepcopy +import itertools + +import random +import numpy as np +import networkx as nx + +from projectq.cengines import (BasicMapperEngine, return_swap_depth) +from projectq.meta import LogicalQubitIDTag +from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, + FlushGate, Swap) +from projectq.types import WeakQubitRef + + +class GraphMapperError(Exception): + """Base class for all exceptions related to the GraphMapper.""" + + +class QubitAllocationError(GraphMapperError): + """ + Exception raised if a qubit allocation is impossible. + + This would typically be the case if the number of allocated qubit is + greater than the number of nodes inside the graph. 
+ """ + + +def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, + stored_commands): + """ + Add active qubits to a mapping + + Qubits that are active but not yet registered in the mapping are added by + mapping them to the next available backend id + + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (list): list of commands yet to be processed by the + mapper + + Returns: A new mapping + + Pre-conditions: + len(active_qubits) <= num_qubits == len(graph) + """ + #pylint: disable=unused-argument + mapping = deepcopy(current_mapping) + currently_used_nodes = sorted([v for _, v in mapping.items()]) + available_ids = [n for n in graph if n not in currently_used_nodes] + + for i, logical_id in enumerate(new_logical_qubit_ids): + mapping[logical_id] = available_ids[i] + return mapping + + +def _iterate_with_previous(some_iterable): + prevs, items = itertools.tee(some_iterable, 2) + items = itertools.islice(items, 1, None) + prevs = itertools.islice(prevs, len(some_iterable) - 1) + return zip(prevs, items) + + +def _return_swaps(paths): + """ + Generate a list of swap ops based on a list of paths through the graph. 
+ + Args: + paths (list): List of paths through the graph between pairs + of qubits that need to interact + + Returns: A list of swap operations (tuples of logical qubit ids) + required to move the qubits to the correct locations + """ + swap_operations = [] + + for path in paths: + swap_operations.append([]) + path_for_qb0, path_for_qb1 = np.array_split(path, 2) + + # Add swaps operations for first half of the path + for prev, cur in _iterate_with_previous(path_for_qb0): + swap_operations[-1].append((prev, cur)) + + # Add swaps operations for the second half of the path + for prev, cur in _iterate_with_previous(path_for_qb1[::-1]): + swap_operations[-1].append((prev, cur)) + + return swap_operations + + +class GraphMapper(BasicMapperEngine): + """ + Mapper to an arbitrary connected graph. + + Maps a quantum circuit to an arbitrary connected graph of connected qubits + using Swap gates. + + + Attributes: + current_mapping: Stores the mapping: key is logical qubit id, value + is mapped qubit id from 0,...,self.num_qubits + graph (networkx.Graph): Arbitrary connected graph + storage (int): Number of gate it caches before mapping. + num_qubits(int): number of qubits + num_mappings (int): Number of times the mapper changed the mapping + depth_of_swaps (dict): Key are circuit depth of swaps, value is the + number of such mappings which have been + applied + num_of_swaps_per_mapping (dict): Key are the number of swaps per + mapping, value is the number of such + mappings which have been applied + + Note: + 1) Gates are cached and only mapped from time to time. A + FastForwarding gate doesn't empty the cache, only a FlushGate does. + 2) Only 1 and two qubit gates allowed. + 3) Does not optimize for dirty qubits. + + """ + + def __init__(self, + graph, + storage=1000, + add_qubits_to_mapping=_add_qubits_to_mapping): + """ + Initialize a GraphMapper compiler engine. 
+ + Args: + graph (networkx.Graph): Arbitrary connected graph representing + Qubit connectivity + storage (int): Number of gates to temporarily store + Raises: + RuntimeError: if the graph is not a connected graph + """ + BasicMapperEngine.__init__(self) + + # Make sure that we start with a valid graph + if not nx.is_connected(graph): + raise RuntimeError("Input graph must be a connected graph") + elif not all([isinstance(n, int) for n in graph]): + raise RuntimeError( + "All nodes inside the graph needs to be integers") + else: + self.graph = graph + self.num_qubits = self.graph.number_of_nodes() + self.storage = storage + # Randomness to pick permutations if there are too many. + # This creates an own instance of Random in order to not influence + # the bound methods of the random module which might be used in other + # places. + self._rng = random.Random(11) + # Storing commands + self._stored_commands = list() + # Logical qubit ids for which the Allocate gate has already been + # processed and sent to the next engine but which are not yet + # deallocated: + self._currently_allocated_ids = set() + # Our internal mappings + self._current_mapping = dict() # differs from other mappers + self._reverse_current_mapping = dict() + # Function to add new logical qubits ids to the mapping + self._add_qubits_to_mapping = add_qubits_to_mapping + + # Statistics: + self.num_mappings = 0 + self.depth_of_swaps = dict() + self.num_of_swaps_per_mapping = dict() + + @property + def current_mapping(self): + """Return a copy of the current mapping.""" + return deepcopy(self._current_mapping) + + @current_mapping.setter + def current_mapping(self, current_mapping): + """Set the current mapping to a new value.""" + if not current_mapping: + self._current_mapping = dict() + self._reverse_current_mapping = dict() + else: + self._current_mapping = current_mapping + self._reverse_current_mapping = { + v: k + for k, v in self._current_mapping.items() + } + + def is_available(self, cmd): + 
"""Only allows 1 or two qubit gates.""" + num_qubits = 0 + for qureg in cmd.all_qubits: + num_qubits += len(qureg) + return num_qubits <= 2 + + def _process_commands(self): + """ + Process commands and if necessary, calculate paths through the graph. + + Attempts to find as many paths through the graph as possible in order + to generate a new mapping that is able to apply as many gates as + possible. + + It goes through stored_commands and tries to find paths through the + graph that can be applied simultaneously to move the qubits without + side effects so that as many gates can be applied; gates are applied + on on a first come first served basis. + + Args: + None (list): Nothing here for now + + Returns: A list of paths through the graph to move some qubits and have + them interact + """ + # TODO: need to think about merging paths and applying gates in the + # middle of the swaps if possible + + paths = [] + allocated_qubits = deepcopy(self._currently_allocated_ids) + active_qubits = deepcopy(self._currently_allocated_ids) + + for cmd in self._stored_commands: + if (len(allocated_qubits) == self.num_qubits + and not active_qubits): + break + + qubit_ids = [ + qubit.id for qureg in cmd.all_qubits for qubit in qureg + ] + + if len(qubit_ids) > 2 or not qubit_ids: + raise Exception("Invalid command (number of qubits): " + + str(cmd)) + + elif isinstance(cmd.gate, AllocateQubitGate): + qubit_id = cmd.qubits[0][0].id + if len(allocated_qubits) < self.num_qubits: + allocated_qubits.add(qubit_id) + active_qubits.add(qubit_id) + else: + raise QubitAllocationError( + "Unable to allocate new qubit: all possible qubits" + " ({}) have already been allocated".format( + self.num_qubits)) + + elif isinstance(cmd.gate, DeallocateQubitGate): + qubit_id = cmd.qubits[0][0].id + if qubit_id in active_qubits: + active_qubits.remove(qubit_id) + # Do not remove from allocated_qubits as this would + # allow the mapper to add a new qubit to this location + # before the next swaps which 
is currently not + # supported + + # Process a two qubit gate: + elif len(qubit_ids) == 2: + path = self._process_two_qubit_gate_dumb( + qubit0=qubit_ids[0], + qubit1=qubit_ids[1], + active_qubits=active_qubits) + + paths_start = [p[0] for p in paths] + paths_end = [p[-1] for p in paths] + + if path \ + and not any([p in paths_start for p in path]) \ + and not any([p in paths_end for p in path]): + paths += [path] + # Maybe a bit too conservative: remove all qubits + # of the path from the active qubits list + # This effectively only allows non-intersecting paths + for backend_id in path: + if backend_id in self._reverse_current_mapping: + active_qubits.discard( + self._reverse_current_mapping[backend_id]) + + return paths + + def _process_two_qubit_gate_dumb(self, qubit0, qubit1, active_qubits): + """ + Process a two qubit gate. + + It either removes the two qubits from active_qubits if the gate is + not possible or generate an optimal path through the graph connecting + the two qubits. + + Args: + qubit0 (int): qubit.id of one of the qubits + qubit1 (int): qubit.id of the other qubit + active_qubits (set): contains all qubit ids which for which + gates can be applied in this cycle before + the swaps + + Returns: A path through the graph (can be empty) + """ + # At least one qubit is not an active qubit: + if qubit0 not in active_qubits or qubit1 not in active_qubits: + active_qubits.discard(qubit0) + active_qubits.discard(qubit1) + return [] + + # Path is given using graph nodes (ie. mapped ids) + # If we come here, the two nodes can't be connected on the graph or the + # command would have been applied already + node0 = self._current_mapping[qubit0] + node1 = self._current_mapping[qubit1] + + # Qubits are both active but not connected via an edge + return nx.shortest_path(self.graph, source=node0, target=node1) + + def _send_possible_commands(self): + """ + Send the stored commands possible without changing the mapping. 
+ """ + active_ids = deepcopy(self._currently_allocated_ids) + + for logical_id in self._current_mapping: + # So that loop doesn't stop before AllocateGate applied + active_ids.add(logical_id) + + new_stored_commands = [] + for i in range(len(self._stored_commands)): + cmd = self._stored_commands[i] + if not active_ids: + new_stored_commands += self._stored_commands[i:] + break + if isinstance(cmd.gate, AllocateQubitGate): + if cmd.qubits[0][0].id in self._current_mapping: + self._currently_allocated_ids.add(cmd.qubits[0][0].id) + qb = WeakQubitRef( + engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + new_cmd = Command( + engine=self, + gate=AllocateQubitGate(), + qubits=([qb], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + self.send([new_cmd]) + else: + new_stored_commands.append(cmd) + elif isinstance(cmd.gate, DeallocateQubitGate): + if cmd.qubits[0][0].id in active_ids: + qb = WeakQubitRef( + engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + new_cmd = Command( + engine=self, + gate=DeallocateQubitGate(), + qubits=([qb], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + self._currently_allocated_ids.remove(cmd.qubits[0][0].id) + active_ids.remove(cmd.qubits[0][0].id) + self._current_mapping.pop(cmd.qubits[0][0].id) + self.send([new_cmd]) + else: + new_stored_commands.append(cmd) + else: + send_gate = True + logical_ids = set() + backend_ids = set() + for qureg in cmd.all_qubits: + for qubit in qureg: + if qubit.id not in active_ids: + send_gate = False + break + logical_ids.add(qubit.id) + backend_ids.add(self._current_mapping[qubit.id]) + + # Check that mapped ids are connected by an edge on the graph + if len(backend_ids) == 2: + send_gate = self.graph.has_edge(*list(backend_ids)) + + if send_gate: + self._send_cmd_with_mapped_ids(cmd) + else: + # Cannot execute gate -> make sure no other gate will use + # any of those qubits to preserve sequence + for qureg in cmd.all_qubits: + for qubit in qureg: + 
active_ids.discard(qubit.id) + new_stored_commands.append(cmd) + self._stored_commands = new_stored_commands + + def _run(self): + """ + Create a new mapping and executes possible gates. + + It first allocates all 0, ..., self.num_qubits-1 mapped qubit ids, if + they are not already used because we might need them all for the + swaps. Then it creates a new map, swaps all the qubits to the new map, + executes all possible gates, and finally deallocates mapped qubit ids + which don't store any information. + """ + num_of_stored_commands_before = len(self._stored_commands) + + # Go through the command list and generate a list of paths. + # At the same time, add soon-to-be-allocated qubits to the mapping + paths = self._process_commands() + + self._send_possible_commands() + if not self._stored_commands: + return + + swaps = _return_swaps(paths) + + if swaps: # first mapping requires no swaps + backend_ids_used = { + self._current_mapping[logical_id] + for logical_id in self._currently_allocated_ids + } + + # Get a list of all backend ids we require to perform the swaps + required_ids = { + n + for n in list(itertools.chain.from_iterable(paths)) + } + + # Get a list of the qubits we need to allocate just to perform the + # swaps + not_allocated_ids = set(required_ids).difference(backend_ids_used) + + # Allocate all mapped qubit ids (which are not already allocated, + # i.e., contained in self._currently_allocated_ids) + # and add them temporarily to the + for backend_id in not_allocated_ids: + qb = WeakQubitRef(engine=self, idx=backend_id) + cmd = Command( + engine=self, gate=AllocateQubitGate(), qubits=([qb], )) + self.send([cmd]) + + # Send swap operations to arrive at the new mapping + swaps = list(itertools.chain.from_iterable(swaps)) + for bqb0, bqb1 in swaps: + q0 = WeakQubitRef(engine=self, idx=bqb0) + q1 = WeakQubitRef(engine=self, idx=bqb1) + cmd = Command(engine=self, gate=Swap, qubits=([q0], [q1])) + self.send([cmd]) + + # Register statistics: + 
self.num_mappings += 1 + depth = return_swap_depth(swaps) + if depth not in self.depth_of_swaps: + self.depth_of_swaps[depth] = 1 + else: + self.depth_of_swaps[depth] += 1 + if len(swaps) not in self.num_of_swaps_per_mapping: + self.num_of_swaps_per_mapping[len(swaps)] = 1 + else: + self.num_of_swaps_per_mapping[len(swaps)] += 1 + + # Calculate reverse internal mapping + new_internal_mapping = deepcopy(self._reverse_current_mapping) + + # Add missing entries with invalid id to be able to process the + # swaps operations + for backend_id in not_allocated_ids: + new_internal_mapping[backend_id] = -1 + + # Update internal mapping based on swap operations + for bqb0, bqb1 in swaps: + new_internal_mapping[bqb0], \ + new_internal_mapping[bqb1] = \ + new_internal_mapping[bqb1], \ + new_internal_mapping[bqb0] + + # Calculate the list of "helper" qubits that need to be deallocated + # and remove invalid entries + not_needed_anymore = [] + new_reverse_current_mapping = {} + for backend_id, logical_id in new_internal_mapping.items(): + if logical_id < 0: + not_needed_anymore.append(backend_id) + else: + new_reverse_current_mapping[backend_id] = logical_id + + # Deallocate all previously mapped ids which we only needed for the + # swaps: + for backend_id in not_needed_anymore: + qb = WeakQubitRef(engine=self, idx=backend_id) + cmd = Command( + engine=self, gate=DeallocateQubitGate(), qubits=([qb], )) + self.send([cmd]) + + # Calculate new mapping + new_mapping = { + v: k + for k, v in new_reverse_current_mapping.items() + } + self.current_mapping = new_mapping + + # Send possible gates: + self._send_possible_commands() + # Check that mapper actually made progress + if len(self._stored_commands) == num_of_stored_commands_before: + raise RuntimeError("Mapper is potentially in an infinite loop. " + "It is likely that the algorithm requires " + "too many qubits. 
Increase the number of " + "qubits for this mapper.") + + def receive(self, command_list): + """ + Receive some commands. + + Receive a command list and, for each command, stores it until + we do a mapping (FlushGate or Cache of stored commands is full). + + Args: + command_list (list of Command objects): list of commands to + receive. + """ + for cmd in command_list: + if isinstance(cmd.gate, FlushGate): + while self._stored_commands: + self._run() + self.send([cmd]) + else: + self._stored_commands.append(cmd) + # Storage is full: Create new map and send some gates away: + if len(self._stored_commands) >= self.storage: + self._run() From b7d323898a0f7900da88f90a0f2aff3dd893e7e4 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Fri, 4 Jan 2019 14:46:59 +0100 Subject: [PATCH 02/25] Add unit tests for GraphMapper --- projectq/cengines/_graphmapper_test.py | 708 +++++++++++++++++++++++++ 1 file changed, 708 insertions(+) create mode 100644 projectq/cengines/_graphmapper_test.py diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py new file mode 100644 index 000000000..21582f06b --- /dev/null +++ b/projectq/cengines/_graphmapper_test.py @@ -0,0 +1,708 @@ +# Copyright 2018 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for projectq.cengines._graphmapper.py.""" + +from copy import deepcopy +import itertools +import random + +import pytest +import networkx as nx +import projectq +from projectq.cengines import DummyEngine, LocalOptimizer +from projectq.meta import LogicalQubitIDTag +from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, + X) +from projectq.types import WeakQubitRef + +from projectq.cengines import _graphmapper as graphm + + +def allocate_all_qubits_cmd(mapper): + qb = [] + allocate_cmds = [] + for i in range(mapper.num_qubits): + qb.append(WeakQubitRef(engine=None, idx=i)) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) + return qb, allocate_cmds + + +def generate_grid_graph(nrows, ncols): + graph = nx.Graph() + graph.add_nodes_from(range(nrows * ncols)) + + for row in range(nrows): + for col in range(ncols): + node0 = col + ncols * row + + is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) + add_horizontal = is_middle or (row in (0, nrows - 1) and + (0 < col < ncols - 1)) + add_vertical = is_middle or (col in (0, ncols - 1) and + (0 < row < nrows - 1)) + + if add_horizontal: + graph.add_edge(node0, node0 - 1) + graph.add_edge(node0, node0 + 1) + if add_vertical: + graph.add_edge(node0, node0 - ncols) + graph.add_edge(node0, node0 + ncols) + + return graph + + +@pytest.fixture(scope="module") +def simple_graph(): + # 2 4 + # / \ / | + # 0 - 1 3 | + # \ / \ | + # 5 6 + graph = nx.Graph() + graph.add_nodes_from(range(7)) + graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, + 6), + (4, 6)]) + return graph + + +@pytest.fixture(scope="module") +def grid22_graph(): + graph = nx.Graph() + graph.add_nodes_from([0, 1, 2, 3]) + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0)]) + return graph + + +@pytest.fixture(scope="module") +def grid33_graph(): + return generate_grid_graph(3, 3) + + +@pytest.fixture +def grid22_graph_mapper(grid22_graph): + mapper = 
graphm.GraphMapper(graph=grid22_graph) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +@pytest.fixture +def grid33_graph_mapper(grid33_graph): + mapper = graphm.GraphMapper(graph=grid33_graph) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +@pytest.fixture +def simple_mapper(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + return mapper, backend + + +def test_is_available(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + cmd0 = Command(None, BasicGate(), qubits=([qb0], )) + assert mapper.is_available(cmd0) + cmd1 = Command(None, BasicGate(), qubits=([qb0], ), controls=[qb1]) + assert mapper.is_available(cmd1) + cmd2 = Command(None, BasicGate(), qubits=([qb0], [qb1, qb2])) + assert not mapper.is_available(cmd2) + cmd3 = Command(None, BasicGate(), qubits=([qb0], [qb1]), controls=[qb2]) + assert not mapper.is_available(cmd3) + + +def test_invalid_gates(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=-1) + + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], ), controls=[]) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], ), controls=[]) + cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], ), controls=[]) + cmd3 = Command(engine=None, gate=X, qubits=([qb0], [qb1]), controls=[qb2]) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb3], )) + + with pytest.raises(Exception): + mapper.receive([cmd0, cmd1, cmd2, cmd3, cmd_flush]) + + +def 
test_run_infinite_loop_detection(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=-1) + + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb3], )) + + cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[]) + with pytest.raises(RuntimeError): + mapper.receive([cmd0, cmd_flush]) + + mapper._stored_commands = [] + cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) + with pytest.raises(RuntimeError): + mapper.receive([cmd0, cmd_flush]) + + +def test_valid_and_invalid_graphs(simple_graph, grid33_graph): + graph = nx.Graph() + graph.add_nodes_from('abcd') + with pytest.raises(RuntimeError): + graphm.GraphMapper(graph=graph) + + graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) + with pytest.raises(RuntimeError): + graphm.GraphMapper(graph=graph) + + graph = deepcopy(simple_graph) + graph.remove_edge(0, 1) + with pytest.raises(RuntimeError): + graphm.GraphMapper(graph=graph) + + mapper = graphm.GraphMapper(graph=simple_graph) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + mapper._stored_commands = allocate_cmds + mapper._run() + assert not mapper._send_possible_commands() + assert mapper.current_mapping == dict(enumerate(range(len(simple_graph)))) + + mapper = graphm.GraphMapper(graph=grid33_graph) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + mapper._stored_commands = allocate_cmds + mapper._run() + assert not mapper._send_possible_commands() + assert mapper.current_mapping == dict(enumerate(range(len(grid33_graph)))) + + +def test_resetting_mapping_to_none(simple_graph): + mapper = 
graphm.GraphMapper(graph=simple_graph) + mapper.current_mapping = {0: 1} + assert mapper._current_mapping == {0: 1} + assert mapper._reverse_current_mapping == {1: 0} + mapper.current_mapping = {0: 0, 1: 4} + assert mapper._current_mapping == {0: 0, 1: 4} + assert mapper._reverse_current_mapping == {0: 0, 4: 1} + mapper.current_mapping = None + assert mapper._current_mapping == {} + assert mapper._reverse_current_mapping == {} + + +def test_send_possible_commands(simple_graph, simple_mapper): + mapper, backend = simple_mapper + mapper.current_mapping = dict(enumerate(range(len(simple_graph)))) + + neighbours = set() + for node in simple_graph: + for other in simple_graph[node]: + neighbours.add(frozenset((node, other))) + + neighbours = [tuple(s) for s in neighbours] + + for qb0_id, qb1_id in neighbours: + qb0 = WeakQubitRef(engine=None, idx=qb0_id) + qb1 = WeakQubitRef(engine=None, idx=qb1_id) + cmd1 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd2 = Command(None, X, qubits=([qb1], ), controls=[qb0]) + mapper._stored_commands = [cmd1, cmd2] + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 0 + + for qb0_id, qb1_id in itertools.permutations(range(8), 2): + if ((qb0_id, qb1_id) not in neighbours + and (qb1_id, qb0_id) not in neighbours): + qb0 = WeakQubitRef(engine=None, idx=qb0_id) + qb1 = WeakQubitRef(engine=None, idx=qb1_id) + cmd = Command(None, X, qubits=([qb0], ), controls=[qb1]) + mapper._stored_commands = [cmd] + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 1 + + +def test_send_possible_commands_allocate(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + mapper._stored_commands = [cmd0] + mapper._currently_allocated_ids = set([10]) + # not in mapping: + mapper.current_mapping = dict() + assert len(backend.received_commands) == 0 + mapper._send_possible_commands() 
+ assert len(backend.received_commands) == 0 + assert mapper._stored_commands == [cmd0] + # in mapping: + mapper.current_mapping = {0: 3} + mapper._send_possible_commands() + assert len(mapper._stored_commands) == 0 + # Only self._run() sends Allocate gates + mapped0 = WeakQubitRef(engine=None, idx=3) + received_cmd = Command( + engine=mapper, + gate=Allocate, + qubits=([mapped0], ), + controls=[], + tags=[LogicalQubitIDTag(0)]) + assert backend.received_commands[0] == received_cmd + assert mapper._currently_allocated_ids == set([10, 0]) + + +def test_send_possible_commands_allocation_no_active_qubits( + grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], )) + cmd3 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]) + cmd4 = Command(engine=None, gate=Allocate, qubits=([qb3], )) + cmd5 = Command(engine=None, gate=X, qubits=([qb3], )) + cmd6 = Command(engine=None, gate=Deallocate, qubits=([qb3], )) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper._stored_commands = [ + cmd0, cmd1, cmd2, cmd3, cmd4, cmd5, cmd6, cmd_flush + ] + + mapper._run() + assert len(mapper._stored_commands) == 4 + assert mapper._stored_commands[0] == cmd4 + assert mapper._stored_commands[1] == cmd5 + assert mapper._stored_commands[2] == cmd6 + mapper._run() + assert len(mapper._stored_commands) == 1 + assert mapper._stored_commands[0] == cmd_flush + + +def test_send_possible_commands_deallocate(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command( + engine=None, gate=Deallocate, 
qubits=([qb0], ), controls=[], tags=[]) + mapper._stored_commands = [cmd0] + mapper.current_mapping = dict() + mapper._currently_allocated_ids = set([10]) + # not yet allocated: + mapper._send_possible_commands() + assert len(backend.received_commands) == 0 + assert mapper._stored_commands == [cmd0] + # allocated: + mapper.current_mapping = {0: 3} + mapper._currently_allocated_ids.add(0) + mapper._send_possible_commands() + assert len(backend.received_commands) == 1 + assert len(mapper._stored_commands) == 0 + assert mapper.current_mapping == dict() + assert mapper._currently_allocated_ids == set([10]) + + +def test_send_possible_commands_no_initial_mapping(simple_mapper): + mapper, backend = simple_mapper + + assert mapper._current_mapping == {} + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=-1) + + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], ), controls=[]) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], ), controls=[]) + cmd2 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb2], )) + all_cmds = [cmd0, cmd1, cmd2, cmd_flush] + mapper.receive(all_cmds) + + assert mapper._current_mapping + assert len(mapper._stored_commands) == 0 + + +def test_send_possible_commands_keep_remaining_gates(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd1 = Command( + engine=None, gate=Deallocate, qubits=([qb0], ), controls=[], tags=[]) + cmd2 = Command( + engine=None, gate=Allocate, qubits=([qb1], ), controls=[], tags=[]) + + mapper._stored_commands = [cmd0, cmd1, cmd2] + mapper.current_mapping = {0: 0} + mapper._send_possible_commands() + assert mapper._stored_commands == [cmd2] + + +def 
test_send_possible_commands_one_inactive_qubit(simple_mapper): + mapper, backend = simple_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = Command( + engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd1 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) + mapper._stored_commands = [cmd0, cmd1] + mapper.current_mapping = {0: 0} + mapper._send_possible_commands() + assert mapper._stored_commands == [cmd1] + + +def test_run_and_receive(simple_graph, simple_mapper): + mapper, backend = simple_mapper + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + gates = [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[5]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[4]], ), controls=[qb[6]]) + ] + deallocate_cmds = [ + Command(engine=None, gate=Deallocate, qubits=([qb[1]], )) + ] + + allocated_qubits_ref = set([0, 2, 3, 4, 5, 6]) + + all_cmds = list(itertools.chain(allocate_cmds, gates, deallocate_cmds)) + mapper.receive(all_cmds) + assert mapper._stored_commands == all_cmds + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive([cmd_flush]) + assert mapper._stored_commands == [] + assert len(backend.received_commands) == len(all_cmds) + 1 + assert mapper._currently_allocated_ids == allocated_qubits_ref + + mapping = dict(enumerate(range(len(simple_graph)))) + del mapping[1] + assert mapper.current_mapping == mapping + + cmd9 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) + mapper.receive([cmd9, cmd_flush]) + assert mapper._currently_allocated_ids == 
allocated_qubits_ref + for idx in allocated_qubits_ref: + assert idx in mapper.current_mapping + assert mapper._stored_commands == [] + assert len(mapper.current_mapping) == 6 + assert mapper.num_mappings == 1 + + +def test_send_two_qubit_gate_before_swap(simple_mapper): + qb, all_cmds = allocate_all_qubits_cmd(simple_mapper[0]) + + all_cmds.insert(3, None) + all_cmds.insert(5, Command(None, X, qubits=([qb[2]], ), controls=[qb[3]])) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + all_cmds.append( + Command(engine=None, gate=FlushGate(), qubits=([qb_flush], ))) + + for cmd in [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[0]]) + ]: + mapper, backend = deepcopy(simple_mapper) + mapper.enable_caching = False + + all_cmds[3] = cmd + + mapper._stored_commands = all_cmds + print([str(cmd) for cmd in all_cmds]) + mapper._run() + assert mapper.num_mappings == 1 + if mapper.current_mapping[2] == 2: + # qb[2] has not moved, all_cmds[5] is possible + assert mapper._stored_commands == all_cmds[6:] + assert mapper.current_mapping == {0: 1, 1: 0, 2: 2, 3: 3} + else: + # qb[2] moved, all_cmds[5] not possible + assert mapper._stored_commands == all_cmds[5:] + assert mapper.current_mapping == {0: 0, 1: 2, 2: 1, 3: 3} + + +def test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): + qb, allocate_cmds = allocate_all_qubits_cmd(simple_mapper[0]) + + all_cmds = [ + allocate_cmds[0], + allocate_cmds[-1], + None, + Command(None, X, qubits=([qb[6]], ), controls=[qb[4]]), + ] + + idx = all_cmds.index(None) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + all_cmds.append( + Command(engine=None, gate=FlushGate(), qubits=([qb_flush], ))) + + for cmd in [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[6]], ), controls=[qb[0]]) + ]: + mapper, backend = deepcopy(simple_mapper) + mapper.current_mapping = dict(enumerate(range(len(qb)))) + mapper.enable_caching = False + 
+ all_cmds[idx] = cmd + + mapper._stored_commands = all_cmds + mapper._run() + assert mapper.num_mappings == 1 + + if mapper.current_mapping[4] == 4 and mapper.current_mapping[5] == 5: + if mapper.current_mapping[6] == 3: + # qb[6] is on position 3, all commands are possible + assert mapper._stored_commands == all_cmds[-1:] + assert mapper.current_mapping == {0: 2, 4: 4, 5: 5, 6: 3} + else: + # qb[6] is on position 2, all_cmds[8] is not possible + assert mapper._stored_commands == all_cmds[-2:] + assert mapper.current_mapping == {0: 1, 4: 4, 5: 5, 6: 2} + else: + # Should not happen... + assert False + + +def test_allocate_too_many_qubits(simple_mapper): + mapper, backend = simple_mapper + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb.append(WeakQubitRef(engine=None, idx=len(qb))) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[-1]], ))) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + with pytest.raises(RuntimeError): + mapper.receive(allocate_cmds + [cmd_flush]) + + +def test_send_possible_commands_reallocate_backend_id(grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) + all_cmds = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + Command(engine=None, gate=X, qubits=([qb4], )), + ] + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), 
qubits=([qb_flush], )) + mapper.receive(all_cmds + [cmd_flush]) + assert mapper.current_mapping == {0: 0, 2: 2, 3: 3, 4: 1} + assert len(mapper._stored_commands) == 0 + assert len(backend.received_commands) == 9 + + +def test_correct_stats(simple_mapper): + mapper, backend = simple_mapper + + # Should test stats for twice same mapping but depends on heuristic + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], )) + + cmd3 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd4 = Command(None, X, qubits=([qb1], ), controls=[qb2]) + cmd5 = Command(None, X, qubits=([qb0], ), controls=[qb2]) + cmd6 = Command(None, X, qubits=([qb2], ), controls=[qb1]) + cmd7 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd8 = Command(None, X, qubits=([qb1], ), controls=[qb2]) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive( + [cmd0, cmd1, cmd2, cmd3, cmd4, cmd5, cmd6, cmd7, cmd8, cmd_flush]) + assert mapper.num_mappings == 2 + + +def test_send_possible_cmds_before_new_mapping(simple_mapper): + mapper, backend = simple_mapper + + def dont_call_mapping(): + raise Exception + + mapper._find_paths = dont_call_mapping + + mapper.current_mapping = {0: 1} + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + qb2 = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb2], )) + mapper.receive([cmd0, cmd_flush]) + + +def test_logical_id_tags_allocate_and_deallocate(simple_mapper): + mapper, backend = simple_mapper + mapper.current_mapping = {0: 1, 1: 6} + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + cmd0 = 
Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd2 = Command(None, X, qubits=([qb0], ), controls=[qb1]) + cmd3 = Command(engine=None, gate=Deallocate, qubits=([qb0], )) + cmd4 = Command(engine=None, gate=Deallocate, qubits=([qb1], )) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive([cmd0, cmd1, cmd2, cmd_flush]) + assert backend.received_commands[0].gate == Allocate + assert backend.received_commands[0].qubits[0][0].id == 1 + assert backend.received_commands[0].tags == [LogicalQubitIDTag(0)] + assert backend.received_commands[1].gate == Allocate + assert backend.received_commands[1].qubits[0][0].id == 6 + assert backend.received_commands[1].tags == [LogicalQubitIDTag(1)] + for cmd in backend.received_commands[2:]: + if cmd.gate == Allocate: + assert cmd.tags == [] + elif cmd.gate == Deallocate: + assert cmd.tags == [] + mapped_id_for_0 = mapper.current_mapping[0] + mapped_id_for_1 = mapper.current_mapping[1] + mapper.receive([cmd3, cmd4, cmd_flush]) + assert backend.received_commands[-3].gate == Deallocate + assert backend.received_commands[-3].qubits[0][0].id == mapped_id_for_0 + assert backend.received_commands[-3].tags == [LogicalQubitIDTag(0)] + assert backend.received_commands[-2].gate == Deallocate + assert backend.received_commands[-2].qubits[0][0].id == mapped_id_for_1 + assert backend.received_commands[-2].tags == [LogicalQubitIDTag(1)] + + +def test_check_that_local_optimizer_doesnt_merge(simple_graph): + mapper = graphm.GraphMapper(graph=simple_graph) + optimizer = LocalOptimizer(10) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = optimizer + mapper.current_mapping = dict(enumerate(range(len(simple_graph)))) + mapper.current_mapping = {0: 0} + mapper.storage = 1 + optimizer.next_engine = backend + + qb0 = WeakQubitRef(engine=None, idx=0) + 
qb1 = WeakQubitRef(engine=None, idx=1) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + cmd1 = Command(None, X, qubits=([qb0], )) + cmd2 = Command(engine=None, gate=Deallocate, qubits=([qb0], )) + mapper.receive([cmd0, cmd1, cmd2]) + assert len(mapper._stored_commands) == 0 + mapper.current_mapping = {1: 0} + cmd3 = Command(engine=None, gate=Allocate, qubits=([qb1], )) + cmd4 = Command(None, X, qubits=([qb1], )) + cmd5 = Command(engine=None, gate=Deallocate, qubits=([qb1], )) + mapper.receive([cmd3, cmd4, cmd5, cmd_flush]) + assert len(backend.received_commands) == 7 + + +def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper): + mapper, backend = grid33_graph_mapper + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + cmd0 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) + cmd1 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) + cmd2 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd2, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 1 + assert mapper.depth_of_swaps == {1: 1} + assert mapper.current_mapping == { + 0: 0, + 1: 1, + 2: 2, + 3: 6, + 4: 7, + 5: 8, + 6: 3, + 7: 4, + 8: 5 + } + + cmd3 = Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd4 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + cmd5 = Command(None, X, qubits=([qb[6]], ), controls=[qb[8]]) + mapper.receive([cmd3, cmd4, cmd5, cmd_flush]) + + assert not mapper._stored_commands + assert mapper.num_mappings == 2 + assert mapper.depth_of_swaps == {1: 2} + assert mapper.current_mapping == { + 0: 0, + 1: 2, + 2: 1, + 3: 6, + 4: 8, + 5: 7, + 6: 3, + 7: 5, + 8: 4 + } From 
946bcb99d6d1a5350c8a8703323c9a5a85bbbf18 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Tue, 8 Jan 2019 16:01:58 +0100 Subject: [PATCH 03/25] Add caching option to GraphMapper _graphmapper.py: minor updates --- projectq/cengines/_graphmapper.py | 89 +++++++++++++++++++++++++- projectq/cengines/_graphmapper_test.py | 62 +++++++++++++++++- 2 files changed, 149 insertions(+), 2 deletions(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 60e8f57d8..c7cbd8340 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -35,6 +35,66 @@ from projectq.types import WeakQubitRef +class PathCacheExhaustive(): + """ + Class acting as cache for optimal paths through the graph. + """ + + def __init__(self, path_length_threshold): + self._path_length_threshold = path_length_threshold + self._cache = {} + self.key_type = frozenset + + def empty_cache(self): + """Empty the cache.""" + self._cache = {} + + def get_path(self, start, end): + """ + Return a path from the cache. + + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: Optimal path stored in cache + + Raises: KeyError if path is not present in the cache + """ + return self._cache[self.key_type((start, end))] + + def has_path(self, start, end): + """ + Test whether a path connecting start to end is present in the cache. + + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: True/False + """ + return self.key_type((start, end)) in self._cache + + def add_path(self, path): + """ + Add a path to the cache. + + This method also recursively adds all the subpaths that are at least + self._path_length_threshold long to the cache. 
+ + Args: + path (list): Path to store inside the cache + """ + length = len(path) + for start in range(length - self._path_length_threshold + 1): + node0 = path[start] + for incr in range(length - start - 1, + self._path_length_threshold - 2, -1): + end = start + incr + self._cache[self.key_type((node0, + path[end]))] = path[start:end + 1] + + class GraphMapperError(Exception): """Base class for all exceptions related to the GraphMapper.""" @@ -123,12 +183,26 @@ class GraphMapper(BasicMapperEngine): Maps a quantum circuit to an arbitrary connected graph of connected qubits using Swap gates. + Args: + graph (networkx.Graph) : Arbitrary connected graph + storage (int) Number of gates to temporarily store + add_qubits_to_mapping (function) Function called when new qubits are to + be added to the current mapping + Signature of the function call: + current_mapping + graph + new_logical_qubit_ids + stored_commands + enable_caching(Bool): Controls whether optimal path caching is + enabled Attributes: current_mapping: Stores the mapping: key is logical qubit id, value is mapped qubit id from 0,...,self.num_qubits graph (networkx.Graph): Arbitrary connected graph storage (int): Number of gate it caches before mapping. + enable_caching(Bool): Controls whether optimal path caching is + enabled num_qubits(int): number of qubits num_mappings (int): Number of times the mapper changed the mapping depth_of_swaps (dict): Key are circuit depth of swaps, value is the @@ -149,7 +223,8 @@ class GraphMapper(BasicMapperEngine): def __init__(self, graph, storage=1000, - add_qubits_to_mapping=_add_qubits_to_mapping): + add_qubits_to_mapping=_add_qubits_to_mapping, + enable_caching=True): """ Initialize a GraphMapper compiler engine. 
@@ -157,6 +232,8 @@ def __init__(self, graph (networkx.Graph): Arbitrary connected graph representing Qubit connectivity storage (int): Number of gates to temporarily store + enable_caching (Bool): Controls whether optimal path caching is + enabled Raises: RuntimeError: if the graph is not a connected graph """ @@ -172,6 +249,10 @@ def __init__(self, self.graph = graph self.num_qubits = self.graph.number_of_nodes() self.storage = storage + self.enable_caching = enable_caching + # Path cache support + path_length_threshold = 3 + self._path_cache = PathCacheExhaustive(path_length_threshold) # Randomness to pick permutations if there are too many. # This creates an own instance of Random in order to not influence # the bound methods of the random module which might be used in other @@ -332,6 +413,12 @@ def _process_two_qubit_gate_dumb(self, qubit0, qubit1, active_qubits): node1 = self._current_mapping[qubit1] # Qubits are both active but not connected via an edge + if self.enable_caching: + if self._path_cache.has_path(node0, node1): + return self._path_cache.get_path(node0, node1) + path = nx.shortest_path(self.graph, source=node0, target=node1) + self._path_cache.add_path(path) + return path return nx.shortest_path(self.graph, source=node0, target=node1) def _send_possible_commands(self): diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 21582f06b..517fb47ec 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -118,6 +118,48 @@ def simple_mapper(simple_graph): return mapper, backend +# ============================================================================== + + +def test_path_cache_exhaustive(): + path_length_threshold = 3 + cache = graphm.PathCacheExhaustive(path_length_threshold) + + assert not cache._cache + cache.add_path(['a', 'b', 'c']) + assert cache._cache == {cache.key_type(('a', 'c')): ['a', 'b', 'c']} + + assert cache.has_path('a', 'c') + assert not 
cache.has_path('a', 'b') + assert not cache.has_path('b', 'c') + + cache.empty_cache() + assert not cache._cache + + cache.add_path(['a', 'b', 'c', 'd']) + assert cache._cache == { + cache.key_type(('a', 'c')): ['a', 'b', 'c'], + cache.key_type(('a', 'd')): ['a', 'b', 'c', 'd'], + cache.key_type(('b', 'd')): ['b', 'c', 'd'] + } + assert cache.has_path('a', 'd') + assert cache.has_path('d', 'a') + assert cache.has_path('a', 'c') + assert cache.has_path('b', 'd') + assert not cache.has_path('a', 'b') + assert not cache.has_path('b', 'a') + assert not cache.has_path('b', 'c') + assert not cache.has_path('c', 'd') + + str_repr = str(cache) + assert str_repr.count("['a', 'd']: ['a', 'b', 'c', 'd']") == 1 + assert str_repr.count("['a', 'c']: ['a', 'b', 'c']") == 1 + assert str_repr.count("['b', 'd']: ['b', 'c', 'd']") == 1 + + +# ============================================================================== + + def test_is_available(simple_graph): mapper = graphm.GraphMapper(graph=simple_graph) qb0 = WeakQubitRef(engine=None, idx=0) @@ -659,8 +701,11 @@ def test_check_that_local_optimizer_doesnt_merge(simple_graph): assert len(backend.received_commands) == 7 -def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper): +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper, + enable_caching): mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching qb, allocate_cmds = allocate_all_qubits_cmd(mapper) @@ -706,3 +751,18 @@ def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper): 7: 5, 8: 4 } + + if enable_caching: + assert mapper._path_cache._cache + assert mapper._path_cache.has_path(0, 6) + assert mapper._path_cache.has_path(1, 7) + assert mapper._path_cache.has_path(2, 8) + assert mapper._path_cache.has_path(0, 2) + assert mapper._path_cache.has_path(3, 5) + assert mapper._path_cache.has_path(6, 8) + assert not mapper._path_cache.has_path(0, 1) + assert not 
mapper._path_cache.has_path(1, 2) + assert not mapper._path_cache.has_path(3, 4) + assert not mapper._path_cache.has_path(4, 5) + assert not mapper._path_cache.has_path(6, 7) + assert not mapper._path_cache.has_path(7, 8) From c8f2b5cc034d20a7782d5236422e64862d0eea66 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Fri, 11 Jan 2019 09:04:41 +0100 Subject: [PATCH 04/25] Fix pytest.ini file --- pytest.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pytest.ini b/pytest.ini index fab634b12..46f7c05ac 100755 --- a/pytest.ini +++ b/pytest.ini @@ -4,3 +4,5 @@ testpaths = projectq filterwarnings = error ignore:the matrix subclass is not the recommended way:PendingDeprecationWarning + ignore:invalid escape sequence:DeprecationWarning + ignore:Using or importing the ABCs from 'collections' instead:DeprecationWarning From 33dbd359f65cb51315d68608a5cac6423bf9fb4c Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Tue, 22 Jan 2019 12:01:04 +0100 Subject: [PATCH 05/25] Introduce path container to handle intersections between paths --- projectq/cengines/_graph_path_container.py | 536 ++++++++++++++++ .../cengines/_graph_path_container_test.py | 579 ++++++++++++++++++ projectq/cengines/_graphmapper.py | 159 ++--- projectq/cengines/_graphmapper_test.py | 194 +++++- 4 files changed, 1344 insertions(+), 124 deletions(-) create mode 100644 projectq/cengines/_graph_path_container.py create mode 100644 projectq/cengines/_graph_path_container_test.py diff --git a/projectq/cengines/_graph_path_container.py b/projectq/cengines/_graph_path_container.py new file mode 100644 index 000000000..1683b594c --- /dev/null +++ b/projectq/cengines/_graph_path_container.py @@ -0,0 +1,536 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This is a helper module for the _graphmapper.GraphMapper class. + +Its main goal is to store possible paths through the graph and then generate a +list of swap operations to perform as many paths as possible, by either solving +conflicts (ie. crossing points and intersections; see definitions below) or +discarding paths. +Note that when generating a list of swap operations for a particular path, the +path is usually splitted into two halves in order to maximize the number of +swap operations that can be performed simultaneously. + +In the context of this module, a distinction is made between a crossing point +and an intersection. + +A crossing point is just as its name implies a point or node of the graph that +simultaneously belongs to one or more paths. On the other hand, an intersection +is defined as a particular crossing point of a path for which one of the +splitted sub-path halves has an endpoint. This means that a path may have at +most two intersections + +This is best exemplified by some examples: + + Given the path [0, 1, 2, 3], a possible split to maximize simultaneous + swapping operations would be: + [[0, 1], [2, 3]] where 1 or 2 may be intersections. 
+ + Given the path [0, 1, 2, 3, 4], possible splits would include: + [[0, 1, 2], [3, 4]] where 2 or 3 could be intersections if they are + crossings + [[0, 1], [2, 3, 4]] where 1 or 2 could be intersections if they are + crossings +""" + +import itertools + +# ============================================================================== + + +def _find_first_order_intersections(crossings, split_paths): + """ + Find out which crossing nodes are intersections. + + A crossing point is considered an intersection if and only if either: + - the end of sub-path 1 is the crossing point + - the beginning of sub-path 2 is the crossing point + + Args: + crossings (dict) : Dictionary containing the list of all crossing + points indexed by the path ID + split_paths (dict) : Dictionary containing the two halves of each paths + indexed by the path ID + + Returns: + intersections (dict) : Dictionary indexed by the intersection node + containing the IDs of the paths for which that + particular node is considered an intersection + """ + intersections = {} + + for path_id, (subpath1, subpath2) in split_paths.items(): + for crossing in crossings[path_id]: + if crossing.overlap[0] in (subpath1[-1], subpath2[0]): + if crossing.overlap[0] not in intersections: + intersections[crossing.overlap[0]] = set((path_id, )) + else: + intersections[crossing.overlap[0]].add(path_id) + + return intersections + + +def _try_solve_intersection(intersection_node, subpath1, subpath2, + subpath1_not_crossing, subpath2_not_crossing): + """ + Attempt to solve a first order intersection by modifying sub-paths. 
+ + Args: + intersection_node (int) : Intersection node + subpath1 (list) : First half of the path + subpath2 (list) : Second half of the path + subpath1_not_crossing (list) : Helper list of booleans indicating + whether the nodes of the first subpath + are crossing or not + subpath2_not_crossing (list) : Helper list of booleans indicating + whether the nodes of the second subpath + are crossing or not + + Note: + subpath1*, subpath2* arguments are modified in-place + + Returns: + True/False depending on whether the intersection could be solved or not + """ + if len(subpath1) + len(subpath2) < 4: + return False + + if subpath1[-1] == intersection_node: + # Try moving the head of subpath2 to subpath1 + if len(subpath2) > 1 \ + and subpath2_not_crossing[0] \ + and subpath2_not_crossing[1]: + subpath1.append(subpath2[0]) + subpath1_not_crossing.append(subpath2_not_crossing[0]) + del subpath2[0] + del subpath2_not_crossing[0] + return True + else: + # Try moving the tail of subpath1 to subpath2 + if len(subpath1) > 1 \ + and subpath1_not_crossing[-1] \ + and subpath1_not_crossing[-2]: + subpath2.insert(0, subpath1.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + return True + + # Try moving the last two elements of subpath1 to subpath2 + if len(subpath1) > 2 \ + and subpath1_not_crossing[-2] \ + and subpath1_not_crossing[-3]: + subpath2.insert(0, subpath1.pop()) + subpath2.insert(0, subpath1.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) + return True + + # Try moving the first two elements of subpath2 to subpath1 + if len(subpath2) > 2 \ + and subpath2_not_crossing[1] \ + and subpath2_not_crossing[2]: + subpath1.append(subpath2[0]) + subpath1.append(subpath2[1]) + subpath1_not_crossing.append(subpath2_not_crossing[0]) + subpath1_not_crossing.append(subpath2_not_crossing[1]) + del subpath2[:2] + del subpath2_not_crossing[:2] + return True + + return False + + 
+def _return_swaps(split_paths): + """ + Return a list of swap operations given a list of path halves + + Args: + split_paths (dict): Dictionary indexed by path ID containing 2-tuples + of path halves + + Returns: A list of swap operations (2-tuples) + """ + swap_operations = [] + for path in split_paths.values(): + swap_operations.append([]) + # Add swaps operations for first half of the path + for prev, cur in zip(path[0], path[0][1:]): + swap_operations[-1].append((prev, cur)) + + # Add swaps operations for the second half of the path + for prev, cur in zip(path[1][::-1], path[1][-2::-1]): + swap_operations[-1].append((prev, cur)) + + return swap_operations + + +# ============================================================================== + + +class PathContainer: + """ + Container for paths through a graph. + + Allows the resolution of conflict points such as crossings and + intersections. + + Attributes: + paths (dict) : list of paths currently held by a path container indexed + by a unique ID + crossings (dict) : dictionary of crossing points indexed by path ID + """ + + class _Crossing: + __slots__ = ['path_id', 'overlap'] + + def __init__(self, path_id, overlap): + self.path_id, self.overlap = path_id, overlap + + def __eq__(self, other): + if isinstance(other, self.__class__): + return (self.path_id, self.overlap) == (other.path_id, + other.overlap) + if isinstance(other, list): + return self.overlap == other + if isinstance(other, int): + return self.overlap[0] == other + raise NotImplementedError("Invalid comparison") + + def __str__(self): + return '{} {}'.format(self.path_id, self.overlap) + + def __repr__(self): + return 'Crossing({}, {})'.format(self.path_id, self.overlap) + + def __init__(self): + self.paths = {} + self.crossings = {} + self._path_id = 0 + + ################################################################# + # Methods querying information about the state of the container # + 
################################################################# + + def get_all_nodes(self): + """ + Return a list of all nodes that are part of some path. + + Returns: + A set of nodes that are part of at least one path. + """ + return set(itertools.chain.from_iterable(self.paths.values())) + + def get_all_paths(self): + """ + Return a list of all the path contained in the container. + + Returns: + A list of paths (list of list of ints) + """ + return [v for _, v in self.paths.items()] + + def has_interaction(self, node0, node1): + """ + Check if a path within the container already generate an interaction + + Args: + node0 (int) : An endnode of a path + node1 (int) : An endnode of a path + + Returns: + True or False depending on whether the container has a path linking + node0 to node1 + """ + for path in self.paths.values(): + if frozenset((node0, node1)) == frozenset((path[0], path[-1])): + return True + return False + + def max_crossing_order(self): + """ + Return the order of the highest order intersection. + + The intersection order is given by the number of paths that consider a + particular crossing point as an intersection + + Returns: + An int + """ + crossing_orders = list( + itertools.chain.from_iterable( + [[len(c.overlap) for c in crossing] + for crossing in self.crossings.values()])) + if crossing_orders: + return max(crossing_orders) + return 0 + + ################################################## + # Methods modifying the content of the container # + ################################################## + + def clear(self): + """ + Reset the state of the container. + """ + self.paths = {} + self.crossings = {} + self._path_id = 0 + + def try_add_path(self, new_path): + """ + Try adding a path to the path container. 
+ + Args: + new_path (list) : path to add to the container + + Returns: + True if the path could be added to the container, False otherwise + """ + # Prevent adding a path to the container if the start or the end + # qubit is already interacting with another one + # Also make sure the new path does not contain interacting qubits + for path in self.paths.values(): + if path[0] in new_path or path[-1] in new_path: + return False + + new_crossings = [] + for idx, path in self.paths.items(): + path_overlap = [node for node in new_path if node in path] + if len(path_overlap) > 1: + return False + if len(path_overlap) == 1: + new_crossings.append( + PathContainer._Crossing(idx, path_overlap)) + + self.paths[self._path_id] = new_path + self.crossings[self._path_id] = new_crossings + for crossing in new_crossings: + self.crossings[crossing.path_id].append( + PathContainer._Crossing(self._path_id, crossing.overlap)) + self._path_id += 1 + return True + + def remove_path_by_id(self, path_id): + """ + Remove a path from the path container given its ID. + + Args: + path_id (int) : ID of path to remove + + Raises: + KeyError if path_id is not valid + """ + if path_id not in self.paths: + raise KeyError(path_id) + self.crossings = { + k: [i for i in v if i.path_id != path_id] + for k, v in self.crossings.items() if k != path_id + } + del self.paths[path_id] + + def remove_crossing_of_order_higher_than(self, order): + """ + Remove paths that have crossings with order above a certain threshold. 
+ + Args: + order (int) : Maximum allowed order of crossing + """ + number_of_crossings_per_path = { + path_id: len([c for c in crossing if len(c.overlap) > order]) + for path_id, crossing in self.crossings.items() + } + + path_id_list = [ + x for y, x in sorted( + zip(number_of_crossings_per_path.values(), + number_of_crossings_per_path.keys())) if y + ] + + while path_id_list and self.max_crossing_order() > order: + path_id = path_id_list.pop() + self.remove_path_by_id(path_id) + + def swap_paths(self, path_id1, path_id2): + """ + Swap two path within the path container. + + Args: + path_id1 (int) : ID of first path + path_id2 (int) : ID of second path + """ + + if path_id1 not in self.paths: + raise KeyError(path_id1) + if path_id2 not in self.paths: + raise KeyError(path_id2) + + for crossing_list in self.crossings.values(): + for crossing in crossing_list: + if path_id1 == crossing.path_id: + crossing.path_id = path_id2 + elif path_id2 == crossing.path_id: + crossing.path_id = path_id1 + + self.crossings[path_id2], self.crossings[path_id1] = self.crossings[ + path_id1], self.crossings[path_id2] + self.paths[path_id2], self.paths[path_id1] = self.paths[ + path_id1], self.paths[path_id2] + + def generate_swaps(self): + """ + Generate a list of swaps to execute as many paths as possible. + + Returns: + A list of swap operations (tuples) + """ + # TODO: think about merging paths + # TODO: maybe apply gates in the middle of the swaps + + max_crossing_order = self.max_crossing_order() + + split_paths = self._split_paths() + + if max_crossing_order > 0: + # Some paths have first order crossing points (ie. at most one + # point is common). 
Try to re-arrange the path splitting to remove + # the intersection points + self._solve_first_order_intersections(split_paths) + + # By this point, we should have solved all intersections + + return list(itertools.chain.from_iterable(_return_swaps(split_paths))) + + def _split_paths(self): + """ + Split all paths into pairs of equal or almost equal length sub-paths. + + Returns: + Dictionary indexed by path ID containing 2-tuples with each path + halves + """ + split_paths = {} + for path_id, path in self.paths.items(): + idx = len(path) >> 1 + split_paths[path_id] = (path[:idx], path[idx:]) + return split_paths + + def _solve_first_order_intersections(self, split_paths): + """ + Solve all first order intersections. + + The intersections may be "solved" in two different manners: + - Sub-path split are modified to transform intersections in simple + crossings + - Paths are removed from the container + + Pre-conditions: + self.max_crossing_order() == 1 + + Args: + split_paths (dict): Dictionary indexed by path ID containing + 2-tuples of path halvesx + """ + # Get all the intersections + intersections = _find_first_order_intersections( + self.crossings, split_paths) + + # Get a list of the intersection nodes sorted by intersection order and + # total number of points of all paths for that particular intersection + def intersection_sort(crossing): + order = len(crossing[0]) + number_of_points = sum( + [len(self.paths[path_id]) + for path_id in crossing[0]]) - order + 1 + return (order, number_of_points) + + intersection_node_list = [ + x for _, x in sorted( + zip(intersections.values(), intersections.keys()), + key=intersection_sort) + ] + + # and process them + while intersection_node_list: + intersection_node = intersection_node_list[-1] + node_is_not_crossing = { + path_id: ([ + node not in self.crossings[path_id] + for node in split_paths[path_id][0] + ], [ + node not in self.crossings[path_id] + for node in split_paths[path_id][1] + ]) + for path_id in 
intersections[intersection_node] + } + + if len(intersections[intersection_node]) == 1: + # This crossing is an intersection only for one path + # -> only need to make sure that the other paths gets + # processed first when generating the swaps + path_id = list(intersections[intersection_node])[0] + + for crossing in self.crossings[path_id]: + if crossing.overlap[0] == intersection_node: + other_path_id = crossing.path_id + if path_id < other_path_id: + self.swap_paths(path_id, other_path_id) + split_paths[0], split_paths[1] = split_paths[ + 1], split_paths[0] + del intersections[intersection_node] + del intersection_node_list[-1] + else: + # This crossing is an intersection for multiple paths + # -> find all paths concerned with this crossing + path_id_list = [ + x for _, x in sorted( + zip([ + len(self.paths[i]) + for i in intersections[intersection_node] + ], intersections[intersection_node])) + ] + + # TODO: multiple passes if failure to find an optimal solution + path_id1 = path_id_list.pop() + path_id2 = path_id_list.pop() + + solved = _try_solve_intersection( + intersection_node, + *(split_paths[path_id1] + node_is_not_crossing[path_id1])) + + if not solved: + solved = _try_solve_intersection( + intersection_node, + *(split_paths[path_id2] + + node_is_not_crossing[path_id2])) + + if not solved: + # Last resort: delete one path + path_id_min, path_id_max = sorted([path_id1, path_id2]) + del split_paths[path_id_max] + del node_is_not_crossing[path_id_max] + self.remove_path_by_id(path_id_max) + node_is_not_crossing[path_id_min] = ([ + node not in self.crossings[path_id_min] + for node in split_paths[path_id_min][0] + ], [ + node not in self.crossings[path_id_min] + for node in split_paths[path_id_min][1] + ]) + + intersections = _find_first_order_intersections( + self.crossings, split_paths) + intersection_node_list = [ + x for _, x in sorted( + zip(intersections.values(), intersections.keys()), + key=intersection_sort) + ] diff --git 
a/projectq/cengines/_graph_path_container_test.py b/projectq/cengines/_graph_path_container_test.py new file mode 100644 index 000000000..8054bb452 --- /dev/null +++ b/projectq/cengines/_graph_path_container_test.py @@ -0,0 +1,579 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for projectq.cengines._graph_path_container.py.""" +import pytest +from projectq.cengines._graph_path_container import PathContainer, _find_first_order_intersections + + +def test_path_container_crossing_class(): + Crossing = PathContainer._Crossing + crossing_list = [Crossing(0, [1]), Crossing(1, [1]), Crossing(2, [2])] + + assert Crossing(0, [1]) == Crossing(0, [1]) + assert Crossing(0, [1]) != Crossing(1, [1]) + assert Crossing(0, [1]) != Crossing(0, [0, 1]) + assert Crossing(0, [0]) != Crossing(1, [0, 1]) + + assert [0, 1] == Crossing(0, [0, 1]) + assert [0, 1] == Crossing(1, [0, 1]) + assert [0, 1] != Crossing(0, [0]) + assert [0, 1] != Crossing(1, [0]) + + assert Crossing(0, [1]) in crossing_list + assert [0] not in crossing_list + assert [1] in crossing_list + + assert str(Crossing(0, [1])) == "{} {}".format(0, [1]) + assert repr(Crossing(0, [1])) == "Crossing({}, {})".format(0, [1]) + + with pytest.raises(NotImplementedError): + assert "" == Crossing(0, [1]) + + +def test_path_container_has_interaction(): + container = PathContainer() + + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + 
container.paths = path_dict + + assert container.has_interaction(4, 7) + assert container.has_interaction(7, 4) + assert container.has_interaction(8, 15) + assert container.has_interaction(15, 8) + assert not container.has_interaction(4, 8) + assert not container.has_interaction(8, 4) + + +def test_path_container_add_path(): + Crossing = PathContainer._Crossing + container = PathContainer() + + assert container.try_add_path([4, 5, 6, 7]) + assert container.paths == {0: [4, 5, 6, 7]} + assert container.crossings == {0: []} + + assert not container.try_add_path([4, 8, 12]) + assert not container.try_add_path([0, 1, 2, 3, 7]) + assert not container.try_add_path([1, 5, 6, 10]) + assert container.paths == {0: [4, 5, 6, 7]} + assert container.crossings == {0: []} + + assert container.try_add_path([1, 5, 9, 13]) + assert container.paths == {0: [4, 5, 6, 7], 1: [1, 5, 9, 13]} + assert container.crossings == { + 0: [Crossing(1, [5])], + 1: [Crossing(0, [5])] + } + + assert container.try_add_path([10, 6, 9, 14]) + assert container.paths == { + 0: [4, 5, 6, 7], + 1: [1, 5, 9, 13], + 2: [10, 6, 9, 14] + } + assert container.crossings == { + 0: [Crossing(1, [5]), Crossing(2, [6])], + 1: [Crossing(0, [5]), Crossing(2, [9])], + 2: [Crossing(0, [6]), Crossing(1, [9])], + } + + +def test_path_container_remove_path(): + Crossing = PathContainer._Crossing + container = PathContainer() + assert container.try_add_path([4, 5, 6, 7]) + assert container.try_add_path([1, 5, 9, 13]) + assert container.try_add_path([8, 9, 10, 11, 15]) + + with pytest.raises(KeyError): + container.remove_path_by_id(10) + + container.remove_path_by_id(2) + assert container.paths == {0: [4, 5, 6, 7], 1: [1, 5, 9, 13]} + assert container.crossings == { + 0: [Crossing(1, [5])], + 1: [Crossing(0, [5])] + } + + container.remove_path_by_id(0) + assert container.paths == {1: [1, 5, 9, 13]} + assert container.crossings == {1: []} + + assert container.try_add_path([8, 9, 10, 11, 15]) + assert container.paths == {1: [1, 
5, 9, 13], 3: [8, 9, 10, 11, 15]} + assert container.crossings == { + 1: [Crossing(3, [9])], + 3: [Crossing(1, [9])] + } + + +def test_path_container_swap_paths(): + Crossing = PathContainer._Crossing + container = PathContainer() + + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + with pytest.raises(KeyError): + container.swap_paths(10, 0) + with pytest.raises(KeyError): + container.swap_paths(0, 10) + + container.swap_paths(0, 1) + path_dict[0], path_dict[1] = path_dict[1], path_dict[0] + assert container.paths == path_dict + assert container.crossings == { + 0: [Crossing(1, [5]), Crossing(2, [9])], + 1: [Crossing(0, [5])], + 2: [Crossing(0, [9])] + } + + path_dict[3] = [20, 21, 6, 22, 23, 10, 24, 25] + assert container.try_add_path(path_dict[3]) + + assert container.paths == path_dict + assert container.crossings == { + 0: [Crossing(1, [5]), Crossing(2, [9])], + 1: [Crossing(0, [5]), Crossing(3, [6])], + 2: [Crossing(0, [9]), Crossing(3, [10])], + 3: [Crossing(1, [6]), Crossing(2, [10])] + } + + container.swap_paths(1, 3) + path_dict[1], path_dict[3] = path_dict[3], path_dict[1] + assert container.paths == path_dict + + assert container.crossings == { + 0: [Crossing(3, [5]), Crossing(2, [9])], + 1: [Crossing(3, [6]), Crossing(2, [10])], + 2: [Crossing(0, [9]), Crossing(1, [10])], + 3: [Crossing(0, [5]), Crossing(1, [6])] + } + + +def test_path_container_clear(): + container = PathContainer() + assert container.try_add_path([4, 5, 6, 7]) + assert container.try_add_path([1, 5, 9, 13]) + assert container.try_add_path([8, 9, 10, 11, 15]) + + assert container.paths + assert container.crossings + + container.clear() + assert not container.paths + assert not container.crossings + + +def test_path_container_max_order(): + container = PathContainer() + assert container.max_crossing_order() == 0 + + assert container.try_add_path([4, 5, 
6, 7]) + assert container.max_crossing_order() == 0 + + assert container.try_add_path([1, 5, 9, 13]) + assert container.max_crossing_order() == 1 + + +def test_path_container_discard_paths(): + Crossing = PathContainer._Crossing + container = PathContainer() + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + container.remove_crossing_of_order_higher_than(1) + assert container.max_crossing_order() == 1 + assert container.paths == path_dict + assert container.crossings == { + 0: [Crossing(1, [5])], + 1: [Crossing(0, [5]), Crossing(2, [9])], + 2: [Crossing(1, [9])] + } + + container.remove_crossing_of_order_higher_than(0) + del path_dict[1] + assert container.max_crossing_order() == 0 + assert container.paths == path_dict + assert container.crossings == {0: [], 2: []} + + +def test_path_container_get_path_data(): + container = PathContainer() + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + + assert container.get_all_paths() == [[4, 5, 6, 7], [1, 5, 9, 13], + [8, 9, 10, 11, 15]] + assert container.get_all_nodes() == set( + [1, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15]) + + +def test_path_container_find_first_order_intersections(): + Crossing = PathContainer._Crossing + container = PathContainer() + + path_dict = {0: [0, 1, 2, 10, 11, 12], 1: [3, 1, 4], 2: [5, 6, 7]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + assert container.crossings == { + 0: [Crossing(1, [1])], + 1: [Crossing(0, [1])], + 2: [] + } + assert _find_first_order_intersections(container.crossings, + container._split_paths()) == { + 1: {1} + } + + container.remove_path_by_id(0) + del path_dict[0] + path_dict[3] = [0, 1, 2, 10] + assert container.try_add_path(path_dict[3]) + assert container.paths == 
path_dict + assert _find_first_order_intersections(container.crossings, + container._split_paths()) == { + 1: {1, 3}, + } + + path_dict[4] = [11, 6, 12, 14, 15, 16] + assert container.try_add_path(path_dict[4]) + assert container.paths == path_dict + assert _find_first_order_intersections(container.crossings, + container._split_paths()) == { + 1: {1, 3}, + 6: {2} + } + + path_dict[5] = [21, 1, 22, 24, 25, 26] + assert container.try_add_path(path_dict[5]) + assert container.paths == path_dict + assert _find_first_order_intersections(container.crossings, + container._split_paths()) == { + 1: {1, 3}, + 6: {2} + } + + path_dict[6] = [30, 15, 32, 33, 34, 35] + assert container.try_add_path(path_dict[6]) + assert container.paths == path_dict + assert _find_first_order_intersections(container.crossings, + container._split_paths()) == { + 1: {1, 3}, + 6: {2} + # The 15 node should not appear + } + + +def test_path_container_no_intersection(): + container = PathContainer() + path_dict = {0: [0, 1, 2, 3], 1: [4, 5, 6], 2: [7, 8, 9, 10, 11]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (3, 2), (6, 5), (7, 8), + (11, 10), (10, 9)] + assert container.paths == path_dict + + +def test_path_container_1_intersection_single_intersection(): + container = PathContainer() + + # 3 + # | + # 0 - 1 - 2 + # | 10 - 11 - 12 + # 4 + # NB: intersection at node 1 + path_dict = {0: [0, 1, 2], 1: [3, 1, 4], 2: [10, 11, 12]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(2, 1), (12, 11)] + # Make sure that path 1 gets deleted or we risk running an infinite loop + del path_dict[1] + assert container.paths == path_dict + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 1 + container.clear() + path_dict = {0: [0, 1, 2, 3], 1: [4, 1, 5], 2: [10, 11, 
12]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (5, 1), (12, 11)] + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 1 + container.clear() + path_dict = {0: [4, 1, 5], 1: [0, 1, 2, 3], 2: [10, 11, 12]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (5, 1), (12, 11)] + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 2 + container.clear() + path_dict = {0: [0, 1, 2, 3], 1: [4, 2, 5], 2: [10, 11, 12]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(3, 2), (2, 1), (5, 2), (12, 11)] + + # 9 + # | + # 0 - 1 - 2 - 3 - 4 - 5 + # | + # 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + container.clear() + path_dict = {0: [9, 1, 10, 11], 1: [0, 1, 2, 3, 4, 5], 2: [6, 7, 8]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + container.generate_swaps() + path_dict[0], path_dict[1] = path_dict[1], path_dict[0] + assert container.paths == path_dict + + +def test_path_container_1_intersection_double_crossing(): + container = PathContainer() + + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # NB: intersection at node 2 + container.clear() + path_dict = {0: [0, 1, 2, 3, 4, 5], 1: [6, 2, 8], 2: [7, 4, 9, 10, 11, 12]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(5, 4), (4, 3), (3, 2), (2, 1), (8, + 2), + (7, 4), (4, 9), (12, 11), (11, 10)] + + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # 
NB: intersection at node 3 + container.clear() + path_dict = {0: [0, 1, 2, 3, 4, 5], 1: [7, 3, 9], 2: [6, 1, 8, 10, 11, 12]} + + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (2, 3), (3, 4), (9, + 3), + (6, 1), (1, 8), (12, 11), (11, 10)] + + # 4 5 4 5 + # | | | | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 or 0 - 1 - 2 - 3 + # | | | | + # 6 7 6 7 + # NB: intersection at nodes 1 & 2 + container.clear() + path_dict = { + 0: [0, 1, 2, 3], + 1: [4, 1, 6], + 2: [5, 2, 7], + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + swaps = container.generate_swaps() + assert swaps == [(0, 1), (1, 2), (6, 1)] \ + or swaps == [(3, 2), (2, 1), (7, 2)] + assert container.paths[0] == path_dict[0] + assert (1 in container.paths and container.paths[1] == path_dict[1]) \ + + (2 in container.paths and container.paths[2] == path_dict[2]) == 1 + + # 5 6 6 + # | | | + # 0 - 1 - 2 - 3 - 4 -> 0 - 1 - 2 - 3 - 4 + # | | | + # 7 8 8 + # NB: intersection at nodes 1 & 3 + container.clear() + path_dict = { + 0: [0, 1, 2, 3, 4], + 1: [5, 1, 7], + 2: [6, 3, 8], + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + swaps = container.generate_swaps() + assert container.generate_swaps() == [(0, 1), (4, 3), (3, 2), (8, 3)] + del path_dict[1] + assert container.paths == path_dict + + # 5 + # | + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 + # | | + # 8 9 + # NB: intersection at nodes 1 & 3 + container.clear() + path_dict = { + 0: [0, 1, 2, 3, 4], + 1: [5, 6, 1, 8], + 2: [7, 2, 9], + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (2, 3), (5, 6), (8, + 1), + (9, 2)] + assert container.paths == path_dict + + +def 
test_path_container_1_intersection_triple_crossing(): + container = PathContainer() + + # 9 13 - 14 - 15 + # | / + # 0 - 1 - 2 - 3 - 4 - 5 + # / | + # 12 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + container.clear() + path_dict = { + 0: [9, 1, 10, 11], + 1: [0, 1, 2, 3, 4, 5], + 2: [6, 7, 8], + 3: [12, 1, 13, 14, 15, 16] + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + container.generate_swaps() + path_dict[1], path_dict[3], path_dict[0] \ + = path_dict[0], path_dict[1], path_dict[3] + assert container.paths == path_dict + + # 6 7 8 6 8 + # | | | | | + # 0 - 1 - 2 - 3 - 4 - 5 -> 0 - 1 - 2 - 3 - 4 - 5 + # | | | | | + # 9 10 11 8 10 + # | | | | + # 12 13 12 13 + # | | | | + # 14 15 14 15 + # | | | | + # 16 17 16 17 + # NB: intersection at node 3 + container.clear() + path_dict = { + 0: [0, 1, 2, 3, 4, 5], + 1: [6, 1, 9, 12, 14, 16], + 2: [7, 3, 10], + 3: [8, 4, 11, 13, 15, 17] + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (5, 4), (4, 3), (6, + 1), + (1, 9), (16, 14), (14, 12), (8, 4), + (4, 11), (17, 15), (15, 13)] + del path_dict[2] + assert container.paths == path_dict + + # 4 5 10 - 11 - 12 4 10 - 11 - 12 + # | / | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 + # / | | + # 6 7 7 + # NB: intersection at node 1 + container.clear() + path_dict = {0: [0, 1, 2, 3], 1: [4, 1, 7], 2: [10, 11, 12], 3: [5, 1, 6]} + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + assert container.generate_swaps() == [(0, 1), (1, 2), (7, 1), (12, 11)] + + +@pytest.mark.xfail +def test_path_container_1_intersection_triple_crossing_complex(): + container = PathContainer() + # 4 + # | + # 0 - 1 - 2 - 3 + # | + # 5 - 6 - 7 + # | + # 8 + # NB: intersection at nodes 1 & 3 + container.clear() + path_dict = { + 0: [0, 1, 2, 3], + 1: [4, 
1, 6, 8], + 2: [5, 6, 7], + } + for _, path in path_dict.items(): + assert container.try_add_path(path) + assert container.paths == path_dict + + # Ideally this situation should be solved without deleting any paths + assert container.generate_swaps() == [(0, 1), (1, 2), (8, 6), (6, 1), (7, + 6)] + path_dict[1], path_dict[2] = path_dict[2], path_dict[1] + assert container.paths == path_dict diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index c7cbd8340..d4ec1463b 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -22,10 +22,8 @@ uses Swap gates in order to move qubits next to each other. """ from copy import deepcopy -import itertools import random -import numpy as np import networkx as nx from projectq.cengines import (BasicMapperEngine, return_swap_depth) @@ -33,6 +31,9 @@ from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, FlushGate, Swap) from projectq.types import WeakQubitRef +from projectq.cengines._graph_path_container import PathContainer + +# ============================================================================== class PathCacheExhaustive(): @@ -95,6 +96,9 @@ def add_path(self, path): path[end]))] = path[start:end + 1] +# ============================================================================== + + class GraphMapperError(Exception): """Base class for all exceptions related to the GraphMapper.""" @@ -131,7 +135,7 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, Pre-conditions: len(active_qubits) <= num_qubits == len(graph) """ - #pylint: disable=unused-argument + # pylint: disable=unused-argument mapping = deepcopy(current_mapping) currently_used_nodes = sorted([v for _, v in mapping.items()]) available_ids = [n for n in graph if n not in currently_used_nodes] @@ -141,41 +145,6 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, return mapping -def _iterate_with_previous(some_iterable): - prevs, 
items = itertools.tee(some_iterable, 2) - items = itertools.islice(items, 1, None) - prevs = itertools.islice(prevs, len(some_iterable) - 1) - return zip(prevs, items) - - -def _return_swaps(paths): - """ - Generate a list of swap ops based on a list of paths through the graph. - - Args: - paths (list): List of paths through the graph between pairs - of qubits that need to interact - - Returns: A list of swap operations (tuples of logical qubit ids) - required to move the qubits to the correct locations - """ - swap_operations = [] - - for path in paths: - swap_operations.append([]) - path_for_qb0, path_for_qb1 = np.array_split(path, 2) - - # Add swaps operations for first half of the path - for prev, cur in _iterate_with_previous(path_for_qb0): - swap_operations[-1].append((prev, cur)) - - # Add swaps operations for the second half of the path - for prev, cur in _iterate_with_previous(path_for_qb1[::-1]): - swap_operations[-1].append((prev, cur)) - - return swap_operations - - class GraphMapper(BasicMapperEngine): """ Mapper to an arbitrary connected graph. 
@@ -319,10 +288,7 @@ def _process_commands(self): Returns: A list of paths through the graph to move some qubits and have them interact """ - # TODO: need to think about merging paths and applying gates in the - # middle of the swaps if possible - - paths = [] + paths = PathContainer() allocated_qubits = deepcopy(self._currently_allocated_ids) active_qubits = deepcopy(self._currently_allocated_ids) @@ -361,29 +327,18 @@ def _process_commands(self): # Process a two qubit gate: elif len(qubit_ids) == 2: - path = self._process_two_qubit_gate_dumb( - qubit0=qubit_ids[0], - qubit1=qubit_ids[1], - active_qubits=active_qubits) - - paths_start = [p[0] for p in paths] - paths_end = [p[-1] for p in paths] - - if path \ - and not any([p in paths_start for p in path]) \ - and not any([p in paths_end for p in path]): - paths += [path] - # Maybe a bit too conservative: remove all qubits - # of the path from the active qubits list - # This effectively only allows non-intersecting paths - for backend_id in path: - if backend_id in self._reverse_current_mapping: - active_qubits.discard( - self._reverse_current_mapping[backend_id]) + # At least one qubit is not an active qubit: + if qubit_ids[0] not in active_qubits \ + or qubit_ids[1] not in active_qubits: + active_qubits.discard(qubit_ids[0]) + active_qubits.discard(qubit_ids[1]) + elif not self._process_two_qubit_gate_dumb( + qubit0=qubit_ids[0], qubit1=qubit_ids[1], paths=paths): + break return paths - def _process_two_qubit_gate_dumb(self, qubit0, qubit1, active_qubits): + def _process_two_qubit_gate_dumb(self, qubit0, qubit1, paths): """ Process a two qubit gate. 
@@ -394,32 +349,41 @@ def _process_two_qubit_gate_dumb(self, qubit0, qubit1, active_qubits): Args: qubit0 (int): qubit.id of one of the qubits qubit1 (int): qubit.id of the other qubit - active_qubits (set): contains all qubit ids which for which - gates can be applied in this cycle before - the swaps Returns: A path through the graph (can be empty) """ - # At least one qubit is not an active qubit: - if qubit0 not in active_qubits or qubit1 not in active_qubits: - active_qubits.discard(qubit0) - active_qubits.discard(qubit1) - return [] - # Path is given using graph nodes (ie. mapped ids) # If we come here, the two nodes can't be connected on the graph or the # command would have been applied already node0 = self._current_mapping[qubit0] node1 = self._current_mapping[qubit1] + if paths.has_interaction(node0, node1) \ + or self.graph.has_edge(node0, node1): + return True + # Qubits are both active but not connected via an edge if self.enable_caching: if self._path_cache.has_path(node0, node1): - return self._path_cache.get_path(node0, node1) - path = nx.shortest_path(self.graph, source=node0, target=node1) - self._path_cache.add_path(path) - return path - return nx.shortest_path(self.graph, source=node0, target=node1) + path = self._path_cache.get_path(node0, node1) + else: + path = nx.shortest_path(self.graph, source=node0, target=node1) + self._path_cache.add_path(path) + else: + if self.graph.has_edge(node0, node1): + path = [node0, node1] + else: + path = nx.shortest_path(self.graph, source=node0, target=node1) + + if path: + # Makes sure that one qubit will interact with at most one other + # qubit before forcing the generation of a swap + # Also makes sure that path intersection (if any) are possible + return paths.try_add_path(path) + + # Technically, since the graph is connected, we should always be able + # to find a path between any two nodes. But just in case... 
+ return False # pragma: no cover def _send_possible_commands(self): """ @@ -469,14 +433,12 @@ def _send_possible_commands(self): new_stored_commands.append(cmd) else: send_gate = True - logical_ids = set() backend_ids = set() for qureg in cmd.all_qubits: for qubit in qureg: if qubit.id not in active_ids: send_gate = False break - logical_ids.add(qubit.id) backend_ids.add(self._current_mapping[qubit.id]) # Check that mapped ids are connected by an edge on the graph @@ -514,7 +476,7 @@ def _run(self): if not self._stored_commands: return - swaps = _return_swaps(paths) + swaps = paths.generate_swaps() if swaps: # first mapping requires no swaps backend_ids_used = { @@ -522,15 +484,10 @@ def _run(self): for logical_id in self._currently_allocated_ids } - # Get a list of all backend ids we require to perform the swaps - required_ids = { - n - for n in list(itertools.chain.from_iterable(paths)) - } - # Get a list of the qubits we need to allocate just to perform the # swaps - not_allocated_ids = set(required_ids).difference(backend_ids_used) + not_allocated_ids = set( + paths.get_all_nodes()).difference(backend_ids_used) # Allocate all mapped qubit ids (which are not already allocated, # i.e., contained in self._currently_allocated_ids) @@ -541,14 +498,27 @@ def _run(self): engine=self, gate=AllocateQubitGate(), qubits=([qb], )) self.send([cmd]) + # Calculate reverse internal mapping + new_internal_mapping = deepcopy(self._reverse_current_mapping) + + # Add missing entries with invalid id to be able to process the + # swaps operations + for backend_id in not_allocated_ids: + new_internal_mapping[backend_id] = -1 + # Send swap operations to arrive at the new mapping - swaps = list(itertools.chain.from_iterable(swaps)) for bqb0, bqb1 in swaps: q0 = WeakQubitRef(engine=self, idx=bqb0) q1 = WeakQubitRef(engine=self, idx=bqb1) cmd = Command(engine=self, gate=Swap, qubits=([q0], [q1])) self.send([cmd]) + # Update internal mapping based on swap operations + 
new_internal_mapping[bqb0], \ + new_internal_mapping[bqb1] = \ + new_internal_mapping[bqb1], \ + new_internal_mapping[bqb0] + # Register statistics: self.num_mappings += 1 depth = return_swap_depth(swaps) @@ -561,21 +531,6 @@ def _run(self): else: self.num_of_swaps_per_mapping[len(swaps)] += 1 - # Calculate reverse internal mapping - new_internal_mapping = deepcopy(self._reverse_current_mapping) - - # Add missing entries with invalid id to be able to process the - # swaps operations - for backend_id in not_allocated_ids: - new_internal_mapping[backend_id] = -1 - - # Update internal mapping based on swap operations - for bqb0, bqb1 in swaps: - new_internal_mapping[bqb0], \ - new_internal_mapping[bqb1] = \ - new_internal_mapping[bqb1], \ - new_internal_mapping[bqb0] - # Calculate the list of "helper" qubits that need to be deallocated # and remove invalid entries not_needed_anymore = [] diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 517fb47ec..a153fee76 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -331,30 +331,33 @@ def test_send_possible_commands_allocation_no_active_qubits( qb1 = WeakQubitRef(engine=None, idx=1) qb2 = WeakQubitRef(engine=None, idx=2) qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) - cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) - cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], )) - cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], )) - cmd3 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]) - cmd4 = Command(engine=None, gate=Allocate, qubits=([qb3], )) - cmd5 = Command(engine=None, gate=X, qubits=([qb3], )) - cmd6 = Command(engine=None, gate=Deallocate, qubits=([qb3], )) + cmd_list = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + 
Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), + Command(engine=None, gate=X, qubits=([qb1], ), controls=[qb2]), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb2], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + ] qb_flush = WeakQubitRef(engine=None, idx=-1) cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - mapper._stored_commands = [ - cmd0, cmd1, cmd2, cmd3, cmd4, cmd5, cmd6, cmd_flush - ] + mapper._stored_commands = cmd_list + [cmd_flush] mapper._run() - assert len(mapper._stored_commands) == 4 - assert mapper._stored_commands[0] == cmd4 - assert mapper._stored_commands[1] == cmd5 - assert mapper._stored_commands[2] == cmd6 - mapper._run() - assert len(mapper._stored_commands) == 1 - assert mapper._stored_commands[0] == cmd_flush + assert len(mapper._stored_commands) == 8 + # NB: after swap, can actually send Deallocate to qb0 + assert mapper._stored_commands[0:6] == cmd_list[4:10] + assert mapper._stored_commands[6] == cmd_list[11] def test_send_possible_commands_deallocate(simple_mapper): @@ -445,7 +448,7 @@ def test_run_and_receive(simple_graph, simple_mapper): Command(None, X, qubits=([qb[5]], ), controls=[qb[3]]), Command(None, X, qubits=([qb[3]], ), controls=[qb[4]]), Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), - Command(None, X, qubits=([qb[4]], ), controls=[qb[6]]) + Command(None, X, qubits=([qb[4]], ), controls=[qb[6]]), ] deallocate_cmds = [ Command(engine=None, gate=Deallocate, qubits=([qb[1]], )) @@ -560,7 +563,7 @@ def test_allocate_too_many_qubits(simple_mapper): qb, allocate_cmds = allocate_all_qubits_cmd(mapper) qb.append(WeakQubitRef(engine=None, idx=len(qb))) - allocate_cmds.append( + 
allocate_cmds.append( Command(engine=None, gate=Allocate, qubits=([qb[-1]], ))) qb_flush = WeakQubitRef(engine=None, idx=-1) @@ -702,21 +705,28 @@ def test_check_that_local_optimizer_doesnt_merge(simple_graph): @pytest.mark.parametrize("enable_caching", [False, True]) -def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper, - enable_caching): +def test_3x3_grid_multiple_simultaneous_non_intersecting_paths( + grid33_graph_mapper, enable_caching): mapper, backend = grid33_graph_mapper mapper.enable_caching = enable_caching qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + # 0 - 1 - 2 + # | | | + # 3 - 4 - 5 + # | | | + # 6 - 7 - 8 + cmd0 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) cmd1 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) cmd2 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) + cmd3 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) qb_flush = WeakQubitRef(engine=None, idx=-1) cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - mapper.receive(allocate_cmds + [cmd0, cmd1, cmd2, cmd_flush]) + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd2, cmd3, cmd_flush]) assert not mapper._stored_commands assert mapper.num_mappings == 1 assert mapper.depth_of_swaps == {1: 1} @@ -730,6 +740,16 @@ def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper, 6: 3, 7: 4, 8: 5 + } or mapper.current_mapping == { + 0: 3, + 1: 4, + 2: 5, + 3: 0, + 4: 1, + 5: 2, + 6: 6, + 7: 7, + 8: 8 } cmd3 = Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]) @@ -750,6 +770,16 @@ def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper, 6: 3, 7: 5, 8: 4 + } or mapper.current_mapping == { + 0: 4, + 1: 3, + 2: 5, + 3: 1, + 4: 0, + 5: 2, + 6: 7, + 7: 6, + 8: 8 } if enable_caching: @@ -766,3 +796,123 @@ def test_3x3_grid_multiple_simultaneous_paths(grid33_graph_mapper, assert not mapper._path_cache.has_path(4, 5) assert not mapper._path_cache.has_path(6, 7) assert not mapper._path_cache.has_path(7, 8) + + 
+@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_intersecting_paths_impossible( + grid33_graph_mapper, enable_caching): + mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching + + # 0 - 1 - 2 + # | | | + # 3 - 4 - 5 + # | | | + # 6 - 7 - 8 + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + cmd0 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) + cmd1 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 2 + assert mapper.depth_of_swaps == {1: 2} + assert mapper.current_mapping == { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 7, + 5: 4, + 6: 6, + 7: 5, + 8: 8 + } or mapper.current_mapping == { + 0: 0, + 1: 3, + 2: 2, + 3: 4, + 4: 1, + 5: 5, + 6: 6, + 7: 7, + 8: 8 + } + + if enable_caching: + assert mapper._path_cache._cache + assert mapper._path_cache.has_path(1, 7) + assert mapper._path_cache.has_path(3, 5) + + mapper.current_mapping = dict(enumerate(range(len(qb)))) + + cmd2 = Command(None, X, qubits=([qb[7]], ), controls=[qb[1]]) + cmd3 = Command(None, X, qubits=([qb[1]], ), controls=[qb[8]]) + mapper.receive(allocate_cmds + [cmd2, cmd3, cmd_flush]) + assert not mapper._stored_commands + assert mapper.num_mappings == 4 + assert mapper.depth_of_swaps == {1: 4} + + if enable_caching: + assert mapper._path_cache._cache + assert mapper._path_cache.has_path(1, 7) + assert mapper._path_cache.has_path(3, 5) + assert mapper._path_cache.has_path(1, 8) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( + grid33_graph_mapper, enable_caching): + mapper, backend = grid33_graph_mapper + mapper.enable_caching = enable_caching + + # 0 - 1 - 2 + # | | | + # 3 - 4 - 
5 + # | | | + # 6 - 7 - 8 + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + # NB. when generating the swaps for the paths through the graph, the path + # 0 -> 7 needs to be performed *before* the one 3 -> 5 + cmd0 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) + cmd1 = Command(None, X, qubits=([qb[0]], ), controls=[qb[7]]) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) + + assert not mapper._stored_commands + assert mapper.num_mappings == 1 + assert mapper.depth_of_swaps == {3: 1} + assert mapper.current_mapping == { + 0: 0, + 1: 3, + 2: 2, + 3: 4, + 4: 7, + 5: 5, + 6: 6, + 7: 1, + 8: 8 + } or mapper.current_mapping == { + 0: 0, + 1: 4, + 2: 2, + 3: 3, + 4: 5, + 5: 7, + 6: 6, + 7: 1, + 8: 8 + } + + if enable_caching: + assert mapper._path_cache._cache + assert mapper._path_cache.has_path(0, 7) + assert mapper._path_cache.has_path(3, 5) From 2f5bf8f77bc2aacb6668f9863ce317f24409d4af Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Tue, 22 Jan 2019 12:10:25 +0100 Subject: [PATCH 06/25] Add paths statistics and string representation to the GraphMapper --- projectq/cengines/_graphmapper.py | 108 ++++++++++++++++++++----- projectq/cengines/_graphmapper_test.py | 40 ++++++++- 2 files changed, 126 insertions(+), 22 deletions(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index d4ec1463b..4240a91f8 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -23,6 +23,7 @@ """ from copy import deepcopy +import math import random import networkx as nx @@ -46,6 +47,12 @@ def __init__(self, path_length_threshold): self._cache = {} self.key_type = frozenset + def __str__(self): + s = "" + for (node0, node1), path in self._cache.items(): + s += "{}: {}\n".format(sorted([node0, node1]), path) + return s + def empty_cache(self): """Empty the cache.""" 
self._cache = {} @@ -103,15 +110,6 @@ class GraphMapperError(Exception): """Base class for all exceptions related to the GraphMapper.""" -class QubitAllocationError(GraphMapperError): - """ - Exception raised if a qubit allocation is impossible. - - This would typically be the case if the number of allocated qubit is - greater than the number of nodes inside the graph. - """ - - def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, stored_commands): """ @@ -180,6 +178,8 @@ class GraphMapper(BasicMapperEngine): num_of_swaps_per_mapping (dict): Key are the number of swaps per mapping, value is the number of such mappings which have been applied + path_stats (dict) : Key is the endpoints of a path, value is the number + of such paths which have been applied Note: 1) Gates are cached and only mapped from time to time. A @@ -243,6 +243,7 @@ def __init__(self, self.num_mappings = 0 self.depth_of_swaps = dict() self.num_of_swaps_per_mapping = dict() + self.paths_stats = dict() @property def current_mapping(self): @@ -289,6 +290,7 @@ def _process_commands(self): them interact """ paths = PathContainer() + not_in_mapping_qubits = set() allocated_qubits = deepcopy(self._currently_allocated_ids) active_qubits = deepcopy(self._currently_allocated_ids) @@ -310,11 +312,8 @@ def _process_commands(self): if len(allocated_qubits) < self.num_qubits: allocated_qubits.add(qubit_id) active_qubits.add(qubit_id) - else: - raise QubitAllocationError( - "Unable to allocate new qubit: all possible qubits" - " ({}) have already been allocated".format( - self.num_qubits)) + if qubit_id not in self._current_mapping: + not_in_mapping_qubits.add(qubit_id) elif isinstance(cmd.gate, DeallocateQubitGate): qubit_id = cmd.qubits[0][0].id @@ -332,9 +331,22 @@ def _process_commands(self): or qubit_ids[1] not in active_qubits: active_qubits.discard(qubit_ids[0]) active_qubits.discard(qubit_ids[1]) - elif not self._process_two_qubit_gate_dumb( - qubit0=qubit_ids[0], 
qubit1=qubit_ids[1], paths=paths): - break + else: + if not_in_mapping_qubits: + self.current_mapping = self._add_qubits_to_mapping( + self._current_mapping, self.graph, + not_in_mapping_qubits, self._stored_commands) + not_in_mapping_qubits = set() + + if not self._process_two_qubit_gate_dumb( + qubit0=qubit_ids[0], qubit1=qubit_ids[1], + paths=paths): + break + + if not_in_mapping_qubits: + self.current_mapping = self._add_qubits_to_mapping( + self._current_mapping, self.graph, not_in_mapping_qubits, + self._stored_commands) return paths @@ -358,14 +370,15 @@ def _process_two_qubit_gate_dumb(self, qubit0, qubit1, paths): node0 = self._current_mapping[qubit0] node1 = self._current_mapping[qubit1] - if paths.has_interaction(node0, node1) \ - or self.graph.has_edge(node0, node1): + if paths.has_interaction(node0, node1): return True # Qubits are both active but not connected via an edge if self.enable_caching: if self._path_cache.has_path(node0, node1): path = self._path_cache.get_path(node0, node1) + elif self.graph.has_edge(node0, node1): + path = [node0, node1] else: path = nx.shortest_path(self.graph, source=node0, target=node1) self._path_cache.add_path(path) @@ -379,7 +392,15 @@ def _process_two_qubit_gate_dumb(self, qubit0, qubit1, paths): # Makes sure that one qubit will interact with at most one other # qubit before forcing the generation of a swap # Also makes sure that path intersection (if any) are possible - return paths.try_add_path(path) + if not paths.try_add_path(path): + return False + + interaction = frozenset((node0, node1)) + if interaction not in self.paths_stats: + self.paths_stats[interaction] = 1 + else: + self.paths_stats[interaction] += 1 + return True # Technically, since the graph is connected, we should always be able # to find a path between any two nodes. But just in case... 
@@ -586,3 +607,50 @@ def receive(self, command_list): # Storage is full: Create new map and send some gates away: if len(self._stored_commands) >= self.storage: self._run() + + def __str__(self): + """ + Return the string representation of this GraphMapper. + + Returns: + A summary (string) of resources used, including depth of swaps and + statistics about the paths generated + """ + + depth_of_swaps_str = "" + for depth_of_swaps, num_mapping in self.depth_of_swaps.items(): + depth_of_swaps_str += "\n {:3d}: {:3d}".format( + depth_of_swaps, num_mapping) + + num_swaps_per_mapping_str = "" + for num_swaps_per_mapping, num_mapping in self.num_of_swaps_per_mapping.items( + ): + num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( + num_swaps_per_mapping, num_mapping) + + interactions = [ + k for _, k in sorted( + zip(self.paths_stats.values(), self.paths_stats.keys()), + reverse=True) + ] + + max_width = math.ceil(math.log10(max(self.paths_stats.values()))) + 1 + paths_stats_str = "" + if self.enable_caching: + for k in interactions: + if self.graph.has_edge(*list(k)): + path = list(k) + else: + path = self._path_cache.get_path(*list(k)) + paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( + self.paths_stats[k], max_width, path, *k) + else: + for k in interactions: + paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( + self.paths_stats[k], max_width, *k) + + return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + + "Number of swaps per mapping:{}\n\n" + + "Path statistics:{}\n\n").format( + self.num_mappings, depth_of_swaps_str, + num_swaps_per_mapping_str, paths_stats_str) diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index a153fee76..7ac1cc8ee 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -20,10 +20,10 @@ import pytest import networkx as nx import projectq -from projectq.cengines import DummyEngine, LocalOptimizer +from projectq.cengines import 
DummyEngine, LocalOptimizer, MainEngine from projectq.meta import LogicalQubitIDTag from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, - X) + X, H, All, Measure, CNOT) from projectq.types import WeakQubitRef from projectq.cengines import _graphmapper as graphm @@ -916,3 +916,39 @@ def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( assert mapper._path_cache._cache assert mapper._path_cache.has_path(0, 7) assert mapper._path_cache.has_path(3, 5) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_mapper_to_str(simple_graph, enable_caching): + mapper = graphm.GraphMapper( + graph=simple_graph, enable_caching=enable_caching) + backend = DummyEngine(save_commands=True) + eng = MainEngine(backend, [mapper]) + qureg = eng.allocate_qureg(len(simple_graph)) + + eng.flush() + assert mapper.current_mapping == dict(enumerate(range(len(simple_graph)))) + + H | qureg[0] + X | qureg[2] + + CNOT | (qureg[6], qureg[4]) + CNOT | (qureg[6], qureg[0]) + CNOT | (qureg[6], qureg[1]) + + All(Measure) | qureg + eng.flush() + + str_repr = str(mapper) + assert str_repr.count("Number of mappings: 2") == 1 + assert str_repr.count("1: 1") == 1 + assert str_repr.count("2: 1") == 2 + assert str_repr.count("3: 1") == 1 + assert str_repr.count(" 0 - 6: 1") == 1 + assert str_repr.count(" 0 - 3: 1") == 1 + assert str_repr.count(" 4 - 6: 1") == 1 + + sent_gates = [cmd.gate for cmd in backend.received_commands] + assert sent_gates.count(H) == 1 + assert sent_gates.count(X) == 4 + assert sent_gates.count(Measure) == 7 From 6854ff322e54c28a467b593ccfc6847857524fbe Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Wed, 30 Jan 2019 14:07:53 +0100 Subject: [PATCH 07/25] Various minor fixes --- projectq/cengines/_graphmapper.py | 82 +++++++++++++++++-------------- 1 file changed, 44 insertions(+), 38 deletions(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 4240a91f8..83e723c0e 100644 --- 
a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -425,31 +425,33 @@ def _send_possible_commands(self): if isinstance(cmd.gate, AllocateQubitGate): if cmd.qubits[0][0].id in self._current_mapping: self._currently_allocated_ids.add(cmd.qubits[0][0].id) - qb = WeakQubitRef( + qb0 = WeakQubitRef( engine=self, idx=self._current_mapping[cmd.qubits[0][0].id]) - new_cmd = Command( - engine=self, - gate=AllocateQubitGate(), - qubits=([qb], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) - self.send([new_cmd]) + self.send([ + Command( + engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) else: new_stored_commands.append(cmd) elif isinstance(cmd.gate, DeallocateQubitGate): if cmd.qubits[0][0].id in active_ids: - qb = WeakQubitRef( + qb0 = WeakQubitRef( engine=self, idx=self._current_mapping[cmd.qubits[0][0].id]) - new_cmd = Command( - engine=self, - gate=DeallocateQubitGate(), - qubits=([qb], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) self._currently_allocated_ids.remove(cmd.qubits[0][0].id) active_ids.remove(cmd.qubits[0][0].id) self._current_mapping.pop(cmd.qubits[0][0].id) - self.send([new_cmd]) + self.send([ + Command( + engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) else: new_stored_commands.append(cmd) else: @@ -510,29 +512,31 @@ def _run(self): not_allocated_ids = set( paths.get_all_nodes()).difference(backend_ids_used) - # Allocate all mapped qubit ids (which are not already allocated, - # i.e., contained in self._currently_allocated_ids) - # and add them temporarily to the - for backend_id in not_allocated_ids: - qb = WeakQubitRef(engine=self, idx=backend_id) - cmd = Command( - engine=self, gate=AllocateQubitGate(), qubits=([qb], )) - self.send([cmd]) - - # Calculate reverse internal mapping + # Calculate temporary internal reverse mapping new_internal_mapping = 
deepcopy(self._reverse_current_mapping) - # Add missing entries with invalid id to be able to process the - # swaps operations + # Allocate all mapped qubit ids that are not currently allocated + # but part of some path so that we may perform the swapping + # operations. for backend_id in not_allocated_ids: + qb0 = WeakQubitRef(engine=self, idx=backend_id) + self.send([ + Command( + engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], )) + ]) + + # Those qubits are not part of the current mapping, so add them + # to the temporary internal reverse mapping with invalid ids new_internal_mapping[backend_id] = -1 # Send swap operations to arrive at the new mapping for bqb0, bqb1 in swaps: - q0 = WeakQubitRef(engine=self, idx=bqb0) - q1 = WeakQubitRef(engine=self, idx=bqb1) - cmd = Command(engine=self, gate=Swap, qubits=([q0], [q1])) - self.send([cmd]) + qb0 = WeakQubitRef(engine=self, idx=bqb0) + qb1 = WeakQubitRef(engine=self, idx=bqb1) + self.send( + [Command(engine=self, gate=Swap, qubits=([qb0], [qb1]))]) # Update internal mapping based on swap operations new_internal_mapping[bqb0], \ @@ -565,17 +569,19 @@ def _run(self): # Deallocate all previously mapped ids which we only needed for the # swaps: for backend_id in not_needed_anymore: - qb = WeakQubitRef(engine=self, idx=backend_id) - cmd = Command( - engine=self, gate=DeallocateQubitGate(), qubits=([qb], )) - self.send([cmd]) + qb0 = WeakQubitRef(engine=self, idx=backend_id) + self.send([ + Command( + engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], )) + ]) # Calculate new mapping - new_mapping = { + self.current_mapping = { v: k for k, v in new_reverse_current_mapping.items() } - self.current_mapping = new_mapping # Send possible gates: self._send_possible_commands() @@ -623,8 +629,8 @@ def __str__(self): depth_of_swaps, num_mapping) num_swaps_per_mapping_str = "" - for num_swaps_per_mapping, num_mapping in self.num_of_swaps_per_mapping.items( - ): + for num_swaps_per_mapping, num_mapping \ + in 
self.num_of_swaps_per_mapping.items(): num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( num_swaps_per_mapping, num_mapping) From d3fa5e7d89e1c4134badb51c30d9e57cba00fc4b Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 4 Feb 2019 17:41:46 +0100 Subject: [PATCH 08/25] Rename PathContainer to PathManager + solve intersections on-the-fly --- .../cengines/_graph_path_container_test.py | 579 ----------- ...th_container.py => _graph_path_manager.py} | 466 ++++++--- projectq/cengines/_graph_path_manager_test.py | 941 ++++++++++++++++++ projectq/cengines/_graphmapper.py | 206 +--- projectq/cengines/_graphmapper_test.py | 145 +-- 5 files changed, 1369 insertions(+), 968 deletions(-) delete mode 100644 projectq/cengines/_graph_path_container_test.py rename projectq/cengines/{_graph_path_container.py => _graph_path_manager.py} (55%) create mode 100644 projectq/cengines/_graph_path_manager_test.py diff --git a/projectq/cengines/_graph_path_container_test.py b/projectq/cengines/_graph_path_container_test.py deleted file mode 100644 index 8054bb452..000000000 --- a/projectq/cengines/_graph_path_container_test.py +++ /dev/null @@ -1,579 +0,0 @@ -# Copyright 2019 ProjectQ-Framework (www.projectq.ch) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tests for projectq.cengines._graph_path_container.py.""" -import pytest -from projectq.cengines._graph_path_container import PathContainer, _find_first_order_intersections - - -def test_path_container_crossing_class(): - Crossing = PathContainer._Crossing - crossing_list = [Crossing(0, [1]), Crossing(1, [1]), Crossing(2, [2])] - - assert Crossing(0, [1]) == Crossing(0, [1]) - assert Crossing(0, [1]) != Crossing(1, [1]) - assert Crossing(0, [1]) != Crossing(0, [0, 1]) - assert Crossing(0, [0]) != Crossing(1, [0, 1]) - - assert [0, 1] == Crossing(0, [0, 1]) - assert [0, 1] == Crossing(1, [0, 1]) - assert [0, 1] != Crossing(0, [0]) - assert [0, 1] != Crossing(1, [0]) - - assert Crossing(0, [1]) in crossing_list - assert [0] not in crossing_list - assert [1] in crossing_list - - assert str(Crossing(0, [1])) == "{} {}".format(0, [1]) - assert repr(Crossing(0, [1])) == "Crossing({}, {})".format(0, [1]) - - with pytest.raises(NotImplementedError): - assert "" == Crossing(0, [1]) - - -def test_path_container_has_interaction(): - container = PathContainer() - - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - container.paths = path_dict - - assert container.has_interaction(4, 7) - assert container.has_interaction(7, 4) - assert container.has_interaction(8, 15) - assert container.has_interaction(15, 8) - assert not container.has_interaction(4, 8) - assert not container.has_interaction(8, 4) - - -def test_path_container_add_path(): - Crossing = PathContainer._Crossing - container = PathContainer() - - assert container.try_add_path([4, 5, 6, 7]) - assert container.paths == {0: [4, 5, 6, 7]} - assert container.crossings == {0: []} - - assert not container.try_add_path([4, 8, 12]) - assert not container.try_add_path([0, 1, 2, 3, 7]) - assert not container.try_add_path([1, 5, 6, 10]) - assert container.paths == {0: [4, 5, 6, 7]} - assert container.crossings == {0: []} - - assert container.try_add_path([1, 5, 9, 13]) - assert container.paths == {0: [4, 
5, 6, 7], 1: [1, 5, 9, 13]} - assert container.crossings == { - 0: [Crossing(1, [5])], - 1: [Crossing(0, [5])] - } - - assert container.try_add_path([10, 6, 9, 14]) - assert container.paths == { - 0: [4, 5, 6, 7], - 1: [1, 5, 9, 13], - 2: [10, 6, 9, 14] - } - assert container.crossings == { - 0: [Crossing(1, [5]), Crossing(2, [6])], - 1: [Crossing(0, [5]), Crossing(2, [9])], - 2: [Crossing(0, [6]), Crossing(1, [9])], - } - - -def test_path_container_remove_path(): - Crossing = PathContainer._Crossing - container = PathContainer() - assert container.try_add_path([4, 5, 6, 7]) - assert container.try_add_path([1, 5, 9, 13]) - assert container.try_add_path([8, 9, 10, 11, 15]) - - with pytest.raises(KeyError): - container.remove_path_by_id(10) - - container.remove_path_by_id(2) - assert container.paths == {0: [4, 5, 6, 7], 1: [1, 5, 9, 13]} - assert container.crossings == { - 0: [Crossing(1, [5])], - 1: [Crossing(0, [5])] - } - - container.remove_path_by_id(0) - assert container.paths == {1: [1, 5, 9, 13]} - assert container.crossings == {1: []} - - assert container.try_add_path([8, 9, 10, 11, 15]) - assert container.paths == {1: [1, 5, 9, 13], 3: [8, 9, 10, 11, 15]} - assert container.crossings == { - 1: [Crossing(3, [9])], - 3: [Crossing(1, [9])] - } - - -def test_path_container_swap_paths(): - Crossing = PathContainer._Crossing - container = PathContainer() - - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - with pytest.raises(KeyError): - container.swap_paths(10, 0) - with pytest.raises(KeyError): - container.swap_paths(0, 10) - - container.swap_paths(0, 1) - path_dict[0], path_dict[1] = path_dict[1], path_dict[0] - assert container.paths == path_dict - assert container.crossings == { - 0: [Crossing(1, [5]), Crossing(2, [9])], - 1: [Crossing(0, [5])], - 2: [Crossing(0, [9])] - } - - path_dict[3] = [20, 21, 6, 22, 23, 10, 24, 25] 
- assert container.try_add_path(path_dict[3]) - - assert container.paths == path_dict - assert container.crossings == { - 0: [Crossing(1, [5]), Crossing(2, [9])], - 1: [Crossing(0, [5]), Crossing(3, [6])], - 2: [Crossing(0, [9]), Crossing(3, [10])], - 3: [Crossing(1, [6]), Crossing(2, [10])] - } - - container.swap_paths(1, 3) - path_dict[1], path_dict[3] = path_dict[3], path_dict[1] - assert container.paths == path_dict - - assert container.crossings == { - 0: [Crossing(3, [5]), Crossing(2, [9])], - 1: [Crossing(3, [6]), Crossing(2, [10])], - 2: [Crossing(0, [9]), Crossing(1, [10])], - 3: [Crossing(0, [5]), Crossing(1, [6])] - } - - -def test_path_container_clear(): - container = PathContainer() - assert container.try_add_path([4, 5, 6, 7]) - assert container.try_add_path([1, 5, 9, 13]) - assert container.try_add_path([8, 9, 10, 11, 15]) - - assert container.paths - assert container.crossings - - container.clear() - assert not container.paths - assert not container.crossings - - -def test_path_container_max_order(): - container = PathContainer() - assert container.max_crossing_order() == 0 - - assert container.try_add_path([4, 5, 6, 7]) - assert container.max_crossing_order() == 0 - - assert container.try_add_path([1, 5, 9, 13]) - assert container.max_crossing_order() == 1 - - -def test_path_container_discard_paths(): - Crossing = PathContainer._Crossing - container = PathContainer() - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - container.remove_crossing_of_order_higher_than(1) - assert container.max_crossing_order() == 1 - assert container.paths == path_dict - assert container.crossings == { - 0: [Crossing(1, [5])], - 1: [Crossing(0, [5]), Crossing(2, [9])], - 2: [Crossing(1, [9])] - } - - container.remove_crossing_of_order_higher_than(0) - del path_dict[1] - assert container.max_crossing_order() == 0 - assert 
container.paths == path_dict - assert container.crossings == {0: [], 2: []} - - -def test_path_container_get_path_data(): - container = PathContainer() - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - - assert container.get_all_paths() == [[4, 5, 6, 7], [1, 5, 9, 13], - [8, 9, 10, 11, 15]] - assert container.get_all_nodes() == set( - [1, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15]) - - -def test_path_container_find_first_order_intersections(): - Crossing = PathContainer._Crossing - container = PathContainer() - - path_dict = {0: [0, 1, 2, 10, 11, 12], 1: [3, 1, 4], 2: [5, 6, 7]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - assert container.crossings == { - 0: [Crossing(1, [1])], - 1: [Crossing(0, [1])], - 2: [] - } - assert _find_first_order_intersections(container.crossings, - container._split_paths()) == { - 1: {1} - } - - container.remove_path_by_id(0) - del path_dict[0] - path_dict[3] = [0, 1, 2, 10] - assert container.try_add_path(path_dict[3]) - assert container.paths == path_dict - assert _find_first_order_intersections(container.crossings, - container._split_paths()) == { - 1: {1, 3}, - } - - path_dict[4] = [11, 6, 12, 14, 15, 16] - assert container.try_add_path(path_dict[4]) - assert container.paths == path_dict - assert _find_first_order_intersections(container.crossings, - container._split_paths()) == { - 1: {1, 3}, - 6: {2} - } - - path_dict[5] = [21, 1, 22, 24, 25, 26] - assert container.try_add_path(path_dict[5]) - assert container.paths == path_dict - assert _find_first_order_intersections(container.crossings, - container._split_paths()) == { - 1: {1, 3}, - 6: {2} - } - - path_dict[6] = [30, 15, 32, 33, 34, 35] - assert container.try_add_path(path_dict[6]) - assert container.paths == path_dict - assert _find_first_order_intersections(container.crossings, - container._split_paths()) == { - 1: 
{1, 3}, - 6: {2} - # The 15 node should not appear - } - - -def test_path_container_no_intersection(): - container = PathContainer() - path_dict = {0: [0, 1, 2, 3], 1: [4, 5, 6], 2: [7, 8, 9, 10, 11]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (3, 2), (6, 5), (7, 8), - (11, 10), (10, 9)] - assert container.paths == path_dict - - -def test_path_container_1_intersection_single_intersection(): - container = PathContainer() - - # 3 - # | - # 0 - 1 - 2 - # | 10 - 11 - 12 - # 4 - # NB: intersection at node 1 - path_dict = {0: [0, 1, 2], 1: [3, 1, 4], 2: [10, 11, 12]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(2, 1), (12, 11)] - # Make sure that path 1 gets deleted or we risk running an infinite loop - del path_dict[1] - assert container.paths == path_dict - - # 4 - # | - # 0 - 1 - 2 - 3 - # | 10 - 11 - 12 - # 5 - # NB: intersection at node 1 - container.clear() - path_dict = {0: [0, 1, 2, 3], 1: [4, 1, 5], 2: [10, 11, 12]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (5, 1), (12, 11)] - - # 4 - # | - # 0 - 1 - 2 - 3 - # | 10 - 11 - 12 - # 5 - # NB: intersection at node 1 - container.clear() - path_dict = {0: [4, 1, 5], 1: [0, 1, 2, 3], 2: [10, 11, 12]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (5, 1), (12, 11)] - - # 4 - # | - # 0 - 1 - 2 - 3 - # | 10 - 11 - 12 - # 5 - # NB: intersection at node 2 - container.clear() - path_dict = {0: [0, 1, 2, 3], 1: [4, 2, 5], 2: [10, 11, 12]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert 
container.generate_swaps() == [(3, 2), (2, 1), (5, 2), (12, 11)] - - # 9 - # | - # 0 - 1 - 2 - 3 - 4 - 5 - # | - # 10 6 - 7 - 8 - # | - # 11 - # NB: intersection at node 1 - container.clear() - path_dict = {0: [9, 1, 10, 11], 1: [0, 1, 2, 3, 4, 5], 2: [6, 7, 8]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - container.generate_swaps() - path_dict[0], path_dict[1] = path_dict[1], path_dict[0] - assert container.paths == path_dict - - -def test_path_container_1_intersection_double_crossing(): - container = PathContainer() - - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - 5 - # | | - # 8 9 - # | - # 10 - # | - # 11 - # | - # 12 - # NB: intersection at node 2 - container.clear() - path_dict = {0: [0, 1, 2, 3, 4, 5], 1: [6, 2, 8], 2: [7, 4, 9, 10, 11, 12]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(5, 4), (4, 3), (3, 2), (2, 1), (8, - 2), - (7, 4), (4, 9), (12, 11), (11, 10)] - - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - 5 - # | | - # 8 9 - # | - # 10 - # | - # 11 - # | - # 12 - # NB: intersection at node 3 - container.clear() - path_dict = {0: [0, 1, 2, 3, 4, 5], 1: [7, 3, 9], 2: [6, 1, 8, 10, 11, 12]} - - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (2, 3), (3, 4), (9, - 3), - (6, 1), (1, 8), (12, 11), (11, 10)] - - # 4 5 4 5 - # | | | | - # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 or 0 - 1 - 2 - 3 - # | | | | - # 6 7 6 7 - # NB: intersection at nodes 1 & 2 - container.clear() - path_dict = { - 0: [0, 1, 2, 3], - 1: [4, 1, 6], - 2: [5, 2, 7], - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - swaps = container.generate_swaps() - assert swaps == [(0, 1), (1, 2), (6, 1)] \ - or swaps == [(3, 2), (2, 1), (7, 2)] - assert 
container.paths[0] == path_dict[0] - assert (1 in container.paths and container.paths[1] == path_dict[1]) \ - + (2 in container.paths and container.paths[2] == path_dict[2]) == 1 - - # 5 6 6 - # | | | - # 0 - 1 - 2 - 3 - 4 -> 0 - 1 - 2 - 3 - 4 - # | | | - # 7 8 8 - # NB: intersection at nodes 1 & 3 - container.clear() - path_dict = { - 0: [0, 1, 2, 3, 4], - 1: [5, 1, 7], - 2: [6, 3, 8], - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - swaps = container.generate_swaps() - assert container.generate_swaps() == [(0, 1), (4, 3), (3, 2), (8, 3)] - del path_dict[1] - assert container.paths == path_dict - - # 5 - # | - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - # | | - # 8 9 - # NB: intersection at nodes 1 & 3 - container.clear() - path_dict = { - 0: [0, 1, 2, 3, 4], - 1: [5, 6, 1, 8], - 2: [7, 2, 9], - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (2, 3), (5, 6), (8, - 1), - (9, 2)] - assert container.paths == path_dict - - -def test_path_container_1_intersection_triple_crossing(): - container = PathContainer() - - # 9 13 - 14 - 15 - # | / - # 0 - 1 - 2 - 3 - 4 - 5 - # / | - # 12 10 6 - 7 - 8 - # | - # 11 - # NB: intersection at node 1 - container.clear() - path_dict = { - 0: [9, 1, 10, 11], - 1: [0, 1, 2, 3, 4, 5], - 2: [6, 7, 8], - 3: [12, 1, 13, 14, 15, 16] - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - container.generate_swaps() - path_dict[1], path_dict[3], path_dict[0] \ - = path_dict[0], path_dict[1], path_dict[3] - assert container.paths == path_dict - - # 6 7 8 6 8 - # | | | | | - # 0 - 1 - 2 - 3 - 4 - 5 -> 0 - 1 - 2 - 3 - 4 - 5 - # | | | | | - # 9 10 11 8 10 - # | | | | - # 12 13 12 13 - # | | | | - # 14 15 14 15 - # | | | | - # 16 17 16 17 - # NB: intersection at node 3 - container.clear() - path_dict = { 
- 0: [0, 1, 2, 3, 4, 5], - 1: [6, 1, 9, 12, 14, 16], - 2: [7, 3, 10], - 3: [8, 4, 11, 13, 15, 17] - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (5, 4), (4, 3), (6, - 1), - (1, 9), (16, 14), (14, 12), (8, 4), - (4, 11), (17, 15), (15, 13)] - del path_dict[2] - assert container.paths == path_dict - - # 4 5 10 - 11 - 12 4 10 - 11 - 12 - # | / | - # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 - # / | | - # 6 7 7 - # NB: intersection at node 1 - container.clear() - path_dict = {0: [0, 1, 2, 3], 1: [4, 1, 7], 2: [10, 11, 12], 3: [5, 1, 6]} - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - assert container.generate_swaps() == [(0, 1), (1, 2), (7, 1), (12, 11)] - - -@pytest.mark.xfail -def test_path_container_1_intersection_triple_crossing_complex(): - container = PathContainer() - # 4 - # | - # 0 - 1 - 2 - 3 - # | - # 5 - 6 - 7 - # | - # 8 - # NB: intersection at nodes 1 & 3 - container.clear() - path_dict = { - 0: [0, 1, 2, 3], - 1: [4, 1, 6, 8], - 2: [5, 6, 7], - } - for _, path in path_dict.items(): - assert container.try_add_path(path) - assert container.paths == path_dict - - # Ideally this situation should be solved without deleting any paths - assert container.generate_swaps() == [(0, 1), (1, 2), (8, 6), (6, 1), (7, - 6)] - path_dict[1], path_dict[2] = path_dict[2], path_dict[1] - assert container.paths == path_dict diff --git a/projectq/cengines/_graph_path_container.py b/projectq/cengines/_graph_path_manager.py similarity index 55% rename from projectq/cengines/_graph_path_container.py rename to projectq/cengines/_graph_path_manager.py index 1683b594c..e31a1973f 100644 --- a/projectq/cengines/_graph_path_container.py +++ b/projectq/cengines/_graph_path_manager.py @@ -14,10 +14,11 @@ """ This is a helper module for the _graphmapper.GraphMapper class. 
-Its main goal is to store possible paths through the graph and then generate a -list of swap operations to perform as many paths as possible, by either solving -conflicts (ie. crossing points and intersections; see definitions below) or -discarding paths. +Its main goal is to provide classes and functions to manage paths through an +arbitrary graph and eventually generate a list of swap operations to perform as +many paths as possible, by either solving conflicts (ie. crossing points and +intersections; see definitions below) or discarding paths. + Note that when generating a list of swap operations for a particular path, the path is usually splitted into two halves in order to maximize the number of swap operations that can be performed simultaneously. @@ -45,6 +46,7 @@ """ import itertools +import networkx as nx # ============================================================================== @@ -161,7 +163,9 @@ def _return_swaps(split_paths): Returns: A list of swap operations (2-tuples) """ swap_operations = [] - for path in split_paths.values(): + + for path_id in sorted(split_paths): + path = split_paths[path_id] swap_operations.append([]) # Add swaps operations for first half of the path for prev, cur in zip(path[0], path[0][1:]): @@ -177,46 +181,144 @@ def _return_swaps(split_paths): # ============================================================================== -class PathContainer: +class PathCacheExhaustive(): + """ + Class acting as cache for optimal paths through the graph. """ - Container for paths through a graph. - Allows the resolution of conflict points such as crossings and - intersections. 
+ def __init__(self, path_length_threshold): + self._path_length_threshold = path_length_threshold + self._cache = {} + self.key_type = frozenset - Attributes: - paths (dict) : list of paths currently held by a path container indexed - by a unique ID - crossings (dict) : dictionary of crossing points indexed by path ID - """ + def __str__(self): + ret = "" + for (node0, node1), path in self._cache.items(): + ret += "{}: {}\n".format(sorted([node0, node1]), path) + return ret + + def empty_cache(self): + """Empty the cache.""" + self._cache = {} + + def get_path(self, start, end): + """ + Return a path from the cache. + + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: Optimal path stored in cache + + Raises: KeyError if path is not present in the cache + """ + return self._cache[self.key_type((start, end))] + + def has_path(self, start, end): + """ + Test whether a path connecting start to end is present in the cache. + + Args: + start (object): Start node for the path + end (object): End node for the path + + Returns: True/False + """ + return self.key_type((start, end)) in self._cache + + def add_path(self, path): + """ + Add a path to the cache. + + This method also recursively adds all the subpaths that are at least + self._path_length_threshold long to the cache. 
+ + Args: + path (list): Path to store inside the cache + """ + length = len(path) + for start in range(length - self._path_length_threshold + 1): + node0 = path[start] + for incr in range(length - start - 1, + self._path_length_threshold - 2, -1): + end = start + incr + self._cache[self.key_type((node0, + path[end]))] = path[start:end + 1] + + +# ============================================================================== + + +class _Crossing: + __slots__ = ['path_id', 'overlap'] - class _Crossing: - __slots__ = ['path_id', 'overlap'] + def __init__(self, path_id, overlap): + self.path_id, self.overlap = path_id, overlap - def __init__(self, path_id, overlap): - self.path_id, self.overlap = path_id, overlap + def __eq__(self, other): + if isinstance(other, self.__class__): + return (self.path_id, self.overlap) == (other.path_id, + other.overlap) + if isinstance(other, list): + return self.overlap == other + if isinstance(other, int): + return self.overlap[0] == other + raise NotImplementedError("Invalid comparison") - def __eq__(self, other): - if isinstance(other, self.__class__): - return (self.path_id, self.overlap) == (other.path_id, - other.overlap) - if isinstance(other, list): - return self.overlap == other - if isinstance(other, int): - return self.overlap[0] == other - raise NotImplementedError("Invalid comparison") + def __str__(self): + return '{} {}'.format(self.path_id, self.overlap) - def __str__(self): - return '{} {}'.format(self.path_id, self.overlap) + def __repr__(self): + return 'Crossing({}, {})'.format(self.path_id, self.overlap) - def __repr__(self): - return 'Crossing({}, {})'.format(self.path_id, self.overlap) - def __init__(self): +class PathManager: + """ + Class managing interactions between distant qubits on an arbitrary graph. 
+ + This class essentially manages paths through an arbitrary graph, handling + possible intersections between multiple paths through an arbitrary graph by + resolving conflict points such as crossings and intersections. + + Attributes: + crossings (dict) : dictionary of crossing points indexed by path ID + cache (PathCacheExhaustive) : cache manager + enable_caching (bool): indicates whether caching is enabled or not + graph (networkx.Graph): Arbitrary connected graph + paths (dict) : list of paths currently held by a path container indexed + by a unique ID + paths_stats (dict) : dictionary for storing statistics indexed by + interactions (frozenset of pairs of qubits) + """ + + def __init__(self, graph, enable_caching=True): + """ + Args: + graph (networkx.Graph): an arbitrary connected graph + enable_caching (bool): Controls whether path caching is enabled + """ + # Make sure that we start with a valid graph + if not nx.is_connected(graph): + raise RuntimeError("Input graph must be a connected graph") + elif not all([isinstance(n, int) for n in graph]): + raise RuntimeError( + "All nodes inside the graph needs to be integers") + else: + self.graph = graph + self.paths = {} self.crossings = {} self._path_id = 0 + self.enable_caching = enable_caching + # Path cache support + path_length_threshold = 3 + self.cache = PathCacheExhaustive(path_length_threshold) + + # Statistics + self.paths_stats = dict() + ################################################################# # Methods querying information about the state of the container # ################################################################# @@ -228,7 +330,11 @@ def get_all_nodes(self): Returns: A set of nodes that are part of at least one path. 
""" - return set(itertools.chain.from_iterable(self.paths.values())) + all_nodes = [] + for row in self.paths.values(): + all_nodes.extend(row[0]) + all_nodes.extend(row[1]) + return set(all_nodes) def get_all_paths(self): """ @@ -237,7 +343,9 @@ def get_all_paths(self): Returns: A list of paths (list of list of ints) """ - return [v for _, v in self.paths.items()] + return [ + self.paths[k][0] + self.paths[k][1] for k in sorted(self.paths) + ] def has_interaction(self, node0, node1): """ @@ -252,16 +360,17 @@ def has_interaction(self, node0, node1): node0 to node1 """ for path in self.paths.values(): - if frozenset((node0, node1)) == frozenset((path[0], path[-1])): + if frozenset((node0, node1)) == frozenset((path[0][0], + path[1][-1])): return True return False def max_crossing_order(self): """ - Return the order of the highest order intersection. + Return the order of the largest crossing. - The intersection order is given by the number of paths that consider a - particular crossing point as an intersection + The order of a crossing is defined as the number of paths that + intersect Returns: An int @@ -274,17 +383,95 @@ def max_crossing_order(self): return max(crossing_orders) return 0 - ################################################## - # Methods modifying the content of the container # - ################################################## + ###################################################### + # Methods for resetting the content of the container # + ###################################################### + + def clear_paths(self): + """ + Reset the list of paths managed by this instance. + + Note: + Does not reset path statistics or the state of the cache. + """ + self.paths.clear() + self.crossings.clear() def clear(self): """ - Reset the state of the container. + Completely reset the state of this instance. 
+ + Note: + Both path statistics and cache are also reset """ - self.paths = {} - self.crossings = {} - self._path_id = 0 + self.clear_paths() + self.paths_stats.clear() + self.cache.empty_cache() + + ############################################################# + # Entry point for the mapper to extract the final path list # + ############################################################# + + def generate_swaps(self): + """ + Generate a list of swaps to execute as many paths as possible. + + Returns: + A list of swap operations (tuples) + """ + + self._solve_first_order_intersections( + _find_first_order_intersections(self.crossings, self.paths)) + + # By this point, we should have solved all intersections + return list(itertools.chain.from_iterable(_return_swaps(self.paths))) + + ############################################# + # Methods for adding paths to the container # + ############################################# + + def push_interaction(self, node0, node1): + """ + Plan an interaction between two qubit. 
+ + Args: + node0 (int) : backend id of the first qubit + node1 (int) : backend id of the second qubit + + Returns: + True if the path could be added to the container, False otherwise + """ + + # TODO: think about merging paths + # TODO: maybe apply gates in the middle of the swaps + + interaction = frozenset((node0, node1)) + if self.has_interaction(node0, node1): + self.paths_stats[interaction] += 1 + return True + + if not self.graph.has_edge(node0, node1): + new_path = self._calculate_path(node0, node1) + else: + new_path = None + + if new_path: + if not self.try_add_path(new_path) \ + and not self._try_alternative_paths(node0, node1): + return False + else: + # Prevent adding a new path if it contains some already interacting + # qubits + for path in self.paths.values(): + if path[0][0] in (node0, node1) or path[1][-1] in (node0, + node1): + return False + + if interaction not in self.paths_stats: + self.paths_stats[interaction] = 1 + else: + self.paths_stats[interaction] += 1 + return True def try_add_path(self, new_path): """ @@ -296,30 +483,78 @@ def try_add_path(self, new_path): Returns: True if the path could be added to the container, False otherwise """ - # Prevent adding a path to the container if the start or the end - # qubit is already interacting with another one - # Also make sure the new path does not contain interacting qubits + # Prevent adding a new path if it contains some already interacting + # qubits for path in self.paths.values(): - if path[0] in new_path or path[-1] in new_path: + if path[0][0] in new_path or path[1][-1] in new_path: return False + # Make sure each node appears only once + if len(new_path) != len(set(new_path)): + return False + + idx = len(new_path) >> 1 + new_subpath0, new_subpath1 = new_path[:idx], new_path[idx:] + new_intersections = {} new_crossings = [] - for idx, path in self.paths.items(): - path_overlap = [node for node in new_path if node in path] + for idx, (subpath0, subpath1) in self.paths.items(): + 
path_overlap = [ + node for node in new_path + if node in subpath0 or node in subpath1 + ] if len(path_overlap) > 1: return False if len(path_overlap) == 1: - new_crossings.append( - PathContainer._Crossing(idx, path_overlap)) - - self.paths[self._path_id] = new_path + new_crossings.append(_Crossing(idx, path_overlap)) + + # Is this crossing point an intersection for the new path? + if new_subpath0[-1] in path_overlap \ + or new_subpath1[0] in path_overlap: + if path_overlap[0] not in new_intersections: + new_intersections[path_overlap[0]] = set( + (self._path_id, )) + else: + new_intersections[path_overlap[0]].add(self._path_id) + + # Is this crossing point an intersection for the other path? + subpath0, subpath1 = self.paths[idx] + if subpath0[-1] in path_overlap \ + or subpath1[0] in path_overlap: + if path_overlap[0] not in new_intersections: + new_intersections[path_overlap[0]] = set((idx, )) + else: + new_intersections[path_overlap[0]].add(idx) + + self.paths[self._path_id] = (new_subpath0, new_subpath1) self.crossings[self._path_id] = new_crossings for crossing in new_crossings: - self.crossings[crossing.path_id].append( - PathContainer._Crossing(self._path_id, crossing.overlap)) + path_id = crossing.path_id + self.crossings[path_id].append( + _Crossing(self._path_id, crossing.overlap)) + + # Remove the entries where only the new path is present, as the + # solution in those cases is to execute the new path after the other + # paths, which is going to happen anyway as the new path is appended to + # the list of paths + new_intersections = { + node: path_ids + for node, path_ids in new_intersections.items() + if len(path_ids) > 1 or self._path_id not in path_ids + } + + if new_intersections: + self._solve_first_order_intersections(new_intersections) + + if self._path_id not in self.paths: + return False + self._path_id += 1 return True + ############################################# + # Methods for adding paths to the container # + 
############################################# + def remove_path_by_id(self, path_id): """ Remove a path from the path container given its ID. @@ -386,45 +621,11 @@ def swap_paths(self, path_id1, path_id2): self.paths[path_id2], self.paths[path_id1] = self.paths[ path_id1], self.paths[path_id2] - def generate_swaps(self): - """ - Generate a list of swaps to execute as many paths as possible. + ########################## + # Private helper methods # + ########################## - Returns: - A list of swap operations (tuples) - """ - # TODO: think about merging paths - # TODO: maybe apply gates in the middle of the swaps - - max_crossing_order = self.max_crossing_order() - - split_paths = self._split_paths() - - if max_crossing_order > 0: - # Some paths have first order crossing points (ie. at most one - # point is common). Try to re-arrange the path splitting to remove - # the intersection points - self._solve_first_order_intersections(split_paths) - - # By this point, we should have solved all intersections - - return list(itertools.chain.from_iterable(_return_swaps(split_paths))) - - def _split_paths(self): - """ - Split all paths into pairs of equal or almost equal length sub-paths. - - Returns: - Dictionary indexed by path ID containing 2-tuples with each path - halves - """ - split_paths = {} - for path_id, path in self.paths.items(): - idx = len(path) >> 1 - split_paths[path_id] = (path[:idx], path[idx:]) - return split_paths - - def _solve_first_order_intersections(self, split_paths): + def _solve_first_order_intersections(self, intersections): """ Solve all first order intersections. 
@@ -437,20 +638,17 @@ def _solve_first_order_intersections(self, split_paths): self.max_crossing_order() == 1 Args: - split_paths (dict): Dictionary indexed by path ID containing - 2-tuples of path halvesx + intersections (dict): TODO """ - # Get all the intersections - intersections = _find_first_order_intersections( - self.crossings, split_paths) # Get a list of the intersection nodes sorted by intersection order and # total number of points of all paths for that particular intersection def intersection_sort(crossing): order = len(crossing[0]) - number_of_points = sum( - [len(self.paths[path_id]) - for path_id in crossing[0]]) - order + 1 + number_of_points = sum([ + len(self.paths[path_id][0]) + len(self.paths[path_id][1]) + for path_id in crossing[0] + ]) - order + 1 return (order, number_of_points) intersection_node_list = [ @@ -465,10 +663,10 @@ def intersection_sort(crossing): node_is_not_crossing = { path_id: ([ node not in self.crossings[path_id] - for node in split_paths[path_id][0] + for node in self.paths[path_id][0] ], [ node not in self.crossings[path_id] - for node in split_paths[path_id][1] + for node in self.paths[path_id][1] ]) for path_id in intersections[intersection_node] } @@ -484,8 +682,6 @@ def intersection_sort(crossing): other_path_id = crossing.path_id if path_id < other_path_id: self.swap_paths(path_id, other_path_id) - split_paths[0], split_paths[1] = split_paths[ - 1], split_paths[0] del intersections[intersection_node] del intersection_node_list[-1] else: @@ -494,7 +690,7 @@ def intersection_sort(crossing): path_id_list = [ x for _, x in sorted( zip([ - len(self.paths[i]) + len(self.paths[i][0]) + len(self.paths[i][1]) for i in intersections[intersection_node] ], intersections[intersection_node])) ] @@ -505,32 +701,74 @@ def intersection_sort(crossing): solved = _try_solve_intersection( intersection_node, - *(split_paths[path_id1] + node_is_not_crossing[path_id1])) + *(self.paths[path_id1] + node_is_not_crossing[path_id1])) if not 
solved: solved = _try_solve_intersection( intersection_node, - *(split_paths[path_id2] + + *(self.paths[path_id2] + node_is_not_crossing[path_id2])) if not solved: # Last resort: delete one path path_id_min, path_id_max = sorted([path_id1, path_id2]) - del split_paths[path_id_max] del node_is_not_crossing[path_id_max] self.remove_path_by_id(path_id_max) node_is_not_crossing[path_id_min] = ([ node not in self.crossings[path_id_min] - for node in split_paths[path_id_min][0] + for node in self.paths[path_id_min][0] ], [ node not in self.crossings[path_id_min] - for node in split_paths[path_id_min][1] + for node in self.paths[path_id_min][1] ]) intersections = _find_first_order_intersections( - self.crossings, split_paths) + self.crossings, self.paths) intersection_node_list = [ x for _, x in sorted( zip(intersections.values(), intersections.keys()), key=intersection_sort) ] + + def _calculate_path(self, node0, node1): + """ + Calculate a path between two nodes on the graph. + + Args: + node0 (int) : backend id of the first qubit + node1 (int) : backend id of the second qubit + """ + + if self.enable_caching: + try: + path = self.cache.get_path(node0, node1) + except KeyError: + path = nx.shortest_path(self.graph, source=node0, target=node1) + self.cache.add_path(path) + else: + path = nx.shortest_path(self.graph, source=node0, target=node1) + + return path + + def _try_alternative_paths(self, node0, node1): + """ + Attempt to find some alternative paths + """ + for neighbour in self.graph[node0]: + new_path = self._calculate_path(neighbour, node1) + if new_path[-1] == neighbour: + new_path = new_path + [node0] + else: + new_path = [node0] + new_path + if self.try_add_path(new_path): + return True + for neighbour in self.graph[node1]: + new_path = self._calculate_path(node0, neighbour) + if new_path[-1] == neighbour: + new_path = new_path + [node1] + else: + new_path = [node1] + new_path + if self.try_add_path(new_path): + return True + + return False diff --git 
a/projectq/cengines/_graph_path_manager_test.py b/projectq/cengines/_graph_path_manager_test.py new file mode 100644 index 000000000..e9b9dbbbd --- /dev/null +++ b/projectq/cengines/_graph_path_manager_test.py @@ -0,0 +1,941 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for projectq.cengines._graph_path_manager.py.""" + +from copy import deepcopy +import itertools +import networkx as nx +import pytest +from projectq.cengines._graph_path_manager import PathManager, \ + PathCacheExhaustive, _find_first_order_intersections, _Crossing + +# ============================================================================== + + +def generate_grid_graph(nrows, ncols): + graph = nx.Graph() + graph.add_nodes_from(range(nrows * ncols)) + + for row in range(nrows): + for col in range(ncols): + node0 = col + ncols * row + + is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) + add_horizontal = is_middle or (row in (0, nrows - 1) and + (0 < col < ncols - 1)) + add_vertical = is_middle or (col in (0, ncols - 1) and + (0 < row < nrows - 1)) + + if add_horizontal: + graph.add_edge(node0, node0 - 1) + graph.add_edge(node0, node0 + 1) + if add_vertical: + graph.add_edge(node0, node0 - ncols) + graph.add_edge(node0, node0 + ncols) + + return graph + + +@pytest.fixture(scope="module") +def simple_graph(): + # 2 4 + # / \ / | + # 0 - 1 3 | + # \ / \ | + # 5 6 + graph = nx.Graph() + graph.add_nodes_from(range(7)) + 
graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, + 6), + (4, 6)]) + return graph + + +@pytest.fixture +def grid44_manager(): + return PathManager(graph=generate_grid_graph(4, 4), enable_caching=False) + + +# ============================================================================== + + +def test_path_cache_exhaustive(): + path_length_threshold = 3 + cache = PathCacheExhaustive(path_length_threshold) + + assert not cache._cache + cache.add_path(['a', 'b', 'c']) + assert cache._cache == {cache.key_type(('a', 'c')): ['a', 'b', 'c']} + + assert cache.has_path('a', 'c') + assert not cache.has_path('a', 'b') + assert not cache.has_path('b', 'c') + + cache.empty_cache() + assert not cache._cache + + cache.add_path(['a', 'b', 'c', 'd']) + assert cache._cache == { + cache.key_type(('a', 'c')): ['a', 'b', 'c'], + cache.key_type(('a', 'd')): ['a', 'b', 'c', 'd'], + cache.key_type(('b', 'd')): ['b', 'c', 'd'] + } + assert cache.get_path('a', 'd') == ['a', 'b', 'c', 'd'] + assert cache.has_path('a', 'd') + assert cache.has_path('d', 'a') + assert cache.has_path('a', 'c') + assert cache.has_path('b', 'd') + assert not cache.has_path('a', 'b') + assert not cache.has_path('b', 'a') + assert not cache.has_path('b', 'c') + assert not cache.has_path('c', 'd') + + str_repr = str(cache) + assert str_repr.count("['a', 'd']: ['a', 'b', 'c', 'd']") == 1 + assert str_repr.count("['a', 'c']: ['a', 'b', 'c']") == 1 + assert str_repr.count("['b', 'd']: ['b', 'c', 'd']") == 1 + + +# ============================================================================== + + +def test_path_container_crossing_class(): + Crossing = _Crossing + crossing_list = [Crossing(0, [1]), Crossing(1, [1]), Crossing(2, [2])] + + assert Crossing(0, [1]) == Crossing(0, [1]) + assert Crossing(0, [1]) != Crossing(1, [1]) + assert Crossing(0, [1]) != Crossing(0, [0, 1]) + assert Crossing(0, [0]) != Crossing(1, [0, 1]) + + assert [0, 1] == Crossing(0, [0, 1]) + assert [0, 1] == Crossing(1, [0, 
1]) + assert [0, 1] != Crossing(0, [0]) + assert [0, 1] != Crossing(1, [0]) + + assert Crossing(0, [1]) in crossing_list + assert [0] not in crossing_list + assert [1] in crossing_list + + assert str(Crossing(0, [1])) == "{} {}".format(0, [1]) + assert repr(Crossing(0, [1])) == "Crossing({}, {})".format(0, [1]) + + with pytest.raises(NotImplementedError): + assert "" == Crossing(0, [1]) + + +# ============================================================================== + + +def test_valid_and_invalid_graphs(simple_graph): + graph = nx.Graph() + graph.add_nodes_from('abcd') + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + graph = deepcopy(simple_graph) + graph.remove_edge(0, 1) + with pytest.raises(RuntimeError): + PathManager(graph=graph) + + +def test_path_container_has_interaction(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert grid44_manager.has_interaction(4, 7) + assert grid44_manager.has_interaction(7, 4) + assert grid44_manager.has_interaction(8, 15) + assert grid44_manager.has_interaction(15, 8) + assert not grid44_manager.has_interaction(4, 5) + assert not grid44_manager.has_interaction(4, 6) + assert not grid44_manager.has_interaction(4, 8) + assert not grid44_manager.has_interaction(4, 9) + assert not grid44_manager.has_interaction(9, 4) + assert not grid44_manager.has_interaction(1, 5) + assert not grid44_manager.has_interaction(1, 9) + assert not grid44_manager.has_interaction(8, 9) + assert not grid44_manager.has_interaction(8, 10) + assert not grid44_manager.has_interaction(8, 11) + + +def test_path_container_get_all_nodes(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert 
grid44_manager.get_all_nodes() == set((1, 4, 5, 6, 7, 8, 9, 10, 11, + 13, 15)) + + +def test_path_container_get_all_paths(grid44_manager): + path_dict = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.paths = path_dict + + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7], [1, 5, 9, 13], + [8, 9, 10, 11, 15]] + + +def test_path_container_max_order(grid44_manager): + assert grid44_manager.max_crossing_order() == 0 + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.max_crossing_order() == 0 + + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert grid44_manager.max_crossing_order() == 1 + + +def test_path_container_clear(grid44_manager): + grid44_manager.paths = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values + grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values + + grid44_manager.clear_paths() + assert not grid44_manager.paths + assert not grid44_manager.crossings + assert grid44_manager.paths_stats + + grid44_manager.paths = { + 0: ([4, 5], [6, 7]), + 1: ([1, 5], [9, 13]), + 2: ([8, 9], [10, 11, 15]) + } + grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values + grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values + + grid44_manager.clear() + assert not grid44_manager.paths + assert not grid44_manager.crossings + assert not grid44_manager.paths_stats + + +def test_path_container_add_path(grid44_manager): + Crossing = _Crossing + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert not grid44_manager.try_add_path([4, 8, 12]) + assert not grid44_manager.try_add_path([0, 1, 2, 3, 7]) + assert not grid44_manager.try_add_path([1, 5, 6, 10]) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == 
{0: []} + + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 0: [Crossing(1, [5])], + 1: [Crossing(0, [5])] + } + + assert grid44_manager.try_add_path([10, 6, 9, 14, 15]) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [10, 6, 9, 14, 15] in grid44_manager.get_all_paths() + + crossings_overlap = [ + sorted([c.overlap[0] for c in crossing_list]) + for crossing_list in grid44_manager.crossings.values() + ] + + assert [6, 9] in crossings_overlap + assert [5, 9] in crossings_overlap + assert [5, 6] in crossings_overlap + + +def test_path_container_push_interaction(grid44_manager): + assert grid44_manager.push_interaction(4, 7) + assert grid44_manager.push_interaction(4, 7) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert grid44_manager.push_interaction(14, 15) + assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] + assert grid44_manager.crossings == {0: []} + + assert not grid44_manager.push_interaction(0, 4) + + +@pytest.mark.parametrize("enable_caching", [False, True]) +def test_path_container_push_interaction_alternative(grid44_manager, + enable_caching): + grid44_manager.enable_caching = enable_caching + interaction_list = [ + [(4, 7), (0, 12), False], + [(4, 7), (12, 0), True], + [(7, 4), (0, 12), False], + [(7, 4), (12, 0), True], + ] + + for inter1, inter2, may_fail in interaction_list: + grid44_manager.clear_paths() + assert grid44_manager.push_interaction(*inter1) + if may_fail: + if grid44_manager.push_interaction(*inter2): + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + else: + assert grid44_manager.push_interaction(*inter2) + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + + interaction_list = [ + [(4, 7), 
(15, 3)], + [(4, 7), (3, 15)], + [(7, 4), (15, 3)], + [(7, 4), (3, 15)], + ] + grid44_manager.clear() + for inter1, inter2 in interaction_list: + grid44_manager.clear_paths() + assert grid44_manager.push_interaction(*inter1) + assert grid44_manager.push_interaction(*inter2) + assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], + [7, 6, 5, 4]) + + +def test_path_container_remove_path(grid44_manager): + Crossing = _Crossing + + assert grid44_manager.try_add_path([4, 5, 6, 7]) + assert grid44_manager.try_add_path([1, 5, 9, 13]) + assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) + + with pytest.raises(KeyError): + grid44_manager.remove_path_by_id(10) + + grid44_manager.remove_path_by_id(0) + assert [4, 5, 6, 7] in grid44_manager.get_all_paths() + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 1: [Crossing(2, [5])], + 2: [Crossing(1, [5])] + } + + grid44_manager.remove_path_by_id(1) + assert [[1, 5, 9, 13]] == grid44_manager.get_all_paths() + assert grid44_manager.crossings == {2: []} + + assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) + assert [1, 5, 9, 13] in grid44_manager.get_all_paths() + assert [8, 9, 10, 11, 15] in grid44_manager.get_all_paths() + assert grid44_manager.crossings == { + 2: [Crossing(3, [9])], + 3: [Crossing(2, [9])] + } + + +def test_path_container_swap_paths(grid44_manager): + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + path_dict_ref = grid44_manager.paths + + with pytest.raises(KeyError): + grid44_manager.swap_paths(10, 0) + with pytest.raises(KeyError): + grid44_manager.swap_paths(0, 10) + + grid44_manager.swap_paths(0, 1) + path_dict_ref[0], path_dict_ref[1] = path_dict_ref[1], path_dict_ref[0] + assert grid44_manager.paths == path_dict_ref + + path_dict[3] = [20, 21, 6, 22, 23, 10, 24, 25] + assert 
grid44_manager.try_add_path(path_dict[3]) + assert path_dict[3] in grid44_manager.get_all_paths() + path_dict_ref = grid44_manager.paths + + grid44_manager.swap_paths(1, 3) + path_dict_ref[1], path_dict_ref[3] = path_dict_ref[3], path_dict_ref[1] + assert grid44_manager.paths == path_dict_ref + + +def test_path_grid44_manager_discard_paths(grid44_manager): + Crossing = _Crossing + path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} + for _, path in path_dict.items(): + assert grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + + path_dict_ref = grid44_manager.paths + grid44_manager.remove_crossing_of_order_higher_than(1) + assert grid44_manager.max_crossing_order() == 1 + assert grid44_manager.paths == path_dict_ref + assert grid44_manager.crossings == { + 0: [Crossing(2, [9])], + 1: [Crossing(2, [5])], + 2: [Crossing(1, [5]), Crossing(0, [9])] + } + + grid44_manager.remove_crossing_of_order_higher_than(0) + del path_dict_ref[1] + assert grid44_manager.max_crossing_order() == 0 + assert grid44_manager.paths == path_dict_ref + assert grid44_manager.crossings == {0: [], 1: []} + + +def test_path_container_find_first_order_intersections(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 10), (10, 11), (11, 12), (12, + 5)]) + graph.add_edges_from([(3, 1), (1, 4)]) + graph.add_edges_from([(5, 6), (6, 7)]) + graph.add_edges_from([(20, 6), (6, 21), (21, 22), (22, 23), (23, 24)]) + graph.add_edges_from([(30, 1), (1, 31), (31, 32)]) + graph.add_edges_from([(40, 23), (23, 41), (41, 42), (42, 43), (43, 44)]) + + Crossing = _Crossing + manager = PathManager(graph=graph, enable_caching=False) + + path_dict = {0: [0, 1, 2, 10, 11, 12], 1: [3, 1, 4], 2: [5, 6, 7]} + for _, path in path_dict.items(): + assert manager.try_add_path(path) + assert path in manager.get_all_paths() + + assert manager.crossings == { + 0: [Crossing(1, [1])], + 1: [Crossing(0, [1])], + 2: [] + } + assert 
_find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {1} + } + + manager.remove_path_by_id(0) + del path_dict[0] + path_dict[3] = [0, 1, 2, 10] + assert manager.try_add_path(path_dict[3]) + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + assert _find_first_order_intersections( + manager.crossings, manager.paths + ) == { + 1: {idx1}, + # would be 1: {idx1, idx3} if + # try_add_path was not also + # trying to solve the + # intersections while adding the + # paths + } + + path_dict[4] = [20, 6, 21, 22, 23, 24] + assert manager.try_add_path(path_dict[4]) + assert path_dict[4] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + path_dict[5] = [30, 1, 31, 32] + assert manager.try_add_path(path_dict[5]) + assert path_dict[5] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + path_dict[6] = [40, 23, 41, 42, 43, 44] + assert manager.try_add_path(path_dict[6]) + assert path_dict[6] in manager.get_all_paths() + idx1 = manager.get_all_paths().index(path_dict[1]) + 1 + idx2 = manager.get_all_paths().index(path_dict[2]) + 1 + assert _find_first_order_intersections(manager.crossings, + manager.paths) == { + 1: {idx1}, + 6: {idx2} + } + + +def test_path_container_no_intersection(grid44_manager): + path_dict = {0: [0, 1, 2, 3], 1: [5, 6, 7], 2: [4, 8, 9, 10, 11]} + for _, path in path_dict.items(): + assert grid44_manager.try_add_path(path) + assert path in grid44_manager.get_all_paths() + assert grid44_manager.generate_swaps() == [(0, 1), (3, 2), (7, 6), (4, 8), + (11, 10), (10, 9)] + + +def 
test_path_container_1_intersection_single_intersection(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (3, 1), (1, 4), (2, 10), (10, 11), + (11, 12)]) + + manager = PathManager(graph=graph) + + # 3 + # | + # 0 - 1 - 2 + # | 10 - 11 - 12 + # 4 + # NB: intersection at node 1 + ref_swaps = [ + [(0, 1), (12, 11)], + [(0, 1), (10, 11)], + [(2, 1), (12, 11)], + [(2, 1), (10, 11)], + [(3, 1), (12, 11)], + [(3, 1), (10, 11)], + [(4, 1), (12, 11)], + [(4, 1), (10, 11)], + ] + paths = [[0, 1, 2], [3, 1, 4]] + for path1, path2, in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert not manager.try_add_path(path2) + assert manager.try_add_path([10, 11, 12]) + assert manager.generate_swaps() in ref_swaps + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 1 + ref_swaps = [ + [(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (5, 1), (10, 11)], + [(0, 1), (1, 2), (5, 1), (12, 11)], + [(0, 1), (1, 2), (12, 11), (4, 1)], + [(0, 1), (1, 2), (12, 11), (4, 1)], + [(0, 1), (1, 2), (10, 11), (5, 1)], + [(0, 1), (1, 2), (12, 11), (5, 1)], + [(12, 11), (0, 1), (1, 2), (4, 1)], + [(12, 11), (0, 1), (1, 2), (4, 1)], + [(10, 11), (0, 1), (1, 2), (5, 1)], + [(12, 11), (0, 1), (1, 2), (5, 1)], + ] + paths = [[0, 1, 2, 3], [4, 1, 5], [10, 11, 12]] + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + # 4 + # | + # 0 - 1 - 2 - 3 + # | 10 - 11 - 12 + # 5 + # NB: intersection at node 2 + ref_swaps = [ + [(3, 2), (2, 1), (4, 2), (12, 11)], + [(3, 2), (2, 1), (4, 2), (12, 11)], + [(3, 2), (2, 1), (5, 2), (10, 11)], + [(3, 2), (2, 1), (5, 2), (12, 11)], + [(3, 2), (2, 1), (12, 11), (4, 2)], + [(3, 2), (2, 1), (12, 11), (4, 2)], + [(3, 2), (2, 1), (10, 11), (5, 2)], + [(3, 2), (2, 1), 
(12, 11), (5, 2)], + [(12, 11), (3, 2), (2, 1), (4, 2)], + [(12, 11), (3, 2), (2, 1), (4, 2)], + [(10, 11), (3, 2), (2, 1), (5, 2)], + [(12, 11), (3, 2), (2, 1), (5, 2)], + ] + paths = [[0, 1, 2, 3], [4, 2, 5], [10, 11, 12]] + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + # 9 + # | + # 0 - 1 - 2 - 3 - 4 - 5 + # | + # 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), + (1, 10), (10, 11), (5, 6), (6, 7), (7, 8)]) + manager = PathManager(graph=graph) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([9, 1, 10, 11]) + assert manager.try_add_path([6, 7, 8]) + assert manager.generate_swaps() == [(0, 1), (1, 2), (5, 4), (4, 3), (9, 1), + (11, 10), (8, 7)] + + +def test_path_container_1_intersection_double_crossing_long_right(): + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # NB: intersection at node 2 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 2), (2, + 8), + (7, 4), (4, 9), (9, 10), (10, 11), (11, 12)]) + manager = PathManager(graph=graph) + + ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), + (5, 4), (8, 2)] + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + + manager.clear() + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(5, 4), (4, 3), (3, 2), (2, 1), (7, 4), (4, 9), (12, 11), + (11, 10), (8, 2)] + manager.clear() + assert 
manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([6, 2, 8]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (5, 4), (4, 3), (3, 2), + (2, 1), (8, 2)] + manager.clear() + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 2, 8]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 4, 9, 10, 11, 12]) + assert manager.try_add_path([6, 2, 8]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + +def test_path_container_1_intersection_double_crossing_long_left(): + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 - 5 + # | | + # 8 9 + # | + # 10 + # | + # 11 + # | + # 12 + # NB: intersection at node 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 1), (1, + 8), + (8, 10), (10, 11), (11, 12), (7, 3), (3, 9)]) + manager = PathManager(graph=graph) + + ref_swaps = [(0, 1), (1, 2), (2, 3), (3, 4), (6, 1), (1, 8), (12, 11), + (11, 10), (9, 3)] + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([7, 3, 9]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (5, 4), (4, 3), + (3, 2), (9, 3)] + manager.clear() + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 3, 9]) + 
assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.generate_swaps() == ref_swaps + + ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), + (3, 4), (9, 3)] + manager.clear() + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([7, 3, 9]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.generate_swaps() == ref_swaps + manager.clear() + assert manager.try_add_path([6, 1, 8, 10, 11, 12]) + assert manager.try_add_path([0, 1, 2, 3, 4, 5]) + assert manager.try_add_path([7, 3, 9]) + assert manager.generate_swaps() == ref_swaps + + +def test_path_container_1_intersection_double_crossing_delete_path(): + # 4 5 4 5 + # | | | | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 or 0 - 1 - 2 - 3 + # | | | | + # 6 7 6 7 + # NB: intersection at nodes 1 & 2 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (5, 2), (2, + 7)]) + ref_swaps = [ + [(0, 1), (1, 2), (6, 1)], + [(0, 1), (1, 2), (4, 1)], + ] + + manager = PathManager(graph=graph) + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 6]) + assert not manager.try_add_path([5, 2, 7]) + assert manager.generate_swaps() in ref_swaps + + ref_swaps = [ + [(3, 2), (2, 1), (7, 2)], + [(3, 2), (2, 1), (5, 2)], + ] + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([5, 2, 7]) + assert not manager.try_add_path([4, 1, 6]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_double_crossing_delete_path2(): + # 5 6 6 + # | | | + # 0 - 1 - 2 - 3 - 4 -> 0 - 1 - 2 - 3 - 4 + # | | | + # 7 8 8 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 1), (1, 7), (6, + 3), 
+ (3, 8)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (4, 3), (7, 1)], + [(0, 1), (1, 2), (4, 3), (5, 1)], + [(0, 1), (4, 3), (3, 2), (8, 3)], + [(0, 1), (4, 3), (3, 2), (6, 3)], + ] + + assert manager.try_add_path([0, 1, 2, 3, 4]) + assert manager.try_add_path([5, 1, 7]) + assert not manager.try_add_path([6, 3, 8]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3, 4]) + assert manager.try_add_path([6, 3, 8]) + assert not manager.try_add_path([5, 1, 7]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_double_crossing_neighbouring_nodes(): + # 5 + # | + # 6 7 + # | | + # 0 - 1 - 2 - 3 - 4 + # | | + # 8 9 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 1), (1, + 8), + (7, 2), (2, 9)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (9, 2)], + [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (9, 2)], + [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (9, 2)], + [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (9, 2)], + [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (7, 2)], + [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (7, 2)], + [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (7, 2)], + [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (7, 2)], + [(0, 1), (1, 2), (2, 3), (9, 2), (8, 1), (1, 6)], + [(0, 1), (1, 2), (2, 3), (9, 2), (5, 6), (8, 1)], + [(8, 1), (1, 6), (4, 3), (9, 2), (3, 2), (2, 1)], + [(8, 1), (1, 6), (0, 1), (9, 2), (1, 2), (2, 3)], + [(0, 1), (1, 2), (2, 3), (7, 2), (8, 1), (1, 6)], + [(0, 1), (1, 2), (2, 3), (7, 2), (5, 6), (8, 1)], + [(8, 1), (1, 6), (4, 3), (7, 2), (3, 2), (2, 1)], + [(8, 1), (1, 6), (0, 1), (7, 2), (1, 2), (2, 3)], + ] + + paths = [[0, 1, 2, 3, 4], [5, 6, 1, 8], [7, 2, 9]] + + for path1, path2, path3 in itertools.permutations(paths): + manager.clear() + assert manager.try_add_path(path1) + assert 
manager.try_add_path(path2) + assert manager.try_add_path(path3) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_triple_crossing(): + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), + (1, 10), (10, 11), (12, 1), (1, 13), (13, 14), + (14, 15), (5, 6), (6, 7), (7, 8)]) + manager = PathManager(graph=graph) + + # 9 13 - 14 - 15 + # | / + # 0 - 1 - 2 - 3 - 4 - 5 + # / | + # 12 10 6 - 7 - 8 + # | + # 11 + # NB: intersection at node 1 + manager.clear() + paths = [[9, 1, 10, 11], [0, 1, 2, 3, 4, 5], [6, 7, 8], + [12, 1, 13, 14, 15, 16]] + for path in paths: + assert manager.try_add_path(path) + + paths[3], paths[0], paths[1] \ + = paths[0], paths[1], paths[3] + assert manager.get_all_paths() == paths + + manager.clear() + paths = [[0, 1, 2, 3, 4, 5], [9, 1, 10, 11], [6, 7, 8], + [12, 1, 13, 14, 15, 16]] + for path in paths: + assert manager.try_add_path(path) + + paths[3], paths[1] \ + = paths[1], paths[3] + assert manager.get_all_paths() == paths + + # 4 5 10 - 11 - 12 4 10 - 11 - 12 + # | / | + # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 + # / | | + # 6 7 7 + # NB: intersection at node 1 + ref_swaps = [[(0, 1), (1, 2), (4, 1), (12, 11)], + [(0, 1), (1, 2), (4, 1), (10, 11)], + [(0, 1), (1, 2), (7, 1), (12, 11)], + [(0, 1), (1, 2), (7, 1), (10, 11)]] + manager.clear() + paths = [[0, 1, 2, 3], [4, 1, 7], [10, 11, 12], [5, 1, 6]] + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 7]) + assert manager.try_add_path([10, 11, 12]) + assert not manager.try_add_path([5, 1, 6]) + assert manager.generate_swaps() in ref_swaps + + +def test_path_container_1_intersection_triple_crossing_complex(): + # 4 + # | + # 0 - 1 - 2 - 3 + # | + # 5 - 6 - 7 + # | + # 8 + # NB: intersection at nodes 1 & 3 + graph = nx.Graph() + graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (6, 8), (5, + 6), + (6, 7)]) + manager = PathManager(graph=graph) + + ref_swaps = [ + [(0, 1), (1, 2), (4, 
1), (8, 6)], + [(0, 1), (1, 2), (4, 1), (1, 6)], + [(4, 1), (1, 6), (0, 1), (1, 2)], + [(0, 1), (3, 2), (4, 1), (1, 6)], + [(4, 1), (8, 6), (0, 1), (3, 2)], + [(4, 1), (1, 6), (0, 1), (3, 2)], + ] + + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([4, 1, 6, 8]) + assert not manager.try_add_path([5, 6, 7]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([4, 1, 6, 8]) + assert manager.try_add_path([0, 1, 2, 3]) + assert not manager.try_add_path([5, 6, 7]) + assert manager.generate_swaps() in ref_swaps + + ref_swaps = [ + [(0, 1), (1, 2), (8, 6), (6, 1), (5, 6)], + [(0, 1), (1, 2), (8, 6), (6, 1), (7, 6)], + ] + + manager.clear() + assert manager.try_add_path([4, 1, 6, 8]) + assert manager.try_add_path([5, 6, 7]) + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.generate_swaps() in ref_swaps + + manager.clear() + assert manager.try_add_path([0, 1, 2, 3]) + assert manager.try_add_path([5, 6, 7]) + + # With some modification to PathManager, this next line could be made not + # to fail adding the path. + # This would require the intersection resolving algorithm to allow the + # creation of a new intersection for the path currently being added but not + # for any other stored path. + # (ie. 
allowing the [4], [1, 6, 8] path split, although now 1 is an + # intersection for the new path) + assert not manager.try_add_path([4, 1, 6, 8]) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 83e723c0e..534b68e91 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -25,83 +25,13 @@ import math import random -import networkx as nx from projectq.cengines import (BasicMapperEngine, return_swap_depth) from projectq.meta import LogicalQubitIDTag from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, FlushGate, Swap) from projectq.types import WeakQubitRef -from projectq.cengines._graph_path_container import PathContainer - -# ============================================================================== - - -class PathCacheExhaustive(): - """ - Class acting as cache for optimal paths through the graph. - """ - - def __init__(self, path_length_threshold): - self._path_length_threshold = path_length_threshold - self._cache = {} - self.key_type = frozenset - - def __str__(self): - s = "" - for (node0, node1), path in self._cache.items(): - s += "{}: {}\n".format(sorted([node0, node1]), path) - return s - - def empty_cache(self): - """Empty the cache.""" - self._cache = {} - - def get_path(self, start, end): - """ - Return a path from the cache. - - Args: - start (object): Start node for the path - end (object): End node for the path - - Returns: Optimal path stored in cache - - Raises: KeyError if path is not present in the cache - """ - return self._cache[self.key_type((start, end))] - - def has_path(self, start, end): - """ - Test whether a path connecting start to end is present in the cache. - - Args: - start (object): Start node for the path - end (object): End node for the path - - Returns: True/False - """ - return self.key_type((start, end)) in self._cache - - def add_path(self, path): - """ - Add a path to the cache. 
- - This method also recursively adds all the subpaths that are at least - self._path_length_threshold long to the cache. - - Args: - path (list): Path to store inside the cache - """ - length = len(path) - for start in range(length - self._path_length_threshold + 1): - node0 = path[start] - for incr in range(length - start - 1, - self._path_length_threshold - 2, -1): - end = start + incr - self._cache[self.key_type((node0, - path[end]))] = path[start:end + 1] - +from projectq.cengines._graph_path_manager import PathManager # ============================================================================== @@ -166,10 +96,7 @@ class GraphMapper(BasicMapperEngine): Attributes: current_mapping: Stores the mapping: key is logical qubit id, value is mapped qubit id from 0,...,self.num_qubits - graph (networkx.Graph): Arbitrary connected graph storage (int): Number of gate it caches before mapping. - enable_caching(Bool): Controls whether optimal path caching is - enabled num_qubits(int): number of qubits num_mappings (int): Number of times the mapper changed the mapping depth_of_swaps (dict): Key are circuit depth of swaps, value is the @@ -201,27 +128,15 @@ def __init__(self, graph (networkx.Graph): Arbitrary connected graph representing Qubit connectivity storage (int): Number of gates to temporarily store - enable_caching (Bool): Controls whether optimal path caching is - enabled + enable_caching (Bool): Controls whether path caching is enabled Raises: RuntimeError: if the graph is not a connected graph """ BasicMapperEngine.__init__(self) - # Make sure that we start with a valid graph - if not nx.is_connected(graph): - raise RuntimeError("Input graph must be a connected graph") - elif not all([isinstance(n, int) for n in graph]): - raise RuntimeError( - "All nodes inside the graph needs to be integers") - else: - self.graph = graph - self.num_qubits = self.graph.number_of_nodes() + self.paths = PathManager(graph, enable_caching) + self.num_qubits = 
graph.number_of_nodes() self.storage = storage - self.enable_caching = enable_caching - # Path cache support - path_length_threshold = 3 - self._path_cache = PathCacheExhaustive(path_length_threshold) # Randomness to pick permutations if there are too many. # This creates an own instance of Random in order to not influence # the bound methods of the random module which might be used in other @@ -243,7 +158,6 @@ def __init__(self, self.num_mappings = 0 self.depth_of_swaps = dict() self.num_of_swaps_per_mapping = dict() - self.paths_stats = dict() @property def current_mapping(self): @@ -289,11 +203,14 @@ def _process_commands(self): Returns: A list of paths through the graph to move some qubits and have them interact """ - paths = PathContainer() not_in_mapping_qubits = set() allocated_qubits = deepcopy(self._currently_allocated_ids) active_qubits = deepcopy(self._currently_allocated_ids) + # Always start from scratch again + # (does not reset cache or path statistics) + self.paths.clear_paths() + for cmd in self._stored_commands: if (len(allocated_qubits) == self.num_qubits and not active_qubits): @@ -334,78 +251,20 @@ def _process_commands(self): else: if not_in_mapping_qubits: self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.graph, + self._current_mapping, self.paths.graph, not_in_mapping_qubits, self._stored_commands) not_in_mapping_qubits = set() - if not self._process_two_qubit_gate_dumb( - qubit0=qubit_ids[0], qubit1=qubit_ids[1], - paths=paths): + if not self.paths.push_interaction( + self._current_mapping[qubit_ids[0]], + self._current_mapping[qubit_ids[1]]): break if not_in_mapping_qubits: self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.graph, not_in_mapping_qubits, + self._current_mapping, self.paths.graph, not_in_mapping_qubits, self._stored_commands) - return paths - - def _process_two_qubit_gate_dumb(self, qubit0, qubit1, paths): - """ - Process a two qubit gate. 
- - It either removes the two qubits from active_qubits if the gate is - not possible or generate an optimal path through the graph connecting - the two qubits. - - Args: - qubit0 (int): qubit.id of one of the qubits - qubit1 (int): qubit.id of the other qubit - - Returns: A path through the graph (can be empty) - """ - # Path is given using graph nodes (ie. mapped ids) - # If we come here, the two nodes can't be connected on the graph or the - # command would have been applied already - node0 = self._current_mapping[qubit0] - node1 = self._current_mapping[qubit1] - - if paths.has_interaction(node0, node1): - return True - - # Qubits are both active but not connected via an edge - if self.enable_caching: - if self._path_cache.has_path(node0, node1): - path = self._path_cache.get_path(node0, node1) - elif self.graph.has_edge(node0, node1): - path = [node0, node1] - else: - path = nx.shortest_path(self.graph, source=node0, target=node1) - self._path_cache.add_path(path) - else: - if self.graph.has_edge(node0, node1): - path = [node0, node1] - else: - path = nx.shortest_path(self.graph, source=node0, target=node1) - - if path: - # Makes sure that one qubit will interact with at most one other - # qubit before forcing the generation of a swap - # Also makes sure that path intersection (if any) are possible - if not paths.try_add_path(path): - return False - - interaction = frozenset((node0, node1)) - if interaction not in self.paths_stats: - self.paths_stats[interaction] = 1 - else: - self.paths_stats[interaction] += 1 - return True - - # Technically, since the graph is connected, we should always be able - # to find a path between any two nodes. But just in case... - return False # pragma: no cover - def _send_possible_commands(self): """ Send the stored commands possible without changing the mapping. 
@@ -424,10 +283,10 @@ def _send_possible_commands(self): break if isinstance(cmd.gate, AllocateQubitGate): if cmd.qubits[0][0].id in self._current_mapping: - self._currently_allocated_ids.add(cmd.qubits[0][0].id) qb0 = WeakQubitRef( engine=self, idx=self._current_mapping[cmd.qubits[0][0].id]) + self._currently_allocated_ids.add(cmd.qubits[0][0].id) self.send([ Command( engine=self, @@ -466,7 +325,7 @@ def _send_possible_commands(self): # Check that mapped ids are connected by an edge on the graph if len(backend_ids) == 2: - send_gate = self.graph.has_edge(*list(backend_ids)) + send_gate = self.paths.graph.has_edge(*list(backend_ids)) if send_gate: self._send_cmd_with_mapped_ids(cmd) @@ -493,13 +352,13 @@ def _run(self): # Go through the command list and generate a list of paths. # At the same time, add soon-to-be-allocated qubits to the mapping - paths = self._process_commands() + self._process_commands() self._send_possible_commands() if not self._stored_commands: return - swaps = paths.generate_swaps() + swaps = self.paths.generate_swaps() if swaps: # first mapping requires no swaps backend_ids_used = { @@ -510,7 +369,7 @@ def _run(self): # Get a list of the qubits we need to allocate just to perform the # swaps not_allocated_ids = set( - paths.get_all_nodes()).difference(backend_ids_used) + self.paths.get_all_nodes()).difference(backend_ids_used) # Calculate temporary internal reverse mapping new_internal_mapping = deepcopy(self._reverse_current_mapping) @@ -531,6 +390,14 @@ def _run(self): # to the temporary internal reverse mapping with invalid ids new_internal_mapping[backend_id] = -1 + # Calculate reverse internal mapping + new_internal_mapping = deepcopy(self._reverse_current_mapping) + + # Add missing entries with invalid id to be able to process the + # swaps operations + for backend_id in not_allocated_ids: + new_internal_mapping[backend_id] = -1 + # Send swap operations to arrive at the new mapping for bqb0, bqb1 in swaps: qb0 = 
WeakQubitRef(engine=self, idx=bqb0) @@ -624,36 +491,39 @@ def __str__(self): """ depth_of_swaps_str = "" - for depth_of_swaps, num_mapping in self.depth_of_swaps.items(): + for depth_of_swaps, num_mapping in sorted(self.depth_of_swaps.items()): depth_of_swaps_str += "\n {:3d}: {:3d}".format( depth_of_swaps, num_mapping) num_swaps_per_mapping_str = "" for num_swaps_per_mapping, num_mapping \ - in self.num_of_swaps_per_mapping.items(): + in sorted(self.num_of_swaps_per_mapping.items(), + key=lambda x: x[1], reverse=True): num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( num_swaps_per_mapping, num_mapping) interactions = [ k for _, k in sorted( - zip(self.paths_stats.values(), self.paths_stats.keys()), + zip(self.paths.paths_stats.values(), + self.paths.paths_stats.keys()), reverse=True) ] - max_width = math.ceil(math.log10(max(self.paths_stats.values()))) + 1 + max_width = int(math.ceil( + math.log10(max(self.paths.paths_stats.values()))) + 1) paths_stats_str = "" - if self.enable_caching: + if self.paths.enable_caching: for k in interactions: - if self.graph.has_edge(*list(k)): + if self.paths.graph.has_edge(*list(k)): path = list(k) else: - path = self._path_cache.get_path(*list(k)) + path = self.paths.cache.get_path(*list(k)) paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( - self.paths_stats[k], max_width, path, *k) + self.paths.paths_stats[k], max_width, path, *k) else: for k in interactions: paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( - self.paths_stats[k], max_width, *k) + self.paths.paths_stats[k], max_width, *k) return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + "Number of swaps per mapping:{}\n\n" + diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 7ac1cc8ee..55c6e4f5b 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -15,11 +15,9 @@ from copy import deepcopy import itertools -import random import pytest import 
networkx as nx -import projectq from projectq.cengines import DummyEngine, LocalOptimizer, MainEngine from projectq.meta import LogicalQubitIDTag from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, @@ -121,45 +119,6 @@ def simple_mapper(simple_graph): # ============================================================================== -def test_path_cache_exhaustive(): - path_length_threshold = 3 - cache = graphm.PathCacheExhaustive(path_length_threshold) - - assert not cache._cache - cache.add_path(['a', 'b', 'c']) - assert cache._cache == {cache.key_type(('a', 'c')): ['a', 'b', 'c']} - - assert cache.has_path('a', 'c') - assert not cache.has_path('a', 'b') - assert not cache.has_path('b', 'c') - - cache.empty_cache() - assert not cache._cache - - cache.add_path(['a', 'b', 'c', 'd']) - assert cache._cache == { - cache.key_type(('a', 'c')): ['a', 'b', 'c'], - cache.key_type(('a', 'd')): ['a', 'b', 'c', 'd'], - cache.key_type(('b', 'd')): ['b', 'c', 'd'] - } - assert cache.has_path('a', 'd') - assert cache.has_path('d', 'a') - assert cache.has_path('a', 'c') - assert cache.has_path('b', 'd') - assert not cache.has_path('a', 'b') - assert not cache.has_path('b', 'a') - assert not cache.has_path('b', 'c') - assert not cache.has_path('c', 'd') - - str_repr = str(cache) - assert str_repr.count("['a', 'd']: ['a', 'b', 'c', 'd']") == 1 - assert str_repr.count("['a', 'c']: ['a', 'b', 'c']") == 1 - assert str_repr.count("['b', 'd']: ['b', 'c', 'd']") == 1 - - -# ============================================================================== - - def test_is_available(simple_graph): mapper = graphm.GraphMapper(graph=simple_graph) qb0 = WeakQubitRef(engine=None, idx=0) @@ -213,42 +172,6 @@ def test_run_infinite_loop_detection(simple_mapper): mapper.receive([cmd0, cmd_flush]) -def test_valid_and_invalid_graphs(simple_graph, grid33_graph): - graph = nx.Graph() - graph.add_nodes_from('abcd') - with pytest.raises(RuntimeError): - 
graphm.GraphMapper(graph=graph) - - graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) - with pytest.raises(RuntimeError): - graphm.GraphMapper(graph=graph) - - graph = deepcopy(simple_graph) - graph.remove_edge(0, 1) - with pytest.raises(RuntimeError): - graphm.GraphMapper(graph=graph) - - mapper = graphm.GraphMapper(graph=simple_graph) - backend = DummyEngine(save_commands=True) - backend.is_last_engine = True - mapper.next_engine = backend - qb, allocate_cmds = allocate_all_qubits_cmd(mapper) - mapper._stored_commands = allocate_cmds - mapper._run() - assert not mapper._send_possible_commands() - assert mapper.current_mapping == dict(enumerate(range(len(simple_graph)))) - - mapper = graphm.GraphMapper(graph=grid33_graph) - backend = DummyEngine(save_commands=True) - backend.is_last_engine = True - mapper.next_engine = backend - qb, allocate_cmds = allocate_all_qubits_cmd(mapper) - mapper._stored_commands = allocate_cmds - mapper._run() - assert not mapper._send_possible_commands() - assert mapper.current_mapping == dict(enumerate(range(len(grid33_graph)))) - - def test_resetting_mapping_to_none(simple_graph): mapper = graphm.GraphMapper(graph=simple_graph) mapper.current_mapping = {0: 1} @@ -356,7 +279,7 @@ def test_send_possible_commands_allocation_no_active_qubits( mapper._run() assert len(mapper._stored_commands) == 8 # NB: after swap, can actually send Deallocate to qb0 - assert mapper._stored_commands[0:6] == cmd_list[4:10] + assert mapper._stored_commands[:6] == cmd_list[4:10] assert mapper._stored_commands[6] == cmd_list[11] @@ -500,17 +423,26 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): all_cmds[3] = cmd mapper._stored_commands = all_cmds - print([str(cmd) for cmd in all_cmds]) mapper._run() assert mapper.num_mappings == 1 if mapper.current_mapping[2] == 2: # qb[2] has not moved, all_cmds[5] is possible - assert mapper._stored_commands == all_cmds[6:] - assert mapper.current_mapping == {0: 1, 1: 0, 2: 2, 3: 3} + assert 
mapper._stored_commands == all_cmds[-4:] + assert mapper.current_mapping == { + 0: 1, + 1: 0, + 2: 2, + 3: 3, + } else: # qb[2] moved, all_cmds[5] not possible - assert mapper._stored_commands == all_cmds[5:] - assert mapper.current_mapping == {0: 0, 1: 2, 2: 1, 3: 3} + assert mapper._stored_commands == [all_cmds[5]] + all_cmds[-4:] + assert mapper.current_mapping == { + 0: 0, + 1: 2, + 2: 1, + 3: 3, + } def test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): @@ -783,19 +715,19 @@ def test_3x3_grid_multiple_simultaneous_non_intersecting_paths( } if enable_caching: - assert mapper._path_cache._cache - assert mapper._path_cache.has_path(0, 6) - assert mapper._path_cache.has_path(1, 7) - assert mapper._path_cache.has_path(2, 8) - assert mapper._path_cache.has_path(0, 2) - assert mapper._path_cache.has_path(3, 5) - assert mapper._path_cache.has_path(6, 8) - assert not mapper._path_cache.has_path(0, 1) - assert not mapper._path_cache.has_path(1, 2) - assert not mapper._path_cache.has_path(3, 4) - assert not mapper._path_cache.has_path(4, 5) - assert not mapper._path_cache.has_path(6, 7) - assert not mapper._path_cache.has_path(7, 8) + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(0, 6) + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(2, 8) + assert mapper.paths.cache.has_path(0, 2) + assert mapper.paths.cache.has_path(3, 5) + assert mapper.paths.cache.has_path(6, 8) + assert not mapper.paths.cache.has_path(0, 1) + assert not mapper.paths.cache.has_path(1, 2) + assert not mapper.paths.cache.has_path(3, 4) + assert not mapper.paths.cache.has_path(4, 5) + assert not mapper.paths.cache.has_path(6, 7) + assert not mapper.paths.cache.has_path(7, 8) @pytest.mark.parametrize("enable_caching", [False, True]) @@ -844,9 +776,9 @@ def test_3x3_grid_multiple_simultaneous_intersecting_paths_impossible( } if enable_caching: - assert mapper._path_cache._cache - assert mapper._path_cache.has_path(1, 7) - assert 
mapper._path_cache.has_path(3, 5) + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(3, 5) mapper.current_mapping = dict(enumerate(range(len(qb)))) @@ -858,10 +790,10 @@ def test_3x3_grid_multiple_simultaneous_intersecting_paths_impossible( assert mapper.depth_of_swaps == {1: 4} if enable_caching: - assert mapper._path_cache._cache - assert mapper._path_cache.has_path(1, 7) - assert mapper._path_cache.has_path(3, 5) - assert mapper._path_cache.has_path(1, 8) + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(1, 7) + assert mapper.paths.cache.has_path(3, 5) + assert mapper.paths.cache.has_path(1, 8) @pytest.mark.parametrize("enable_caching", [False, True]) @@ -913,9 +845,9 @@ def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( } if enable_caching: - assert mapper._path_cache._cache - assert mapper._path_cache.has_path(0, 7) - assert mapper._path_cache.has_path(3, 5) + assert mapper.paths.cache._cache + assert mapper.paths.cache.has_path(0, 7) + assert mapper.paths.cache.has_path(3, 5) @pytest.mark.parametrize("enable_caching", [False, True]) @@ -946,7 +878,6 @@ def test_mapper_to_str(simple_graph, enable_caching): assert str_repr.count("3: 1") == 1 assert str_repr.count(" 0 - 6: 1") == 1 assert str_repr.count(" 0 - 3: 1") == 1 - assert str_repr.count(" 4 - 6: 1") == 1 sent_gates = [cmd.gate for cmd in backend.received_commands] assert sent_gates.count(H) == 1 From 77711be307ea4daa72f56e0fa4a324a459269ed3 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 4 Mar 2019 10:30:09 +0100 Subject: [PATCH 09/25] Improve initial qubit placement --- projectq/cengines/_command_list.py | 136 ++++++++++++ projectq/cengines/_command_list_test.py | 281 ++++++++++++++++++++++++ projectq/cengines/_graphmapper.py | 262 +++++++++++++++++++--- projectq/cengines/_graphmapper_test.py | 224 +++++++++++++++++-- 4 files changed, 862 insertions(+), 41 deletions(-) create mode 100644 
projectq/cengines/_command_list.py create mode 100644 projectq/cengines/_command_list_test.py diff --git a/projectq/cengines/_command_list.py b/projectq/cengines/_command_list.py new file mode 100644 index 000000000..7685340ca --- /dev/null +++ b/projectq/cengines/_command_list.py @@ -0,0 +1,136 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This is a helper module for the _graphmapper.GraphMapper class. +""" + +from copy import deepcopy +import networkx as nx + +# ============================================================================== + + +class CommandList(): + """Class used to manage a list of ProjectQ commands""" + + def __init__(self): + self._cmds = [] + self.partitions = [set()] + self.interactions = [[]] + + def __len__(self): + return len(self._cmds) + + def __iadd__(self, other): + self.extend(other) + return self + + def __iter__(self): + return self._cmds.__iter__() + + def __getitem__(self, key): + return self._cmds[key] + + def __eq__(self, other): + if isinstance(other, list): + return self._cmds == other + if isinstance(other, CommandList): + return self._cmds == other._cmds + raise NotImplementedError() + + @property + def stored_commands(self): + """ + Simple getter. + """ + return deepcopy(self._cmds) + + def clear(self): + """ + Remove all commands from the container. 
+ """ + self._cmds.clear() + self.partitions = [set()] + self.interactions = [[]] + + def append(self, cmd): + """ + Append a command to the end of the container. + """ + self._cmds.append(cmd) + + qubit_ids = {qubit.id for qureg in cmd.all_qubits for qubit in qureg} + if len(qubit_ids) > 1: + # Add new partition if any qubit ids are already present in the + # current partition + if self.partitions[-1] \ + and self.partitions[-1] & qubit_ids: + self.partitions.append(set()) + self.interactions.append([]) + self.partitions[-1] |= qubit_ids + self.interactions[-1].append(tuple(sorted(qubit_ids))) + + def extend(self, iterable): + """ + Extend container by appending commands from the iterable. + """ + for cmd in iterable: + self.append(cmd) + + # -------------------------------------------------------------------------- + + def calculate_qubit_interaction_subgraphs(self, order=2): + """ + Calculate qubits interaction graph based on all commands stored. + + While iterating through the partitions, we create a graph whose + vertices are logical qubit IDs and where edges represent an interaction + between qubits. + Additionally, we make sure that the resulting graph has no vertices + with degree higher than a specified threshold. + + Args: + order (int): maximum degree of the nodes in the resulting graph + + Returns: + A list of list of graph nodes corresponding to all the connected + components of the qubit interaction graph. Within each components, + nodes are sorted in decreasing order of their degree. + + Note: + The current implementation is really aimed towards handling + two-qubit gates but should also work with higher order qubit gates. 
+ """ + graph = nx.Graph() + for timestep in self.interactions: + for interaction in timestep: + for prev, cur in zip(interaction, interaction[1:]): + if prev not in graph \ + or cur not in graph \ + or (len(graph[prev]) < order + and len(graph[cur]) < order): + graph.add_edge(prev, cur) + + # Return value is a list of list of nodes corresponding to a list of + # connected components of the intial graph sorted by their order + # Each connected component is sorted in decreasing order by the degree + # of each node in the graph + return [ + sorted( + graph.subgraph(g), key=lambda n: len(graph[n]), reverse=True) + for g in sorted( + nx.connected_components(graph), + key=lambda c: (max(len(graph[n]) for n in c), len(c)), + reverse=True) + ] diff --git a/projectq/cengines/_command_list_test.py b/projectq/cengines/_command_list_test.py new file mode 100644 index 000000000..86eb1ea57 --- /dev/null +++ b/projectq/cengines/_command_list_test.py @@ -0,0 +1,281 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for projectq.cengines._command_list.py.""" + +from projectq.cengines._command_list import CommandList + +from copy import deepcopy +import pytest +from projectq.ops import (Allocate, Command, X) +from projectq.types import WeakQubitRef + +# ============================================================================== + + +def allocate_all_qubits_cmd(num_qubits): + qb = [] + allocate_cmds = [] + for i in range(num_qubits): + qb.append(WeakQubitRef(engine=None, idx=i)) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) + return qb, allocate_cmds + + +# ============================================================================== + + +@pytest.fixture +def command_list(): + return CommandList() + + +# ============================================================================== + + +def test_empty_command_list(command_list): + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + + +def test_append_single_qubit_gate(command_list): + assert not command_list + + qb0 = WeakQubitRef(engine=None, idx=0) + cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) + command_list.append(cmd0) + assert command_list._cmds == [cmd0] + assert command_list.interactions == [[]] + + cmd1 = Command(engine=None, gate=X, qubits=([qb0], )) + command_list.append(cmd1) + assert command_list._cmds == [cmd0, cmd1] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + assert command_list + command_list.clear() + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + +def test_append_two_qubit_gate(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + for cmd in allocate_cmds: + command_list.append(cmd) + assert command_list._cmds == allocate_cmds + assert command_list.partitions == [set()] + assert command_list.interactions == 
[[]] + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + command_list.append(cmd0) + assert command_list._cmds == allocate_cmds + [cmd0] + assert command_list.partitions == [{0, 1}] + assert command_list.interactions == [[(0, 1)]] + + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + command_list.append(cmd1) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1] + assert command_list.partitions == [{0, 1, 2, 3}] + assert command_list.interactions == [[(0, 1), (2, 3)]] + + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + command_list.append(cmd2) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2] + assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] + assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] + + assert command_list + command_list.clear() + assert not command_list + assert command_list._cmds == [] + assert command_list.partitions == [set()] + assert command_list.interactions == [[]] + + +def test_extend(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + assert command_list._cmds == allocate_cmds + assert command_list.partitions == [set()] + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] + assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] + assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] + + +def test_iadd(): + command_list_ref = CommandList() + command_list = CommandList() + assert not command_list + assert not command_list_ref + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + 
command_list_ref.extend(allocate_cmds) + command_list += allocate_cmds + + assert command_list._cmds == command_list_ref._cmds + assert command_list.partitions == command_list_ref.partitions + assert command_list.interactions == command_list_ref.interactions + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list_ref.extend((cmd0, cmd1, cmd2, cmd3)) + command_list += (cmd0, cmd1, cmd2, cmd3) + assert command_list._cmds == command_list_ref._cmds + assert command_list.partitions == command_list_ref.partitions + assert command_list.interactions == command_list_ref.interactions + + +def test_iter(command_list): + assert not command_list + + for cmd in command_list: + raise RuntimeError('ERROR') + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + for cmd, cmd_ref in zip(command_list, command_list.stored_commands): + assert cmd == cmd_ref + + +def test_getitem(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + ref_list = allocate_cmds + [cmd0, cmd1, 
cmd2, cmd3] + for i in range(len(command_list)): + assert command_list[i] == ref_list[i] + + assert command_list[4:] == ref_list[4:] + + +def test_eq(command_list): + assert not command_list + qb, allocate_cmds = allocate_all_qubits_cmd(4) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + with pytest.raises(NotImplementedError): + assert command_list == 2 + with pytest.raises(NotImplementedError): + assert command_list == 2. + with pytest.raises(NotImplementedError): + assert command_list == 'asr' + + assert command_list == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] + assert command_list != allocate_cmds + + other_list = deepcopy(command_list) + assert command_list == other_list + other_list.append(cmd0) + assert command_list != other_list + + +def test_generate_qubit_interaction_graph(command_list): + assert not command_list + + qb, allocate_cmds = allocate_all_qubits_cmd(9) + command_list.extend(allocate_cmds) + + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + command_list.extend((cmd0, cmd1, cmd2, cmd3)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + assert len(subgraphs) == 1 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + cmd4 = Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]) + cmd5 = 
Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) + command_list.extend((cmd4, cmd5)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert subgraphs[1] in ([5, 4, 6], [5, 6, 4]) + + # -------------------------------------------------------------------------- + + cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) + cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) + command_list.extend((cmd6, cmd7)) + + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 5 + assert all([n in subgraphs[0] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[0][-2:] in ([4, 8], [8, 4]) + assert len(subgraphs[1]) == 4 + assert all([n in subgraphs[1] for n in [0, 1, 2, 3]]) + assert subgraphs[1][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + command_list.append( + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) + subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=3) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][0] == 0 + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert len(subgraphs[1]) == 5 + assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[1][-2:] in ([4, 8], [8, 4]) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 534b68e91..dae3ff15d 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -25,6 +25,7 @@ import math import random +import itertools from projectq.cengines import (BasicMapperEngine, return_swap_depth) from projectq.meta import LogicalQubitIDTag 
@@ -32,6 +33,23 @@ FlushGate, Swap) from projectq.types import WeakQubitRef from projectq.cengines._graph_path_manager import PathManager +from projectq.cengines._command_list import CommandList + +# ------------------------------------------------------------------------------ + +# https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6 +import sys +if sys.version_info[0] >= 3 and sys.version_info[1] > 6: # pragma: no cover + + def uniquify_list(seq): + return list(dict.fromkeys(seq)) +else: # pragma: no cover + + def uniquify_list(seq): + seen = set() + seen_add = seen.add + return [x for x in seq if x not in seen and not seen_add(x)] + # ============================================================================== @@ -40,13 +58,130 @@ class GraphMapperError(Exception): """Base class for all exceptions related to the GraphMapper.""" +def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, + stored_commands): + """ + Add active qubits to a mapping. + + This function implements the simple first-come first serve approach; + Qubits that are active but not yet registered in the mapping are added by + mapping them to the next available backend id + + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (CommandList): list of commands yet to be processed by + the mapper + + Returns: A new mapping + """ + mapping = deepcopy(current_mapping) + currently_used_nodes = sorted([v for _, v in mapping.items()]) + available_nodes = [n for n in graph if n not in currently_used_nodes] + + if len(new_logical_qubit_ids) > len(available_nodes): + raise RuntimeError("Mapper ran out of qubit to allocate. 
" + "Increase the number of qubits for this " + "mapper.") + + + for i, logical_id in enumerate(new_logical_qubit_ids): + mapping[logical_id] = available_nodes[i] + return mapping + + +def _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs): + """ + Generate an initial mapping while maximizing the number of 2-qubit gates + that can be applied without applying any SWAP operations. + + Args: + graph (networkx.Graph): underlying graph used by the mapper + qubit_interaction_subgraph (list): see documentation for CommandList + + Returns: A new mapping + """ + mapping = {} + available_nodes = sorted(list(graph), key=lambda n: len(graph[n])) + + # Initialize the seed node + logical_id = qubit_interaction_subgraphs[0].pop(0) + backend_id = available_nodes.pop() + mapping[logical_id] = backend_id + + for subgraph in qubit_interaction_subgraphs: + if available_nodes: + anchor_node = backend_id + for logical_id in subgraph: + neighbours = sorted( + [n for n in graph[anchor_node] if n in available_nodes], + key=lambda n: len(graph[n])) + + # If possible, take the neighbour with the highest + # degree. Otherwise, take the next highest order available node + if neighbours: + backend_id = neighbours[-1] + available_nodes.remove(backend_id) + elif available_nodes: + backend_id = available_nodes.pop() + else: + break + mapping[logical_id] = backend_id + else: + break + + return mapping + + + +def _add_qubits_to_mapping_smart_init(current_mapping, graph, + new_logical_qubit_ids, stored_commands): + """ + Add active qubits to a mapping. + + Similar to the first-come first-serve approach, except the initial mapping + tries to maximize the initial number of gates to be applied without + swaps. Otherwise identical to the first-come first-serve approach. 
+ + Args: + current_mapping (dict): specify which method should be used to + add the new qubits to the current mapping + graph (networkx.Graph): underlying graph used by the mapper + new_logical_qubit_ids (list): list of logical ids not yet part of the + mapping and that need to be assigned a + backend id + stored_commands (CommandList): list of commands yet to be processed by + the mapper + + Returns: A new mapping + """ + qubit_interaction_subgraphs = \ + stored_commands.calculate_qubit_interaction_subgraphs(order=2) + + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + + if not current_mapping: + return _generate_mapping_minimize_swaps(graph, + qubit_interaction_subgraphs) + return _add_qubits_to_mapping_fcfs(current_mapping, graph, + new_logical_qubit_ids, stored_commands) + + def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, stored_commands): """ Add active qubits to a mapping Qubits that are active but not yet registered in the mapping are added by - mapping them to the next available backend id + mapping them to an available backend id, as close as possible to other + qubits which they might interact with. 
Args: current_mapping (dict): specify which method should be used to @@ -55,21 +190,77 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, new_logical_qubit_ids (list): list of logical ids not yet part of the mapping and that need to be assigned a backend id - stored_commands (list): list of commands yet to be processed by the - mapper + stored_commands (CommandList): list of commands yet to be processed by + the mapper Returns: A new mapping - - Pre-conditions: - len(active_qubits) <= num_qubits == len(graph) """ - # pylint: disable=unused-argument + qubit_interaction_subgraphs = \ + stored_commands.calculate_qubit_interaction_subgraphs(order=2) + + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + + if not current_mapping: + return _generate_mapping_minimize_swaps(graph, + qubit_interaction_subgraphs) + mapping = deepcopy(current_mapping) currently_used_nodes = sorted([v for _, v in mapping.items()]) - available_ids = [n for n in graph if n not in currently_used_nodes] + available_nodes = sorted( + [n for n in graph if n not in currently_used_nodes], + key=lambda n: len(graph[n])) + interactions = list( + itertools.chain.from_iterable(stored_commands.interactions)) + + for logical_id in uniquify_list(new_logical_qubit_ids): + qubit_interactions = uniquify_list([ + i[0] if i[0] != logical_id else i[1] for i in interactions + if logical_id in i + ]) + + backend_id = None + + if len(qubit_interactions) == 1: + qubit = qubit_interactions[0] + candidates = sorted([ + n + for n in graph[mapping[qubit]] if n not in currently_used_nodes + ], + key=lambda n: len(graph[n])) + backend_id = candidates[-1] + elif qubit_interactions: + neighbours = [] + for qubit in qubit_interactions: + if qubit in mapping: + neighbours.append( + set(n for n in graph[mapping[qubit]] + if n in available_nodes)) + else: + break + + 
intersection = set() + while neighbours: + intersection = neighbours[0].intersection(*neighbours[1:]) + if intersection: + backend_id = intersection.pop() + break + neighbours.pop() + + if backend_id is None: + try: + backend_id = available_nodes.pop() + except: + raise RuntimeError("Mapper ran out of qubit to allocate. " + "Increase the number of qubits for this " + "mapper.") + else: + available_nodes.remove(backend_id) + + mapping[logical_id] = backend_id - for i, logical_id in enumerate(new_logical_qubit_ids): - mapping[logical_id] = available_ids[i] return mapping @@ -83,13 +274,20 @@ class GraphMapper(BasicMapperEngine): Args: graph (networkx.Graph) : Arbitrary connected graph storage (int) Number of gates to temporarily store - add_qubits_to_mapping (function) Function called when new qubits are to - be added to the current mapping - Signature of the function call: - current_mapping - graph - new_logical_qubit_ids - stored_commands + add_qubits_to_mapping (function or str) Function called when new qubits + are to be added to the current + mapping. + Special possible string values: + "fcfs": first-come first serve + "fcfs_init": first-come first + serve with smarter + mapping + initialisation + Signature of the function call: + current_mapping + graph + new_logical_qubit_ids + stored_commands enable_caching(Bool): Controls whether optimal path caching is enabled @@ -143,7 +341,7 @@ def __init__(self, # places. 
self._rng = random.Random(11) # Storing commands - self._stored_commands = list() + self._stored_commands = CommandList() # Logical qubit ids for which the Allocate gate has already been # processed and sent to the next engine but which are not yet # deallocated: @@ -152,7 +350,7 @@ def __init__(self, self._current_mapping = dict() # differs from other mappers self._reverse_current_mapping = dict() # Function to add new logical qubits ids to the mapping - self._add_qubits_to_mapping = add_qubits_to_mapping + self.set_add_qubits_to_mapping(add_qubits_to_mapping) # Statistics: self.num_mappings = 0 @@ -177,6 +375,19 @@ def current_mapping(self, current_mapping): for k, v in self._current_mapping.items() } + def set_add_qubits_to_mapping(self, add_qubits_to_mapping): + if isinstance(add_qubits_to_mapping, str): + if add_qubits_to_mapping.lower() == "fcfs": + self._add_qubits_to_mapping = _add_qubits_to_mapping_fcfs + elif add_qubits_to_mapping.lower() == "fcfs_init": + self._add_qubits_to_mapping = _add_qubits_to_mapping_smart_init + else: + raise ValueError( + "Invalid invalid value for add_qubits_to_mapping: {}". 
+ format(add_qubits_to_mapping)) + else: + self._add_qubits_to_mapping = add_qubits_to_mapping + def is_available(self, cmd): """Only allows 1 or two qubit gates.""" num_qubits = 0 @@ -203,7 +414,7 @@ def _process_commands(self): Returns: A list of paths through the graph to move some qubits and have them interact """ - not_in_mapping_qubits = set() + not_in_mapping_qubits = [] allocated_qubits = deepcopy(self._currently_allocated_ids) active_qubits = deepcopy(self._currently_allocated_ids) @@ -230,7 +441,8 @@ def _process_commands(self): allocated_qubits.add(qubit_id) active_qubits.add(qubit_id) if qubit_id not in self._current_mapping: - not_in_mapping_qubits.add(qubit_id) + not_in_mapping_qubits.append(qubit_id) + # not_in_mapping_qubits.add(qubit_id) elif isinstance(cmd.gate, DeallocateQubitGate): qubit_id = cmd.qubits[0][0].id @@ -253,7 +465,7 @@ def _process_commands(self): self.current_mapping = self._add_qubits_to_mapping( self._current_mapping, self.paths.graph, not_in_mapping_qubits, self._stored_commands) - not_in_mapping_qubits = set() + not_in_mapping_qubits = [] if not self.paths.push_interaction( self._current_mapping[qubit_ids[0]], @@ -275,7 +487,7 @@ def _send_possible_commands(self): # So that loop doesn't stop before AllocateGate applied active_ids.add(logical_id) - new_stored_commands = [] + new_stored_commands = CommandList() for i in range(len(self._stored_commands)): cmd = self._stored_commands[i] if not active_ids: @@ -509,8 +721,8 @@ def __str__(self): reverse=True) ] - max_width = int(math.ceil( - math.log10(max(self.paths.paths_stats.values()))) + 1) + max_width = int( + math.ceil(math.log10(max(self.paths.paths_stats.values()))) + 1) paths_stats_str = "" if self.paths.enable_caching: for k in interactions: diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 55c6e4f5b..002baa448 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -50,13 +50,18 
@@ def generate_grid_graph(nrows, ncols): (0 < col < ncols - 1)) add_vertical = is_middle or (col in (0, ncols - 1) and (0 < row < nrows - 1)) - if add_horizontal: graph.add_edge(node0, node0 - 1) graph.add_edge(node0, node0 + 1) if add_vertical: graph.add_edge(node0, node0 - ncols) graph.add_edge(node0, node0 + ncols) + if nrows == 2: + node0 = col + graph.add_edge(node0, node0 + ncols) + if ncols == 2: + node0 = ncols * row + graph.add_edge(node0, node0 + 1) return graph @@ -91,7 +96,8 @@ def grid33_graph(): @pytest.fixture def grid22_graph_mapper(grid22_graph): - mapper = graphm.GraphMapper(graph=grid22_graph) + mapper = graphm.GraphMapper( + graph=grid22_graph, add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -100,7 +106,8 @@ def grid22_graph_mapper(grid22_graph): @pytest.fixture def grid33_graph_mapper(grid33_graph): - mapper = graphm.GraphMapper(graph=grid33_graph) + mapper = graphm.GraphMapper( + graph=grid33_graph, add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -109,7 +116,8 @@ def grid33_graph_mapper(grid33_graph): @pytest.fixture def simple_mapper(simple_graph): - mapper = graphm.GraphMapper(graph=simple_graph) + mapper = graphm.GraphMapper( + graph=simple_graph, add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -140,13 +148,14 @@ def test_invalid_gates(simple_mapper): qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) qb2 = WeakQubitRef(engine=None, idx=2) - qb3 = WeakQubitRef(engine=None, idx=-1) cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], ), controls=[]) cmd1 = Command(engine=None, gate=Allocate, qubits=([qb1], ), controls=[]) cmd2 = Command(engine=None, gate=Allocate, qubits=([qb2], ), controls=[]) cmd3 = Command(engine=None, gate=X, qubits=([qb0], [qb1]), 
controls=[qb2]) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb3], )) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) with pytest.raises(Exception): mapper.receive([cmd0, cmd1, cmd2, cmd3, cmd_flush]) @@ -157,16 +166,15 @@ def test_run_infinite_loop_detection(simple_mapper): qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) - qb2 = WeakQubitRef(engine=None, idx=2) - qb3 = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb3], )) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[]) with pytest.raises(RuntimeError): mapper.receive([cmd0, cmd_flush]) - mapper._stored_commands = [] + mapper._stored_commands.clear() cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) with pytest.raises(RuntimeError): mapper.receive([cmd0, cmd_flush]) @@ -185,6 +193,186 @@ def test_resetting_mapping_to_none(simple_graph): assert mapper._reverse_current_mapping == {} +def test_add_qubits_to_mapping_methods_failure(simple_graph): + with pytest.raises(ValueError): + graphm.GraphMapper( + graph=simple_graph, add_qubits_to_mapping="as") + + +@pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) +def test_add_qubits_to_mapping_methods(simple_graph, add_qubits): + mapper = graphm.GraphMapper( + graph=simple_graph, add_qubits_to_mapping=add_qubits) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + gates = [ + Command(None, X, qubits=([qb[1]], ), controls=[qb[0]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + ] + 
+ mapper.receive(list(itertools.chain(allocate_cmds, gates, [cmd_flush]))) + assert mapper.num_mappings == 0 + + +def test_qubit_placement_initial_mapping_single_qubit_gates( + grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper, backend = deepcopy(grid33_graph_mapper) + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper.receive(allocate_cmds + [cmd_flush]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] == 4 + assert sorted([mapping[1], mapping[2], mapping[3], + mapping[4]]) == [1, 3, 5, 7] + assert sorted([mapping[5], mapping[6], mapping[7], + mapping[8]]) == [0, 2, 6, 8] + + +def test_qubit_placement_single_two_qubit_gate(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper_ref, backend = deepcopy(grid33_graph_mapper) + + mapper_ref.current_mapping = {3: 3, 4: 4, 5: 5} + mapper_ref._currently_allocated_ids = set( + mapper_ref.current_mapping.keys()) + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper_ref) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper = deepcopy(mapper_ref) + mapper.receive([ + allocate_cmds[0], + Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] in {0, 6} + + mapper = deepcopy(mapper_ref) + mapper.receive([ + allocate_cmds[6], + Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[6] in {0, 6} + + +def test_qubit_placement_double_two_qubit_gate(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + 
graphm._add_qubits_to_mapping) + mapper_ref, backend_ref = deepcopy(grid33_graph_mapper) + + mapper_ref.current_mapping = {1: 1, 3: 3, 4: 4, 5: 5} + mapper_ref._currently_allocated_ids = set( + mapper_ref.current_mapping.keys()) + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper_ref) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper = deepcopy(mapper_ref) + backend = deepcopy(backend_ref) + mapper.next_engine = backend + mapper.receive([ + allocate_cmds[0], + Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), cmd_flush + ]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] == 0 + + mapper = deepcopy(mapper_ref) + backend = deepcopy(backend_ref) + mapper.next_engine = backend + mapper.receive([ + allocate_cmds[2], + Command(None, X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[5]]), + cmd_flush, + ]) + mapping = mapper.current_mapping + + # Make sure that the qb[2] was allocated at backend_id 0 + assert backend.received_commands[0].gate == Allocate + assert backend.received_commands[0].qubits[0][0].id == 0 + assert backend.received_commands[0].tags == [LogicalQubitIDTag(2)] + + +def test_qubit_placement_multiple_two_qubit_gates(grid33_graph_mapper): + grid33_graph_mapper[0].set_add_qubits_to_mapping( + graphm._add_qubits_to_mapping) + mapper, backend = deepcopy(grid33_graph_mapper) + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + gates = [ + Command(None, X, qubits=([qb[1]], ), controls=[qb[0]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[1]], 
), controls=[qb[4]]), + ] + + all_cmds = list(itertools.chain(allocate_cmds, gates)) + mapper, backend = deepcopy(grid33_graph_mapper) + mapper.receive(all_cmds + [cmd_flush]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[1] == 4 + assert sorted([mapping[0], mapping[2], mapping[3], + mapping[4]]) == [1, 3, 5, 7] + assert sorted([mapping[5], mapping[6], mapping[7], + mapping[8]]) == [0, 2, 6, 8] + + gates = [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[0]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[6]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[6]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[7]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[7]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[8]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[8]], ), controls=[qb[3]]), + ] + + mapper, backend = deepcopy(grid33_graph_mapper) + mapper.receive(list(itertools.chain(allocate_cmds, gates)) + [cmd_flush]) + mapping = mapper.current_mapping + + assert mapper.num_mappings == 0 + assert mapping[0] == 4 + assert sorted([mapping[1], mapping[2], mapping[3], + mapping[4]]) == [1, 3, 5, 7] + assert sorted([mapping[5], mapping[6], mapping[7], + mapping[8]]) == [0, 2, 6, 8] + + def test_send_possible_commands(simple_graph, simple_mapper): mapper, backend = simple_mapper mapper.current_mapping = dict(enumerate(range(len(simple_graph)))) @@ -201,7 +389,7 @@ def test_send_possible_commands(simple_graph, simple_mapper): qb1 = WeakQubitRef(engine=None, idx=qb1_id) cmd1 = Command(None, X, qubits=([qb0], ), controls=[qb1]) cmd2 = Command(None, X, qubits=([qb1], ), controls=[qb0]) - mapper._stored_commands = [cmd1, cmd2] + 
mapper._stored_commands += [cmd1, cmd2] mapper._send_possible_commands() assert len(mapper._stored_commands) == 0 @@ -211,7 +399,8 @@ def test_send_possible_commands(simple_graph, simple_mapper): qb0 = WeakQubitRef(engine=None, idx=qb0_id) qb1 = WeakQubitRef(engine=None, idx=qb1_id) cmd = Command(None, X, qubits=([qb0], ), controls=[qb1]) - mapper._stored_commands = [cmd] + mapper._stored_commands.clear() + mapper._stored_commands += [cmd] mapper._send_possible_commands() assert len(mapper._stored_commands) == 1 @@ -222,7 +411,7 @@ def test_send_possible_commands_allocate(simple_mapper): qb0 = WeakQubitRef(engine=None, idx=0) cmd0 = Command( engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) - mapper._stored_commands = [cmd0] + mapper._stored_commands += [cmd0] mapper._currently_allocated_ids = set([10]) # not in mapping: mapper.current_mapping = dict() @@ -274,7 +463,7 @@ def test_send_possible_commands_allocation_no_active_qubits( qb_flush = WeakQubitRef(engine=None, idx=-1) cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - mapper._stored_commands = cmd_list + [cmd_flush] + mapper._stored_commands += cmd_list + [cmd_flush] mapper._run() assert len(mapper._stored_commands) == 8 @@ -422,7 +611,8 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): all_cmds[3] = cmd - mapper._stored_commands = all_cmds + mapper._stored_commands.clear() + mapper._stored_commands += all_cmds mapper._run() assert mapper.num_mappings == 1 if mapper.current_mapping[2] == 2: @@ -853,7 +1043,9 @@ def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( @pytest.mark.parametrize("enable_caching", [False, True]) def test_mapper_to_str(simple_graph, enable_caching): mapper = graphm.GraphMapper( - graph=simple_graph, enable_caching=enable_caching) + graph=simple_graph, + enable_caching=enable_caching, + add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) eng = MainEngine(backend, [mapper]) qureg = 
eng.allocate_qureg(len(simple_graph)) From e7b2363397de889785db4eb5a5af26e2971e42d1 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 18 Mar 2019 13:57:33 +0100 Subject: [PATCH 10/25] Refactor __str__ methods for GraphMapper and PathManager --- projectq/cengines/_graph_path_manager.py | 35 ++++++++++++++++++++++++ projectq/cengines/_graphmapper.py | 31 ++------------------- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/projectq/cengines/_graph_path_manager.py b/projectq/cengines/_graph_path_manager.py index e31a1973f..3b6356894 100644 --- a/projectq/cengines/_graph_path_manager.py +++ b/projectq/cengines/_graph_path_manager.py @@ -46,6 +46,8 @@ """ import itertools +import math +import numpy as np import networkx as nx # ============================================================================== @@ -319,6 +321,39 @@ def __init__(self, graph, enable_caching=True): # Statistics self.paths_stats = dict() + def __str__(self): + interactions = [ + k for _, k in sorted( + zip(self.paths_stats.values(), self.paths_stats.keys()), + reverse=True) + ] + + max_width = int( + math.ceil(math.log10(max(self.paths_stats.values()))) + 1) + paths_stats_str = "" + if self.enable_caching: + average_path_length = np.average( + [ + len(self.cache.get_path(*list(k))) + if not self.graph.has_edge(*list(k)) else 2 + for k in interactions + ], + weights=[self.paths_stats[k] for k in interactions]) + for k in interactions: + if self.graph.has_edge(*list(k)): + path = list(k) + else: + path = self.cache.get_path(*list(k)) + paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( + self.paths_stats[k], max_width, path, *k) + else: + average_path_length = None + for k in interactions: + paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( + self.paths_stats[k], max_width, *k) + return "Path statistics:{}\n\nAverage path length: {}".format( + paths_stats_str, average_path_length) + ################################################################# # Methods 
querying information about the state of the container # ################################################################# diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index dae3ff15d..abe913a16 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -23,7 +23,6 @@ """ from copy import deepcopy -import math import random import itertools @@ -87,7 +86,6 @@ def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, raise RuntimeError("Mapper ran out of qubit to allocate. " "Increase the number of qubits for this " "mapper.") - for i, logical_id in enumerate(new_logical_qubit_ids): mapping[logical_id] = available_nodes[i] @@ -137,7 +135,6 @@ def _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs): return mapping - def _add_qubits_to_mapping_smart_init(current_mapping, graph, new_logical_qubit_ids, stored_commands): """ @@ -714,31 +711,7 @@ def __str__(self): num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( num_swaps_per_mapping, num_mapping) - interactions = [ - k for _, k in sorted( - zip(self.paths.paths_stats.values(), - self.paths.paths_stats.keys()), - reverse=True) - ] - - max_width = int( - math.ceil(math.log10(max(self.paths.paths_stats.values()))) + 1) - paths_stats_str = "" - if self.paths.enable_caching: - for k in interactions: - if self.paths.graph.has_edge(*list(k)): - path = list(k) - else: - path = self.paths.cache.get_path(*list(k)) - paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( - self.paths.paths_stats[k], max_width, path, *k) - else: - for k in interactions: - paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( - self.paths.paths_stats[k], max_width, *k) - return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + - "Number of swaps per mapping:{}\n\n" + - "Path statistics:{}\n\n").format( + "Number of swaps per mapping:{}\n\n{}\n\n").format( self.num_mappings, depth_of_swaps_str, - num_swaps_per_mapping_str, 
paths_stats_str) + num_swaps_per_mapping_str, str(self.paths)) From 64c266aa28ffdc6b0fac95f36aae1c3a040e2216 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 6 May 2019 13:56:47 +0200 Subject: [PATCH 11/25] Fill gaps in test coverage + cleanup code + fix Python 2.7 issue --- projectq/cengines/_command_list.py | 2 +- projectq/cengines/_graphmapper.py | 60 ++++++++------------ projectq/cengines/_graphmapper_test.py | 78 +++++++++++++++++--------- 3 files changed, 78 insertions(+), 62 deletions(-) diff --git a/projectq/cengines/_command_list.py b/projectq/cengines/_command_list.py index 7685340ca..9642fc2bd 100644 --- a/projectq/cengines/_command_list.py +++ b/projectq/cengines/_command_list.py @@ -60,7 +60,7 @@ def clear(self): """ Remove all commands from the container. """ - self._cmds.clear() + self._cmds = [] self.partitions = [set()] self.interactions = [[]] diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index abe913a16..1a07bd0ea 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -82,11 +82,6 @@ def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, currently_used_nodes = sorted([v for _, v in mapping.items()]) available_nodes = [n for n in graph if n not in currently_used_nodes] - if len(new_logical_qubit_ids) > len(available_nodes): - raise RuntimeError("Mapper ran out of qubit to allocate. 
" - "Increase the number of qubits for this " - "mapper.") - for i, logical_id in enumerate(new_logical_qubit_ids): mapping[logical_id] = available_nodes[i] return mapping @@ -112,25 +107,20 @@ def _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs): mapping[logical_id] = backend_id for subgraph in qubit_interaction_subgraphs: - if available_nodes: - anchor_node = backend_id - for logical_id in subgraph: - neighbours = sorted( - [n for n in graph[anchor_node] if n in available_nodes], - key=lambda n: len(graph[n])) - - # If possible, take the neighbour with the highest - # degree. Otherwise, take the next highest order available node - if neighbours: - backend_id = neighbours[-1] - available_nodes.remove(backend_id) - elif available_nodes: - backend_id = available_nodes.pop() - else: - break - mapping[logical_id] = backend_id - else: - break + anchor_node = backend_id + for logical_id in subgraph: + neighbours = sorted( + [n for n in graph[anchor_node] if n in available_nodes], + key=lambda n: len(graph[n])) + + # If possible, take the neighbour with the highest + # degree. 
Otherwise, take the next highest order available node + if neighbours: + backend_id = neighbours[-1] + available_nodes.remove(backend_id) + else: + backend_id = available_nodes.pop() + mapping[logical_id] = backend_id return mapping @@ -222,12 +212,15 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, if len(qubit_interactions) == 1: qubit = qubit_interactions[0] - candidates = sorted([ - n - for n in graph[mapping[qubit]] if n not in currently_used_nodes - ], - key=lambda n: len(graph[n])) - backend_id = candidates[-1] + + if qubit in mapping: + candidates = sorted([ + n for n in graph[mapping[qubit]] + if n not in currently_used_nodes + ], + key=lambda n: len(graph[n])) + if candidates: + backend_id = candidates[-1] elif qubit_interactions: neighbours = [] for qubit in qubit_interactions: @@ -247,12 +240,7 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, neighbours.pop() if backend_id is None: - try: - backend_id = available_nodes.pop() - except: - raise RuntimeError("Mapper ran out of qubit to allocate. 
" - "Increase the number of qubits for this " - "mapper.") + backend_id = available_nodes.pop() else: available_nodes.remove(backend_id) diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 002baa448..01d0fdf82 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -195,8 +195,7 @@ def test_resetting_mapping_to_none(simple_graph): def test_add_qubits_to_mapping_methods_failure(simple_graph): with pytest.raises(ValueError): - graphm.GraphMapper( - graph=simple_graph, add_qubits_to_mapping="as") + graphm.GraphMapper(graph=simple_graph, add_qubits_to_mapping="as") @pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) @@ -346,31 +345,17 @@ def test_qubit_placement_multiple_two_qubit_gates(grid33_graph_mapper): assert sorted([mapping[5], mapping[6], mapping[7], mapping[8]]) == [0, 2, 6, 8] - gates = [ - Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), - Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]), - Command(None, X, qubits=([qb[0]], ), controls=[qb[3]]), - Command(None, X, qubits=([qb[0]], ), controls=[qb[4]]), - Command(None, X, qubits=([qb[5]], ), controls=[qb[4]]), - Command(None, X, qubits=([qb[5]], ), controls=[qb[1]]), - Command(None, X, qubits=([qb[6]], ), controls=[qb[1]]), - Command(None, X, qubits=([qb[6]], ), controls=[qb[3]]), - Command(None, X, qubits=([qb[7]], ), controls=[qb[4]]), - Command(None, X, qubits=([qb[7]], ), controls=[qb[2]]), - Command(None, X, qubits=([qb[8]], ), controls=[qb[2]]), - Command(None, X, qubits=([qb[8]], ), controls=[qb[3]]), - ] - + all_cmds = list(itertools.chain(allocate_cmds[:5], gates)) mapper, backend = deepcopy(grid33_graph_mapper) - mapper.receive(list(itertools.chain(allocate_cmds, gates)) + [cmd_flush]) - mapping = mapper.current_mapping + mapper.receive(all_cmds + [cmd_flush]) - assert mapper.num_mappings == 0 - assert mapping[0] == 4 - assert sorted([mapping[1], mapping[2], mapping[3], - 
mapping[4]]) == [1, 3, 5, 7] - assert sorted([mapping[5], mapping[6], mapping[7], - mapping[8]]) == [0, 2, 6, 8] + gates = [ + Command(None, X, qubits=([qb[5]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[5]], ), controls=[qb[7]]), + ] + all_cmds = list(itertools.chain(allocate_cmds[5:], gates)) + mapper.receive(all_cmds + [cmd_flush]) + assert mapper.num_mappings == 2 def test_send_possible_commands(simple_graph, simple_mapper): @@ -472,6 +457,49 @@ def test_send_possible_commands_allocation_no_active_qubits( assert mapper._stored_commands[6] == cmd_list[11] +def test_send_possible_commands_allocation_no_active_qubits( + grid22_graph_mapper): + mapper, backend = grid22_graph_mapper + + qb0 = WeakQubitRef(engine=None, idx=0) + qb1 = WeakQubitRef(engine=None, idx=1) + qb2 = WeakQubitRef(engine=None, idx=2) + qb3 = WeakQubitRef(engine=None, idx=3) + qb4 = WeakQubitRef(engine=None, idx=4) + + cmd_list = [ + Command(engine=None, gate=Allocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb1], )), + Command(engine=None, gate=Allocate, qubits=([qb2], )), + Command(engine=None, gate=Allocate, qubits=([qb3], )), + Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), + ] + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + + mapper._stored_commands += cmd_list + [cmd_flush] + + mapper._run() + assert mapper.num_mappings == 1 + assert len(mapper._stored_commands) == 1 + assert mapper._stored_commands[0] == cmd_flush + + cmd_list = [ + Command(engine=None, gate=X, qubits=([qb2], ), controls=[qb3]), + Command(engine=None, gate=Deallocate, qubits=([qb3], )), + Command(engine=None, gate=Deallocate, qubits=([qb2], )), + Command(engine=None, gate=Deallocate, qubits=([qb1], )), + Command(engine=None, gate=Deallocate, qubits=([qb0], )), + Command(engine=None, gate=Allocate, qubits=([qb4], )), + ] + mapper._stored_commands = cmd_list + [cmd_flush] + mapper._run() + 
assert mapper.num_mappings == 1 + assert len(mapper._stored_commands) == 2 + assert mapper._stored_commands[0] == cmd_list[-1] + + def test_send_possible_commands_deallocate(simple_mapper): mapper, backend = simple_mapper From 970253c663bcc0de7f6ddcf21cfe26322ac7628d Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Wed, 9 Oct 2019 11:29:30 +0200 Subject: [PATCH 12/25] Replace path manager with cost-function metrics for GraphMapper --- docs/projectq.cengines.rst | 18 + projectq/cengines/_graph_path_manager.py | 809 --------------- projectq/cengines/_graph_path_manager_test.py | 941 ------------------ .../cengines/_multi_qubit_gate_manager.py | 649 ++++++++++++ .../_multi_qubit_gate_manager_test.py | 562 +++++++++++ 5 files changed, 1229 insertions(+), 1750 deletions(-) delete mode 100644 projectq/cengines/_graph_path_manager.py delete mode 100644 projectq/cengines/_graph_path_manager_test.py create mode 100644 projectq/cengines/_multi_qubit_gate_manager.py create mode 100644 projectq/cengines/_multi_qubit_gate_manager_test.py diff --git a/docs/projectq.cengines.rst b/docs/projectq.cengines.rst index 5a3c963a6..90f9aaa30 100755 --- a/docs/projectq.cengines.rst +++ b/docs/projectq.cengines.rst @@ -31,3 +31,21 @@ Module contents :members: :special-members: __init__ :imported-members: + + +Helper sub-modules +------------------ + +Multi-qubit gate sub-module +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: projectq.cengines._multi_qubit_gate_manager + :members: + :imported-members: + +Command list sub-module +^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
automodule:: projectq.cengines._command_list + :members: + :imported-members: diff --git a/projectq/cengines/_graph_path_manager.py b/projectq/cengines/_graph_path_manager.py deleted file mode 100644 index 3b6356894..000000000 --- a/projectq/cengines/_graph_path_manager.py +++ /dev/null @@ -1,809 +0,0 @@ -# Copyright 2019 ProjectQ-Framework (www.projectq.ch) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This is a helper module for the _graphmapper.GraphMapper class. - -Its main goal is to provide classes and functions to manage paths through an -arbitrary graph and eventually generate a list of swap operations to perform as -many paths as possible, by either solving conflicts (ie. crossing points and -intersections; see definitions below) or discarding paths. - -Note that when generating a list of swap operations for a particular path, the -path is usually splitted into two halves in order to maximize the number of -swap operations that can be performed simultaneously. - -In the context of this module, a distinction is made between a crossing point -and an intersection. - -A crossing point is just as its name implies a point or node of the graph that -simultaneously belongs to one or more paths. On the other hand, an intersection -is defined as a particular crossing point of a path for which one of the -splitted sub-path halves has an endpoint. 
This means that a path may have at -most two intersections - -This is best exemplified by some examples: - - Given the path [0, 1, 2, 3], a possible split to maximize simultaneous - swapping operations would be: - [[0, 1], [2, 3]] where 1 or 2 may be intersections. - - Given the path [0, 1, 2, 3, 4], possible splits would include: - [[0, 1, 2], [3, 4]] where 2 or 3 could be intersections if they are - crossings - [[0, 1], [2, 3, 4]] where 1 or 2 could be intersections if they are - crossings -""" - -import itertools -import math -import numpy as np -import networkx as nx - -# ============================================================================== - - -def _find_first_order_intersections(crossings, split_paths): - """ - Find out which crossing nodes are intersections. - - A crossing point is considered an intersection if and only if either: - - the end of sub-path 1 is the crossing point - - the beginning of sub-path 2 is the crossing point - - Args: - crossings (dict) : Dictionary containing the list of all crossing - points indexed by the path ID - split_paths (dict) : Dictionary containing the two halves of each paths - indexed by the path ID - - Returns: - intersections (dict) : Dictionary indexed by the intersection node - containing the IDs of the paths for which that - particular node is considered an intersection - """ - intersections = {} - - for path_id, (subpath1, subpath2) in split_paths.items(): - for crossing in crossings[path_id]: - if crossing.overlap[0] in (subpath1[-1], subpath2[0]): - if crossing.overlap[0] not in intersections: - intersections[crossing.overlap[0]] = set((path_id, )) - else: - intersections[crossing.overlap[0]].add(path_id) - - return intersections - - -def _try_solve_intersection(intersection_node, subpath1, subpath2, - subpath1_not_crossing, subpath2_not_crossing): - """ - Attempt to solve a first order intersection by modifying sub-paths. 
- - Args: - intersection_node (int) : Intersection node - subpath1 (list) : First half of the path - subpath2 (list) : Second half of the path - subpath1_not_crossing (list) : Helper list of booleans indicating - whether the nodes of the first subpath - are crossing or not - subpath2_not_crossing (list) : Helper list of booleans indicating - whether the nodes of the second subpath - are crossing or not - - Note: - subpath1*, subpath2* arguments are modified in-place - - Returns: - True/False depending on whether the intersection could be solved or not - """ - if len(subpath1) + len(subpath2) < 4: - return False - - if subpath1[-1] == intersection_node: - # Try moving the head of subpath2 to subpath1 - if len(subpath2) > 1 \ - and subpath2_not_crossing[0] \ - and subpath2_not_crossing[1]: - subpath1.append(subpath2[0]) - subpath1_not_crossing.append(subpath2_not_crossing[0]) - del subpath2[0] - del subpath2_not_crossing[0] - return True - else: - # Try moving the tail of subpath1 to subpath2 - if len(subpath1) > 1 \ - and subpath1_not_crossing[-1] \ - and subpath1_not_crossing[-2]: - subpath2.insert(0, subpath1.pop()) - subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) - return True - - # Try moving the last two elements of subpath1 to subpath2 - if len(subpath1) > 2 \ - and subpath1_not_crossing[-2] \ - and subpath1_not_crossing[-3]: - subpath2.insert(0, subpath1.pop()) - subpath2.insert(0, subpath1.pop()) - subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) - subpath2_not_crossing.insert(0, subpath1_not_crossing.pop()) - return True - - # Try moving the first two elements of subpath2 to subpath1 - if len(subpath2) > 2 \ - and subpath2_not_crossing[1] \ - and subpath2_not_crossing[2]: - subpath1.append(subpath2[0]) - subpath1.append(subpath2[1]) - subpath1_not_crossing.append(subpath2_not_crossing[0]) - subpath1_not_crossing.append(subpath2_not_crossing[1]) - del subpath2[:2] - del subpath2_not_crossing[:2] - return True - - return False - - 
-def _return_swaps(split_paths): - """ - Return a list of swap operations given a list of path halves - - Args: - split_paths (dict): Dictionary indexed by path ID containing 2-tuples - of path halves - - Returns: A list of swap operations (2-tuples) - """ - swap_operations = [] - - for path_id in sorted(split_paths): - path = split_paths[path_id] - swap_operations.append([]) - # Add swaps operations for first half of the path - for prev, cur in zip(path[0], path[0][1:]): - swap_operations[-1].append((prev, cur)) - - # Add swaps operations for the second half of the path - for prev, cur in zip(path[1][::-1], path[1][-2::-1]): - swap_operations[-1].append((prev, cur)) - - return swap_operations - - -# ============================================================================== - - -class PathCacheExhaustive(): - """ - Class acting as cache for optimal paths through the graph. - """ - - def __init__(self, path_length_threshold): - self._path_length_threshold = path_length_threshold - self._cache = {} - self.key_type = frozenset - - def __str__(self): - ret = "" - for (node0, node1), path in self._cache.items(): - ret += "{}: {}\n".format(sorted([node0, node1]), path) - return ret - - def empty_cache(self): - """Empty the cache.""" - self._cache = {} - - def get_path(self, start, end): - """ - Return a path from the cache. - - Args: - start (object): Start node for the path - end (object): End node for the path - - Returns: Optimal path stored in cache - - Raises: KeyError if path is not present in the cache - """ - return self._cache[self.key_type((start, end))] - - def has_path(self, start, end): - """ - Test whether a path connecting start to end is present in the cache. - - Args: - start (object): Start node for the path - end (object): End node for the path - - Returns: True/False - """ - return self.key_type((start, end)) in self._cache - - def add_path(self, path): - """ - Add a path to the cache. 
- - This method also recursively adds all the subpaths that are at least - self._path_length_threshold long to the cache. - - Args: - path (list): Path to store inside the cache - """ - length = len(path) - for start in range(length - self._path_length_threshold + 1): - node0 = path[start] - for incr in range(length - start - 1, - self._path_length_threshold - 2, -1): - end = start + incr - self._cache[self.key_type((node0, - path[end]))] = path[start:end + 1] - - -# ============================================================================== - - -class _Crossing: - __slots__ = ['path_id', 'overlap'] - - def __init__(self, path_id, overlap): - self.path_id, self.overlap = path_id, overlap - - def __eq__(self, other): - if isinstance(other, self.__class__): - return (self.path_id, self.overlap) == (other.path_id, - other.overlap) - if isinstance(other, list): - return self.overlap == other - if isinstance(other, int): - return self.overlap[0] == other - raise NotImplementedError("Invalid comparison") - - def __str__(self): - return '{} {}'.format(self.path_id, self.overlap) - - def __repr__(self): - return 'Crossing({}, {})'.format(self.path_id, self.overlap) - - -class PathManager: - """ - Class managing interactions between distant qubits on an arbitrary graph. - - This class essentially manages paths through an arbitrary graph, handling - possible intersections between multiple paths through an arbitrary graph by - resolving conflict points such as crossings and intersections. 
- - Attributes: - crossings (dict) : dictionary of crossing points indexed by path ID - cache (PathCacheExhaustive) : cache manager - enable_caching (bool): indicates whether caching is enabled or not - graph (networkx.Graph): Arbitrary connected graph - paths (dict) : list of paths currently held by a path container indexed - by a unique ID - paths_stats (dict) : dictionary for storing statistics indexed by - interactions (frozenset of pairs of qubits) - """ - - def __init__(self, graph, enable_caching=True): - """ - Args: - graph (networkx.Graph): an arbitrary connected graph - enable_caching (bool): Controls whether path caching is enabled - """ - # Make sure that we start with a valid graph - if not nx.is_connected(graph): - raise RuntimeError("Input graph must be a connected graph") - elif not all([isinstance(n, int) for n in graph]): - raise RuntimeError( - "All nodes inside the graph needs to be integers") - else: - self.graph = graph - - self.paths = {} - self.crossings = {} - self._path_id = 0 - - self.enable_caching = enable_caching - # Path cache support - path_length_threshold = 3 - self.cache = PathCacheExhaustive(path_length_threshold) - - # Statistics - self.paths_stats = dict() - - def __str__(self): - interactions = [ - k for _, k in sorted( - zip(self.paths_stats.values(), self.paths_stats.keys()), - reverse=True) - ] - - max_width = int( - math.ceil(math.log10(max(self.paths_stats.values()))) + 1) - paths_stats_str = "" - if self.enable_caching: - average_path_length = np.average( - [ - len(self.cache.get_path(*list(k))) - if not self.graph.has_edge(*list(k)) else 2 - for k in interactions - ], - weights=[self.paths_stats[k] for k in interactions]) - for k in interactions: - if self.graph.has_edge(*list(k)): - path = list(k) - else: - path = self.cache.get_path(*list(k)) - paths_stats_str += "\n {3:3} - {4:3}: {0:{1}} | {2}".format( - self.paths_stats[k], max_width, path, *k) - else: - average_path_length = None - for k in interactions: - 
paths_stats_str += "\n {2:3} - {3:3}: {0:{1}}".format( - self.paths_stats[k], max_width, *k) - return "Path statistics:{}\n\nAverage path length: {}".format( - paths_stats_str, average_path_length) - - ################################################################# - # Methods querying information about the state of the container # - ################################################################# - - def get_all_nodes(self): - """ - Return a list of all nodes that are part of some path. - - Returns: - A set of nodes that are part of at least one path. - """ - all_nodes = [] - for row in self.paths.values(): - all_nodes.extend(row[0]) - all_nodes.extend(row[1]) - return set(all_nodes) - - def get_all_paths(self): - """ - Return a list of all the path contained in the container. - - Returns: - A list of paths (list of list of ints) - """ - return [ - self.paths[k][0] + self.paths[k][1] for k in sorted(self.paths) - ] - - def has_interaction(self, node0, node1): - """ - Check if a path within the container already generate an interaction - - Args: - node0 (int) : An endnode of a path - node1 (int) : An endnode of a path - - Returns: - True or False depending on whether the container has a path linking - node0 to node1 - """ - for path in self.paths.values(): - if frozenset((node0, node1)) == frozenset((path[0][0], - path[1][-1])): - return True - return False - - def max_crossing_order(self): - """ - Return the order of the largest crossing. 
- - The order of a crossing is defined as the number of paths that - intersect - - Returns: - An int - """ - crossing_orders = list( - itertools.chain.from_iterable( - [[len(c.overlap) for c in crossing] - for crossing in self.crossings.values()])) - if crossing_orders: - return max(crossing_orders) - return 0 - - ###################################################### - # Methods for resetting the content of the container # - ###################################################### - - def clear_paths(self): - """ - Reset the list of paths managed by this instance. - - Note: - Does not reset path statistics or the state of the cache. - """ - self.paths.clear() - self.crossings.clear() - - def clear(self): - """ - Completely reset the state of this instance. - - Note: - Both path statistics and cache are also reset - """ - self.clear_paths() - self.paths_stats.clear() - self.cache.empty_cache() - - ############################################################# - # Entry point for the mapper to extract the final path list # - ############################################################# - - def generate_swaps(self): - """ - Generate a list of swaps to execute as many paths as possible. - - Returns: - A list of swap operations (tuples) - """ - - self._solve_first_order_intersections( - _find_first_order_intersections(self.crossings, self.paths)) - - # By this point, we should have solved all intersections - return list(itertools.chain.from_iterable(_return_swaps(self.paths))) - - ############################################# - # Methods for adding paths to the container # - ############################################# - - def push_interaction(self, node0, node1): - """ - Plan an interaction between two qubit. 
- - Args: - node0 (int) : backend id of the first qubit - node1 (int) : backend id of the second qubit - - Returns: - True if the path could be added to the container, False otherwise - """ - - # TODO: think about merging paths - # TODO: maybe apply gates in the middle of the swaps - - interaction = frozenset((node0, node1)) - if self.has_interaction(node0, node1): - self.paths_stats[interaction] += 1 - return True - - if not self.graph.has_edge(node0, node1): - new_path = self._calculate_path(node0, node1) - else: - new_path = None - - if new_path: - if not self.try_add_path(new_path) \ - and not self._try_alternative_paths(node0, node1): - return False - else: - # Prevent adding a new path if it contains some already interacting - # qubits - for path in self.paths.values(): - if path[0][0] in (node0, node1) or path[1][-1] in (node0, - node1): - return False - - if interaction not in self.paths_stats: - self.paths_stats[interaction] = 1 - else: - self.paths_stats[interaction] += 1 - return True - - def try_add_path(self, new_path): - """ - Try adding a path to the path container. 
- - Args: - new_path (list) : path to add to the container - - Returns: - True if the path could be added to the container, False otherwise - """ - # Prevent adding a new path if it contains some already interacting - # qubits - for path in self.paths.values(): - if path[0][0] in new_path or path[1][-1] in new_path: - return False - - # Make sure each node appears only once - if len(new_path) != len(set(new_path)): - return False - - idx = len(new_path) >> 1 - new_subpath0, new_subpath1 = new_path[:idx], new_path[idx:] - new_intersections = {} - new_crossings = [] - for idx, (subpath0, subpath1) in self.paths.items(): - path_overlap = [ - node for node in new_path - if node in subpath0 or node in subpath1 - ] - if len(path_overlap) > 1: - return False - if len(path_overlap) == 1: - new_crossings.append(_Crossing(idx, path_overlap)) - - # Is this crossing point an intersection for the new path? - if new_subpath0[-1] in path_overlap \ - or new_subpath1[0] in path_overlap: - if path_overlap[0] not in new_intersections: - new_intersections[path_overlap[0]] = set( - (self._path_id, )) - else: - new_intersections[path_overlap[0]].add(self._path_id) - - # Is this crossing point an intersection for the other path? 
- subpath0, subpath1 = self.paths[idx] - if subpath0[-1] in path_overlap \ - or subpath1[0] in path_overlap: - if path_overlap[0] not in new_intersections: - new_intersections[path_overlap[0]] = set((idx, )) - else: - new_intersections[path_overlap[0]].add(idx) - - self.paths[self._path_id] = (new_subpath0, new_subpath1) - self.crossings[self._path_id] = new_crossings - for crossing in new_crossings: - path_id = crossing.path_id - self.crossings[path_id].append( - _Crossing(self._path_id, crossing.overlap)) - - # Remove the entries where only the new path is present, as the - # solution in those cases is to execute the new path after the other - # paths, which is going to happen anyway as the new path is appended to - # the list of paths - new_intersections = { - node: path_ids - for node, path_ids in new_intersections.items() - if len(path_ids) > 1 or self._path_id not in path_ids - } - - if new_intersections: - self._solve_first_order_intersections(new_intersections) - - if self._path_id not in self.paths: - return False - - self._path_id += 1 - return True - - ############################################# - # Methods for adding paths to the container # - ############################################# - - def remove_path_by_id(self, path_id): - """ - Remove a path from the path container given its ID. - - Args: - path_id (int) : ID of path to remove - - Raises: - KeyError if path_id is not valid - """ - if path_id not in self.paths: - raise KeyError(path_id) - self.crossings = { - k: [i for i in v if i.path_id != path_id] - for k, v in self.crossings.items() if k != path_id - } - del self.paths[path_id] - - def remove_crossing_of_order_higher_than(self, order): - """ - Remove paths that have crossings with order above a certain threshold. 
- - Args: - order (int) : Maximum allowed order of crossing - """ - number_of_crossings_per_path = { - path_id: len([c for c in crossing if len(c.overlap) > order]) - for path_id, crossing in self.crossings.items() - } - - path_id_list = [ - x for y, x in sorted( - zip(number_of_crossings_per_path.values(), - number_of_crossings_per_path.keys())) if y - ] - - while path_id_list and self.max_crossing_order() > order: - path_id = path_id_list.pop() - self.remove_path_by_id(path_id) - - def swap_paths(self, path_id1, path_id2): - """ - Swap two path within the path container. - - Args: - path_id1 (int) : ID of first path - path_id2 (int) : ID of second path - """ - - if path_id1 not in self.paths: - raise KeyError(path_id1) - if path_id2 not in self.paths: - raise KeyError(path_id2) - - for crossing_list in self.crossings.values(): - for crossing in crossing_list: - if path_id1 == crossing.path_id: - crossing.path_id = path_id2 - elif path_id2 == crossing.path_id: - crossing.path_id = path_id1 - - self.crossings[path_id2], self.crossings[path_id1] = self.crossings[ - path_id1], self.crossings[path_id2] - self.paths[path_id2], self.paths[path_id1] = self.paths[ - path_id1], self.paths[path_id2] - - ########################## - # Private helper methods # - ########################## - - def _solve_first_order_intersections(self, intersections): - """ - Solve all first order intersections. 
- - The intersections may be "solved" in two different manners: - - Sub-path split are modified to transform intersections in simple - crossings - - Paths are removed from the container - - Pre-conditions: - self.max_crossing_order() == 1 - - Args: - intersections (dict): TODO - """ - - # Get a list of the intersection nodes sorted by intersection order and - # total number of points of all paths for that particular intersection - def intersection_sort(crossing): - order = len(crossing[0]) - number_of_points = sum([ - len(self.paths[path_id][0]) + len(self.paths[path_id][1]) - for path_id in crossing[0] - ]) - order + 1 - return (order, number_of_points) - - intersection_node_list = [ - x for _, x in sorted( - zip(intersections.values(), intersections.keys()), - key=intersection_sort) - ] - - # and process them - while intersection_node_list: - intersection_node = intersection_node_list[-1] - node_is_not_crossing = { - path_id: ([ - node not in self.crossings[path_id] - for node in self.paths[path_id][0] - ], [ - node not in self.crossings[path_id] - for node in self.paths[path_id][1] - ]) - for path_id in intersections[intersection_node] - } - - if len(intersections[intersection_node]) == 1: - # This crossing is an intersection only for one path - # -> only need to make sure that the other paths gets - # processed first when generating the swaps - path_id = list(intersections[intersection_node])[0] - - for crossing in self.crossings[path_id]: - if crossing.overlap[0] == intersection_node: - other_path_id = crossing.path_id - if path_id < other_path_id: - self.swap_paths(path_id, other_path_id) - del intersections[intersection_node] - del intersection_node_list[-1] - else: - # This crossing is an intersection for multiple paths - # -> find all paths concerned with this crossing - path_id_list = [ - x for _, x in sorted( - zip([ - len(self.paths[i][0]) + len(self.paths[i][1]) - for i in intersections[intersection_node] - ], intersections[intersection_node])) - ] - - 
# TODO: multiple passes if failure to find an optimal solution - path_id1 = path_id_list.pop() - path_id2 = path_id_list.pop() - - solved = _try_solve_intersection( - intersection_node, - *(self.paths[path_id1] + node_is_not_crossing[path_id1])) - - if not solved: - solved = _try_solve_intersection( - intersection_node, - *(self.paths[path_id2] + - node_is_not_crossing[path_id2])) - - if not solved: - # Last resort: delete one path - path_id_min, path_id_max = sorted([path_id1, path_id2]) - del node_is_not_crossing[path_id_max] - self.remove_path_by_id(path_id_max) - node_is_not_crossing[path_id_min] = ([ - node not in self.crossings[path_id_min] - for node in self.paths[path_id_min][0] - ], [ - node not in self.crossings[path_id_min] - for node in self.paths[path_id_min][1] - ]) - - intersections = _find_first_order_intersections( - self.crossings, self.paths) - intersection_node_list = [ - x for _, x in sorted( - zip(intersections.values(), intersections.keys()), - key=intersection_sort) - ] - - def _calculate_path(self, node0, node1): - """ - Calculate a path between two nodes on the graph. 
- - Args: - node0 (int) : backend id of the first qubit - node1 (int) : backend id of the second qubit - """ - - if self.enable_caching: - try: - path = self.cache.get_path(node0, node1) - except KeyError: - path = nx.shortest_path(self.graph, source=node0, target=node1) - self.cache.add_path(path) - else: - path = nx.shortest_path(self.graph, source=node0, target=node1) - - return path - - def _try_alternative_paths(self, node0, node1): - """ - Attempt to find some alternative paths - """ - for neighbour in self.graph[node0]: - new_path = self._calculate_path(neighbour, node1) - if new_path[-1] == neighbour: - new_path = new_path + [node0] - else: - new_path = [node0] + new_path - if self.try_add_path(new_path): - return True - for neighbour in self.graph[node1]: - new_path = self._calculate_path(node0, neighbour) - if new_path[-1] == neighbour: - new_path = new_path + [node1] - else: - new_path = [node1] + new_path - if self.try_add_path(new_path): - return True - - return False diff --git a/projectq/cengines/_graph_path_manager_test.py b/projectq/cengines/_graph_path_manager_test.py deleted file mode 100644 index e9b9dbbbd..000000000 --- a/projectq/cengines/_graph_path_manager_test.py +++ /dev/null @@ -1,941 +0,0 @@ -# Copyright 2019 ProjectQ-Framework (www.projectq.ch) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tests for projectq.cengines._graph_path_manager.py.""" - -from copy import deepcopy -import itertools -import networkx as nx -import pytest -from projectq.cengines._graph_path_manager import PathManager, \ - PathCacheExhaustive, _find_first_order_intersections, _Crossing - -# ============================================================================== - - -def generate_grid_graph(nrows, ncols): - graph = nx.Graph() - graph.add_nodes_from(range(nrows * ncols)) - - for row in range(nrows): - for col in range(ncols): - node0 = col + ncols * row - - is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) - add_horizontal = is_middle or (row in (0, nrows - 1) and - (0 < col < ncols - 1)) - add_vertical = is_middle or (col in (0, ncols - 1) and - (0 < row < nrows - 1)) - - if add_horizontal: - graph.add_edge(node0, node0 - 1) - graph.add_edge(node0, node0 + 1) - if add_vertical: - graph.add_edge(node0, node0 - ncols) - graph.add_edge(node0, node0 + ncols) - - return graph - - -@pytest.fixture(scope="module") -def simple_graph(): - # 2 4 - # / \ / | - # 0 - 1 3 | - # \ / \ | - # 5 6 - graph = nx.Graph() - graph.add_nodes_from(range(7)) - graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, - 6), - (4, 6)]) - return graph - - -@pytest.fixture -def grid44_manager(): - return PathManager(graph=generate_grid_graph(4, 4), enable_caching=False) - - -# ============================================================================== - - -def test_path_cache_exhaustive(): - path_length_threshold = 3 - cache = PathCacheExhaustive(path_length_threshold) - - assert not cache._cache - cache.add_path(['a', 'b', 'c']) - assert cache._cache == {cache.key_type(('a', 'c')): ['a', 'b', 'c']} - - assert cache.has_path('a', 'c') - assert not cache.has_path('a', 'b') - assert not cache.has_path('b', 'c') - - cache.empty_cache() - assert not cache._cache - - cache.add_path(['a', 'b', 'c', 'd']) - assert cache._cache == { - cache.key_type(('a', 'c')): ['a', 'b', 
'c'], - cache.key_type(('a', 'd')): ['a', 'b', 'c', 'd'], - cache.key_type(('b', 'd')): ['b', 'c', 'd'] - } - assert cache.get_path('a', 'd') == ['a', 'b', 'c', 'd'] - assert cache.has_path('a', 'd') - assert cache.has_path('d', 'a') - assert cache.has_path('a', 'c') - assert cache.has_path('b', 'd') - assert not cache.has_path('a', 'b') - assert not cache.has_path('b', 'a') - assert not cache.has_path('b', 'c') - assert not cache.has_path('c', 'd') - - str_repr = str(cache) - assert str_repr.count("['a', 'd']: ['a', 'b', 'c', 'd']") == 1 - assert str_repr.count("['a', 'c']: ['a', 'b', 'c']") == 1 - assert str_repr.count("['b', 'd']: ['b', 'c', 'd']") == 1 - - -# ============================================================================== - - -def test_path_container_crossing_class(): - Crossing = _Crossing - crossing_list = [Crossing(0, [1]), Crossing(1, [1]), Crossing(2, [2])] - - assert Crossing(0, [1]) == Crossing(0, [1]) - assert Crossing(0, [1]) != Crossing(1, [1]) - assert Crossing(0, [1]) != Crossing(0, [0, 1]) - assert Crossing(0, [0]) != Crossing(1, [0, 1]) - - assert [0, 1] == Crossing(0, [0, 1]) - assert [0, 1] == Crossing(1, [0, 1]) - assert [0, 1] != Crossing(0, [0]) - assert [0, 1] != Crossing(1, [0]) - - assert Crossing(0, [1]) in crossing_list - assert [0] not in crossing_list - assert [1] in crossing_list - - assert str(Crossing(0, [1])) == "{} {}".format(0, [1]) - assert repr(Crossing(0, [1])) == "Crossing({}, {})".format(0, [1]) - - with pytest.raises(NotImplementedError): - assert "" == Crossing(0, [1]) - - -# ============================================================================== - - -def test_valid_and_invalid_graphs(simple_graph): - graph = nx.Graph() - graph.add_nodes_from('abcd') - with pytest.raises(RuntimeError): - PathManager(graph=graph) - - graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) - with pytest.raises(RuntimeError): - PathManager(graph=graph) - - graph = deepcopy(simple_graph) - 
graph.remove_edge(0, 1) - with pytest.raises(RuntimeError): - PathManager(graph=graph) - - -def test_path_container_has_interaction(grid44_manager): - path_dict = { - 0: ([4, 5], [6, 7]), - 1: ([1, 5], [9, 13]), - 2: ([8, 9], [10, 11, 15]) - } - grid44_manager.paths = path_dict - - assert grid44_manager.has_interaction(4, 7) - assert grid44_manager.has_interaction(7, 4) - assert grid44_manager.has_interaction(8, 15) - assert grid44_manager.has_interaction(15, 8) - assert not grid44_manager.has_interaction(4, 5) - assert not grid44_manager.has_interaction(4, 6) - assert not grid44_manager.has_interaction(4, 8) - assert not grid44_manager.has_interaction(4, 9) - assert not grid44_manager.has_interaction(9, 4) - assert not grid44_manager.has_interaction(1, 5) - assert not grid44_manager.has_interaction(1, 9) - assert not grid44_manager.has_interaction(8, 9) - assert not grid44_manager.has_interaction(8, 10) - assert not grid44_manager.has_interaction(8, 11) - - -def test_path_container_get_all_nodes(grid44_manager): - path_dict = { - 0: ([4, 5], [6, 7]), - 1: ([1, 5], [9, 13]), - 2: ([8, 9], [10, 11, 15]) - } - grid44_manager.paths = path_dict - - assert grid44_manager.get_all_nodes() == set((1, 4, 5, 6, 7, 8, 9, 10, 11, - 13, 15)) - - -def test_path_container_get_all_paths(grid44_manager): - path_dict = { - 0: ([4, 5], [6, 7]), - 1: ([1, 5], [9, 13]), - 2: ([8, 9], [10, 11, 15]) - } - grid44_manager.paths = path_dict - - assert grid44_manager.get_all_paths() == [[4, 5, 6, 7], [1, 5, 9, 13], - [8, 9, 10, 11, 15]] - - -def test_path_container_max_order(grid44_manager): - assert grid44_manager.max_crossing_order() == 0 - - assert grid44_manager.try_add_path([4, 5, 6, 7]) - assert grid44_manager.max_crossing_order() == 0 - - assert grid44_manager.try_add_path([1, 5, 9, 13]) - assert grid44_manager.max_crossing_order() == 1 - - -def test_path_container_clear(grid44_manager): - grid44_manager.paths = { - 0: ([4, 5], [6, 7]), - 1: ([1, 5], [9, 13]), - 2: ([8, 9], [10, 11, 
15]) - } - grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values - grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values - - grid44_manager.clear_paths() - assert not grid44_manager.paths - assert not grid44_manager.crossings - assert grid44_manager.paths_stats - - grid44_manager.paths = { - 0: ([4, 5], [6, 7]), - 1: ([1, 5], [9, 13]), - 2: ([8, 9], [10, 11, 15]) - } - grid44_manager.crossings = {0: None, 1: None, 2: None} # dummy values - grid44_manager.paths_stats = {0: 0, 1: 1, 2: 2} # dummy values - - grid44_manager.clear() - assert not grid44_manager.paths - assert not grid44_manager.crossings - assert not grid44_manager.paths_stats - - -def test_path_container_add_path(grid44_manager): - Crossing = _Crossing - - assert grid44_manager.try_add_path([4, 5, 6, 7]) - assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] - assert grid44_manager.crossings == {0: []} - - assert not grid44_manager.try_add_path([4, 8, 12]) - assert not grid44_manager.try_add_path([0, 1, 2, 3, 7]) - assert not grid44_manager.try_add_path([1, 5, 6, 10]) - assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] - assert grid44_manager.crossings == {0: []} - - assert grid44_manager.try_add_path([1, 5, 9, 13]) - assert [4, 5, 6, 7] in grid44_manager.get_all_paths() - assert [1, 5, 9, 13] in grid44_manager.get_all_paths() - assert grid44_manager.crossings == { - 0: [Crossing(1, [5])], - 1: [Crossing(0, [5])] - } - - assert grid44_manager.try_add_path([10, 6, 9, 14, 15]) - assert [4, 5, 6, 7] in grid44_manager.get_all_paths() - assert [4, 5, 6, 7] in grid44_manager.get_all_paths() - assert [10, 6, 9, 14, 15] in grid44_manager.get_all_paths() - - crossings_overlap = [ - sorted([c.overlap[0] for c in crossing_list]) - for crossing_list in grid44_manager.crossings.values() - ] - - assert [6, 9] in crossings_overlap - assert [5, 9] in crossings_overlap - assert [5, 6] in crossings_overlap - - -def test_path_container_push_interaction(grid44_manager): - assert 
grid44_manager.push_interaction(4, 7) - assert grid44_manager.push_interaction(4, 7) - assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] - assert grid44_manager.crossings == {0: []} - - assert grid44_manager.push_interaction(14, 15) - assert grid44_manager.get_all_paths() == [[4, 5, 6, 7]] - assert grid44_manager.crossings == {0: []} - - assert not grid44_manager.push_interaction(0, 4) - - -@pytest.mark.parametrize("enable_caching", [False, True]) -def test_path_container_push_interaction_alternative(grid44_manager, - enable_caching): - grid44_manager.enable_caching = enable_caching - interaction_list = [ - [(4, 7), (0, 12), False], - [(4, 7), (12, 0), True], - [(7, 4), (0, 12), False], - [(7, 4), (12, 0), True], - ] - - for inter1, inter2, may_fail in interaction_list: - grid44_manager.clear_paths() - assert grid44_manager.push_interaction(*inter1) - if may_fail: - if grid44_manager.push_interaction(*inter2): - assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], - [7, 6, 5, 4]) - else: - assert grid44_manager.push_interaction(*inter2) - assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], - [7, 6, 5, 4]) - - interaction_list = [ - [(4, 7), (15, 3)], - [(4, 7), (3, 15)], - [(7, 4), (15, 3)], - [(7, 4), (3, 15)], - ] - grid44_manager.clear() - for inter1, inter2 in interaction_list: - grid44_manager.clear_paths() - assert grid44_manager.push_interaction(*inter1) - assert grid44_manager.push_interaction(*inter2) - assert grid44_manager.get_all_paths()[1] in ([4, 5, 6, 7], - [7, 6, 5, 4]) - - -def test_path_container_remove_path(grid44_manager): - Crossing = _Crossing - - assert grid44_manager.try_add_path([4, 5, 6, 7]) - assert grid44_manager.try_add_path([1, 5, 9, 13]) - assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) - - with pytest.raises(KeyError): - grid44_manager.remove_path_by_id(10) - - grid44_manager.remove_path_by_id(0) - assert [4, 5, 6, 7] in grid44_manager.get_all_paths() - assert [1, 5, 9, 13] in grid44_manager.get_all_paths() 
- assert grid44_manager.crossings == { - 1: [Crossing(2, [5])], - 2: [Crossing(1, [5])] - } - - grid44_manager.remove_path_by_id(1) - assert [[1, 5, 9, 13]] == grid44_manager.get_all_paths() - assert grid44_manager.crossings == {2: []} - - assert grid44_manager.try_add_path([8, 9, 10, 11, 15]) - assert [1, 5, 9, 13] in grid44_manager.get_all_paths() - assert [8, 9, 10, 11, 15] in grid44_manager.get_all_paths() - assert grid44_manager.crossings == { - 2: [Crossing(3, [9])], - 3: [Crossing(2, [9])] - } - - -def test_path_container_swap_paths(grid44_manager): - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - for _, path in path_dict.items(): - assert grid44_manager.try_add_path(path) - assert path in grid44_manager.get_all_paths() - path_dict_ref = grid44_manager.paths - - with pytest.raises(KeyError): - grid44_manager.swap_paths(10, 0) - with pytest.raises(KeyError): - grid44_manager.swap_paths(0, 10) - - grid44_manager.swap_paths(0, 1) - path_dict_ref[0], path_dict_ref[1] = path_dict_ref[1], path_dict_ref[0] - assert grid44_manager.paths == path_dict_ref - - path_dict[3] = [20, 21, 6, 22, 23, 10, 24, 25] - assert grid44_manager.try_add_path(path_dict[3]) - assert path_dict[3] in grid44_manager.get_all_paths() - path_dict_ref = grid44_manager.paths - - grid44_manager.swap_paths(1, 3) - path_dict_ref[1], path_dict_ref[3] = path_dict_ref[3], path_dict_ref[1] - assert grid44_manager.paths == path_dict_ref - - -def test_path_grid44_manager_discard_paths(grid44_manager): - Crossing = _Crossing - path_dict = {0: [4, 5, 6, 7], 1: [1, 5, 9, 13], 2: [8, 9, 10, 11, 15]} - for _, path in path_dict.items(): - assert grid44_manager.try_add_path(path) - assert path in grid44_manager.get_all_paths() - - path_dict_ref = grid44_manager.paths - grid44_manager.remove_crossing_of_order_higher_than(1) - assert grid44_manager.max_crossing_order() == 1 - assert grid44_manager.paths == path_dict_ref - assert grid44_manager.crossings == { - 0: [Crossing(2, [9])], - 
1: [Crossing(2, [5])], - 2: [Crossing(1, [5]), Crossing(0, [9])] - } - - grid44_manager.remove_crossing_of_order_higher_than(0) - del path_dict_ref[1] - assert grid44_manager.max_crossing_order() == 0 - assert grid44_manager.paths == path_dict_ref - assert grid44_manager.crossings == {0: [], 1: []} - - -def test_path_container_find_first_order_intersections(): - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 10), (10, 11), (11, 12), (12, - 5)]) - graph.add_edges_from([(3, 1), (1, 4)]) - graph.add_edges_from([(5, 6), (6, 7)]) - graph.add_edges_from([(20, 6), (6, 21), (21, 22), (22, 23), (23, 24)]) - graph.add_edges_from([(30, 1), (1, 31), (31, 32)]) - graph.add_edges_from([(40, 23), (23, 41), (41, 42), (42, 43), (43, 44)]) - - Crossing = _Crossing - manager = PathManager(graph=graph, enable_caching=False) - - path_dict = {0: [0, 1, 2, 10, 11, 12], 1: [3, 1, 4], 2: [5, 6, 7]} - for _, path in path_dict.items(): - assert manager.try_add_path(path) - assert path in manager.get_all_paths() - - assert manager.crossings == { - 0: [Crossing(1, [1])], - 1: [Crossing(0, [1])], - 2: [] - } - assert _find_first_order_intersections(manager.crossings, - manager.paths) == { - 1: {1} - } - - manager.remove_path_by_id(0) - del path_dict[0] - path_dict[3] = [0, 1, 2, 10] - assert manager.try_add_path(path_dict[3]) - idx1 = manager.get_all_paths().index(path_dict[1]) + 1 - assert _find_first_order_intersections( - manager.crossings, manager.paths - ) == { - 1: {idx1}, - # would be 1: {idx1, idx3} if - # try_add_path was not also - # trying to solve the - # intersections while adding the - # paths - } - - path_dict[4] = [20, 6, 21, 22, 23, 24] - assert manager.try_add_path(path_dict[4]) - assert path_dict[4] in manager.get_all_paths() - idx1 = manager.get_all_paths().index(path_dict[1]) + 1 - idx2 = manager.get_all_paths().index(path_dict[2]) + 1 - assert _find_first_order_intersections(manager.crossings, - manager.paths) == { - 1: {idx1}, - 6: {idx2} - } - - 
path_dict[5] = [30, 1, 31, 32] - assert manager.try_add_path(path_dict[5]) - assert path_dict[5] in manager.get_all_paths() - idx1 = manager.get_all_paths().index(path_dict[1]) + 1 - idx2 = manager.get_all_paths().index(path_dict[2]) + 1 - assert _find_first_order_intersections(manager.crossings, - manager.paths) == { - 1: {idx1}, - 6: {idx2} - } - - path_dict[6] = [40, 23, 41, 42, 43, 44] - assert manager.try_add_path(path_dict[6]) - assert path_dict[6] in manager.get_all_paths() - idx1 = manager.get_all_paths().index(path_dict[1]) + 1 - idx2 = manager.get_all_paths().index(path_dict[2]) + 1 - assert _find_first_order_intersections(manager.crossings, - manager.paths) == { - 1: {idx1}, - 6: {idx2} - } - - -def test_path_container_no_intersection(grid44_manager): - path_dict = {0: [0, 1, 2, 3], 1: [5, 6, 7], 2: [4, 8, 9, 10, 11]} - for _, path in path_dict.items(): - assert grid44_manager.try_add_path(path) - assert path in grid44_manager.get_all_paths() - assert grid44_manager.generate_swaps() == [(0, 1), (3, 2), (7, 6), (4, 8), - (11, 10), (10, 9)] - - -def test_path_container_1_intersection_single_intersection(): - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (3, 1), (1, 4), (2, 10), (10, 11), - (11, 12)]) - - manager = PathManager(graph=graph) - - # 3 - # | - # 0 - 1 - 2 - # | 10 - 11 - 12 - # 4 - # NB: intersection at node 1 - ref_swaps = [ - [(0, 1), (12, 11)], - [(0, 1), (10, 11)], - [(2, 1), (12, 11)], - [(2, 1), (10, 11)], - [(3, 1), (12, 11)], - [(3, 1), (10, 11)], - [(4, 1), (12, 11)], - [(4, 1), (10, 11)], - ] - paths = [[0, 1, 2], [3, 1, 4]] - for path1, path2, in itertools.permutations(paths): - manager.clear() - assert manager.try_add_path(path1) - assert not manager.try_add_path(path2) - assert manager.try_add_path([10, 11, 12]) - assert manager.generate_swaps() in ref_swaps - - # 4 - # | - # 0 - 1 - 2 - 3 - # | 10 - 11 - 12 - # 5 - # NB: intersection at node 1 - ref_swaps = [ - [(0, 1), (1, 2), (4, 1), (12, 11)], - [(0, 1), (1, 2), (4, 
1), (12, 11)], - [(0, 1), (1, 2), (5, 1), (10, 11)], - [(0, 1), (1, 2), (5, 1), (12, 11)], - [(0, 1), (1, 2), (12, 11), (4, 1)], - [(0, 1), (1, 2), (12, 11), (4, 1)], - [(0, 1), (1, 2), (10, 11), (5, 1)], - [(0, 1), (1, 2), (12, 11), (5, 1)], - [(12, 11), (0, 1), (1, 2), (4, 1)], - [(12, 11), (0, 1), (1, 2), (4, 1)], - [(10, 11), (0, 1), (1, 2), (5, 1)], - [(12, 11), (0, 1), (1, 2), (5, 1)], - ] - paths = [[0, 1, 2, 3], [4, 1, 5], [10, 11, 12]] - for path1, path2, path3 in itertools.permutations(paths): - manager.clear() - assert manager.try_add_path(path1) - assert manager.try_add_path(path2) - assert manager.try_add_path(path3) - assert manager.generate_swaps() in ref_swaps - - # 4 - # | - # 0 - 1 - 2 - 3 - # | 10 - 11 - 12 - # 5 - # NB: intersection at node 2 - ref_swaps = [ - [(3, 2), (2, 1), (4, 2), (12, 11)], - [(3, 2), (2, 1), (4, 2), (12, 11)], - [(3, 2), (2, 1), (5, 2), (10, 11)], - [(3, 2), (2, 1), (5, 2), (12, 11)], - [(3, 2), (2, 1), (12, 11), (4, 2)], - [(3, 2), (2, 1), (12, 11), (4, 2)], - [(3, 2), (2, 1), (10, 11), (5, 2)], - [(3, 2), (2, 1), (12, 11), (5, 2)], - [(12, 11), (3, 2), (2, 1), (4, 2)], - [(12, 11), (3, 2), (2, 1), (4, 2)], - [(10, 11), (3, 2), (2, 1), (5, 2)], - [(12, 11), (3, 2), (2, 1), (5, 2)], - ] - paths = [[0, 1, 2, 3], [4, 2, 5], [10, 11, 12]] - for path1, path2, path3 in itertools.permutations(paths): - manager.clear() - assert manager.try_add_path(path1) - assert manager.try_add_path(path2) - assert manager.try_add_path(path3) - assert manager.generate_swaps() in ref_swaps - - # 9 - # | - # 0 - 1 - 2 - 3 - 4 - 5 - # | - # 10 6 - 7 - 8 - # | - # 11 - # NB: intersection at node 1 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), - (1, 10), (10, 11), (5, 6), (6, 7), (7, 8)]) - manager = PathManager(graph=graph) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([9, 1, 10, 11]) - assert manager.try_add_path([6, 7, 8]) - assert manager.generate_swaps() == [(0, 
1), (1, 2), (5, 4), (4, 3), (9, 1), - (11, 10), (8, 7)] - - -def test_path_container_1_intersection_double_crossing_long_right(): - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - 5 - # | | - # 8 9 - # | - # 10 - # | - # 11 - # | - # 12 - # NB: intersection at node 2 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 2), (2, - 8), - (7, 4), (4, 9), (9, 10), (10, 11), (11, 12)]) - manager = PathManager(graph=graph) - - ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), - (5, 4), (8, 2)] - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([6, 2, 8]) - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.generate_swaps() == ref_swaps - - manager.clear() - assert manager.try_add_path([6, 2, 8]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.generate_swaps() == ref_swaps - - ref_swaps = [(5, 4), (4, 3), (3, 2), (2, 1), (7, 4), (4, 9), (12, 11), - (11, 10), (8, 2)] - manager.clear() - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.try_add_path([6, 2, 8]) - assert manager.generate_swaps() == ref_swaps - manager.clear() - assert manager.try_add_path([6, 2, 8]) - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.generate_swaps() == ref_swaps - - ref_swaps = [(7, 4), (4, 9), (12, 11), (11, 10), (5, 4), (4, 3), (3, 2), - (2, 1), (8, 2)] - manager.clear() - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([6, 2, 8]) - assert manager.generate_swaps() == ref_swaps - manager.clear() - assert manager.try_add_path([7, 4, 9, 10, 11, 12]) - assert manager.try_add_path([6, 2, 8]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.generate_swaps() == ref_swaps - - -def 
test_path_container_1_intersection_double_crossing_long_left(): - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - 5 - # | | - # 8 9 - # | - # 10 - # | - # 11 - # | - # 12 - # NB: intersection at node 3 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (6, 1), (1, - 8), - (8, 10), (10, 11), (11, 12), (7, 3), (3, 9)]) - manager = PathManager(graph=graph) - - ref_swaps = [(0, 1), (1, 2), (2, 3), (3, 4), (6, 1), (1, 8), (12, 11), - (11, 10), (9, 3)] - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.try_add_path([7, 3, 9]) - assert manager.generate_swaps() == ref_swaps - manager.clear() - assert manager.try_add_path([7, 3, 9]) - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.generate_swaps() == ref_swaps - - ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (5, 4), (4, 3), - (3, 2), (9, 3)] - manager.clear() - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([7, 3, 9]) - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.generate_swaps() == ref_swaps - manager.clear() - assert manager.try_add_path([7, 3, 9]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.generate_swaps() == ref_swaps - - ref_swaps = [(6, 1), (1, 8), (12, 11), (11, 10), (0, 1), (1, 2), (2, 3), - (3, 4), (9, 3)] - manager.clear() - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.try_add_path([7, 3, 9]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.generate_swaps() == ref_swaps - manager.clear() - assert manager.try_add_path([6, 1, 8, 10, 11, 12]) - assert manager.try_add_path([0, 1, 2, 3, 4, 5]) - assert manager.try_add_path([7, 3, 9]) - assert manager.generate_swaps() == ref_swaps - - -def test_path_container_1_intersection_double_crossing_delete_path(): - # 4 5 4 5 - # | | | | 
- # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 or 0 - 1 - 2 - 3 - # | | | | - # 6 7 6 7 - # NB: intersection at nodes 1 & 2 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (5, 2), (2, - 7)]) - ref_swaps = [ - [(0, 1), (1, 2), (6, 1)], - [(0, 1), (1, 2), (4, 1)], - ] - - manager = PathManager(graph=graph) - assert manager.try_add_path([0, 1, 2, 3]) - assert manager.try_add_path([4, 1, 6]) - assert not manager.try_add_path([5, 2, 7]) - assert manager.generate_swaps() in ref_swaps - - ref_swaps = [ - [(3, 2), (2, 1), (7, 2)], - [(3, 2), (2, 1), (5, 2)], - ] - - manager.clear() - assert manager.try_add_path([0, 1, 2, 3]) - assert manager.try_add_path([5, 2, 7]) - assert not manager.try_add_path([4, 1, 6]) - assert manager.generate_swaps() in ref_swaps - - -def test_path_container_1_intersection_double_crossing_delete_path2(): - # 5 6 6 - # | | | - # 0 - 1 - 2 - 3 - 4 -> 0 - 1 - 2 - 3 - 4 - # | | | - # 7 8 8 - # NB: intersection at nodes 1 & 3 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 1), (1, 7), (6, - 3), - (3, 8)]) - manager = PathManager(graph=graph) - - ref_swaps = [ - [(0, 1), (1, 2), (4, 3), (7, 1)], - [(0, 1), (1, 2), (4, 3), (5, 1)], - [(0, 1), (4, 3), (3, 2), (8, 3)], - [(0, 1), (4, 3), (3, 2), (6, 3)], - ] - - assert manager.try_add_path([0, 1, 2, 3, 4]) - assert manager.try_add_path([5, 1, 7]) - assert not manager.try_add_path([6, 3, 8]) - assert manager.generate_swaps() in ref_swaps - - manager.clear() - assert manager.try_add_path([0, 1, 2, 3, 4]) - assert manager.try_add_path([6, 3, 8]) - assert not manager.try_add_path([5, 1, 7]) - assert manager.generate_swaps() in ref_swaps - - -def test_path_container_1_intersection_double_crossing_neighbouring_nodes(): - # 5 - # | - # 6 7 - # | | - # 0 - 1 - 2 - 3 - 4 - # | | - # 8 9 - # NB: intersection at nodes 1 & 3 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 1), (1, - 8), - (7, 2), (2, 9)]) - manager = 
PathManager(graph=graph) - - ref_swaps = [ - [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (9, 2)], - [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (9, 2)], - [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (9, 2)], - [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (9, 2)], - [(0, 1), (1, 2), (2, 3), (8, 1), (1, 6), (7, 2)], - [(0, 1), (1, 2), (2, 3), (5, 6), (8, 1), (7, 2)], - [(8, 1), (1, 6), (4, 3), (3, 2), (2, 1), (7, 2)], - [(8, 1), (1, 6), (0, 1), (1, 2), (2, 3), (7, 2)], - [(0, 1), (1, 2), (2, 3), (9, 2), (8, 1), (1, 6)], - [(0, 1), (1, 2), (2, 3), (9, 2), (5, 6), (8, 1)], - [(8, 1), (1, 6), (4, 3), (9, 2), (3, 2), (2, 1)], - [(8, 1), (1, 6), (0, 1), (9, 2), (1, 2), (2, 3)], - [(0, 1), (1, 2), (2, 3), (7, 2), (8, 1), (1, 6)], - [(0, 1), (1, 2), (2, 3), (7, 2), (5, 6), (8, 1)], - [(8, 1), (1, 6), (4, 3), (7, 2), (3, 2), (2, 1)], - [(8, 1), (1, 6), (0, 1), (7, 2), (1, 2), (2, 3)], - ] - - paths = [[0, 1, 2, 3, 4], [5, 6, 1, 8], [7, 2, 9]] - - for path1, path2, path3 in itertools.permutations(paths): - manager.clear() - assert manager.try_add_path(path1) - assert manager.try_add_path(path2) - assert manager.try_add_path(path3) - assert manager.generate_swaps() in ref_swaps - - -def test_path_container_1_intersection_triple_crossing(): - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (9, 1), - (1, 10), (10, 11), (12, 1), (1, 13), (13, 14), - (14, 15), (5, 6), (6, 7), (7, 8)]) - manager = PathManager(graph=graph) - - # 9 13 - 14 - 15 - # | / - # 0 - 1 - 2 - 3 - 4 - 5 - # / | - # 12 10 6 - 7 - 8 - # | - # 11 - # NB: intersection at node 1 - manager.clear() - paths = [[9, 1, 10, 11], [0, 1, 2, 3, 4, 5], [6, 7, 8], - [12, 1, 13, 14, 15, 16]] - for path in paths: - assert manager.try_add_path(path) - - paths[3], paths[0], paths[1] \ - = paths[0], paths[1], paths[3] - assert manager.get_all_paths() == paths - - manager.clear() - paths = [[0, 1, 2, 3, 4, 5], [9, 1, 10, 11], [6, 7, 8], - [12, 1, 13, 14, 15, 16]] - for path in paths: - assert 
manager.try_add_path(path) - - paths[3], paths[1] \ - = paths[1], paths[3] - assert manager.get_all_paths() == paths - - # 4 5 10 - 11 - 12 4 10 - 11 - 12 - # | / | - # 0 - 1 - 2 - 3 -> 0 - 1 - 2 - 3 - # / | | - # 6 7 7 - # NB: intersection at node 1 - ref_swaps = [[(0, 1), (1, 2), (4, 1), (12, 11)], - [(0, 1), (1, 2), (4, 1), (10, 11)], - [(0, 1), (1, 2), (7, 1), (12, 11)], - [(0, 1), (1, 2), (7, 1), (10, 11)]] - manager.clear() - paths = [[0, 1, 2, 3], [4, 1, 7], [10, 11, 12], [5, 1, 6]] - assert manager.try_add_path([0, 1, 2, 3]) - assert manager.try_add_path([4, 1, 7]) - assert manager.try_add_path([10, 11, 12]) - assert not manager.try_add_path([5, 1, 6]) - assert manager.generate_swaps() in ref_swaps - - -def test_path_container_1_intersection_triple_crossing_complex(): - # 4 - # | - # 0 - 1 - 2 - 3 - # | - # 5 - 6 - 7 - # | - # 8 - # NB: intersection at nodes 1 & 3 - graph = nx.Graph() - graph.add_edges_from([(0, 1), (1, 2), (2, 3), (4, 1), (1, 6), (6, 8), (5, - 6), - (6, 7)]) - manager = PathManager(graph=graph) - - ref_swaps = [ - [(0, 1), (1, 2), (4, 1), (8, 6)], - [(0, 1), (1, 2), (4, 1), (1, 6)], - [(4, 1), (1, 6), (0, 1), (1, 2)], - [(0, 1), (3, 2), (4, 1), (1, 6)], - [(4, 1), (8, 6), (0, 1), (3, 2)], - [(4, 1), (1, 6), (0, 1), (3, 2)], - ] - - assert manager.try_add_path([0, 1, 2, 3]) - assert manager.try_add_path([4, 1, 6, 8]) - assert not manager.try_add_path([5, 6, 7]) - assert manager.generate_swaps() in ref_swaps - - manager.clear() - assert manager.try_add_path([4, 1, 6, 8]) - assert manager.try_add_path([0, 1, 2, 3]) - assert not manager.try_add_path([5, 6, 7]) - assert manager.generate_swaps() in ref_swaps - - ref_swaps = [ - [(0, 1), (1, 2), (8, 6), (6, 1), (5, 6)], - [(0, 1), (1, 2), (8, 6), (6, 1), (7, 6)], - ] - - manager.clear() - assert manager.try_add_path([4, 1, 6, 8]) - assert manager.try_add_path([5, 6, 7]) - assert manager.try_add_path([0, 1, 2, 3]) - assert manager.generate_swaps() in ref_swaps - - manager.clear() - assert 
manager.try_add_path([0, 1, 2, 3]) - assert manager.try_add_path([5, 6, 7]) - - # With some modification to PathManager, this next line could be made not - # to fail adding the path. - # This would require the intersection resolving algorithm to allow the - # creation of a new intersection for the path currently being added but not - # for any other stored path. - # (ie. allowing the [4], [1, 6, 8] path split, although now 1 is an - # intersection for the new path) - assert not manager.try_add_path([4, 1, 6, 8]) diff --git a/projectq/cengines/_multi_qubit_gate_manager.py b/projectq/cengines/_multi_qubit_gate_manager.py new file mode 100644 index 000000000..2f7542a85 --- /dev/null +++ b/projectq/cengines/_multi_qubit_gate_manager.py @@ -0,0 +1,649 @@ +# Copyright 2019 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This is a helper module for the :py:class:`projectq.cengines.GraphMapper` +class. + +Implements a SABRE-like algorithm [1] to generate a list of SWAP operations to +route qubit through an arbitrary graph. 
+ +[1] https://arxiv.org/abs/1809.02573v2 +""" + +import networkx as nx + +# ============================================================================== + + +def _sum_distance_over_gates(gate_list, mapping, distance_matrix): + """ + Calculate the sum of distances between pairs of qubits + + Args: + gate_list (list): List of 2-qubit gates + mapping (dict): Current mapping + distance_matrix (dict): Distance matrix within the hardware coupling + graph + + Returns: + Sum of all pair-wise distances between qubits + """ + return sum([ + distance_matrix[mapping[gate.logical_id0]][mapping[gate.logical_id1]] + for gate in gate_list + ]) + + +def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, + opts): + """ + Nearest neighbours cost function + + .. math:: + + H = \sum_{\mathrm{gate}\ \in\ F} + D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) + + where: + + - :math:`F` is the ensemble of gates in the front layer + - :math:`D` is the distance matrix + - :math:`\mathrm{gate}.q_{1, 2}` are the backend qubit IDs for each gate + + + Args: + gates_dag (GatesDAG): Direct acyclic graph of future quantum gates + mapping (dict): Current mapping + distance_matrix (dict): Distance matrix within the hardware coupling + graph + swap (tuple): Candidate swap (not used by this function) + opts (dict): Miscellaneous parameters for cost function (not used by + this function) + + Returns: + Score of current swap operations + """ + return _sum_distance_over_gates(gates_dag.front_layer, mapping, + distance_matrix) + + +def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, + opts): + """ + Cost function using nearest-neighbour interactions as well as considering + gates from the near-term layer (provided it has been calculated) in order + to favour swap operations that can be performed in parallel. + + .. 
math:: + + H = M \\left[\\frac{1}{|F|}\sum_{\mathrm{gate}\ \in\ F} + D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) + + \\frac{W}{|E|}\sum_{\mathrm{gate}\ \in\ E} + D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) \\right] + + where: + + - :math:`M` is defined as :math:`\max(decay(SWAP.q_1), decay(SWAP.q_2))` + - :math:`F` is the ensemble of gates in front layer + - :math:`E` is the ensemble of gates in near-term layer + - :math:`D` is the distance matrix + - :math:`\mathrm{gate}.q_{1, 2}` are the backend qubit IDs for each gate + + + Args: + gates_dag (GatesDAG): Direct acyclic graph of future quantum gates + mapping (dict): Current mapping + distance_matrix (dict): Distance matrix within the hardware coupling + graph + swap (tuple): Candidate swap operation + opts (dict): Miscellaneous parameters for cost function + + Returns: + Score of current swap operations + + Note: + ``opts`` must contain the following key-values + + .. list-table:: + :header-rows: 1 + + * - Key + - Type + - Description + * - decay + - :py:class:`.DecayManager` + - | Instance containing current decay information for each + | backend qubit + * - W + - ``float`` + - Weighting factor (see cost function formula) + + + """ + decay = opts['decay'] + W = opts['W'] + N_front = len(gates_dag.front_layer) + N_near = len(gates_dag.near_term_layer) + + front_layer_term = (1. 
/ N_front * _sum_distance_over_gates( + gates_dag.front_layer, mapping, distance_matrix)) + + if N_near == 0: + return (max(decay.get_decay_value(swap[0]), + decay.get_decay_value(swap[1])) * front_layer_term) + return ( + max(decay.get_decay_value(swap[0]), decay.get_decay_value(swap[1])) * + front_layer_term + (W / N_near * _sum_distance_over_gates( + gates_dag.near_term_layer, mapping, distance_matrix))) + + +# ============================================================================== + + +def _apply_swap_to_mapping(mapping, logical_id0, logical_id1, backend_id1): + """ + Modify a mapping by applying a SWAP operation + + Args: + mapping (dict): mapping to update + logical_id0 (int): A logical qubit ID + logical_id1 (int): A logical qubit ID + backend_id1 (int): Backend ID corresponding to ``logical_id1`` + + .. note:: + + ``logical_id1`` can be set to -1 to indicate a non-allocated backend qubit + """ + # If the qubit is present in the mapping (ie. second qubit is already + # allocated), update the mapping, otherwise simply assign the new backend + # ID to the qubit being swapped. + + if logical_id1 != -1: + mapping[logical_id0], mapping[logical_id1] \ + = mapping[logical_id1], mapping[logical_id0] + else: + mapping[logical_id0] = backend_id1 + + +# ============================================================================== + + +class QubitIDDecay(object): + """ + Class storing the decay information about a particular backend qubit ID + + Attributes: + decay (float): Decay value for a backend qubit ID + lifetime (int): Lifetime of decay information for a backend qubit ID + """ + def __init__(self, decay, lifetime): + self.decay = decay + self.lifetime = lifetime + + +class DecayManager(object): + """ + Class managing the decay information about a list of backend qubit IDs + + User should call the :py:meth:`step` method each time a swap gate is added and + :py:meth:`remove_decay` once a 2-qubit gate is executed. 
+ """ + def __init__(self, delta, max_lifetime): + """ + Constructor + + Args: + delta (float): Decay parameter + max_lifetime (int): Maximum lifetime of decay information for a + particular qubit + """ + self._delta = delta + self._cutoff = max_lifetime + self._backend_ids = {} + + def add_to_decay(self, backend_id): + """ + Add to the decay to a particular backend qubit ID + + Args: + backend_id (int) : Backend qubit ID + """ + # Ignore invalid (ie. non-allocated) backend IDs + if backend_id < 0: + return + + if backend_id in self._backend_ids: + self._backend_ids[backend_id].lifetime = self._cutoff + self._backend_ids[backend_id].decay += self._delta + else: + self._backend_ids[backend_id] = QubitIDDecay( + self._delta, self._cutoff) + + def remove_decay(self, backend_id): + """ + Remove the decay of a particular backend qubit ID + + Args: + backend_id (int) : Backend qubit ID + """ + if backend_id in self._backend_ids: + del self._backend_ids[backend_id] + + def get_decay_value(self, backend_id): + """ + Retrieve the decay value of a particular backend qubit ID + + Args: + backend_id (int) : Backend qubit ID + """ + if backend_id in self._backend_ids: + return self._backend_ids[backend_id].decay + return 0 + + def step(self): + """ + Step all decay values in time + + Use this method to indicate a SWAP search step has been performed. + """ + backend_ids = list(self._backend_ids) + for backend_id in backend_ids: + self._backend_ids[backend_id].lifetime -= 1 + if self._backend_ids[backend_id].lifetime == 0: + del self._backend_ids[backend_id] + + +# ============================================================================== + + +class _DAGNode(object): + """ + Class representing a node inside a Direct Acyclic Graph (DAG) + + .. 
note:: + + Main purpose of this class is to allow gates with identical qubits to be + stored within the same graph (networkx limitation) + """ + def __init__(self, logical_id0, logical_id1): + self.logical_id0 = logical_id0 + self.logical_id1 = logical_id1 + self.logical_ids = frozenset((logical_id0, logical_id1)) + + +class GatesDAG(object): + """ + Class managing a list of multi-qubit gates and storing them into a Direct + Acyclic Graph (DAG) in order of precedence. + """ + def __init__(self): + self._dag = nx.DiGraph() + self._logical_ids_in_diag = set() + self.front_layer = [] + self.near_term_layer = set() + self._back_layer = {} + + def add_gate(self, logical_id0, logical_id1): + """ + Add a gate to the DAG + + Args: + logical_id0 (int) : A logical qubit ID + logical_id1 (int) : A logical qubit ID + + .. note:: + If neither of ``logical_id0`` or ``logical_id1`` are currently found within the + DAG, also add the gate to the font layer. + """ + + logical_id0_in_dag = logical_id0 in self._logical_ids_in_diag + logical_id1_in_dag = logical_id1 in self._logical_ids_in_diag + + if not (logical_id0_in_dag and logical_id1_in_dag and self. + _back_layer[logical_id0] == self._back_layer[logical_id1]): + # Do not add the new gate to DAG if both qubits are present inside + # the DAG *and* the gate on the last layer is the same for both + # qubits. 
+ new_gate = _DAGNode(logical_id0, logical_id1) + + self._dag.add_node(new_gate) + + if logical_id0_in_dag: + self._dag.add_edge(self._back_layer[logical_id0], new_gate) + self._logical_ids_in_diag.add(logical_id1) + else: + self._logical_ids_in_diag.add(logical_id0) + + if logical_id1_in_dag: + self._dag.add_edge(self._back_layer[logical_id1], new_gate) + self._logical_ids_in_diag.add(logical_id0) + else: + self._logical_ids_in_diag.add(logical_id1) + + self._back_layer[logical_id0] = new_gate + self._back_layer[logical_id1] = new_gate + + # If both qubit are not already in the DAG, then we just got a new + # gate on the front layer + if not logical_id0_in_dag and not logical_id1_in_dag: + self.front_layer.append(new_gate) + return new_gate + return None + + def remove_from_front_layer(self, logical_id0, logical_id1): + """ + Remove a gate from the front layer of the DAG + + Args: + logical_id0 (int) : A logical qubit ID + logical_id1 (int) : A logical qubit ID + + Raises: + RuntimeError if the gate does not exist in the front layer + """ + # First find the gate inside the first layer list + for gate in self.front_layer: + if gate.logical_ids == frozenset((logical_id0, logical_id1)): + break + else: + raise RuntimeError('({}, {}) not found in DAG'.format( + logical_id0, logical_id1)) + + descendants = list(self._dag[gate]) + + if not descendants: + self._logical_ids_in_diag.remove(logical_id0) + self._logical_ids_in_diag.remove(logical_id1) + del self._back_layer[logical_id0] + del self._back_layer[logical_id1] + self._dag.remove_node(gate) + else: + if len(descendants) == 1: + # Look for the logical_id not found in the descendant + logical_id = logical_id0 + if logical_id in descendants[0].logical_ids: + logical_id = logical_id1 + + self._logical_ids_in_diag.remove(logical_id) + del self._back_layer[logical_id] + + # Remove gate from DAG + self._dag.remove_node(gate) + + for descendant in descendants: + if not self._dag.pred[descendant]: + 
self.front_layer.append(descendant) + + # Remove the gate from the first layer + self.front_layer.remove(gate) + + def max_distance_in_dag(self): + """ + Calculate the distance between the front layer and each gate of the + DAG. + + A gate with distance 0 is on the front layer. + + Returns: + Python dictionary indexed by gate with their distance as value + """ + gate_max_distance = {} + for gate in self.front_layer: + gate_max_distance[gate] = 0 + self._max_distance_in_dag(gate_max_distance, gate, 1) + + return gate_max_distance + + def calculate_near_term_layer(self, max_distance): + """ + Calculate a near term layer with all gates less than `max_distance` + from the front layer + + Args: + max_distance (int): Maximum distance from front layer to consider + """ + if not max_distance: + self.near_term_layer = set() + else: + self.near_term_layer = { + gate + for gate, dist in self.max_distance_in_dag().items() + if 0 < dist <= max_distance + } + + def _max_distance_in_dag(self, gate_max_distance, gate, distance): + """ + Recursively calculate the maximum distance for each gate of the DAG + + Args: + gate_max_depth (dict): Dictionary containing the current maximum + distance for each gate + gate (_DAGNode): Root node from DAG for traversal + distance (int): Current distance offset + """ + for descendant in self._dag[gate]: + try: + if gate_max_distance[descendant] < distance: + gate_max_distance[descendant] = distance + except KeyError: + gate_max_distance[descendant] = distance + + if self._dag[descendant]: + self._max_distance_in_dag(gate_max_distance, descendant, + distance + 1) + + +class MultiQubitGateManager(object): + """ + Class managing qubit interactions + """ + def __init__(self, graph, decay_opts={}): + """ + Args: + graph (networkx.Graph): an arbitrary connected graph + """ + # Make sure that we start with a valid graph + if not nx.is_connected(graph): + raise RuntimeError("Input graph must be a connected graph") + elif not all([isinstance(n, int) for n 
in graph]): + raise RuntimeError( + "All nodes inside the graph needs to be integers") + else: + self.graph = graph + self.distance_matrix = dict( + nx.all_pairs_shortest_path_length(self.graph)) + + self._dag = GatesDAG() + self._decay = DecayManager(decay_opts.get('delta', 0.001), + decay_opts.get('lifetime', 5)) + self.stats = {} + + def generate_swaps(self, current_mapping, cost_fun, opts={}, + max_steps=100): + """ + Generate a list of swaps to execute some quantum gates + + Args: + mapping (dict): Current mapping + cost_fun (function): Cost function to rank swap candidates + Must accept the following parameters: + - dag (_GatesDAG) + - new_mapping (dict) + - distance_matrix (dict) + - swap_candidate (tuple) + max_steps (int): (optional) Maximum number of swap steps to + attempt before giving up + opts (dict): (optional) Extra parameters for cost function call + (see note below) + + .. note:: + + The ``opts`` optional parameter may contain the following key-values: + + .. list-table:: + :header-rows: 1 + + * - Key + - Type + - Description + * - near_term_layer + - ``int`` + - | If 0 (default) do not consider near-term gates + | when generating the list of swap operations. + | If >0, calculate the near-term layer using + | all gates in the DAG that have a distance equal + | to or less than this value. + * - ... + - ... + - | Any other parameter will be passed onto the cost + | function when it is called. + + Returns: + A tuple (list, set) of swap operations (tuples of backend IDs) and + a set of all the backend IDs that are traversed by the SWAP + operations. 
+ """ + + if not self._dag.front_layer: + return ([], set()) + + opts['decay'] = self._decay + + self._dag.calculate_near_term_layer(opts.get('near_term_layer', 0)) + + mapping = current_mapping.copy() + swaps = [] + all_swapped_qubits = set() + while not self._can_execute_some_gate(mapping): + (logical_id0, logical_id1, + backend_id1) = self._generate_one_swap_step( + mapping, cost_fun, opts) + swaps.append((mapping[logical_id0], backend_id1)) + all_swapped_qubits.add(mapping[logical_id0]) + all_swapped_qubits.add(backend_id1) + + for backend_id in swaps[-1]: + self._decay.add_to_decay(backend_id) + self._decay.step() + + _apply_swap_to_mapping(mapping, logical_id0, logical_id1, + backend_id1) + + if len(swaps) > max_steps: + raise RuntimeError( + 'Maximum number of steps to find a list of' + + ' SWAP operations reached without convergence') + + return swaps, all_swapped_qubits + + def push_interaction(self, logical_id0, logical_id1): + """ + Plan an interaction between two qubit. + + Args: + logical_id0 (int) : A logical qubit ID + logical_id1 (int) : A logical qubit ID + """ + + self._dag.add_gate(logical_id0, logical_id1) + + new_gate = frozenset((logical_id0, logical_id1)) + if new_gate not in self.stats: + self.stats[new_gate] = 1 + else: + self.stats[new_gate] += 1 + + def execute_gate( + self, + mapping, + logical_id0, + logical_id1, + ): + """ + Execute a gate (ie. 
mark it as executed if present in the DAG) + + Args: + mapping (dict): Current mapping + logical_id0 (int) : A logical qubit ID + logical_id1 (int) : A logical qubit ID + """ + if self.graph.has_edge(mapping[logical_id0], mapping[logical_id1]): + for gate in self._dag.front_layer: + if (logical_id0 in gate.logical_ids + and logical_id1 in gate.logical_ids): + self._dag.remove_from_front_layer(logical_id0, logical_id1) + return True + return False + + # ========================================================================== + + def _generate_one_swap_step(self, mapping, cost_fun, opts={}): + """ + Find the most optimal swap operation to perform next + + Args: + mapping (dict): Current mapping + cost_fun (function): Cost function to rank swap candidates + Must accept the following parameters: + - dag (_GatesDAG) + - new_mapping (dict) + - distance_matrix (dict) + - swap_candidate (tuple) + + Returns: + Tuple with (logical_id0, logical_id1, backend_id1) where + logical_id1 can be -1 if backend_id1 does not currently have a + logical qubit associated to it. 
+ """ + + reverse_mapping = {v: k for k, v in mapping.items()} + + # Only consider gates from the front layer and generate a list of + # potential SWAP operations with all qubits that are neighours of + # those concerned by a gate + swap_candidates = [] + for gate in self._dag.front_layer: + for logical_id in gate.logical_ids: + for backend_id in self.graph[mapping[logical_id]]: + swap_candidates.append( + (logical_id, reverse_mapping.get(backend_id, + -1), backend_id)) + + # Rank swap candidates using the provided cost function + scores = [] + for logical_id0, logical_id1, backend_id1 in swap_candidates: + new_mapping = mapping.copy() + + _apply_swap_to_mapping(new_mapping, logical_id0, logical_id1, + backend_id1) + + scores.append( + cost_fun(self._dag, new_mapping, self.distance_matrix, + (logical_id0, logical_id1), opts)) + + # Return the swap candidate with the lowest score + return swap_candidates[scores.index(min(scores))] + + def _can_execute_some_gate(self, mapping): + """ + Test whether some gate from the front layer can be executed + + Args: + mapping (dict): Current mapping + """ + for gate in self._dag.front_layer: + if self.graph.has_edge(mapping[gate.logical_id0], + mapping[gate.logical_id1]): + return True + return False diff --git a/projectq/cengines/_multi_qubit_gate_manager_test.py b/projectq/cengines/_multi_qubit_gate_manager_test.py new file mode 100644 index 000000000..140b8d2e4 --- /dev/null +++ b/projectq/cengines/_multi_qubit_gate_manager_test.py @@ -0,0 +1,562 @@ +# Copyright 2018 ProjectQ-Framework (www.projectq.ch) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for projectq.cengines._graphmapper.py.""" + +from copy import deepcopy + +import pytest +import networkx as nx +# from projectq.cengines import DummyEngine, LocalOptimizer, MainEngine +# from projectq.meta import LogicalQubitIDTag +from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, + X, H, All, Measure, CNOT) +from projectq.types import WeakQubitRef + +from projectq.cengines import _multi_qubit_gate_manager as multi + + +# For debugging purposes +def to_string(self): + return str(tuple(self.logical_ids)) + + +multi._dag_node.__str__ = to_string +multi._dag_node.__repr__ = to_string + + +def allocate_all_qubits_cmd(mapper): + qb = [] + allocate_cmds = [] + for i in range(mapper.num_qubits): + qb.append(WeakQubitRef(engine=None, idx=i)) + allocate_cmds.append( + Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) + return qb, allocate_cmds + + +def generate_grid_graph(nrows, ncols): + graph = nx.Graph() + graph.add_nodes_from(range(nrows * ncols)) + + for row in range(nrows): + for col in range(ncols): + node0 = col + ncols * row + + is_middle = ((0 < row < nrows - 1) and (0 < col < ncols - 1)) + add_horizontal = is_middle or (row in (0, nrows - 1) and + (0 < col < ncols - 1)) + add_vertical = is_middle or (col in (0, ncols - 1) and + (0 < row < nrows - 1)) + if add_horizontal: + graph.add_edge(node0, node0 - 1) + graph.add_edge(node0, node0 + 1) + if add_vertical: + graph.add_edge(node0, node0 - ncols) + graph.add_edge(node0, node0 + ncols) + if nrows == 2: + node0 = col + graph.add_edge(node0, node0 + ncols) + if 
ncols == 2: + node0 = ncols * row + graph.add_edge(node0, node0 + 1) + + return graph + + +@pytest.fixture(scope="module") +def simple_graph(): + # 2 4 + # / \ / | + # 0 - 1 3 | + # \ / \ | + # 5 6 + graph = nx.Graph() + graph.add_nodes_from(range(7)) + graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), + (3, 6), (4, 6)]) + return graph + + +@pytest.fixture(scope="module") +def grid33_graph(): + return generate_grid_graph(3, 3) + + +@pytest.fixture +def decay_manager(): + return multi.DecayManager(0.001, 5) + + +@pytest.fixture +def gates_dag(): + return multi.GatesDAG() + + +@pytest.fixture +def qubit_manager(): + return multi.MultiQubitGateManager(generate_grid_graph(3, 3)) + + +# ============================================================================== +# DecayManager +# ------------------------------------------------------------------------------ + + +def test_decay_manager_add(decay_manager): + delta = decay_manager._delta + lifetime = decay_manager._cutoff + + decay_manager.add_to_decay(-1) + assert not decay_manager._backend_ids + + decay_manager.add_to_decay(0) + assert list(decay_manager._backend_ids) == [0] + backend_qubit = decay_manager._backend_ids[0] + assert backend_qubit.decay == delta + assert backend_qubit.lifetime == lifetime + + decay_manager.add_to_decay(0) + assert list(decay_manager._backend_ids) == [0] + backend_qubit = decay_manager._backend_ids[0] + assert backend_qubit.decay == 2 * delta + assert backend_qubit.lifetime == lifetime + + decay_manager.add_to_decay(1) + assert sorted(decay_manager._backend_ids) == [0, 1] + backend_qubit = decay_manager._backend_ids[0] + assert backend_qubit.decay == 2 * delta + assert backend_qubit.lifetime == lifetime + + backend_qubit = decay_manager._backend_ids[1] + assert backend_qubit.decay == delta + assert backend_qubit.lifetime == lifetime + + +def test_decay_manager_remove(decay_manager): + decay_manager.add_to_decay(0) + decay_manager.add_to_decay(0) + 
decay_manager.add_to_decay(1) + assert sorted(list(decay_manager._backend_ids)) == [0, 1] + + decay_manager.remove_decay(0) + assert list(decay_manager._backend_ids) == [1] + decay_manager.remove_decay(1) + assert not decay_manager._backend_ids + + +def test_decay_manager_get_decay_value(decay_manager): + delta = decay_manager._delta + + decay_manager.add_to_decay(0) + decay_manager.add_to_decay(0) + decay_manager.add_to_decay(1) + + assert decay_manager.get_decay_value(0) == 2 * delta + assert decay_manager.get_decay_value(1) == delta + assert decay_manager.get_decay_value(-1) == 0 + assert decay_manager.get_decay_value(2) == 0 + + +def test_decay_manager_step(decay_manager): + delta = decay_manager._delta + lifetime = decay_manager._cutoff + + decay_manager.add_to_decay(0) + decay_manager.step() + + backend_qubit = decay_manager._backend_ids[0] + assert backend_qubit.decay == delta + assert backend_qubit.lifetime == lifetime - 1 + + decay_manager.add_to_decay(0) + decay_manager.add_to_decay(1) + + decay_manager.step() + + backend_qubit0 = decay_manager._backend_ids[0] + backend_qubit1 = decay_manager._backend_ids[1] + + assert backend_qubit0.decay == 2 * delta + assert backend_qubit0.lifetime == lifetime - 1 + assert backend_qubit1.decay == delta + assert backend_qubit1.lifetime == lifetime - 1 + + decay_manager.step() + assert backend_qubit0.decay == 2 * delta + assert backend_qubit0.lifetime == lifetime - 2 + assert backend_qubit1.decay == delta + assert backend_qubit1.lifetime == lifetime - 2 + + decay_manager.add_to_decay(1) + assert backend_qubit1.lifetime == lifetime + + for i in range(3): + decay_manager.step() + + # Qubit 0 decay information should be deleted by now + assert list(decay_manager._backend_ids) == [1] + + for i in range(2): + assert list(decay_manager._backend_ids) == [1] + decay_manager.step() + assert not decay_manager._backend_ids + + +# ============================================================================== +# GatesDAG +# 
------------------------------------------------------------------------------ + + +def test_gates_dag_init(gates_dag): + assert gates_dag._dag.number_of_nodes() == 0 + assert gates_dag._dag.number_of_edges() == 0 + assert not gates_dag.front_layer + assert not gates_dag.near_term_layer + + +def test_gates_dag_add_gate(gates_dag): + dag_node01 = gates_dag.add_gate(0, 1) + + assert gates_dag._dag.number_of_nodes() == 1 + assert gates_dag._dag.number_of_edges() == 0 + assert gates_dag.front_layer + assert not gates_dag.near_term_layer + assert dag_node01.logical_id0 == 0 + assert dag_node01.logical_id1 == 1 + assert dag_node01.logical_ids == frozenset((0, 1)) + assert gates_dag.front_layer == [dag_node01] + assert gates_dag._logical_ids_in_diag == {0, 1} + assert gates_dag._back_layer == {0: dag_node01, 1: dag_node01} + + # ---------------------------------- + + dag_node56 = gates_dag.add_gate(5, 6) + + assert gates_dag._dag.number_of_nodes() == 2 + assert gates_dag._dag.number_of_edges() == 0 + assert gates_dag.front_layer + assert not gates_dag.near_term_layer + + assert dag_node01.logical_id0 == 0 + assert dag_node01.logical_id1 == 1 + assert dag_node01.logical_ids == frozenset((0, 1)) + assert dag_node56.logical_id0 == 5 + assert dag_node56.logical_id1 == 6 + assert dag_node56.logical_ids == frozenset((5, 6)) + + assert gates_dag.front_layer == [dag_node01, dag_node56] + assert gates_dag._logical_ids_in_diag == {0, 1, 5, 6} + assert gates_dag._back_layer == { + 0: dag_node01, + 1: dag_node01, + 5: dag_node56, + 6: dag_node56 + } + + # ---------------------------------- + + dag_node12 = gates_dag.add_gate(1, 2) + assert gates_dag._dag.number_of_nodes() == 3 + assert gates_dag._dag.number_of_edges() == 1 + assert gates_dag.front_layer + assert not gates_dag.near_term_layer + + assert dag_node01.logical_id0 == 0 + assert dag_node01.logical_id1 == 1 + assert dag_node01.logical_ids == frozenset((0, 1)) + assert dag_node12.logical_id0 == 1 + assert 
dag_node12.logical_id1 == 2 + assert dag_node12.logical_ids == frozenset((1, 2)) + assert dag_node56.logical_id0 == 5 + assert dag_node56.logical_id1 == 6 + assert dag_node56.logical_ids == frozenset((5, 6)) + + assert gates_dag.front_layer == [dag_node01, dag_node56] + assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert gates_dag._back_layer == { + 0: dag_node01, + 1: dag_node12, + 2: dag_node12, + 5: dag_node56, + 6: dag_node56 + } + + # ---------------------------------- + + dag_node26 = gates_dag.add_gate(2, 6) + assert gates_dag.add_gate(2, 6) is None + assert gates_dag._dag.number_of_nodes() == 4 + assert gates_dag._dag.number_of_edges() == 3 + assert gates_dag.front_layer + assert not gates_dag.near_term_layer + + assert gates_dag.front_layer == [dag_node01, dag_node56] + assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert gates_dag._back_layer == { + 0: dag_node01, + 1: dag_node12, + 2: dag_node26, + 5: dag_node56, + 6: dag_node26 + } + + +def test_gates_dag_remove_from_front_layer(gates_dag): + dag_node01 = gates_dag.add_gate(0, 1) + dag_node56 = gates_dag.add_gate(5, 6) + dag_node12 = gates_dag.add_gate(1, 2) + dag_node26 = gates_dag.add_gate(2, 6) + dag_node78 = gates_dag.add_gate(7, 8) + + with pytest.raises(RuntimeError): + gates_dag.remove_from_front_layer(1, 2) + + assert gates_dag.front_layer == [dag_node01, dag_node56, dag_node78] + + gates_dag.remove_from_front_layer(7, 8) + assert gates_dag.front_layer == [dag_node01, dag_node56] + assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert 7 not in gates_dag._back_layer + assert 8 not in gates_dag._back_layer + + gates_dag.remove_from_front_layer(1, 0) + assert gates_dag.front_layer == [dag_node56, dag_node12] + + gates_dag.remove_from_front_layer(5, 6) + assert gates_dag.front_layer == [dag_node12] + + gates_dag.remove_from_front_layer(1, 2) + assert gates_dag.front_layer == [dag_node26] + + +def test_gates_dag_max_distance(gates_dag): + dag_node23a = 
gates_dag.add_gate(2, 3) + dag_node56 = gates_dag.add_gate(5, 6) + dag_node12 = gates_dag.add_gate(1, 2) + dag_node34 = gates_dag.add_gate(3, 4) + dag_node23b = gates_dag.add_gate(2, 3) + dag_node46 = gates_dag.add_gate(4, 6) + dag_node45 = gates_dag.add_gate(5, 4) + dag_node14 = gates_dag.add_gate(4, 1) + + distance = gates_dag.max_distance_in_dag() + assert distance[dag_node23a] == 0 + assert distance[dag_node56] == 0 + assert distance[dag_node12] == 1 + assert distance[dag_node34] == 1 + assert distance[dag_node23b] == 2 + assert distance[dag_node46] == 2 + assert distance[dag_node45] == 3 + assert distance[dag_node14] == 4 + + +def test_gates_dag_near_term_layer(gates_dag): + dag_node23a = gates_dag.add_gate(2, 3) + dag_node56 = gates_dag.add_gate(5, 6) + dag_node12 = gates_dag.add_gate(1, 2) + dag_node34 = gates_dag.add_gate(3, 4) + dag_node23b = gates_dag.add_gate(2, 3) + dag_node46 = gates_dag.add_gate(4, 6) + dag_node45 = gates_dag.add_gate(5, 4) + dag_node14 = gates_dag.add_gate(4, 1) + + gates_dag.calculate_near_term_layer(0) + assert not gates_dag.near_term_layer + + gates_dag.calculate_near_term_layer(1) + assert {dag_node12, dag_node34} == gates_dag.near_term_layer + + gates_dag.calculate_near_term_layer(2) + assert {dag_node12, dag_node34, dag_node23b, + dag_node46} == gates_dag.near_term_layer + + +# ============================================================================== +# MultiQubitGateManager +# ------------------------------------------------------------------------------ + + +def test_qubit_manager_valid_and_invalid_graphs(simple_graph): + graph = nx.Graph() + graph.add_nodes_from('abcd') + with pytest.raises(RuntimeError): + multi.MultiQubitGateManager(graph=graph) + + graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) + with pytest.raises(RuntimeError): + multi.MultiQubitGateManager(graph=graph) + + graph = deepcopy(simple_graph) + graph.remove_edge(0, 1) + with pytest.raises(RuntimeError): + 
multi.MultiQubitGateManager(graph=graph) + + manager = multi.MultiQubitGateManager(graph=simple_graph) + dist = manager.distance_matrix + + assert dist[0][1] == 1 + assert dist[0][2] == 2 + assert dist[0][3] == 3 + assert dist[0][4] == 4 + assert dist[0][5] == 2 + assert dist[0][6] == 4 + assert dist[1][0] == 1 + assert dist[1][2] == 1 + assert dist[1][3] == 2 + assert dist[1][4] == 3 + assert dist[1][5] == 1 + assert dist[1][6] == 3 + + +def test_qubit_manager_push_interaction(qubit_manager): + qubit_manager.push_interaction(0, 1) + assert qubit_manager.stats[frozenset((0, 1))] == 1 + qubit_manager.push_interaction(0, 1) + assert qubit_manager.stats[frozenset((0, 1))] == 2 + qubit_manager.push_interaction(5, 6) + assert qubit_manager.stats[frozenset((0, 1))] == 2 + assert qubit_manager.stats[frozenset((5, 6))] == 1 + + +def test_qubit_manager_can_execute_gate(qubit_manager): + mapping = {i: i for i in range(9)} + + qubit_manager.push_interaction(5, 6) + assert not qubit_manager._can_execute_some_gate(mapping) + qubit_manager.push_interaction(0, 1) + assert qubit_manager._can_execute_some_gate(mapping) + + +def test_qubit_manager_generatae_one_swap_step(qubit_manager): + manager = deepcopy(qubit_manager) + manager.push_interaction(0, 8) + + mapping = {i: i for i in range(9)} + (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun) + + assert logical_id0 in (0, 8) + if logical_id0 == 0: + assert backend_id1 in (1, 3) + else: + assert backend_id1 in (5, 7) + + mapping = {0: 0, 8: 8} + (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun) + + assert logical_id1 == -1 + if logical_id0 == 0: + assert backend_id1 in (1, 3) + else: + assert backend_id1 in (5, 7) + + # ---------------------------------- + + manager = deepcopy(qubit_manager) + manager.push_interaction(0, 1) + manager.push_interaction(0, 8) + + mapping = {i: i for i in 
range(9)} + (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun) + + # In this case, the only swap that does not increases the overall distance + # is (0, 1) + assert logical_id0 in (0, 1) + assert backend_id1 in (0, 1) + + +def test_qubit_manager_generate_swaps(qubit_manager): + manager = deepcopy(qubit_manager) + mapping = {i: i for i in range(9)} + + swaps, all_qubits = manager.generate_swaps( + mapping, multi.nearest_neighbours_cost_fun) + + assert not swaps + assert not all_qubits + + # ---------------------------------- + + manager.push_interaction(0, 8) + + with pytest.raises(RuntimeError): + manager.generate_swaps(mapping, + multi.nearest_neighbours_cost_fun, + max_steps=2) + + # ---------------------------------- + + mapping = {i: i for i in range(9)} + swaps, _ = manager.generate_swaps(mapping, + multi.nearest_neighbours_cost_fun) + + # Make sure the original mapping was not modified + assert mapping == {i: i for i in range(9)} + + reverse_mapping = {v: k for k, v in mapping.items()} + for id0, id1 in swaps: + reverse_mapping[id0], reverse_mapping[id1] = (reverse_mapping[id1], + reverse_mapping[id0]) + + mapping = {v: k for k, v in reverse_mapping.items()} + assert manager.graph.has_edge(mapping[0], mapping[8]) + + # ---------------------------------- + + mapping = {i: i for i in range(9)} + manager._use_near_term_layer = 1 + swaps, _ = manager.generate_swaps(mapping, + multi.look_ahead_parallelism_cost_fun, + opts={'W': 0.5}) + reverse_mapping = {v: k for k, v in mapping.items()} + for id0, id1 in swaps: + reverse_mapping[id0], reverse_mapping[id1] = (reverse_mapping[id1], + reverse_mapping[id0]) + + mapping = {v: k for k, v in reverse_mapping.items()} + assert manager.graph.has_edge(mapping[0], mapping[8]) + + # ---------------------------------- + + manager = deepcopy(qubit_manager) + manager._use_near_term_layer = 1 + mapping = {0: 0, 1: 1, 8: 8} + manager.push_interaction(0, 8) + 
manager.push_interaction(0, 1) + + swaps, all_qubits = manager.generate_swaps( + mapping, + multi.look_ahead_parallelism_cost_fun, + opts={ + 'W': 0.5, + 'near_term_layer': 1 + }) + + mapping = {i: i for i in range(9)} + reverse_mapping = {v: k for k, v in mapping.items()} + all_qubits_ref = set() + for id0, id1 in swaps: + all_qubits_ref.update((id0, id1)) + reverse_mapping[id0], reverse_mapping[id1] = (reverse_mapping[id1], + reverse_mapping[id0]) + + mapping = {v: k for k, v in reverse_mapping.items()} + + assert all_qubits == all_qubits_ref + + # Both gates should be executable at the same time + assert manager.graph.has_edge(mapping[0], mapping[8]) + assert manager.graph.has_edge(mapping[0], mapping[1]) From a6edcc420238c1e310aa117ae6e69a59196c9994 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Wed, 9 Oct 2019 11:40:47 +0200 Subject: [PATCH 13/25] New implementation of the GraphMapper (SABRE-like algorithm) This new implementation is largely based on the SABRE algorithm [1]. Essentially, use cost functions to plan the next list of SWAP operations. [1] https://arxiv.org/abs/1809.02573v2 --- docs/projectq.cengines.rst | 3 +- projectq/cengines/_graphmapper.py | 132 +++++++---- projectq/cengines/_graphmapper_test.py | 309 +++++-------------------- 3 files changed, 137 insertions(+), 307 deletions(-) diff --git a/docs/projectq.cengines.rst b/docs/projectq.cengines.rst index 90f9aaa30..47d369cb6 100755 --- a/docs/projectq.cengines.rst +++ b/docs/projectq.cengines.rst @@ -14,13 +14,14 @@ The ProjectQ compiler engines package. 
projectq.cengines.DummyEngine projectq.cengines.ForwarderEngine projectq.cengines.GridMapper + projectq.cengines.GraphMapper projectq.cengines.InstructionFilter projectq.cengines.IBM5QubitMapper projectq.cengines.LinearMapper projectq.cengines.LocalOptimizer projectq.cengines.ManualMapper projectq.cengines.MainEngine - projectq.cengines.SwapAndCNOTFlipper + projectq.cengines.SwapAndCNOTFlipper projectq.cengines.TagRemover diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 1a07bd0ea..9ecf764cd 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -31,8 +31,9 @@ from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, FlushGate, Swap) from projectq.types import WeakQubitRef -from projectq.cengines._graph_path_manager import PathManager -from projectq.cengines._command_list import CommandList +from ._command_list import CommandList +from ._multi_qubit_gate_manager import (MultiQubitGateManager, + look_ahead_parallelism_cost_fun) # ------------------------------------------------------------------------------ @@ -178,7 +179,7 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, mapping and that need to be assigned a backend id stored_commands (CommandList): list of commands yet to be processed by - the mapper + the mapper Returns: A new mapping """ @@ -256,6 +257,9 @@ class GraphMapper(BasicMapperEngine): Maps a quantum circuit to an arbitrary connected graph of connected qubits using Swap gates. + .. 
seealso:: + :py:mod:`projectq.cengines._multi_qubit_gate_manager` + Args: graph (networkx.Graph) : Arbitrary connected graph storage (int) Number of gates to temporarily store @@ -273,8 +277,6 @@ class GraphMapper(BasicMapperEngine): graph new_logical_qubit_ids stored_commands - enable_caching(Bool): Controls whether optimal path caching is - enabled Attributes: current_mapping: Stores the mapping: key is logical qubit id, value @@ -298,12 +300,11 @@ class GraphMapper(BasicMapperEngine): 3) Does not optimize for dirty qubits. """ - def __init__(self, graph, storage=1000, add_qubits_to_mapping=_add_qubits_to_mapping, - enable_caching=True): + opts={}): """ Initialize a GraphMapper compiler engine. @@ -311,13 +312,46 @@ def __init__(self, graph (networkx.Graph): Arbitrary connected graph representing Qubit connectivity storage (int): Number of gates to temporarily store - enable_caching (Bool): Controls whether path caching is enabled + generate_swap_opts (dict): extra options to customize swap + operation generation + opts (dict): Extra options (see below) + Raises: RuntimeError: if the graph is not a connected graph + + Note: + ``opts`` may contain the following key-values: + + .. list-table:: + :header-rows: 1 + + * - Key + - Type + - Description + * - cost_fun + - ``function`` + - | Cost function to be called when generating a new + | list of swap operations. + | Defaults to :py:func:`.look_ahead_parallelism_cost_fun` + * - decay_opts + - ``dict`` + - | Options to pass onto the :py:class:`.DecayManager` + constructor + | Defaults to ``{'delta': 0.001, 'max_lifetime': 5}``. + * - opts + - ``dict`` + - | Extra options to pass onto the cost function + | (see :py:meth:`.MultiQubitGateManager.generate_swaps`) + | Defaults to ``{'W': 0.5}``. 
""" BasicMapperEngine.__init__(self) - self.paths = PathManager(graph, enable_caching) + self.qubit_manager = MultiQubitGateManager(graph=graph, + decay_opts=opts.get( + 'decay_opts', { + 'delta': 0.001, + 'max_lifetime': 5 + })) self.num_qubits = graph.number_of_nodes() self.storage = storage # Randomness to pick permutations if there are too many. @@ -337,6 +371,9 @@ def __init__(self, # Function to add new logical qubits ids to the mapping self.set_add_qubits_to_mapping(add_qubits_to_mapping) + self._cost_fun = opts.get('cost_fun', look_ahead_parallelism_cost_fun) + self._opts = opts.get('opts', {'W': 0.5}) + # Statistics: self.num_mappings = 0 self.depth_of_swaps = dict() @@ -403,10 +440,6 @@ def _process_commands(self): allocated_qubits = deepcopy(self._currently_allocated_ids) active_qubits = deepcopy(self._currently_allocated_ids) - # Always start from scratch again - # (does not reset cache or path statistics) - self.paths.clear_paths() - for cmd in self._stored_commands: if (len(allocated_qubits) == self.num_qubits and not active_qubits): @@ -448,19 +481,17 @@ def _process_commands(self): else: if not_in_mapping_qubits: self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.paths.graph, + self._current_mapping, self.qubit_manager.graph, not_in_mapping_qubits, self._stored_commands) not_in_mapping_qubits = [] - if not self.paths.push_interaction( - self._current_mapping[qubit_ids[0]], - self._current_mapping[qubit_ids[1]]): - break + self.qubit_manager.push_interaction( + qubit_ids[0], qubit_ids[1]) if not_in_mapping_qubits: self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.paths.graph, not_in_mapping_qubits, - self._stored_commands) + self._current_mapping, self.qubit_manager.graph, + not_in_mapping_qubits, self._stored_commands) def _send_possible_commands(self): """ @@ -485,11 +516,10 @@ def _send_possible_commands(self): idx=self._current_mapping[cmd.qubits[0][0].id]) 
self._currently_allocated_ids.add(cmd.qubits[0][0].id) self.send([ - Command( - engine=self, - gate=AllocateQubitGate(), - qubits=([qb0], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + Command(engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) ]) else: new_stored_commands.append(cmd) @@ -502,36 +532,37 @@ def _send_possible_commands(self): active_ids.remove(cmd.qubits[0][0].id) self._current_mapping.pop(cmd.qubits[0][0].id) self.send([ - Command( - engine=self, - gate=DeallocateQubitGate(), - qubits=([qb0], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + Command(engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) ]) else: new_stored_commands.append(cmd) else: send_gate = True - backend_ids = set() + logical_ids = [] for qureg in cmd.all_qubits: for qubit in qureg: + logical_ids.append(qubit.id) + if qubit.id not in active_ids: send_gate = False - break - backend_ids.add(self._current_mapping[qubit.id]) - # Check that mapped ids are connected by an edge on the graph - if len(backend_ids) == 2: - send_gate = self.paths.graph.has_edge(*list(backend_ids)) + if send_gate: + # Check that mapped ids are connected by an edge on the + # graph + if len(logical_ids) == 2: + send_gate = self.qubit_manager.execute_gate( + self._current_mapping, *logical_ids) if send_gate: self._send_cmd_with_mapped_ids(cmd) else: # Cannot execute gate -> make sure no other gate will use # any of those qubits to preserve sequence - for qureg in cmd.all_qubits: - for qubit in qureg: - active_ids.discard(qubit.id) + for logical_id in logical_ids: + active_ids.discard(logical_id) new_stored_commands.append(cmd) self._stored_commands = new_stored_commands @@ -555,7 +586,8 @@ def _run(self): if not self._stored_commands: return - swaps = self.paths.generate_swaps() + swaps, all_swapped_qubits = self.qubit_manager.generate_swaps( + self._current_mapping, self._cost_fun, 
self._opts) if swaps: # first mapping requires no swaps backend_ids_used = { @@ -565,8 +597,7 @@ def _run(self): # Get a list of the qubits we need to allocate just to perform the # swaps - not_allocated_ids = set( - self.paths.get_all_nodes()).difference(backend_ids_used) + not_allocated_ids = all_swapped_qubits.difference(backend_ids_used) # Calculate temporary internal reverse mapping new_internal_mapping = deepcopy(self._reverse_current_mapping) @@ -577,10 +608,9 @@ def _run(self): for backend_id in not_allocated_ids: qb0 = WeakQubitRef(engine=self, idx=backend_id) self.send([ - Command( - engine=self, - gate=AllocateQubitGate(), - qubits=([qb0], )) + Command(engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], )) ]) # Those qubits are not part of the current mapping, so add them @@ -635,10 +665,9 @@ def _run(self): for backend_id in not_needed_anymore: qb0 = WeakQubitRef(engine=self, idx=backend_id) self.send([ - Command( - engine=self, - gate=DeallocateQubitGate(), - qubits=([qb0], )) + Command(engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], )) ]) # Calculate new mapping @@ -668,6 +697,7 @@ def receive(self, command_list): receive. 
""" for cmd in command_list: + print(cmd) if isinstance(cmd.gate, FlushGate): while self._stored_commands: self._run() @@ -702,4 +732,4 @@ def __str__(self): return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + "Number of swaps per mapping:{}\n\n{}\n\n").format( self.num_mappings, depth_of_swaps_str, - num_swaps_per_mapping_str, str(self.paths)) + num_swaps_per_mapping_str, str(self.qubit_manager)) diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 01d0fdf82..e5c9a0855 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -75,9 +75,8 @@ def simple_graph(): # 5 6 graph = nx.Graph() graph.add_nodes_from(range(7)) - graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), (3, - 6), - (4, 6)]) + graph.add_edges_from([(0, 1), (1, 2), (1, 5), (2, 3), (5, 3), (3, 4), + (3, 6), (4, 6)]) return graph @@ -96,8 +95,8 @@ def grid33_graph(): @pytest.fixture def grid22_graph_mapper(grid22_graph): - mapper = graphm.GraphMapper( - graph=grid22_graph, add_qubits_to_mapping="fcfs") + mapper = graphm.GraphMapper(graph=grid22_graph, + add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -106,8 +105,8 @@ def grid22_graph_mapper(grid22_graph): @pytest.fixture def grid33_graph_mapper(grid33_graph): - mapper = graphm.GraphMapper( - graph=grid33_graph, add_qubits_to_mapping="fcfs") + mapper = graphm.GraphMapper(graph=grid33_graph, + add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -116,8 +115,8 @@ def grid33_graph_mapper(grid33_graph): @pytest.fixture def simple_mapper(simple_graph): - mapper = graphm.GraphMapper( - graph=simple_graph, add_qubits_to_mapping="fcfs") + mapper = graphm.GraphMapper(graph=simple_graph, + add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) backend.is_last_engine = 
True mapper.next_engine = backend @@ -200,8 +199,8 @@ def test_add_qubits_to_mapping_methods_failure(simple_graph): @pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) def test_add_qubits_to_mapping_methods(simple_graph, add_qubits): - mapper = graphm.GraphMapper( - graph=simple_graph, add_qubits_to_mapping=add_qubits) + mapper = graphm.GraphMapper(graph=simple_graph, + add_qubits_to_mapping=add_qubits) backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend @@ -394,8 +393,11 @@ def test_send_possible_commands_allocate(simple_mapper): mapper, backend = simple_mapper qb0 = WeakQubitRef(engine=None, idx=0) - cmd0 = Command( - engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd0 = Command(engine=None, + gate=Allocate, + qubits=([qb0], ), + controls=[], + tags=[]) mapper._stored_commands += [cmd0] mapper._currently_allocated_ids = set([10]) # not in mapping: @@ -410,12 +412,11 @@ def test_send_possible_commands_allocate(simple_mapper): assert len(mapper._stored_commands) == 0 # Only self._run() sends Allocate gates mapped0 = WeakQubitRef(engine=None, idx=3) - received_cmd = Command( - engine=mapper, - gate=Allocate, - qubits=([mapped0], ), - controls=[], - tags=[LogicalQubitIDTag(0)]) + received_cmd = Command(engine=mapper, + gate=Allocate, + qubits=([mapped0], ), + controls=[], + tags=[LogicalQubitIDTag(0)]) assert backend.received_commands[0] == received_cmd assert mapper._currently_allocated_ids == set([10, 0]) @@ -504,8 +505,11 @@ def test_send_possible_commands_deallocate(simple_mapper): mapper, backend = simple_mapper qb0 = WeakQubitRef(engine=None, idx=0) - cmd0 = Command( - engine=None, gate=Deallocate, qubits=([qb0], ), controls=[], tags=[]) + cmd0 = Command(engine=None, + gate=Deallocate, + qubits=([qb0], ), + controls=[], + tags=[]) mapper._stored_commands = [cmd0] mapper.current_mapping = dict() mapper._currently_allocated_ids = set([10]) @@ -548,12 +552,21 @@ def 
test_send_possible_commands_keep_remaining_gates(simple_mapper): qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) - cmd0 = Command( - engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) - cmd1 = Command( - engine=None, gate=Deallocate, qubits=([qb0], ), controls=[], tags=[]) - cmd2 = Command( - engine=None, gate=Allocate, qubits=([qb1], ), controls=[], tags=[]) + cmd0 = Command(engine=None, + gate=Allocate, + qubits=([qb0], ), + controls=[], + tags=[]) + cmd1 = Command(engine=None, + gate=Deallocate, + qubits=([qb0], ), + controls=[], + tags=[]) + cmd2 = Command(engine=None, + gate=Allocate, + qubits=([qb1], ), + controls=[], + tags=[]) mapper._stored_commands = [cmd0, cmd1, cmd2] mapper.current_mapping = {0: 0} @@ -566,8 +579,11 @@ def test_send_possible_commands_one_inactive_qubit(simple_mapper): qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) - cmd0 = Command( - engine=None, gate=Allocate, qubits=([qb0], ), controls=[], tags=[]) + cmd0 = Command(engine=None, + gate=Allocate, + qubits=([qb0], ), + controls=[], + tags=[]) cmd1 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) mapper._stored_commands = [cmd0, cmd1] mapper.current_mapping = {0: 0} @@ -635,7 +651,6 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): Command(None, X, qubits=([qb[2]], ), controls=[qb[0]]) ]: mapper, backend = deepcopy(simple_mapper) - mapper.enable_caching = False all_cmds[3] = cmd @@ -644,13 +659,17 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): mapper._run() assert mapper.num_mappings == 1 if mapper.current_mapping[2] == 2: - # qb[2] has not moved, all_cmds[5] is possible - assert mapper._stored_commands == all_cmds[-4:] + # qb[2] has not moved, all_cmds[5] and everything + # thereafter is possible + assert mapper._stored_commands == all_cmds[-1:] assert mapper.current_mapping == { 0: 1, 1: 0, 2: 2, 3: 3, + 4: 4, + 5: 5, + 6: 6 } else: # qb[2] moved, all_cmds[5] not possible 
@@ -685,7 +704,6 @@ def test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): ]: mapper, backend = deepcopy(simple_mapper) mapper.current_mapping = dict(enumerate(range(len(qb)))) - mapper.enable_caching = False all_cmds[idx] = cmd @@ -854,226 +872,9 @@ def test_check_that_local_optimizer_doesnt_merge(simple_graph): assert len(backend.received_commands) == 7 -@pytest.mark.parametrize("enable_caching", [False, True]) -def test_3x3_grid_multiple_simultaneous_non_intersecting_paths( - grid33_graph_mapper, enable_caching): - mapper, backend = grid33_graph_mapper - mapper.enable_caching = enable_caching - - qb, allocate_cmds = allocate_all_qubits_cmd(mapper) - - # 0 - 1 - 2 - # | | | - # 3 - 4 - 5 - # | | | - # 6 - 7 - 8 - - cmd0 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) - cmd1 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) - cmd2 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) - cmd3 = Command(None, X, qubits=([qb[2]], ), controls=[qb[8]]) - - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - - mapper.receive(allocate_cmds + [cmd0, cmd1, cmd2, cmd3, cmd_flush]) - assert not mapper._stored_commands - assert mapper.num_mappings == 1 - assert mapper.depth_of_swaps == {1: 1} - assert mapper.current_mapping == { - 0: 0, - 1: 1, - 2: 2, - 3: 6, - 4: 7, - 5: 8, - 6: 3, - 7: 4, - 8: 5 - } or mapper.current_mapping == { - 0: 3, - 1: 4, - 2: 5, - 3: 0, - 4: 1, - 5: 2, - 6: 6, - 7: 7, - 8: 8 - } - - cmd3 = Command(None, X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd4 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) - cmd5 = Command(None, X, qubits=([qb[6]], ), controls=[qb[8]]) - mapper.receive([cmd3, cmd4, cmd5, cmd_flush]) - - assert not mapper._stored_commands - assert mapper.num_mappings == 2 - assert mapper.depth_of_swaps == {1: 2} - assert mapper.current_mapping == { - 0: 0, - 1: 2, - 2: 1, - 3: 6, - 4: 8, - 5: 7, - 6: 3, - 7: 5, - 8: 4 - } 
or mapper.current_mapping == { - 0: 4, - 1: 3, - 2: 5, - 3: 1, - 4: 0, - 5: 2, - 6: 7, - 7: 6, - 8: 8 - } - - if enable_caching: - assert mapper.paths.cache._cache - assert mapper.paths.cache.has_path(0, 6) - assert mapper.paths.cache.has_path(1, 7) - assert mapper.paths.cache.has_path(2, 8) - assert mapper.paths.cache.has_path(0, 2) - assert mapper.paths.cache.has_path(3, 5) - assert mapper.paths.cache.has_path(6, 8) - assert not mapper.paths.cache.has_path(0, 1) - assert not mapper.paths.cache.has_path(1, 2) - assert not mapper.paths.cache.has_path(3, 4) - assert not mapper.paths.cache.has_path(4, 5) - assert not mapper.paths.cache.has_path(6, 7) - assert not mapper.paths.cache.has_path(7, 8) - - -@pytest.mark.parametrize("enable_caching", [False, True]) -def test_3x3_grid_multiple_simultaneous_intersecting_paths_impossible( - grid33_graph_mapper, enable_caching): - mapper, backend = grid33_graph_mapper - mapper.enable_caching = enable_caching - - # 0 - 1 - 2 - # | | | - # 3 - 4 - 5 - # | | | - # 6 - 7 - 8 - qb, allocate_cmds = allocate_all_qubits_cmd(mapper) - - cmd0 = Command(None, X, qubits=([qb[1]], ), controls=[qb[7]]) - cmd1 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) - - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - - mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) - assert not mapper._stored_commands - assert mapper.num_mappings == 2 - assert mapper.depth_of_swaps == {1: 2} - assert mapper.current_mapping == { - 0: 0, - 1: 1, - 2: 2, - 3: 3, - 4: 7, - 5: 4, - 6: 6, - 7: 5, - 8: 8 - } or mapper.current_mapping == { - 0: 0, - 1: 3, - 2: 2, - 3: 4, - 4: 1, - 5: 5, - 6: 6, - 7: 7, - 8: 8 - } - - if enable_caching: - assert mapper.paths.cache._cache - assert mapper.paths.cache.has_path(1, 7) - assert mapper.paths.cache.has_path(3, 5) - - mapper.current_mapping = dict(enumerate(range(len(qb)))) - - cmd2 = Command(None, X, qubits=([qb[7]], ), controls=[qb[1]]) - 
cmd3 = Command(None, X, qubits=([qb[1]], ), controls=[qb[8]]) - mapper.receive(allocate_cmds + [cmd2, cmd3, cmd_flush]) - assert not mapper._stored_commands - assert mapper.num_mappings == 4 - assert mapper.depth_of_swaps == {1: 4} - - if enable_caching: - assert mapper.paths.cache._cache - assert mapper.paths.cache.has_path(1, 7) - assert mapper.paths.cache.has_path(3, 5) - assert mapper.paths.cache.has_path(1, 8) - - -@pytest.mark.parametrize("enable_caching", [False, True]) -def test_3x3_grid_multiple_simultaneous_intersecting_paths_possible( - grid33_graph_mapper, enable_caching): - mapper, backend = grid33_graph_mapper - mapper.enable_caching = enable_caching - - # 0 - 1 - 2 - # | | | - # 3 - 4 - 5 - # | | | - # 6 - 7 - 8 - qb, allocate_cmds = allocate_all_qubits_cmd(mapper) - - # NB. when generating the swaps for the paths through the graph, the path - # 0 -> 7 needs to be performed *before* the one 3 -> 5 - cmd0 = Command(None, X, qubits=([qb[3]], ), controls=[qb[5]]) - cmd1 = Command(None, X, qubits=([qb[0]], ), controls=[qb[7]]) - - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - - mapper.receive(allocate_cmds + [cmd0, cmd1, cmd_flush]) - - assert not mapper._stored_commands - assert mapper.num_mappings == 1 - assert mapper.depth_of_swaps == {3: 1} - assert mapper.current_mapping == { - 0: 0, - 1: 3, - 2: 2, - 3: 4, - 4: 7, - 5: 5, - 6: 6, - 7: 1, - 8: 8 - } or mapper.current_mapping == { - 0: 0, - 1: 4, - 2: 2, - 3: 3, - 4: 5, - 5: 7, - 6: 6, - 7: 1, - 8: 8 - } - - if enable_caching: - assert mapper.paths.cache._cache - assert mapper.paths.cache.has_path(0, 7) - assert mapper.paths.cache.has_path(3, 5) - - -@pytest.mark.parametrize("enable_caching", [False, True]) -def test_mapper_to_str(simple_graph, enable_caching): - mapper = graphm.GraphMapper( - graph=simple_graph, - enable_caching=enable_caching, - add_qubits_to_mapping="fcfs") +def test_mapper_to_str(simple_graph): + mapper 
= graphm.GraphMapper(graph=simple_graph, + add_qubits_to_mapping="fcfs") backend = DummyEngine(save_commands=True) eng = MainEngine(backend, [mapper]) qureg = eng.allocate_qureg(len(simple_graph)) @@ -1096,8 +897,6 @@ def test_mapper_to_str(simple_graph, enable_caching): assert str_repr.count("1: 1") == 1 assert str_repr.count("2: 1") == 2 assert str_repr.count("3: 1") == 1 - assert str_repr.count(" 0 - 6: 1") == 1 - assert str_repr.count(" 0 - 3: 1") == 1 sent_gates = [cmd.gate for cmd in backend.received_commands] assert sent_gates.count(H) == 1 From 76e0e9e39e3197e655bb35bc8ac0ef20757f32b1 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Fri, 11 Oct 2019 15:26:26 +0200 Subject: [PATCH 14/25] Store all gates in MultiQubitGateManager TODO: rename the class and its associated file --- .../cengines/_multi_qubit_gate_manager.py | 646 ++++++++++----- .../_multi_qubit_gate_manager_test.py | 760 ++++++++++++++---- 2 files changed, 1048 insertions(+), 358 deletions(-) diff --git a/projectq/cengines/_multi_qubit_gate_manager.py b/projectq/cengines/_multi_qubit_gate_manager.py index 2f7542a85..8b86b9ad2 100644 --- a/projectq/cengines/_multi_qubit_gate_manager.py +++ b/projectq/cengines/_multi_qubit_gate_manager.py @@ -22,11 +22,12 @@ """ import networkx as nx +from projectq.ops import (AllocateQubitGate, DeallocateQubitGate) # ============================================================================== -def _sum_distance_over_gates(gate_list, mapping, distance_matrix): +def _sum_distance_over_gates(node_list, mapping, distance_matrix): """ Calculate the sum of distances between pairs of qubits @@ -40,8 +41,8 @@ def _sum_distance_over_gates(gate_list, mapping, distance_matrix): Sum of all pair-wise distances between qubits """ return sum([ - distance_matrix[mapping[gate.logical_id0]][mapping[gate.logical_id1]] - for gate in gate_list + distance_matrix[mapping[node.logical_id0]][mapping[node.logical_id1]] + for node in node_list ]) @@ -52,7 +53,7 @@ def 
nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, .. math:: - H = \sum_{\mathrm{gate}\ \in\ F} + H = \sum_{\mathrm{gate}\ \in\ F} D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) where: @@ -63,7 +64,7 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, Args: - gates_dag (GatesDAG): Direct acyclic graph of future quantum gates + gates_dag (CommandDAG): Direct acyclic graph of future quantum gates mapping (dict): Current mapping distance_matrix (dict): Distance matrix within the hardware coupling graph @@ -74,6 +75,7 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, Returns: Score of current swap operations """ + #pylint: disable=unused-argument return _sum_distance_over_gates(gates_dag.front_layer, mapping, distance_matrix) @@ -86,10 +88,10 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, to favour swap operations that can be performed in parallel. .. math:: - - H = M \\left[\\frac{1}{|F|}\sum_{\mathrm{gate}\ \in\ F} + + H = M \\left[\\frac{1}{|F|}\sum_{\mathrm{gate}\ \in\ F} D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) - + \\frac{W}{|E|}\sum_{\mathrm{gate}\ \in\ E} + + \\frac{W}{|E|}\sum_{\mathrm{gate}\ \in\ E} D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) \\right] where: @@ -102,7 +104,7 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, Args: - gates_dag (GatesDAG): Direct acyclic graph of future quantum gates + gates_dag (CommandDAG): Direct acyclic graph of future quantum gates mapping (dict): Current mapping distance_matrix (dict): Distance matrix within the hardware coupling graph @@ -128,24 +130,24 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, * - W - ``float`` - Weighting factor (see cost function formula) - - """ decay = opts['decay'] - W = opts['W'] - N_front = len(gates_dag.front_layer) - N_near = len(gates_dag.near_term_layer) + near_term_weight = opts['W'] - front_layer_term = (1. 
/ N_front * _sum_distance_over_gates( - gates_dag.front_layer, mapping, distance_matrix)) + n_front = len(gates_dag.front_layer_for_cost_fun) + n_near = len(gates_dag.near_term_layer) - if N_near == 0: - return (max(decay.get_decay_value(swap[0]), - decay.get_decay_value(swap[1])) * front_layer_term) - return ( - max(decay.get_decay_value(swap[0]), decay.get_decay_value(swap[1])) * - front_layer_term + (W / N_near * _sum_distance_over_gates( - gates_dag.near_term_layer, mapping, distance_matrix))) + decay_factor = max(decay.get_decay_value(swap[0]), + decay.get_decay_value(swap[1])) + front_layer_term = (1. / n_front * _sum_distance_over_gates( + gates_dag.front_layer_for_cost_fun, mapping, distance_matrix)) + + if n_near == 0: + return decay_factor * front_layer_term + return (decay_factor * + (front_layer_term + + (near_term_weight / n_near * _sum_distance_over_gates( + gates_dag.near_term_layer, mapping, distance_matrix)))) # ============================================================================== @@ -179,19 +181,6 @@ def _apply_swap_to_mapping(mapping, logical_id0, logical_id1, backend_id1): # ============================================================================== -class QubitIDDecay(object): - """ - Class storing the decay information about a particular backend qubit ID - - Attributes: - decay (float): Decay value for a backend qubit ID - lifetime (int): Lifetime of decay information for a backend qubit ID - """ - def __init__(self, decay, lifetime): - self.decay = decay - self.lifetime = lifetime - - class DecayManager(object): """ Class managing the decay information about a list of backend qubit IDs @@ -199,6 +188,15 @@ class DecayManager(object): User should call the :py:meth:`step` method each time a swap gate is added and :py:meth:`remove_decay` once a 2-qubit gate is executed. 
""" + + # def __repr__(self): + # s = '' + # for backend_id in self._backend_ids: + # tmp = self._backend_ids[backend_id] + # s += '\n {:2}: {}, {}'.format(backend_id, tmp.decay, tmp.lifetime) + # s += '\n' + # return s + def __init__(self, delta, max_lifetime): """ Constructor @@ -212,10 +210,16 @@ def __init__(self, delta, max_lifetime): self._cutoff = max_lifetime self._backend_ids = {} + def clear(self): + """ + Clear the state of a DecayManager + """ + self._backend_ids = {} + def add_to_decay(self, backend_id): """ Add to the decay to a particular backend qubit ID - + Args: backend_id (int) : Backend qubit ID """ @@ -224,11 +228,13 @@ def add_to_decay(self, backend_id): return if backend_id in self._backend_ids: - self._backend_ids[backend_id].lifetime = self._cutoff - self._backend_ids[backend_id].decay += self._delta + self._backend_ids[backend_id]['lifetime'] = self._cutoff + self._backend_ids[backend_id]['decay'] += self._delta else: - self._backend_ids[backend_id] = QubitIDDecay( - self._delta, self._cutoff) + self._backend_ids[backend_id] = { + 'decay': 1 + self._delta, + 'lifetime': self._cutoff + } def remove_decay(self, backend_id): """ @@ -248,8 +254,8 @@ def get_decay_value(self, backend_id): backend_id (int) : Backend qubit ID """ if backend_id in self._backend_ids: - return self._backend_ids[backend_id].decay - return 0 + return self._backend_ids[backend_id]['decay'] + return 1 def step(self): """ @@ -259,30 +265,57 @@ def step(self): """ backend_ids = list(self._backend_ids) for backend_id in backend_ids: - self._backend_ids[backend_id].lifetime -= 1 - if self._backend_ids[backend_id].lifetime == 0: + self._backend_ids[backend_id]['lifetime'] -= 1 + if self._backend_ids[backend_id]['lifetime'] == 0: del self._backend_ids[backend_id] # ============================================================================== -class _DAGNode(object): +class _DAGNodeBase(object): + #pylint: disable=too-few-public-methods + def __init__(self, cmd, *args): + 
self.logical_ids = frozenset(args) + self.cmd = cmd + self.compatible_successor_cmds = [] + + def append_compatible_cmd(self, cmd): + """ + Append a compatible commands to this DAG node + + Args: + cmd (Command): A ProjectQ command + """ + self.compatible_successor_cmds.append(cmd) + + +class _DAGNodeSingle(_DAGNodeBase): + """ + Node representing a single qubit gate as part of a Direct Acyclic Graph + (DAG) of quantum gates """ - Class representing a node inside a Direct Acyclic Graph (DAG) - .. note:: + #pylint: disable=too-few-public-methods + def __init__(self, cmd, logical_id): + super(_DAGNodeSingle, self).__init__(cmd, logical_id) + self.logical_id = logical_id + - Main purpose of this class is to allow gates with identical qubits to be - stored within the same graph (networkx limitation) +class _DAGNodeDouble(_DAGNodeBase): """ - def __init__(self, logical_id0, logical_id1): + Node representing a 2-qubit gate as part of a Direct Acyclic Graph (DAG) + of quantum gates + """ + + #pylint: disable=too-few-public-methods + def __init__(self, cmd, logical_id0, logical_id1): + super(_DAGNodeDouble, self).__init__(cmd, logical_id0, logical_id1) self.logical_id0 = logical_id0 self.logical_id1 = logical_id1 - self.logical_ids = frozenset((logical_id0, logical_id1)) -class GatesDAG(object): +class CommandDAG(object): """ Class managing a list of multi-qubit gates and storing them into a Direct Acyclic Graph (DAG) in order of precedence. @@ -291,106 +324,154 @@ def __init__(self): self._dag = nx.DiGraph() self._logical_ids_in_diag = set() self.front_layer = [] - self.near_term_layer = set() + self.front_layer_for_cost_fun = [] + self.near_term_layer = [] self._back_layer = {} - def add_gate(self, logical_id0, logical_id1): + def size(self): """ - Add a gate to the DAG + Return the size of the DAG (ie. 
number of nodes) - Args: - logical_id0 (int) : A logical qubit ID - logical_id1 (int) : A logical qubit ID + Note: + This need not be the number of commands stored within the DAG. + """ + return self._dag.number_of_nodes() - .. note:: - If neither of ``logical_id0`` or ``logical_id1`` are currently found within the - DAG, also add the gate to the font layer. + def clear(self): """ + Clear the state of a DAG - logical_id0_in_dag = logical_id0 in self._logical_ids_in_diag - logical_id1_in_dag = logical_id1 in self._logical_ids_in_diag + Remove all nodes from the DAG and all layers. + """ + self._dag.clear() + self._logical_ids_in_diag = set() + self.front_layer_for_cost_fun = [] + self.front_layer = [] + self.near_term_layer = [] + self._back_layer = {} - if not (logical_id0_in_dag and logical_id1_in_dag and self. - _back_layer[logical_id0] == self._back_layer[logical_id1]): - # Do not add the new gate to DAG if both qubits are present inside - # the DAG *and* the gate on the last layer is the same for both - # qubits. 
- new_gate = _DAGNode(logical_id0, logical_id1) + def add_command(self, cmd): + """ + Add a command to the DAG + + Args: + cmd (Command): A ProjectQ command + """ + logical_ids = [qubit.id for qureg in cmd.all_qubits for qubit in qureg] - self._dag.add_node(new_gate) + if len(logical_ids) == 2: + logical_id0_in_dag = logical_ids[0] in self._logical_ids_in_diag + logical_id1_in_dag = logical_ids[1] in self._logical_ids_in_diag + + if (logical_id0_in_dag and logical_id1_in_dag and self._back_layer[ + logical_ids[0]] == self._back_layer[logical_ids[1]]): + self._back_layer[logical_ids[1]].append_compatible_cmd(cmd) + return + + new_node = _DAGNodeDouble(cmd, *logical_ids) + self._dag.add_node(new_node) if logical_id0_in_dag: - self._dag.add_edge(self._back_layer[logical_id0], new_gate) - self._logical_ids_in_diag.add(logical_id1) + self._dag.add_edge(self._back_layer[logical_ids[0]], new_node) + self._logical_ids_in_diag.add(logical_ids[1]) else: - self._logical_ids_in_diag.add(logical_id0) + self._logical_ids_in_diag.add(logical_ids[0]) if logical_id1_in_dag: - self._dag.add_edge(self._back_layer[logical_id1], new_gate) - self._logical_ids_in_diag.add(logical_id0) + self._dag.add_edge(self._back_layer[logical_ids[1]], new_node) + self._logical_ids_in_diag.add(logical_ids[0]) else: - self._logical_ids_in_diag.add(logical_id1) + self._logical_ids_in_diag.add(logical_ids[1]) - self._back_layer[logical_id0] = new_gate - self._back_layer[logical_id1] = new_gate + self._back_layer[logical_ids[0]] = new_node + self._back_layer[logical_ids[1]] = new_node # If both qubit are not already in the DAG, then we just got a new # gate on the front layer if not logical_id0_in_dag and not logical_id1_in_dag: - self.front_layer.append(new_gate) - return new_gate - return None + self.front_layer_for_cost_fun.append(new_node) + self.front_layer.append(new_node) + else: + logical_id = logical_ids[0] + logical_id_in_dag = logical_id in self._logical_ids_in_diag + + if isinstance(cmd.gate, 
(AllocateQubitGate, DeallocateQubitGate)): + new_node = _DAGNodeSingle(cmd, logical_id) + self._dag.add_node(new_node) + + if logical_id_in_dag: + self._dag.add_edge(self._back_layer[logical_id], new_node) + else: + self._logical_ids_in_diag.add(logical_id) + + self.front_layer.append(new_node) + + self._back_layer[logical_id] = new_node + else: + if not logical_id_in_dag: + new_node = _DAGNodeSingle(cmd, logical_id) + self._dag.add_node(new_node) + self._logical_ids_in_diag.add(logical_id) + + self._back_layer[logical_id] = new_node - def remove_from_front_layer(self, logical_id0, logical_id1): + self.front_layer.append(new_node) + else: + self._back_layer[logical_id].append_compatible_cmd(cmd) + + def remove_from_front_layer(self, cmd): """ Remove a gate from the front layer of the DAG Args: - logical_id0 (int) : A logical qubit ID - logical_id1 (int) : A logical qubit ID + cmd (Command): A ProjectQ command Raises: RuntimeError if the gate does not exist in the front layer """ # First find the gate inside the first layer list - for gate in self.front_layer: - if gate.logical_ids == frozenset((logical_id0, logical_id1)): - break - else: - raise RuntimeError('({}, {}) not found in DAG'.format( - logical_id0, logical_id1)) + node = next((node for node in self.front_layer if node.cmd is cmd), + None) + if not node: + raise RuntimeError('({}) not found in DAG'.format(cmd)) - descendants = list(self._dag[gate]) + logical_ids = [qubit.id for qureg in cmd.all_qubits for qubit in qureg] + + descendants = list(self._dag[node]) if not descendants: - self._logical_ids_in_diag.remove(logical_id0) - self._logical_ids_in_diag.remove(logical_id1) - del self._back_layer[logical_id0] - del self._back_layer[logical_id1] - self._dag.remove_node(gate) + for logical_id in logical_ids: + self._logical_ids_in_diag.remove(logical_id) + del self._back_layer[logical_id] + self._dag.remove_node(node) else: if len(descendants) == 1: - # Look for the logical_id not found in the descendant - 
logical_id = logical_id0 - if logical_id in descendants[0].logical_ids: - logical_id = logical_id1 + if isinstance(node, _DAGNodeDouble): + # Look for the logical_id not found in the descendant + logical_id, tmp = logical_ids + if logical_id in descendants[0].logical_ids: + logical_id = tmp - self._logical_ids_in_diag.remove(logical_id) - del self._back_layer[logical_id] + self._logical_ids_in_diag.remove(logical_id) + del self._back_layer[logical_id] # Remove gate from DAG - self._dag.remove_node(gate) + self._dag.remove_node(node) for descendant in descendants: if not self._dag.pred[descendant]: self.front_layer.append(descendant) + if isinstance(descendant, _DAGNodeDouble): + self.front_layer_for_cost_fun.append(descendant) # Remove the gate from the first layer - self.front_layer.remove(gate) + self.front_layer.remove(node) + if isinstance(node, _DAGNodeDouble): + self.front_layer_for_cost_fun.remove(node) def max_distance_in_dag(self): """ - Calculate the distance between the front layer and each gate of the + Calculate the distance between the front layer and each node of the DAG. A gate with distance 0 is on the front layer. @@ -398,57 +479,135 @@ def max_distance_in_dag(self): Returns: Python dictionary indexed by gate with their distance as value """ - gate_max_distance = {} - for gate in self.front_layer: - gate_max_distance[gate] = 0 - self._max_distance_in_dag(gate_max_distance, gate, 1) + node_max_distance = {} + for node in self.front_layer: + node_max_distance[node] = 0 + self._max_distance_in_dag(node_max_distance, node, 1) - return gate_max_distance + return node_max_distance - def calculate_near_term_layer(self, max_distance): + def calculate_near_term_layer(self, mapping): """ - Calculate a near term layer with all gates less than `max_distance` - from the front layer + Calculate the first order near term layer. + + This is the set of gates that will become the front layer once these + get executed. 
Args: - max_distance (int): Maximum distance from front layer to consider + mapping (dict): current mapping + """ + near_term_layer_candidates = [] + for node in self.front_layer_for_cost_fun: + for descendant in self._dag[node]: + if (isinstance(descendant, _DAGNodeDouble) + and descendant.logical_id0 in mapping + and descendant.logical_id1 in mapping): + near_term_layer_candidates.append(descendant) + + # Only add candidates for which all predecessors are in the front layer + self.near_term_layer = [] + for node in near_term_layer_candidates: + for predecessor in self._dag.pred[node]: + if predecessor not in self.front_layer: + break + else: + if node not in self.near_term_layer: + self.near_term_layer.append(node) + + def calculate_interaction_list(self): """ - if not max_distance: - self.near_term_layer = set() - else: - self.near_term_layer = { - gate - for gate, dist in self.max_distance_in_dag().items() - if 0 < dist <= max_distance - } + List all known interactions between multiple qubits + + Returns: + List of tuples of logical qubit IDs for each 2-qubit gate present + in the DAG. + """ + interactions = [] + for node in self._dag: + if isinstance(node, _DAGNodeDouble): + interactions.append(tuple(node.logical_ids)) + return interactions + + def calculate_qubit_interaction_subgraphs(self, max_order=2): + """ + Calculate qubits interaction graph based on all commands stored. + + The interaction graph has logical qubit IDs as nodes and edges + represent a 2-qubit gate between qubits. + + Args: + max_order (int): Maximum degree of the nodes in the resulting + graph + + Returns: + A list of list of graph nodes corresponding to all the connected + components of the qubit interaction graph. Within each components, + nodes are sorted in decreasing order of their degree. 
+ """ + graph = nx.Graph() + + for node in self.front_layer: + self._add_to_interaction_graph(node, graph, max_order) + + return [ + sorted(graph.subgraph(g), + key=lambda n: len(graph[n]), + reverse=True) for g in sorted( + nx.connected_components(graph), + key=lambda c: (max(len(graph[n]) for n in c), len(c)), + reverse=True) + ] - def _max_distance_in_dag(self, gate_max_distance, gate, distance): + def _add_to_interaction_graph(self, node, graph, max_order): """ - Recursively calculate the maximum distance for each gate of the DAG + Recursively add an interaction to the interaction graph Args: - gate_max_depth (dict): Dictionary containing the current maximum - distance for each gate - gate (_DAGNode): Root node from DAG for traversal + node (_DAGNodeDouble): Node from DAG + graph (networkx.Graph): Interaction graph + max_order (int): Maximum degree of the nodes in the resulting + interaction graph + """ + if isinstance(node, _DAGNodeDouble) \ + and (node.logical_id0 not in graph \ + or node.logical_id1 not in graph \ + or (len(graph[node.logical_id0]) < max_order + and len(graph[node.logical_id1]) < max_order)): + graph.add_edge(node.logical_id0, node.logical_id1) + + for descendant in self._dag[node]: + self._add_to_interaction_graph(descendant, graph, max_order) + + def _max_distance_in_dag(self, node_max_distance, node, distance): + """ + Recursively calculate the maximum distance for each node of the DAG + + Args: + node_max_distance (dict): Dictionary containing the current + maximum distance for each node + node (_DAGNode): Root node from DAG for traversal distance (int): Current distance offset """ - for descendant in self._dag[gate]: + for descendant in self._dag[node]: try: - if gate_max_distance[descendant] < distance: - gate_max_distance[descendant] = distance + if node_max_distance[descendant] < distance: + node_max_distance[descendant] = distance except KeyError: - gate_max_distance[descendant] = distance + node_max_distance[descendant] = distance 
if self._dag[descendant]: - self._max_distance_in_dag(gate_max_distance, descendant, + self._max_distance_in_dag(node_max_distance, descendant, distance + 1) +# ============================================================================== + + class MultiQubitGateManager(object): """ Class managing qubit interactions """ - def __init__(self, graph, decay_opts={}): + def __init__(self, graph, decay_opts=None): """ Args: graph (networkx.Graph): an arbitrary connected graph @@ -456,20 +615,45 @@ def __init__(self, graph, decay_opts={}): # Make sure that we start with a valid graph if not nx.is_connected(graph): raise RuntimeError("Input graph must be a connected graph") - elif not all([isinstance(n, int) for n in graph]): + + if not all([isinstance(n, int) for n in graph]): raise RuntimeError( "All nodes inside the graph needs to be integers") - else: - self.graph = graph - self.distance_matrix = dict( - nx.all_pairs_shortest_path_length(self.graph)) - self._dag = GatesDAG() + self.graph = graph + self.distance_matrix = dict( + nx.all_pairs_shortest_path_length(self.graph)) + + if decay_opts is None: + decay_opts = {} + self._dag = CommandDAG() self._decay = DecayManager(decay_opts.get('delta', 0.001), - decay_opts.get('lifetime', 5)) - self.stats = {} + decay_opts.get('max_lifetime', 5)) - def generate_swaps(self, current_mapping, cost_fun, opts={}, + def size(self): + """ + Return the size of the underlying DAG + + .. seealso:: + :py:meth:`.CommandDAG.size` + """ + return self._dag.size() + + def clear(self): + """ + Return the size of the underlying DAG + + .. seealso:: + :py:meth:`.CommandDAG.clear` + :py:meth:`.DecayManager.clear` + """ + self._dag.clear() + self._decay.clear() + + def generate_swaps(self, + current_mapping, + cost_fun, + opts=None, max_steps=100): """ Generate a list of swaps to execute some quantum gates @@ -493,7 +677,7 @@ def generate_swaps(self, current_mapping, cost_fun, opts={}, .. 
list-table:: :header-rows: 1 - + * - Key - Type - Description @@ -508,33 +692,37 @@ def generate_swaps(self, current_mapping, cost_fun, opts={}, - ... - | Any other parameter will be passed onto the cost | function when it is called. - + Returns: A tuple (list, set) of swap operations (tuples of backend IDs) and a set of all the backend IDs that are traversed by the SWAP operations. """ - if not self._dag.front_layer: + if not self._dag.front_layer_for_cost_fun: return ([], set()) + if opts is None: + opts = {} + + self._decay.clear() opts['decay'] = self._decay - self._dag.calculate_near_term_layer(opts.get('near_term_layer', 0)) + self._dag.calculate_near_term_layer(current_mapping) mapping = current_mapping.copy() swaps = [] all_swapped_qubits = set() while not self._can_execute_some_gate(mapping): - (logical_id0, logical_id1, + (logical_id0, backend_id0, logical_id1, backend_id1) = self._generate_one_swap_step( mapping, cost_fun, opts) swaps.append((mapping[logical_id0], backend_id1)) - all_swapped_qubits.add(mapping[logical_id0]) + all_swapped_qubits.add(backend_id0) all_swapped_qubits.add(backend_id1) - for backend_id in swaps[-1]: - self._decay.add_to_decay(backend_id) + self._decay.add_to_decay(backend_id0) + self._decay.add_to_decay(backend_id1) self._decay.step() _apply_swap_to_mapping(mapping, logical_id0, logical_id1, @@ -542,53 +730,117 @@ def generate_swaps(self, current_mapping, cost_fun, opts={}, if len(swaps) > max_steps: raise RuntimeError( - 'Maximum number of steps to find a list of' + - ' SWAP operations reached without convergence') + 'Maximum number of steps ({}) to find a list of'.format( + max_steps) + + ' SWAP operations reached without convergence') return swaps, all_swapped_qubits - def push_interaction(self, logical_id0, logical_id1): + def add_command(self, cmd): """ - Plan an interaction between two qubit. 
+ Add a command to the underlying DAG Args: - logical_id0 (int) : A logical qubit ID - logical_id1 (int) : A logical qubit ID - """ + cmd (Command): A ProjectQ command - self._dag.add_gate(logical_id0, logical_id1) + .. seealso:: + :py:meth:`.GatesDAG.add_command` + """ - new_gate = frozenset((logical_id0, logical_id1)) - if new_gate not in self.stats: - self.stats[new_gate] = 1 - else: - self.stats[new_gate] += 1 + return self._dag.add_command(cmd) - def execute_gate( - self, - mapping, - logical_id0, - logical_id1, - ): + def get_executable_commands(self, mapping): """ - Execute a gate (ie. mark it as executed if present in the DAG) + Find as many executable commands as possible given a mapping Args: mapping (dict): Current mapping - logical_id0 (int) : A logical qubit ID - logical_id1 (int) : A logical qubit ID - """ - if self.graph.has_edge(mapping[logical_id0], mapping[logical_id1]): - for gate in self._dag.front_layer: - if (logical_id0 in gate.logical_ids - and logical_id1 in gate.logical_ids): - self._dag.remove_from_front_layer(logical_id0, logical_id1) - return True - return False + + Returns: + A tuple (cmds_to_execute, allocate_cmds) where the first one is a + list of ProjectQ commands that can be executed and the second a + list of allocation commands for qubits not in the current mapping + """ + cmds_to_execute = [] + allocate_cmds = [] + has_command_to_execute = True + + while has_command_to_execute: + # Reset after each pass + has_command_to_execute = False + + for node in self._dag.front_layer.copy(): + if isinstance(node, _DAGNodeSingle): + if isinstance(node.cmd.gate, AllocateQubitGate): + # Allocating a qubit already in mapping is allowed + if node.logical_id in mapping: + has_command_to_execute = True + cmds_to_execute.append(node.cmd) + cmds_to_execute.extend( + node.compatible_successor_cmds) + self._dag.remove_from_front_layer(node.cmd) + elif node not in allocate_cmds: + allocate_cmds.append(node) + elif node.logical_id in mapping: + 
has_command_to_execute = True + cmds_to_execute.append(node.cmd) + cmds_to_execute.extend(node.compatible_successor_cmds) + self._dag.remove_from_front_layer(node.cmd) + elif node.logical_id0 in mapping and node.logical_id1 in mapping: + if self.graph.has_edge(mapping[node.logical_id0], + mapping[node.logical_id1]): + has_command_to_execute = True + cmds_to_execute.append(node.cmd) + cmds_to_execute.extend(node.compatible_successor_cmds) + self._dag.remove_from_front_layer(node.cmd) + + return cmds_to_execute, allocate_cmds + + def execute_allocate_cmds(self, allocate_cmds, mapping): + """ + Executea list of allocate commands (ie. remove them from the front + layer) + + Args: + allocate_cmds (list): A list of Allocate commands (DAG nodes) + mapping (dict): Current mapping + + Returns: + A list of ProjectQ commands to be executed + """ + cmds_to_execute = [] + for node in allocate_cmds: + assert isinstance(node.cmd.gate, AllocateQubitGate) + if node.logical_id in mapping: + cmds_to_execute.append(node.cmd) + cmds_to_execute.extend(node.compatible_successor_cmds) + self._dag.remove_from_front_layer(node.cmd) + + return cmds_to_execute # ========================================================================== - def _generate_one_swap_step(self, mapping, cost_fun, opts={}): + def calculate_qubit_interaction_subgraphs(self, max_order=2): + """ + Calculate qubits interaction graph based on all commands stored. + + Args: + max_order (int): Maximum degree of the nodes in the resulting + interaction graph + + Returns: + A list of list of graph nodes corresponding to all the connected + components of the qubit interaction graph. Within each components, + nodes are sorted in decreasing order of their degree. + + .. 
seealso:: + :py:meth:`CommandDAG.calculate_qubit_interaction_subgraphs` + """ + return self._dag.calculate_qubit_interaction_subgraphs(max_order) + + # ========================================================================== + + def _generate_one_swap_step(self, mapping, cost_fun, opts): """ Find the most optimal swap operation to perform next @@ -602,9 +854,9 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts={}): - swap_candidate (tuple) Returns: - Tuple with (logical_id0, logical_id1, backend_id1) where - logical_id1 can be -1 if backend_id1 does not currently have a - logical qubit associated to it. + Tuple with (logical_id0, backend_id0, logical_id1, backend_id1) + where logical_id1 can be -1 if backend_id1 does not currently have + a logical qubit associated to it. """ reverse_mapping = {v: k for k, v in mapping.items()} @@ -612,17 +864,18 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts={}): # Only consider gates from the front layer and generate a list of # potential SWAP operations with all qubits that are neighours of # those concerned by a gate + swap_candidates = [] - for gate in self._dag.front_layer: - for logical_id in gate.logical_ids: - for backend_id in self.graph[mapping[logical_id]]: + for node in self._dag.front_layer_for_cost_fun: + for logical_id in node.logical_ids: + for backend_id1 in self.graph[mapping[logical_id]]: swap_candidates.append( - (logical_id, reverse_mapping.get(backend_id, - -1), backend_id)) + (logical_id, mapping[logical_id], + reverse_mapping.get(backend_id1, -1), backend_id1)) # Rank swap candidates using the provided cost function scores = [] - for logical_id0, logical_id1, backend_id1 in swap_candidates: + for logical_id0, backend_id0, logical_id1, backend_id1 in swap_candidates: new_mapping = mapping.copy() _apply_swap_to_mapping(new_mapping, logical_id0, logical_id1, @@ -630,7 +883,7 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts={}): scores.append( cost_fun(self._dag, 
new_mapping, self.distance_matrix, - (logical_id0, logical_id1), opts)) + (backend_id0, backend_id1), opts)) # Return the swap candidate with the lowest score return swap_candidates[scores.index(min(scores))] @@ -642,8 +895,11 @@ def _can_execute_some_gate(self, mapping): Args: mapping (dict): Current mapping """ - for gate in self._dag.front_layer: - if self.graph.has_edge(mapping[gate.logical_id0], - mapping[gate.logical_id1]): + for node in self._dag.front_layer: + if isinstance(node, _DAGNodeSingle) and node.logical_id in mapping: + return True + + if (isinstance(node, _DAGNodeDouble) and self.graph.has_edge( + mapping[node.logical_id0], mapping[node.logical_id1])): return True return False diff --git a/projectq/cengines/_multi_qubit_gate_manager_test.py b/projectq/cengines/_multi_qubit_gate_manager_test.py index 140b8d2e4..5f7243ad3 100644 --- a/projectq/cengines/_multi_qubit_gate_manager_test.py +++ b/projectq/cengines/_multi_qubit_gate_manager_test.py @@ -25,20 +25,26 @@ from projectq.cengines import _multi_qubit_gate_manager as multi +# ============================================================================== + # For debugging purposes -def to_string(self): - return str(tuple(self.logical_ids)) +def dagnode_to_string(self): + return '{} {}'.format(self.__class__.__name__, tuple(self.logical_ids)) -multi._dag_node.__str__ = to_string -multi._dag_node.__repr__ = to_string +multi._DAGNodeBase.__str__ = dagnode_to_string +multi._DAGNodeBase.__repr__ = dagnode_to_string +Command.__repr__ = Command.__str__ + +# ============================================================================== -def allocate_all_qubits_cmd(mapper): + +def allocate_all_qubits_cmd(num_qubits): qb = [] allocate_cmds = [] - for i in range(mapper.num_qubits): + for i in range(num_qubits): qb.append(WeakQubitRef(engine=None, idx=i)) allocate_cmds.append( Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) @@ -74,6 +80,25 @@ def generate_grid_graph(nrows, ncols): return graph +def 
gen_cmd(*logical_ids, gate=X): + if len(logical_ids) == 1: + qb0 = WeakQubitRef(engine=None, idx=logical_ids[0]) + return Command(None, gate, qubits=([qb0], )) + elif len(logical_ids) == 2: + qb0 = WeakQubitRef(engine=None, idx=logical_ids[0]) + qb1 = WeakQubitRef(engine=None, idx=logical_ids[1]) + return Command(None, gate, qubits=([qb0], ), controls=[qb1]) + else: + raise RuntimeError('Unsupported') + + +def search_cmd(command_dag, cmd): + for node in command_dag._dag: + if node.cmd is cmd: + return node + raise RuntimeError('Unable to find command in DAG') + + @pytest.fixture(scope="module") def simple_graph(): # 2 4 @@ -99,8 +124,8 @@ def decay_manager(): @pytest.fixture -def gates_dag(): - return multi.GatesDAG() +def command_dag(): + return multi.CommandDAG() @pytest.fixture @@ -123,24 +148,24 @@ def test_decay_manager_add(decay_manager): decay_manager.add_to_decay(0) assert list(decay_manager._backend_ids) == [0] backend_qubit = decay_manager._backend_ids[0] - assert backend_qubit.decay == delta - assert backend_qubit.lifetime == lifetime + assert backend_qubit['decay'] == pytest.approx(1 + delta) + assert backend_qubit['lifetime'] == lifetime decay_manager.add_to_decay(0) assert list(decay_manager._backend_ids) == [0] backend_qubit = decay_manager._backend_ids[0] - assert backend_qubit.decay == 2 * delta - assert backend_qubit.lifetime == lifetime + assert backend_qubit['decay'] == pytest.approx(1 + 2 * delta) + assert backend_qubit['lifetime'] == lifetime decay_manager.add_to_decay(1) assert sorted(decay_manager._backend_ids) == [0, 1] backend_qubit = decay_manager._backend_ids[0] - assert backend_qubit.decay == 2 * delta - assert backend_qubit.lifetime == lifetime + assert backend_qubit['decay'] == pytest.approx(1 + 2 * delta) + assert backend_qubit['lifetime'] == lifetime backend_qubit = decay_manager._backend_ids[1] - assert backend_qubit.decay == delta - assert backend_qubit.lifetime == lifetime + assert backend_qubit['decay'] == pytest.approx(1 + 
delta) + assert backend_qubit['lifetime'] == lifetime def test_decay_manager_remove(decay_manager): @@ -162,10 +187,10 @@ def test_decay_manager_get_decay_value(decay_manager): decay_manager.add_to_decay(0) decay_manager.add_to_decay(1) - assert decay_manager.get_decay_value(0) == 2 * delta - assert decay_manager.get_decay_value(1) == delta - assert decay_manager.get_decay_value(-1) == 0 - assert decay_manager.get_decay_value(2) == 0 + assert decay_manager.get_decay_value(0) == pytest.approx(1 + 2 * delta) + assert decay_manager.get_decay_value(1) == pytest.approx(1 + delta) + assert decay_manager.get_decay_value(-1) == 1 + assert decay_manager.get_decay_value(2) == 1 def test_decay_manager_step(decay_manager): @@ -176,8 +201,8 @@ def test_decay_manager_step(decay_manager): decay_manager.step() backend_qubit = decay_manager._backend_ids[0] - assert backend_qubit.decay == delta - assert backend_qubit.lifetime == lifetime - 1 + assert backend_qubit['decay'] == pytest.approx(1 + delta) + assert backend_qubit['lifetime'] == lifetime - 1 decay_manager.add_to_decay(0) decay_manager.add_to_decay(1) @@ -187,19 +212,20 @@ def test_decay_manager_step(decay_manager): backend_qubit0 = decay_manager._backend_ids[0] backend_qubit1 = decay_manager._backend_ids[1] - assert backend_qubit0.decay == 2 * delta - assert backend_qubit0.lifetime == lifetime - 1 - assert backend_qubit1.decay == delta - assert backend_qubit1.lifetime == lifetime - 1 + assert backend_qubit0['decay'] == pytest.approx(1 + 2 * delta) + assert backend_qubit0['lifetime'] == lifetime - 1 + assert backend_qubit1['decay'] == pytest.approx(1 + delta) + assert backend_qubit1['lifetime'] == lifetime - 1 decay_manager.step() - assert backend_qubit0.decay == 2 * delta - assert backend_qubit0.lifetime == lifetime - 2 - assert backend_qubit1.decay == delta - assert backend_qubit1.lifetime == lifetime - 2 + assert backend_qubit0['decay'] == pytest.approx(1 + 2 * delta) + assert backend_qubit0['lifetime'] == lifetime - 2 + 
assert backend_qubit1['decay'] == pytest.approx(1 + delta) + assert backend_qubit1['lifetime'] == lifetime - 2 decay_manager.add_to_decay(1) - assert backend_qubit1.lifetime == lifetime + assert backend_qubit1['decay'] == pytest.approx(1 + 2 * delta) + assert backend_qubit1['lifetime'] == lifetime for i in range(3): decay_manager.step() @@ -218,46 +244,105 @@ def test_decay_manager_step(decay_manager): # ------------------------------------------------------------------------------ -def test_gates_dag_init(gates_dag): - assert gates_dag._dag.number_of_nodes() == 0 - assert gates_dag._dag.number_of_edges() == 0 - assert not gates_dag.front_layer - assert not gates_dag.near_term_layer +def test_command_dag_init(command_dag): + assert command_dag._dag.number_of_nodes() == 0 + assert command_dag._dag.number_of_edges() == 0 + assert not command_dag.front_layer + assert not command_dag.near_term_layer + + +def test_command_dag_add_1qubit_gate(command_dag): + cmd0a = gen_cmd(0) + cmd0b = gen_cmd(0) + cmd1 = gen_cmd(1) + # ---------------------------------- + + command_dag.add_command(cmd0a) + command_dag.add_command(cmd1) + command_dag.add_command(cmd0b) + dag_node0a = search_cmd(command_dag, cmd0a) + dag_node1 = search_cmd(command_dag, cmd1) + + with pytest.raises(RuntimeError): + search_cmd(command_dag, cmd0b) + + assert command_dag._dag.number_of_nodes() == 2 + assert command_dag._dag.number_of_edges() == 0 + assert command_dag.front_layer + assert not command_dag.near_term_layer + assert dag_node0a.logical_ids == frozenset((0, )) + assert command_dag.front_layer == [dag_node0a, dag_node1] + assert command_dag._logical_ids_in_diag == {0, 1} + assert command_dag._back_layer == {0: dag_node0a, 1: dag_node1} + + +def test_command_dag_add_1qubit_gate_allocate(command_dag): + + allocate2 = gen_cmd(2, gate=Allocate) + cmd2a = gen_cmd(2) + cmd2b = gen_cmd(2) + deallocate2 = gen_cmd(2, gate=Allocate) + + # ---------------------------------- + + 
command_dag.add_command(allocate2) + command_dag.add_command(cmd2a) + command_dag.add_command(cmd2b) + command_dag.add_command(deallocate2) + dag_allocate = search_cmd(command_dag, allocate2) + dag_deallocate = search_cmd(command_dag, deallocate2) + with pytest.raises(RuntimeError): + search_cmd(command_dag, cmd2a) + with pytest.raises(RuntimeError): + search_cmd(command_dag, cmd2b) + + assert command_dag._dag.number_of_nodes() == 2 + assert command_dag._dag.number_of_edges() == 1 + assert command_dag.front_layer == [dag_allocate] + assert not command_dag.near_term_layer + assert dag_allocate.logical_ids == frozenset((2, )) + assert dag_deallocate.logical_ids == frozenset((2, )) + assert command_dag._logical_ids_in_diag == {2} + assert command_dag._back_layer == {2: dag_deallocate} + + +def test_command_dag_add_2qubit_gate(command_dag): + cmd01 = gen_cmd(0, 1) + cmd56 = gen_cmd(5, 6) + cmd12 = gen_cmd(1, 2) + cmd12b = gen_cmd(1, 2) + cmd26 = gen_cmd(2, 6) + # ---------------------------------- -def test_gates_dag_add_gate(gates_dag): - dag_node01 = gates_dag.add_gate(0, 1) + command_dag.add_command(cmd01) + dag_node01 = search_cmd(command_dag, cmd01) - assert gates_dag._dag.number_of_nodes() == 1 - assert gates_dag._dag.number_of_edges() == 0 - assert gates_dag.front_layer - assert not gates_dag.near_term_layer - assert dag_node01.logical_id0 == 0 - assert dag_node01.logical_id1 == 1 + assert command_dag._dag.number_of_nodes() == 1 + assert command_dag._dag.number_of_edges() == 0 + assert command_dag.front_layer + assert not command_dag.near_term_layer assert dag_node01.logical_ids == frozenset((0, 1)) - assert gates_dag.front_layer == [dag_node01] - assert gates_dag._logical_ids_in_diag == {0, 1} - assert gates_dag._back_layer == {0: dag_node01, 1: dag_node01} + assert command_dag.front_layer == [dag_node01] + assert command_dag._logical_ids_in_diag == {0, 1} + assert command_dag._back_layer == {0: dag_node01, 1: dag_node01} # ---------------------------------- - 
dag_node56 = gates_dag.add_gate(5, 6) + command_dag.add_command(cmd56) + dag_node56 = search_cmd(command_dag, cmd56) - assert gates_dag._dag.number_of_nodes() == 2 - assert gates_dag._dag.number_of_edges() == 0 - assert gates_dag.front_layer - assert not gates_dag.near_term_layer + assert command_dag._dag.number_of_nodes() == 2 + assert command_dag._dag.number_of_edges() == 0 + assert command_dag.front_layer + assert not command_dag.near_term_layer - assert dag_node01.logical_id0 == 0 - assert dag_node01.logical_id1 == 1 assert dag_node01.logical_ids == frozenset((0, 1)) - assert dag_node56.logical_id0 == 5 - assert dag_node56.logical_id1 == 6 assert dag_node56.logical_ids == frozenset((5, 6)) - assert gates_dag.front_layer == [dag_node01, dag_node56] - assert gates_dag._logical_ids_in_diag == {0, 1, 5, 6} - assert gates_dag._back_layer == { + assert command_dag.front_layer == [dag_node01, dag_node56] + assert command_dag._logical_ids_in_diag == {0, 1, 5, 6} + assert command_dag._back_layer == { 0: dag_node01, 1: dag_node01, 5: dag_node56, @@ -266,25 +351,24 @@ def test_gates_dag_add_gate(gates_dag): # ---------------------------------- - dag_node12 = gates_dag.add_gate(1, 2) - assert gates_dag._dag.number_of_nodes() == 3 - assert gates_dag._dag.number_of_edges() == 1 - assert gates_dag.front_layer - assert not gates_dag.near_term_layer + command_dag.add_command(cmd12) + command_dag.add_command(cmd12b) + dag_node12 = search_cmd(command_dag, cmd12) + with pytest.raises(RuntimeError): + search_cmd(command_dag, cmd12b) + + assert command_dag._dag.number_of_nodes() == 3 + assert command_dag._dag.number_of_edges() == 1 + assert command_dag.front_layer + assert not command_dag.near_term_layer - assert dag_node01.logical_id0 == 0 - assert dag_node01.logical_id1 == 1 assert dag_node01.logical_ids == frozenset((0, 1)) - assert dag_node12.logical_id0 == 1 - assert dag_node12.logical_id1 == 2 assert dag_node12.logical_ids == frozenset((1, 2)) - assert dag_node56.logical_id0 
== 5 - assert dag_node56.logical_id1 == 6 assert dag_node56.logical_ids == frozenset((5, 6)) - assert gates_dag.front_layer == [dag_node01, dag_node56] - assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} - assert gates_dag._back_layer == { + assert command_dag.front_layer == [dag_node01, dag_node56] + assert command_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert command_dag._back_layer == { 0: dag_node01, 1: dag_node12, 2: dag_node12, @@ -294,16 +378,16 @@ def test_gates_dag_add_gate(gates_dag): # ---------------------------------- - dag_node26 = gates_dag.add_gate(2, 6) - assert gates_dag.add_gate(2, 6) is None - assert gates_dag._dag.number_of_nodes() == 4 - assert gates_dag._dag.number_of_edges() == 3 - assert gates_dag.front_layer - assert not gates_dag.near_term_layer + command_dag.add_command(cmd26) + dag_node26 = search_cmd(command_dag, cmd26) + assert command_dag._dag.number_of_nodes() == 4 + assert command_dag._dag.number_of_edges() == 3 + assert command_dag.front_layer + assert not command_dag.near_term_layer - assert gates_dag.front_layer == [dag_node01, dag_node56] - assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} - assert gates_dag._back_layer == { + assert command_dag.front_layer == [dag_node01, dag_node56] + assert command_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert command_dag._back_layer == { 0: dag_node01, 1: dag_node12, 2: dag_node26, @@ -312,45 +396,134 @@ def test_gates_dag_add_gate(gates_dag): } -def test_gates_dag_remove_from_front_layer(gates_dag): - dag_node01 = gates_dag.add_gate(0, 1) - dag_node56 = gates_dag.add_gate(5, 6) - dag_node12 = gates_dag.add_gate(1, 2) - dag_node26 = gates_dag.add_gate(2, 6) - dag_node78 = gates_dag.add_gate(7, 8) +def test_command_dag_add_gate(command_dag): + cmd0 = gen_cmd(0) + cmd01 = gen_cmd(0, 1) + cmd56 = gen_cmd(5, 6) + cmd7 = gen_cmd(7) + + # ---------------------------------- + + command_dag.add_command(cmd0) + command_dag.add_command(cmd01) + dag_node0 = 
search_cmd(command_dag, cmd0) + + assert len(command_dag.front_layer) == 1 + assert not command_dag.front_layer_for_cost_fun + + assert command_dag._dag.number_of_nodes() == 2 + assert command_dag._dag.number_of_edges() == 1 + assert command_dag.front_layer == [dag_node0] + assert not command_dag.near_term_layer + + command_dag.add_command(cmd56) + command_dag.add_command(cmd7) + dag_node56 = search_cmd(command_dag, cmd56) + + assert len(command_dag.front_layer) == 3 + assert command_dag.front_layer_for_cost_fun == [dag_node56] + + +def test_command_dag_remove_from_front_layer1(command_dag): + allocate0 = gen_cmd(0, gate=Allocate) + cmd0 = gen_cmd(0) + deallocate0 = gen_cmd(0, gate=Deallocate) + + # ---------------------------------- + + command_dag.add_command(allocate0) + command_dag.add_command(cmd0) + command_dag.add_command(deallocate0) + dag_allocate0 = search_cmd(command_dag, allocate0) + dag_deallocate = search_cmd(command_dag, deallocate0) with pytest.raises(RuntimeError): - gates_dag.remove_from_front_layer(1, 2) + search_cmd(command_dag, cmd0) + + with pytest.raises(RuntimeError): + command_dag.remove_from_front_layer(cmd0) + + assert command_dag.front_layer == [dag_allocate0] - assert gates_dag.front_layer == [dag_node01, dag_node56, dag_node78] + command_dag.remove_from_front_layer(allocate0) + assert command_dag.front_layer == [dag_deallocate] + assert command_dag._logical_ids_in_diag == {0} - gates_dag.remove_from_front_layer(7, 8) - assert gates_dag.front_layer == [dag_node01, dag_node56] - assert gates_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} - assert 7 not in gates_dag._back_layer - assert 8 not in gates_dag._back_layer + command_dag.remove_from_front_layer(deallocate0) + assert not command_dag.front_layer - gates_dag.remove_from_front_layer(1, 0) - assert gates_dag.front_layer == [dag_node56, dag_node12] - gates_dag.remove_from_front_layer(5, 6) - assert gates_dag.front_layer == [dag_node12] +def 
test_command_dag_remove_from_front_layer2(command_dag): + cmd01 = gen_cmd(0, 1) + cmd56 = gen_cmd(5, 6) + cmd12 = gen_cmd(1, 2) + cmd26 = gen_cmd(2, 6) + cmd78 = gen_cmd(7, 8) + + # ---------------------------------- + + command_dag.add_command(cmd01) + command_dag.add_command(cmd56) + command_dag.add_command(cmd12) + command_dag.add_command(cmd26) + command_dag.add_command(cmd78) + dag_node01 = search_cmd(command_dag, cmd01) + dag_node56 = search_cmd(command_dag, cmd56) + dag_node12 = search_cmd(command_dag, cmd12) + dag_node26 = search_cmd(command_dag, cmd26) + dag_node78 = search_cmd(command_dag, cmd78) + + with pytest.raises(RuntimeError): + command_dag.remove_from_front_layer(cmd12) + + assert command_dag.front_layer == [dag_node01, dag_node56, dag_node78] - gates_dag.remove_from_front_layer(1, 2) - assert gates_dag.front_layer == [dag_node26] + command_dag.remove_from_front_layer(cmd78) + assert command_dag.front_layer == [dag_node01, dag_node56] + assert command_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} + assert 7 not in command_dag._back_layer + assert 8 not in command_dag._back_layer + command_dag.remove_from_front_layer(cmd01) + assert command_dag.front_layer == [dag_node56, dag_node12] -def test_gates_dag_max_distance(gates_dag): - dag_node23a = gates_dag.add_gate(2, 3) - dag_node56 = gates_dag.add_gate(5, 6) - dag_node12 = gates_dag.add_gate(1, 2) - dag_node34 = gates_dag.add_gate(3, 4) - dag_node23b = gates_dag.add_gate(2, 3) - dag_node46 = gates_dag.add_gate(4, 6) - dag_node45 = gates_dag.add_gate(5, 4) - dag_node14 = gates_dag.add_gate(4, 1) + command_dag.remove_from_front_layer(cmd56) + assert command_dag.front_layer == [dag_node12] - distance = gates_dag.max_distance_in_dag() + command_dag.remove_from_front_layer(cmd12) + assert command_dag.front_layer == [dag_node26] + + +def test_command_dag_max_distance(command_dag): + cmd23a = gen_cmd(2, 3) + cmd56 = gen_cmd(5, 6) + cmd12 = gen_cmd(1, 2) + cmd34 = gen_cmd(3, 4) + cmd23b = gen_cmd(2, 3) + 
cmd46 = gen_cmd(4, 6) + cmd45 = gen_cmd(5, 4) + cmd14 = gen_cmd(4, 1) + + # ---------------------------------- + + command_dag.add_command(cmd23a) + command_dag.add_command(cmd56) + command_dag.add_command(cmd12) + command_dag.add_command(cmd34) + command_dag.add_command(cmd23b) + command_dag.add_command(cmd46) + command_dag.add_command(cmd45) + command_dag.add_command(cmd14) + dag_node23a = search_cmd(command_dag, cmd23a) + dag_node56 = search_cmd(command_dag, cmd56) + dag_node12 = search_cmd(command_dag, cmd12) + dag_node34 = search_cmd(command_dag, cmd34) + dag_node23b = search_cmd(command_dag, cmd23b) + dag_node46 = search_cmd(command_dag, cmd46) + dag_node45 = search_cmd(command_dag, cmd45) + dag_node14 = search_cmd(command_dag, cmd14) + + distance = command_dag.max_distance_in_dag() assert distance[dag_node23a] == 0 assert distance[dag_node56] == 0 assert distance[dag_node12] == 1 @@ -361,25 +534,115 @@ def test_gates_dag_max_distance(gates_dag): assert distance[dag_node14] == 4 -def test_gates_dag_near_term_layer(gates_dag): - dag_node23a = gates_dag.add_gate(2, 3) - dag_node56 = gates_dag.add_gate(5, 6) - dag_node12 = gates_dag.add_gate(1, 2) - dag_node34 = gates_dag.add_gate(3, 4) - dag_node23b = gates_dag.add_gate(2, 3) - dag_node46 = gates_dag.add_gate(4, 6) - dag_node45 = gates_dag.add_gate(5, 4) - dag_node14 = gates_dag.add_gate(4, 1) - - gates_dag.calculate_near_term_layer(0) - assert not gates_dag.near_term_layer - - gates_dag.calculate_near_term_layer(1) - assert {dag_node12, dag_node34} == gates_dag.near_term_layer - - gates_dag.calculate_near_term_layer(2) - assert {dag_node12, dag_node34, dag_node23b, - dag_node46} == gates_dag.near_term_layer +def test_command_dag_near_term_layer(command_dag): + cmd23a = gen_cmd(2, 3) + cmd56 = gen_cmd(5, 6) + cmd12 = gen_cmd(1, 2) + cmd34 = gen_cmd(3, 4) + cmd23b = gen_cmd(2, 3) + cmd46 = gen_cmd(4, 6) + cmd45 = gen_cmd(5, 4) + cmd14 = gen_cmd(4, 1) + command_dag.add_command(cmd23a) + 
command_dag.add_command(cmd56) + command_dag.add_command(cmd12) + command_dag.add_command(cmd34) + command_dag.add_command(cmd23b) + command_dag.add_command(cmd46) + command_dag.add_command(cmd45) + command_dag.add_command(cmd14) + dag_node12 = search_cmd(command_dag, cmd12) + dag_node34 = search_cmd(command_dag, cmd34) + + command_dag.calculate_near_term_layer({i: i for i in range(7)}) + assert [dag_node12, dag_node34] == command_dag.near_term_layer + + +def test_command_dag_calculate_interaction_list(command_dag): + cmd01 = gen_cmd(0, 1) + cmd03 = gen_cmd(0, 3) + cmd34 = gen_cmd(3, 4) + cmd7 = gen_cmd(7, gate=Allocate) + cmd8 = gen_cmd(8) + + command_dag.add_command(cmd01) + command_dag.add_command(cmd34) + command_dag.add_command(cmd03) + command_dag.add_command(cmd8) + command_dag.add_command(cmd7) + + interactions = command_dag.calculate_interaction_list() + + assert (0, 1) in interactions + assert (0, 3) in interactions + assert (3, 4) in interactions + + +def test_command_dag_generate_qubit_interaction_graph(command_dag): + + qb, allocate_cmds = allocate_all_qubits_cmd(9) + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + + command_dag.add_command(cmd0) + command_dag.add_command(cmd1) + command_dag.add_command(cmd2) + command_dag.add_command(cmd3) + + subgraphs = command_dag.calculate_qubit_interaction_subgraphs(max_order=2) + assert len(subgraphs) == 1 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + cmd4 = Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]) + cmd5 = Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) + 
command_dag.add_command(cmd4) + command_dag.add_command(cmd5) + + subgraphs = command_dag.calculate_qubit_interaction_subgraphs(max_order=2) + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert subgraphs[1] in ([5, 4, 6], [5, 6, 4]) + + # -------------------------------------------------------------------------- + + cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) + cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) + command_dag.add_command(cmd6) + command_dag.add_command(cmd7) + + subgraphs = command_dag.calculate_qubit_interaction_subgraphs(max_order=2) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 5 + assert all([n in subgraphs[0] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[0][-2:] in ([4, 8], [8, 4]) + assert len(subgraphs[1]) == 4 + assert all([n in subgraphs[1] for n in [0, 1, 2, 3]]) + assert subgraphs[1][-2:] in ([1, 3], [3, 1]) + + # -------------------------------------------------------------------------- + + command_dag.add_command( + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) + subgraphs = command_dag.calculate_qubit_interaction_subgraphs(max_order=3) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][0] == 0 + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert len(subgraphs[1]) == 5 + assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[1][-2:] in ([4, 8], [8, 4]) # ============================================================================== @@ -419,32 +682,58 @@ def test_qubit_manager_valid_and_invalid_graphs(simple_graph): assert dist[1][6] == 3 -def test_qubit_manager_push_interaction(qubit_manager): - qubit_manager.push_interaction(0, 1) - assert qubit_manager.stats[frozenset((0, 1))] == 1 - 
qubit_manager.push_interaction(0, 1) - assert qubit_manager.stats[frozenset((0, 1))] == 2 - qubit_manager.push_interaction(5, 6) - assert qubit_manager.stats[frozenset((0, 1))] == 2 - assert qubit_manager.stats[frozenset((5, 6))] == 1 - - def test_qubit_manager_can_execute_gate(qubit_manager): + cmd0 = gen_cmd(0) + cmd01 = gen_cmd(0, 1) + cmd38 = gen_cmd(3, 8) + mapping = {i: i for i in range(9)} - qubit_manager.push_interaction(5, 6) - assert not qubit_manager._can_execute_some_gate(mapping) - qubit_manager.push_interaction(0, 1) - assert qubit_manager._can_execute_some_gate(mapping) + manager = deepcopy(qubit_manager) + manager.add_command(cmd38) + assert not manager._can_execute_some_gate(mapping) + manager.add_command(cmd0) + assert manager._can_execute_some_gate(mapping) + + manager = deepcopy(qubit_manager) + manager.add_command(cmd38) + assert not manager._can_execute_some_gate(mapping) + manager.add_command(cmd01) + assert manager._can_execute_some_gate(mapping) + + +def test_qubit_manager_clear(qubit_manager): + cmd0 = gen_cmd(0) + cmd01 = gen_cmd(0, 1) + cmd38 = gen_cmd(3, 8) + + qubit_manager.add_command(cmd38) + qubit_manager.add_command(cmd0) + qubit_manager.add_command(cmd38) + qubit_manager.add_command(cmd01) + + qubit_manager._decay.add_to_decay(0) + + assert qubit_manager._decay._backend_ids + assert qubit_manager._dag._dag + qubit_manager.clear() + assert not qubit_manager._decay._backend_ids + assert not qubit_manager._dag._dag + +def test_qubit_manager_generate_one_swap_step(qubit_manager): + cmd08 = gen_cmd(0, 8) + cmd01 = gen_cmd(0, 1) + + # ---------------------------------- -def test_qubit_manager_generatae_one_swap_step(qubit_manager): manager = deepcopy(qubit_manager) - manager.push_interaction(0, 8) + manager.add_command(cmd08) mapping = {i: i for i in range(9)} - (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun) + (logical_id0, backend_id0, logical_id1, + backend_id1) 
= manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun, {}) assert logical_id0 in (0, 8) if logical_id0 == 0: @@ -453,8 +742,9 @@ def test_qubit_manager_generatae_one_swap_step(qubit_manager): assert backend_id1 in (5, 7) mapping = {0: 0, 8: 8} - (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun) + (logical_id0, backend_id0, logical_id1, + backend_id1) = manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun, {}) assert logical_id1 == -1 if logical_id0 == 0: @@ -465,12 +755,13 @@ def test_qubit_manager_generatae_one_swap_step(qubit_manager): # ---------------------------------- manager = deepcopy(qubit_manager) - manager.push_interaction(0, 1) - manager.push_interaction(0, 8) + manager.add_command(cmd01) + manager.add_command(cmd08) mapping = {i: i for i in range(9)} - (logical_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun) + (logical_id0, backend_id0, logical_id1, + backend_id1) = manager._generate_one_swap_step( + mapping, multi.nearest_neighbours_cost_fun, {}) # In this case, the only swap that does not increases the overall distance # is (0, 1) @@ -479,6 +770,11 @@ def test_qubit_manager_generatae_one_swap_step(qubit_manager): def test_qubit_manager_generate_swaps(qubit_manager): + cmd08 = gen_cmd(0, 8) + cmd01 = gen_cmd(0, 1) + + # ---------------------------------- + manager = deepcopy(qubit_manager) mapping = {i: i for i in range(9)} @@ -490,7 +786,8 @@ def test_qubit_manager_generate_swaps(qubit_manager): # ---------------------------------- - manager.push_interaction(0, 8) + manager.add_command(cmd08) + assert manager.size() == 1 with pytest.raises(RuntimeError): manager.generate_swaps(mapping, @@ -517,7 +814,6 @@ def test_qubit_manager_generate_swaps(qubit_manager): # ---------------------------------- mapping = {i: i for i in range(9)} - manager._use_near_term_layer = 1 
swaps, _ = manager.generate_swaps(mapping, multi.look_ahead_parallelism_cost_fun, opts={'W': 0.5}) @@ -532,17 +828,14 @@ def test_qubit_manager_generate_swaps(qubit_manager): # ---------------------------------- manager = deepcopy(qubit_manager) - manager._use_near_term_layer = 1 mapping = {0: 0, 1: 1, 8: 8} - manager.push_interaction(0, 8) - manager.push_interaction(0, 1) + manager.add_command(cmd08) + manager.add_command(cmd01) + assert manager.size() == 2 swaps, all_qubits = manager.generate_swaps( - mapping, - multi.look_ahead_parallelism_cost_fun, - opts={ + mapping, multi.look_ahead_parallelism_cost_fun, opts={ 'W': 0.5, - 'near_term_layer': 1 }) mapping = {i: i for i in range(9)} @@ -560,3 +853,144 @@ def test_qubit_manager_generate_swaps(qubit_manager): # Both gates should be executable at the same time assert manager.graph.has_edge(mapping[0], mapping[8]) assert manager.graph.has_edge(mapping[0], mapping[1]) + + +def test_qubit_manager_get_executable_commands(qubit_manager): + cmd0 = gen_cmd(0) + cmd01 = gen_cmd(0, 1) + cmd03 = gen_cmd(0, 3) + cmd34 = gen_cmd(3, 4) + cmd7 = gen_cmd(7, gate=Allocate) + cmd8a = gen_cmd(8, gate=Allocate) + cmd8b = gen_cmd(8) + + manager = deepcopy(qubit_manager) + mapping = {0: 0, 1: 1, 3: 3, 4: 4, 8: 8} + manager.add_command(cmd0) + manager.add_command(cmd01) + manager.add_command(cmd34) + manager.add_command(cmd03) + manager.add_command(cmd8a) + manager.add_command(cmd8b) + manager.add_command(cmd7) + + dag_allocate7 = search_cmd(manager._dag, cmd7) + + assert manager.size() == 6 + + cmds_to_execute, allocate_cmds = manager.get_executable_commands(mapping) + + assert cmds_to_execute == [cmd0, cmd34, cmd8a, cmd8b, cmd01, cmd03] + assert allocate_cmds == [dag_allocate7] + assert manager.size() == 1 + + mapping.update({7: 7}) + cmds_to_execute = manager.execute_allocate_cmds(allocate_cmds, mapping) + + assert cmds_to_execute == [cmd7] + assert manager.size() == 0 + + mapping = {0: 0, 1: 1, 3: 3, 4: 4, 8: 8} + 
manager.add_command(cmd01) + manager.add_command(cmd03) + manager.add_command(cmd34) + manager.add_command(cmd8a) + manager.add_command(cmd8b) + manager.add_command(cmd7) + + dag_allocate7 = search_cmd(manager._dag, cmd7) + + cmds_to_execute, allocate_cmds = manager.get_executable_commands(mapping) + + assert cmds_to_execute == [cmd01, cmd8a, cmd8b, cmd03, cmd34] + assert allocate_cmds == [dag_allocate7] + assert manager.size() == 1 + + +def test_qubit_manager_generate_qubit_interaction_graph(qubit_manager): + qb, allocate_cmds = allocate_all_qubits_cmd(9) + cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) + cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) + cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) + cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) + + qubit_manager.add_command(cmd0) + qubit_manager.add_command(cmd1) + qubit_manager.add_command(cmd2) + qubit_manager.add_command(cmd3) + + cmd4 = Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]) + cmd5 = Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) + qubit_manager.add_command(cmd4) + qubit_manager.add_command(cmd5) + + cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) + cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) + qubit_manager.add_command(cmd6) + qubit_manager.add_command(cmd7) + + qubit_manager.add_command( + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) + subgraphs = qubit_manager.calculate_qubit_interaction_subgraphs( + max_order=2) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][0] == 0 + assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert len(subgraphs[1]) == 5 + assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) + assert subgraphs[1][-2:] in ([4, 8], [8, 4]) + + +def 
test_qubit_manager_generate_swaps_change_mapping(qubit_manager): + cmd05 = gen_cmd(0, 5) + cmd07 = gen_cmd(0, 7) + cmd58 = gen_cmd(5, 8) + + qubit_manager.add_command(cmd05) + qubit_manager.add_command(cmd07) + qubit_manager.add_command(cmd58) + + mapping = {i: i for i in range(9)} + + swaps, all_qubits = qubit_manager.generate_swaps( + mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + + reverse_mapping = {v: k for k, v in mapping.items()} + for bqb0, bqb1 in swaps: + (reverse_mapping[bqb0], + reverse_mapping[bqb1]) = (reverse_mapping[bqb1], + reverse_mapping[bqb0]) + mapping = {v: k for k, v in reverse_mapping.items()} + + cmd_list, _ = qubit_manager.get_executable_commands(mapping) + assert cmd_list == [cmd05, cmd07, cmd58] + assert qubit_manager.size() == 0 + + # ---------------------------------- + + qubit_manager.clear() + + cmd06 = gen_cmd(0, 6) + + qubit_manager.add_command(cmd05) + qubit_manager.add_command(cmd06) + qubit_manager.add_command(cmd58) + + mapping = {i: i for i in range(9)} + + swaps, all_qubits = qubit_manager.generate_swaps( + mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + + reverse_mapping = {v: k for k, v in mapping.items()} + for bqb0, bqb1 in swaps: + (reverse_mapping[bqb0], + reverse_mapping[bqb1]) = (reverse_mapping[bqb1], + reverse_mapping[bqb0]) + mapping = {v: k for k, v in reverse_mapping.items()} + + cmd_list, _ = qubit_manager.get_executable_commands(mapping) + assert cmd_list == [cmd05, cmd06] + assert qubit_manager.size() == 1 From c5a35c8461c021489bb80fcda6f7dac3e67798eb Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Wed, 16 Oct 2019 15:05:58 +0200 Subject: [PATCH 15/25] Remove CommandList and replace it with MultiQubitGateManager --- projectq/cengines/_command_list.py | 136 ------------ projectq/cengines/_command_list_test.py | 281 ----------------------- projectq/cengines/_graphmapper.py | 282 +++++++++--------------- projectq/cengines/_graphmapper_test.py | 258 ++++++---------------- 4 files 
changed, 176 insertions(+), 781 deletions(-) delete mode 100644 projectq/cengines/_command_list.py delete mode 100644 projectq/cengines/_command_list_test.py diff --git a/projectq/cengines/_command_list.py b/projectq/cengines/_command_list.py deleted file mode 100644 index 9642fc2bd..000000000 --- a/projectq/cengines/_command_list.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2019 ProjectQ-Framework (www.projectq.ch) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This is a helper module for the _graphmapper.GraphMapper class. -""" - -from copy import deepcopy -import networkx as nx - -# ============================================================================== - - -class CommandList(): - """Class used to manage a list of ProjectQ commands""" - - def __init__(self): - self._cmds = [] - self.partitions = [set()] - self.interactions = [[]] - - def __len__(self): - return len(self._cmds) - - def __iadd__(self, other): - self.extend(other) - return self - - def __iter__(self): - return self._cmds.__iter__() - - def __getitem__(self, key): - return self._cmds[key] - - def __eq__(self, other): - if isinstance(other, list): - return self._cmds == other - if isinstance(other, CommandList): - return self._cmds == other._cmds - raise NotImplementedError() - - @property - def stored_commands(self): - """ - Simple getter. - """ - return deepcopy(self._cmds) - - def clear(self): - """ - Remove all commands from the container. 
- """ - self._cmds = [] - self.partitions = [set()] - self.interactions = [[]] - - def append(self, cmd): - """ - Append a command to the end of the container. - """ - self._cmds.append(cmd) - - qubit_ids = {qubit.id for qureg in cmd.all_qubits for qubit in qureg} - if len(qubit_ids) > 1: - # Add new partition if any qubit ids are already present in the - # current partition - if self.partitions[-1] \ - and self.partitions[-1] & qubit_ids: - self.partitions.append(set()) - self.interactions.append([]) - self.partitions[-1] |= qubit_ids - self.interactions[-1].append(tuple(sorted(qubit_ids))) - - def extend(self, iterable): - """ - Extend container by appending commands from the iterable. - """ - for cmd in iterable: - self.append(cmd) - - # -------------------------------------------------------------------------- - - def calculate_qubit_interaction_subgraphs(self, order=2): - """ - Calculate qubits interaction graph based on all commands stored. - - While iterating through the partitions, we create a graph whose - vertices are logical qubit IDs and where edges represent an interaction - between qubits. - Additionally, we make sure that the resulting graph has no vertices - with degree higher than a specified threshold. - - Args: - order (int): maximum degree of the nodes in the resulting graph - - Returns: - A list of list of graph nodes corresponding to all the connected - components of the qubit interaction graph. Within each components, - nodes are sorted in decreasing order of their degree. - - Note: - The current implementation is really aimed towards handling - two-qubit gates but should also work with higher order qubit gates. 
- """ - graph = nx.Graph() - for timestep in self.interactions: - for interaction in timestep: - for prev, cur in zip(interaction, interaction[1:]): - if prev not in graph \ - or cur not in graph \ - or (len(graph[prev]) < order - and len(graph[cur]) < order): - graph.add_edge(prev, cur) - - # Return value is a list of list of nodes corresponding to a list of - # connected components of the intial graph sorted by their order - # Each connected component is sorted in decreasing order by the degree - # of each node in the graph - return [ - sorted( - graph.subgraph(g), key=lambda n: len(graph[n]), reverse=True) - for g in sorted( - nx.connected_components(graph), - key=lambda c: (max(len(graph[n]) for n in c), len(c)), - reverse=True) - ] diff --git a/projectq/cengines/_command_list_test.py b/projectq/cengines/_command_list_test.py deleted file mode 100644 index 86eb1ea57..000000000 --- a/projectq/cengines/_command_list_test.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2019 ProjectQ-Framework (www.projectq.ch) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tests for projectq.cengines._command_list.py.""" - -from projectq.cengines._command_list import CommandList - -from copy import deepcopy -import pytest -from projectq.ops import (Allocate, Command, X) -from projectq.types import WeakQubitRef - -# ============================================================================== - - -def allocate_all_qubits_cmd(num_qubits): - qb = [] - allocate_cmds = [] - for i in range(num_qubits): - qb.append(WeakQubitRef(engine=None, idx=i)) - allocate_cmds.append( - Command(engine=None, gate=Allocate, qubits=([qb[i]], ))) - return qb, allocate_cmds - - -# ============================================================================== - - -@pytest.fixture -def command_list(): - return CommandList() - - -# ============================================================================== - - -def test_empty_command_list(command_list): - assert not command_list - assert command_list._cmds == [] - assert command_list.partitions == [set()] - - -def test_append_single_qubit_gate(command_list): - assert not command_list - - qb0 = WeakQubitRef(engine=None, idx=0) - cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0], )) - command_list.append(cmd0) - assert command_list._cmds == [cmd0] - assert command_list.interactions == [[]] - - cmd1 = Command(engine=None, gate=X, qubits=([qb0], )) - command_list.append(cmd1) - assert command_list._cmds == [cmd0, cmd1] - assert command_list.partitions == [set()] - assert command_list.interactions == [[]] - - assert command_list - command_list.clear() - assert not command_list - assert command_list._cmds == [] - assert command_list.partitions == [set()] - assert command_list.interactions == [[]] - - -def test_append_two_qubit_gate(command_list): - assert not command_list - - qb, allocate_cmds = allocate_all_qubits_cmd(4) - for cmd in allocate_cmds: - command_list.append(cmd) - assert command_list._cmds == allocate_cmds - assert command_list.partitions == [set()] - assert command_list.interactions == 
[[]] - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - command_list.append(cmd0) - assert command_list._cmds == allocate_cmds + [cmd0] - assert command_list.partitions == [{0, 1}] - assert command_list.interactions == [[(0, 1)]] - - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - command_list.append(cmd1) - assert command_list._cmds == allocate_cmds + [cmd0, cmd1] - assert command_list.partitions == [{0, 1, 2, 3}] - assert command_list.interactions == [[(0, 1), (2, 3)]] - - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - command_list.append(cmd2) - assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2] - assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] - assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] - - assert command_list - command_list.clear() - assert not command_list - assert command_list._cmds == [] - assert command_list.partitions == [set()] - assert command_list.interactions == [[]] - - -def test_extend(command_list): - assert not command_list - - qb, allocate_cmds = allocate_all_qubits_cmd(4) - command_list.extend(allocate_cmds) - assert command_list._cmds == allocate_cmds - assert command_list.partitions == [set()] - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list.extend((cmd0, cmd1, cmd2, cmd3)) - assert command_list._cmds == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] - assert command_list.partitions == [{0, 1, 2, 3}, {0, 2}] - assert command_list.interactions == [[(0, 1), (2, 3)], [(0, 2)]] - - -def test_iadd(): - command_list_ref = CommandList() - command_list = CommandList() - assert not command_list - assert not command_list_ref - - qb, allocate_cmds = allocate_all_qubits_cmd(4) - 
command_list_ref.extend(allocate_cmds) - command_list += allocate_cmds - - assert command_list._cmds == command_list_ref._cmds - assert command_list.partitions == command_list_ref.partitions - assert command_list.interactions == command_list_ref.interactions - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list_ref.extend((cmd0, cmd1, cmd2, cmd3)) - command_list += (cmd0, cmd1, cmd2, cmd3) - assert command_list._cmds == command_list_ref._cmds - assert command_list.partitions == command_list_ref.partitions - assert command_list.interactions == command_list_ref.interactions - - -def test_iter(command_list): - assert not command_list - - for cmd in command_list: - raise RuntimeError('ERROR') - - qb, allocate_cmds = allocate_all_qubits_cmd(4) - command_list.extend(allocate_cmds) - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list.extend((cmd0, cmd1, cmd2, cmd3)) - - for cmd, cmd_ref in zip(command_list, command_list.stored_commands): - assert cmd == cmd_ref - - -def test_getitem(command_list): - assert not command_list - - qb, allocate_cmds = allocate_all_qubits_cmd(4) - command_list.extend(allocate_cmds) - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list.extend((cmd0, cmd1, cmd2, cmd3)) - - ref_list = allocate_cmds + [cmd0, cmd1, 
cmd2, cmd3] - for i in range(len(command_list)): - assert command_list[i] == ref_list[i] - - assert command_list[4:] == ref_list[4:] - - -def test_eq(command_list): - assert not command_list - qb, allocate_cmds = allocate_all_qubits_cmd(4) - command_list.extend(allocate_cmds) - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list.extend((cmd0, cmd1, cmd2, cmd3)) - - with pytest.raises(NotImplementedError): - assert command_list == 2 - with pytest.raises(NotImplementedError): - assert command_list == 2. - with pytest.raises(NotImplementedError): - assert command_list == 'asr' - - assert command_list == allocate_cmds + [cmd0, cmd1, cmd2, cmd3] - assert command_list != allocate_cmds - - other_list = deepcopy(command_list) - assert command_list == other_list - other_list.append(cmd0) - assert command_list != other_list - - -def test_generate_qubit_interaction_graph(command_list): - assert not command_list - - qb, allocate_cmds = allocate_all_qubits_cmd(9) - command_list.extend(allocate_cmds) - - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - command_list.extend((cmd0, cmd1, cmd2, cmd3)) - - subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) - assert len(subgraphs) == 1 - assert len(subgraphs[0]) == 4 - assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) - assert subgraphs[0][-2:] in ([1, 3], [3, 1]) - - # -------------------------------------------------------------------------- - - cmd4 = Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]) - cmd5 = 
Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) - command_list.extend((cmd4, cmd5)) - - subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) - assert len(subgraphs) == 2 - assert len(subgraphs[0]) == 4 - - assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) - assert subgraphs[0][-2:] in ([1, 3], [3, 1]) - assert subgraphs[1] in ([5, 4, 6], [5, 6, 4]) - - # -------------------------------------------------------------------------- - - cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) - cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) - command_list.extend((cmd6, cmd7)) - - subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=2) - - assert len(subgraphs) == 2 - assert len(subgraphs[0]) == 5 - assert all([n in subgraphs[0] for n in [4, 5, 6, 7, 8]]) - assert subgraphs[0][-2:] in ([4, 8], [8, 4]) - assert len(subgraphs[1]) == 4 - assert all([n in subgraphs[1] for n in [0, 1, 2, 3]]) - assert subgraphs[1][-2:] in ([1, 3], [3, 1]) - - # -------------------------------------------------------------------------- - - command_list.append( - Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) - subgraphs = command_list.calculate_qubit_interaction_subgraphs(order=3) - - assert len(subgraphs) == 2 - assert len(subgraphs[0]) == 4 - assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) - assert subgraphs[0][0] == 0 - assert subgraphs[0][-2:] in ([1, 3], [3, 1]) - assert len(subgraphs[1]) == 5 - assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) - assert subgraphs[1][-2:] in ([4, 8], [8, 4]) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 9ecf764cd..735b83382 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -31,7 +31,6 @@ from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, FlushGate, Swap) from projectq.types import WeakQubitRef -from ._command_list 
import CommandList from ._multi_qubit_gate_manager import (MultiQubitGateManager, look_ahead_parallelism_cost_fun) @@ -59,7 +58,7 @@ class GraphMapperError(Exception): def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, - stored_commands): + commands_dag): """ Add active qubits to a mapping. @@ -127,7 +126,7 @@ def _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs): def _add_qubits_to_mapping_smart_init(current_mapping, graph, - new_logical_qubit_ids, stored_commands): + new_logical_qubit_ids, commands_dag): """ Add active qubits to a mapping. @@ -148,7 +147,7 @@ def _add_qubits_to_mapping_smart_init(current_mapping, graph, Returns: A new mapping """ qubit_interaction_subgraphs = \ - stored_commands.calculate_qubit_interaction_subgraphs(order=2) + commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) # Interaction subgraph list can be empty if only single qubit gates are # present @@ -159,11 +158,11 @@ def _add_qubits_to_mapping_smart_init(current_mapping, graph, return _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs) return _add_qubits_to_mapping_fcfs(current_mapping, graph, - new_logical_qubit_ids, stored_commands) + new_logical_qubit_ids, commands_dag) def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, - stored_commands): + commands_dag): """ Add active qubits to a mapping @@ -184,7 +183,7 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, Returns: A new mapping """ qubit_interaction_subgraphs = \ - stored_commands.calculate_qubit_interaction_subgraphs(order=2) + commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) # Interaction subgraph list can be empty if only single qubit gates are # present @@ -200,8 +199,7 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, available_nodes = sorted( [n for n in graph if n not in currently_used_nodes], key=lambda n: len(graph[n])) - interactions = list( - 
itertools.chain.from_iterable(stored_commands.interactions)) + interactions = commands_dag.calculate_interaction_list() for logical_id in uniquify_list(new_logical_qubit_ids): qubit_interactions = uniquify_list([ @@ -267,16 +265,21 @@ class GraphMapper(BasicMapperEngine): are to be added to the current mapping. Special possible string values: - "fcfs": first-come first serve - "fcfs_init": first-come first - serve with smarter - mapping - initialisation - Signature of the function call: - current_mapping - graph - new_logical_qubit_ids - stored_commands + + - ``"fcfs"``: first-come first + serve + - ``"fcfs_init"``: first-come + first serve with smarter + mapping initialisation + + Note: + 1) Gates are cached and only mapped from time to time. A + FastForwarding gate doesn't empty the cache, only a FlushGate does. + 2) Only 1 and two qubit gates allowed. + 3) Does not optimize for dirty qubits. + 4) Signature for third argument is + ``add_qubits_to_mapping(current_mapping, graph, + new_logical_qubit_ids, command_dag)`` Attributes: current_mapping: Stores the mapping: key is logical qubit id, value @@ -292,13 +295,6 @@ class GraphMapper(BasicMapperEngine): mappings which have been applied path_stats (dict) : Key is the endpoints of a path, value is the number of such paths which have been applied - - Note: - 1) Gates are cached and only mapped from time to time. A - FastForwarding gate doesn't empty the cache, only a FlushGate does. - 2) Only 1 and two qubit gates allowed. - 3) Does not optimize for dirty qubits. - """ def __init__(self, graph, @@ -359,8 +355,6 @@ def __init__(self, # the bound methods of the random module which might be used in other # places. 
self._rng = random.Random(11) - # Storing commands - self._stored_commands = CommandList() # Logical qubit ids for which the Allocate gate has already been # processed and sent to the next engine but which are not yet # deallocated: @@ -373,6 +367,7 @@ def __init__(self, self._cost_fun = opts.get('cost_fun', look_ahead_parallelism_cost_fun) self._opts = opts.get('opts', {'W': 0.5}) + self._max_swap_steps = opts.get('max_swap_steps', 30) # Statistics: self.num_mappings = 0 @@ -417,154 +412,83 @@ def is_available(self, cmd): num_qubits += len(qureg) return num_qubits <= 2 - def _process_commands(self): + def _send_single_command(self, cmd): """ - Process commands and if necessary, calculate paths through the graph. + Send a command to the next engine taking care of mapped qubit IDs - Attempts to find as many paths through the graph as possible in order - to generate a new mapping that is able to apply as many gates as - possible. + Args: + cmd (Command): A ProjectQ command + """ - It goes through stored_commands and tries to find paths through the - graph that can be applied simultaneously to move the qubits without - side effects so that as many gates can be applied; gates are applied - on on a first come first served basis. 
+ if isinstance(cmd.gate, AllocateQubitGate): + assert cmd.qubits[0][0].id in self._current_mapping + qb0 = WeakQubitRef(engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + self._currently_allocated_ids.add(cmd.qubits[0][0].id) + self.send([ + Command(engine=self, + gate=AllocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) + elif isinstance(cmd.gate, DeallocateQubitGate): + assert cmd.qubits[0][0].id in self._current_mapping + qb0 = WeakQubitRef(engine=self, + idx=self._current_mapping[cmd.qubits[0][0].id]) + self._currently_allocated_ids.remove(cmd.qubits[0][0].id) + self._current_mapping.pop(cmd.qubits[0][0].id) + self.send([ + Command(engine=self, + gate=DeallocateQubitGate(), + qubits=([qb0], ), + tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) + ]) + else: + self._send_cmd_with_mapped_ids(cmd) - Args: - None (list): Nothing here for now + def _send_possible_commands(self): + """ + Send as many commands as possible without introducing swap operations - Returns: A list of paths through the graph to move some qubits and have - them interact + Note: + This function will modify the current mapping when qubit + allocation/deallocation gates are encountered """ - not_in_mapping_qubits = [] - allocated_qubits = deepcopy(self._currently_allocated_ids) - active_qubits = deepcopy(self._currently_allocated_ids) - for cmd in self._stored_commands: - if (len(allocated_qubits) == self.num_qubits - and not active_qubits): - break + (cmds_to_execute, + allocate_cmds) = self.qubit_manager.get_executable_commands( + self._current_mapping) - qubit_ids = [ - qubit.id for qureg in cmd.all_qubits for qubit in qureg - ] + # Execute all the commands that can possibly be executed + for cmd in cmds_to_execute: + self._send_single_command(cmd) - if len(qubit_ids) > 2 or not qubit_ids: - raise Exception("Invalid command (number of qubits): " + - str(cmd)) + # There are no more commands to + num_available_qubits = self.num_qubits - 
len(self._current_mapping) + if allocate_cmds and num_available_qubits > 0: - elif isinstance(cmd.gate, AllocateQubitGate): - qubit_id = cmd.qubits[0][0].id - if len(allocated_qubits) < self.num_qubits: - allocated_qubits.add(qubit_id) - active_qubits.add(qubit_id) - if qubit_id not in self._current_mapping: - not_in_mapping_qubits.append(qubit_id) - # not_in_mapping_qubits.add(qubit_id) - - elif isinstance(cmd.gate, DeallocateQubitGate): - qubit_id = cmd.qubits[0][0].id - if qubit_id in active_qubits: - active_qubits.remove(qubit_id) - # Do not remove from allocated_qubits as this would - # allow the mapper to add a new qubit to this location - # before the next swaps which is currently not - # supported - - # Process a two qubit gate: - elif len(qubit_ids) == 2: - # At least one qubit is not an active qubit: - if qubit_ids[0] not in active_qubits \ - or qubit_ids[1] not in active_qubits: - active_qubits.discard(qubit_ids[0]) - active_qubits.discard(qubit_ids[1]) - else: - if not_in_mapping_qubits: - self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.qubit_manager.graph, - not_in_mapping_qubits, self._stored_commands) - not_in_mapping_qubits = [] + def rank_allocate_cmds(l, dag): + return l - self.qubit_manager.push_interaction( - qubit_ids[0], qubit_ids[1]) + allocate_cmds = rank_allocate_cmds( + allocate_cmds, self.qubit_manager._dag)[:num_available_qubits] + not_in_mapping_qubits = [node.logical_id for node in allocate_cmds] - if not_in_mapping_qubits: - self.current_mapping = self._add_qubits_to_mapping( - self._current_mapping, self.qubit_manager.graph, - not_in_mapping_qubits, self._stored_commands) + new_mapping = self._add_qubits_to_mapping(self._current_mapping, + self.qubit_manager.graph, + not_in_mapping_qubits, + self.qubit_manager._dag) - def _send_possible_commands(self): - """ - Send the stored commands possible without changing the mapping. 
- """ - active_ids = deepcopy(self._currently_allocated_ids) - - for logical_id in self._current_mapping: - # So that loop doesn't stop before AllocateGate applied - active_ids.add(logical_id) - - new_stored_commands = CommandList() - for i in range(len(self._stored_commands)): - cmd = self._stored_commands[i] - if not active_ids: - new_stored_commands += self._stored_commands[i:] - break - if isinstance(cmd.gate, AllocateQubitGate): - if cmd.qubits[0][0].id in self._current_mapping: - qb0 = WeakQubitRef( - engine=self, - idx=self._current_mapping[cmd.qubits[0][0].id]) - self._currently_allocated_ids.add(cmd.qubits[0][0].id) - self.send([ - Command(engine=self, - gate=AllocateQubitGate(), - qubits=([qb0], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) - ]) - else: - new_stored_commands.append(cmd) - elif isinstance(cmd.gate, DeallocateQubitGate): - if cmd.qubits[0][0].id in active_ids: - qb0 = WeakQubitRef( - engine=self, - idx=self._current_mapping[cmd.qubits[0][0].id]) - self._currently_allocated_ids.remove(cmd.qubits[0][0].id) - active_ids.remove(cmd.qubits[0][0].id) - self._current_mapping.pop(cmd.qubits[0][0].id) - self.send([ - Command(engine=self, - gate=DeallocateQubitGate(), - qubits=([qb0], ), - tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)]) - ]) - else: - new_stored_commands.append(cmd) - else: - send_gate = True - logical_ids = [] - for qureg in cmd.all_qubits: - for qubit in qureg: - logical_ids.append(qubit.id) - - if qubit.id not in active_ids: - send_gate = False - - if send_gate: - # Check that mapped ids are connected by an edge on the - # graph - if len(logical_ids) == 2: - send_gate = self.qubit_manager.execute_gate( - self._current_mapping, *logical_ids) - - if send_gate: - self._send_cmd_with_mapped_ids(cmd) - else: - # Cannot execute gate -> make sure no other gate will use - # any of those qubits to preserve sequence - for logical_id in logical_ids: - active_ids.discard(logical_id) - new_stored_commands.append(cmd) - 
self._stored_commands = new_stored_commands + self.current_mapping = new_mapping + + for cmd in self.qubit_manager.execute_allocate_cmds( + allocate_cmds, self._current_mapping): + self._send_single_command(cmd) + + cmds_to_execute, _ = self.qubit_manager.get_executable_commands( + self._current_mapping) + for cmd in cmds_to_execute: + self._send_single_command(cmd) def _run(self): """ @@ -576,18 +500,18 @@ def _run(self): executes all possible gates, and finally deallocates mapped qubit ids which don't store any information. """ - num_of_stored_commands_before = len(self._stored_commands) - # Go through the command list and generate a list of paths. - # At the same time, add soon-to-be-allocated qubits to the mapping - self._process_commands() + num_of_stored_commands_before = self.qubit_manager.size() self._send_possible_commands() - if not self._stored_commands: + if not self.qubit_manager.size(): return swaps, all_swapped_qubits = self.qubit_manager.generate_swaps( - self._current_mapping, self._cost_fun, self._opts) + self._current_mapping, + cost_fun=self._cost_fun, + opts=self._opts, + max_steps=self._max_swap_steps) if swaps: # first mapping requires no swaps backend_ids_used = { @@ -678,8 +602,9 @@ def _run(self): # Send possible gates: self._send_possible_commands() + # Check that mapper actually made progress - if len(self._stored_commands) == num_of_stored_commands_before: + if self.qubit_manager.size() == num_of_stored_commands_before: raise RuntimeError("Mapper is potentially in an infinite loop. " "It is likely that the algorithm requires " "too many qubits. Increase the number of " @@ -697,15 +622,24 @@ def receive(self, command_list): receive. 
""" for cmd in command_list: - print(cmd) + + qubit_ids = [ + qubit.id for qureg in cmd.all_qubits for qubit in qureg + ] + + if len(qubit_ids) > 2 or not qubit_ids: + raise Exception("Invalid command (number of qubits): " + + str(cmd)) + if isinstance(cmd.gate, FlushGate): - while self._stored_commands: + while self.qubit_manager.size() > 0: self._run() self.send([cmd]) else: - self._stored_commands.append(cmd) + self.qubit_manager.add_command(cmd) + # Storage is full: Create new map and send some gates away: - if len(self._stored_commands) >= self.storage: + if self.qubit_manager.size() >= self.storage: self._run() def __str__(self): diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index e5c9a0855..e4a0a7f08 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -26,6 +26,19 @@ from projectq.cengines import _graphmapper as graphm +import projectq.cengines._multi_qubit_gate_manager as multi + + +def decay_to_string(self): + s = '' + for qubit_id, node in self._backend_ids.items(): + s += '{}: {}, {}\n'.format(qubit_id, node['decay'], node['lifetime']) + return s + + +multi.DecayManager.__str__ = decay_to_string +Command.__repr__ = Command.__str__ + def allocate_all_qubits_cmd(mapper): qb = [] @@ -126,6 +139,15 @@ def simple_mapper(simple_graph): # ============================================================================== +def get_node_list(self): + return list(self._dag._dag.nodes) + + +graphm.MultiQubitGateManager._get_node_list = get_node_list + +# ============================================================================== + + def test_is_available(simple_graph): mapper = graphm.GraphMapper(graph=simple_graph) qb0 = WeakQubitRef(engine=None, idx=0) @@ -160,23 +182,19 @@ def test_invalid_gates(simple_mapper): mapper.receive([cmd0, cmd1, cmd2, cmd3, cmd_flush]) -def test_run_infinite_loop_detection(simple_mapper): - mapper, backend = simple_mapper +# def 
test_run_infinite_loop_detection(simple_mapper): +# mapper, backend = simple_mapper +# mapper.current_mapping = {i: i for i in range(7)} - qb0 = WeakQubitRef(engine=None, idx=0) - qb1 = WeakQubitRef(engine=None, idx=1) +# qb0 = WeakQubitRef(engine=None, idx=0) +# qb1 = WeakQubitRef(engine=None, idx=6) - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) +# qb_flush = WeakQubitRef(engine=None, idx=-1) +# cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[]) - with pytest.raises(RuntimeError): - mapper.receive([cmd0, cmd_flush]) - - mapper._stored_commands.clear() - cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) - with pytest.raises(RuntimeError): - mapper.receive([cmd0, cmd_flush]) +# cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) +# with pytest.raises(RuntimeError): +# mapper.receive([cmd0, cmd_flush]) def test_resetting_mapping_to_none(simple_graph): @@ -373,132 +391,21 @@ def test_send_possible_commands(simple_graph, simple_mapper): qb1 = WeakQubitRef(engine=None, idx=qb1_id) cmd1 = Command(None, X, qubits=([qb0], ), controls=[qb1]) cmd2 = Command(None, X, qubits=([qb1], ), controls=[qb0]) - mapper._stored_commands += [cmd1, cmd2] + mapper.qubit_manager.add_command(cmd1) + mapper.qubit_manager.add_command(cmd2) mapper._send_possible_commands() - assert len(mapper._stored_commands) == 0 + assert mapper.qubit_manager.size() == 0 - for qb0_id, qb1_id in itertools.permutations(range(8), 2): + for qb0_id, qb1_id in itertools.permutations(range(7), 2): if ((qb0_id, qb1_id) not in neighbours and (qb1_id, qb0_id) not in neighbours): qb0 = WeakQubitRef(engine=None, idx=qb0_id) qb1 = WeakQubitRef(engine=None, idx=qb1_id) cmd = Command(None, X, qubits=([qb0], ), controls=[qb1]) - mapper._stored_commands.clear() - mapper._stored_commands += [cmd] + 
mapper.qubit_manager.clear() + mapper.qubit_manager.add_command(cmd) mapper._send_possible_commands() - assert len(mapper._stored_commands) == 1 - - -def test_send_possible_commands_allocate(simple_mapper): - mapper, backend = simple_mapper - - qb0 = WeakQubitRef(engine=None, idx=0) - cmd0 = Command(engine=None, - gate=Allocate, - qubits=([qb0], ), - controls=[], - tags=[]) - mapper._stored_commands += [cmd0] - mapper._currently_allocated_ids = set([10]) - # not in mapping: - mapper.current_mapping = dict() - assert len(backend.received_commands) == 0 - mapper._send_possible_commands() - assert len(backend.received_commands) == 0 - assert mapper._stored_commands == [cmd0] - # in mapping: - mapper.current_mapping = {0: 3} - mapper._send_possible_commands() - assert len(mapper._stored_commands) == 0 - # Only self._run() sends Allocate gates - mapped0 = WeakQubitRef(engine=None, idx=3) - received_cmd = Command(engine=mapper, - gate=Allocate, - qubits=([mapped0], ), - controls=[], - tags=[LogicalQubitIDTag(0)]) - assert backend.received_commands[0] == received_cmd - assert mapper._currently_allocated_ids == set([10, 0]) - - -def test_send_possible_commands_allocation_no_active_qubits( - grid22_graph_mapper): - mapper, backend = grid22_graph_mapper - - qb0 = WeakQubitRef(engine=None, idx=0) - qb1 = WeakQubitRef(engine=None, idx=1) - qb2 = WeakQubitRef(engine=None, idx=2) - qb3 = WeakQubitRef(engine=None, idx=3) - qb4 = WeakQubitRef(engine=None, idx=4) - - cmd_list = [ - Command(engine=None, gate=Allocate, qubits=([qb0], )), - Command(engine=None, gate=Allocate, qubits=([qb1], )), - Command(engine=None, gate=Allocate, qubits=([qb2], )), - Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), - Command(engine=None, gate=X, qubits=([qb1], ), controls=[qb2]), - Command(engine=None, gate=Allocate, qubits=([qb3], )), - Command(engine=None, gate=X, qubits=([qb3], )), - Command(engine=None, gate=Deallocate, qubits=([qb3], )), - Command(engine=None, gate=Deallocate, 
qubits=([qb2], )), - Command(engine=None, gate=Deallocate, qubits=([qb1], )), - Command(engine=None, gate=Deallocate, qubits=([qb0], )), - Command(engine=None, gate=Allocate, qubits=([qb4], )), - ] - - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - - mapper._stored_commands += cmd_list + [cmd_flush] - - mapper._run() - assert len(mapper._stored_commands) == 8 - # NB: after swap, can actually send Deallocate to qb0 - assert mapper._stored_commands[:6] == cmd_list[4:10] - assert mapper._stored_commands[6] == cmd_list[11] - - -def test_send_possible_commands_allocation_no_active_qubits( - grid22_graph_mapper): - mapper, backend = grid22_graph_mapper - - qb0 = WeakQubitRef(engine=None, idx=0) - qb1 = WeakQubitRef(engine=None, idx=1) - qb2 = WeakQubitRef(engine=None, idx=2) - qb3 = WeakQubitRef(engine=None, idx=3) - qb4 = WeakQubitRef(engine=None, idx=4) - - cmd_list = [ - Command(engine=None, gate=Allocate, qubits=([qb0], )), - Command(engine=None, gate=Allocate, qubits=([qb1], )), - Command(engine=None, gate=Allocate, qubits=([qb2], )), - Command(engine=None, gate=Allocate, qubits=([qb3], )), - Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb2]), - ] - - qb_flush = WeakQubitRef(engine=None, idx=-1) - cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) - - mapper._stored_commands += cmd_list + [cmd_flush] - - mapper._run() - assert mapper.num_mappings == 1 - assert len(mapper._stored_commands) == 1 - assert mapper._stored_commands[0] == cmd_flush - - cmd_list = [ - Command(engine=None, gate=X, qubits=([qb2], ), controls=[qb3]), - Command(engine=None, gate=Deallocate, qubits=([qb3], )), - Command(engine=None, gate=Deallocate, qubits=([qb2], )), - Command(engine=None, gate=Deallocate, qubits=([qb1], )), - Command(engine=None, gate=Deallocate, qubits=([qb0], )), - Command(engine=None, gate=Allocate, qubits=([qb4], )), - ] - mapper._stored_commands = cmd_list + 
[cmd_flush] - mapper._run() - assert mapper.num_mappings == 1 - assert len(mapper._stored_commands) == 2 - assert mapper._stored_commands[0] == cmd_list[-1] + assert mapper.qubit_manager.size() == 1 def test_send_possible_commands_deallocate(simple_mapper): @@ -510,19 +417,19 @@ def test_send_possible_commands_deallocate(simple_mapper): qubits=([qb0], ), controls=[], tags=[]) - mapper._stored_commands = [cmd0] + mapper.qubit_manager.add_command(cmd0) mapper.current_mapping = dict() mapper._currently_allocated_ids = set([10]) # not yet allocated: mapper._send_possible_commands() assert len(backend.received_commands) == 0 - assert mapper._stored_commands == [cmd0] + assert mapper.qubit_manager.size() == 1 # allocated: mapper.current_mapping = {0: 3} mapper._currently_allocated_ids.add(0) mapper._send_possible_commands() assert len(backend.received_commands) == 1 - assert len(mapper._stored_commands) == 0 + assert mapper.qubit_manager.size() == 0 assert mapper.current_mapping == dict() assert mapper._currently_allocated_ids == set([10]) @@ -544,34 +451,7 @@ def test_send_possible_commands_no_initial_mapping(simple_mapper): mapper.receive(all_cmds) assert mapper._current_mapping - assert len(mapper._stored_commands) == 0 - - -def test_send_possible_commands_keep_remaining_gates(simple_mapper): - mapper, backend = simple_mapper - - qb0 = WeakQubitRef(engine=None, idx=0) - qb1 = WeakQubitRef(engine=None, idx=1) - cmd0 = Command(engine=None, - gate=Allocate, - qubits=([qb0], ), - controls=[], - tags=[]) - cmd1 = Command(engine=None, - gate=Deallocate, - qubits=([qb0], ), - controls=[], - tags=[]) - cmd2 = Command(engine=None, - gate=Allocate, - qubits=([qb1], ), - controls=[], - tags=[]) - - mapper._stored_commands = [cmd0, cmd1, cmd2] - mapper.current_mapping = {0: 0} - mapper._send_possible_commands() - assert mapper._stored_commands == [cmd2] + assert mapper.qubit_manager.size() == 0 def test_send_possible_commands_one_inactive_qubit(simple_mapper): @@ -585,10 +465,11 
@@ def test_send_possible_commands_one_inactive_qubit(simple_mapper): controls=[], tags=[]) cmd1 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) - mapper._stored_commands = [cmd0, cmd1] + mapper.qubit_manager.add_command(cmd0) + mapper.qubit_manager.add_command(cmd1) mapper.current_mapping = {0: 0} mapper._send_possible_commands() - assert mapper._stored_commands == [cmd1] + mapper.qubit_manager._get_node_list()[0].cmd == cmd1 def test_run_and_receive(simple_graph, simple_mapper): @@ -614,11 +495,10 @@ def test_run_and_receive(simple_graph, simple_mapper): all_cmds = list(itertools.chain(allocate_cmds, gates, deallocate_cmds)) mapper.receive(all_cmds) - assert mapper._stored_commands == all_cmds qb_flush = WeakQubitRef(engine=None, idx=-1) cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) mapper.receive([cmd_flush]) - assert mapper._stored_commands == [] + assert mapper.qubit_manager.size() == 0 assert len(backend.received_commands) == len(all_cmds) + 1 assert mapper._currently_allocated_ids == allocated_qubits_ref @@ -631,7 +511,7 @@ def test_run_and_receive(simple_graph, simple_mapper): assert mapper._currently_allocated_ids == allocated_qubits_ref for idx in allocated_qubits_ref: assert idx in mapper.current_mapping - assert mapper._stored_commands == [] + assert mapper.qubit_manager.size() == 0 assert len(mapper.current_mapping) == 6 assert mapper.num_mappings == 1 @@ -654,14 +534,15 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): all_cmds[3] = cmd - mapper._stored_commands.clear() - mapper._stored_commands += all_cmds + mapper.qubit_manager.clear() + mapper.receive(all_cmds) mapper._run() assert mapper.num_mappings == 1 + if mapper.current_mapping[2] == 2: # qb[2] has not moved, all_cmds[5] and everything # thereafter is possible - assert mapper._stored_commands == all_cmds[-1:] + assert mapper.qubit_manager.size() == 0 assert mapper.current_mapping == { 0: 1, 1: 0, @@ -707,22 +588,20 @@ def 
test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): all_cmds[idx] = cmd - mapper._stored_commands = all_cmds + mapper.receive(all_cmds) mapper._run() assert mapper.num_mappings == 1 - - if mapper.current_mapping[4] == 4 and mapper.current_mapping[5] == 5: - if mapper.current_mapping[6] == 3: - # qb[6] is on position 3, all commands are possible - assert mapper._stored_commands == all_cmds[-1:] - assert mapper.current_mapping == {0: 2, 4: 4, 5: 5, 6: 3} - else: - # qb[6] is on position 2, all_cmds[8] is not possible - assert mapper._stored_commands == all_cmds[-2:] - assert mapper.current_mapping == {0: 1, 4: 4, 5: 5, 6: 2} + assert mapper.current_mapping[4] == 4 + assert mapper.current_mapping[5] == 5 + assert mapper.current_mapping[6] in [3, 6] + + if mapper.current_mapping[6] == 3: + # qb[6] is on position 3, all commands are possible + assert mapper.qubit_manager.size() == 0 + assert mapper.current_mapping == {0: 2, 4: 4, 5: 5, 6: 3} else: - # Should not happen... 
- assert False + assert mapper.qubit_manager.size() == 0 + assert mapper.current_mapping == {0: 3, 4: 4, 5: 5, 6: 6} def test_allocate_too_many_qubits(simple_mapper): @@ -763,7 +642,7 @@ def test_send_possible_commands_reallocate_backend_id(grid22_graph_mapper): cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) mapper.receive(all_cmds + [cmd_flush]) assert mapper.current_mapping == {0: 0, 2: 2, 3: 3, 4: 1} - assert len(mapper._stored_commands) == 0 + assert mapper.qubit_manager.size() == 0 assert len(backend.received_commands) == 9 @@ -863,7 +742,7 @@ def test_check_that_local_optimizer_doesnt_merge(simple_graph): cmd1 = Command(None, X, qubits=([qb0], )) cmd2 = Command(engine=None, gate=Deallocate, qubits=([qb0], )) mapper.receive([cmd0, cmd1, cmd2]) - assert len(mapper._stored_commands) == 0 + assert mapper.qubit_manager.size() == 0 mapper.current_mapping = {1: 0} cmd3 = Command(engine=None, gate=Allocate, qubits=([qb1], )) cmd4 = Command(None, X, qubits=([qb1], )) @@ -887,15 +766,14 @@ def test_mapper_to_str(simple_graph): CNOT | (qureg[6], qureg[4]) CNOT | (qureg[6], qureg[0]) - CNOT | (qureg[6], qureg[1]) + CNOT | (qureg[4], qureg[5]) All(Measure) | qureg eng.flush() str_repr = str(mapper) - assert str_repr.count("Number of mappings: 2") == 1 - assert str_repr.count("1: 1") == 1 - assert str_repr.count("2: 1") == 2 + assert str_repr.count("Number of mappings: 1") == 1 + assert str_repr.count("2: 1") == 1 assert str_repr.count("3: 1") == 1 sent_gates = [cmd.gate for cmd in backend.received_commands] From 0aef11164432c6e9a28d018a8d3f28ff62c99f78 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 17 Oct 2019 09:54:30 +0200 Subject: [PATCH 16/25] Fix some linting-related issues --- docs/projectq.cengines.rst | 7 - projectq/cengines/_graphmapper.py | 71 +++++---- projectq/cengines/_graphmapper_test.py | 2 +- .../cengines/_multi_qubit_gate_manager.py | 141 ++++++++++-------- .../_multi_qubit_gate_manager_test.py | 64 ++------ 5 files 
changed, 133 insertions(+), 152 deletions(-) diff --git a/docs/projectq.cengines.rst b/docs/projectq.cengines.rst index 47d369cb6..ce2da4b9b 100755 --- a/docs/projectq.cengines.rst +++ b/docs/projectq.cengines.rst @@ -43,10 +43,3 @@ Multi-qubit gate sub-module .. automodule:: projectq.cengines._multi_qubit_gate_manager :members: :imported-members: - -Command list sub-module -^^^^^^^^^^^^^^^^^^^^^^^ - -.. automodule:: projectq.cengines._command_list - :members: - :imported-members: diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 735b83382..8fcdd5280 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -24,7 +24,7 @@ from copy import deepcopy import random -import itertools +import sys from projectq.cengines import (BasicMapperEngine, return_swap_depth) from projectq.meta import LogicalQubitIDTag @@ -37,14 +37,15 @@ # ------------------------------------------------------------------------------ # https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6 -import sys if sys.version_info[0] >= 3 and sys.version_info[1] > 6: # pragma: no cover def uniquify_list(seq): + #pylint: disable=missing-function-docstring return list(dict.fromkeys(seq)) else: # pragma: no cover def uniquify_list(seq): + #pylint: disable=missing-function-docstring seen = set() seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)] @@ -78,6 +79,8 @@ def _add_qubits_to_mapping_fcfs(current_mapping, graph, new_logical_qubit_ids, Returns: A new mapping """ + #pylint: disable=unused-argument + mapping = deepcopy(current_mapping) currently_used_nodes = sorted([v for _, v in mapping.items()]) available_nodes = [n for n in graph if n not in currently_used_nodes] @@ -199,11 +202,11 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, available_nodes = sorted( [n for n in graph if n not in currently_used_nodes], key=lambda n: len(graph[n])) - interactions = 
commands_dag.calculate_interaction_list() for logical_id in uniquify_list(new_logical_qubit_ids): qubit_interactions = uniquify_list([ - i[0] if i[0] != logical_id else i[1] for i in interactions + i[0] if i[0] != logical_id else i[1] + for i in commands_dag.calculate_interaction_list() if logical_id in i ]) @@ -300,7 +303,7 @@ def __init__(self, graph, storage=1000, add_qubits_to_mapping=_add_qubits_to_mapping, - opts={}): + opts=None): """ Initialize a GraphMapper compiler engine. @@ -342,8 +345,13 @@ def __init__(self, """ BasicMapperEngine.__init__(self) + if opts is None: + self._opts = {} + else: + self._opts = opts + self.qubit_manager = MultiQubitGateManager(graph=graph, - decay_opts=opts.get( + decay_opts=self._opts.get( 'decay_opts', { 'delta': 0.001, 'max_lifetime': 5 @@ -365,10 +373,6 @@ def __init__(self, # Function to add new logical qubits ids to the mapping self.set_add_qubits_to_mapping(add_qubits_to_mapping) - self._cost_fun = opts.get('cost_fun', look_ahead_parallelism_cost_fun) - self._opts = opts.get('opts', {'W': 0.5}) - self._max_swap_steps = opts.get('max_swap_steps', 30) - # Statistics: self.num_mappings = 0 self.depth_of_swaps = dict() @@ -393,6 +397,17 @@ def current_mapping(self, current_mapping): } def set_add_qubits_to_mapping(self, add_qubits_to_mapping): + """ + Modify the callback function used to add qubits to an existing mapping + + Args: + add_qubits_to_mapping (function): Callback function + + Note: + Signature for callback function is: + ``add_qubits_to_mapping(current_mapping, graph, + new_logical_qubit_ids, command_dag)`` + """ if isinstance(add_qubits_to_mapping, str): if add_qubits_to_mapping.lower() == "fcfs": self._add_qubits_to_mapping = _add_qubits_to_mapping_fcfs @@ -467,17 +482,18 @@ def _send_possible_commands(self): num_available_qubits = self.num_qubits - len(self._current_mapping) if allocate_cmds and num_available_qubits > 0: - def rank_allocate_cmds(l, dag): - return l + def rank_allocate_cmds(cmds_list, dag): + 
#pylint: disable=unused-argument + return cmds_list allocate_cmds = rank_allocate_cmds( - allocate_cmds, self.qubit_manager._dag)[:num_available_qubits] + allocate_cmds, self.qubit_manager.dag)[:num_available_qubits] not_in_mapping_qubits = [node.logical_id for node in allocate_cmds] new_mapping = self._add_qubits_to_mapping(self._current_mapping, self.qubit_manager.graph, not_in_mapping_qubits, - self.qubit_manager._dag) + self.qubit_manager.dag) self.current_mapping = new_mapping @@ -509,19 +525,18 @@ def _run(self): swaps, all_swapped_qubits = self.qubit_manager.generate_swaps( self._current_mapping, - cost_fun=self._cost_fun, - opts=self._opts, - max_steps=self._max_swap_steps) - - if swaps: # first mapping requires no swaps - backend_ids_used = { - self._current_mapping[logical_id] - for logical_id in self._currently_allocated_ids - } + cost_fun=self._opts.get('cost_fun', + look_ahead_parallelism_cost_fun), + opts=self._opts.get('opts', {'W': 0.5}), + max_steps=self._opts.get('max_swap_steps', 30)) + if swaps: # Get a list of the qubits we need to allocate just to perform the # swaps - not_allocated_ids = all_swapped_qubits.difference(backend_ids_used) + not_allocated_ids = all_swapped_qubits.difference({ + self._current_mapping[logical_id] + for logical_id in self._currently_allocated_ids + }) # Calculate temporary internal reverse mapping new_internal_mapping = deepcopy(self._reverse_current_mapping) @@ -628,8 +643,8 @@ def receive(self, command_list): ] if len(qubit_ids) > 2 or not qubit_ids: - raise Exception("Invalid command (number of qubits): " + - str(cmd)) + raise Exception("Invalid command (number of qubits): " + + str(cmd)) if isinstance(cmd.gate, FlushGate): while self.qubit_manager.size() > 0: @@ -663,7 +678,7 @@ def __str__(self): num_swaps_per_mapping_str += "\n {:3d}: {:3d}".format( num_swaps_per_mapping, num_mapping) - return ("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + - "Number of swaps per mapping:{}\n\n{}\n\n").format( + return 
("Number of mappings: {}\n" + "Depth of swaps: {}\n\n" + + "Number of swaps per mapping:{}\n\n{}\n\n").format( self.num_mappings, depth_of_swaps_str, num_swaps_per_mapping_str, str(self.qubit_manager)) diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index e4a0a7f08..cc09837ab 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -140,7 +140,7 @@ def simple_mapper(simple_graph): def get_node_list(self): - return list(self._dag._dag.nodes) + return list(self.dag._dag.nodes) graphm.MultiQubitGateManager._get_node_list = get_node_list diff --git a/projectq/cengines/_multi_qubit_gate_manager.py b/projectq/cengines/_multi_qubit_gate_manager.py index 8b86b9ad2..28e1c33aa 100644 --- a/projectq/cengines/_multi_qubit_gate_manager.py +++ b/projectq/cengines/_multi_qubit_gate_manager.py @@ -27,6 +27,53 @@ # ============================================================================== +def _topological_sort(dag): + indegree_map = {} + zero_indegree = [] + for v, d in dag.in_degree(): + if d > 0: + indegree_map[v] = d + else: + zero_indegree.append(v) + + while zero_indegree: + node = zero_indegree.pop() + for child in dag[node]: + indegree_map[child] -= 1 + if indegree_map[child] == 0: + zero_indegree.append(child) + del indegree_map[child] + yield node + + +# Coffaman-Graham algorithm with infinite width +def _coffman_graham_ranking(dag): + layers = [] + levels = {} + + for node in topological_sort(dag): + dependant_level = -1 + for dependant in dag.pred[node]: + level = levels[dependant] + if level > dependant_level: + dependant_level = level + + level = -1 + if dependant_level < len(layers) - 1: + level = dependant_level + 1 + if level < 0: + layers.append([]) + level = len(layers) - 1 + + layers[level].append(node) + levels[node] = level + + return layers + + +# ============================================================================== + + def _sum_distance_over_gates(node_list, 
mapping, distance_matrix): """ Calculate the sum of distances between pairs of qubits @@ -48,7 +95,7 @@ def _sum_distance_over_gates(node_list, mapping, distance_matrix): def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, opts): - """ + r""" Nearest neighbours cost function .. math:: @@ -82,21 +129,22 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, opts): - """ + r""" Cost function using nearest-neighbour interactions as well as considering gates from the near-term layer (provided it has been calculated) in order to favour swap operations that can be performed in parallel. .. math:: - H = M \\left[\\frac{1}{|F|}\sum_{\mathrm{gate}\ \in\ F} + H = M \left[\frac{1}{|F|}\sum_{\mathrm{gate}\ \in\ F} D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) - + \\frac{W}{|E|}\sum_{\mathrm{gate}\ \in\ E} - D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) \\right] + + \frac{W}{|E|}\sum_{\mathrm{gate}\ \in\ E} + D(\mathrm{gate}.q_1, \mathrm{gate}.q_2) \right] where: - - :math:`M` is defined as :math:`\max(decay(SWAP.q_1), decay(SWAP.q_2))` + - :math:`M` is defined as :math:`\max(\mathrm{decay}(SWAP.q_1), + \mathrm{decay}(SWAP.q_2))` - :math:`F` is the ensemble of gates in front layer - :math:`E` is the ensemble of gates in near-term layer - :math:`D` is the distance matrix @@ -469,23 +517,6 @@ def remove_from_front_layer(self, cmd): if isinstance(node, _DAGNodeDouble): self.front_layer_for_cost_fun.remove(node) - def max_distance_in_dag(self): - """ - Calculate the distance between the front layer and each node of the - DAG. - - A gate with distance 0 is on the front layer. 
- - Returns: - Python dictionary indexed by gate with their distance as value - """ - node_max_distance = {} - for node in self.front_layer: - node_max_distance[node] = 0 - self._max_distance_in_dag(node_max_distance, node, 1) - - return node_max_distance - def calculate_near_term_layer(self, mapping): """ Calculate the first order near term layer. @@ -569,9 +600,9 @@ def _add_to_interaction_graph(self, node, graph, max_order): interaction graph """ if isinstance(node, _DAGNodeDouble) \ - and (node.logical_id0 not in graph \ - or node.logical_id1 not in graph \ - or (len(graph[node.logical_id0]) < max_order + and (node.logical_id0 not in graph + or node.logical_id1 not in graph + or (len(graph[node.logical_id0]) < max_order and len(graph[node.logical_id1]) < max_order)): graph.add_edge(node.logical_id0, node.logical_id1) @@ -626,7 +657,7 @@ def __init__(self, graph, decay_opts=None): if decay_opts is None: decay_opts = {} - self._dag = CommandDAG() + self.dag = CommandDAG() self._decay = DecayManager(decay_opts.get('delta', 0.001), decay_opts.get('max_lifetime', 5)) @@ -637,7 +668,7 @@ def size(self): .. seealso:: :py:meth:`.CommandDAG.size` """ - return self._dag.size() + return self.dag.size() def clear(self): """ @@ -647,7 +678,7 @@ def clear(self): :py:meth:`.CommandDAG.clear` :py:meth:`.DecayManager.clear` """ - self._dag.clear() + self.dag.clear() self._decay.clear() def generate_swaps(self, @@ -669,29 +700,11 @@ def generate_swaps(self, max_steps (int): (optional) Maximum number of swap steps to attempt before giving up opts (dict): (optional) Extra parameters for cost function call - (see note below) - - .. note:: - - The ``opts`` optional parameter may contain the following key-values: - - .. list-table:: - :header-rows: 1 - - * - Key - - Type - - Description - * - near_term_layer - - ``int`` - - | If 0 (default) do not consider near-term gates - | when generating the list of swap operations. 
- | If >0, calculate the near-term layer using - | all gates in the DAG that have a distance equal - | to or less than this value. - * - ... - - ... - - | Any other parameter will be passed onto the cost - | function when it is called. + + + .. seealso:: + :py:meth:`nearest_neighbours_cost_fun` + :py:meth:`look_ahead_parallelism_cost_fun` Returns: A tuple (list, set) of swap operations (tuples of backend IDs) and @@ -699,7 +712,7 @@ def generate_swaps(self, operations. """ - if not self._dag.front_layer_for_cost_fun: + if not self.dag.front_layer_for_cost_fun: return ([], set()) if opts is None: @@ -708,7 +721,7 @@ def generate_swaps(self, self._decay.clear() opts['decay'] = self._decay - self._dag.calculate_near_term_layer(current_mapping) + self.dag.calculate_near_term_layer(current_mapping) mapping = current_mapping.copy() swaps = [] @@ -747,7 +760,7 @@ def add_command(self, cmd): :py:meth:`.GatesDAG.add_command` """ - return self._dag.add_command(cmd) + return self.dag.add_command(cmd) def get_executable_commands(self, mapping): """ @@ -769,7 +782,7 @@ def get_executable_commands(self, mapping): # Reset after each pass has_command_to_execute = False - for node in self._dag.front_layer.copy(): + for node in self.dag.front_layer.copy(): if isinstance(node, _DAGNodeSingle): if isinstance(node.cmd.gate, AllocateQubitGate): # Allocating a qubit already in mapping is allowed @@ -778,21 +791,21 @@ def get_executable_commands(self, mapping): cmds_to_execute.append(node.cmd) cmds_to_execute.extend( node.compatible_successor_cmds) - self._dag.remove_from_front_layer(node.cmd) + self.dag._remove_from_front_layer(node.cmd) elif node not in allocate_cmds: allocate_cmds.append(node) elif node.logical_id in mapping: has_command_to_execute = True cmds_to_execute.append(node.cmd) cmds_to_execute.extend(node.compatible_successor_cmds) - self._dag.remove_from_front_layer(node.cmd) + self.dag._remove_from_front_layer(node.cmd) elif node.logical_id0 in mapping and node.logical_id1 
in mapping: if self.graph.has_edge(mapping[node.logical_id0], mapping[node.logical_id1]): has_command_to_execute = True cmds_to_execute.append(node.cmd) cmds_to_execute.extend(node.compatible_successor_cmds) - self._dag.remove_from_front_layer(node.cmd) + self.dag._remove_from_front_layer(node.cmd) return cmds_to_execute, allocate_cmds @@ -814,7 +827,7 @@ def execute_allocate_cmds(self, allocate_cmds, mapping): if node.logical_id in mapping: cmds_to_execute.append(node.cmd) cmds_to_execute.extend(node.compatible_successor_cmds) - self._dag.remove_from_front_layer(node.cmd) + self.dag._remove_from_front_layer(node.cmd) return cmds_to_execute @@ -836,7 +849,7 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): .. seealso:: :py:meth:`CommandDAG.calculate_qubit_interaction_subgraphs` """ - return self._dag.calculate_qubit_interaction_subgraphs(max_order) + return self.dag.calculate_qubit_interaction_subgraphs(max_order) # ========================================================================== @@ -866,7 +879,7 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): # those concerned by a gate swap_candidates = [] - for node in self._dag.front_layer_for_cost_fun: + for node in self.dag.front_layer_for_cost_fun: for logical_id in node.logical_ids: for backend_id1 in self.graph[mapping[logical_id]]: swap_candidates.append( @@ -882,7 +895,7 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): backend_id1) scores.append( - cost_fun(self._dag, new_mapping, self.distance_matrix, + cost_fun(self.dag, new_mapping, self.distance_matrix, (backend_id0, backend_id1), opts)) # Return the swap candidate with the lowest score @@ -895,7 +908,7 @@ def _can_execute_some_gate(self, mapping): Args: mapping (dict): Current mapping """ - for node in self._dag.front_layer: + for node in self.dag.front_layer: if isinstance(node, _DAGNodeSingle) and node.logical_id in mapping: return True diff --git a/projectq/cengines/_multi_qubit_gate_manager_test.py 
b/projectq/cengines/_multi_qubit_gate_manager_test.py index 5f7243ad3..3ef92d6f5 100644 --- a/projectq/cengines/_multi_qubit_gate_manager_test.py +++ b/projectq/cengines/_multi_qubit_gate_manager_test.py @@ -441,15 +441,15 @@ def test_command_dag_remove_from_front_layer1(command_dag): search_cmd(command_dag, cmd0) with pytest.raises(RuntimeError): - command_dag.remove_from_front_layer(cmd0) + command_dag._remove_from_front_layer(cmd0) assert command_dag.front_layer == [dag_allocate0] - command_dag.remove_from_front_layer(allocate0) + command_dag._remove_from_front_layer(allocate0) assert command_dag.front_layer == [dag_deallocate] assert command_dag._logical_ids_in_diag == {0} - command_dag.remove_from_front_layer(deallocate0) + command_dag._remove_from_front_layer(deallocate0) assert not command_dag.front_layer @@ -474,66 +474,26 @@ def test_command_dag_remove_from_front_layer2(command_dag): dag_node78 = search_cmd(command_dag, cmd78) with pytest.raises(RuntimeError): - command_dag.remove_from_front_layer(cmd12) + command_dag._remove_from_front_layer(cmd12) assert command_dag.front_layer == [dag_node01, dag_node56, dag_node78] - command_dag.remove_from_front_layer(cmd78) + command_dag._remove_from_front_layer(cmd78) assert command_dag.front_layer == [dag_node01, dag_node56] assert command_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} assert 7 not in command_dag._back_layer assert 8 not in command_dag._back_layer - command_dag.remove_from_front_layer(cmd01) + command_dag._remove_from_front_layer(cmd01) assert command_dag.front_layer == [dag_node56, dag_node12] - command_dag.remove_from_front_layer(cmd56) + command_dag._remove_from_front_layer(cmd56) assert command_dag.front_layer == [dag_node12] - command_dag.remove_from_front_layer(cmd12) + command_dag._remove_from_front_layer(cmd12) assert command_dag.front_layer == [dag_node26] -def test_command_dag_max_distance(command_dag): - cmd23a = gen_cmd(2, 3) - cmd56 = gen_cmd(5, 6) - cmd12 = gen_cmd(1, 2) - cmd34 = 
gen_cmd(3, 4) - cmd23b = gen_cmd(2, 3) - cmd46 = gen_cmd(4, 6) - cmd45 = gen_cmd(5, 4) - cmd14 = gen_cmd(4, 1) - - # ---------------------------------- - - command_dag.add_command(cmd23a) - command_dag.add_command(cmd56) - command_dag.add_command(cmd12) - command_dag.add_command(cmd34) - command_dag.add_command(cmd23b) - command_dag.add_command(cmd46) - command_dag.add_command(cmd45) - command_dag.add_command(cmd14) - dag_node23a = search_cmd(command_dag, cmd23a) - dag_node56 = search_cmd(command_dag, cmd56) - dag_node12 = search_cmd(command_dag, cmd12) - dag_node34 = search_cmd(command_dag, cmd34) - dag_node23b = search_cmd(command_dag, cmd23b) - dag_node46 = search_cmd(command_dag, cmd46) - dag_node45 = search_cmd(command_dag, cmd45) - dag_node14 = search_cmd(command_dag, cmd14) - - distance = command_dag.max_distance_in_dag() - assert distance[dag_node23a] == 0 - assert distance[dag_node56] == 0 - assert distance[dag_node12] == 1 - assert distance[dag_node34] == 1 - assert distance[dag_node23b] == 2 - assert distance[dag_node46] == 2 - assert distance[dag_node45] == 3 - assert distance[dag_node14] == 4 - - def test_command_dag_near_term_layer(command_dag): cmd23a = gen_cmd(2, 3) cmd56 = gen_cmd(5, 6) @@ -715,10 +675,10 @@ def test_qubit_manager_clear(qubit_manager): qubit_manager._decay.add_to_decay(0) assert qubit_manager._decay._backend_ids - assert qubit_manager._dag._dag + assert qubit_manager.dag._dag qubit_manager.clear() assert not qubit_manager._decay._backend_ids - assert not qubit_manager._dag._dag + assert not qubit_manager.dag._dag def test_qubit_manager_generate_one_swap_step(qubit_manager): @@ -874,7 +834,7 @@ def test_qubit_manager_get_executable_commands(qubit_manager): manager.add_command(cmd8b) manager.add_command(cmd7) - dag_allocate7 = search_cmd(manager._dag, cmd7) + dag_allocate7 = search_cmd(manager.dag, cmd7) assert manager.size() == 6 @@ -898,7 +858,7 @@ def test_qubit_manager_get_executable_commands(qubit_manager): 
manager.add_command(cmd8b) manager.add_command(cmd7) - dag_allocate7 = search_cmd(manager._dag, cmd7) + dag_allocate7 = search_cmd(manager.dag, cmd7) cmds_to_execute, allocate_cmds = manager.get_executable_commands(mapping) From 3748c00ec49d4587c78a18e3bab741505d044241 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Mon, 21 Oct 2019 16:23:46 +0200 Subject: [PATCH 17/25] Fix some more bugs and linting issues and increase test coverage --- projectq/cengines/_graphmapper.py | 66 ++-- projectq/cengines/_graphmapper_test.py | 42 ++- .../cengines/_multi_qubit_gate_manager.py | 331 +++++++++--------- .../_multi_qubit_gate_manager_test.py | 155 +++++--- 4 files changed, 347 insertions(+), 247 deletions(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 8fcdd5280..5620fa5b0 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -149,15 +149,15 @@ def _add_qubits_to_mapping_smart_init(current_mapping, graph, Returns: A new mapping """ - qubit_interaction_subgraphs = \ - commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) + if not current_mapping: + qubit_interaction_subgraphs = \ + commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) - # Interaction subgraph list can be empty if only single qubit gates are - # present - if not qubit_interaction_subgraphs: - qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] - if not current_mapping: return _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs) return _add_qubits_to_mapping_fcfs(current_mapping, graph, @@ -185,15 +185,15 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, Returns: A new mapping """ - qubit_interaction_subgraphs = \ - commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) 
+ if not current_mapping: + qubit_interaction_subgraphs = \ + commands_dag.calculate_qubit_interaction_subgraphs(max_order=2) - # Interaction subgraph list can be empty if only single qubit gates are - # present - if not qubit_interaction_subgraphs: - qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] + # Interaction subgraph list can be empty if only single qubit gates are + # present + if not qubit_interaction_subgraphs: + qubit_interaction_subgraphs = [list(new_logical_qubit_ids)] - if not current_mapping: return _generate_mapping_minimize_swaps(graph, qubit_interaction_subgraphs) @@ -213,17 +213,24 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, backend_id = None if len(qubit_interactions) == 1: + # If there's only a single qubit interacting and it is already + # present within the mapping, find the neighbour with the highest + # degree + qubit = qubit_interactions[0] if qubit in mapping: - candidates = sorted([ - n for n in graph[mapping[qubit]] - if n not in currently_used_nodes - ], - key=lambda n: len(graph[n])) + candidates = sorted( + [n for n in graph[mapping[qubit]] if n in available_nodes], + key=lambda n: len(graph[n])) if candidates: backend_id = candidates[-1] elif qubit_interactions: + # If there are multiple qubits interacting, find out all the + # neighbouring nodes for each interaction. 
Then within those + # nodes, try to find the one that maximizes the number of + # interactions without swapping + neighbours = [] for qubit in qubit_interactions: if qubit in mapping: @@ -233,14 +240,18 @@ def _add_qubits_to_mapping(current_mapping, graph, new_logical_qubit_ids, else: break + # Try to find an intersection that maximizes the number of + # interactions by iteratively reducing the number of considered + # interactions + intersection = set() - while neighbours: + while neighbours and not intersection: intersection = neighbours[0].intersection(*neighbours[1:]) - if intersection: - backend_id = intersection.pop() - break neighbours.pop() + if intersection: + backend_id = intersection.pop() + if backend_id is None: backend_id = available_nodes.pop() else: @@ -342,6 +353,11 @@ def __init__(self, - | Extra options to pass onto the cost function | (see :py:meth:`.MultiQubitGateManager.generate_swaps`) | Defaults to ``{'W': 0.5}``. + * - max_swap_steps + - ``int`` + - | Maximum number of swap steps per mapping + | (see :py:meth:`.MultiQubitGateManager.generate_swaps`) + | Defaults to 30 """ BasicMapperEngine.__init__(self) @@ -483,7 +499,7 @@ def _send_possible_commands(self): if allocate_cmds and num_available_qubits > 0: def rank_allocate_cmds(cmds_list, dag): - #pylint: disable=unused-argument + # pylint: disable=unused-argument return cmds_list allocate_cmds = rank_allocate_cmds( @@ -663,7 +679,7 @@ def __str__(self): Returns: A summary (string) of resources used, including depth of swaps and - statistics about the paths generated + statistics about the swaps themselves """ depth_of_swaps_str = "" diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index cc09837ab..f8e309ca0 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -182,19 +182,18 @@ def test_invalid_gates(simple_mapper): mapper.receive([cmd0, cmd1, cmd2, cmd3, cmd_flush]) -# def 
test_run_infinite_loop_detection(simple_mapper): -# mapper, backend = simple_mapper -# mapper.current_mapping = {i: i for i in range(7)} +def test_init(simple_graph): + opts = {'decay_opts': {'delta': 0.002}} -# qb0 = WeakQubitRef(engine=None, idx=0) -# qb1 = WeakQubitRef(engine=None, idx=6) + mapper = graphm.GraphMapper(graph=simple_graph, opts=opts) + assert mapper.qubit_manager._decay._delta == 0.002 + assert mapper.qubit_manager._decay._cutoff == 5 -# qb_flush = WeakQubitRef(engine=None, idx=-1) -# cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + opts = {'decay_opts': {'delta': 0.002, 'max_lifetime': 10}} -# cmd0 = Command(engine=None, gate=X, qubits=([qb0], ), controls=[qb1]) -# with pytest.raises(RuntimeError): -# mapper.receive([cmd0, cmd_flush]) + mapper = graphm.GraphMapper(graph=simple_graph, opts=opts) + assert mapper.qubit_manager._decay._delta == 0.002 + assert mapper.qubit_manager._decay._cutoff == 10 def test_resetting_mapping_to_none(simple_graph): @@ -215,6 +214,27 @@ def test_add_qubits_to_mapping_methods_failure(simple_graph): graphm.GraphMapper(graph=simple_graph, add_qubits_to_mapping="as") +@pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) +def test_add_qubits_to_mapping_methods_only_single(simple_graph, add_qubits): + mapper = graphm.GraphMapper(graph=simple_graph, + add_qubits_to_mapping=add_qubits) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + gates = [ + Command(None, X, qubits=([qb[1]], )), + Command(None, X, qubits=([qb[2]], )), + ] + + mapper.receive(list(itertools.chain(allocate_cmds, gates, [cmd_flush]))) + assert mapper.num_mappings == 0 + + @pytest.mark.parametrize("add_qubits", ["fcfs", "fcfs_init", "FCFS"]) def 
test_add_qubits_to_mapping_methods(simple_graph, add_qubits): mapper = graphm.GraphMapper(graph=simple_graph, @@ -594,7 +614,7 @@ def test_send_two_qubit_gate_before_swap_nonallocated_qubits(simple_mapper): assert mapper.current_mapping[4] == 4 assert mapper.current_mapping[5] == 5 assert mapper.current_mapping[6] in [3, 6] - + if mapper.current_mapping[6] == 3: # qb[6] is on position 3, all commands are possible assert mapper.qubit_manager.size() == 0 diff --git a/projectq/cengines/_multi_qubit_gate_manager.py b/projectq/cengines/_multi_qubit_gate_manager.py index 28e1c33aa..75cc09256 100644 --- a/projectq/cengines/_multi_qubit_gate_manager.py +++ b/projectq/cengines/_multi_qubit_gate_manager.py @@ -22,6 +22,8 @@ """ import networkx as nx +import statistics +import math from projectq.ops import (AllocateQubitGate, DeallocateQubitGate) # ============================================================================== @@ -30,11 +32,11 @@ def _topological_sort(dag): indegree_map = {} zero_indegree = [] - for v, d in dag.in_degree(): - if d > 0: - indegree_map[v] = d + for node, degree in dag.in_degree(): + if degree > 0: + indegree_map[node] = degree else: - zero_indegree.append(v) + zero_indegree.append(node) while zero_indegree: node = zero_indegree.pop() @@ -48,10 +50,10 @@ def _topological_sort(dag): # Coffaman-Graham algorithm with infinite width def _coffman_graham_ranking(dag): - layers = [] + layers = [[]] levels = {} - - for node in topological_sort(dag): + + for node in _topological_sort(dag): dependant_level = -1 for dependant in dag.pred[node]: level = levels[dependant] @@ -68,6 +70,8 @@ def _coffman_graham_ranking(dag): layers[level].append(node) levels[node] = level + for layer in layers: + layer.reverse() return layers @@ -123,7 +127,7 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, Score of current swap operations """ #pylint: disable=unused-argument - return _sum_distance_over_gates(gates_dag.front_layer, mapping, + return 
_sum_distance_over_gates(gates_dag.front_layer_2qubit, mapping, distance_matrix) @@ -182,13 +186,13 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, decay = opts['decay'] near_term_weight = opts['W'] - n_front = len(gates_dag.front_layer_for_cost_fun) + n_front = len(gates_dag.front_layer_2qubit) n_near = len(gates_dag.near_term_layer) decay_factor = max(decay.get_decay_value(swap[0]), decay.get_decay_value(swap[1])) front_layer_term = (1. / n_front * _sum_distance_over_gates( - gates_dag.front_layer_for_cost_fun, mapping, distance_matrix)) + gates_dag.front_layer_2qubit, mapping, distance_matrix)) if n_near == 0: return decay_factor * front_layer_term @@ -236,15 +240,6 @@ class DecayManager(object): User should call the :py:meth:`step` method each time a swap gate is added and :py:meth:`remove_decay` once a 2-qubit gate is executed. """ - - # def __repr__(self): - # s = '' - # for backend_id in self._backend_ids: - # tmp = self._backend_ids[backend_id] - # s += '\n {:2}: {}, {}'.format(backend_id, tmp.decay, tmp.lifetime) - # s += '\n' - # return s - def __init__(self, delta, max_lifetime): """ Constructor @@ -322,7 +317,7 @@ def step(self): class _DAGNodeBase(object): - #pylint: disable=too-few-public-methods + # pylint: disable=too-few-public-methods def __init__(self, cmd, *args): self.logical_ids = frozenset(args) self.cmd = cmd @@ -344,7 +339,7 @@ class _DAGNodeSingle(_DAGNodeBase): (DAG) of quantum gates """ - #pylint: disable=too-few-public-methods + # pylint: disable=too-few-public-methods def __init__(self, cmd, logical_id): super(_DAGNodeSingle, self).__init__(cmd, logical_id) self.logical_id = logical_id @@ -356,7 +351,7 @@ class _DAGNodeDouble(_DAGNodeBase): of quantum gates """ - #pylint: disable=too-few-public-methods + # pylint: disable=too-few-public-methods def __init__(self, cmd, logical_id0, logical_id1): super(_DAGNodeDouble, self).__init__(cmd, logical_id0, logical_id1) self.logical_id0 = logical_id0 @@ -371,11 
+366,24 @@ class CommandDAG(object): def __init__(self): self._dag = nx.DiGraph() self._logical_ids_in_diag = set() - self.front_layer = [] - self.front_layer_for_cost_fun = [] self.near_term_layer = [] + + self._layers_up_to_date = True + self._front_layer = [] + self._front_layer_2qubit = [] + self._layers = [[]] self._back_layer = {} + @property + def front_layer(self): + self.calculate_command_hierarchy() + return self._layers[0] + + @property + def front_layer_2qubit(self): + self.calculate_command_hierarchy() + return self._front_layer_2qubit + def size(self): """ Return the size of the DAG (ie. number of nodes) @@ -393,11 +401,23 @@ def clear(self): """ self._dag.clear() self._logical_ids_in_diag = set() - self.front_layer_for_cost_fun = [] - self.front_layer = [] self.near_term_layer = [] + + self._layers_up_to_date = True + self._front_layer = [] + self._front_layer_2qubit = [] + self._layers = [[]] self._back_layer = {} + def calculate_command_hierarchy(self): + if not self._layers_up_to_date: + self._layers = _coffman_graham_ranking(self._dag) + self._front_layer_2qubit = [ + node for node in self._layers[0] + if isinstance(node, _DAGNodeDouble) + ] + self._layers_up_to_date = True + def add_command(self, cmd): """ Add a command to the DAG @@ -434,11 +454,7 @@ def add_command(self, cmd): self._back_layer[logical_ids[0]] = new_node self._back_layer[logical_ids[1]] = new_node - # If both qubit are not already in the DAG, then we just got a new - # gate on the front layer - if not logical_id0_in_dag and not logical_id1_in_dag: - self.front_layer_for_cost_fun.append(new_node) - self.front_layer.append(new_node) + self._layers_up_to_date = False else: logical_id = logical_ids[0] logical_id_in_dag = logical_id in self._logical_ids_in_diag @@ -452,9 +468,8 @@ def add_command(self, cmd): else: self._logical_ids_in_diag.add(logical_id) - self.front_layer.append(new_node) - self._back_layer[logical_id] = new_node + self._layers_up_to_date = False else: if not 
logical_id_in_dag: new_node = _DAGNodeSingle(cmd, logical_id) @@ -462,62 +477,11 @@ def add_command(self, cmd): self._logical_ids_in_diag.add(logical_id) self._back_layer[logical_id] = new_node - - self.front_layer.append(new_node) + self._layers_up_to_date = False else: self._back_layer[logical_id].append_compatible_cmd(cmd) - def remove_from_front_layer(self, cmd): - """ - Remove a gate from the front layer of the DAG - - Args: - cmd (Command): A ProjectQ command - - Raises: - RuntimeError if the gate does not exist in the front layer - """ - # First find the gate inside the first layer list - node = next((node for node in self.front_layer if node.cmd is cmd), - None) - if not node: - raise RuntimeError('({}) not found in DAG'.format(cmd)) - - logical_ids = [qubit.id for qureg in cmd.all_qubits for qubit in qureg] - - descendants = list(self._dag[node]) - - if not descendants: - for logical_id in logical_ids: - self._logical_ids_in_diag.remove(logical_id) - del self._back_layer[logical_id] - self._dag.remove_node(node) - else: - if len(descendants) == 1: - if isinstance(node, _DAGNodeDouble): - # Look for the logical_id not found in the descendant - logical_id, tmp = logical_ids - if logical_id in descendants[0].logical_ids: - logical_id = tmp - - self._logical_ids_in_diag.remove(logical_id) - del self._back_layer[logical_id] - - # Remove gate from DAG - self._dag.remove_node(node) - - for descendant in descendants: - if not self._dag.pred[descendant]: - self.front_layer.append(descendant) - if isinstance(descendant, _DAGNodeDouble): - self.front_layer_for_cost_fun.append(descendant) - - # Remove the gate from the first layer - self.front_layer.remove(node) - if isinstance(node, _DAGNodeDouble): - self.front_layer_for_cost_fun.remove(node) - - def calculate_near_term_layer(self, mapping): + def calculate_near_term_layer(self, mapping, depth=1): """ Calculate the first order near term layer. 
@@ -527,23 +491,15 @@ def calculate_near_term_layer(self, mapping): Args: mapping (dict): current mapping """ - near_term_layer_candidates = [] - for node in self.front_layer_for_cost_fun: - for descendant in self._dag[node]: - if (isinstance(descendant, _DAGNodeDouble) - and descendant.logical_id0 in mapping - and descendant.logical_id1 in mapping): - near_term_layer_candidates.append(descendant) - - # Only add candidates for which all predecessors are in the front layer + self.calculate_command_hierarchy() self.near_term_layer = [] - for node in near_term_layer_candidates: - for predecessor in self._dag.pred[node]: - if predecessor not in self.front_layer: - break - else: - if node not in self.near_term_layer: - self.near_term_layer.append(node) + if len(self._layers) > 1: + for layer in self._layers[1:depth + 1]: + self.near_term_layer.extend([ + node for node in layer + if (isinstance(node, _DAGNodeDouble) and node.logical_id0 + in mapping and node.logical_id1 in mapping) + ]) def calculate_interaction_list(self): """ @@ -553,11 +509,8 @@ def calculate_interaction_list(self): List of tuples of logical qubit IDs for each 2-qubit gate present in the DAG. """ - interactions = [] - for node in self._dag: - if isinstance(node, _DAGNodeDouble): - interactions.append(tuple(node.logical_ids)) - return interactions + return [(node.logical_id0, node.logical_id1) for node in self._dag + if isinstance(node, _DAGNodeDouble)] def calculate_qubit_interaction_subgraphs(self, max_order=2): """ @@ -575,10 +528,31 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): components of the qubit interaction graph. Within each components, nodes are sorted in decreasing order of their degree. 
""" - graph = nx.Graph() + self.calculate_command_hierarchy() - for node in self.front_layer: - self._add_to_interaction_graph(node, graph, max_order) + graph = nx.Graph() + for layer in self._layers: + for node in layer: + if isinstance(node, _DAGNodeDouble): + node0_in_graph = node.logical_id0 in graph + node1_in_graph = node.logical_id1 in graph + + add_edge = True + if (node0_in_graph + and len(graph[node.logical_id0]) >= max_order): + add_edge = False + if (node1_in_graph + and len(graph[node.logical_id1]) >= max_order): + add_edge = False + + if add_edge or graph.has_edge(node.logical_id0, + node.logical_id1): + graph.add_edge(node.logical_id0, node.logical_id1) + else: + break + else: + continue # only executed if the inner loop did NOT break + break # only executed if the inner loop DID break return [ sorted(graph.subgraph(g), @@ -589,46 +563,45 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): reverse=True) ] - def _add_to_interaction_graph(self, node, graph, max_order): + def remove_command(self, cmd): """ - Recursively add an interaction to the interaction graph + Remove a command from the DAG + + Note: + Only commands present in the front layer of the DAG can be + removed. 
Args: - node (_DAGNodeDouble): Node from DAG - graph (networkx.Graph): Interaction graph - max_order (int): Maximum degree of the nodes in the resulting - interaction graph + cmd (Command): A ProjectQ command + + Raises: + RuntimeError if the gate does not exist in the front layer """ - if isinstance(node, _DAGNodeDouble) \ - and (node.logical_id0 not in graph - or node.logical_id1 not in graph - or (len(graph[node.logical_id0]) < max_order - and len(graph[node.logical_id1]) < max_order)): - graph.add_edge(node.logical_id0, node.logical_id1) + # First find the gate inside the front layer list + node = next((node for node in self.front_layer if node.cmd is cmd), + None) + if node is None: + raise RuntimeError( + '({}) not found in front layer of DAG'.format(cmd)) - for descendant in self._dag[node]: - self._add_to_interaction_graph(descendant, graph, max_order) + logical_ids = {qubit.id for qureg in cmd.all_qubits for qubit in qureg} - def _max_distance_in_dag(self, node_max_distance, node, distance): - """ - Recursively calculate the maximum distance for each node of the DAG + descendants = list(self._dag[node]) - Args: - node_max_distance (dict): Dictionary containing the current - maximum distance for each node - node (_DAGNode): Root node from DAG for traversal - distance (int): Current distance offset - """ - for descendant in self._dag[node]: - try: - if node_max_distance[descendant] < distance: - node_max_distance[descendant] = distance - except KeyError: - node_max_distance[descendant] = distance + if not descendants: + self._logical_ids_in_diag -= logical_ids + for logical_id in logical_ids: + del self._back_layer[logical_id] + elif len(descendants) == 1 and isinstance(node, _DAGNodeDouble): + logical_id, = logical_ids.difference(descendants[0].logical_ids) - if self._dag[descendant]: - self._max_distance_in_dag(node_max_distance, descendant, - distance + 1) + self._logical_ids_in_diag.remove(logical_id) + del self._back_layer[logical_id] + + # Remove gate 
from DAG + self._dag.remove_node(node) + + self._layers_up_to_date = False # ============================================================================== @@ -660,6 +633,30 @@ def __init__(self, graph, decay_opts=None): self.dag = CommandDAG() self._decay = DecayManager(decay_opts.get('delta', 0.001), decay_opts.get('max_lifetime', 5)) + self._stats = { + 'simul_exec': [], + '2qubit_gates_loc': {}, + } + + def __str__(self): + """ + Return the string representation of this MultiQubitGateManager. + + Returns: + A summary (string) about the commands executed. + """ + + max_width = int( + math.ceil(math.log10(max( + self._stats['2qubit_gates_loc'].values()))) + 1) + interactions_str = "" + for (backend_id0, backend_id1), number \ + in sorted(self._stats['2qubit_gates_loc'].items(), + key=lambda x: x[1], reverse=True): + interactions_str += "\n {0}: {1:{2}}".format( + sorted([backend_id0, backend_id1]), number, max_width) + + return ('2-qubit gates locations:{}').format(interactions_str) def size(self): """ @@ -712,7 +709,7 @@ def generate_swaps(self, operations. 
""" - if not self.dag.front_layer_for_cost_fun: + if not self.dag.front_layer_2qubit: return ([], set()) if opts is None: @@ -760,7 +757,7 @@ def add_command(self, cmd): :py:meth:`.GatesDAG.add_command` """ - return self.dag.add_command(cmd) + self.dag.add_command(cmd) def get_executable_commands(self, mapping): """ @@ -777,36 +774,50 @@ def get_executable_commands(self, mapping): cmds_to_execute = [] allocate_cmds = [] has_command_to_execute = True + self._stats['simul_exec'].append(0) + + def _add_to_execute_list(node): + cmds_to_execute.append(node.cmd) + cmds_to_execute.extend(node.compatible_successor_cmds) + self.dag.remove_command(node.cmd) + + self.dag.calculate_command_hierarchy() while has_command_to_execute: # Reset after each pass has_command_to_execute = False - for node in self.dag.front_layer.copy(): + for node in self.dag.front_layer: if isinstance(node, _DAGNodeSingle): if isinstance(node.cmd.gate, AllocateQubitGate): # Allocating a qubit already in mapping is allowed if node.logical_id in mapping: has_command_to_execute = True - cmds_to_execute.append(node.cmd) - cmds_to_execute.extend( - node.compatible_successor_cmds) - self.dag._remove_from_front_layer(node.cmd) + _add_to_execute_list(node) elif node not in allocate_cmds: allocate_cmds.append(node) elif node.logical_id in mapping: has_command_to_execute = True - cmds_to_execute.append(node.cmd) - cmds_to_execute.extend(node.compatible_successor_cmds) - self.dag._remove_from_front_layer(node.cmd) + self._stats['simul_exec'][-1] += 1 + _add_to_execute_list(node) elif node.logical_id0 in mapping and node.logical_id1 in mapping: if self.graph.has_edge(mapping[node.logical_id0], mapping[node.logical_id1]): has_command_to_execute = True - cmds_to_execute.append(node.cmd) - cmds_to_execute.extend(node.compatible_successor_cmds) - self.dag._remove_from_front_layer(node.cmd) - + _add_to_execute_list(node) + self._stats['simul_exec'][-1] += 1 + key = frozenset((mapping[node.logical_id0], + 
mapping[node.logical_id1])) + self._stats['2qubit_gates_loc'][key] = self._stats.get( + node.logical_ids, 0) + 1 + for cmd in node.compatible_successor_cmds: + if len([ + qubit.id for qureg in cmd.all_qubits + for qubit in qureg + ]) == 2: + self._stats['2qubit_gates_loc'][key] += 1 + + self.dag.calculate_command_hierarchy() return cmds_to_execute, allocate_cmds def execute_allocate_cmds(self, allocate_cmds, mapping): @@ -827,8 +838,9 @@ def execute_allocate_cmds(self, allocate_cmds, mapping): if node.logical_id in mapping: cmds_to_execute.append(node.cmd) cmds_to_execute.extend(node.compatible_successor_cmds) - self.dag._remove_from_front_layer(node.cmd) + self.dag.remove_command(node.cmd) + self.dag.calculate_command_hierarchy() return cmds_to_execute # ========================================================================== @@ -849,6 +861,7 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): .. seealso:: :py:meth:`CommandDAG.calculate_qubit_interaction_subgraphs` """ + self.dag.calculate_command_hierarchy() return self.dag.calculate_qubit_interaction_subgraphs(max_order) # ========================================================================== @@ -872,6 +885,7 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): a logical qubit associated to it. 
""" + self.dag.calculate_command_hierarchy() reverse_mapping = {v: k for k, v in mapping.items()} # Only consider gates from the front layer and generate a list of @@ -879,7 +893,7 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): # those concerned by a gate swap_candidates = [] - for node in self.dag.front_layer_for_cost_fun: + for node in self.dag.front_layer_2qubit: for logical_id in node.logical_ids: for backend_id1 in self.graph[mapping[logical_id]]: swap_candidates.append( @@ -908,6 +922,7 @@ def _can_execute_some_gate(self, mapping): Args: mapping (dict): Current mapping """ + self.dag.calculate_command_hierarchy() for node in self.dag.front_layer: if isinstance(node, _DAGNodeSingle) and node.logical_id in mapping: return True diff --git a/projectq/cengines/_multi_qubit_gate_manager_test.py b/projectq/cengines/_multi_qubit_gate_manager_test.py index 3ef92d6f5..3312f6939 100644 --- a/projectq/cengines/_multi_qubit_gate_manager_test.py +++ b/projectq/cengines/_multi_qubit_gate_manager_test.py @@ -17,10 +17,8 @@ import pytest import networkx as nx -# from projectq.cengines import DummyEngine, LocalOptimizer, MainEngine -# from projectq.meta import LogicalQubitIDTag -from projectq.ops import (Allocate, BasicGate, Command, Deallocate, FlushGate, - X, H, All, Measure, CNOT) +import re +from projectq.ops import (Allocate, Command, Deallocate, X, H) from projectq.types import WeakQubitRef from projectq.cengines import _multi_qubit_gate_manager as multi @@ -409,7 +407,7 @@ def test_command_dag_add_gate(command_dag): dag_node0 = search_cmd(command_dag, cmd0) assert len(command_dag.front_layer) == 1 - assert not command_dag.front_layer_for_cost_fun + assert not command_dag.front_layer_2qubit assert command_dag._dag.number_of_nodes() == 2 assert command_dag._dag.number_of_edges() == 1 @@ -421,10 +419,10 @@ def test_command_dag_add_gate(command_dag): dag_node56 = search_cmd(command_dag, cmd56) assert len(command_dag.front_layer) == 3 - assert 
command_dag.front_layer_for_cost_fun == [dag_node56] + assert command_dag.front_layer_2qubit == [dag_node56] -def test_command_dag_remove_from_front_layer1(command_dag): +def test_command_dag_remove_command(command_dag): allocate0 = gen_cmd(0, gate=Allocate) cmd0 = gen_cmd(0) deallocate0 = gen_cmd(0, gate=Deallocate) @@ -441,19 +439,19 @@ def test_command_dag_remove_from_front_layer1(command_dag): search_cmd(command_dag, cmd0) with pytest.raises(RuntimeError): - command_dag._remove_from_front_layer(cmd0) + command_dag.remove_command(cmd0) assert command_dag.front_layer == [dag_allocate0] - command_dag._remove_from_front_layer(allocate0) + command_dag.remove_command(allocate0) assert command_dag.front_layer == [dag_deallocate] assert command_dag._logical_ids_in_diag == {0} - command_dag._remove_from_front_layer(deallocate0) + command_dag.remove_command(deallocate0) assert not command_dag.front_layer -def test_command_dag_remove_from_front_layer2(command_dag): +def test_command_dag_remove_command2(command_dag): cmd01 = gen_cmd(0, 1) cmd56 = gen_cmd(5, 6) cmd12 = gen_cmd(1, 2) @@ -474,23 +472,23 @@ def test_command_dag_remove_from_front_layer2(command_dag): dag_node78 = search_cmd(command_dag, cmd78) with pytest.raises(RuntimeError): - command_dag._remove_from_front_layer(cmd12) + command_dag.remove_command(cmd12) assert command_dag.front_layer == [dag_node01, dag_node56, dag_node78] - command_dag._remove_from_front_layer(cmd78) + command_dag.remove_command(cmd78) assert command_dag.front_layer == [dag_node01, dag_node56] assert command_dag._logical_ids_in_diag == {0, 1, 2, 5, 6} assert 7 not in command_dag._back_layer assert 8 not in command_dag._back_layer - command_dag._remove_from_front_layer(cmd01) + command_dag.remove_command(cmd01) assert command_dag.front_layer == [dag_node56, dag_node12] - command_dag._remove_from_front_layer(cmd56) + command_dag.remove_command(cmd56) assert command_dag.front_layer == [dag_node12] - command_dag._remove_from_front_layer(cmd12) 
+ command_dag.remove_command(cmd12) assert command_dag.front_layer == [dag_node26] @@ -515,7 +513,7 @@ def test_command_dag_near_term_layer(command_dag): dag_node34 = search_cmd(command_dag, cmd34) command_dag.calculate_near_term_layer({i: i for i in range(7)}) - assert [dag_node12, dag_node34] == command_dag.near_term_layer + assert command_dag.near_term_layer == [dag_node12, dag_node34] def test_command_dag_calculate_interaction_list(command_dag): @@ -533,9 +531,9 @@ def test_command_dag_calculate_interaction_list(command_dag): interactions = command_dag.calculate_interaction_list() - assert (0, 1) in interactions - assert (0, 3) in interactions - assert (3, 4) in interactions + assert (0, 1) in interactions or (1, 0) in interactions + assert (0, 3) in interactions or (3, 0) in interactions + assert (3, 4) in interactions or (4, 3) in interactions def test_command_dag_generate_qubit_interaction_graph(command_dag): @@ -684,11 +682,13 @@ def test_qubit_manager_clear(qubit_manager): def test_qubit_manager_generate_one_swap_step(qubit_manager): cmd08 = gen_cmd(0, 8) cmd01 = gen_cmd(0, 1) + cmd5 = gen_cmd(5) # ---------------------------------- manager = deepcopy(qubit_manager) manager.add_command(cmd08) + manager.add_command(cmd5) mapping = {i: i for i in range(9)} (logical_id0, backend_id0, logical_id1, @@ -869,39 +869,36 @@ def test_qubit_manager_get_executable_commands(qubit_manager): def test_qubit_manager_generate_qubit_interaction_graph(qubit_manager): qb, allocate_cmds = allocate_all_qubits_cmd(9) - cmd0 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]) - cmd1 = Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]) - cmd2 = Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]) - cmd3 = Command(engine=None, gate=X, qubits=([qb[1]], )) - - qubit_manager.add_command(cmd0) - qubit_manager.add_command(cmd1) - qubit_manager.add_command(cmd2) - qubit_manager.add_command(cmd3) - - cmd4 = Command(engine=None, gate=X, 
qubits=([qb[4]], ), controls=[qb[5]]) - cmd5 = Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]) - qubit_manager.add_command(cmd4) - qubit_manager.add_command(cmd5) - - cmd6 = Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]) - cmd7 = Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) - qubit_manager.add_command(cmd6) - qubit_manager.add_command(cmd7) - - qubit_manager.add_command( - Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]])) - subgraphs = qubit_manager.calculate_qubit_interaction_subgraphs( - max_order=2) - - assert len(subgraphs) == 2 - assert len(subgraphs[0]) == 4 - assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) - assert subgraphs[0][0] == 0 - assert subgraphs[0][-2:] in ([1, 3], [3, 1]) - assert len(subgraphs[1]) == 5 - assert all([n in subgraphs[1] for n in [4, 5, 6, 7, 8]]) - assert subgraphs[1][-2:] in ([4, 8], [8, 4]) + cmd_list = [ + Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[1]]), + Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[2]]), + Command(engine=None, gate=X, qubits=([qb[1]], )), + Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]), + Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]), + Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]), + Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]) + ] + + for cmd_last in [ + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[0]]), + Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[3]]) + ]: + qubit_manager.clear() + for cmd in cmd_list: + qubit_manager.add_command(cmd) + qubit_manager.add_command(cmd_last) + + subgraphs = qubit_manager.calculate_qubit_interaction_subgraphs( + max_order=2) + + assert len(subgraphs) == 2 + assert len(subgraphs[0]) == 4 + assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) + assert subgraphs[0][0] == 0 + 
assert subgraphs[0][-2:] in ([1, 3], [3, 1]) + assert len(subgraphs[1]) == 3 + assert all([n in subgraphs[1] for n in [4, 5, 6]]) def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): @@ -954,3 +951,55 @@ def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): cmd_list, _ = qubit_manager.get_executable_commands(mapping) assert cmd_list == [cmd05, cmd06] assert qubit_manager.size() == 1 + + +def test_qubit_manager_str(): + qubit_manager = multi.MultiQubitGateManager(generate_grid_graph(3, 3)) + + qb, allocate_cmds = allocate_all_qubits_cmd(9) + cmd_list = [ + Command(engine=None, gate=H, qubits=([qb[0]], )), + Command(engine=None, gate=X, qubits=([qb[0]], ), controls=[qb[8]]), + Command(engine=None, gate=X, qubits=([qb[2]], ), controls=[qb[6]]), + Command(engine=None, gate=X, qubits=([qb[1]], ), controls=[qb[7]]), + Command(engine=None, gate=X, qubits=([qb[1]], )), + Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]), + Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[4]]), + Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]), + Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]), + Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]), + ] + + for cmd in cmd_list: + qubit_manager.add_command(cmd) + + mapping = {i: i for i in range(16)} + + while qubit_manager.size() > 0: + qubit_manager.get_executable_commands(mapping) + + swaps, all_qubits = qubit_manager.generate_swaps( + mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + + reverse_mapping = {v: k for k, v in mapping.items()} + for bqb0, bqb1 in swaps: + (reverse_mapping[bqb0], + reverse_mapping[bqb1]) = (reverse_mapping[bqb1], + reverse_mapping[bqb0]) + mapping = {v: k for k, v in reverse_mapping.items()} + + str_repr = str(qubit_manager) + + num_of_2qubit_gates_ref = 0 + for cmd in cmd_list: + if len({qubit.id for qureg in cmd.all_qubits for qubit in qureg}) == 2: + num_of_2qubit_gates_ref += 1 + + 
num_of_2qubit_gates = 0 + for line in str_repr.split('\n'): + m = re.match(r'^\s+\[[0-9]+,\s[0-9]+\]:\s*([0-9]+)$', line) + if m: + num_of_2qubit_gates += int(m.group(1)) + + assert num_of_2qubit_gates == num_of_2qubit_gates_ref + assert str_repr.count("[4, 5]: 2") == 1 From 091df8b44b56e1436d7a6d394e7d6192f5c8b1bb Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Tue, 22 Oct 2019 10:49:44 +0200 Subject: [PATCH 18/25] Rename MultiQubitGateManager to GateManager and update docstrings --- docs/projectq.cengines.rst | 8 +- ...qubit_gate_manager.py => _gate_manager.py} | 77 +++++++++++++------ ..._manager_test.py => _gate_manager_test.py} | 44 +++++------ projectq/cengines/_graphmapper.py | 69 +++++++++-------- projectq/cengines/_graphmapper_test.py | 6 +- 5 files changed, 118 insertions(+), 86 deletions(-) rename projectq/cengines/{_multi_qubit_gate_manager.py => _gate_manager.py} (94%) rename projectq/cengines/{_multi_qubit_gate_manager_test.py => _gate_manager_test.py} (96%) diff --git a/docs/projectq.cengines.rst b/docs/projectq.cengines.rst index ce2da4b9b..32e6ea763 100755 --- a/docs/projectq.cengines.rst +++ b/docs/projectq.cengines.rst @@ -6,7 +6,7 @@ The ProjectQ compiler engines package. .. autosummary:: projectq.cengines.AutoReplacer projectq.cengines.BasicEngine - projectq.cengines.BasicMapper + projectq.cengines.BasicMapperEngine projectq.cengines.CommandModifier projectq.cengines.CompareEngine projectq.cengines.DecompositionRule @@ -37,9 +37,9 @@ Module contents Helper sub-modules ------------------ -Multi-qubit gate sub-module -^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Gate manager sub-module +^^^^^^^^^^^^^^^^^^^^^^^ -.. automodule:: projectq.cengines._multi_qubit_gate_manager +.. 
automodule:: projectq.cengines._gate_manager :members: :imported-members: diff --git a/projectq/cengines/_multi_qubit_gate_manager.py b/projectq/cengines/_gate_manager.py similarity index 94% rename from projectq/cengines/_multi_qubit_gate_manager.py rename to projectq/cengines/_gate_manager.py index 75cc09256..2ea16fb37 100644 --- a/projectq/cengines/_multi_qubit_gate_manager.py +++ b/projectq/cengines/_gate_manager.py @@ -22,7 +22,6 @@ """ import networkx as nx -import statistics import math from projectq.ops import (AllocateQubitGate, DeallocateQubitGate) @@ -30,6 +29,23 @@ def _topological_sort(dag): + """ + Returns a generator of nodes in topologically sorted order. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Args: + dag (networkx.DiGraph): A Directed Acyclic Graph (DAG) + + Returns: + An iterable of node names in topological sorted order. + + Note: + This implementation is based on + :py:func:`networkx.algorithms.dag.topological_sort` + """ indegree_map = {} zero_indegree = [] for node, degree in dag.in_degree(): @@ -50,6 +66,18 @@ def _topological_sort(dag): # Coffaman-Graham algorithm with infinite width def _coffman_graham_ranking(dag): + """ + Apply the Coffman-Graham layering algorithm to a DAG (with infinite width) + + Args: + dag (networkx.DiGraph): A Directed Acyclic Graph (DAG) + + Returns: + A list of layers (Python list of lists). + + Note: + This function does not limit the width of any layers. 
+ """ layers = [[]] levels = {} @@ -86,7 +114,7 @@ def _sum_distance_over_gates(node_list, mapping, distance_matrix): gate_list (list): List of 2-qubit gates mapping (dict): Current mapping distance_matrix (dict): Distance matrix within the hardware coupling - graph + graph Returns: Sum of all pair-wise distances between qubits @@ -118,10 +146,10 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, gates_dag (CommandDAG): Direct acyclic graph of future quantum gates mapping (dict): Current mapping distance_matrix (dict): Distance matrix within the hardware coupling - graph + graph swap (tuple): Candidate swap (not used by this function) opts (dict): Miscellaneous parameters for cost function (not used by - this function) + this function) Returns: Score of current swap operations @@ -159,7 +187,7 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, gates_dag (CommandDAG): Direct acyclic graph of future quantum gates mapping (dict): Current mapping distance_matrix (dict): Distance matrix within the hardware coupling - graph + graph swap (tuple): Candidate swap operation opts (dict): Miscellaneous parameters for cost function @@ -237,8 +265,8 @@ class DecayManager(object): """ Class managing the decay information about a list of backend qubit IDs - User should call the :py:meth:`step` method each time a swap gate is added and - :py:meth:`remove_decay` once a 2-qubit gate is executed. + User should call the :py:meth:`step` method each time a swap gate is added + and :py:meth:`remove_decay` once a 2-qubit gate is executed. """ def __init__(self, delta, max_lifetime): """ @@ -247,7 +275,7 @@ def __init__(self, delta, max_lifetime): Args: delta (float): Decay parameter max_lifetime (int): Maximum lifetime of decay information for a - particular qubit + particular qubit """ self._delta = delta self._cutoff = max_lifetime @@ -389,7 +417,9 @@ def size(self): Return the size of the DAG (ie. 
number of nodes) Note: - This need not be the number of commands stored within the DAG. + This may not be equal to the number of commands stored within the + DAG as some nodes might store more than one gate if they are + compatible. """ return self._dag.number_of_nodes() @@ -521,7 +551,7 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): Args: max_order (int): Maximum degree of the nodes in the resulting - graph + graph Returns: A list of list of graph nodes corresponding to all the connected @@ -607,7 +637,7 @@ def remove_command(self, cmd): # ============================================================================== -class MultiQubitGateManager(object): +class GateManager(object): """ Class managing qubit interactions """ @@ -689,16 +719,15 @@ def generate_swaps(self, Args: mapping (dict): Current mapping cost_fun (function): Cost function to rank swap candidates - Must accept the following parameters: - - dag (_GatesDAG) - - new_mapping (dict) - - distance_matrix (dict) - - swap_candidate (tuple) + Must accept the following parameters: + - dag (_GatesDAG) + - new_mapping (dict) + - distance_matrix (dict) + - swap_candidate (tuple) max_steps (int): (optional) Maximum number of swap steps to - attempt before giving up + attempt before giving up opts (dict): (optional) Extra parameters for cost function call - .. 
seealso:: :py:meth:`nearest_neighbours_cost_fun` :py:meth:`look_ahead_parallelism_cost_fun` @@ -851,7 +880,7 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): Args: max_order (int): Maximum degree of the nodes in the resulting - interaction graph + interaction graph Returns: A list of list of graph nodes corresponding to all the connected @@ -873,11 +902,11 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): Args: mapping (dict): Current mapping cost_fun (function): Cost function to rank swap candidates - Must accept the following parameters: - - dag (_GatesDAG) - - new_mapping (dict) - - distance_matrix (dict) - - swap_candidate (tuple) + Must accept the following parameters: + - dag (_GatesDAG) + - new_mapping (dict) + - distance_matrix (dict) + - swap_candidate (tuple) Returns: Tuple with (logical_id0, backend_id0, logical_id1, backend_id1) diff --git a/projectq/cengines/_multi_qubit_gate_manager_test.py b/projectq/cengines/_gate_manager_test.py similarity index 96% rename from projectq/cengines/_multi_qubit_gate_manager_test.py rename to projectq/cengines/_gate_manager_test.py index 3312f6939..96225126e 100644 --- a/projectq/cengines/_multi_qubit_gate_manager_test.py +++ b/projectq/cengines/_gate_manager_test.py @@ -21,7 +21,7 @@ from projectq.ops import (Allocate, Command, Deallocate, X, H) from projectq.types import WeakQubitRef -from projectq.cengines import _multi_qubit_gate_manager as multi +from projectq.cengines import _gate_manager as gatemgr # ============================================================================== @@ -31,8 +31,8 @@ def dagnode_to_string(self): return '{} {}'.format(self.__class__.__name__, tuple(self.logical_ids)) -multi._DAGNodeBase.__str__ = dagnode_to_string -multi._DAGNodeBase.__repr__ = dagnode_to_string +gatemgr._DAGNodeBase.__str__ = dagnode_to_string +gatemgr._DAGNodeBase.__repr__ = dagnode_to_string Command.__repr__ = Command.__str__ @@ -118,17 +118,17 @@ def grid33_graph(): @pytest.fixture def 
decay_manager(): - return multi.DecayManager(0.001, 5) + return gatemgr.DecayManager(0.001, 5) @pytest.fixture def command_dag(): - return multi.CommandDAG() + return gatemgr.CommandDAG() @pytest.fixture def qubit_manager(): - return multi.MultiQubitGateManager(generate_grid_graph(3, 3)) + return gatemgr.GateManager(generate_grid_graph(3, 3)) # ============================================================================== @@ -612,18 +612,18 @@ def test_qubit_manager_valid_and_invalid_graphs(simple_graph): graph = nx.Graph() graph.add_nodes_from('abcd') with pytest.raises(RuntimeError): - multi.MultiQubitGateManager(graph=graph) + gatemgr.GateManager(graph=graph) graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]) with pytest.raises(RuntimeError): - multi.MultiQubitGateManager(graph=graph) + gatemgr.GateManager(graph=graph) graph = deepcopy(simple_graph) graph.remove_edge(0, 1) with pytest.raises(RuntimeError): - multi.MultiQubitGateManager(graph=graph) + gatemgr.GateManager(graph=graph) - manager = multi.MultiQubitGateManager(graph=simple_graph) + manager = gatemgr.GateManager(graph=simple_graph) dist = manager.distance_matrix assert dist[0][1] == 1 @@ -693,7 +693,7 @@ def test_qubit_manager_generate_one_swap_step(qubit_manager): mapping = {i: i for i in range(9)} (logical_id0, backend_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun, {}) + mapping, gatemgr.nearest_neighbours_cost_fun, {}) assert logical_id0 in (0, 8) if logical_id0 == 0: @@ -704,7 +704,7 @@ def test_qubit_manager_generate_one_swap_step(qubit_manager): mapping = {0: 0, 8: 8} (logical_id0, backend_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun, {}) + mapping, gatemgr.nearest_neighbours_cost_fun, {}) assert logical_id1 == -1 if logical_id0 == 0: @@ -721,7 +721,7 @@ def test_qubit_manager_generate_one_swap_step(qubit_manager): mapping = {i: i for i in 
range(9)} (logical_id0, backend_id0, logical_id1, backend_id1) = manager._generate_one_swap_step( - mapping, multi.nearest_neighbours_cost_fun, {}) + mapping, gatemgr.nearest_neighbours_cost_fun, {}) # In this case, the only swap that does not increases the overall distance # is (0, 1) @@ -739,7 +739,7 @@ def test_qubit_manager_generate_swaps(qubit_manager): mapping = {i: i for i in range(9)} swaps, all_qubits = manager.generate_swaps( - mapping, multi.nearest_neighbours_cost_fun) + mapping, gatemgr.nearest_neighbours_cost_fun) assert not swaps assert not all_qubits @@ -751,14 +751,14 @@ def test_qubit_manager_generate_swaps(qubit_manager): with pytest.raises(RuntimeError): manager.generate_swaps(mapping, - multi.nearest_neighbours_cost_fun, + gatemgr.nearest_neighbours_cost_fun, max_steps=2) # ---------------------------------- mapping = {i: i for i in range(9)} swaps, _ = manager.generate_swaps(mapping, - multi.nearest_neighbours_cost_fun) + gatemgr.nearest_neighbours_cost_fun) # Make sure the original mapping was not modified assert mapping == {i: i for i in range(9)} @@ -775,7 +775,7 @@ def test_qubit_manager_generate_swaps(qubit_manager): mapping = {i: i for i in range(9)} swaps, _ = manager.generate_swaps(mapping, - multi.look_ahead_parallelism_cost_fun, + gatemgr.look_ahead_parallelism_cost_fun, opts={'W': 0.5}) reverse_mapping = {v: k for k, v in mapping.items()} for id0, id1 in swaps: @@ -794,7 +794,7 @@ def test_qubit_manager_generate_swaps(qubit_manager): assert manager.size() == 2 swaps, all_qubits = manager.generate_swaps( - mapping, multi.look_ahead_parallelism_cost_fun, opts={ + mapping, gatemgr.look_ahead_parallelism_cost_fun, opts={ 'W': 0.5, }) @@ -913,7 +913,7 @@ def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): mapping = {i: i for i in range(9)} swaps, all_qubits = qubit_manager.generate_swaps( - mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + mapping, gatemgr.look_ahead_parallelism_cost_fun, {'W': 0.5}) 
reverse_mapping = {v: k for k, v in mapping.items()} for bqb0, bqb1 in swaps: @@ -939,7 +939,7 @@ def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): mapping = {i: i for i in range(9)} swaps, all_qubits = qubit_manager.generate_swaps( - mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + mapping, gatemgr.look_ahead_parallelism_cost_fun, {'W': 0.5}) reverse_mapping = {v: k for k, v in mapping.items()} for bqb0, bqb1 in swaps: @@ -954,7 +954,7 @@ def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): def test_qubit_manager_str(): - qubit_manager = multi.MultiQubitGateManager(generate_grid_graph(3, 3)) + qubit_manager = gatemgr.GateManager(generate_grid_graph(3, 3)) qb, allocate_cmds = allocate_all_qubits_cmd(9) cmd_list = [ @@ -979,7 +979,7 @@ def test_qubit_manager_str(): qubit_manager.get_executable_commands(mapping) swaps, all_qubits = qubit_manager.generate_swaps( - mapping, multi.look_ahead_parallelism_cost_fun, {'W': 0.5}) + mapping, gatemgr.look_ahead_parallelism_cost_fun, {'W': 0.5}) reverse_mapping = {v: k for k, v in mapping.items()} for bqb0, bqb1 in swaps: diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 5620fa5b0..6c77fc328 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -31,8 +31,7 @@ from projectq.ops import (AllocateQubitGate, Command, DeallocateQubitGate, FlushGate, Swap) from projectq.types import WeakQubitRef -from ._multi_qubit_gate_manager import (MultiQubitGateManager, - look_ahead_parallelism_cost_fun) +from ._gate_manager import GateManager, look_ahead_parallelism_cost_fun # ------------------------------------------------------------------------------ @@ -270,21 +269,18 @@ class GraphMapper(BasicMapperEngine): using Swap gates. .. 
seealso:: - :py:mod:`projectq.cengines._multi_qubit_gate_manager` + :py:mod:`projectq.cengines._gate_manager` Args: graph (networkx.Graph) : Arbitrary connected graph storage (int) Number of gates to temporarily store - add_qubits_to_mapping (function or str) Function called when new qubits - are to be added to the current - mapping. - Special possible string values: + add_qubits_to_mapping (function or str): Function called when + new qubits are to be added to the current mapping. + Special possible string values: - - ``"fcfs"``: first-come first - serve - - ``"fcfs_init"``: first-come - first serve with smarter - mapping initialisation + - ``"fcfs"``: first-come first serve + - ``"fcfs_init"``: first-come first serve with smarter mapping + initialisation Note: 1) Gates are cached and only mapped from time to time. A @@ -297,18 +293,15 @@ class GraphMapper(BasicMapperEngine): Attributes: current_mapping: Stores the mapping: key is logical qubit id, value - is mapped qubit id from 0,...,self.num_qubits + is mapped qubit id from 0,...,self.num_qubits storage (int): Number of gate it caches before mapping. 
num_qubits(int): number of qubits num_mappings (int): Number of times the mapper changed the mapping depth_of_swaps (dict): Key are circuit depth of swaps, value is the - number of such mappings which have been - applied + number of such mappings which have beenapplied num_of_swaps_per_mapping (dict): Key are the number of swaps per - mapping, value is the number of such - mappings which have been applied - path_stats (dict) : Key is the endpoints of a path, value is the number - of such paths which have been applied + mapping, value is the number of such mappings which have + been applied """ def __init__(self, graph, @@ -320,10 +313,16 @@ def __init__(self, Args: graph (networkx.Graph): Arbitrary connected graph representing - Qubit connectivity + Qubit connectivity storage (int): Number of gates to temporarily store - generate_swap_opts (dict): extra options to customize swap - operation generation + add_qubits_to_mapping (function or str): Function called + when new qubits are to be added to the current + mapping. + Special possible string values: + + - ``"fcfs"``: first-come first serve + - ``"fcfs_init"``: first-come first serve with smarter mapping + initialisation opts (dict): Extra options (see below) Raises: @@ -366,12 +365,12 @@ def __init__(self, else: self._opts = opts - self.qubit_manager = MultiQubitGateManager(graph=graph, - decay_opts=self._opts.get( - 'decay_opts', { - 'delta': 0.001, - 'max_lifetime': 5 - })) + self.qubit_manager = GateManager(graph=graph, + decay_opts=self._opts.get( + 'decay_opts', { + 'delta': 0.001, + 'max_lifetime': 5 + })) self.num_qubits = graph.number_of_nodes() self.storage = storage # Randomness to pick permutations if there are too many. @@ -526,11 +525,15 @@ def _run(self): """ Create a new mapping and executes possible gates. - It first allocates all 0, ..., self.num_qubits-1 mapped qubit ids, if - they are not already used because we might need them all for the - swaps. 
Then it creates a new map, swaps all the qubits to the new map, - executes all possible gates, and finally deallocates mapped qubit ids - which don't store any information. + First execute all possible commands, given the current mapping. Then + find a new mapping, perform the swap operations to get to the new + mapping and then execute all possible gates once more. Non-allocated + qubits that are required for the swap operations will be automatically + allocated and deallocated as required. + + Raises: + RuntimeError if the mapper is unable to make progress (possibly + due to an insufficient number of qubits) """ num_of_stored_commands_before = self.qubit_manager.size() diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index f8e309ca0..844b1c51d 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -26,7 +26,7 @@ from projectq.cengines import _graphmapper as graphm -import projectq.cengines._multi_qubit_gate_manager as multi +import projectq.cengines._gate_manager as gatemgr def decay_to_string(self): @@ -36,7 +36,7 @@ def decay_to_string(self): return s -multi.DecayManager.__str__ = decay_to_string +gatemgr.DecayManager.__str__ = decay_to_string Command.__repr__ = Command.__str__ @@ -143,7 +143,7 @@ def get_node_list(self): return list(self.dag._dag.nodes) -graphm.MultiQubitGateManager._get_node_list = get_node_list +graphm.GateManager._get_node_list = get_node_list # ============================================================================== From 477a2172ca98180e2bd1dfe4135209da70b6b7be Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Tue, 22 Oct 2019 12:55:38 +0200 Subject: [PATCH 19/25] Fix node order in layers for consistency across Python versions --- projectq/cengines/_gate_manager.py | 27 ++++++++++++++++--------- projectq/cengines/_gate_manager_test.py | 12 +++++------ projectq/cengines/_graphmapper_test.py | 5 ++--- 3 files changed, 25 insertions(+), 19 
deletions(-) diff --git a/projectq/cengines/_gate_manager.py b/projectq/cengines/_gate_manager.py index 2ea16fb37..7f40e587d 100644 --- a/projectq/cengines/_gate_manager.py +++ b/projectq/cengines/_gate_manager.py @@ -99,7 +99,7 @@ def _coffman_graham_ranking(dag): levels[node] = level for layer in layers: - layer.reverse() + layer.sort(key=lambda node: node.node_id) return layers @@ -346,7 +346,8 @@ def step(self): class _DAGNodeBase(object): # pylint: disable=too-few-public-methods - def __init__(self, cmd, *args): + def __init__(self, node_id, cmd, *args): + self.node_id = node_id self.logical_ids = frozenset(args) self.cmd = cmd self.compatible_successor_cmds = [] @@ -368,8 +369,8 @@ class _DAGNodeSingle(_DAGNodeBase): """ # pylint: disable=too-few-public-methods - def __init__(self, cmd, logical_id): - super(_DAGNodeSingle, self).__init__(cmd, logical_id) + def __init__(self, node_id, cmd, logical_id): + super(_DAGNodeSingle, self).__init__(node_id, cmd, logical_id) self.logical_id = logical_id @@ -380,8 +381,9 @@ class _DAGNodeDouble(_DAGNodeBase): """ # pylint: disable=too-few-public-methods - def __init__(self, cmd, logical_id0, logical_id1): - super(_DAGNodeDouble, self).__init__(cmd, logical_id0, logical_id1) + def __init__(self, node_id, cmd, logical_id0, logical_id1): + super(_DAGNodeDouble, self).__init__(node_id, cmd, logical_id0, + logical_id1) self.logical_id0 = logical_id0 self.logical_id1 = logical_id1 @@ -393,6 +395,7 @@ class CommandDAG(object): """ def __init__(self): self._dag = nx.DiGraph() + self._node_id = 0 self._logical_ids_in_diag = set() self.near_term_layer = [] @@ -430,6 +433,7 @@ def clear(self): Remove all nodes from the DAG and all layers. 
""" self._dag.clear() + self._node_id = 0 self._logical_ids_in_diag = set() self.near_term_layer = [] @@ -466,7 +470,8 @@ def add_command(self, cmd): self._back_layer[logical_ids[1]].append_compatible_cmd(cmd) return - new_node = _DAGNodeDouble(cmd, *logical_ids) + new_node = _DAGNodeDouble(self._node_id, cmd, *logical_ids) + self._node_id += 1 self._dag.add_node(new_node) if logical_id0_in_dag: @@ -490,7 +495,8 @@ def add_command(self, cmd): logical_id_in_dag = logical_id in self._logical_ids_in_diag if isinstance(cmd.gate, (AllocateQubitGate, DeallocateQubitGate)): - new_node = _DAGNodeSingle(cmd, logical_id) + new_node = _DAGNodeSingle(self._node_id, cmd, logical_id) + self._node_id += 1 self._dag.add_node(new_node) if logical_id_in_dag: @@ -502,7 +508,8 @@ def add_command(self, cmd): self._layers_up_to_date = False else: if not logical_id_in_dag: - new_node = _DAGNodeSingle(cmd, logical_id) + new_node = _DAGNodeSingle(self._node_id, cmd, logical_id) + self._node_id += 1 self._dag.add_node(new_node) self._logical_ids_in_diag.add(logical_id) @@ -582,7 +589,7 @@ def calculate_qubit_interaction_subgraphs(self, max_order=2): break else: continue # only executed if the inner loop did NOT break - break # only executed if the inner loop DID break + break # only executed if the inner loop DID break return [ sorted(graph.subgraph(g), diff --git a/projectq/cengines/_gate_manager_test.py b/projectq/cengines/_gate_manager_test.py index 96225126e..ab63b3cf2 100644 --- a/projectq/cengines/_gate_manager_test.py +++ b/projectq/cengines/_gate_manager_test.py @@ -78,16 +78,16 @@ def generate_grid_graph(nrows, ncols): return graph -def gen_cmd(*logical_ids, gate=X): +def gen_cmd(*logical_ids, **kwargs): + gate = kwargs.get('gate', X) if len(logical_ids) == 1: qb0 = WeakQubitRef(engine=None, idx=logical_ids[0]) return Command(None, gate, qubits=([qb0], )) - elif len(logical_ids) == 2: + if len(logical_ids) == 2: qb0 = WeakQubitRef(engine=None, idx=logical_ids[0]) qb1 = 
WeakQubitRef(engine=None, idx=logical_ids[1]) return Command(None, gate, qubits=([qb0], ), controls=[qb1]) - else: - raise RuntimeError('Unsupported') + raise RuntimeError('Unsupported') def search_cmd(command_dag, cmd): @@ -897,8 +897,8 @@ def test_qubit_manager_generate_qubit_interaction_graph(qubit_manager): assert all([n in subgraphs[0] for n in [0, 1, 2, 3]]) assert subgraphs[0][0] == 0 assert subgraphs[0][-2:] in ([1, 3], [3, 1]) - assert len(subgraphs[1]) == 3 - assert all([n in subgraphs[1] for n in [4, 5, 6]]) + assert len(subgraphs[1]) == 4 + assert all([n in subgraphs[1] for n in [4, 5, 6, 7]]) def test_qubit_manager_generate_swaps_change_mapping(qubit_manager): diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 844b1c51d..8fb7fc03e 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -349,9 +349,8 @@ def test_qubit_placement_double_two_qubit_gate(grid33_graph_mapper): ]) mapping = mapper.current_mapping - # Make sure that the qb[2] was allocated at backend_id 0 assert backend.received_commands[0].gate == Allocate - assert backend.received_commands[0].qubits[0][0].id == 0 + assert backend.received_commands[0].qubits[0][0].id in [0, 2, 6, 8] assert backend.received_commands[0].tags == [LogicalQubitIDTag(2)] @@ -574,7 +573,7 @@ def test_send_two_qubit_gate_before_swap(simple_mapper): } else: # qb[2] moved, all_cmds[5] not possible - assert mapper._stored_commands == [all_cmds[5]] + all_cmds[-4:] + assert backend._stored_commands == [all_cmds[5]] + all_cmds[-4:] assert mapper.current_mapping == { 0: 0, 1: 2, From a2647471034f5de290a146de70a1da0d1cc2d37b Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 24 Oct 2019 12:49:22 +0200 Subject: [PATCH 20/25] Fix default options for GraphMapper --- projectq/cengines/_gate_manager.py | 33 ++++++++++--- projectq/cengines/_gate_manager_test.py | 9 +++- projectq/cengines/_graphmapper.py | 61 +++++++++++++----------- 
projectq/cengines/_graphmapper_test.py | 62 +++++++++++++++++++++++++ 4 files changed, 131 insertions(+), 34 deletions(-) diff --git a/projectq/cengines/_gate_manager.py b/projectq/cengines/_gate_manager.py index 7f40e587d..5b5ff3ba4 100644 --- a/projectq/cengines/_gate_manager.py +++ b/projectq/cengines/_gate_manager.py @@ -28,6 +28,20 @@ # ============================================================================== +class defaults(object): + """ + Class containing default values for some options + """ + + delta = 0.001 + max_lifetime = 5 + near_term_layer_depth = 1 + W = 0.5 + + +# ============================================================================== + + def _topological_sort(dag): """ Returns a generator of nodes in topologically sorted order. @@ -154,7 +168,7 @@ def nearest_neighbours_cost_fun(gates_dag, mapping, distance_matrix, swap, Returns: Score of current swap operations """ - #pylint: disable=unused-argument + # pylint: disable=unused-argument return _sum_distance_over_gates(gates_dag.front_layer_2qubit, mapping, distance_matrix) @@ -212,7 +226,7 @@ def look_ahead_parallelism_cost_fun(gates_dag, mapping, distance_matrix, swap, - Weighting factor (see cost function formula) """ decay = opts['decay'] - near_term_weight = opts['W'] + near_term_weight = opts.get('W', defaults.W) n_front = len(gates_dag.front_layer_2qubit) n_near = len(gates_dag.near_term_layer) @@ -668,8 +682,9 @@ def __init__(self, graph, decay_opts=None): if decay_opts is None: decay_opts = {} self.dag = CommandDAG() - self._decay = DecayManager(decay_opts.get('delta', 0.001), - decay_opts.get('max_lifetime', 5)) + self._decay = DecayManager( + decay_opts.get('delta', defaults.delta), + decay_opts.get('max_lifetime', defaults.max_lifetime)) self._stats = { 'simul_exec': [], '2qubit_gates_loc': {}, @@ -754,7 +769,9 @@ def generate_swaps(self, self._decay.clear() opts['decay'] = self._decay - self.dag.calculate_near_term_layer(current_mapping) + self.dag.calculate_near_term_layer( 
+ current_mapping, + opts.get('near_term_layer_depth', defaults.near_term_layer_depth)) mapping = current_mapping.copy() swaps = [] @@ -836,7 +853,8 @@ def _add_to_execute_list(node): has_command_to_execute = True self._stats['simul_exec'][-1] += 1 _add_to_execute_list(node) - elif node.logical_id0 in mapping and node.logical_id1 in mapping: + elif (node.logical_id0 in mapping + and node.logical_id1 in mapping): if self.graph.has_edge(mapping[node.logical_id0], mapping[node.logical_id1]): has_command_to_execute = True @@ -938,7 +956,8 @@ def _generate_one_swap_step(self, mapping, cost_fun, opts): # Rank swap candidates using the provided cost function scores = [] - for logical_id0, backend_id0, logical_id1, backend_id1 in swap_candidates: + for (logical_id0, backend_id0, logical_id1, + backend_id1) in swap_candidates: new_mapping = mapping.copy() _apply_swap_to_mapping(new_mapping, logical_id0, logical_id1, diff --git a/projectq/cengines/_gate_manager_test.py b/projectq/cengines/_gate_manager_test.py index ab63b3cf2..15f8df46e 100644 --- a/projectq/cengines/_gate_manager_test.py +++ b/projectq/cengines/_gate_manager_test.py @@ -511,10 +511,17 @@ def test_command_dag_near_term_layer(command_dag): command_dag.add_command(cmd14) dag_node12 = search_cmd(command_dag, cmd12) dag_node34 = search_cmd(command_dag, cmd34) + dag_node23b = search_cmd(command_dag, cmd23b) + dag_node46 = search_cmd(command_dag, cmd46) - command_dag.calculate_near_term_layer({i: i for i in range(7)}) + command_dag.calculate_near_term_layer({i: i for i in range(7)}, depth=1) assert command_dag.near_term_layer == [dag_node12, dag_node34] + command_dag.calculate_near_term_layer({i: i for i in range(7)}, depth=2) + assert command_dag.near_term_layer == [ + dag_node12, dag_node34, dag_node23b, dag_node46 + ] + def test_command_dag_calculate_interaction_list(command_dag): cmd01 = gen_cmd(0, 1) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 6c77fc328..0eaa1b7b1 
100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -15,11 +15,12 @@ Mapper for a quantum circuit to an arbitrary connected graph. Input: Quantum circuit with 1 and 2 qubit gates on n qubits. Gates are assumed - to be applied in parallel if they act on disjoint qubit(s) and any pair - of qubits can perform a 2 qubit gate (all-to-all connectivity) +to be applied in parallel if they act on disjoint qubit(s) and any pair of +qubits can perform a 2 qubit gate (all-to-all connectivity) + Output: Quantum circuit in which qubits are placed in 2-D square grid in which - only nearest neighbour qubits can perform a 2 qubit gate. The mapper - uses Swap gates in order to move qubits next to each other. +only nearest neighbour qubits can perform a 2 qubit gate. The mapper uses Swap +gates in order to move qubits next to each other. """ from copy import deepcopy @@ -39,12 +40,12 @@ if sys.version_info[0] >= 3 and sys.version_info[1] > 6: # pragma: no cover def uniquify_list(seq): - #pylint: disable=missing-function-docstring + # pylint: disable=missing-function-docstring return list(dict.fromkeys(seq)) else: # pragma: no cover def uniquify_list(seq): - #pylint: disable=missing-function-docstring + # pylint: disable=missing-function-docstring seen = set() seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)] @@ -53,6 +54,18 @@ def uniquify_list(seq): # ============================================================================== +class defaults(object): + """ + Class containing default values for some options + """ + #: Defaults to :py:func:`.look_ahead_parallelism_cost_fun` + cost_fun = look_ahead_parallelism_cost_fun + max_swap_steps = 30 + + +# ============================================================================== + + class GraphMapperError(Exception): """Base class for all exceptions related to the GraphMapper.""" @@ -78,7 +91,7 @@ def _add_qubits_to_mapping_fcfs(current_mapping, graph, 
new_logical_qubit_ids, Returns: A new mapping """ - #pylint: disable=unused-argument + # pylint: disable=unused-argument mapping = deepcopy(current_mapping) currently_used_nodes = sorted([v for _, v in mapping.items()]) @@ -337,26 +350,20 @@ def __init__(self, * - Key - Type - Description - * - cost_fun - - ``function`` - - | Cost function to be called when generating a new - | list of swap operations. - | Defaults to :py:func:`.look_ahead_parallelism_cost_fun` * - decay_opts - ``dict`` - | Options to pass onto the :py:class:`.DecayManager` constructor - | Defaults to ``{'delta': 0.001, 'max_lifetime': 5}``. - * - opts + | (see :py:class:`._gate_manager.defaults`) + * - swap_opts - ``dict`` - - | Extra options to pass onto the cost function - | (see :py:meth:`.MultiQubitGateManager.generate_swaps`) - | Defaults to ``{'W': 0.5}``. - * - max_swap_steps - - ``int`` - - | Maximum number of swap steps per mapping - | (see :py:meth:`.MultiQubitGateManager.generate_swaps`) - | Defaults to 30 + - | Extra options used when generating a list of swap + | operations. 
+ | Acceptable keys: W, cost_fun, near_term_layer_depth, + | max_swap_steps + | (see :py:meth:`.GateManager.generate_swaps`, + | :py:class:`._graphmapper.defaults` and + | :py:class:`._gate_manager.defaults`) """ BasicMapperEngine.__init__(self) @@ -542,12 +549,14 @@ def _run(self): if not self.qubit_manager.size(): return + # NB: default values are taken care of at place of access + swap_opts = self._opts.get('swap_opts', {}) + swaps, all_swapped_qubits = self.qubit_manager.generate_swaps( self._current_mapping, - cost_fun=self._opts.get('cost_fun', - look_ahead_parallelism_cost_fun), - opts=self._opts.get('opts', {'W': 0.5}), - max_steps=self._opts.get('max_swap_steps', 30)) + cost_fun=swap_opts.get('cost_fun', defaults.cost_fun), + opts=swap_opts, + max_steps=swap_opts.get('max_swap_steps', defaults.max_swap_steps)) if swaps: # Get a list of the qubits we need to allocate just to perform the diff --git a/projectq/cengines/_graphmapper_test.py b/projectq/cengines/_graphmapper_test.py index 8fb7fc03e..64f6f0bda 100644 --- a/projectq/cengines/_graphmapper_test.py +++ b/projectq/cengines/_graphmapper_test.py @@ -535,6 +535,68 @@ def test_run_and_receive(simple_graph, simple_mapper): assert mapper.num_mappings == 1 +@pytest.mark.parametrize("opts", [{}, { + 'swap_opts': { + } +}, { + 'swap_opts': { + 'W': 0.5, + } +}, { + 'swap_opts': { + 'W': 0.5, + 'near_term_layer_depth': 2 + } +}]) +def test_run_and_receive_with_opts(simple_graph, opts): + mapper = graphm.GraphMapper(graph=simple_graph, + add_qubits_to_mapping="fcfs", + opts=opts) + backend = DummyEngine(save_commands=True) + backend.is_last_engine = True + mapper.next_engine = backend + + qb, allocate_cmds = allocate_all_qubits_cmd(mapper) + + gates = [ + Command(None, X, qubits=([qb[0]], ), controls=[qb[1]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[2]]), + Command(None, X, qubits=([qb[1]], ), controls=[qb[5]]), + Command(None, X, qubits=([qb[2]], ), controls=[qb[3]]), + Command(None, X, 
qubits=([qb[5]], ), controls=[qb[3]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[4]]), + Command(None, X, qubits=([qb[3]], ), controls=[qb[6]]), + Command(None, X, qubits=([qb[4]], ), controls=[qb[6]]), + ] + deallocate_cmds = [ + Command(engine=None, gate=Deallocate, qubits=([qb[1]], )) + ] + + allocated_qubits_ref = set([0, 2, 3, 4, 5, 6]) + + all_cmds = list(itertools.chain(allocate_cmds, gates, deallocate_cmds)) + mapper.receive(all_cmds) + qb_flush = WeakQubitRef(engine=None, idx=-1) + cmd_flush = Command(engine=None, gate=FlushGate(), qubits=([qb_flush], )) + mapper.receive([cmd_flush]) + assert mapper.qubit_manager.size() == 0 + assert len(backend.received_commands) == len(all_cmds) + 1 + assert mapper._currently_allocated_ids == allocated_qubits_ref + + mapping = dict(enumerate(range(len(simple_graph)))) + del mapping[1] + assert mapper.current_mapping == mapping + + cmd9 = Command(None, X, qubits=([qb[0]], ), controls=[qb[6]]) + mapper.receive([cmd9, cmd_flush]) + assert mapper._currently_allocated_ids == allocated_qubits_ref + for idx in allocated_qubits_ref: + assert idx in mapper.current_mapping + assert mapper.qubit_manager.size() == 0 + assert len(mapper.current_mapping) == 6 + assert mapper.num_mappings == 1 + + def test_send_two_qubit_gate_before_swap(simple_mapper): qb, all_cmds = allocate_all_qubits_cmd(simple_mapper[0]) From c9a64f682c6c68e2eb080768338145b3fb153051 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 24 Oct 2019 13:19:05 +0200 Subject: [PATCH 21/25] Fix error in statistics and adjust tests --- projectq/cengines/_gate_manager.py | 4 ++-- projectq/cengines/_gate_manager_test.py | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/projectq/cengines/_gate_manager.py b/projectq/cengines/_gate_manager.py index 5b5ff3ba4..7777604b3 100644 --- a/projectq/cengines/_gate_manager.py +++ b/projectq/cengines/_gate_manager.py @@ -862,8 +862,8 @@ def _add_to_execute_list(node): self._stats['simul_exec'][-1] += 
1 key = frozenset((mapping[node.logical_id0], mapping[node.logical_id1])) - self._stats['2qubit_gates_loc'][key] = self._stats.get( - node.logical_ids, 0) + 1 + self._stats['2qubit_gates_loc'][key] \ + = self._stats['2qubit_gates_loc'].get(key, 0) + 1 for cmd in node.compatible_successor_cmds: if len([ qubit.id for qureg in cmd.all_qubits diff --git a/projectq/cengines/_gate_manager_test.py b/projectq/cengines/_gate_manager_test.py index 15f8df46e..5b0481d91 100644 --- a/projectq/cengines/_gate_manager_test.py +++ b/projectq/cengines/_gate_manager_test.py @@ -972,6 +972,9 @@ def test_qubit_manager_str(): Command(engine=None, gate=X, qubits=([qb[1]], )), Command(engine=None, gate=X, qubits=([qb[4]], ), controls=[qb[5]]), Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[4]]), + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[4]]), + Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[4]]), + Command(engine=None, gate=X, qubits=([qb[3]], ), controls=[qb[4]]), Command(engine=None, gate=X, qubits=([qb[5]], ), controls=[qb[6]]), Command(engine=None, gate=X, qubits=([qb[6]], ), controls=[qb[7]]), Command(engine=None, gate=X, qubits=([qb[7]], ), controls=[qb[8]]), @@ -1008,5 +1011,8 @@ def test_qubit_manager_str(): if m: num_of_2qubit_gates += int(m.group(1)) + edge34_count = int(re.search(r'\s+\[3, 4\]:\s+([0-9]+)', + str_repr).group(1)) assert num_of_2qubit_gates == num_of_2qubit_gates_ref - assert str_repr.count("[4, 5]: 2") == 1 + assert edge34_count > 1 + assert str_repr.count("[4, 5]: 3") == 1 From 65cf486690b6ba80f76aa5cff21aac0c2c022b5c Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 24 Oct 2019 14:55:03 +0200 Subject: [PATCH 22/25] Add missing public function to cengines --- projectq/cengines/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/projectq/cengines/__init__.py b/projectq/cengines/__init__.py index 90b7d95de..703e2955d 100755 --- a/projectq/cengines/__init__.py +++ b/projectq/cengines/__init__.py 
@@ -33,3 +33,5 @@ from ._testengine import CompareEngine, DummyEngine from ._twodmapper import GridMapper from ._graphmapper import GraphMapper +from ._gate_manager import (nearest_neighbours_cost_fun, + look_ahead_parallelism_cost_fun) From 5b2a96e0145dca6206d475f138e738ed7f44cdc8 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 24 Oct 2019 15:34:29 +0200 Subject: [PATCH 23/25] Fix issue with Python2 and class functions --- projectq/cengines/_graphmapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 0eaa1b7b1..256b91e8c 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -59,7 +59,7 @@ class defaults(object): Class containing default values for some options """ #: Defaults to :py:func:`.look_ahead_parallelism_cost_fun` - cost_fun = look_ahead_parallelism_cost_fun + cost_fun = staticmethod(look_ahead_parallelism_cost_fun) max_swap_steps = 30 From 33952a9dcda01cdea17015de17fdad590b636600 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Thu, 24 Oct 2019 16:14:36 +0200 Subject: [PATCH 24/25] Fix some more issues --- projectq/cengines/_graphmapper.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/projectq/cengines/_graphmapper.py b/projectq/cengines/_graphmapper.py index 256b91e8c..883740dbf 100644 --- a/projectq/cengines/_graphmapper.py +++ b/projectq/cengines/_graphmapper.py @@ -286,7 +286,7 @@ class GraphMapper(BasicMapperEngine): Args: graph (networkx.Graph) : Arbitrary connected graph - storage (int) Number of gates to temporarily store + storage (int): Approximate number of gates to temporarily store add_qubits_to_mapping (function or str): Function called when new qubits are to be added to the current mapping. 
Special possible string values: @@ -307,7 +307,7 @@ class GraphMapper(BasicMapperEngine): Attributes: current_mapping: Stores the mapping: key is logical qubit id, value is mapped qubit id from 0,...,self.num_qubits - storage (int): Number of gate it caches before mapping. + storage (int): Approximate number of gate it caches before mapping. num_qubits(int): number of qubits num_mappings (int): Number of times the mapper changed the mapping depth_of_swaps (dict): Key are circuit depth of swaps, value is the @@ -327,7 +327,7 @@ def __init__(self, Args: graph (networkx.Graph): Arbitrary connected graph representing Qubit connectivity - storage (int): Number of gates to temporarily store + storage (int): Approximate number of gates to temporarily store add_qubits_to_mapping (function or str): Function called when new qubits are to be added to the current mapping. @@ -584,14 +584,6 @@ def _run(self): # to the temporary internal reverse mapping with invalid ids new_internal_mapping[backend_id] = -1 - # Calculate reverse internal mapping - new_internal_mapping = deepcopy(self._reverse_current_mapping) - - # Add missing entries with invalid id to be able to process the - # swaps operations - for backend_id in not_allocated_ids: - new_internal_mapping[backend_id] = -1 - # Send swap operations to arrive at the new mapping for bqb0, bqb1 in swaps: qb0 = WeakQubitRef(engine=self, idx=bqb0) @@ -608,14 +600,9 @@ def _run(self): # Register statistics: self.num_mappings += 1 depth = return_swap_depth(swaps) - if depth not in self.depth_of_swaps: - self.depth_of_swaps[depth] = 1 - else: - self.depth_of_swaps[depth] += 1 - if len(swaps) not in self.num_of_swaps_per_mapping: - self.num_of_swaps_per_mapping[len(swaps)] = 1 - else: - self.num_of_swaps_per_mapping[len(swaps)] += 1 + self.depth_of_swaps[depth] = self.depth_of_swaps.get(depth, 0) + 1 + self.num_of_swaps_per_mapping[len( + swaps)] = self.num_of_swaps_per_mapping.get(len(swaps), 0) + 1 # Calculate the list of "helper" 
qubits that need to be deallocated # and remove invalid entries From 9a3eab8c6ffb75b4710f9e0f12f21bccebc0b007 Mon Sep 17 00:00:00 2001 From: Damien Nguyen Date: Wed, 13 Nov 2019 16:28:27 +0100 Subject: [PATCH 25/25] Fix typo --- projectq/cengines/_gate_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projectq/cengines/_gate_manager.py b/projectq/cengines/_gate_manager.py index 7777604b3..d24526df7 100644 --- a/projectq/cengines/_gate_manager.py +++ b/projectq/cengines/_gate_manager.py @@ -78,7 +78,7 @@ def _topological_sort(dag): yield node -# Coffaman-Graham algorithm with infinite width +# Coffman-Graham algorithm with infinite width def _coffman_graham_ranking(dag): """ Apply the Coffman-Grapham layering algorithm to a DAG (with infinite width)