From 5a9273c67e6ddc2ab195f1f7c8e4495313c5409c Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Wed, 18 Dec 2024 15:05:35 +0530 Subject: [PATCH 01/11] added traversal algorithm to nx_parallel --- _nx_parallel/__init__.py | 146 +++++ nx_parallel/algorithms/traversal/__init__.py | 2 + .../traversal/breadth_first_search.py | 575 ++++++++++++++++++ .../traversal/depth_first_search.py | 529 ++++++++++++++++ 4 files changed, 1252 insertions(+) create mode 100644 nx_parallel/algorithms/traversal/__init__.py create mode 100644 nx_parallel/algorithms/traversal/breadth_first_search.py create mode 100644 nx_parallel/algorithms/traversal/depth_first_search.py diff --git a/_nx_parallel/__init__.py b/_nx_parallel/__init__.py index 4c5e4352..707243f4 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -90,6 +90,64 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, + "bfs_edges": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L109", + "additional_docs": "Iterate over edges in a breadth-first-search starting at source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node": "Specify starting node for breadth-first search; this function iterates over only those edges in the component reachable from this node.", + "reverse : bool, optional": "If True traverse a directed graph in the reverse direction", + "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "bfs_labeled_edges": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L464", + "additional_docs": "Iterate over edges in a breadth-first search (BFS) labeled by type.", + "additional_parameters": { + "G : NetworkX graph": "A graph over which to find the layers using breadth-first search.", + "sources : node in `G` or list of nodes in `G`": "Starting nodes for single source or multiple sources breadth-first search", + }, + }, + "bfs_layers": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L406", + "additional_docs": "Returns an iterator of all the layers in breadth-first search traversal.", + "additional_parameters": { + "G : NetworkX graph": "A graph over which to find the layers using breadth-first search.", + "sources : node in `G` or list of nodes in `G`": "Specify starting nodes for single source or multiple sources breadth-first search", + }, + }, + "bfs_predecessors": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L266", + "additional_docs": "Returns an iterator of predecessors in breadth-first-search from source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node": "Specify starting node for breadth-first search", + "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "bfs_successors": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L332", + "additional_docs": "Returns an iterator of successors in breadth-first-search from source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node": "Specify starting node for breadth-first search", + "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "bfs_tree": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L198", + "additional_docs": "Returns an oriented tree constructed from of a breadth-first-search starting at source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node": "Specify starting node for breadth-first search", + "reverse : bool, optional": "If True traverse a directed graph in the reverse direction", + "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, "closeness_vitality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/vitality.py#L10", "additional_docs": "The parallel computation is implemented only when the node is not specified. 
The closeness vitality for each node is computed concurrently.", @@ -97,6 +155,84 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, + "descendants_at_distance": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L539", + "additional_docs": "Returns all nodes at a fixed `distance` from `source` in `G`.", + "additional_parameters": { + "G : NetworkX graph": "A graph source : node in `G` distance : the distance of the wanted nodes from `source`" + }, + }, + "dfs_edges": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L19", + "additional_docs": "Iterate over edges in a depth-first-search (DFS).", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search and yield edges in the component reachable from source.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_labeled_edges": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L414", + "additional_docs": "Iterate over edges in a depth-first-search (DFS) labeled by type.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search and return edges in the component reachable from source.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_postorder_nodes": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L296", + "additional_docs": "Generate nodes in a depth-first-search post-ordering starting at source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_predecessors": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L167", + "additional_docs": "Returns dictionary of predecessors in depth-first-search from source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search. 
Note that you will get predecessors for all nodes in the component containing `source`. This input only specifies where the DFS starts.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_preorder_nodes": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L355", + "additional_docs": "Generate nodes in a depth-first-search pre-ordering starting at source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search and return nodes in the component reachable from source.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_successors": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L229", + "additional_docs": "Returns dictionary of successors in depth-first-search from source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search. Note that you will get successors for all nodes in the component containing `source`. 
This input only specifies where the DFS starts.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + }, + }, + "dfs_tree": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L116", + "additional_docs": "Returns oriented tree constructed from a depth-first-search from source.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node, optional": "Specify starting node for depth-first search.", + "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", + "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", + "T : NetworkX DiGraph": "An oriented tree", + }, + }, "edge_betweenness_centrality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/betweenness.py#L96", "additional_docs": "The parallel computation is implemented by dividing the nodes into chunks and computing edge betweenness centrality for each chunk concurrently.", @@ -104,6 +240,16 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." 
}, }, + "generic_bfs_edges": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L20", + "additional_docs": "Iterate over edges in a breadth-first search.", + "additional_parameters": { + "G : NetworkX graph": "", + "source : node": "Starting node for the breadth-first search; this function iterates over only those edges in the component reachable from this node.", + "neighbors : function": "A function that takes a newly visited node of the graph as input and returns an *iterator* (not just a list) of nodes that are neighbors of that node with custom ordering. If not specified, this is just the ``G.neighbors`` method, but in general it can be any function that returns an iterator over some or all of the neighbors of a given node, in any order.", + "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth.", + }, + }, "is_reachable": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/tournament.py#L13", "additional_docs": "The function parallelizes the calculation of two neighborhoods of vertices in `G` and checks closure conditions for each neighborhood subset in parallel.", diff --git a/nx_parallel/algorithms/traversal/__init__.py b/nx_parallel/algorithms/traversal/__init__.py new file mode 100644 index 00000000..3f332cec --- /dev/null +++ b/nx_parallel/algorithms/traversal/__init__.py @@ -0,0 +1,2 @@ +from .depth_first_search import * +from .breadth_first_search import * diff --git a/nx_parallel/algorithms/traversal/breadth_first_search.py b/nx_parallel/algorithms/traversal/breadth_first_search.py new file mode 100644 index 00000000..899dc92b --- /dev/null +++ b/nx_parallel/algorithms/traversal/breadth_first_search.py @@ -0,0 +1,575 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "bfs_edges", + "bfs_tree", + "bfs_predecessors", + 
"bfs_successors", + "descendants_at_distance", + "bfs_layers", + "bfs_labeled_edges", + "generic_bfs_edges", +] + + +@nx._dispatchable +def generic_bfs_edges(G, source, neighbors=None, depth_limit=None): + """Iterate over edges in a breadth-first search. + + The breadth-first search begins at `source` and enqueues the + neighbors of newly visited nodes specified by the `neighbors` + function. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + neighbors : function + A function that takes a newly visited node of the graph as input + and returns an *iterator* (not just a list) of nodes that are + neighbors of that node with custom ordering. If not specified, this is + just the ``G.neighbors`` method, but in general it can be any function + that returns an iterator over some or all of the neighbors of a + given node, in any order. + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth. + + Yields + ------ + edge + Edges in the breadth-first search starting from `source`. + + Examples + -------- + >>> G = nx.path_graph(7) + >>> list(nx.generic_bfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)] + >>> list(nx.generic_bfs_edges(G, source=2)) + [(2, 1), (2, 3), (1, 0), (3, 4), (4, 5), (5, 6)] + >>> list(nx.generic_bfs_edges(G, source=2, depth_limit=2)) + [(2, 1), (2, 3), (1, 0), (3, 4)] + + The `neighbors` param can be used to specify the visitation order of each + node's neighbors generically. In the following example, we modify the default + neighbor to return *odd* nodes first: + + >>> def odd_first(n): + ... 
return sorted(G.neighbors(n), key=lambda x: x % 2, reverse=True) + + >>> G = nx.star_graph(5) + >>> list(nx.generic_bfs_edges(G, source=0)) # Default neighbor ordering + [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + >>> list(nx.generic_bfs_edges(G, source=0, neighbors=odd_first)) + [(0, 1), (0, 3), (0, 5), (0, 2), (0, 4)] + + Notes + ----- + This implementation is from `PADS`_, which was in the public domain + when it was first accessed in July, 2004. The modifications + to allow depth limits are based on the Wikipedia article + "`Depth-limited-search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + """ + if neighbors is None: + neighbors = G.neighbors + if depth_limit is None: + depth_limit = len(G) + + seen = {source} + n = len(G) + depth = 0 + next_parents_children = [(source, neighbors(source))] + while next_parents_children and depth < depth_limit: + this_parents_children = next_parents_children + next_parents_children = [] + for parent, children in this_parents_children: + for child in children: + if child not in seen: + seen.add(child) + next_parents_children.append((child, neighbors(child))) + yield parent, child + if len(seen) == n: + return + depth += 1 + + +@nx._dispatchable +def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Iterate over edges in a breadth-first-search starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search; this function + iterates over only those edges in the component reachable from + this node. 
+ + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the breadth-first search. + + Examples + -------- + To get the edges in a breadth-first search:: + + >>> G = nx.path_graph(3) + >>> list(nx.bfs_edges(G, 0)) + [(0, 1), (1, 2)] + >>> list(nx.bfs_edges(G, source=0, depth_limit=1)) + [(0, 1)] + + To get the nodes in a breadth-first search order:: + + >>> G = nx.path_graph(3) + >>> root = 2 + >>> edges = nx.bfs_edges(G, root) + >>> nodes = [root] + [v for u, v in edges] + >>> nodes + [2, 1, 0] + + Notes + ----- + The naming of this function is very similar to + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`. The difference + is that ``edge_bfs`` yields edges even if they extend back to an already + explored node while this generator yields the edges of the tree that results + from a breadth-first-search (BFS) so no edges are reported if they extend + to already explored nodes. That means ``edge_bfs`` reports all edges while + ``bfs_edges`` only reports those traversed by a node-based BFS. Yet another + description is that ``bfs_edges`` reports the edges traversed during BFS + while ``edge_bfs`` reports all edges in the order they are explored. + + Based on the breadth-first search implementation in PADS [1]_ + by D. Eppstein, July 2004; with modifications to allow depth limits + as described in [2]_. + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS/BFS.py. + .. 
[2] https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    bfs_tree
+    :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`
+    :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`
+
+    """
+    if reverse and G.is_directed():
+        successors = G.predecessors
+    else:
+        successors = G.neighbors
+
+    if sort_neighbors is not None:
+        yield from generic_bfs_edges(
+            G, source, lambda node: iter(sort_neighbors(successors(node))), depth_limit
+        )
+    else:
+        yield from generic_bfs_edges(G, source, successors, depth_limit)
+
+
+@nx._dispatchable(returns_graph=True)
+def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
+    """Returns an oriented tree constructed from a breadth-first-search
+    starting at source.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Specify starting node for breadth-first search
+
+    reverse : bool, optional
+        If True traverse a directed graph in the reverse direction
+
+    depth_limit : int, optional(default=len(G))
+        Specify the maximum search depth
+
+    sort_neighbors : function (default=None)
+        A function that takes an iterator over nodes as the input, and
+        returns an iterable of the same nodes with a custom ordering.
+        For example, `sorted` will sort the nodes in increasing order.
+
+    Returns
+    -------
+    T: NetworkX DiGraph
+       An oriented tree
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> list(nx.bfs_tree(G, 1).edges())
+    [(1, 0), (1, 2)]
+    >>> H = nx.Graph()
+    >>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6])
+    >>> nx.add_path(H, [2, 7, 8, 9, 10])
+    >>> sorted(list(nx.bfs_tree(H, source=3, depth_limit=3).edges()))
+    [(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)]
+
+
+    Notes
+    -----
+    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
+    by D. Eppstein, July 2004. The modifications
+    to allow depth limits based on the Wikipedia article
+    "`Depth-limited-search`_".
+
+    .. 
_Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_tree + bfs_edges + edge_bfs + """ + T = nx.DiGraph() + T.add_node(source) + edges_gen = bfs_edges( + G, + source, + reverse=reverse, + depth_limit=depth_limit, + sort_neighbors=sort_neighbors, + ) + T.add_edges_from(edges_gen) + return T + + +@nx._dispatchable +def bfs_predecessors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of predecessors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + pred: iterator + (node, predecessor) iterator where `predecessor` is the predecessor of + `node` in a breadth first search starting from `source`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> dict(nx.bfs_predecessors(G, 0)) + {1: 0, 2: 1} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> dict(nx.bfs_predecessors(H, 0)) + {1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2} + >>> M = nx.Graph() + >>> nx.add_path(M, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(M, [2, 7, 8, 9, 10]) + >>> sorted(nx.bfs_predecessors(M, source=1, depth_limit=3)) + [(0, 1), (2, 1), (3, 2), (4, 3), (7, 2), (8, 7)] + >>> N = nx.DiGraph() + >>> nx.add_path(N, [0, 1, 2, 3, 4, 7]) + >>> nx.add_path(N, [3, 5, 6, 7]) + >>> sorted(nx.bfs_predecessors(N, source=2)) + [(3, 2), (4, 3), (5, 3), (6, 5), (7, 4)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. 
The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + bfs_edges + edge_bfs + """ + for s, t in bfs_edges( + G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ): + yield (t, s) + + +@nx._dispatchable +def bfs_successors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of successors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + succ: iterator + (node, successors) iterator where `successors` is the non-empty list of + successors of `node` in a breadth first search from `source`. + To appear in the iterator, `node` must have successors. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> dict(nx.bfs_successors(G, 0)) + {0: [1], 1: [2]} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> dict(nx.bfs_successors(H, 0)) + {0: [1, 2], 1: [3, 4], 2: [5, 6]} + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(G, [2, 7, 8, 9, 10]) + >>> dict(nx.bfs_successors(G, source=1, depth_limit=3)) + {1: [0, 2], 2: [3, 7], 3: [4], 7: [8]} + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5]) + >>> dict(nx.bfs_successors(G, source=3)) + {3: [4], 4: [5]} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. 
Eppstein, July 2004. The modifications
+    to allow depth limits based on the Wikipedia article
+    "`Depth-limited-search`_".
+
+    .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
+
+    See Also
+    --------
+    bfs_tree
+    bfs_edges
+    edge_bfs
+    """
+    parent = source
+    children = []
+    for p, c in bfs_edges(
+        G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    ):
+        if p == parent:
+            children.append(c)
+            continue
+        yield (parent, children)
+        children = [c]
+        parent = p
+    yield (parent, children)
+
+
+@nx._dispatchable
+def bfs_layers(G, sources):
+    """Returns an iterator of all the layers in breadth-first search traversal.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A graph over which to find the layers using breadth-first search.
+
+    sources : node in `G` or list of nodes in `G`
+        Specify starting nodes for single source or multiple sources breadth-first search
+
+    Yields
+    ------
+    layer: list of nodes
+        Yields list of nodes at the same distance from sources
+
+    Examples
+    --------
+    >>> G = nx.path_graph(5)
+    >>> dict(enumerate(nx.bfs_layers(G, [0, 4])))
+    {0: [0, 4], 1: [1, 3], 2: [2]}
+    >>> H = nx.Graph()
+    >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])
+    >>> dict(enumerate(nx.bfs_layers(H, [1])))
+    {0: [1], 1: [0, 3, 4], 2: [2], 3: [5, 6]}
+    >>> dict(enumerate(nx.bfs_layers(H, [1, 6])))
+    {0: [1, 6], 1: [0, 3, 4, 2], 2: [5]}
+    """
+    if sources in G:
+        sources = [sources]
+
+    current_layer = list(sources)
+    visited = set(sources)
+
+    for source in current_layer:
+        if source not in G:
+            raise nx.NetworkXError(f"The node {source} is not in the graph.")
+
+    # this is basically BFS, except that the current layer only stores the nodes at
+    # same distance from sources at each iteration
+    while current_layer:
+        yield current_layer
+        next_layer = []
+        for node in current_layer:
+            for child in G[node]:
+                if child not in visited:
+                    visited.add(child)
+                    next_layer.append(child)
+        current_layer = next_layer
+
+ +REVERSE_EDGE = "reverse" +TREE_EDGE = "tree" +FORWARD_EDGE = "forward" +LEVEL_EDGE = "level" + + +@nx._dispatchable +def bfs_labeled_edges(G, sources): + """Iterate over edges in a breadth-first search (BFS) labeled by type. + + We generate triple of the form (*u*, *v*, *d*), where (*u*, *v*) is the + edge being explored in the breadth-first search and *d* is one of the + strings 'tree', 'forward', 'level', or 'reverse'. A 'tree' edge is one in + which *v* is first discovered and placed into the layer below *u*. A + 'forward' edge is one in which *u* is on the layer above *v* and *v* has + already been discovered. A 'level' edge is one in which both *u* and *v* + occur on the same layer. A 'reverse' edge is one in which *u* is on a layer + below *v*. + + We emit each edge exactly once. In an undirected graph, 'reverse' edges do + not occur, because each is discovered either as a 'tree' or 'forward' edge. + + Parameters + ---------- + G : NetworkX graph + A graph over which to find the layers using breadth-first search. + + sources : node in `G` or list of nodes in `G` + Starting nodes for single source or multiple sources breadth-first search + + Yields + ------ + edges: generator + A generator of triples (*u*, *v*, *d*) where (*u*, *v*) is the edge being + explored and *d* is described above. 
+ + Examples + -------- + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph) + >>> list(nx.bfs_labeled_edges(G, 0)) + [(0, 1, 'tree'), (1, 2, 'tree'), (2, 3, 'tree'), (3, 0, 'reverse')] + >>> G = nx.complete_graph(3) + >>> list(nx.bfs_labeled_edges(G, 0)) + [(0, 1, 'tree'), (0, 2, 'tree'), (1, 2, 'level')] + >>> list(nx.bfs_labeled_edges(G, [0, 1])) + [(0, 1, 'level'), (0, 2, 'tree'), (1, 2, 'forward')] + """ + if sources in G: + sources = [sources] + + neighbors = G._adj + directed = G.is_directed() + visited = set() + visit = visited.discard if directed else visited.add + # We use visited in a negative sense, so the visited set stays empty for the + # directed case and level edges are reported on their first occurrence in + # the undirected case. Note our use of visited.discard -- this is built-in + # thus somewhat faster than a python-defined def nop(x): pass + depth = {s: 0 for s in sources} + queue = deque(depth.items()) + push = queue.append + pop = queue.popleft + while queue: + u, du = pop() + for v in neighbors[u]: + if v not in depth: + depth[v] = dv = du + 1 + push((v, dv)) + yield u, v, TREE_EDGE + else: + dv = depth[v] + if du == dv: + if v not in visited: + yield u, v, LEVEL_EDGE + elif du < dv: + yield u, v, FORWARD_EDGE + elif directed: + yield u, v, REVERSE_EDGE + visit(u) + + +@nx._dispatchable +def descendants_at_distance(G, source, distance): + """Returns all nodes at a fixed `distance` from `source` in `G`. 
+ + Parameters + ---------- + G : NetworkX graph + A graph + source : node in `G` + distance : the distance of the wanted nodes from `source` + + Returns + ------- + set() + The descendants of `source` in `G` at the given `distance` from `source` + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.descendants_at_distance(G, 2, 2) + {0, 4} + >>> H = nx.DiGraph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> nx.descendants_at_distance(H, 0, 2) + {3, 4, 5, 6} + >>> nx.descendants_at_distance(H, 5, 0) + {5} + >>> nx.descendants_at_distance(H, 5, 1) + set() + """ + if source not in G: + raise nx.NetworkXError(f"The node {source} is not in the graph.") + + bfs_generator = nx.bfs_layers(G, source) + for i, layer in enumerate(bfs_generator): + if i == distance: + return set(layer) + return set() diff --git a/nx_parallel/algorithms/traversal/depth_first_search.py b/nx_parallel/algorithms/traversal/depth_first_search.py new file mode 100644 index 00000000..5bac5ecf --- /dev/null +++ b/nx_parallel/algorithms/traversal/depth_first_search.py @@ -0,0 +1,529 @@ +"""Basic algorithms for depth-first searching the nodes of a graph.""" + +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "dfs_edges", + "dfs_tree", + "dfs_predecessors", + "dfs_successors", + "dfs_preorder_nodes", + "dfs_postorder_nodes", + "dfs_labeled_edges", +] + + +@nx._dispatchable +def dfs_edges(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Iterate over edges in a depth-first-search (DFS). + + Perform a depth-first-search over the nodes of `G` and yield + the edges in order. This may not generate all edges in `G` + (see `~networkx.algorithms.traversal.edgedfs.edge_dfs`). + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and yield edges in + the component reachable from source. 
+ + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the depth-first-search. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4)] + >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) + [(0, 1), (1, 2)] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in PADS [1]_, with modifications + to allow depth limits based on the Wikipedia article + "Depth-limited search" [2]_. + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS + .. 
[2] https://en.wikipedia.org/wiki/Depth-limited_search + """ + if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + get_children = ( + G.neighbors + if sort_neighbors is None + else lambda n: iter(sort_neighbors(G.neighbors(n))) + ) + + visited = set() + for start in nodes: + if start in visited: + continue + visited.add(start) + stack = [(start, get_children(start))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child not in visited: + yield parent, child + visited.add(child) + if depth_now < depth_limit: + stack.append((child, get_children(child))) + depth_now += 1 + break + else: + stack.pop() + depth_now -= 1 + + +@nx._dispatchable(returns_graph=True) +def dfs_tree(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns oriented tree constructed from a depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. 
+ + Returns + ------- + T : NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(5) + >>> T = nx.dfs_tree(G, source=0, depth_limit=2) + >>> list(T.edges()) + [(0, 1), (1, 2)] + >>> T = nx.dfs_tree(G, source=0) + >>> list(T.edges()) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + T = nx.DiGraph() + if source is None: + T.add_nodes_from(G) + else: + T.add_node(source) + T.add_edges_from(dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors)) + return T + + +@nx._dispatchable +def dfs_predecessors(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns dictionary of predecessors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get predecessors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.dfs_predecessors(G, source=0) + {1: 0, 2: 1, 3: 2} + >>> nx.dfs_predecessors(G, source=0, depth_limit=2) + {1: 0, 2: 1} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
+ + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + return { + t: s + for s, t in dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors) + } + + +@nx._dispatchable +def dfs_successors(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Returns dictionary of successors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get successors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of successor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dfs_successors(G, source=0) + {0: [1], 1: [2], 2: [3], 3: [4]} + >>> nx.dfs_successors(G, source=0, depth_limit=2) + {0: [1], 1: [2]} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
+ + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + d = defaultdict(list) + for s, t in dfs_edges( + G, + source=source, + depth_limit=depth_limit, + sort_neighbors=sort_neighbors, + ): + d[s].append(t) + return dict(d) + + +@nx._dispatchable +def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Generate nodes in a depth-first-search post-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search post-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_postorder_nodes(G, source=0)) + [4, 3, 2, 1, 0] + >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) + [1, 0] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
+ + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` + """ + edges = nx.dfs_labeled_edges( + G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ) + return (v for u, v, d in edges if d == "reverse") + + +@nx._dispatchable +def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Generate nodes in a depth-first-search pre-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return nodes in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search pre-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_preorder_nodes(G, source=0)) + [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) + [0, 1, 2] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. 
+ + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + """ + edges = nx.dfs_labeled_edges( + G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ) + return (v for u, v, d in edges if d == "forward") + + +@nx._dispatchable +def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None): + """Iterate over edges in a depth-first-search (DFS) labeled by type. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + sort_neighbors : function (default=None) + A function that takes an iterator over nodes as the input, and + returns an iterable of the same nodes with a custom ordering. + For example, `sorted` will sort the nodes in increasing order. + + Returns + ------- + edges: generator + A generator of triples of the form (*u*, *v*, *d*), where (*u*, + *v*) is the edge being explored in the depth-first search and *d* + is one of the strings 'forward', 'nontree', 'reverse', or 'reverse-depth_limit'. + A 'forward' edge is one in which *u* has been visited but *v* has + not. A 'nontree' edge is one in which both *u* and *v* have been + visited but the edge is not in the DFS tree. A 'reverse' edge is + one in which both *u* and *v* have been visited and the edge is in + the DFS tree. 
When the `depth_limit` is reached via a 'forward' edge, + a 'reverse' edge is immediately generated rather than the subtree + being explored. To indicate this flavor of 'reverse' edge, the string + yielded is 'reverse-depth_limit'. + + Examples + -------- + + The labels reveal the complete transcript of the depth-first search + algorithm in more detail than, for example, :func:`dfs_edges`:: + + >>> from pprint import pprint + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)]) + >>> pprint(list(nx.dfs_labeled_edges(G, source=0))) + [(0, 0, 'forward'), + (0, 1, 'forward'), + (1, 2, 'forward'), + (2, 1, 'nontree'), + (1, 2, 'reverse'), + (0, 1, 'reverse'), + (0, 0, 'reverse')] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_postorder_nodes + """ + # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + # by D. Eppstein, July 2004. 
+ if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + get_children = ( + G.neighbors + if sort_neighbors is None + else lambda n: iter(sort_neighbors(G.neighbors(n))) + ) + + visited = set() + for start in nodes: + if start in visited: + continue + yield start, start, "forward" + visited.add(start) + stack = [(start, get_children(start))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child in visited: + yield parent, child, "nontree" + else: + yield parent, child, "forward" + visited.add(child) + if depth_now < depth_limit: + stack.append((child, iter(get_children(child)))) + depth_now += 1 + break + else: + yield parent, child, "reverse-depth_limit" + else: + stack.pop() + depth_now -= 1 + if stack: + yield stack[-1][0], parent, "reverse" + yield start, start, "reverse" From 2b3ad04426440a55177f13951ac767acbfc708b6 Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Sun, 22 Dec 2024 16:46:49 +0530 Subject: [PATCH 02/11] Add parallel BFS implementation leveraging embarrassingly parallel computation --- _nx_parallel/__init__.py | 154 +---- .../traversal/breadth_first_search.py | 617 ++---------------- .../traversal/depth_first_search.py | 529 --------------- 3 files changed, 77 insertions(+), 1223 deletions(-) delete mode 100644 nx_parallel/algorithms/traversal/depth_first_search.py diff --git a/_nx_parallel/__init__.py b/_nx_parallel/__init__.py index 707243f4..a6602af8 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -90,64 +90,6 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." 
}, }, - "bfs_edges": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L109", - "additional_docs": "Iterate over edges in a breadth-first-search starting at source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node": "Specify starting node for breadth-first search; this function iterates over only those edges in the component reachable from this node.", - "reverse : bool, optional": "If True traverse a directed graph in the reverse direction", - "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "bfs_labeled_edges": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L464", - "additional_docs": "Iterate over edges in a breadth-first search (BFS) labeled by type.", - "additional_parameters": { - "G : NetworkX graph": "A graph over which to find the layers using breadth-first search.", - "sources : node in `G` or list of nodes in `G`": "Starting nodes for single source or multiple sources breadth-first search", - }, - }, - "bfs_layers": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L406", - "additional_docs": "Returns an iterator of all the layers in breadth-first search traversal.", - "additional_parameters": { - "G : NetworkX graph": "A graph over which to find the layers using breadth-first search.", - "sources : node in `G` or list of nodes in `G`": "Specify starting nodes for single source or multiple sources breadth-first search", - }, - }, - "bfs_predecessors": { - "url": 
"https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L266", - "additional_docs": "Returns an iterator of predecessors in breadth-first-search from source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node": "Specify starting node for breadth-first search", - "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "bfs_successors": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L332", - "additional_docs": "Returns an iterator of successors in breadth-first-search from source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node": "Specify starting node for breadth-first search", - "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "bfs_tree": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L198", - "additional_docs": "Returns an oriented tree constructed from of a breadth-first-search starting at source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node": "Specify starting node for breadth-first search", - "reverse : bool, optional": "If True traverse a directed graph in the reverse direction", - "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, "closeness_vitality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/vitality.py#L10", "additional_docs": "The parallel computation is implemented only when the node is not specified. The closeness vitality for each node is computed concurrently.", @@ -155,84 +97,6 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." 
}, }, - "descendants_at_distance": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L539", - "additional_docs": "Returns all nodes at a fixed `distance` from `source` in `G`.", - "additional_parameters": { - "G : NetworkX graph": "A graph source : node in `G` distance : the distance of the wanted nodes from `source`" - }, - }, - "dfs_edges": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L19", - "additional_docs": "Iterate over edges in a depth-first-search (DFS).", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search and yield edges in the component reachable from source.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_labeled_edges": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L414", - "additional_docs": "Iterate over edges in a depth-first-search (DFS) labeled by type.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search and return edges in the component reachable from source.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_postorder_nodes": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L296", - "additional_docs": "Generate nodes in a depth-first-search post-ordering starting at source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_predecessors": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L167", - "additional_docs": "Returns dictionary of predecessors in depth-first-search from source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search. Note that you will get predecessors for all nodes in the component containing `source`. This input only specifies where the DFS starts.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_preorder_nodes": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L355", - "additional_docs": "Generate nodes in a depth-first-search pre-ordering starting at source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search and return nodes in the component reachable from source.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_successors": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L229", - "additional_docs": "Returns dictionary of successors in depth-first-search from source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search. Note that you will get successors for all nodes in the component containing `source`. This input only specifies where the DFS starts.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. 
For example, `sorted` will sort the nodes in increasing order.", - }, - }, - "dfs_tree": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/depth_first_search.py#L116", - "additional_docs": "Returns oriented tree constructed from a depth-first-search from source.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node, optional": "Specify starting node for depth-first search.", - "depth_limit : int, optional (default=len(G))": "Specify the maximum search depth.", - "sort_neighbors : function (default=None)": "A function that takes an iterator over nodes as the input, and returns an iterable of the same nodes with a custom ordering. For example, `sorted` will sort the nodes in increasing order.", - "T : NetworkX DiGraph": "An oriented tree", - }, - }, "edge_betweenness_centrality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/betweenness.py#L96", "additional_docs": "The parallel computation is implemented by dividing the nodes into chunks and computing edge betweenness centrality for each chunk concurrently.", @@ -240,16 +104,6 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." 
}, }, - "generic_bfs_edges": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L20", - "additional_docs": "Iterate over edges in a breadth-first search.", - "additional_parameters": { - "G : NetworkX graph": "", - "source : node": "Starting node for the breadth-first search; this function iterates over only those edges in the component reachable from this node.", - "neighbors : function": "A function that takes a newly visited node of the graph as input and returns an *iterator* (not just a list) of nodes that are neighbors of that node with custom ordering. If not specified, this is just the ``G.neighbors`` method, but in general it can be any function that returns an iterator over some or all of the neighbors of a given node, in any order.", - "depth_limit : int, optional(default=len(G))": "Specify the maximum search depth.", - }, - }, "is_reachable": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/tournament.py#L13", "additional_docs": "The function parallelizes the calculation of two neighborhoods of vertices in `G` and checks closure conditions for each neighborhood subset in parallel.", @@ -285,6 +139,14 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the isolated nodes as input and returns an iterable `isolate_chunks`. The default chunking is done by slicing the `isolates` into `n_jobs` number of chunks." }, }, + "parallel_bfs": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/traversal/breadth_first_search.py#L10", + "additional_docs": "Perform a parallelized Breadth-First Search (BFS) on the graph.", + "additional_parameters": { + "G : graph": 'A NetworkX graph. source : node, optional Starting node for the BFS traversal. If None, BFS is performed for all nodes. 
get_chunks : str or function (default="chunks") A function to divide nodes into chunks for parallel processing. If "chunks", the nodes are split into `n_jobs` chunks automatically. n_jobs : int, optional Number of jobs to run in parallel. If None, defaults to the number of CPUs.', + "bfs_result : dict": "A dictionary where keys are nodes and values are their BFS traversal order.", + }, + }, "square_clustering": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/cluster.py#L11", "additional_docs": "The nodes are chunked into `node_chunks` and then the square clustering coefficient for all `node_chunks` are computed in parallel over `n_jobs` number of CPU cores.", diff --git a/nx_parallel/algorithms/traversal/breadth_first_search.py b/nx_parallel/algorithms/traversal/breadth_first_search.py index 899dc92b..7a90a577 100644 --- a/nx_parallel/algorithms/traversal/breadth_first_search.py +++ b/nx_parallel/algorithms/traversal/breadth_first_search.py @@ -1,575 +1,96 @@ -"""Basic algorithms for breadth-first searching the nodes of a graph.""" +from joblib import Parallel, delayed +from networkx.utils import py_random_state +import nx_parallel as nxp -from collections import deque +__all__ = ["parallel_bfs"] -import networkx as nx -__all__ = [ - "bfs_edges", - "bfs_tree", - "bfs_predecessors", - "bfs_successors", - "descendants_at_distance", - "bfs_layers", - "bfs_labeled_edges", - "generic_bfs_edges", -] - - -@nx._dispatchable -def generic_bfs_edges(G, source, neighbors=None, depth_limit=None): - """Iterate over edges in a breadth-first search. - - The breadth-first search begins at `source` and enqueues the - neighbors of newly visited nodes specified by the `neighbors` - function. - - Parameters - ---------- - G : NetworkX graph - - source : node - Starting node for the breadth-first search; this function - iterates over only those edges in the component reachable from - this node. 
- - neighbors : function - A function that takes a newly visited node of the graph as input - and returns an *iterator* (not just a list) of nodes that are - neighbors of that node with custom ordering. If not specified, this is - just the ``G.neighbors`` method, but in general it can be any function - that returns an iterator over some or all of the neighbors of a - given node, in any order. - - depth_limit : int, optional(default=len(G)) - Specify the maximum search depth. - - Yields - ------ - edge - Edges in the breadth-first search starting from `source`. - - Examples - -------- - >>> G = nx.path_graph(7) - >>> list(nx.generic_bfs_edges(G, source=0)) - [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)] - >>> list(nx.generic_bfs_edges(G, source=2)) - [(2, 1), (2, 3), (1, 0), (3, 4), (4, 5), (5, 6)] - >>> list(nx.generic_bfs_edges(G, source=2, depth_limit=2)) - [(2, 1), (2, 3), (1, 0), (3, 4)] - - The `neighbors` param can be used to specify the visitation order of each - node's neighbors generically. In the following example, we modify the default - neighbor to return *odd* nodes first: - - >>> def odd_first(n): - ... return sorted(G.neighbors(n), key=lambda x: x % 2, reverse=True) - - >>> G = nx.star_graph(5) - >>> list(nx.generic_bfs_edges(G, source=0)) # Default neighbor ordering - [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] - >>> list(nx.generic_bfs_edges(G, source=0, neighbors=odd_first)) - [(0, 1), (0, 3), (0, 5), (0, 2), (0, 4)] - - Notes - ----- - This implementation is from `PADS`_, which was in the public domain - when it was first accessed in July, 2004. The modifications - to allow depth limits are based on the Wikipedia article - "`Depth-limited-search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py - .. 
@nxp._configure_if_nx_active()
def parallel_bfs(G, source=None, get_chunks="chunks", n_jobs=None):
    """Perform a parallelized Breadth-First Search (BFS) on the graph.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    source : node, optional
        Starting node for the BFS traversal. If None, a BFS is started
        from every node of the graph.
    get_chunks : str or function (default="chunks")
        A function to divide nodes into chunks for parallel processing.
        If "chunks", the nodes are split into `n_jobs` chunks automatically.
    n_jobs : int, optional
        Number of jobs to run in parallel. If None, defaults to the
        number of jobs configured for nx-parallel.

    Returns
    -------
    bfs_result : dict
        A dictionary where keys are nodes and values are their BFS
        traversal order (0-based, relative to the starting node that
        first reached them).
    """
    # Unwrap the nx-parallel graph wrapper so workers get a plain graph.
    if hasattr(G, "graph_object"):
        G = G.graph_object

    nodes = G.nodes if source is None else [source]

    if n_jobs is None:
        n_jobs = nxp.get_n_jobs()

    # NOTE: no @py_random_state here -- the algorithm is deterministic,
    # and decorating position 3 would have replaced `n_jobs` with a
    # random.Random instance.
    if get_chunks == "chunks":
        node_chunks = nxp.create_iterables(G, "node", n_jobs, nodes)
    else:
        node_chunks = get_chunks(nodes)

    # Traverse each chunk independently in a worker.
    bfs_results = Parallel(n_jobs=n_jobs)(
        delayed(_bfs_chunk)(G, chunk) for chunk in node_chunks
    )

    # Merge per-chunk dictionaries; later chunks win on key collisions,
    # mirroring the sequential merge order.
    combined_result = {}
    for result in bfs_results:
        combined_result.update(result)

    return combined_result


def _bfs_chunk(G, nodes):
    """Perform BFS for a subset of starting nodes.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodes : iterable
        Starting nodes for the BFS traversals.

    Returns
    -------
    bfs_result : dict
        Mapping of each reached node to its 0-based traversal order,
        assigned by the first traversal that discovered it.
    """
    bfs_result = {}
    for start in nodes:
        # Skip sources already discovered by an earlier traversal so
        # their previously assigned orders are not overwritten.
        if start in bfs_result:
            continue
        visited = {start}
        queue = [start]
        head = 0  # index-based head: O(1) dequeue instead of list.pop(0)
        order = 0
        while head < len(queue):
            current = queue[head]
            head += 1
            bfs_result[current] = order
            order += 1
            for neighbor in G.neighbors(current):
                # Mark on enqueue (not on dequeue) so a node can never be
                # queued twice.
                if neighbor not in visited:
                    visited.add(neighbor)
                    queue.append(neighbor)
    return bfs_result
- For example, `sorted` will sort the nodes in increasing order. - - Yields - ------ - edge: 2-tuple of nodes - Yields edges resulting from the depth-first-search. - - Examples - -------- - >>> G = nx.path_graph(5) - >>> list(nx.dfs_edges(G, source=0)) - [(0, 1), (1, 2), (2, 3), (3, 4)] - >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) - [(0, 1), (1, 2)] - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. - - The implementation of this function is adapted from David Eppstein's - depth-first search function in PADS [1]_, with modifications - to allow depth limits based on the Wikipedia article - "Depth-limited search" [2]_. - - See Also - -------- - dfs_preorder_nodes - dfs_postorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` - - References - ---------- - .. [1] http://www.ics.uci.edu/~eppstein/PADS - .. 
[2] https://en.wikipedia.org/wiki/Depth-limited_search - """ - if source is None: - # edges for all components - nodes = G - else: - # edges for components with source - nodes = [source] - if depth_limit is None: - depth_limit = len(G) - - get_children = ( - G.neighbors - if sort_neighbors is None - else lambda n: iter(sort_neighbors(G.neighbors(n))) - ) - - visited = set() - for start in nodes: - if start in visited: - continue - visited.add(start) - stack = [(start, get_children(start))] - depth_now = 1 - while stack: - parent, children = stack[-1] - for child in children: - if child not in visited: - yield parent, child - visited.add(child) - if depth_now < depth_limit: - stack.append((child, get_children(child))) - depth_now += 1 - break - else: - stack.pop() - depth_now -= 1 - - -@nx._dispatchable(returns_graph=True) -def dfs_tree(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Returns oriented tree constructed from a depth-first-search from source. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. 
- - Returns - ------- - T : NetworkX DiGraph - An oriented tree - - Examples - -------- - >>> G = nx.path_graph(5) - >>> T = nx.dfs_tree(G, source=0, depth_limit=2) - >>> list(T.edges()) - [(0, 1), (1, 2)] - >>> T = nx.dfs_tree(G, source=0) - >>> list(T.edges()) - [(0, 1), (1, 2), (2, 3), (3, 4)] - - See Also - -------- - dfs_preorder_nodes - dfs_postorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` - """ - T = nx.DiGraph() - if source is None: - T.add_nodes_from(G) - else: - T.add_node(source) - T.add_edges_from(dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors)) - return T - - -@nx._dispatchable -def dfs_predecessors(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Returns dictionary of predecessors in depth-first-search from source. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search. - Note that you will get predecessors for all nodes in the - component containing `source`. This input only specifies - where the DFS starts. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. - - Returns - ------- - pred: dict - A dictionary with nodes as keys and predecessor nodes as values. - - Examples - -------- - >>> G = nx.path_graph(4) - >>> nx.dfs_predecessors(G, source=0) - {1: 0, 2: 1, 3: 2} - >>> nx.dfs_predecessors(G, source=0, depth_limit=2) - {1: 0, 2: 1} - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. 
- - The implementation of this function is adapted from David Eppstein's - depth-first search function in `PADS`_, with modifications - to allow depth limits based on the Wikipedia article - "`Depth-limited search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS - .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search - - See Also - -------- - dfs_preorder_nodes - dfs_postorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` - """ - return { - t: s - for s, t in dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors) - } - - -@nx._dispatchable -def dfs_successors(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Returns dictionary of successors in depth-first-search from source. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search. - Note that you will get successors for all nodes in the - component containing `source`. This input only specifies - where the DFS starts. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. - - Returns - ------- - succ: dict - A dictionary with nodes as keys and list of successor nodes as values. - - Examples - -------- - >>> G = nx.path_graph(5) - >>> nx.dfs_successors(G, source=0) - {0: [1], 1: [2], 2: [3], 3: [4]} - >>> nx.dfs_successors(G, source=0, depth_limit=2) - {0: [1], 1: [2]} - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. 
- - The implementation of this function is adapted from David Eppstein's - depth-first search function in `PADS`_, with modifications - to allow depth limits based on the Wikipedia article - "`Depth-limited search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS - .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search - - See Also - -------- - dfs_preorder_nodes - dfs_postorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` - """ - d = defaultdict(list) - for s, t in dfs_edges( - G, - source=source, - depth_limit=depth_limit, - sort_neighbors=sort_neighbors, - ): - d[s].append(t) - return dict(d) - - -@nx._dispatchable -def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Generate nodes in a depth-first-search post-ordering starting at source. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. - - Returns - ------- - nodes: generator - A generator of nodes in a depth-first-search post-ordering. - - Examples - -------- - >>> G = nx.path_graph(5) - >>> list(nx.dfs_postorder_nodes(G, source=0)) - [4, 3, 2, 1, 0] - >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) - [1, 0] - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. 
- - The implementation of this function is adapted from David Eppstein's - depth-first search function in `PADS`_, with modifications - to allow depth limits based on the Wikipedia article - "`Depth-limited search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS - .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search - - See Also - -------- - dfs_edges - dfs_preorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree` - """ - edges = nx.dfs_labeled_edges( - G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors - ) - return (v for u, v, d in edges if d == "reverse") - - -@nx._dispatchable -def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Generate nodes in a depth-first-search pre-ordering starting at source. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search and return nodes in - the component reachable from source. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. - - Returns - ------- - nodes: generator - A generator of nodes in a depth-first-search pre-ordering. - - Examples - -------- - >>> G = nx.path_graph(5) - >>> list(nx.dfs_preorder_nodes(G, source=0)) - [0, 1, 2, 3, 4] - >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) - [0, 1, 2] - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. 
- - The implementation of this function is adapted from David Eppstein's - depth-first search function in `PADS`_, with modifications - to allow depth limits based on the Wikipedia article - "`Depth-limited search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS - .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search - - See Also - -------- - dfs_edges - dfs_postorder_nodes - dfs_labeled_edges - :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` - """ - edges = nx.dfs_labeled_edges( - G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors - ) - return (v for u, v, d in edges if d == "forward") - - -@nx._dispatchable -def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None): - """Iterate over edges in a depth-first-search (DFS) labeled by type. - - Parameters - ---------- - G : NetworkX graph - - source : node, optional - Specify starting node for depth-first search and return edges in - the component reachable from source. - - depth_limit : int, optional (default=len(G)) - Specify the maximum search depth. - - sort_neighbors : function (default=None) - A function that takes an iterator over nodes as the input, and - returns an iterable of the same nodes with a custom ordering. - For example, `sorted` will sort the nodes in increasing order. - - Returns - ------- - edges: generator - A generator of triples of the form (*u*, *v*, *d*), where (*u*, - *v*) is the edge being explored in the depth-first search and *d* - is one of the strings 'forward', 'nontree', 'reverse', or 'reverse-depth_limit'. - A 'forward' edge is one in which *u* has been visited but *v* has - not. A 'nontree' edge is one in which both *u* and *v* have been - visited but the edge is not in the DFS tree. A 'reverse' edge is - one in which both *u* and *v* have been visited and the edge is in - the DFS tree. 
When the `depth_limit` is reached via a 'forward' edge, - a 'reverse' edge is immediately generated rather than the subtree - being explored. To indicate this flavor of 'reverse' edge, the string - yielded is 'reverse-depth_limit'. - - Examples - -------- - - The labels reveal the complete transcript of the depth-first search - algorithm in more detail than, for example, :func:`dfs_edges`:: - - >>> from pprint import pprint - >>> - >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)]) - >>> pprint(list(nx.dfs_labeled_edges(G, source=0))) - [(0, 0, 'forward'), - (0, 1, 'forward'), - (1, 2, 'forward'), - (2, 1, 'nontree'), - (1, 2, 'reverse'), - (0, 1, 'reverse'), - (0, 0, 'reverse')] - - Notes - ----- - If a source is not specified then a source is chosen arbitrarily and - repeatedly until all components in the graph are searched. - - The implementation of this function is adapted from David Eppstein's - depth-first search function in `PADS`_, with modifications - to allow depth limits based on the Wikipedia article - "`Depth-limited search`_". - - .. _PADS: http://www.ics.uci.edu/~eppstein/PADS - .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search - - See Also - -------- - dfs_edges - dfs_preorder_nodes - dfs_postorder_nodes - """ - # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py - # by D. Eppstein, July 2004. 
from functools import partial

from joblib import Parallel, delayed
import networkx as nx
import nx_parallel as nxp  # fixed: `networkx.parallel` does not exist

__all__ = ["voterank_parallel", "harmonic_centrality_parallel"]


@nxp._configure_if_nx_active()
def voterank_parallel(G, number_of_nodes=None, get_chunks="chunks"):
    """Parallelized VoteRank algorithm using joblib.

    Each election round scores every node in parallel (one chunk of nodes
    per job), elects the top scorer, then weakens the voting ability of
    its neighbors, following ``networkx.voterank``.

    Parameters
    ----------
    G : networkx.Graph
        Input graph (directed or undirected).
    number_of_nodes : int, optional
        Number of ranked nodes to extract (default: all nodes).
    get_chunks : str, function (default = "chunks")
        A function that takes in a list of all the nodes as input and
        returns an iterable `node_chunks`. The default chunking is done by
        slicing the `nodes` into `n_jobs` number of chunks.

    Returns
    -------
    influential_nodes : list
        List of influential nodes ranked by VoteRank.
    """
    if hasattr(G, "graph_object"):
        G = G.graph_object

    if len(G) == 0:
        return []

    if number_of_nodes is None or number_of_nodes > len(G):
        number_of_nodes = len(G)

    n_jobs = nxp.get_n_jobs()

    nodes = list(G.nodes())
    # Materialize the chunks: a generator would be exhausted after the
    # first election round, leaving later rounds with nothing to score.
    if get_chunks == "chunks":
        node_chunks = list(nxp.create_iterables(G, "node", n_jobs, nodes))
    else:
        node_chunks = list(get_chunks(nodes))

    # vote_rank[n] = [score, voting_ability]
    vote_rank = {n: [0, 1] for n in G.nodes()}
    avg_degree = sum(deg for _, deg in G.degree()) / len(G)
    # A node is voted for by its in-neighbors (directed) or neighbors
    # (undirected), matching networkx's VoteRank voting direction.
    voters = G.predecessors if G.is_directed() else G.neighbors

    def process_chunk(chunk):
        """Compute this round's VoteRank score for each node in `chunk`.

        Only nodes of the chunk are scored, so chunk results merge
        without touching nodes owned by other chunks (the previous
        version iterated all of ``G.edges()`` and raised KeyError for
        nodes outside the chunk).
        """
        return {n: sum(vote_rank[v][1] for v in voters(n)) for n in chunk}

    influential_nodes = []

    for _ in range(number_of_nodes):
        vote_chunks = Parallel(n_jobs=n_jobs)(
            delayed(process_chunk)(chunk) for chunk in node_chunks
        )

        # Merge partial scores (scores are recomputed each round, so
        # assign rather than accumulate across rounds).
        for chunk_result in vote_chunks:
            for node, score in chunk_result.items():
                vote_rank[node][0] = score

        # Already-elected nodes must not be elected again.
        for n in influential_nodes:
            vote_rank[n][0] = 0

        top_node = max(G.nodes, key=lambda x: vote_rank[x][0])
        if vote_rank[top_node][0] == 0:
            break  # nobody received any votes; stop early
        influential_nodes.append(top_node)

        # The elected node no longer votes or scores; its neighbors lose
        # 1/avg_degree voting ability, floored at 0.
        vote_rank[top_node] = [0, 0]
        for _, nbr in G.edges(top_node):
            vote_rank[nbr][1] = max(vote_rank[nbr][1] - 1 / avg_degree, 0)

    return influential_nodes


@nxp._configure_if_nx_active()
def harmonic_centrality_parallel(
    G, nbunch=None, distance=None, sources=None, get_chunks="chunks"
):
    """Compute harmonic centrality in parallel.

    Sources are split into chunks and each chunk's shortest-path sweeps
    run in a separate job; partial reciprocal-distance sums are merged at
    the end, following ``networkx.harmonic_centrality``.

    Parameters
    ----------
    G : NetworkX graph
        A graph (directed or undirected).
    nbunch : container, optional (default: all nodes in G)
        Nodes for which harmonic centrality is calculated.
    distance : edge attribute key, optional (default: None)
        Use the specified edge attribute as the edge weight.
    sources : container, optional (default: all nodes in G)
        Nodes from which reciprocal distances are computed.
    get_chunks : str, function (default = "chunks")
        Function that takes a list of nodes as input and returns an
        iterable `node_chunks`.

    Returns
    -------
    dict
        Dictionary of nodes with harmonic centrality values.
    """
    if hasattr(G, "graph_object"):
        G = G.graph_object

    nbunch = set(G.nbunch_iter(nbunch) if nbunch is not None else G.nodes)
    sources = set(G.nbunch_iter(sources) if sources is not None else G.nodes)

    # Result keys are the *original* nbunch, even if roles are swapped
    # below -- so this dict must be built before the transpose.
    centrality = {u: 0 for u in nbunch}

    transposed = False
    if len(nbunch) < len(sources):
        # Fewer targets than sources: swap roles and sweep the reversed
        # graph so each shortest-path sweep starts from the smaller side
        # (same trick as networkx.harmonic_centrality).
        transposed = True
        nbunch, sources = sources, nbunch
        if nx.is_directed(G):
            G = nx.reverse(G, copy=False)

    n_jobs = nxp.get_n_jobs()

    nodes = list(sources)
    if get_chunks == "chunks":
        node_chunks = nxp.create_iterables(G, "node", n_jobs, nodes)
    else:
        node_chunks = get_chunks(nodes)

    spl = partial(nx.shortest_path_length, G, weight=distance)

    def process_chunk(chunk):
        """Accumulate reciprocal distances for the sources in `chunk`.

        The partial result is keyed by *result* nodes, which lie in
        `nbunch` rather than in `chunk` when not transposed, so the dict
        is built lazily (pre-initializing it from `chunk` raised
        KeyError and polluted the merge with source-keyed zeros).
        """
        local_centrality = {}
        for v in chunk:
            dist = spl(v)
            for u in nbunch.intersection(dist):
                d = dist[u]
                if d == 0:  # a node contributes nothing to itself
                    continue
                key = v if transposed else u
                local_centrality[key] = local_centrality.get(key, 0) + 1 / d
        return local_centrality

    results = Parallel(n_jobs=n_jobs)(
        delayed(process_chunk)(chunk) for chunk in node_chunks
    )

    # Merge partial sums into the nbunch-keyed result.
    for result in results:
        for node, value in result.items():
            centrality[node] += value

    return centrality
b/_nx_parallel/__init__.py index a6602af8..d7f1d3e9 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -104,6 +104,13 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, + "harmonic_centrality_parallel": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/harmonic.py#L10", + "additional_docs": "Compute harmonic centrality in parallel.", + "additional_parameters": { + "G : NetworkX graph": 'A graph (directed or undirected). nbunch : container, optional (default: all nodes in G) Nodes for which harmonic centrality is calculated. sources : container, optional (default: all nodes in G) Nodes from which reciprocal distances are computed. distance : edge attribute key, optional (default: None) Use the specified edge attribute as the edge weight. get_chunks : str, function (default = "chunks") Function that takes a list of nodes as input and returns an iterable `node_chunks`.' + }, + }, "is_reachable": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/tournament.py#L13", "additional_docs": "The function parallelizes the calculation of two neighborhoods of vertices in `G` and checks closure conditions for each neighborhood subset in parallel.", @@ -161,5 +168,13 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." 
}, }, + "voterank_parallel": { + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/voterank.py#L10", + "additional_docs": "Parallelized VoteRank Algorithm using joblib.", + "additional_parameters": { + "G : networkx.Graph": 'Input graph. number_of_nodes : int, optional Number of ranked nodes to extract (default: all nodes). get_chunks : str, function (default = "chunks") A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks.', + "influential_nodes : list": "List of influential nodes ranked by VoteRank.", + }, + }, }, } diff --git a/nx_parallel/algorithms/centrality/harmonic.py b/nx_parallel/algorithms/centrality/harmonic.py index e7a13b19..0274c192 100644 --- a/nx_parallel/algorithms/centrality/harmonic.py +++ b/nx_parallel/algorithms/centrality/harmonic.py @@ -5,12 +5,15 @@ __all__ = ["harmonic_centrality_parallel"] + @nxp._configure_if_nx_active() -def harmonic_centrality_parallel(G, nbunch=None, distance=None, sources=None, get_chunks="chunks"): +def harmonic_centrality_parallel( + G, nbunch=None, distance=None, sources=None, get_chunks="chunks" +): """Compute harmonic centrality in parallel. This implementation follows the approach used in betweenness centrality parallelization. 
- + Parameters ---------- G : NetworkX graph @@ -81,4 +84,3 @@ def process_chunk(chunk): centrality[node] += value return centrality - diff --git a/nx_parallel/algorithms/centrality/voterank.py b/nx_parallel/algorithms/centrality/voterank.py index 34ea2548..7c123c87 100644 --- a/nx_parallel/algorithms/centrality/voterank.py +++ b/nx_parallel/algorithms/centrality/voterank.py @@ -1,10 +1,10 @@ from joblib import Parallel, delayed -import networkx as nx import networkx.utils as nxu import networkx.parallel as nxp __all__ = ["voterank_parallel"] + @nxp._configure_if_nx_active() @nxu.py_random_state(5) def voterank_parallel( @@ -62,14 +62,14 @@ def voterank_parallel( def process_chunk(chunk): """Process a chunk of nodes and compute VoteRank scores.""" local_vote_rank = {n: [0, 1] for n in chunk} - + for n in chunk: local_vote_rank[n][0] = 0 # Reset scores for n, nbr in G.edges(): local_vote_rank[n][0] += vote_rank[nbr][1] if not G.is_directed(): local_vote_rank[nbr][0] += vote_rank[n][1] - + return local_vote_rank influential_nodes = [] @@ -97,4 +97,3 @@ def process_chunk(chunk): vote_rank[nbr][1] = max(vote_rank[nbr][1] - 1 / avg_degree, 0) return influential_nodes - From 4a08ea53e4647ca3357301413855333f2385d035 Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Fri, 14 Mar 2025 00:04:25 +0530 Subject: [PATCH 06/11] added the algorithms to interface --- _nx_parallel/__init__.py | 4 +- nx_parallel/algorithms/centrality/__init__.py | 2 + nx_parallel/algorithms/centrality/harmonic.py | 4 +- .../algorithms/centrality/tests/__init__.py | 3 ++ .../tests/test_harmonic_centrality.py | 40 +++++++++++++++++++ .../tests/test_voterank_centrality.py | 29 ++++++++++++++ nx_parallel/algorithms/centrality/voterank.py | 4 +- nx_parallel/interface.py | 2 + 8 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py create mode 100644 nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py diff --git 
a/_nx_parallel/__init__.py b/_nx_parallel/__init__.py index d7f1d3e9..d91bea10 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -104,7 +104,7 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, - "harmonic_centrality_parallel": { + "harmonic_centrality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/harmonic.py#L10", "additional_docs": "Compute harmonic centrality in parallel.", "additional_parameters": { @@ -168,7 +168,7 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, - "voterank_parallel": { + "voterank_centrality": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/voterank.py#L10", "additional_docs": "Parallelized VoteRank Algorithm using joblib.", "additional_parameters": { diff --git a/nx_parallel/algorithms/centrality/__init__.py b/nx_parallel/algorithms/centrality/__init__.py index cf7adb68..cc319d88 100644 --- a/nx_parallel/algorithms/centrality/__init__.py +++ b/nx_parallel/algorithms/centrality/__init__.py @@ -1 +1,3 @@ from .betweenness import * +from .harmonic import * +from .voterank import * diff --git a/nx_parallel/algorithms/centrality/harmonic.py b/nx_parallel/algorithms/centrality/harmonic.py index 0274c192..087e9d46 100644 --- a/nx_parallel/algorithms/centrality/harmonic.py +++ b/nx_parallel/algorithms/centrality/harmonic.py @@ -3,11 +3,11 @@ import networkx as nx import networkx.parallel as nxp -__all__ = ["harmonic_centrality_parallel"] +__all__ = ["harmonic_centrality"] @nxp._configure_if_nx_active() -def 
harmonic_centrality_parallel( +def harmonic_centrality( G, nbunch=None, distance=None, sources=None, get_chunks="chunks" ): """Compute harmonic centrality in parallel. diff --git a/nx_parallel/algorithms/centrality/tests/__init__.py b/nx_parallel/algorithms/centrality/tests/__init__.py index e69de29b..a11efc6d 100644 --- a/nx_parallel/algorithms/centrality/tests/__init__.py +++ b/nx_parallel/algorithms/centrality/tests/__init__.py @@ -0,0 +1,3 @@ +from .test_betweenness_centrality import * +from .test_harmonic_centrality import * +from .test_voterank_centrality import * diff --git a/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py b/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py new file mode 100644 index 00000000..f3d5c682 --- /dev/null +++ b/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py @@ -0,0 +1,40 @@ +import networkx as nx +import nx_parallel as nxp +import math + + +def test_harmonic_centrality_get_chunks(): + def get_chunk(nodes): + num_chunks = nxp.get_n_jobs() + node_hc = {i: 0 for i in nodes} + + for node in nodes: + node_hc[node] = sum( + 1 / d + for _, d in nx.single_source_shortest_path_length(G, node).items() + if d > 0 + ) + + sorted_nodes = sorted(node_hc.items(), key=lambda x: x[1], reverse=True) + + chunks = [[] for _ in range(num_chunks)] + chunk_sums = [0] * num_chunks + + for node, value in sorted_nodes: + min_chunk_index = chunk_sums.index(min(chunk_sums)) + chunks[min_chunk_index].append(node) + chunk_sums[min_chunk_index] += value + + return chunks + + # Create a random graph + G = nx.fast_gnp_random_graph(100, 0.1, directed=False) + H = nxp.ParallelGraph(G) + + # Compute harmonic centrality with and without chunking + par_hc_chunk = nxp.harmonic_centrality(H, get_chunks=get_chunk) + par_hc = nxp.harmonic_centrality(H) + + # Validate results + for node in G.nodes: + assert math.isclose(par_hc[node], par_hc_chunk[node], abs_tol=1e-16) diff --git 
a/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py b/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py new file mode 100644 index 00000000..792d83b3 --- /dev/null +++ b/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py @@ -0,0 +1,29 @@ +import networkx as nx +import nx_parallel as nxp + + +def test_voterank_get_chunks(): + def get_chunk(nodes): + num_chunks = nxp.get_n_jobs() + sorted_nodes = sorted(nodes, key=lambda n: G.degree(n), reverse=True) + + chunks = [[] for _ in range(num_chunks)] + chunk_sums = [0] * num_chunks + + for node in sorted_nodes: + min_chunk_index = chunk_sums.index(min(chunk_sums)) + chunks[min_chunk_index].append(node) + chunk_sums[min_chunk_index] += G.degree(node) + + return chunks + + # Generate a random graph + G = nx.fast_gnp_random_graph(100, 0.1, directed=False) + H = nxp.ParallelGraph(G) + + # Compute VoteRank with and without chunking + par_vr_chunk = nxp.voterank_centrality(H, get_chunks=get_chunk) + par_vr = nxp.voterank_centrality(H) + + # Ensure both methods produce the same influential nodes + assert par_vr_chunk == par_vr diff --git a/nx_parallel/algorithms/centrality/voterank.py b/nx_parallel/algorithms/centrality/voterank.py index 7c123c87..246bea24 100644 --- a/nx_parallel/algorithms/centrality/voterank.py +++ b/nx_parallel/algorithms/centrality/voterank.py @@ -2,12 +2,12 @@ import networkx.utils as nxu import networkx.parallel as nxp -__all__ = ["voterank_parallel"] +__all__ = ["voterank_centrality"] @nxp._configure_if_nx_active() @nxu.py_random_state(5) -def voterank_parallel( +def voterank_centrality( G, number_of_nodes=None, get_chunks="chunks", diff --git a/nx_parallel/interface.py b/nx_parallel/interface.py index 38af8c73..31a40f18 100644 --- a/nx_parallel/interface.py +++ b/nx_parallel/interface.py @@ -18,6 +18,8 @@ # Centrality "betweenness_centrality", "edge_betweenness_centrality", + "harmonic_centrality", + "voterank_centrality", # Efficiency 
"local_efficiency", # Shortest Paths : generic From 8507eb427969da4cbdaf322c75deee3caa18514f Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Fri, 14 Mar 2025 01:34:56 +0530 Subject: [PATCH 07/11] API Integration Check --- _nx_parallel/__init__.py | 11 +- nx_parallel/algorithms/centrality/harmonic.py | 44 +++--- nx_parallel/algorithms/centrality/voterank.py | 130 +++++++----------- 3 files changed, 75 insertions(+), 110 deletions(-) diff --git a/_nx_parallel/__init__.py b/_nx_parallel/__init__.py index d91bea10..10eb4a57 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -108,7 +108,7 @@ def get_info(): "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/harmonic.py#L10", "additional_docs": "Compute harmonic centrality in parallel.", "additional_parameters": { - "G : NetworkX graph": 'A graph (directed or undirected). nbunch : container, optional (default: all nodes in G) Nodes for which harmonic centrality is calculated. sources : container, optional (default: all nodes in G) Nodes from which reciprocal distances are computed. distance : edge attribute key, optional (default: None) Use the specified edge attribute as the edge weight. get_chunks : str, function (default = "chunks") Function that takes a list of nodes as input and returns an iterable `node_chunks`.' + "G : NetworkX graph": "A graph (directed or undirected). u : node or iterable, optional (default: all nodes in G) Compute harmonic centrality for the specified node(s). distance : edge attribute key, optional (default: None) Use the specified edge attribute as the edge weight. wf_improved : bool, optional (default: True) This parameter is included for API compatibility but not used in harmonic centrality. backend : str, optional (default: None) The parallel backend to use (`'loky'`, `'threading'`, etc.). 
**backend_kwargs : additional backend parameters" }, }, "is_reachable": { @@ -169,12 +169,9 @@ def get_info(): }, }, "voterank_centrality": { - "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/voterank.py#L10", - "additional_docs": "Parallelized VoteRank Algorithm using joblib.", - "additional_parameters": { - "G : networkx.Graph": 'Input graph. number_of_nodes : int, optional Number of ranked nodes to extract (default: all nodes). get_chunks : str, function (default = "chunks") A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks.', - "influential_nodes : list": "List of influential nodes ranked by VoteRank.", - }, + "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/voterank.py#L27", + "additional_docs": "Parallelized VoteRank centrality using joblib with chunking.", + "additional_parameters": None, }, }, } diff --git a/nx_parallel/algorithms/centrality/harmonic.py b/nx_parallel/algorithms/centrality/harmonic.py index 087e9d46..46bab453 100644 --- a/nx_parallel/algorithms/centrality/harmonic.py +++ b/nx_parallel/algorithms/centrality/harmonic.py @@ -1,31 +1,30 @@ from functools import partial from joblib import Parallel, delayed import networkx as nx -import networkx.parallel as nxp +import nx_parallel as nxp __all__ = ["harmonic_centrality"] @nxp._configure_if_nx_active() def harmonic_centrality( - G, nbunch=None, distance=None, sources=None, get_chunks="chunks" + G, u=None, distance=None, wf_improved=True, *, backend=None, **backend_kwargs ): """Compute harmonic centrality in parallel. - This implementation follows the approach used in betweenness centrality parallelization. - Parameters ---------- G : NetworkX graph A graph (directed or undirected). 
- nbunch : container, optional (default: all nodes in G) - Nodes for which harmonic centrality is calculated. - sources : container, optional (default: all nodes in G) - Nodes from which reciprocal distances are computed. + u : node or iterable, optional (default: all nodes in G) + Compute harmonic centrality for the specified node(s). distance : edge attribute key, optional (default: None) Use the specified edge attribute as the edge weight. - get_chunks : str, function (default = "chunks") - Function that takes a list of nodes as input and returns an iterable `node_chunks`. + wf_improved : bool, optional (default: True) + This parameter is included for API compatibility but not used in harmonic centrality. + backend : str, optional (default: None) + The parallel backend to use (`'loky'`, `'threading'`, etc.). + **backend_kwargs : additional backend parameters Returns ------- @@ -36,15 +35,15 @@ def harmonic_centrality( if hasattr(G, "graph_object"): G = G.graph_object - nbunch = set(G.nbunch_iter(nbunch) if nbunch is not None else G.nodes) - sources = set(G.nbunch_iter(sources) if sources is not None else G.nodes) + u = set(G.nbunch_iter(u) if u is not None else G.nodes) + sources = set(G.nodes) # Always use all nodes as sources - centrality = {u: 0 for u in nbunch} + centrality = {v: 0 for v in u} transposed = False - if len(nbunch) < len(sources): + if len(u) < len(sources): transposed = True - nbunch, sources = sources, nbunch + u, sources = sources, u if nx.is_directed(G): G = nx.reverse(G, copy=False) @@ -53,28 +52,25 @@ def harmonic_centrality( # Chunking nodes for parallel processing nodes = list(sources) - if get_chunks == "chunks": - node_chunks = nxp.create_iterables(G, "node", n_jobs, nodes) - else: - node_chunks = get_chunks(nodes) + node_chunks = nxp.create_iterables(G, "node", n_jobs, nodes) def process_chunk(chunk): """Process a chunk of nodes and compute harmonic centrality.""" - local_centrality = {u: 0 for u in chunk} + local_centrality = {v: 0 
for v in chunk} spl = partial(nx.shortest_path_length, G, weight=distance) for v in chunk: dist = spl(v) - for u in nbunch.intersection(dist): - d = dist[u] + for node in u.intersection(dist): + d = dist[node] if d == 0: continue - local_centrality[v if transposed else u] += 1 / d + local_centrality[v if transposed else node] += 1 / d return local_centrality # Run parallel processing on node chunks - results = Parallel(n_jobs=n_jobs)( + results = Parallel(n_jobs=n_jobs, backend=backend, **backend_kwargs)( delayed(process_chunk)(chunk) for chunk in node_chunks ) diff --git a/nx_parallel/algorithms/centrality/voterank.py b/nx_parallel/algorithms/centrality/voterank.py index 246bea24..1fb8d119 100644 --- a/nx_parallel/algorithms/centrality/voterank.py +++ b/nx_parallel/algorithms/centrality/voterank.py @@ -1,99 +1,71 @@ from joblib import Parallel, delayed -import networkx.utils as nxu -import networkx.parallel as nxp +import nx_parallel as nxp __all__ = ["voterank_centrality"] +def _compute_votes(G, vote_rank, nodes): + """Compute votes for a chunk of nodes in parallel.""" + votes = {n: 0 for n in nodes} + + for n in nodes: + for nbr in G[n]: + votes[n] += vote_rank[nbr][1] # Node receives votes from neighbors + + return votes + + +def _update_voting_ability(G, vote_rank, selected_node, avgDegree): + """Update the voting ability of the selected node and its out-neighbors.""" + for nbr in G[selected_node]: + vote_rank[nbr][1] = max( + vote_rank[nbr][1] - (1 / avgDegree), 0 + ) # Ensure non-negative + + @nxp._configure_if_nx_active() -@nxu.py_random_state(5) -def voterank_centrality( - G, - number_of_nodes=None, - get_chunks="chunks", -): - """Parallelized VoteRank Algorithm using joblib. - - This implementation splits the graph into chunks and processes each chunk - in parallel using joblib. It follows the approach used in betweenness - centrality parallelization. - - Parameters - ---------- - G : networkx.Graph - Input graph. 
- number_of_nodes : int, optional - Number of ranked nodes to extract (default: all nodes). - get_chunks : str, function (default = "chunks") - A function that takes in a list of all the nodes as input and returns - an iterable `node_chunks`. The default chunking is done by slicing the - `nodes` into `n_jobs` number of chunks. - - Returns - ------- - influential_nodes : list - List of influential nodes ranked by VoteRank. - """ - - if hasattr(G, "graph_object"): - G = G.graph_object +def voterank_centrality(G, number_of_nodes=None, *, backend=None, **backend_kwargs): + """Parallelized VoteRank centrality using joblib with chunking.""" + influential_nodes = [] + vote_rank = {n: [0, 1] for n in G.nodes()} # (score, voting ability) if len(G) == 0: - return [] - - # Set default number of nodes to rank + return influential_nodes if number_of_nodes is None or number_of_nodes > len(G): number_of_nodes = len(G) - # Get number of parallel jobs - n_jobs = nxp.get_n_jobs() - - # Determine chunks of nodes for parallel processing + avgDegree = sum( + deg for _, deg in (G.out_degree() if G.is_directed() else G.degree()) + ) / len(G) nodes = list(G.nodes()) - if get_chunks == "chunks": - node_chunks = nxp.create_iterables(G, "node", n_jobs, nodes) - else: - node_chunks = get_chunks(nodes) + chunk_size = backend_kwargs.get("chunk_size", 100) # Support chunk size override + node_chunks = [nodes[i : i + chunk_size] for i in range(0, len(nodes), chunk_size)] - # Initialize vote ranking structure - vote_rank = {n: [0, 1] for n in G.nodes()} - avg_degree = sum(deg for _, deg in G.degree()) / len(G) + for _ in range(number_of_nodes): + # Step 1: Compute votes in parallel using chunks + vote_chunks = Parallel(n_jobs=-1)( + delayed(_compute_votes)(G, vote_rank, chunk) for chunk in node_chunks + ) - def process_chunk(chunk): - """Process a chunk of nodes and compute VoteRank scores.""" - local_vote_rank = {n: [0, 1] for n in chunk} + # Merge chunk results + votes = {n: 0 for n in 
G.nodes()} + for chunk_votes in vote_chunks: + for node, score in chunk_votes.items(): + votes[node] += score - for n in chunk: - local_vote_rank[n][0] = 0 # Reset scores - for n, nbr in G.edges(): - local_vote_rank[n][0] += vote_rank[nbr][1] - if not G.is_directed(): - local_vote_rank[nbr][0] += vote_rank[n][1] + # Step 2: Reset votes for already selected nodes + for n in influential_nodes: + votes[n] = 0 - return local_vote_rank + # Step 3: Select the most influential node + n = max(sorted(G.nodes()), key=lambda x: votes[x]) # Deterministic tie-breaking + if votes[n] == 0: + return influential_nodes # Stop if no influential node found - influential_nodes = [] - - for _ in range(number_of_nodes): - # Run parallel processing on node chunks - vote_chunks = Parallel(n_jobs=n_jobs)( - delayed(process_chunk)(chunk) for chunk in node_chunks - ) + influential_nodes.append(n) + vote_rank[n] = [0, 0] # Weaken selected node - # Merge partial results - for chunk_result in vote_chunks: - for node, scores in chunk_result.items(): - vote_rank[node][0] += scores[0] - - # Select top influential node - top_node = max(G.nodes, key=lambda x: vote_rank[x][0]) - if vote_rank[top_node][0] == 0: - break - influential_nodes.append(top_node) - - # Weaken the selected node and its neighbors - vote_rank[top_node] = [0, 0] - for _, nbr in G.edges(top_node): - vote_rank[nbr][1] = max(vote_rank[nbr][1] - 1 / avg_degree, 0) + # Step 4: Update voting ability + _update_voting_ability(G, vote_rank, n, avgDegree) return influential_nodes From 1d0f2aeeb3cb62f3fa7225d77a476f2d4bff8874 Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Mon, 17 Mar 2025 19:21:18 +0530 Subject: [PATCH 08/11] Ran timming scripts and renamed voterank algorithm --- _nx_parallel/__init__.py | 2 +- .../tests/test_voterank_centrality.py | 4 ++-- nx_parallel/algorithms/centrality/voterank.py | 4 ++-- nx_parallel/interface.py | 2 +- timing/heatmap_harmonic_centrality_timing.png | Bin 0 -> 32362 bytes 
timing/timing_individual_function.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 timing/heatmap_harmonic_centrality_timing.png diff --git a/_nx_parallel/__init__.py b/_nx_parallel/__init__.py index 10eb4a57..1caf87e6 100644 --- a/_nx_parallel/__init__.py +++ b/_nx_parallel/__init__.py @@ -168,7 +168,7 @@ def get_info(): 'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks." }, }, - "voterank_centrality": { + "voterank": { "url": "https://github.com/networkx/nx-parallel/blob/main/nx_parallel/algorithms/centrality/voterank.py#L27", "additional_docs": "Parallelized VoteRank centrality using joblib with chunking.", "additional_parameters": None, diff --git a/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py b/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py index 792d83b3..19c03c7a 100644 --- a/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py +++ b/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py @@ -22,8 +22,8 @@ def get_chunk(nodes): H = nxp.ParallelGraph(G) # Compute VoteRank with and without chunking - par_vr_chunk = nxp.voterank_centrality(H, get_chunks=get_chunk) - par_vr = nxp.voterank_centrality(H) + par_vr_chunk = nxp.voterank(H, get_chunks=get_chunk) + par_vr = nxp.voterank(H) # Ensure both methods produce the same influential nodes assert par_vr_chunk == par_vr diff --git a/nx_parallel/algorithms/centrality/voterank.py b/nx_parallel/algorithms/centrality/voterank.py index 1fb8d119..b219460a 100644 --- a/nx_parallel/algorithms/centrality/voterank.py +++ b/nx_parallel/algorithms/centrality/voterank.py @@ -1,7 +1,7 @@ from joblib import Parallel, delayed import nx_parallel as nxp -__all__ = ["voterank_centrality"] +__all__ = ["voterank"] def _compute_votes(G, vote_rank, nodes): @@ 
-24,7 +24,7 @@ def _update_voting_ability(G, vote_rank, selected_node, avgDegree): @nxp._configure_if_nx_active() -def voterank_centrality(G, number_of_nodes=None, *, backend=None, **backend_kwargs): +def voterank(G, number_of_nodes=None, *, backend=None, **backend_kwargs): """Parallelized VoteRank centrality using joblib with chunking.""" influential_nodes = [] vote_rank = {n: [0, 1] for n in G.nodes()} # (score, voting ability) diff --git a/nx_parallel/interface.py b/nx_parallel/interface.py index 31a40f18..1c6f9d38 100644 --- a/nx_parallel/interface.py +++ b/nx_parallel/interface.py @@ -19,7 +19,7 @@ "betweenness_centrality", "edge_betweenness_centrality", "harmonic_centrality", - "voterank_centrality", + "voterank", # Efficiency "local_efficiency", # Shortest Paths : generic diff --git a/timing/heatmap_harmonic_centrality_timing.png b/timing/heatmap_harmonic_centrality_timing.png new file mode 100644 index 0000000000000000000000000000000000000000..0df9263f6648f32635c60a0467709ff5d9688388 GIT binary patch literal 32362 zcmd44cU;c<|37|NhvOK>E>uRLB?_rj#u25Yt*yPZr?!KPilnquXm8p(B$Sc1b|EcI zU0v0Y0!?xtOBzL!+t(UW=5^2MjAEY%Fw>0x7H3SGS6?2bFWiKCbLoBNnU76&lZbUck!ND3+O(E_qg4C5;53cGU-Yih&V0zRO~Tk? 
zTUyRJQL7XJ_ZfJn9DBAP*`$$wIX(9u=H}+7-8H_v3D_uTpIT9ORjaoq@6Pb{r%#?7 zR?V_tX&gUtyX=`nLaP8VPtG=S5qL{-M=P&*3 zHyK02*fkHymT+CXJt&3zV_L5<+d5XgApgdC&e#w^2R4_fi7{HU3mKj#J&%af*pRu+ zHCql=R#EG&%Ce?BhpLEE8>pQXC&&aPs^;vo{34!aSS7aTxAj&w{SW|EJm|xU@9d0I&9Qm6Vj#qqRYrtI_st{rO?%F{IyyQI_2%?N#ub=0 z3n?ZfQXSqXs5{AhdU;FX^ z6On73}(2&TAiRAg=bO6UGv)BEYOXSiMpAnmdCbo5Vw0)A%^R-#k)u;nQ@eR(PSqysP^*#9yLa!7KSp5nLs;&GQl@uNkl3)xzko0#9vqrCbV{T$x$W)&Tsf8s4Bmx;#{lam#`r!2C<)x^a;@<%@YM*UjL z(-_@e7IvZ=k97LR*H-Ug)!{9xqiwhC-!FIl`t`xVK?|E*otar##c$uft<^Yqg}$c9 zFip3lJ8+%$mr~E|Yzm3-tG0`DMEI#m4}Yx_5sssSwR!1h zIqchf753@9+w9A!EMJ?Vr{KA9e<}{2Xt|%da|~9l%uz6|OO-l&*fm+NG_gG5 zRK2IYko{1!ntdmW?`~Bka<8V`3%FpnfZC>!QsHS?jQewUcid(^i)fR^?6FU`gg5uy z57#WZVNjQ9ASNze+22~&)8y{%UeTRYkhtymjaL;FGAyU=R_+XS)Z}e5s2l3Xv9BB% z_#}so*!Jr7G0#oB)dka2?1g)Vo-SsJHfw!W5+azRlzu+u!-o%6=8gkJ8!#RBup{Lj zojZ~@G5ESOGAqxb?Z$9#V_b?}X_HK-=}y-^Y z{{GRKnFp%#l{&0v+-9frTMspV#33T$56tz;77DBQ!HyOVRhd!rsl0+&&BeA#bYx%YD~ zd?Rbt)sSVGeU{mv*qbF*H77kcFVB}p@AQ77YVjPK9x(;ezNS3;-mG??osx_fE?kHa z^JNbDv%RIZ^4uho4A%nohzo@9vBWy?g7{t@-ok4}A$TWn*C}N__X~_I9}xU5U20 zK?-CY_4T8jvHVsq#m0LlU@gu+-mRKl=`+qNB&24VJK)x1sh(|R;JKNPTQx70Zav4@ z`-5$-Ut{qwKNZ*-Jzb6Gmk(hj$-0*W@I+a(l|;nK#Tml1W(eNmz2WXIgAKiI?OJ9@ zNy&A)Rn7(r*rt!)JbCiud*TL?wMFYKAF3BPi=8}~**&!krr9))dY3uIZs22j+m`Y% zo0RTtjiYIkNz;i*s!3{soJy%DPuyZmOJiEQHvF|8*X#Ov<)pk}rEIOu0yg3%CMGv- z+{o5ZHb`NE=Q*OLrnYp+k~7$z@NzqpgFk=PcItn+cdptc}jAzvQjvyau1F!QWpO5Hb~EC=e~WiFfNbl3*bQq2L{R>9J{)fk#V=Fsp&e7 zvuBQ7T_Iu1)zsZCl07y0wK^dP`^C7%xx|N+KIC;&sIyX2QxjZdC{9~YVjq20W1eMH zJ5~OdDWX#-zwqFpX#IBKC> z>&vbnn_;5LAndF{Y%y`y*4@&#)`KK3T z=TA>fwzaoM!MN(fWM_D=_k`Tz(YuLDJH2+;@s>R)cbl-VFe0MoHYT?i1r1NV5iR$i z=cz<6a^>pP?mLn~WtgFT8qXG1#mb$+Nsu~o#w)yAbl@eEi^`Ry>!isIdU_JD4T-s_ zhNZ&?*+t6{6UY`gJF)ECS>J`{DfW@NozZNI5L+fhF%OQ># zu}^10MpOB~ryYkNwEV$g7txPb4U%mB%Zl>@%#~l_NDxesfu2rp|n6?`+$? 
zM8vM0#XCb~vAjO_?!~7W)fg7KEsvFsxWOC)OByKTlpW&G%a=FsV$&D~yKddO2tkKo z{i%sjo0kG+Ew21l9rrh=*&TtO&lS13IR#62Ml@qNI7Fh3o{PE+*b$JdMutGaK#|j2;9b+Tjgtu@~ zY_MQ^_wL<@Z8bIWX(c&4y)XTWi;I`8T&aLL-DmOnRgh5M`rRtg?I8}QjqB5^d{||^ z_D=2$u{%l(s;sQf>3atsz@>G;*k{Mx{z}YeOY$-b#mfL8371Kca9<%e^YeWR5wawEcu#?I#XUlbdozCMmopl?rv2>1tA!fW8MQb>Mo6P>KHJ;ZSVP$+!pQcj8hG(O6d43 z!XN9EhFDR{$5wCLXh<8~B;=ThNr9aqPlxn?9;R2MG(@mxSlGJDr?tD=NTKPG#<8%Y zSC*!C>(SNpl?LuN+880`o9s)H95?1qI2ozF!9q@T{Zy(1jW54;e_m}`ma5BU!3jl{>LdJQA_x4!APch{j#BAzf_kD00?xk8*dvD&+xP5wKg`d~T^@~W^){Efkk+%yzq)DAy+8u6%)LkxUhl;okM0^4^?XN> z(ZRMJq4D1~?B2&rj|oinjZlT78;>%|foT_k10Z_^zqqltw|8c*a+kEPX#S;e$I>;ko@X?^_jJ#z12b>HQE~73|HTZyWAhDzK z7cGrOh>{*7S9*^x3?Gd~492FE8r`_pWOJIcR<5Y1D2~ECvAZ0s@c70}xucQh=Nq%- z#%X;DS647t^)~R4o8WB7IXdPNV~mSvAKX9O*gr6En^`s6O0=B1>K)t%NtcX!$cK?} zN{flzsE$|E%5dqfPH4knaR|1_3vrpc00;Xv@esE50afIDQ?bvV|3Qw6&GwbZonm0P z49XrJPfxa|b`|gO7#A8-$rf7u?S;{X_3L*O?y1B!P!53cjFpYiw3U1pz;hx$Ki?jv z>FYaXPw!UCJ&R-|T`Iz$$?nr~dbbD2p+5NX8djcs z`t)gf%{q2@*+iABa%#5-lAM}|Q+|~NF4I;(*3Ht%PD-O(fxMQkxf(mNeIOeNxM>cqFwW{YNQ;SykN zcXwt=)s2dedmI-Lsmq%NPIFQ^yYwXB897+p`tH>Bqc{a$LwxB4n=5h$OW1A9$=@Wg z$I=?I7xW zkIi9P&r6QwD>ee*dKJqiD9OPzR*ZgYPc#1{vYc+uVJvyMWvO6PV~%Zo)uib&KF(j2 z!%yB`_S#e>l!u>R33-}a$e|18yO-7^C@XkdwLV)UGxibS2aa?%vTJ^e&)0THhQuTK zF@kR!Yq_+Nd9s;l(v(+c61Xc8hb{GW&q#QR@LWZuf%8*iflC2%3_K6sc~qZ2HD<)Z z%-mq1qYab=#1=+Zer%-)UFfr)P`333E zt3Au|XiW~CT@fse0frKApZ=4)^wf)w~>`9ux)%esz&D+G+jVj6~6cgmFzkZPVZ9T_(q%h2?>S~1m z#1fP<^4s!pCE2HcF7n+e84^^@2W$&@A}phULWnRj=F!o{;i=4^b_XRFF`r!;3U_UK z%7yyM^PEgzm&1S?A)#SybXvV?)z;!EhXmq`oIA{unvHB7)J*k&&0t=AcAUQF-#QKt zl&%biIIDRA1e`Smev1^D19Th_L$aXfh zZv(cAf%CvceDU}X`trw5GdYj6GW0dWUlSvXGoNi(8NF3qU0so5J$|kC?m%Q7ujcb5 za)~N@h5QEFUojBS0NYOzzA`y&6y7fyAP;@D&kZKzUz7}wRnl30|Y*#udYuuxbxx`QxZY^SFKvLboufs z0#%6K*}=DV4cRi8s|mrC>6$8R0&7<8^Qq^<<73kk-)==ph9(k-0Z@awNy&gI3ql&; zWz~3(rTDkNidZ0cXmp~BGXOxXJJJEmvN}s zpyp*`<79eG#M#H1>9>1$#e#ZiBJg*oY4j(>#dAAti+aZHd@(6aNa-sgZVb=y4UluOhrT6Zhn5bx0SYH~pUdf(i 
zIYwS(G$j!eo5tRk7Z|v;uOY~Z0y@Xt=wSQpUUpTUJn*yx#S{^EVFlP4f;$7`6h$xv z+#=?HX;VCioaV&Pa_TqVbdRHf$krO=g!6K9Q_WhB5|9g~uFSMxG7`9MZ53ijDd42C zFwV;v_+?>RDvgrzBBjDaShsC^W^!SMQGIp8{}}@q3vHk+^f$MiQvOuo!Fn%Zdec)I zEoCq>>VXQKdyZHI*5_>%dva+%w1bAmA z?_~Y^_h%L@UTg$pC_Sdy)CvHiKyQT$41g?rvM-WK;{;n#!-_~xvisoPBIi~!eazcZOjTdbRl`SO6Gl>quLULSq}j(>Dozfy940|CR%hLZ`#xQ&K=|H zAHTJiM&)cM*WHj=CHpDrbg)>)vOlc^i*sNjmqpDB0jt=k)UsxW_!(fUA&!HhqSAJD znFIp|Xs45@Ygac3z(uvIrIyj2Q*NpW`1q^N1I9W zp@ycnd3M5tQ$koq9Y;K&^yu7ECu5ojj8yc&t&GVn9bkbJTAy8^ zest#w9PXndM(|ZU-HhKXhhqcndmNaLG{WfBYu2QJCno$HXyEky$NS{)E+Atnppz~@ z?TC#0zPt-I>Z-d-qI19&RS%EUXlFJE3ra8-F4?oZI9p$}TOX25n> z_nVvf9>VE7Jj{NS5C3CKtsjA$?)tocKNn}Wd^!?ek!8Or5I z4g5r}&*fN#22>)jB+&W8hYwSc{sV?S3s+i)pcW@IY_Hafj;!{O3gAA}q7CZNa&hv+ zn;@YX0&dL;xV}?Fy14@gDFNpR+l6gJ*m8cSvDEzODF>i#5(qf#v%9@P+Tn+>I0@;N z%5n0XY5HYH%c%j&3BGk}iC+RprQDKO7h;{*-Bk{uyiNKDu8nrta85#KJ41)=V+-88Bs|)#5*x5t^s@s z;ao7j^w0VtZ}QXe1E4Z@w{71phb12!8Hpx8qX_^BcAgwF05nVp9Z(sj4sSsJ#6cb~ zhP_2HnBv*9XQ=>~$l~?oe!FQLXwLJl2T@@>rh}D(LmbewEb`KI2w&d@@y;BS^XKE4 zV}M(1X*%Xt* z;jQ&H35ML=+>GD^`%72A8F3jI#07hM5=y6AK#JltSiJMbwQFZE;Z^xg4rKqpk%3|$ ze0^W`EhKbG`Hormcm!NcC%me{gQHi5aEc7y-#=p5l$)iLW~dX_2d;!1i2%KjGXK3= zL=w?p9YkzV3-=6a%%Pb4yaPnys@1DCf$q~WGBS!_19^fXq62VjW6eTO*f-T$q?-WV z5Pg0vfpyV2$xR3~_Ie=QWzkaG3tzr`*-xYK+7EvNiw7=!)3zzBQ zGXx?-PMPubH3&_QBbR=mM@q&^ra~LI4WNxZ1eS|e$>LU>M5wV5_KSOBVqzH=7gv~7 zXZZ__7ZS-2JEf%y|sz=-I8B_-HUDM2{IrI^`5Fld}mS5?GmN4M%8@ z0Kyw)=T8jRn~ETa@_>2X1a~3w@+N~Pd}V~dFpjDRaFfECnwtK>L2fu4PvqZ`?wxo5 z5ont{G2fdA;)tE)0k2Uq0_e7<%eXne5oljxYg?PnS~l5NBY+${5F!J=2Z~?c6%igz zhY&8EEE9sKLNdM;n-7{*7+KChR}~G#d3Gt|TetJ(oZuc)3q)`v6)Ve1o{-EZ_|rg* zJdb>V<%RdgX@E6Eu;V=eOwWLyG13u)CHT;&oN=R7x2ETX5g?iY*rH8<5BEVpV)D7f z_jNRE`v4qplt5F$JptQyc95=k08pnV+Z#^V6CpMoj&NF)sJQq(q0!H4!hjGGisUxT z-}Zg`D(wL4cmj}Zuyz6VDm2ovAneS8qrl43y6fNG)S$Cm9nVFvhLf->zga>?U`3TrQV{313Zx0d=?}CeN z?d>&L%PRex4pJ(R7im_LBqGT^&^weBYGcZH$nHG}y|5ow5M-GST=Q*jZ_SM9JZBRO z5D1nBc%9l^VAjc%K7+H@|M2L+UKqg~d+hQ3U;;JBYLHV(EsJQgVq?WaqP 
zL`fMaZD1VAJws9zoeffUbBTYo$Y&v=6cr)3LrF zvaf*n?tK^*#^cCDEK~A)ca4T36(YrYa_k%gJ+OoC)mAbrV((uK48bMc)M*c&T%ZL1+9*(5gl7}CTxgqh$XpVn^uOT4*rE8;i26rX_p z1l(c%nje&wG$2Ms@x!}6BOoFX6Cu$%E27vDRlHmAEBQR-;IBqA8qUM(%?3YiYrg>HCr3#D|YU^%VcS@WToKiHCGtwZW%3pQp>m5;;m=^b|sbs(3{nNZZtb=T#Bu9k5w_giq=G zDY|c^wY9?mBE}<*B6KZC?;4zvKDGmt#Mhd1ZmnOp^kx{bD%@k3PkD0v#!91)Uw@}i zPFr*x_DDH~6I5~CPVWRcy;`Gt^{)26Psu%7{b17LdJcn8sb(2m6?LdWvN`n~!JSC1 z0|%C#4Dgc`NjcoRFEeBwF)$n?Y-45xnWI z;Z1L=Dk959md__S5i^& zE#w$MXra~WDZyXX10Rn=kRjJz8mb^BCgx@h386!2&jPdM9zS>e;mF_Dtz$u!nZwD( zR#K}m&+giu+2=2r|G+{gsD^HMc%-uPXbojCL#CxpA~qfTMG`gm!ri>Qynr-x&hCUM zihNGl@=a^j!GF`%vath@9{F&iuKfHw1d>XRHLs}FA}%9DjHow24p^z4uJFi6L#P1A z(+bVrXnsXcdv>ejJ1Ua(CfXO#t1FqR@ein2I6KGmhY7ORRWS0=on)y*xdr5-3$)dH zS!jL0 z6U8(mn0iPQCotgY+S$xd6sQn$kC0)%?i)HH+k)pv11@W9jT|Zt&PN~dFSqDomQXb!#-n4|0ue;-`r*iEGVD#mM_D;SlgO^>_JP1Jnv)U z5gtvt7R(eg+u1uvM$mO$2gOX?>*t!f#>`lNZ(KQ zIksQ!FEEpv?LycPOZ;P&*Td^AByW6n6B?5rtDd_fUM_i zry{%Fz39Q)RRk)4xDGWFYuB%jgi^98)dUI}=rTzW0}%$pNE6gnH5ip0;qL4RVIO>; zWHQd)KvkE-?n9q)>h|s1ME*zsUhn~|kQ#tCeD`PyN=s^+pE?m?5>b?zXso~jBWQ!50aAVZ;HQRVJiIPX>J+`H{~ANKGe6!1nF$B!QmSYLzGUKS}7 z2)$PTos)70Z*1GPO*?H$z2qyA;sN%Pd#rgJ3QHr9p3q2F!2C+MFF5>Vc1#y(vF{=g zgApFx1Qi{J-Hb=Z$6rIHK-@|jaVw$1E^gxs7cSHXnR(;Ztr}pnX|{c81hD}h%-iFR zJc#ka=T!^`&7KUbng2v*j`iEDx}XtKZbH%3LrBkaz7T^2_CuUb0tY69T|WK{mcfKR z6p;tP9ig9o8!VuJbRpNE5!2O;%#?6XT00lGo|+vl`>my?k4efO6NAR52K6tM&@C6e ze969T+fnbzEK9wdRbL@;jDnUh3h9~w0&v6f@aw2H0Z4zs&jQTo1Vu{9iXaG!1r#7EZDp0(IQ9iv zu?idj>yGzZ$qd0w1@6^y1FUi08XGARvWRXZ<%E|9+>r{Le$m2(hrER5neBE*`k!(s zvpF$BtasPiL^&A5`s)y?f;tf65fly_Mg)jL!O_om#*igJ3ZpGD!|=`gqS>k$uOlvc z{}d_%T!HANfv=iGc>#gG2u_GZKGw83pWwBM5%Vr#<&q90HE#TvE1lcAca_HZNkiB(3zrKdZO`RvH z3U5OMm9g^s!}xwYptdGh>?d+MJfF4zzB>c8EX31EFodxnR64sV5ynEAr@N2jiZd@) zagVak6ct}g2$T)tp!VI(%2Uv2Mf%0K}RFpa*5zYuLCiyr8{o0WdF|l#2{R9Vc<#lCi;i^=yZ@IQ1$GKmK0aKC(EomU@yu^Y z-hBE#7FiXHydM|^NI9WAz8O@VaQ5-Tu(#hoNqJHP`_K}L55pHIB1M-5)gqf^p@U6I z?{+&eq)1arOsqt3(a$m096_2HXL0bNGwzJ@@?mPzkIQ)>{(54~ckeKleEeCr>E+*l 
z;Cgj4Hp=dj)`epr#vNZ;E2rwW>hpa6F}@!9eZ|iA0LD87Vqb3j={#3%h&20ad{L2+ zf%HE%0ws9~`%DJfI?G{st%8#^lf#8TqFRzdC^!)UQ=2N%Ks{Ke(*L+&CLR0t0fD)S zgfSp=a43}765V+8Sq^sPkBg${=U%JJkBqLRRoB{vLTTAjQFEAg*|F6H9P__l-tO#P zB%ca8bt_$oFbKgsUSu$A5PcH4Ps)mw-#`0ve~Hsy6oSeI=2r~Gf>QX(EZSX@q<~18 zNYVk@29^Oa{`;junTgb3L_kKq2kRdJYgS)Ej z2n4X5Z}oqSn6mrN&JT!EWU3DnLJ<6lSE6HixM;~j2N=iLgL;>?c zC=WZG)4TQ1kI!dr{J!i-N@*o;4*vLo*Ky=vGrRAn5z@Xq#D~dNn(u@_Bv8;H9YB_^ zzrQ{WeA>4+hQIvy<$%pIQx-?Rc(nB@lPrp&dTLWnfbdm-^+KK+0o70mpx9XkqGenB zdiP_0+h8!fohhY471L8RVF<7Y9Z-j&Q{ZD^;PwK`0QLO+Qn_5DX|8Yi92T1tLIFc3 z6%J6N@g@ZM_=ICANr?kC1mSMrv+N~bF7!(C_dYCa*MA=v!4A>E{{AyiWpb={Cv|&J zaRkh|{5d@UlgK+O;f&KReA6WaKcILJXjX32#Va;PgD{ZBaj!!4Dq%kXGt1Xu;tT)2 zOKGq`2JlyAQIt%1+4MHBijs%~Pyc52j`(;)qE!e52B?daK{f~_V>~2I(J1-TC*@An zp>U;57up&28`sIQi20m>T7py}IgNEjtzcSnxLQ+eejZ3VLXt4Y!0_7lq}5a+pd-Q= zc)NXJKW$3p68B5>jZkTj)II?LT5SpRSc?BB(p*+T?)hS$oU1 z=;^8sP$g%8=BO%)gq5-sH#e)3Lb&VKAG6=Oeq9Q59fD{`h=>#pY8@i=C6E)MUWMzc zS^j>I(2B2hX{T--8oUOf(nAm@Mug@ABZvGU0u<2P4IQ@qy`Z2V_Q>v>9^GH9FiRkm zXCR4WFwk%-NB0|6X6iT+D~DYL^RnNzR|Uf#47v5|7Nb2nc6N~TK;p`WQq$rCI^(z@ z1ZWDxOd~qx37ch{DKB4yHs=ACuYUF&XWj)X6Oj>EXTv%1lKkQ*vjSyO>j83TA1pA) zKruXMHJFcebacB!Z`{0@uG~CCQtU*sk`u=kCEsJxeOErjT46o>l}}K5 zY!%goW-J$dR;Vz$F=yy~WWm&F1+|>hoSs1k52%hVJ|d%nc7ckYr|a z7WMYwHdprKRgE3LA|!oe(YG%(6_;|lqpV-wV^|MD;kDVpi}}!r^L+ARW`F>OFv_Sa ze+bnHNr|yw@1f74Hrh5rK>U2@z&wQ%t72}?iO(|2H%}z8$#Nw1>AsQeQ{(E4I{upv z&b<}&gy*O%WKiif=H?qJzXYV)Z=1MoWvwugEiI7VbLMcq>v-K_imR5AkMaGjlDY92 z8(d;JU)sj$oi%c5)(AIrDwAqYd>#=uZ8I5SpFXk1-m+{7!$AjG>Ys527TvchHS12* zT`(+Q?kq0O(@x8*9sadgK_w-0T70@*YpQ^XW*Kf7dMk{_Z9^mDgX2b%Zra)$#4Nn2 zXTO@xP?x~^TGwF8D=C;^h3i#+>aZMTiwr_DHSkDe3J9Mq9>7g3FA<}NdJ_chW05-m zzNVpJio!mvx(xeBDv$kGz+La~iiG!hV`X$RK^>hViuiT?-c9CVifJvrn@nt7eeCD zxc6tZoI@Y@Ejy0*7pv~co@JDl ze8p@h&$}k-pGcdD)5$|ga!O+xgs&}XaYSXs%pRXkB9h#YLy8W=DxpVHK^&ryK#-%8 zlW9u(^Z@GaU}-*s`(%fH!HXzNP=D^fj;$rxd-a`EJsM-qA6Zp~a@36)b@|rg3=CRR z4T}uR_o?t%XlMA6MiY%w3#!tTH~4a_J8$1Wadl`nc^i3S5o6g0;rujTdhXQW{^q5e 
zD!U7v4mC9Rdfn4ku;Ju8QguT%(_mAnyxmcC6Mtj1mYJj3@<4kA(+c3eL_pKE&_ezdcyfb;8H`-Y_+&N-pa&H8>RZ0nk=F(T2u9jBL%5^-y{cpr~vrgJy znGYhld0)6)QDSsp7?e0k@fXDIg>z*$rZjq25lFG*IAAZ3zz&A!)C6VYFdW$ z)%$QWgs`fED&ByrBB5O&>ev+wDVR(m6jZH6JzuH=%|tFii5fW$?4}bWbq2cwEzfqJ zS%1!oqIudovL-DE=F*ZSe*vOOOkbP2WRjjtA-**$idX9?D7#LaRs1djLk zi4z0@Oq@UnL<)xbIzK?`2nAUXsltW;3Sj%lUFzaxbob#qbGWxFmk1V7wAc4C_Oi*O zdrUR-M{V2}%hfqH*5+dJA=-wauF0ag?e$%Dl-9lttiQ$Y7<-)OO4X7S_U^`SEP_=J z142xy${P@AZ@%jj7~ivVS}NiG8O}yp4s-D~cf)lq;xf~LF0Y-|NUyf;Dp%A=$&OJx z$uMb3i?y<@lQ)b_889ESo*w)8G&8X~9;PYXSlPO-NflZhS@7E|qNGl`33|0O)4mUZ zB~TM3DFC1O^R4_%zU$RsN?DoQWAvn~m!%&uRy@N`vDFXZu_^D6P>iW^*4 z(p_(=+XY5BU7MCOxfscoqdn)KT+ZZ|Qq}uo%e#w9OG^_QN=vy^102%Nllt5<1NX;2 ztf4$<9J44q?`gnyxc+o+1-w>eijq;AS!p6ux@sur=WloWIw~VlHl|y6Gk2E#sUy2Z z!PO?DE@?jBf+3N{C5oK#p6ax`Msv>OXw`_F`yIxmBj_gjf2XJf!*csB<1lnkJ#_yO z+q&gaU)z2g;~frPvwZ>O(2^!KKH20mUOm6Za-42%7JA4Tl%fBzX3f1-+>|HG!hLEn zxqK6qNhZ9DMn0D%`Y-6(^GzbyQL=jGIPWpYPjarZwFU9%lX4A@)%&;ozeq zPU{_`-GBA#xv3oW$GUaHualV4ciZ2eoJVP3lDpwo(>0Ihe3Ec?|4n9ty1ENDM(kZv zp3SEm+9Jmo6J8bi?s48!JneVU@>F$QcFjLNOAODad{ZP`hI=yIi{&ml)`y{iCB?dO z=;@5zId0K2a|49Bd;%4D7*>@SNbv~Z7tyiq(0C<0m@|2b)YSn*#w1F#nNCwR!e zU$5p9GCMB2ePOl+9YM>XX_Z5zy3T7|vD1v-KLreC8^8^f3PH{tL}noue^v?wAt=Uw zNE#%dWk!nT4}`<@w`3)b6~>g7^E98sF5L+kq?#PMB6%o1!k~AGC&G;Q4?nKm@+Z<& zk}TH?zRdumP|yt-9}2G0m_-r75-Bb!Vz|&qLuUSubjH=_aZ^3rX^L5(!`LuUfWlns zIH=w~JWr=IL=_Z9e*2RjH=t1Lym}25W*2Fe0oU>sbg)ql0JsGX(F%;NvLb}ty%DqW zh~Fc%03P+SUW7skp!jVoom)a<(LDv|+Cl>2IUgmn@;4BtBft5_EnjVm2g&zjf1}L& zJm?s8i+%uut{i`Y<(c`NpYKg4dmsLXUO;v|!7}DZ2xR5mtDqTiJvN6^n1M(F#=eD; z_7b4%vr*B@T)bx)&&pX~(K+*e7c595^Lr%mQABu1k`C+|Yk;<*(zM=2Drvej3oKB% zTm>v*0YwkF5b@&B$xFa!5E>^T7g-M>m}Wsd;P=%7l7AT^LREY;2I3r>Hq<_s!>7;5 zZvO_1R(A{x4A^Wht$6<81(^1VIr)zNO>9YSouxG7PTlDH@tAZm5|fZ1O*P~|AoB$N zm}pn`M`z<_Hd~~}VJ4JllY&%uZB{rZhyW^p5RnQdY`wy{w}7dL<8F&OgdI@>+%HTi z%`gT7C*%m}tN`X>*TI7y=KK!9bb_{%Y8eD9;Ms`G6dLDTok=`o9Mp~_=sa{|E$hR4 zr*X8YGD@diT}HnRCBqrucMh)IM5)m#Sz;wRNTpWg^yM2>xBhsFmWLcG^_LH!7MYcs 
zTYAmT)3T`W6}iQj0F@tVP9@*OO`ElP@pW|2RFQq{bL3FW@Yk4tC z+&d@*BEB2}4+Iv|R#d>nQ8X$Fc{Cs3#)FeyEL zf4FI!OrF-cos*M8(guPlA-UJx_XD1`PlR!ie)D^hF8DzWAY}u=iFW6$aM`!sEMiYl71*C}^DI38yH2@mP zhQwjbc!4|kcY)3jskRohQWYJJHk{%;O#Z9m;0-3(J z$qWrnNq#_wyfP0WSWm1cV(riUilL`=#w7&Y#bqLgsVPke0 zI_gDZ<8Y#M7+n?!v}s^Yd6g?3@XQGsQvCF6_8=Gn1AOD1DEZA9CV(gKhxtCd<4ecJ zA3#w6A}2TN$;rt!RBNE}C}+^$-#;i(eX7Z!53O5Hca?=*WsbqK;NT#L$RnNPN(?20 zhbv1fM0A+gqECd=F6zsoF%` zx0!DpMa6@4`+HnIiIfAvC=r(x-W^shBP~rTmqg0}s*>&`0kh_ud$0SY?eF{K0hAGl z3&mv7SaQ-v4{g~%X>S!pqplRJp?1(O`4GGVaP(nx8j=E7+X>pxzAukP)LP2hCg1Wl z%VLS(F888q;ZX?7?}8ZvTQz-nY~hnE?24LrMHwQ!Athj>n~QfP+OHujI08Yi4>R^% zu}9>OzVdB;9WS;jOzfq1)(pmeM8>|6l?3ZJh@1)EhtyX zXq>S9()qFEgd9abGd^%I1kI^?q;Ux!VLTa1Ip|lES?jn~@zwu@kY>E~iPhC%BZXx5 z;@b#_&F%gRs>W>ZdUqZ* z^M}xm>9GHXRxMzOE8`TnfReuS(Ea{9l$QB~ev%7FrWAo&Ko=yx#DHY{% zwjuPzidTQeM!zNdr`*^>Ib=o~>LJiN(FF{XUVHLr{Lw>25hEbtS_gw!G>#hyO9?p$ z0fsnCVR{V;X=P9_(PIrEG@OVn*DN}x=y7hGB$_5DF8cA2-i<|EL$u#^TjjVM6I8-n2dc~G69)c7uQ4z=uu zmF_n~ybE37ldZT;AJFE;Zj@~asRk3u=+sXhn^1!IL?{Y$70rL#X^Z>JnhOZ%7EWByB0HfPL#WoATr{Qc!$^08%l^IjBOA*3jVJ4Z(i zRQr75>;?#@gkvpF`s;F53&)@slm*Q~I!h5+1a-e)zfwpazk`nA6vf|WF9Xcw!PLftpK;I`1k(FaI!HR= z$wZ15Kbk^6IZ_@K2QZx!^B_1OuP1II_8EGs6-h>xeQeC zJxR?(%ijb-RT7aWNC)P0&BH_0lqY{8z>sNu!YePBZl1I9*$^+k%SrEGw)% z(qLL6oj4=%U$c&>`uR)&z~{<~{`Cs;DcY(#KL#_*qdXDyXnT2&;n^`$(*~QX!@mv0 z>Hi0fYBcnFP3De&R$`o+Jvl`q|B}Z(_B#3x1?XSb zndLV2tn2*euZb)L*gD7Fk6d#%O-4pHg#GTC<*`0q&T*LpV<@GH(`p*aYuYPUz{2rFqGB?X{_)qFol-Zzj zc)2k$yOb(Iyl2yb_8ELUbDpBI655LIkK5u+G-^1*BnO2*AQ3m|r-Tka$kMq~CvilP z*Ov{CL48Lg5TNai6_TKGApJOc(qM=tyBE5O&$cRi030W>7iOcPOf?U;5wK#MtDJqccJ=Dj1UJgz1Oir={a=`N0Yg(Xts%XzQ7+X} z0;89J1|~583A&)gL(tN07e&cjJ9}gK7CoH?vrr5viHZA-8zqlC{~w3%!EE zAijt}g$DXSp^6s$(>#&WJ&+U{{ex&BNqYgne+Qk9QZkqQRWUU)!>+}>&ipo_8)5b# z@9Qb)tr`0SuPBe+VkCsk8HARlb}9$C+;?0LJTp30^yfdHzvbJUqoZSP_HAg0G@kmJ z9>nr~ZhAO854tsze`(AL zu_%=1YkR++Ey|vNqQUz+%okHgb=Qw~DU{jY?MS&su*o}rIV(!03%2@NwD|-^1NN<= 
zwv|F*+!L|@;PXeor7L;yPYUHG)wx1ZF^7v;QWu*Jke#ec|fD3uhLkedXE9nf1$X?Y3~;Tlr3-Emd1;E1EKilkn!-?e`k#r0}EX-14} ziwy_|`QHK105h0jHT*h??@6;Q$Uw`mbj+$jz)FbO%MWXFx0R1N8e01)SM<6Q~A-0Zmu_eVd=_ZY_kTnvFi_}{I z-4ABiL1__cAq$>?;EjmFmZC8OG-}pJ*3h2kEW*Sy5H{`Ce)T&Zpt)9KK84X0Ix>Qf zLJ%a6HV0_JLhppuJ)~hj!7oWSznNzE$UzS>pu}?JMd*ZCamq<224+@RW1{mW4O#%^ zb`l%~aNM1+g_MSsGm~%^D84G79!#)zqVNWiCO12eO91a8Vp;U_+S~riuN1p201Ua% z#*j3NjYa7*X<$1eYDVW!B5Eexmc7wjr|a`cxS@OQ3;FwhKZ&>+avUVRdhDmf#LBnd zI`j}7e)QoS&@9$b;$CiGj+N0~ z&;0%(M+Zu@NFxTcvCs{=OG8HjA~N0QIAS)5ZgAd-JI^o^B^j{*Q2&F=srdK{r9mKM z9;M%G997cP*IWf1DDIO3oB(!-bU+~q0l=_o{FF-+(6l!*boQhIqN%Cr&|=CK_biYY z5cwO6T7dU~EK3s2UaHW;fLT@YAtb$Q`0qg2(AE`px9yD=R*rko>W;(fQ*acJV;zGM zOB!u$NRzQi^ui~?G8BmHL{T0DerJRy`Y(ldSBx<7Tb;z3YAsixn3?}|vhe%Z-V&rE zm>ODv;sT0bQqknvkK~Qfs2J#~&qyLLEylx&ayJz8d*8aXvVuAvcgMLz)}%sqFdP|K z1RyU*$O*uJh6+~->*FwcGsji2!JxGj-+4KFOj(x&qhHD6{ z|4(aY9+z{zxAB`Zi` zmO^z*g~|GiWy(5IqOy!76=jTyQ_^`~We(53&-2``{)kuiegA&H@Avyzuj~3Kfs$~f zl>87@ljO0qV_@QAfM%g=s$l`{*ab9x1iNQ&LX>KmGZ)q|Opah%3kXp};hQ>bDIRzh z(>^58NN*s7TNtD*F8LNL8nBds(x_@< zwiT#kkS~_3t|n3d3X~`dmauvA=J$`DtYg?+Nx88GZ3)dgDe_{p!)O(rRVC+hv~__? 
zXVY3zs~m`)g#UHY-fs_XJk8>J2L}G+n}!S-MJ0@gr(I#ic#TBJme>deQ4}c&j=BMf zld=yz%Y|Q^HW-qSnym2$n@ISnJo?`~-N2;J5W+-qCsZ}AO0tJHNMw=6HO0TJp~jVF zs!xexfTRhj1(O&nYSY4V#c{g()}Bc6-`M?UE^*Oye0bVa46`LyudR?#w)7iZr%|j~ z(KjTIGc&Bd!8)CXr?D`yI~iq9{srISH$7VwsRLfVm`Brf2$Ek1jj2Wf6^UALH2b10 zIvDOO*-RfngK}u4eW89RM;RHxwkB0rrAwf81Y0txc$!%lMj)v_ekRRL6t~cO)XizL zckq;Kb-8F|+`BfFF`c3xV3Y_kOge!s>wEwRlCDH;dm5V3_%oUm&7m6b-#v*Z>QcfM z3rh6~gt!Mp*dpyAxft+}kbILnT<##q9wJcGN9K)5!&u*^D38R|aiSTcNfS$?yF+|O zooGCd_TY1|+aN<^mdE7U8ZUDoUo~yue{z6^1zdJ|wi1*W-Ned#M!_V|1N7mDNUXQ` zc7^IRYOaH0k&OJos5tiO4i>lzx_p-!LAGc!dRoJUC*Rdwkw^#|BFA3^kV!L_UcG!a zTqTiIMx!Y7-Z(MPgz5;paxH;5-IJXh$yjojOC@y;cUmD$*9DH24m8#II3_^}a;n&6 zc;Bj>3vVqR#0sE3*$DTP)?Y9a^$@Xx87xN1jEm+h*RZ}-rR%h9O{9byg@=b}AiS+a zd5FPFc}pyfD%TRKB33k?C7Z9FB7F5dK_P5}Ky+JEB#n>gj@Zy@`iV>?WNp>1XQv3YOo!`zj&=ueR*044hgb#yDqpuJ5(|Q5e+Dn0rYC zQjA+?4FA}1Oh?z~HJneeme>rAJ-#x8-jH~FgZC|I6%2q|ore5_ z@`Pl<8V~>*P16>%WS5$>W_7QgTTt1>V6%>AC}A(OwALBvl%0d`&WsdLXOWc7+Mcqm znOVT|S^!8i;j&MPtSwG2i=;~1QrJ%(2PgeoHK5r*KBt%eDPmDQ!{=il`x8Q>fQ;7H z^0Qj%M-08-{#{ug`I0Q4`cm4z@k?DpNemdp+*coF(u6?WU*Z8GlS;OwIPZtif>mpH z6eR;qXNkD~@ZrPLvGV49Uqv)Lya`5LaAol!t-A5XHES!}csGZs2gZ4&j29I@cZqk* zXzt;YV#k|gR+!=qM!s*jHXeG`klFXJWq0mtYM2`gIcy9tbxq{Z%rUj-gqK$U1NCJs z`)?2Dtw>eXVZ$PgJ)T+^yoMo6P;fPxBYrpqVC z)Z(rY)+J?*RU``^axDA>qs&BPbJ+3xXS@bmp%XY!`n=j$@Mu(3fa=vd1YjAursrmsuFyN~4em?xevo9z-QWn)MvT!_Iq`lG~R!smLj22m3DH zRkdxqn2{U@1s?0WwQN1#kmi$dTqL%H<;&~8Td$rEm>g-3e)G{M-MIuJ1nl_L^tG(` zvg9X~3Qh=&6z4iQIT=m#$tal*+2~craQL8_oYj@Lt0jCUHPr~}DLW|P6c#cS1h<;9 zq$*G`Tt-=oMl5LpwHf=KbBH1~diB_JzVTdd??*XJFf}Oh(&^u2JNM4#@cCR<_bK00 zn*n0Xa@&1rz*CgVNIEg9?O}D3E!uH7~?0903CrCo{YVtlOfbZr}{g{yj%K) zOcdjRE+4bLe4kXasnwj{y6A2LG9Gt`1y_iO%g9M)(Iiu2@BJ9uu&By^wYrO7e+Hsg zd&q)?#&AbvBbw~*PMyJ(Z9gCrmK?dLF%Xw2ykv*~OyDqc*}Dd76U-ff_c^sS%DiYF zX&w_=a7z}LO0ggXh$mmg+(_z8fMfQ`&wO9e=>pbK+9GXMTnEOAN!`u*?j@tT)PyS| z6>kOx2E!m(3($;8Bs1Ml87pWI3Nr)m5oGA~l0JMwo`d4+ zX0a{-lotLTD)qKNK5M{p#FiyF^$V%E0LCID9gKQ(@Ik!|o=xig;N2 
zJ`b0i+d(vaImlUHI!TBDc@U2l5H)5aWf_2CwfK|*8xcyZiy@Sl>-nbO|IuW#utulW zfZ?K}qXt7{r;+_>`ez~;p*oDCqcdN$5xZaeeTFZcbGFR~z9n!FyTu1fym!R*uh#*n zG^Clo(a#Y93*zE8Dh4SQLLH+68IxghySFNxd0Rl*coQ)xNK6t%H*M5gAF+~TZk*Mn zCVX_8^Hcs))HrNbbNA2x_*UP^U~vA=B^vHjr!1`Q;hq&@@zbQSgZsVb_tI+A(!V!x zyT9zd^Qdtt-F=&_Z<+sPiDi?V&%bKhtlRpvH_mwcVa0|C^~N9QamZNN{=(4v%Qj7a zIj+G{ySY8mmrM!U-1g4%)%j6}T)Mx^$-Dor>m>_H9?rk?@Y!d}su$h7%u=-ZhvD}J zq~~OM=S_3eU>Mx)js68rv{{}1G1#(xP1W}0po)qLFPo^$Yu6SWap~b75TMazB4GRb zmlk&)6u|;VZrHHk$oblJ2(5WrsN)8VO}baa#ToNwCC10|Yw9rfn-h815@$_4awLEm zlY3sRU*8<3u{y|i+O(LVmg{==?(OGO5>nBx-{QqT?A^Q9&u3`ur{;~Xx$u)8U%(6O z+O^Bv!s6)7U3oT;)a`=fZ~A5bt?6g6SeEk?R^$4%Z~p~&i!mV~Cq_@2K7H5SyLZ8X z{Mg`fP|UZ-jvbr+>E@13mbIVj*Qc&JwRf77qZ(d7<=_g_D=Jn$M8w$9cjm+~V|Eku z+^bCSJ$nd)inEHkI0IJoEH5u-l)*IvjC9W3!=uo4$=ImToUthhUKt)9w{xenWTO$qW5Q*8YHIJ8+g>)} zuTy4Po9;Dg-n;`>&49k?2fSALc+!EtN$aGJN>P8*?-@ zPF(!#?3txJU5OOHKwNWU&KO)50Ak0Ip0(J3(DtvNW&Yi}J3zEO3b(suw$Q!I`7f$F z5y6_9n~V89oAgb6zmnsXe!-0H6W1s*GV+1#@Fh!*Qz*#H${-l zRgQ>cJ9K3 z=;R6IwU2e^`of~J>JJ3unETT|$H$|I_O;LK8U=aGx;N>=-uU<|>D<0^nxK$>1T3;! 
z=;0A{?%cVRkr(knWo2a*6sUVmKR4;bsm5SrApf%#MpH~ojP%|epB#-5J3lx$nBHt8 z*WRz+Uya`Ear7UA9g=bgIJ-D591&8R{E{b6z%(Ug-n>wTh3AZ_@QuVzr~x;6xJ!LK;Wt?3h)~ zteY6uJUu-r@6Tjd>#YRtLg>~POP4Kk zsdyCpAlqtuazR)MiLmmo(VUVmg8KLWap@BWa#l%*5TJB5sY+_zvSm2T?A%;;clQS) zlGv=*uWt$r3JRZx$+EOfG;%dX6l5|9)SUc6f`X2yKg||0n_v0jMFHvQ_qoLwAsXkA zwN8!g-^<$C`j^*++_GzSuBUD@wE7l*O%n#tX{UWzML*jie^3=XZ7&h7efYd=&xH!y}Fa=JvQN zeQa%a?%!_(c(AwUM-~=yEN&0Ih#j(f+}hkc<27*6zKr6OOGKBhT_ZPd-iKx5L}?ak z25BCUuZRZOwVU1g#y@MFn7ex;3t4VVFRQPu2GMDaT24LYxp-cYsxqO}FrD@oFeN!-s2rW^{fzBd;#4 zfK5AP%Jx;Q`zD~k5u2JC_#=6+UXyKV{i@|ZsPBrSf)4RlvxbxLv6lPM%SiJ(v~S0b z{?ygdAzAPnEwd5$kMEO`dfD5@a$TkfelMf)L zmdNVJE*&{CL5j;cIXR_y^DhrOJcCSBaSayT+{|q5xRgmFMtn#5)wN^CLB77eMTLdP z(FgC0j*f1*s>OTnwL#A|D=Vvzh`E9?!In$&aP6Fboq15e>3)Y)GR#Sux!UMWCE&Mb z&otqfotu@LyRV`mkY(R{;>0BUY8NXjHy4)`xP*Pw2i%|ppKS{9Qm?twYTdVyfbeee znLrjz;TLA16Jys1^D|+<`Q3>Abn`HNWL6I%Zm5r)UiqFJ>e|+oE zt3Th*&o4r}WbDL=Tcu7scI+PN%|0xeo%_Op1H;f$XTwt=^h(y45!IB0z@!Y<+0t?# zB1H4-XF4S$B&aZf=6pQcg@Aii&_paZZb;=E>Xw=lcsA$(bc2MjuzJwGA4#-_8?dpl z@ke2llDv0CX<<=OQ3g?|h|SYy%;@6iIKjii!>;VLD1&9ot63*bNHXvDSEyfNRRuFeVl HCx88K<=6>o literal 0 HcmV?d00001 diff --git a/timing/timing_individual_function.py b/timing/timing_individual_function.py index 809315d0..1ca92382 100644 --- a/timing/timing_individual_function.py +++ b/timing/timing_individual_function.py @@ -15,7 +15,7 @@ number_of_nodes_list = [200, 400, 800, 1600] weighted = False pList = [1, 0.8, 0.6, 0.4, 0.2] -currFun = nx.tournament.is_reachable +currFun = nx.harmonic_centrality """ for p in pList: for num in range(len(number_of_nodes_list)): From ff5591eb15870625f184da4e758243897d775e44 Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Mon, 17 Mar 2025 22:16:33 +0530 Subject: [PATCH 09/11] benchmarks added for the algorithms --- benchmarks/benchmarks/bench_cluster.py | 11 +++++--- .../benchmarks/bench_harmonic_centrality.py | 20 +++++++++++++ benchmarks/benchmarks/bench_voterank.py | 28 +++++++++++++++++++ 3 files changed, 55 insertions(+), 4 
deletions(-) create mode 100644 benchmarks/benchmarks/bench_harmonic_centrality.py create mode 100644 benchmarks/benchmarks/bench_voterank.py diff --git a/benchmarks/benchmarks/bench_cluster.py b/benchmarks/benchmarks/bench_cluster.py index c4e92493..befe7ede 100644 --- a/benchmarks/benchmarks/bench_cluster.py +++ b/benchmarks/benchmarks/bench_cluster.py @@ -5,13 +5,16 @@ get_cached_gnp_random_graph, Benchmark, ) -import networkx as nx +import nx_parallel as nxp -class Cluster(Benchmark): +class VoteRank(Benchmark): + """Benchmark for the parallelized VoteRank centrality.""" + params = [(backends), (num_nodes), (edge_prob)] param_names = ["backend", "num_nodes", "edge_prob"] - def time_square_clustering(self, backend, num_nodes, edge_prob): + def time_voterank(self, backend, num_nodes, edge_prob): + """Benchmark VoteRank on different graph sizes and backends.""" G = get_cached_gnp_random_graph(num_nodes, edge_prob) - _ = nx.square_clustering(G, backend=backend) + _ = nxp.voterank(G, number_of_nodes=min(100, num_nodes), backend=backend) diff --git a/benchmarks/benchmarks/bench_harmonic_centrality.py b/benchmarks/benchmarks/bench_harmonic_centrality.py new file mode 100644 index 00000000..76935cb8 --- /dev/null +++ b/benchmarks/benchmarks/bench_harmonic_centrality.py @@ -0,0 +1,20 @@ +from .common import ( + backends, + num_nodes, + edge_prob, + get_cached_gnp_random_graph, + Benchmark, +) +import nx_parallel as nxp + + +class HarmonicCentrality(Benchmark): + """Benchmark for the parallelized Harmonic Centrality computation.""" + + params = [(backends), (num_nodes), (edge_prob)] + param_names = ["backend", "num_nodes", "edge_prob"] + + def time_harmonic_centrality(self, backend, num_nodes, edge_prob): + """Benchmark Harmonic Centrality on different graph sizes and backends.""" + G = get_cached_gnp_random_graph(num_nodes, edge_prob) + _ = nxp.harmonic_centrality(G, backend=backend) diff --git a/benchmarks/benchmarks/bench_voterank.py 
b/benchmarks/benchmarks/bench_voterank.py new file mode 100644 index 00000000..b8d1048d --- /dev/null +++ b/benchmarks/benchmarks/bench_voterank.py @@ -0,0 +1,28 @@ +import networkx as nx +import nx_parallel as nxp +from asv_bench.benchmarks.utils import benchmark + + +class BenchmarkVoteRank: + """Benchmark for the voterank algorithm in nx_parallel.""" + + def setup(self): + """Set up test graphs before running the benchmarks.""" + self.G_small = nx.erdos_renyi_graph(100, 0.1, seed=42) + self.G_medium = nx.erdos_renyi_graph(1000, 0.05, seed=42) + self.G_large = nx.erdos_renyi_graph(5000, 0.01, seed=42) + + @benchmark.benchmark + def time_voterank_small(self): + """Benchmark VoteRank on a small graph.""" + nxp.voterank(self.G_small, number_of_nodes=10) + + @benchmark.benchmark + def time_voterank_medium(self): + """Benchmark VoteRank on a medium graph.""" + nxp.voterank(self.G_medium, number_of_nodes=50) + + @benchmark.benchmark + def time_voterank_large(self): + """Benchmark VoteRank on a large graph.""" + nxp.voterank(self.G_large, number_of_nodes=100) From 0f2d724f0cd3c4e1ba4a688482de333fa1f17a1d Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Mon, 17 Mar 2025 22:21:59 +0530 Subject: [PATCH 10/11] Update in tests --- .../tests/test_harmonic_centrality.py | 40 ------------------- .../tests/test_voterank_centrality.py | 29 -------------- 2 files changed, 69 deletions(-) delete mode 100644 nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py delete mode 100644 nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py diff --git a/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py b/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py deleted file mode 100644 index f3d5c682..00000000 --- a/nx_parallel/algorithms/centrality/tests/test_harmonic_centrality.py +++ /dev/null @@ -1,40 +0,0 @@ -import networkx as nx -import nx_parallel as nxp -import math - - -def test_harmonic_centrality_get_chunks(): - def 
get_chunk(nodes): - num_chunks = nxp.get_n_jobs() - node_hc = {i: 0 for i in nodes} - - for node in nodes: - node_hc[node] = sum( - 1 / d - for _, d in nx.single_source_shortest_path_length(G, node).items() - if d > 0 - ) - - sorted_nodes = sorted(node_hc.items(), key=lambda x: x[1], reverse=True) - - chunks = [[] for _ in range(num_chunks)] - chunk_sums = [0] * num_chunks - - for node, value in sorted_nodes: - min_chunk_index = chunk_sums.index(min(chunk_sums)) - chunks[min_chunk_index].append(node) - chunk_sums[min_chunk_index] += value - - return chunks - - # Create a random graph - G = nx.fast_gnp_random_graph(100, 0.1, directed=False) - H = nxp.ParallelGraph(G) - - # Compute harmonic centrality with and without chunking - par_hc_chunk = nxp.harmonic_centrality(H, get_chunks=get_chunk) - par_hc = nxp.harmonic_centrality(H) - - # Validate results - for node in G.nodes: - assert math.isclose(par_hc[node], par_hc_chunk[node], abs_tol=1e-16) diff --git a/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py b/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py deleted file mode 100644 index 19c03c7a..00000000 --- a/nx_parallel/algorithms/centrality/tests/test_voterank_centrality.py +++ /dev/null @@ -1,29 +0,0 @@ -import networkx as nx -import nx_parallel as nxp - - -def test_voterank_get_chunks(): - def get_chunk(nodes): - num_chunks = nxp.get_n_jobs() - sorted_nodes = sorted(nodes, key=lambda n: G.degree(n), reverse=True) - - chunks = [[] for _ in range(num_chunks)] - chunk_sums = [0] * num_chunks - - for node in sorted_nodes: - min_chunk_index = chunk_sums.index(min(chunk_sums)) - chunks[min_chunk_index].append(node) - chunk_sums[min_chunk_index] += G.degree(node) - - return chunks - - # Generate a random graph - G = nx.fast_gnp_random_graph(100, 0.1, directed=False) - H = nxp.ParallelGraph(G) - - # Compute VoteRank with and without chunking - par_vr_chunk = nxp.voterank(H, get_chunks=get_chunk) - par_vr = nxp.voterank(H) - - # 
Ensure both methods produce the same influential nodes - assert par_vr_chunk == par_vr From 88ad6bcc5c83ab199ed60384f6c010274338cd5f Mon Sep 17 00:00:00 2001 From: RohitP2005 Date: Tue, 25 Mar 2025 22:15:31 +0530 Subject: [PATCH 11/11] removed unecessary comments --- nx_parallel/algorithms/centrality/tests/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nx_parallel/algorithms/centrality/tests/__init__.py b/nx_parallel/algorithms/centrality/tests/__init__.py index a11efc6d..63c57739 100644 --- a/nx_parallel/algorithms/centrality/tests/__init__.py +++ b/nx_parallel/algorithms/centrality/tests/__init__.py @@ -1,3 +1 @@ from .test_betweenness_centrality import * -from .test_harmonic_centrality import * -from .test_voterank_centrality import *