From 877fb0a474abec501fd7de87645e4f4675a8fcd2 Mon Sep 17 00:00:00 2001 From: kc611 Date: Wed, 2 Aug 2023 12:25:06 +0530 Subject: [PATCH 1/7] Refactoring jump targets and backedges to lists in order to update inplace --- .../core/datastructures/basic_block.py | 75 ++++++------------- 1 file changed, 24 insertions(+), 51 deletions(-) diff --git a/numba_rvsdg/core/datastructures/basic_block.py b/numba_rvsdg/core/datastructures/basic_block.py index 758d932..f78b872 100644 --- a/numba_rvsdg/core/datastructures/basic_block.py +++ b/numba_rvsdg/core/datastructures/basic_block.py @@ -1,6 +1,6 @@ import dis -from typing import Tuple, Dict, List, Optional -from dataclasses import dataclass, replace, field +from typing import Dict, List, Optional +from dataclasses import dataclass, field from numba_rvsdg.core.utils import _next_inst_offset from numba_rvsdg.core.datastructures import block_names @@ -26,9 +26,12 @@ class BasicBlock: name: str - _jump_targets: Tuple[str, ...] = tuple() + _jump_targets: list[str] = field(default_factory=list) + backedges: list[str] = field(default_factory=list) - backedges: Tuple[str, ...] = tuple() + def __post_init__(self) -> None: + assert isinstance(self._jump_targets, list) + assert isinstance(self.backedges, list) @property def is_exiting(self) -> bool: @@ -58,7 +61,7 @@ def fallthrough(self) -> bool: return len(self._jump_targets) == 1 @property - def jump_targets(self) -> Tuple[str, ...]: + def jump_targets(self) -> list[str]: """Retrieves the jump targets for this block, excluding any jump targets that are also backedges. @@ -73,30 +76,21 @@ def jump_targets(self) -> Tuple[str, ...]: for j in self._jump_targets: if j not in self.backedges: acc.append(j) - return tuple(acc) + return acc - def declare_backedge(self, target: str) -> "BasicBlock": + def declare_backedge(self, target: str) -> None: """Declare one of the jump targets as a backedge of this block. Parameters ---------- target: str The jump target that is to be declared as a backedge. - - Returns - ------- - basic_block: BasicBlock - The resulting block. - """ if target in self.jump_targets: assert not self.backedges - return replace(self, backedges=(target,)) - return self + self.backedges.append(target) - def replace_jump_targets( - self, jump_targets: Tuple[str, ...] - ) -> "BasicBlock": + def replace_jump_targets(self, jump_targets: list[str]) -> None: """Replaces jump targets of this block by the given tuple. This method replaces the jump targets of the current BasicBlock. @@ -111,16 +105,11 @@ def replace_jump_targets( ---------- jump_targets: Tuple The new jump target tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. - """ - return replace(self, _jump_targets=jump_targets) + self._jump_targets.clear() + self._jump_targets.extend(jump_targets) - def replace_backedges(self, backedges: Tuple[str, ...]) -> "BasicBlock": + def replace_backedges(self, backedges: list[str]) -> None: """Replaces back edges of this block by the given tuple. This method replaces the back edges of the current BasicBlock. @@ -131,14 +120,9 @@ def replace_backedges(self, backedges: Tuple[str, ...]) -> "BasicBlock": ---------- backedges: Tuple The new back edges tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. 
- """ - return replace(self, backedges=backedges) + self.backedges.clear() + self.backedges.extend(backedges) @dataclass(frozen=True) @@ -271,9 +255,7 @@ class SyntheticBranch(SyntheticBlock): variable: str = "" branch_value_table: Dict[int, str] = field(default_factory=lambda: {}) - def replace_jump_targets( - self, jump_targets: Tuple[str, ...] - ) -> "BasicBlock": + def replace_jump_targets(self, jump_targets: list[str]) -> None: """Replaces jump targets of this block by the given tuple. This method replaces the jump targets of the current BasicBlock. @@ -289,16 +271,10 @@ def replace_jump_targets( ---------- jump_targets: Tuple The new jump target tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. - """ - old_branch_value_table = self.branch_value_table - new_branch_value_table = {} + old_branch_value_table = self.branch_value_table.copy() + self.branch_value_table.clear() for target in self._jump_targets: if target not in jump_targets: # ASSUMPTION: only one jump_target is being updated @@ -307,18 +283,15 @@ def replace_jump_targets( new_target = next(iter(diff)) for k, v in old_branch_value_table.items(): if v == target: - new_branch_value_table[k] = new_target + self.branch_value_table[k] = new_target else: # copy all old values for k, v in old_branch_value_table.items(): if v == target: - new_branch_value_table[k] = v + self.branch_value_table[k] = v - return replace( - self, - _jump_targets=jump_targets, - branch_value_table=new_branch_value_table, - ) + self._jump_targets.clear() + self._jump_targets.extend(jump_targets) @dataclass(frozen=True) From 4a0b3a99884bd68dc22c8be71d3e508bbe8346ed Mon Sep 17 00:00:00 2001 From: kc611 Date: Wed, 2 Aug 2023 12:25:59 +0530 Subject: [PATCH 2/7] Refactored core datastructures to be compatible with the changes --- numba_rvsdg/core/datastructures/byte_flow.py | 86 ++++++-------------- numba_rvsdg/core/datastructures/flow_info.py | 8 +- numba_rvsdg/core/datastructures/scfg.py | 44 +++++----- numba_rvsdg/core/transformations.py | 65 ++++++--------- numba_rvsdg/rendering/rendering.py | 12 +-- 5 files changed, 79 insertions(+), 136 deletions(-) diff --git a/numba_rvsdg/core/datastructures/byte_flow.py b/numba_rvsdg/core/datastructures/byte_flow.py index fb08a79..04e2381 100644 --- a/numba_rvsdg/core/datastructures/byte_flow.py +++ b/numba_rvsdg/core/datastructures/byte_flow.py @@ -1,5 +1,4 @@ import dis -from copy import deepcopy from dataclasses import dataclass from typing import Generator, Callable @@ -61,90 +60,51 @@ def from_bytecode(code: Callable) -> "ByteFlow": # type: ignore scfg = flowinfo.build_basicblocks() return ByteFlow(bc=bc, scfg=scfg) - def _join_returns(self) -> "ByteFlow": + def _join_returns(self) -> None: """Joins the return blocks within the corresponding SCFG. - This method creates a deep copy of the SCFG and performs - operation to join return blocks within the control flow. - It returns a new ByteFlow object with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + This method performs operation to join return blocks within + the control flow. """ - scfg = deepcopy(self.scfg) - scfg.join_returns() - return ByteFlow(bc=self.bc, scfg=scfg) + self.scfg.join_returns() - def _restructure_loop(self) -> "ByteFlow": + def _restructure_loop(self) -> None: """Restructures the loops within the corresponding SCFG. 
- Creates a deep copy of the SCFG and performs the operation to - restructure loop constructs within the control flow using - the algorithm LOOP RESTRUCTURING from section 4.1 of Bahmann2015. + Performs the operation to restructure loop constructs within + the control flow using the algorithm LOOP RESTRUCTURING from + section 4.1 of Bahmann2015. It applies the restructuring operation to both the main SCFG - and any subregions within it. It returns a new ByteFlow object - with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + and any subregions within it. """ - scfg = deepcopy(self.scfg) - restructure_loop(scfg.region) - for region in _iter_subregions(scfg): + restructure_loop(self.scfg.region) + for region in _iter_subregions(self.scfg): restructure_loop(region) - return ByteFlow(bc=self.bc, scfg=scfg) - def _restructure_branch(self) -> "ByteFlow": + def _restructure_branch(self) -> None: """Restructures the branches within the corresponding SCFG. - Creates a deep copy of the SCFG and performs the operation to - restructure branch constructs within the control flow. It applies - the restructuring operation to both the main SCFG and any - subregions within it. It returns a new ByteFlow object with - the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + This method applies restructuring branch operation to both + the main SCFG and any subregions within it. """ - scfg = deepcopy(self.scfg) - restructure_branch(scfg.region) - for region in _iter_subregions(scfg): + restructure_branch(self.scfg.region) + for region in _iter_subregions(self.scfg): restructure_branch(region) - return ByteFlow(bc=self.bc, scfg=scfg) - def restructure(self) -> "ByteFlow": + def restructure(self) -> None: """Applies join_returns, restructure_loop and restructure_branch in the respective order on the SCFG. - Creates a deep copy of the SCFG and applies a series of - restructuring operations to it. The operations include - joining return blocks, restructuring loop constructs, and - restructuring branch constructs. It returns a new ByteFlow - object with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + Applies a series of restructuring operations to given SCFG. + The operations include joining return blocks, restructuring + loop constructs, and restructuring branch constructs. """ - scfg = deepcopy(self.scfg) # close - scfg.join_returns() + self._join_returns() # handle loop - restructure_loop(scfg.region) - for region in _iter_subregions(scfg): - restructure_loop(region) + self._restructure_loop() # handle branch - restructure_branch(scfg.region) - for region in _iter_subregions(scfg): - restructure_branch(region) - return ByteFlow(bc=self.bc, scfg=scfg) + self._restructure_branch() def _iter_subregions(scfg: SCFG) -> Generator[RegionBlock, SCFG, None]: diff --git a/numba_rvsdg/core/datastructures/flow_info.py b/numba_rvsdg/core/datastructures/flow_info.py index a6af42d..4824761 100644 --- a/numba_rvsdg/core/datastructures/flow_info.py +++ b/numba_rvsdg/core/datastructures/flow_info.py @@ -126,19 +126,19 @@ def build_basicblocks( for begin, end in zip(offsets, [*offsets[1:], end_offset]): name = names[begin] - targets: Tuple[str, ...] 
+ targets: list[str] term_offset = _prev_inst_offset(end) if term_offset not in self.jump_insts: # implicit jump - targets = (names[end],) + targets = [names[end]] else: - targets = tuple(names[o] for o in self.jump_insts[term_offset]) + targets = [names[o] for o in self.jump_insts[term_offset]] block = PythonBytecodeBlock( name=name, begin=begin, end=end, _jump_targets=targets, - backedges=(), + backedges=[], ) scfg.add_block(block) return scfg diff --git a/numba_rvsdg/core/datastructures/scfg.py b/numba_rvsdg/core/datastructures/scfg.py index 5b4d15e..73207a4 100644 --- a/numba_rvsdg/core/datastructures/scfg.py +++ b/numba_rvsdg/core/datastructures/scfg.py @@ -527,14 +527,14 @@ def insert_block( # TODO: needs a diagram and documentaion # initialize new block new_block = block_type( - name=new_name, _jump_targets=tuple(successors), backedges=tuple() + name=new_name, _jump_targets=list(successors), backedges=[] ) # add block to self self.add_block(new_block) # Replace any arcs from any of predecessors to any of successors with # an arc through the inserted block instead. for name in predecessors: - block = self.graph.pop(name) + block = self.graph[name] jt = list(block.jump_targets) if successors: for s in successors: @@ -545,7 +545,7 @@ def insert_block( jt.pop(jt.index(s)) else: jt.append(new_name) - self.add_block(block.replace_jump_targets(jump_targets=tuple(jt))) + block.replace_jump_targets(jump_targets=jt) def insert_SyntheticExit( self, @@ -639,8 +639,8 @@ def insert_block_and_control_blocks( variable_assignment[branch_variable] = branch_variable_value synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(new_name,), - backedges=(), + _jump_targets=[new_name], + backedges=[], variable_assignment=variable_assignment, ) # add block @@ -652,16 +652,12 @@ def insert_block_and_control_blocks( # replace previous successor with synth_assign jt[jt.index(s)] = synth_assign # finally, replace the jump_targets - self.add_block( - self.graph.pop(name).replace_jump_targets( - jump_targets=tuple(jt) - ) - ) + self.graph[name].replace_jump_targets(jump_targets=jt) # initialize new block, which will hold the branching table new_block = SyntheticHead( name=new_name, - _jump_targets=tuple(successors), - backedges=tuple(), + _jump_targets=list(successors), + backedges=[], variable=branch_variable, branch_value_table=branch_value_table, ) @@ -1079,7 +1075,7 @@ def to_yaml(scfg: "SCFG") -> str: return ys @staticmethod - def to_dict(scfg: "SCFG") -> Dict[str, Dict[str, Any]]: + def to_dict(scfg: "SCFG") -> Dict[str, Dict[str, BasicBlock]]: """Helper method to convert the SCFG object to a dictionary representation. @@ -1109,13 +1105,16 @@ def reverse_lookup(value: type) -> str: raise TypeError("Block type not found.") seen = set() - q: Set[Tuple[str, BasicBlock]] = set() + q: Set[str] = set() # Order of elements doesn't matter since they're going to # be sorted at the end. 
- q.update(scfg.graph.items()) + graph_dict = {} + q.update(set(scfg.graph.keys())) + graph_dict.update(scfg.graph) while q: - key, value = q.pop() + key = q.pop() + value = scfg.graph[key] if key in seen: continue seen.add(key) @@ -1123,9 +1122,8 @@ def reverse_lookup(value: type) -> str: block_type = reverse_lookup(type(value)) blocks[key] = {"type": block_type} if isinstance(value, RegionBlock): - assert value.subregion is not None - assert value.parent_region is not None - q.update(value.subregion.graph.items()) + q.update(value.subregion.graph.keys()) + graph_dict.update(value.subregion.graph) blocks[key]["kind"] = value.kind blocks[key]["contains"] = sorted( [idx.name for idx in value.subregion.graph.values()] @@ -1211,14 +1209,14 @@ def extract_block_info( List of backedges of the requested block. """ block_info = blocks[current_name].copy() - block_edges = tuple(block_ref_dict[idx] for idx in edges[current_name]) + block_edges = [block_ref_dict[idx] for idx in edges[current_name]] if backedges.get(current_name): - block_backedges = tuple( + block_backedges = [ block_ref_dict[idx] for idx in backedges[current_name] - ) + ] else: - block_backedges = () + block_backedges = [] block_type = block_info.pop("type") diff --git a/numba_rvsdg/core/transformations.py b/numba_rvsdg/core/transformations.py index ab1cd13..f9da5c2 100644 --- a/numba_rvsdg/core/transformations.py +++ b/numba_rvsdg/core/transformations.py @@ -60,9 +60,7 @@ def loop_restructure_helper(scfg: SCFG, loop: Set[str]) -> None: and len(exiting_blocks) == 1 and backedge_blocks[0] == next(iter(exiting_blocks)) ): - scfg.add_block( - scfg.graph.pop(backedge_blocks[0]).declare_backedge(loop_head) - ) + scfg.graph[backedge_blocks[0]].declare_backedge(loop_head) return # The synthetic exiting latch and synthetic exit need to be created @@ -152,8 +150,8 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: # Create the actual control variable block synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(synth_exiting_latch,), - backedges=(), + _jump_targets=[synth_exiting_latch], + backedges=[], variable_assignment=variable_assignment, ) # Insert the assignment to the scfg @@ -183,20 +181,18 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: # that point to the headers, no need to add a backedge, # since it will be contained in the SyntheticExitingLatch # later on. - block = scfg.graph.pop(name) + block = scfg.graph[name] jts = list(block.jump_targets) for h in headers: if h in jts: jts.remove(h) - scfg.add_block( - block.replace_jump_targets(jump_targets=tuple(jts)) - ) + block.replace_jump_targets(jump_targets=jts) # Setup the assignment block and initialize it with the # correct jump_targets and variable assignment. synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(synth_exiting_latch,), - backedges=(), + _jump_targets=[synth_exiting_latch], + backedges=[], variable_assignment=variable_assignment, ) # Add the new block to the SCFG @@ -205,22 +201,19 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: new_jt[new_jt.index(jt)] = synth_assign # finally, replace the jump_targets for this block with the new # ones - scfg.add_block( - scfg.graph.pop(name).replace_jump_targets( - jump_targets=tuple(new_jt) - ) - ) + scfg.graph[name].replace_jump_targets(jump_targets=new_jt) + # Add any new blocks to the loop. loop.update(new_blocks) # Insert the exiting latch, add it to the loop and to the graph. 
synth_exiting_latch_block = SyntheticExitingLatch( name=synth_exiting_latch, - _jump_targets=( + _jump_targets=[ synth_exit if needs_synth_exit else next(iter(exit_blocks)), loop_head, - ), - backedges=(loop_head,), + ], + backedges=[loop_head], variable=backedge_variable, branch_value_table=backedge_value_table, ) @@ -231,8 +224,8 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: if needs_synth_exit: synth_exit_block = SyntheticExitBranch( name=synth_exit, - _jump_targets=tuple(exit_blocks), - backedges=(), + _jump_targets=list(exit_blocks), + backedges=[], variable=exit_variable, branch_value_table=exit_value_table, ) @@ -331,33 +324,26 @@ def find_tail_blocks( def update_exiting( region_block: RegionBlock, new_region_header: str, new_region_name: str -) -> RegionBlock: +) -> None: # Recursively updates the exiting blocks of a regionblock region_exiting = region_block.exiting - assert region_block.subregion is not None - region_exiting_block: BasicBlock = region_block.subregion.graph.pop( + region_exiting_block: BasicBlock = region_block.subregion.graph[ region_exiting - ) + ] jt = list(region_exiting_block._jump_targets) for idx, s in enumerate(jt): if s is new_region_header: jt[idx] = new_region_name - region_exiting_block = region_exiting_block.replace_jump_targets( - jump_targets=tuple(jt) - ) + region_exiting_block.replace_jump_targets(jump_targets=jt) be = list(region_exiting_block.backedges) for idx, s in enumerate(be): if s is new_region_header: be[idx] = new_region_name - region_exiting_block = region_exiting_block.replace_backedges( - backedges=tuple(be) - ) + region_exiting_block.replace_backedges(backedges=be) if isinstance(region_exiting_block, RegionBlock): - region_exiting_block = update_exiting( + update_exiting( region_exiting_block, new_region_header, new_region_name ) - region_block.subregion.add_block(region_exiting_block) - return region_block def extract_region( @@ -390,27 +376,26 @@ def extract_region( # the SCFG represents should not be the meta region. 
assert scfg.region.kind != "meta" continue - entry = scfg.graph.pop(name) + entry = scfg.graph[name] jt = list(entry._jump_targets) for idx, s in enumerate(jt): if s is region_header: jt[idx] = region_name - entry = entry.replace_jump_targets(jump_targets=tuple(jt)) + entry.replace_jump_targets(jump_targets=jt) be = list(entry.backedges) for idx, s in enumerate(be): if s is region_header: be[idx] = region_name - entry = entry.replace_backedges(backedges=tuple(be)) + entry.replace_backedges(backedges=be) # If the entry itself is a region, update it's # exiting blocks too, recursively if isinstance(entry, RegionBlock): - entry = update_exiting(entry, region_header, region_name) - scfg.add_block(entry) + update_exiting(entry, region_header, region_name) region = RegionBlock( name=region_name, _jump_targets=scfg[region_exiting].jump_targets, - backedges=(), + backedges=[], kind=region_kind, header=region_header, subregion=head_subgraph, diff --git a/numba_rvsdg/rendering/rendering.py b/numba_rvsdg/rendering/rendering.py index a8c1ba0..2852e00 100644 --- a/numba_rvsdg/rendering/rendering.py +++ b/numba_rvsdg/rendering/rendering.py @@ -365,14 +365,14 @@ def render_flow(flow: ByteFlow) -> None: """ ByteFlowRenderer().render_byteflow(flow).view("before") - cflow = flow._join_returns() - ByteFlowRenderer().render_byteflow(cflow).view("closed") + flow._join_returns() + ByteFlowRenderer().render_byteflow(flow).view("closed") - lflow = cflow._restructure_loop() - ByteFlowRenderer().render_byteflow(lflow).view("loop restructured") + flow._restructure_loop() + ByteFlowRenderer().render_byteflow(flow).view("loop restructured") - bflow = lflow._restructure_branch() - ByteFlowRenderer().render_byteflow(bflow).view("branch restructured") + flow._restructure_branch() + ByteFlowRenderer().render_byteflow(flow).view("branch restructured") def render_scfg(scfg: SCFG) -> None: From 43ce5bc7bab308098f3e14fd4f9b9e9759ca2623 Mon Sep 17 00:00:00 2001 From: kc611 Date: Wed, 2 Aug 2023 12:26:53 +0530 Subject: [PATCH 3/7] Refactored the test suite --- numba_rvsdg/tests/test_byteflow.py | 28 ++++++++++----------- numba_rvsdg/tests/test_figures.py | 4 +-- numba_rvsdg/tests/test_scfg.py | 9 +++---- numba_rvsdg/tests/test_simulate.py | 39 ++++++------------------------ 4 files changed, 27 insertions(+), 53 deletions(-) diff --git a/numba_rvsdg/tests/test_byteflow.py b/numba_rvsdg/tests/test_byteflow.py index 46aea65..1ed065e 100644 --- a/numba_rvsdg/tests/test_byteflow.py +++ b/numba_rvsdg/tests/test_byteflow.py @@ -114,16 +114,16 @@ def test_constructor(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) self.assertEqual(block.name, "python_bytecode_block_0") self.assertEqual(block.begin, 0) self.assertEqual(block.end, 8) self.assertFalse(block.fallthrough) self.assertTrue(block.is_exiting) - self.assertEqual(block.jump_targets, ()) - self.assertEqual(block.backedges, ()) + self.assertEqual(block.jump_targets, []) + self.assertEqual(block.backedges, []) def test_is_jump_target(self): name_gen = NameGenerator() @@ -131,12 +131,12 @@ def test_is_jump_target(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=( + _jump_targets=[ name_gen.new_block_name(block_names.PYTHON_BYTECODE), - ), - backedges=(), + ], + backedges=[], ) - self.assertEqual(block.jump_targets, ("python_bytecode_block_1",)) + self.assertEqual(block.jump_targets, ["python_bytecode_block_1"]) 
self.assertFalse(block.is_exiting) def test_get_instructions(self): @@ -145,8 +145,8 @@ def test_get_instructions(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) expected = [ Instruction( @@ -242,8 +242,8 @@ def test_build_basic_blocks(self): name=new_name, begin=0, end=10, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) } ) @@ -266,8 +266,8 @@ def test_from_bytecode(self): name=new_name, begin=0, end=10, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) } ) diff --git a/numba_rvsdg/tests/test_figures.py b/numba_rvsdg/tests/test_figures.py index 3309c04..eb6f279 100644 --- a/numba_rvsdg/tests/test_figures.py +++ b/numba_rvsdg/tests/test_figures.py @@ -442,7 +442,7 @@ def test_figure_3(self): flow = FlowInfo.from_bytecode(bc) scfg = flow.build_basicblocks() byteflow = ByteFlow(bc=bc, scfg=scfg) - byteflow = byteflow.restructure() + byteflow.restructure() x, _ = SCFG.from_yaml(fig_3_yaml) self.assertSCFGEqual(x, byteflow.scfg) @@ -475,7 +475,7 @@ def test_figure_4(self): flow = FlowInfo.from_bytecode(bc) scfg = flow.build_basicblocks() byteflow = ByteFlow(bc=bc, scfg=scfg) - byteflow = byteflow.restructure() + byteflow.restructure() x, _ = SCFG.from_yaml(fig_4_yaml) self.assertSCFGEqual(x, byteflow.scfg) diff --git a/numba_rvsdg/tests/test_scfg.py b/numba_rvsdg/tests/test_scfg.py index 71e167e..f236c03 100644 --- a/numba_rvsdg/tests/test_scfg.py +++ b/numba_rvsdg/tests/test_scfg.py @@ -162,7 +162,7 @@ def test_scfg_iter(self): block_0 = name_gen.new_block_name(block_names.BASIC) block_1 = name_gen.new_block_name(block_names.BASIC) expected = [ - (block_0, BasicBlock(name=block_0, _jump_targets=(block_1,))), + (block_0, BasicBlock(name=block_0, _jump_targets=[block_1])), (block_1, BasicBlock(name=block_1)), ] scfg, _ = SCFG.from_yaml( @@ -194,17 +194,14 @@ def foo(n): def test_concealed_region_view_iter(self): flow = ByteFlow.from_bytecode(self.foo) - restructured = flow._restructure_loop() + flow._restructure_loop() expected = [ ("python_bytecode_block_0", PythonBytecodeBlock), ("loop_region_0", RegionBlock), ("python_bytecode_block_3", PythonBytecodeBlock), ] received = list( - ( - (k, type(v)) - for k, v in restructured.scfg.concealed_region_view.items() - ) + ((k, type(v)) for k, v in flow.scfg.concealed_region_view.items()) ) self.assertEqual(expected, received) diff --git a/numba_rvsdg/tests/test_simulate.py b/numba_rvsdg/tests/test_simulate.py index 9986e71..566442b 100644 --- a/numba_rvsdg/tests/test_simulate.py +++ b/numba_rvsdg/tests/test_simulate.py @@ -4,29 +4,6 @@ from numba_rvsdg.tests.simulator import Simulator import unittest -# flow = ByteFlow.from_bytecode(foo) -# #pprint(flow.scfg) -# flow = flow.restructure() -# #pprint(flow.scfg) -# # pprint(rtsflow.scfg) -# ByteFlowRenderer().render_byteflow(flow).view() -# print(dis(foo)) -# -# sim = Simulator(flow, foo.__globals__) -# ret = sim.run(dict(x=1)) -# assert ret == foo(x=1) -# -# #sim = Simulator(flow, foo.__globals__) -# #ret = sim.run(dict(x=100)) -# #assert ret == foo(x=100) - -# You can use the following snipppet to visually debug the restructured -# byteflow: -# -# ByteFlowRenderer().render_byteflow(flow).view() -# -# - class SimulatorTest(unittest.TestCase): def _run(self, func, flow, kwargs): @@ -44,7 +21,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # if case self._run(foo, flow, {"x": 1}) @@ -59,7 
+36,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -78,7 +55,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -97,7 +74,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -121,7 +98,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # no loop self._run(foo, flow, {"x": 0}) @@ -145,7 +122,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass self._run(foo, flow, {"x": 0}) @@ -161,7 +138,7 @@ def foo(x, y): return (x > 0 and x < 10) or (y > 0 and y < 10) flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() self._run(foo, flow, {"x": 5, "y": 5}) @@ -175,7 +152,7 @@ def foo(s, e): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # no looping self._run(foo, flow, {"s": 0, "e": 0}) From d2331e7a9a75be5b1e7ee8b25b04e78d8140864e Mon Sep 17 00:00:00 2001 From: kc611 Date: Wed, 2 Aug 2023 14:17:56 +0530 Subject: [PATCH 4/7] Fixed mypy errors --- numba_rvsdg/core/datastructures/scfg.py | 27 ++++++++++++++++--------- numba_rvsdg/core/transformations.py | 3 ++- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/numba_rvsdg/core/datastructures/scfg.py b/numba_rvsdg/core/datastructures/scfg.py index 73207a4..ac2cd26 100644 --- a/numba_rvsdg/core/datastructures/scfg.py +++ b/numba_rvsdg/core/datastructures/scfg.py @@ -1075,7 +1075,7 @@ def to_yaml(scfg: "SCFG") -> str: return ys @staticmethod - def to_dict(scfg: "SCFG") -> Dict[str, Dict[str, BasicBlock]]: + def to_dict(scfg: "SCFG") -> Dict[str, Dict[str, Any]]: """Helper method to convert the SCFG object to a dictionary representation. @@ -1109,7 +1109,8 @@ def reverse_lookup(value: type) -> str: # Order of elements doesn't matter since they're going to # be sorted at the end. 
graph_dict = {} - q.update(set(scfg.graph.keys())) + all_keys: set[str] = set(scfg.graph.keys()) + q.update(all_keys) graph_dict.update(scfg.graph) while q: @@ -1119,18 +1120,20 @@ def reverse_lookup(value: type) -> str: continue seen.add(key) - block_type = reverse_lookup(type(value)) + block_type: str = reverse_lookup(type(value)) blocks[key] = {"type": block_type} if isinstance(value, RegionBlock): - q.update(value.subregion.graph.keys()) - graph_dict.update(value.subregion.graph) + inner_graph = value.subregion.graph # type: ignore + q.update(inner_graph.keys()) + graph_dict.update(inner_graph) blocks[key]["kind"] = value.kind blocks[key]["contains"] = sorted( - [idx.name for idx in value.subregion.graph.values()] + [idx.name for idx in inner_graph.values()] ) blocks[key]["header"] = value.header blocks[key]["exiting"] = value.exiting - blocks[key]["parent_region"] = value.parent_region.name + parent_name = value.parent_region.name # type: ignore + blocks[key]["parent_region"] = parent_name elif isinstance(value, SyntheticBranch): blocks[key]["branch_value_table"] = value.branch_value_table blocks[key]["variable"] = value.variable @@ -1142,9 +1145,13 @@ def reverse_lookup(value: type) -> str: edges[key] = sorted([i for i in value._jump_targets]) backedges[key] = sorted([i for i in value.backedges]) - graph_dict = {"blocks": blocks, "edges": edges, "backedges": backedges} + graph_dict = { + "blocks": blocks, # type: ignore + "edges": edges, # type: ignore + "backedges": backedges, # type: ignore + } - return graph_dict + return graph_dict # type: ignore @staticmethod def find_outer_graph(graph_dict: Dict[str, Dict[str, Any]]) -> Set[str]: @@ -1179,7 +1186,7 @@ def extract_block_info( block_ref_dict: Dict[str, str], edges: Dict[str, List[str]], backedges: Dict[str, List[str]], - ) -> Tuple[Dict[str, Any], str, Tuple[str, ...], Tuple[str, ...]]: + ) -> Tuple[Dict[str, Any], str, list[str], list[str]]: """Helper method to extract information from various components of an `SCFG` graph. diff --git a/numba_rvsdg/core/transformations.py b/numba_rvsdg/core/transformations.py index f9da5c2..6b48383 100644 --- a/numba_rvsdg/core/transformations.py +++ b/numba_rvsdg/core/transformations.py @@ -327,7 +327,8 @@ def update_exiting( ) -> None: # Recursively updates the exiting blocks of a regionblock region_exiting = region_block.exiting - region_exiting_block: BasicBlock = region_block.subregion.graph[ + region_exiting_block: BasicBlock + region_exiting_block = region_block.subregion.graph[ # type: ignore region_exiting ] jt = list(region_exiting_block._jump_targets) From 688b43b7a2d4a5e74664a1bffb3a1398c6ab92f3 Mon Sep 17 00:00:00 2001 From: Kaustubh Date: Tue, 29 Aug 2023 17:18:36 +0530 Subject: [PATCH 5/7] Apply suggestions from code review Co-authored-by: esc --- numba_rvsdg/core/datastructures/basic_block.py | 18 +++++++++--------- numba_rvsdg/core/datastructures/scfg.py | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/numba_rvsdg/core/datastructures/basic_block.py b/numba_rvsdg/core/datastructures/basic_block.py index f78b872..a89d16e 100644 --- a/numba_rvsdg/core/datastructures/basic_block.py +++ b/numba_rvsdg/core/datastructures/basic_block.py @@ -91,7 +91,7 @@ def declare_backedge(self, target: str) -> None: self.backedges.append(target) def replace_jump_targets(self, jump_targets: list[str]) -> None: - """Replaces jump targets of this block by the given tuple. + """Replaces jump targets of this block by the given list. 
This method replaces the jump targets of the current BasicBlock. The provided jump targets must be in the same order as their @@ -103,14 +103,14 @@ def replace_jump_targets(self, jump_targets: list[str]) -> None: Parameters ---------- - jump_targets: Tuple - The new jump target tuple. Must be ordered. + jump_targets: List + The new jump target list. Must be ordered. """ self._jump_targets.clear() self._jump_targets.extend(jump_targets) def replace_backedges(self, backedges: list[str]) -> None: - """Replaces back edges of this block by the given tuple. + """Replaces back edges of this block by the given list. This method replaces the back edges of the current BasicBlock. The provided back edges must be in the same order as their @@ -118,8 +118,8 @@ def replace_backedges(self, backedges: list[str]) -> None: Parameters ---------- - backedges: Tuple - The new back edges tuple. Must be ordered. + backedges: List + The new back edges list. Must be ordered. """ self.backedges.clear() self.backedges.extend(backedges) @@ -256,7 +256,7 @@ class SyntheticBranch(SyntheticBlock): branch_value_table: Dict[int, str] = field(default_factory=lambda: {}) def replace_jump_targets(self, jump_targets: list[str]) -> None: - """Replaces jump targets of this block by the given tuple. + """Replaces jump targets of this block by the given list. This method replaces the jump targets of the current BasicBlock. The provided jump targets must be in the same order as their @@ -269,8 +269,8 @@ def replace_jump_targets(self, jump_targets: list[str]) -> None: Parameters ---------- - jump_targets: Tuple - The new jump target tuple. Must be ordered. + jump_targets: List + The new jump target list. Must be ordered. """ old_branch_value_table = self.branch_value_table.copy() diff --git a/numba_rvsdg/core/datastructures/scfg.py b/numba_rvsdg/core/datastructures/scfg.py index ac2cd26..3e3a96d 100644 --- a/numba_rvsdg/core/datastructures/scfg.py +++ b/numba_rvsdg/core/datastructures/scfg.py @@ -527,7 +527,7 @@ def insert_block( # TODO: needs a diagram and documentaion # initialize new block new_block = block_type( - name=new_name, _jump_targets=list(successors), backedges=[] + name=new_name, _jump_targets=successors, backedges=[] ) # add block to self self.add_block(new_block) @@ -656,7 +656,7 @@ def insert_block_and_control_blocks( # initialize new block, which will hold the branching table new_block = SyntheticHead( name=new_name, - _jump_targets=list(successors), + _jump_targets=successors, backedges=[], variable=branch_variable, branch_value_table=branch_value_table, From 6a18e03e59715f9309616143ec6cd87749d49c54 Mon Sep 17 00:00:00 2001 From: kc611 Date: Thu, 14 Sep 2023 18:25:20 +0530 Subject: [PATCH 6/7] Added a shallow copy for blocks to prevent memory mutations in SCFG.insert_blocks --- numba_rvsdg/core/datastructures/scfg.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numba_rvsdg/core/datastructures/scfg.py b/numba_rvsdg/core/datastructures/scfg.py index 3e3a96d..5018266 100644 --- a/numba_rvsdg/core/datastructures/scfg.py +++ b/numba_rvsdg/core/datastructures/scfg.py @@ -524,6 +524,11 @@ def insert_block( block_type: SyntheticBlock The type/class of the newly created block. """ + # To prevent spurious mutations in successsors and predecessors + # since they are a part of the graph. 
+ successors = successors.copy() + predecessors = predecessors.copy() + # TODO: needs a diagram and documentaion # initialize new block new_block = block_type( From 74ed334a46c8fe5a6c18ccaec809b72920d1a85d Mon Sep 17 00:00:00 2001 From: kc611 Date: Thu, 14 Sep 2023 18:36:29 +0530 Subject: [PATCH 7/7] Corrected input types for various tests --- numba_rvsdg/tests/test_transforms.py | 42 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/numba_rvsdg/tests/test_transforms.py b/numba_rvsdg/tests/test_transforms.py index 746c7ac..c2e0055 100644 --- a/numba_rvsdg/tests/test_transforms.py +++ b/numba_rvsdg/tests/test_transforms.py @@ -40,7 +40,7 @@ def test_linear(self): expected_scfg, _ = SCFG.from_yaml(expected) new_name = original_scfg.name_gen.new_block_name(block_names.BASIC) original_scfg.insert_block( - new_name, (block_dict["0"],), (block_dict["1"],), BasicBlock + new_name, [block_dict["0"]], [block_dict["1"]], BasicBlock ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -81,8 +81,8 @@ def test_dual_predecessor(self): new_name = original_scfg.name_gen.new_block_name(block_names.BASIC) original_scfg.insert_block( new_name, - (block_dict["0"], block_dict["1"]), - (block_dict["2"],), + [block_dict["0"], block_dict["1"]], + [block_dict["2"]], BasicBlock, ) self.assertSCFGEqual( @@ -130,8 +130,8 @@ def test_dual_successor(self): expected_scfg, _ = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["0"],), - (block_dict["1"], block_dict["2"]), + [block_dict["0"]], + [block_dict["1"], block_dict["2"]], BasicBlock, ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -184,8 +184,8 @@ def test_dual_predecessor_and_dual_successor(self): expected_scfg, _ = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["1"], block_dict["2"]), - (block_dict["3"], block_dict["4"]), + [block_dict["1"], block_dict["2"]], + [block_dict["3"], block_dict["4"]], BasicBlock, ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -238,8 +238,8 @@ def test_dual_predecessor_and_dual_successor_with_additional_arcs(self): expected_scfg, expected_block_dict = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["1"], block_dict["2"]), - (block_dict["3"], block_dict["4"]), + [block_dict["1"], block_dict["2"]], + [block_dict["3"], block_dict["4"]], BasicBlock, ) self.assertSCFGEqual( @@ -318,8 +318,8 @@ def test_join_tails_and_exits_case_00(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["0"],) - exits = (block_dict["1"],) + tails = [block_dict["0"]] + exits = [block_dict["1"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -369,8 +369,8 @@ def test_join_tails_and_exits_case_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["0"],) - exits = (block_dict["1"], block_dict["2"]) + tails = [block_dict["0"]] + exits = [block_dict["1"], block_dict["2"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -423,8 +423,8 @@ def test_join_tails_and_exits_case_02_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"],) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits 
) @@ -477,8 +477,8 @@ def test_join_tails_and_exits_case_02_02(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"],) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits @@ -546,8 +546,8 @@ def test_join_tails_and_exits_case_03_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"], block_dict["4"]) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"], block_dict["4"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -616,8 +616,8 @@ def test_join_tails_and_exits_case_03_02(self): backedges: """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"], block_dict["4"]) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"], block_dict["4"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits )
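
Editor's note (not part of the patch series): the patches above replace the tuple-based, copy-on-write block API built on dataclasses.replace() with mutable lists that are edited in place; declare_backedge(), replace_jump_targets() and replace_backedges() now return None and rewire the existing block. The sketch below illustrates that behaviour using only names visible in the diffs; the module path is taken from the diff header and the literal block names are invented for illustration, so treat it as a hedged usage sketch rather than repository code.

# Illustrative only -- mirrors the post-patch API of
# numba_rvsdg/core/datastructures/basic_block.py (path assumed from the diff header).
from numba_rvsdg.core.datastructures.basic_block import BasicBlock

# Blocks now hold mutable lists rather than tuples.
head = BasicBlock(name="basic_block_0", _jump_targets=["basic_block_1"])

# declare_backedge() appends to head.backedges instead of returning a
# replace()d copy; the original object is updated in place.
head.declare_backedge("basic_block_1")
assert head.backedges == ["basic_block_1"]
assert head.jump_targets == []  # the jump_targets property filters out backedges

# replace_jump_targets() clears and refills the existing list in place,
# so callers such as SCFG.insert_block() no longer pop and re-add blocks.
head.replace_jump_targets(jump_targets=["basic_block_2"])
assert head._jump_targets == ["basic_block_2"]

This works because a frozen dataclass only prevents rebinding attributes: the list objects themselves stay mutable, so blocks can be rewired without being replaced in the owning SCFG graph.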
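
In the same spirit, PATCH 2/7 turns ByteFlow._join_returns, _restructure_loop, _restructure_branch and restructure() into in-place operations that return None instead of returning new ByteFlow objects built from deep copies. A minimal sketch of the resulting call pattern follows, assuming only the names shown in the diffs (ByteFlow.from_bytecode, restructure, the scfg attribute and its graph dict); the import path is assumed from the diff header and the example function is made up for illustration.

from numba_rvsdg.core.datastructures.byte_flow import ByteFlow


def foo(x):
    # A made-up function whose bytecode we build a CFG from.
    c = 0
    for i in range(x):
        c += i
    return c


flow = ByteFlow.from_bytecode(foo)
# Pre-patch callers wrote `flow = flow.restructure()`; the method now
# mutates flow.scfg directly and returns None, as the updated tests and
# render_flow() in PATCH 2/7 show.
flow.restructure()
print(list(flow.scfg.graph.keys()))  # the restructured SCFG, same object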