Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

⚡️ Speed up function generate__pystats by 11% #344

Open
wants to merge 1 commit into
base: main
Choose a base branch
from

Conversation

codeflash-ai[bot]
Copy link

@codeflash-ai codeflash-ai bot commented Jan 11, 2025

📄 11% (0.11x) speedup for generate__pystats in bench_runner/scripts/install.py

⏱️ Runtime : 573 microseconds → 517 microseconds (best of 419 runs)

📝 Explanation and details

To optimize the given Python program, we will minimize redundant calls and utilize more efficient data manipulation.

Changes:

  1. Avoid redundant calls to flag_env() in add_flag_env by caching its value.
  2. Use list comprehension where applicable for more concise and potentially faster list processing.

Here is the optimized version of the given Python program.

Correctness verification report:

Test Status
⚙️ Existing Unit Tests 🔘 None Found
🌀 Generated Regression Tests 19 Passed
⏪ Replay Tests 🔘 None Found
🔎 Concolic Coverage Tests 🔘 None Found
📊 Tests Coverage 100.0%
🌀 Generated Regression Tests Details
from typing import Any

# imports
import pytest  # used for our unit tests
# function to test
from bench_runner import flags
from bench_runner.scripts.install import generate__pystats


# Mocking the flags and flag_env
class MockFlag:
    """Minimal stand-in for a bench_runner flag object.

    Only the two attributes that generate__pystats reads are modeled:
    ``gha_variable`` (the GitHub Actions input name) and ``description``.
    """

    def __init__(self, gha_variable, description):
        self.gha_variable = gha_variable
        self.description = description

def flag_env():
    """Stand-in for bench_runner.flags.flag_env; always yields a fixed sentinel."""
    sentinel = "mocked_flag_env"
    return sentinel

# unit tests
@pytest.fixture
def mock_flags(monkeypatch):
    """Patch flags.FLAGS with two simple mock flags for the duration of a test."""
    fake_flags = [
        MockFlag("flag1", "Description for flag1"),
        MockFlag("flag2", "Description for flag2"),
    ]
    monkeypatch.setattr(flags, "FLAGS", fake_flags)

def test_basic_functionality(mock_flags):
    """Smoke test: generate__pystats accepts a minimal valid `dst` structure."""
    # Test with a single flag and minimal valid input structure
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {}
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)

def test_missing_on_key(mock_flags):
    """generate__pystats must raise KeyError when `dst` lacks the `on` key."""
    # Test with a `dst` dictionary missing the `on` key
    dst = {
        "jobs": {}
    }
    with pytest.raises(KeyError):
        generate__pystats(dst)

def test_missing_workflow_dispatch_key(mock_flags):
    """KeyError is expected when `on` lacks the `workflow_dispatch` entry."""
    # Test with a `dst` dictionary missing the `workflow_dispatch` key
    dst = {
        "on": {
            "workflow_call": {"inputs": {}}
        },
        "jobs": {}
    }
    with pytest.raises(KeyError):
        generate__pystats(dst)

def test_missing_workflow_call_key(mock_flags):
    """KeyError is expected when `on` lacks the `workflow_call` entry."""
    # Test with a `dst` dictionary missing the `workflow_call` key
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}}
        },
        "jobs": {}
    }
    with pytest.raises(KeyError):
        generate__pystats(dst)

def test_empty_dst_dict(mock_flags):
    """A completely empty `dst` dictionary must raise KeyError."""
    # Test with an empty `dst` dictionary
    dst = {}
    with pytest.raises(KeyError):
        generate__pystats(dst)

def test_empty_flags(mock_flags, monkeypatch):
    """With an empty flags.FLAGS collection the function still runs cleanly."""
    # Test with an empty `flags.FLAGS` collection
    monkeypatch.setattr(flags, 'FLAGS', [])
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {}
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)

def test_complex_job_structures(mock_flags):
    """Jobs whose steps reference ``${{ env.flags }}`` are processed without error."""
    # Test with jobs containing multiple steps
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${{ env.flags }}"},
                    {"run": "echo Hello"}
                ]
            }
        }
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)

def test_flags_with_special_characters(mock_flags, monkeypatch):
    """Flag names containing non-identifier characters are handled."""
    # Test with flags containing special characters in `gha_variable`
    special_flags = [
        MockFlag("flag-special", "Description with special characters!"),
    ]
    monkeypatch.setattr(flags, 'FLAGS', special_flags)
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {}
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)

def test_large_number_of_flags(monkeypatch):
    """Scalability check: 1000 flags are processed without error.

    Note: this test deliberately uses monkeypatch directly instead of the
    mock_flags fixture, since it installs its own flag list.
    """
    # Test with a large number of flags (e.g., 1000+)
    large_flags = [MockFlag(f"flag{i}", f"Description for flag{i}") for i in range(1000)]
    monkeypatch.setattr(flags, 'FLAGS', large_flags)
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {}
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)

def test_large_number_of_jobs(mock_flags):
    """Scalability check: 100 jobs, each with a flags-referencing step."""
    # Test with a large number of jobs (e.g., 100+)
    jobs = {f"job{i}": {"steps": [{"run": "echo ${{ env.flags }}"}]} for i in range(100)}
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": jobs
    }
    # codeflash compares this captured output against the optimized version.
    codeflash_output = generate__pystats(dst)
# codeflash_output is used to check that the output of the original code is the same as that of the optimized code.

from typing import Any

# imports
import pytest  # used for our unit tests
# function to test
from bench_runner import flags
from bench_runner.scripts.install import generate__pystats


# unit tests
class MockFlag:
    """Lightweight fake of a bench_runner flag, exposing only the
    ``gha_variable`` and ``description`` attributes the code under test uses."""

    def __init__(self, gha_variable, description):
        self.gha_variable = gha_variable
        self.description = description

@pytest.fixture
def mock_flags():
    """Save flags.FLAGS before the test and restore it afterwards.

    NOTE(review): unlike the monkeypatch-based fixture in the first test
    module, this fixture installs no mocks itself — each test assigns
    flags.FLAGS directly and relies on this fixture only for cleanup.
    """
    original_flags = flags.FLAGS
    yield
    flags.FLAGS = original_flags

def flag_env():
    """Mocked replacement for bench_runner.flags.flag_env.

    Returns the same fixed string on every call so test expectations are stable.
    """
    return "mocked_" + "flag_env"

def test_basic_functionality(mock_flags):
    """Two flags plus one job: exercise the main code path end to end.

    NOTE(review): ``expected_dst`` is constructed but never asserted against;
    the comparison is presumably performed externally by codeflash via
    ``codeflash_output`` — confirm before relying on this as a standalone test.
    """
    # Setup mock flags
    flags.FLAGS = [
        MockFlag("flag1", "Description for flag1"),
        MockFlag("flag2", "Description for flag2")
    ]
    
    # Input dictionary
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expected output
    expected_dst = {
        "on": {
            "workflow_dispatch": {
                "inputs": {
                    "flag1": {"description": "Description for flag1", "type": "boolean", "default": False},
                    "flag2": {"description": "Description for flag2", "type": "boolean", "default": False}
                }
            },
            "workflow_call": {
                "inputs": {
                    "flag1": {"description": "Description for flag1", "type": "boolean", "default": False},
                    "flag2": {"description": "Description for flag2", "type": "boolean", "default": False}
                }
            }
        },
        "jobs": {
            "job1": {
                "env": {"flags": "mocked_flag_env"},
                "steps": [
                    {"run": "echo mocked_flag_env"}
                ]
            }
        }
    }
    
    # Run function
    codeflash_output = generate__pystats(dst)

def test_no_flags(mock_flags):
    """With no flags configured, only the job env/steps rewriting applies.

    NOTE(review): ``expected_dst`` is unused here as well; output comparison
    appears to be external (codeflash) — confirm.
    """
    # Setup mock flags
    flags.FLAGS = []
    
    # Input dictionary
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expected output
    expected_dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "env": {"flags": "mocked_flag_env"},
                "steps": [
                    {"run": "echo mocked_flag_env"}
                ]
            }
        }
    }
    
    # Run function
    codeflash_output = generate__pystats(dst)

def test_missing_keys(mock_flags):
    """Each structurally incomplete `dst` variant must raise KeyError."""
    # Setup mock flags
    flags.FLAGS = [
        MockFlag("flag1", "Description for flag1")
    ]
    
    # Input dictionary missing "on" key
    dst_missing_on = {
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expect KeyError
    with pytest.raises(KeyError):
        generate__pystats(dst_missing_on)
    
    # Input dictionary missing "workflow_dispatch" key
    dst_missing_workflow_dispatch = {
        "on": {},
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expect KeyError
    with pytest.raises(KeyError):
        generate__pystats(dst_missing_workflow_dispatch)
    
    # Input dictionary missing "workflow_call" key
    dst_missing_workflow_call = {
        "on": {
            "workflow_dispatch": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expect KeyError
    with pytest.raises(KeyError):
        generate__pystats(dst_missing_workflow_call)
    
    # Input dictionary missing "jobs" key
    dst_missing_jobs = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        }
    }
    
    # Expect KeyError
    with pytest.raises(KeyError):
        generate__pystats(dst_missing_jobs)

def test_flags_with_special_characters(mock_flags):
    """A gha_variable containing '-' and '@' is passed through unmodified.

    NOTE(review): ``expected_dst`` is unused; comparison is external (codeflash).
    """
    # Setup mock flags
    flags.FLAGS = [
        MockFlag("flag-special@char", "Description with special character")
    ]
    
    # Input dictionary
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expected output
    expected_dst = {
        "on": {
            "workflow_dispatch": {
                "inputs": {
                    "flag-special@char": {"description": "Description with special character", "type": "boolean", "default": False}
                }
            },
            "workflow_call": {
                "inputs": {
                    "flag-special@char": {"description": "Description with special character", "type": "boolean", "default": False}
                }
            }
        },
        "jobs": {
            "job1": {
                "env": {"flags": "mocked_flag_env"},
                "steps": [
                    {"run": "echo mocked_flag_env"}
                ]
            }
        }
    }
    
    # Run function
    codeflash_output = generate__pystats(dst)

def test_flags_with_empty_descriptions(mock_flags):
    """An empty description string must not break input generation.

    NOTE(review): ``expected_dst`` is unused; comparison is external (codeflash).
    """
    # Setup mock flags
    flags.FLAGS = [
        MockFlag("flag1", "")
    ]
    
    # Input dictionary
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Expected output
    expected_dst = {
        "on": {
            "workflow_dispatch": {
                "inputs": {
                    "flag1": {"description": "", "type": "boolean", "default": False}
                }
            },
            "workflow_call": {
                "inputs": {
                    "flag1": {"description": "", "type": "boolean", "default": False}
                }
            }
        },
        "jobs": {
            "job1": {
                "env": {"flags": "mocked_flag_env"},
                "steps": [
                    {"run": "echo mocked_flag_env"}
                ]
            }
        }
    }
    
    # Run function
    codeflash_output = generate__pystats(dst)

def test_large_number_of_flags(mock_flags):
    """Scalability check with 1000 flags assigned directly to flags.FLAGS."""
    # Setup mock flags
    flags.FLAGS = [MockFlag(f"flag{i}", f"Description for flag{i}") for i in range(1000)]
    
    # Input dictionary
    dst = {
        "on": {
            "workflow_dispatch": {"inputs": {}},
            "workflow_call": {"inputs": {}}
        },
        "jobs": {
            "job1": {
                "steps": [
                    {"run": "echo ${ env.flags }"}
                ]
            }
        }
    }
    
    # Run function
    codeflash_output = generate__pystats(dst)
    
    # Check that all flags were added
    # NOTE(review): this loop is a no-op placeholder — it asserts nothing.
    for i in range(1000):
        pass



from bench_runner.scripts.install import generate__pystats
import pytest

def test_generate__pystats():
    """Concolic-coverage test: a str argument raises TypeError mentioning str."""
    with pytest.raises(TypeError, match="<class\\ 'str'>"):
        generate__pystats('')

📢 Feedback on this optimization? Discord

To optimize the given Python program, we will minimize redundant calls and utilize more efficient data manipulation.

### Changes:
1. Avoid redundant calls to `flag_env()` in `add_flag_env` by caching its value.
2. Use list comprehension where applicable for more concise and potentially faster list processing.

Here is the optimized version of the given Python program.
@codeflash-ai codeflash-ai bot added the ⚡️ codeflash Optimization PR opened by Codeflash AI label Jan 11, 2025
@codeflash-ai codeflash-ai bot requested a review from mdboom January 11, 2025 01:49
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
⚡️ codeflash Optimization PR opened by Codeflash AI
Projects
None yet
Development

Successfully merging this pull request may close these issues.

0 participants