Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Optimized ( Rect/Frect ) unionall()/unionall_ip() #2784

Merged
merged 2 commits into from
May 11, 2024

Conversation

itzpr3d4t0r
Copy link
Member

@itzpr3d4t0r itzpr3d4t0r commented Mar 31, 2024

This change consists of two parts:

  • made unionall/unionall_ip METH_O
  • Optimized looping with lists/tuples

In my testing, this makes the function about 80% faster on average (with rect lists or tuples).
image

Test program:

from data_utils import Plotter, Evaluator
from pygame import Rect
from random import randint

def tests_setup(curr_size: int, g: dict):
    """Populate the shared globals dict with a base rect ("r") and a list of
    ``curr_size`` random rects ("rects") for the timed statements to use."""
    def rand_coord() -> int:
        return randint(-100, 100)

    g["r"] = Rect(0, -10, 40, 23)
    # Arguments evaluate left-to-right, so the random stream matches the
    # original (x, y, w, h) draw order per rect.
    g["rects"] = [
        Rect(rand_coord(), rand_coord(), rand_coord(), rand_coord())
        for _ in range(curr_size)
    ]


# (name, statement) pairs timed by Evaluator; each statement reads "r" and
# "rects" from the globals dict populated by tests_setup above.
tests = [
    ("unionall new", "r.unionall(rects)"),
    ("unionall_ip new", "r.unionall_ip(rects)"),
]

# Time every test at list sizes 1..2500 and dump the raw timings to JSON.
Evaluator(tests, tests_setup, max_size=2500, reps=1000, num=1).run()

# (JSON file stem, plot color) pairs; the "old" entries are presumably
# produced by a prior run of this script against the unoptimized build.
files = [
    ("unionall old", "red"),
    ("unionall_ip old", "white"),

    ("unionall new", "blue"),
    ("unionall_ip new", "lime"),
]

# NOTE(review): the title "alphablit" looks like a leftover from another
# benchmark script — presumably it should reference unionall; confirm.
p = Plotter("alphablit", files, mode="MIN")
p.plot_tests()

data_utils file:

import json

from matplotlib import pyplot as plt
import matplotlib.colors as mcolors

from statistics import mean, stdev, median
from timeit import repeat

COLORS = list(mcolors.CSS4_COLORS.keys())

__all__ = ["Plotter", "Evaluator"]

JSON_DIR = "files_json"

class Plotter:

    def __init__(self, title: str, tests: list, mode: str = "MIN",
                 limit_to_range: int = -1):
        plt.style.use(["dark_background"])
        self.title = title
        self.tests = tests

        self.mode = mode
        self.mode_func = None
        self.limit_to_range = limit_to_range
        self.filter(mode)

    def filter(self, mode: str):
        self.mode = mode

        match mode:
            case "MEAN":
                self.mode_func = mean
            case "MIN":
                self.mode_func = min
            case "MAX":
                self.mode_func = max
            case "MEDIAN":
                self.mode_func = median

    def plot_tests(self, scatter=False):
        for file_name, color in self.tests:
            try:
                with open(f"{JSON_DIR}/{file_name}.json", "r") as f:
                    data = json.load(f)
            except FileNotFoundError:
                print(f"File {file_name}.json not found!")
                quit()

            timings = [self.mode_func(dp) for dp in data["data"]][:self.limit_to_range]

            print(f"=== {file_name} ===")
            print(
                f"Total: {sum([sum(data_point) for data_point in data['data']])}\n"
                f"Mean: {mean(timings)}\n"
                f"Median: {median(timings)}\n"
                f"Stdev: {stdev(timings)}"
            )
            print()
            if scatter:
                plt.scatter(range(len(timings)), timings, color=color, label=file_name, s=1)
            else:
                plt.plot(timings, color=color, label=file_name, linewidth=1)
        plt.legend()
        plt.title(self.title)
        plt.xlabel("Surface size (px)")
        plt.ylabel("Time (s)")
        plt.show()

    def compare(self, indices: list[tuple[int, int]], c1="white", c2="lime"):
        for i1, i2 in indices:
            filename_1, _ = self.tests[i1]
            filename_2, _ = self.tests[i2]

            try:
                with open(f"{JSON_DIR}/{filename_1}.json", "r") as f:
                    data_1 = json.load(f)
            except FileNotFoundError:
                print(f"File {filename_1}.json not found!")
                quit()

            try:
                with open(f"{JSON_DIR}/{filename_2}.json", "r") as f:
                    data_2 = json.load(f)
            except FileNotFoundError:
                print(f"File {filename_2}.json not found!")
                quit()

            timings_1 = [self.mode_func(dp) for dp in data_1["data"]][
                        :self.limit_to_range]
            timings_2 = [self.mode_func(dp) for dp in data_2["data"]][
                        :self.limit_to_range]

            plt.figure(figsize=(10, 5))
            curr_index = 1

            plt.subplot(len(indices), 2, curr_index)
            plt.scatter(range(len(timings_1)), timings_1, color=c1, label=filename_1,
                        s=.5)
            plt.scatter(range(len(timings_1)), timings_2, color=c2, label=filename_2,
                        s=.5)
            plt.legend()
            plt.title("Timings")
            plt.xlabel("Surface size (px)")
            plt.ylabel("Time (s)")

            plt.subplot(len(indices), 2, curr_index + 1)
            comparative_data = [
                100 * ((t1 / t2) - 1) for t1, t2 in zip(timings_1, timings_2)
            ]
            plt.scatter(range(len(comparative_data)), comparative_data, color="red", s=1)
            plt.plot([0] * len(comparative_data), color="green", linewidth=2)
            plt.title("Relative % improvement")
            plt.xlabel("Surface size (px)")

        plt.show()


class Evaluator:
    """Times each (name, statement) test over growing input sizes with timeit
    and dumps the raw timings to one JSON file per test in JSON_DIR."""

    def __init__(self, tests: list, tests_setup, pre_test_setup=None,
                 max_size: int = 1000, reps: int = 30,
                 num: int = 1):
        """
        tests: list of (test_name, statement) pairs; each statement string is
            evaluated by timeit against the shared globals dict self.G.
        tests_setup: callable(curr_size, globals_dict) run before every size.
        pre_test_setup: optional callable(globals_dict) run once up front.
        max_size: largest input size; sizes run from 1 to max_size inclusive.
        reps: timeit repeat count per size.
        num: timeit number (statement executions per repetition).

        Raises TypeError if pre_test_setup is given but not callable.
        """
        self.tests = tests
        self.tests_setup = tests_setup
        self.max_size = max_size
        self.reps = reps
        self.num = num
        self.G = {}  # shared globals namespace for the timed statements

        if pre_test_setup is not None:
            # Validate explicitly instead of wrapping the call in
            # `except TypeError`, which also swallowed TypeErrors raised
            # *inside* a perfectly valid callable and mislabeled them.
            if not callable(pre_test_setup):
                raise TypeError("pre_test_setup must be a callable")
            pre_test_setup(self.G)

    def run(self):
        """Run every test at sizes 1..max_size and write its timings JSON."""
        for test_name, statement in self.tests:
            data = {"title": test_name, "data": []}

            print(f"\n========| {test_name.upper()} |========")

            for curr_size in range(1, self.max_size + 1):

                self.print_progress_bar(curr_size)

                # Rebuild the inputs for this size, then time the statement.
                self.tests_setup(curr_size, self.G)
                data["data"].append(self.run_test(statement))

            with open(f"{JSON_DIR}/{test_name}.json", "w") as f:
                json.dump(data, f)

    def print_progress_bar(self, curr_size: int):
        """Redraw a 30-cell in-place progress bar on the current line."""
        # Scale by max_size: the old `3 * curr_size // 100` assumed
        # max_size == 1000 and overflowed the bar for larger runs.
        amt = 30 * curr_size // self.max_size
        print(
            "\r[" + "▪" * amt + " " * (30 - amt) + f"] {curr_size} | {self.max_size}", end=""
        )

    def run_test(self, statement):
        """Return the raw timeit.repeat timings (list of self.reps floats)."""
        return repeat(statement, globals=self.G, number=self.num, repeat=self.reps)

@itzpr3d4t0r itzpr3d4t0r added Performance Related to the speed or resource usage of the project rect pygame.rect labels Mar 31, 2024
@itzpr3d4t0r itzpr3d4t0r requested a review from a team as a code owner March 31, 2024 18:18
@itzpr3d4t0r itzpr3d4t0r closed this Apr 1, 2024
@itzpr3d4t0r itzpr3d4t0r reopened this Apr 1, 2024
Copy link
Member

@oddbookworm oddbookworm left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't see anything that sticks out to me as a problem, so looks good!

Copy link
Member

@MyreMylar MyreMylar left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK, LGTM 👍

I can verify the performance improvements in unioning lots of rectangles. I'm sure there is somebody out there doing something like that who will appreciate it being faster.

@MyreMylar MyreMylar merged commit 7b0beba into pygame-community:main May 11, 2024
57 of 59 checks passed
@ankith26 ankith26 added this to the 2.5.0 milestone May 11, 2024
@itzpr3d4t0r itzpr3d4t0r deleted the optimize_rect_unionall branch June 12, 2024 09:12
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
Performance Related to the speed or resource usage of the project rect pygame.rect
Projects
None yet
Development

Successfully merging this pull request may close these issues.

4 participants