diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
index cf345413..374cd708 100644
--- a/.github/workflows/benchmarks.yml
+++ b/.github/workflows/benchmarks.yml
@@ -83,21 +83,70 @@ jobs:
           pip install -r benchmarks/envs/asgi.txt
           pip install -r benchmarks/envs/wsgi.txt
       - name: benchmark
-        env:
-          BENCHMARK_BASE: false
-          BENCHMARK_VS: true
         working-directory: ./benchmarks
         run: |
-          python benchmarks.py
+          python benchmarks.py vs
       - name: upload results
         uses: actions/upload-artifact@v4
         with:
          name: results-vs
          path: benchmarks/results/*
 
+  benchmark-pyver:
+    runs-on: ubuntu-latest
+    needs: [toolchain]
+
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: |
+            3.10
+            3.11
+            3.12
+            3.13
+      - uses: actions/download-artifact@v4
+        with:
+          name: rewrk
+      - run: |
+          sudo mv rewrk /usr/local/bin && chmod +x /usr/local/bin/rewrk
+      - uses: pyo3/maturin-action@v1
+        with:
+          command: build
+          args: --release --interpreter python3.10 python3.11 python3.12 python3.13
+          target: x64
+          manylinux: auto
+          container: off
+      - name: setup venvs
+        run: |
+          python3.10 -m venv .venv310
+          python3.11 -m venv .venv311
+          python3.12 -m venv .venv312
+          python3.13 -m venv .venv313
+          .venv310/bin/pip install $(ls target/wheels/granian-*-cp310-*.whl)
+          .venv311/bin/pip install $(ls target/wheels/granian-*-cp311-*.whl)
+          .venv312/bin/pip install $(ls target/wheels/granian-*-cp312-*.whl)
+          .venv313/bin/pip install $(ls target/wheels/granian-*-cp313-*.whl)
+      - name: benchmark
+        working-directory: ./benchmarks
+        run: |
+          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv310/bin ${{ github.workspace }}/.venv310/bin/python benchmarks.py interfaces
+          mv results/data.json results/py310.json
+          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv311/bin ${{ github.workspace }}/.venv311/bin/python benchmarks.py interfaces
+          mv results/data.json results/py311.json
+          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv312/bin ${{ github.workspace }}/.venv312/bin/python benchmarks.py interfaces
+          mv results/data.json results/py312.json
+          BENCHMARK_EXC_PREFIX=${{ github.workspace }}/.venv313/bin ${{ github.workspace }}/.venv313/bin/python benchmarks.py interfaces
+          mv results/data.json results/py313.json
+      - name: upload results
+        uses: actions/upload-artifact@v4
+        with:
+          name: results-pyver
+          path: benchmarks/results/*
+
   results:
     runs-on: ubuntu-latest
-    needs: [benchmark-base, benchmark-vs]
+    needs: [benchmark-base, benchmark-vs, benchmark-pyver]
 
     steps:
       - uses: actions/checkout@v4
@@ -114,11 +163,22 @@ jobs:
           path: benchmarks/results
       - run: |
           mv benchmarks/results/data.json benchmarks/results/vs.json
+      - uses: actions/download-artifact@v4
+        with:
+          name: results-pyver
+          path: benchmarks/results
       - name: render
         working-directory: ./benchmarks
         run: |
           noir -c data:results/base.json -v 'benv=GHA Linux x86_64' templates/main.md > README.md
           noir -c data:results/vs.json -v 'benv=GHA Linux x86_64' templates/vs.md > vs.md
+          noir \
+            -c data310:results/py310.json \
+            -c data311:results/py311.json \
+            -c data312:results/py312.json \
+            -c data313:results/py313.json \
+            -v pyvb=310 -v 'benv=GHA Linux x86_64' \
+            templates/pyver.md > pyver.md
       - name: open PR
         uses: peter-evans/create-pull-request@v6
         with:
@@ -131,3 +191,4 @@ jobs:
           add-paths: |
             benchmarks/README.md
             benchmarks/vs.md
+            benchmarks/pyver.md
diff --git a/benchmarks/app/asgi.py b/benchmarks/app/asgi.py
index 3ae7ea0b..93ab89a1 100644
--- a/benchmarks/app/asgi.py
+++ b/benchmarks/app/asgi.py
@@ -2,12 +2,13 @@
 import pathlib
 import sys
 
+
 PLAINTEXT_RESPONSE = {
     'type': 'http.response.start',
     'status': 200,
     'headers': [
         (b'content-type', b'text/plain; charset=utf-8'),
-    ]
+    ],
 }
 MEDIA_RESPONSE = {
     'type': 'http.response.start',
@@ -15,69 +16,45 @@
     'headers': [[b'content-type', b'image/png'], [b'content-length', b'95']],
 }
 
-BODY_BYTES_SHORT = b"Test"
-BODY_BYTES_LONG = b"Test" * 20_000
-BODY_STR_SHORT = "Test"
-BODY_STR_LONG = "Test" * 20_000
+BODY_BYTES_SHORT = b'Test'
+BODY_BYTES_LONG = b'Test' * 20_000
+BODY_STR_SHORT = 'Test'
+BODY_STR_LONG = 'Test' * 20_000
 
 MEDIA_PATH = pathlib.Path(__file__).parent.parent / 'files' / 'media.png'
 
 
 async def b_short(scope, receive, send):
     await send(PLAINTEXT_RESPONSE)
-    await send({
-        'type': 'http.response.body',
-        'body': BODY_BYTES_SHORT,
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': BODY_BYTES_SHORT, 'more_body': False})
 
 
 async def b_long(scope, receive, send):
     await send(PLAINTEXT_RESPONSE)
-    await send({
-        'type': 'http.response.body',
-        'body': BODY_BYTES_LONG,
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': BODY_BYTES_LONG, 'more_body': False})
 
 
 async def s_short(scope, receive, send):
     await send(PLAINTEXT_RESPONSE)
-    await send({
-        'type': 'http.response.body',
-        'body': BODY_STR_SHORT.encode("utf8"),
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': BODY_STR_SHORT.encode('utf8'), 'more_body': False})
 
 
 async def s_long(scope, receive, send):
     await send(PLAINTEXT_RESPONSE)
-    await send({
-        'type': 'http.response.body',
-        'body': BODY_STR_LONG.encode("utf8"),
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': BODY_STR_LONG.encode('utf8'), 'more_body': False})
 
 
 async def echo(scope, receive, send):
     await send(PLAINTEXT_RESPONSE)
     msg = await receive()
-    await send({
-        'type': 'http.response.body',
-        'body': msg['body'],
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': msg['body'], 'more_body': False})
 
 
 async def file_body(scope, receive, send):
     await send(MEDIA_RESPONSE)
     with MEDIA_PATH.open('rb') as f:
         data = f.read()
-    await send({
-        'type': 'http.response.body',
-        'body': data,
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': data, 'more_body': False})
 
 
 async def file_pathsend(scope, receive, send):
@@ -87,25 +64,19 @@ async def file_pathsend(scope, receive, send):
 
 def io_builder(wait):
     wait = wait / 1000
+
     async def io(scope, receive, send):
         await send(PLAINTEXT_RESPONSE)
         await asyncio.sleep(wait)
-        await send({
-            'type': 'http.response.body',
-            'body': BODY_BYTES_SHORT,
-            'more_body': False
-        })
+        await send({'type': 'http.response.body', 'body': BODY_BYTES_SHORT, 'more_body': False})
+
     return io
 
 
 async def handle_404(scope, receive, send):
     content = b'Not found'
     await send(PLAINTEXT_RESPONSE)
-    await send({
-        'type': 'http.response.body',
-        'body': content,
-        'more_body': False
-    })
+    await send({'type': 'http.response.body', 'body': content, 'more_body': False})
 
 
 routes = {
@@ -133,8 +104,9 @@ async def async_app(scope, receive, send):
 
 def granian(wrk, thr):
     from granian import Granian
-    Granian("asgi:app", workers=int(wrk), threads=int(thr), interface="asgi").serve()
+
+    Granian('asgi:app', workers=int(wrk), threads=int(thr), interface='asgi').serve()
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     granian(sys.argv[1], sys.argv[2])
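The ASGI handlers above all share the `(scope, receive, send)` shape and reply with `PLAINTEXT_RESPONSE` followed by a single body message. A minimal sketch of exercising one of them without a server, assuming `app.asgi` is importable from the `benchmarks` directory (the stub `receive`/`send` callables are illustrative only):

```python
import asyncio

from app.asgi import b_short


async def exercise():
    sent = []

    async def receive():
        # b_short never awaits receive, but ASGI handlers expect the callable.
        return {'type': 'http.request', 'body': b'', 'more_body': False}

    async def send(message):
        sent.append(message)

    await b_short({'type': 'http', 'path': '/b'}, receive, send)
    return sent


if __name__ == '__main__':
    # Expect the http.response.start dict followed by a 4-byte body message.
    print(asyncio.run(exercise()))
```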
diff --git a/benchmarks/app/rsgi.py b/benchmarks/app/rsgi.py
index 82bfe010..db033b35 100644
--- a/benchmarks/app/rsgi.py
+++ b/benchmarks/app/rsgi.py
@@ -2,83 +2,54 @@
 import pathlib
 import sys
 
+
 HEADERS = [('content-type', 'text/plain; charset=utf-8')]
 HEADERS_MEDIA = [('content-type', 'image/png'), ('content-length', '95')]
 
-BODY_BYTES_SHORT = b"Test"
-BODY_BYTES_LONG = b"Test" * 20_000
-BODY_STR_SHORT = "Test"
-BODY_STR_LONG = "Test" * 20_000
+BODY_BYTES_SHORT = b'Test'
+BODY_BYTES_LONG = b'Test' * 20_000
+BODY_STR_SHORT = 'Test'
+BODY_STR_LONG = 'Test' * 20_000
 
 MEDIA_PATH = str(pathlib.Path(__file__).parent.parent / 'files' / 'media.png')
 
 
 async def b_short(scope, proto):
-    proto.response_bytes(
-        200,
-        HEADERS,
-        BODY_BYTES_SHORT
-    )
+    proto.response_bytes(200, HEADERS, BODY_BYTES_SHORT)
 
 
 async def b_long(scope, proto):
-    proto.response_bytes(
-        200,
-        HEADERS,
-        BODY_BYTES_LONG
-    )
+    proto.response_bytes(200, HEADERS, BODY_BYTES_LONG)
 
 
 async def s_short(scope, proto):
-    proto.response_str(
-        200,
-        HEADERS,
-        BODY_STR_SHORT
-    )
+    proto.response_str(200, HEADERS, BODY_STR_SHORT)
 
 
 async def s_long(scope, proto):
-    proto.response_str(
-        200,
-        HEADERS,
-        BODY_STR_LONG
-    )
+    proto.response_str(200, HEADERS, BODY_STR_LONG)
 
 
 async def echo(scope, proto):
-    proto.response_bytes(
-        200,
-        HEADERS,
-        await proto()
-    )
+    proto.response_bytes(200, HEADERS, await proto())
 
 
 async def file(scope, proto):
-    proto.response_file(
-        200,
-        HEADERS_MEDIA,
-        MEDIA_PATH
-    )
+    proto.response_file(200, HEADERS_MEDIA, MEDIA_PATH)
 
 
 def io_builder(wait):
     wait = wait / 1000
+
     async def io(scope, proto):
         await asyncio.sleep(wait)
-        proto.response_bytes(
-            200,
-            HEADERS,
-            BODY_BYTES_SHORT
-        )
+        proto.response_bytes(200, HEADERS, BODY_BYTES_SHORT)
+
     return io
 
 
 async def handle_404(scope, proto):
-    proto.response_str(
-        404,
-        HEADERS,
-        "not found"
-    )
+    proto.response_str(404, HEADERS, 'not found')
 
 
 routes = {
@@ -100,8 +71,9 @@ def app(scope, proto):
 
 def granian(wrk, thr):
     from granian import Granian
-    Granian("rsgi:app", workers=int(wrk), threads=int(thr), interface="rsgi").serve()
+
+    Granian('rsgi:app', workers=int(wrk), threads=int(thr), interface='rsgi').serve()
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     granian(sys.argv[1], sys.argv[2])
diff --git a/benchmarks/app/wsgi.py b/benchmarks/app/wsgi.py
index 98c1330b..37ae3812 100644
--- a/benchmarks/app/wsgi.py
+++ b/benchmarks/app/wsgi.py
@@ -1,11 +1,12 @@
 import time
 
+
 HEADERS = [('content-type', 'text/plain; charset=utf-8')]
 
-BODY_BYTES_SHORT = b"Test"
-BODY_BYTES_LONG = b"Test" * 20_000
-BODY_STR_SHORT = "Test"
-BODY_STR_LONG = "Test" * 20_000
+BODY_BYTES_SHORT = b'Test'
+BODY_BYTES_LONG = b'Test' * 20_000
+BODY_STR_SHORT = 'Test'
+BODY_STR_LONG = 'Test' * 20_000
 
 
 def b_short(environ, proto):
@@ -20,12 +21,12 @@ def b_long(environ, proto):
 
 def s_short(environ, proto):
     proto('200 OK', HEADERS)
-    return [BODY_STR_SHORT.encode("utf8")]
+    return [BODY_STR_SHORT.encode('utf8')]
 
 
 def s_long(environ, proto):
     proto('200 OK', HEADERS)
-    return [BODY_STR_LONG.encode("utf8")]
+    return [BODY_STR_LONG.encode('utf8')]
 
 
 def echo(environ, proto):
@@ -35,16 +36,18 @@ def echo(environ, proto):
 
 def io_builder(wait):
     wait = wait / 1000
+
     def io(environ, proto):
         proto('200 OK', HEADERS)
         time.sleep(wait)
         return [BODY_BYTES_SHORT]
+
     return io
 
 
 def handle_404(environ, proto):
     proto('404 NOT FOUND', HEADERS)
-    return [b"not found"]
+    return [b'not found']
 
 
 routes = {
@@ -59,5 +62,5 @@ def handle_404(environ, proto):
 
 def app(environ, proto):
-    handler = routes.get(environ["PATH_INFO"], handle_404)
+    handler = routes.get(environ['PATH_INFO'], handle_404)
     return handler(environ, proto)
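Since `app.wsgi:app` is a plain WSGI callable, it can also be smoke-tested outside the benchmark harness with the standard library; a sketch, assuming the module is importable from the `benchmarks` directory:

```python
from wsgiref.simple_server import make_server

from app.wsgi import app

if __name__ == '__main__':
    # Serve the benchmark routes locally, e.g. http://127.0.0.1:8000/b
    with make_server('127.0.0.1', 8000, app) as httpd:
        httpd.serve_forever()
```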
diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py
index 27dd26cf..d3c6fab9 100644
--- a/benchmarks/benchmarks.py
+++ b/benchmarks/benchmarks.py
@@ -6,61 +6,60 @@
 import subprocess
 import sys
 import time
-
 from contextlib import contextmanager
 
+
 CPU = multiprocessing.cpu_count()
 WRK_CONCURRENCIES = [64, 128, 256, 512]
 
 APPS = {
-    "asgi": (
-        "granian --interface asgi --log-level warning --backlog 2048 "
-        "--no-ws --http {http} "
-        "--workers {procs} --threads {threads}{bthreads} "
-        "--threading-mode {thmode} app.asgi:app"
+    'asgi': (
+        'granian --interface asgi --log-level warning --backlog 2048 '
+        '--no-ws --http {http} '
+        '--workers {procs} --threads {threads}{bthreads} '
+        '--threading-mode {thmode} app.asgi:app'
     ),
-    "rsgi": (
-        "granian --interface rsgi --log-level warning --backlog 2048 "
-        "--no-ws --http {http} "
-        "--workers {procs} --threads {threads}{bthreads} "
-        "--threading-mode {thmode} app.rsgi:app"
+    'rsgi': (
+        'granian --interface rsgi --log-level warning --backlog 2048 '
+        '--no-ws --http {http} '
+        '--workers {procs} --threads {threads}{bthreads} '
+        '--threading-mode {thmode} app.rsgi:app'
     ),
-    "wsgi": (
-        "granian --interface wsgi --log-level warning --backlog 2048 "
-        "--no-ws --http {http} "
-        "--workers {procs} --threads {threads}{bthreads} "
-        "--threading-mode {thmode} app.wsgi:app"
+    'wsgi': (
+        'granian --interface wsgi --log-level warning --backlog 2048 '
+        '--no-ws --http {http} '
+        '--workers {procs} --threads {threads}{bthreads} '
+        '--threading-mode {thmode} app.wsgi:app'
     ),
-    "uvicorn_h11": (
-        "uvicorn --interface asgi3 "
-        "--no-access-log --log-level warning "
-        "--http h11 --workers {procs} app.asgi:app"
+    'uvicorn_h11': (
+        'uvicorn --interface asgi3 --no-access-log --log-level warning --http h11 --workers {procs} app.asgi:app'
     ),
-    "uvicorn_httptools": (
-        "uvicorn --interface asgi3 "
-        "--no-access-log --log-level warning "
-        "--http httptools --workers {procs} app.asgi:app"
+    'uvicorn_httptools': (
+        'uvicorn --interface asgi3 '
+        '--no-access-log --log-level warning '
+        '--http httptools --workers {procs} app.asgi:app'
     ),
-    "hypercorn": (
-        "hypercorn -b localhost:8000 -k uvloop --log-level warning --backlog 2048 "
-        "--workers {procs} asgi:app.asgi:async_app"
+    'hypercorn': (
+        'hypercorn -b localhost:8000 -k uvloop --log-level warning --backlog 2048 '
+        '--workers {procs} asgi:app.asgi:async_app'
+    ),
+    'gunicorn_gthread': 'gunicorn --workers {procs} -k gthread app.wsgi:app',
+    'gunicorn_gevent': 'gunicorn --workers {procs} -k gevent app.wsgi:app',
+    'uwsgi': (
+        'uwsgi --http :8000 --master --processes {procs} --enable-threads '
+        '--disable-logging --die-on-term --single-interpreter --lazy-apps '
+        '--wsgi-file app/wsgi.py --callable app'
     ),
-    "gunicorn_gthread": "gunicorn --workers {procs} -k gthread app.wsgi:app",
-    "gunicorn_gevent": "gunicorn --workers {procs} -k gevent app.wsgi:app",
-    "uwsgi": (
-        "uwsgi --http :8000 --master --processes {procs} --enable-threads "
-        "--disable-logging --die-on-term --single-interpreter --lazy-apps "
-        "--wsgi-file app/wsgi.py --callable app"
-    )
 }
 
 
 @contextmanager
-def app(name, procs=None, threads=None, bthreads=None, thmode=None, http="1"):
+def app(name, procs=None, threads=None, bthreads=None, thmode=None, http='1'):
     procs = procs or 1
     threads = threads or 1
-    bthreads = f" --blocking-threads {bthreads}" if bthreads else ""
-    thmode = thmode or "workers"
+    bthreads = f' --blocking-threads {bthreads}' if bthreads else ''
+    thmode = thmode or 'workers'
+    exc_prefix = os.environ.get('BENCHMARK_EXC_PREFIX')
     proc_cmd = APPS[name].format(
         procs=procs,
         threads=threads,
@@ -68,7 +67,9 @@ def app(name, procs=None, threads=None, bthreads=None, thmode=None, http="1"):
         thmode=thmode,
         http=http,
     )
-    proc = subprocess.Popen(proc_cmd, shell=True, preexec_fn=os.setsid)
+    if exc_prefix:
+        proc_cmd = f'{exc_prefix}/{proc_cmd}'
+    proc = subprocess.Popen(proc_cmd, shell=True, preexec_fn=os.setsid)  # noqa: S602
     time.sleep(2)
     yield proc
     os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
@@ -76,46 +77,39 @@
 
 def wrk(duration, concurrency, endpoint, post=False, h2=False):
     cmd_parts = [
-        "rewrk",
-        f"-c {concurrency}",
-        f"-d {duration}s",
-        "--json",
+        'rewrk',
+        f'-c {concurrency}',
+        f'-d {duration}s',
+        '--json',
     ]
     if h2:
-        cmd_parts.append("--http2")
+        cmd_parts.append('--http2')
     else:
-        cmd_parts.append("-H \"Connection: Keep-Alive\"")
-        cmd_parts.append("-H \"Keep-Alive: timeout=60'\"")
+        cmd_parts.append('-H "Connection: Keep-Alive"')
+        cmd_parts.append('-H "Keep-Alive: timeout=60\'"')
     if post:
-        cmd_parts.append("-m post")
-        cmd_parts.append("-H \"Content-Type: text/plain; charset=utf-8\"")
-        cmd_parts.append("-H \"Content-Length: 4\"")
-        cmd_parts.append("-b \"test\"")
-    cmd_parts.append(f"-h http://127.0.0.1:8000/{endpoint}")
+        cmd_parts.append('-m post')
+        cmd_parts.append('-H "Content-Type: text/plain; charset=utf-8"')
+        cmd_parts.append('-H "Content-Length: 4"')
+        cmd_parts.append('-b "test"')
+    cmd_parts.append(f'-h http://127.0.0.1:8000/{endpoint}')
     try:
-        proc = subprocess.run(
-            " ".join(cmd_parts),
+        proc = subprocess.run(  # noqa: S602
+            ' '.join(cmd_parts),
             shell=True,
             check=True,
             capture_output=True,
         )
-        data = json.loads(proc.stdout.decode("utf8"))
+        data = json.loads(proc.stdout.decode('utf8'))
         return {
-            "requests": {
-                "total": data["requests_total"],
-                "rps": round(data["requests_avg"] or 0)
-            },
-            "latency": {
-                "avg": data["latency_avg"],
-                "max": data["latency_max"],
-                "stdev": data["latency_std_deviation"]
-            },
+            'requests': {'total': data['requests_total'], 'rps': round(data['requests_avg'] or 0)},
+            'latency': {'avg': data['latency_avg'], 'max': data['latency_max'], 'stdev': data['latency_std_deviation']},
         }
     except Exception as e:
-        print(f"WARN: got exception {e} while loading rewrk data")
+        print(f'WARN: got exception {e} while loading rewrk data')
         return {
-            "requests": {"total": 0, "rps": 0},
-            "latency": {"avg": None, "max": None, "stdev": None},
+            'requests': {'total': 0, 'rps': 0},
+            'latency': {'avg': None, 'max': None, 'stdev': None},
         }
 
 
@@ -138,169 +132,186 @@ def benchmark(endpoint, post=False, h2=False, concurrencies=None):
 
 def concurrencies():
-    nperm = sorted(set([1, 2, 4, round(CPU / 2), CPU]))
-    results = {"wsgi": {}}
-    for interface in ["asgi", "rsgi", "wsgi"]:
+    nperm = sorted({1, 2, 4, round(CPU / 2), CPU})
+    results = {'wsgi': {}}
+    for interface in ['asgi', 'rsgi', 'wsgi']:
         results[interface] = {}
         for np in nperm:
             for nt in [1, 2, 4]:
-                for threading_mode in ["workers", "runtime"]:
-                    key = f"P{np} T{nt} {threading_mode[0]}th"
+                for threading_mode in ['workers', 'runtime']:
+                    key = f'P{np} T{nt} {threading_mode[0]}th'
                     with app(interface, np, nt, bthreads=1, thmode=threading_mode):
-                        print(f"Bench concurrencies - [{interface}] {threading_mode} {np}:{nt}")
+                        print(f'Bench concurrencies - [{interface}] {threading_mode} {np}:{nt}')
                         results[interface][key] = {
-                            "m": threading_mode,
-                            "p": np,
-                            "t": nt,
-                            "res": benchmark("b", concurrencies=[128, 512, 1024, 2048])
+                            'm': threading_mode,
+                            'p': np,
+                            't': nt,
+                            'res': benchmark('b', concurrencies=[128, 512, 1024, 2048]),
                         }
     return results
 
 
 def rsgi_body_type():
     results = {}
-    benches = {"bytes small": "b", "str small": "s", "bytes big": "bb", "str big": "ss"}
+    benches = {'bytes small': 'b', 'str small': 's', 'bytes big': 'bb', 'str big': 'ss'}
     for title, route in benches.items():
-        with app("rsgi"):
+        with app('rsgi'):
             results[title] = benchmark(route)
     return results
 
 
 def interfaces():
     results = {}
-    benches = {"bytes": ("b", {}), "str": ("s", {}), "echo": ("echo", {"post": True})}
-    for interface in ["rsgi", "asgi", "wsgi"]:
+    benches = {'bytes': ('b', {}), 'str': ('s', {}), 'echo': ('echo', {'post': True})}
+    for interface in ['rsgi', 'asgi', 'wsgi']:
         for key, bench_data in benches.items():
             route, opts = bench_data
             with app(interface, bthreads=1):
-                results[f"{interface.upper()} {key}"] = benchmark(route, **opts)
+                results[f'{interface.upper()} {key}'] = benchmark(route, **opts)
     return results
 
 
 def http2():
     results = {}
-    benches = {"[GET]": ("b", {}), "[POST]": ("echo", {"post": True})}
+    benches = {'[GET]': ('b', {}), '[POST]': ('echo', {'post': True})}
     for http2 in [False, True]:
         for key, bench_data in benches.items():
             route, opts = bench_data
-            h = "2" if http2 else "1"
-            with app("rsgi", http=h):
-                results[f"HTTP/{h} {key}"] = benchmark(route, h2=http2, **opts)
+            h = '2' if http2 else '1'
+            with app('rsgi', http=h):
+                results[f'HTTP/{h} {key}'] = benchmark(route, h2=http2, **opts)
     return results
 
 
 def files():
     results = {}
-    with app("rsgi", bthreads=1):
-        results["RSGI"] = benchmark("fp")
-    with app("asgi", bthreads=1):
-        results["ASGI"] = benchmark("fb")
-        results["ASGI pathsend"] = benchmark("fp")
+    with app('rsgi', bthreads=1):
+        results['RSGI'] = benchmark('fp')
+    with app('asgi', bthreads=1):
+        results['ASGI'] = benchmark('fb')
+        results['ASGI pathsend'] = benchmark('fp')
     return results
 
 
 def vs_asgi():
     results = {}
-    benches = {"[GET]": ("b", {}), "[POST]": ("echo", {"post": True})}
-    for fw in ["granian_asgi", "uvicorn_h11", "uvicorn_httptools", "hypercorn"]:
+    benches = {'[GET]': ('b', {}), '[POST]': ('echo', {'post': True})}
+    for fw in ['granian_asgi', 'uvicorn_h11', 'uvicorn_httptools', 'hypercorn']:
         for key, bench_data in benches.items():
             route, opts = bench_data
-            fw_app = fw.split("_")[1] if fw.startswith("granian") else fw
-            title = " ".join(item.title() for item in fw.split("_"))
+            fw_app = fw.split('_')[1] if fw.startswith('granian') else fw
+            title = ' '.join(item.title() for item in fw.split('_'))
             with app(fw_app):
-                results[f"{title} {key}"] = benchmark(route, **opts)
+                results[f'{title} {key}'] = benchmark(route, **opts)
     return results
 
 
 def vs_wsgi():
     results = {}
-    benches = {"[GET]": ("b", {}), "[POST]": ("echo", {"post": True})}
-    for fw in ["granian_wsgi", "gunicorn_gthread", "gunicorn_gevent", "uwsgi"]:
+    benches = {'[GET]': ('b', {}), '[POST]': ('echo', {'post': True})}
+    for fw in ['granian_wsgi', 'gunicorn_gthread', 'gunicorn_gevent', 'uwsgi']:
        for key, bench_data in benches.items():
             route, opts = bench_data
-            fw_app = fw.split("_")[1] if fw.startswith("granian") else fw
-            title = " ".join(item.title() for item in fw.split("_"))
+            fw_app = fw.split('_')[1] if fw.startswith('granian') else fw
+            title = ' '.join(item.title() for item in fw.split('_'))
             with app(fw_app, bthreads=1):
-                results[f"{title} {key}"] = benchmark(route, **opts)
+                results[f'{title} {key}'] = benchmark(route, **opts)
     return results
 
 
 def vs_http2():
     results = {}
-    benches = {"[GET]": ("b", {}), "[POST]": ("echo", {"post": True})}
-    for fw in ["granian_asgi", "hypercorn"]:
+    benches = {'[GET]': ('b', {}), '[POST]': ('echo', {'post': True})}
+    for fw in ['granian_asgi', 'hypercorn']:
         for key, bench_data in benches.items():
             route, opts = bench_data
-            fw_app = fw.split("_")[1] if fw.startswith("granian") else fw
-            title = " ".join(item.title() for item in fw.split("_"))
-            with app(fw_app, http="2"):
-                results[f"{title} {key}"] = benchmark(route, h2=True, **opts)
+            fw_app = fw.split('_')[1] if fw.startswith('granian') else fw
+            title = ' '.join(item.title() for item in fw.split('_'))
+            with app(fw_app, http='2'):
+                results[f'{title} {key}'] = benchmark(route, h2=True, **opts)
     return results
 
 
 def vs_files():
     results = {}
-    with app("asgi", bthreads=1):
-        results["Granian (pathsend)"] = benchmark("fp")
-    for fw in ["uvicorn_h11", "uvicorn_httptools", "hypercorn"]:
-        title = " ".join(item.title() for item in fw.split("_"))
+    with app('asgi', bthreads=1):
+        results['Granian (pathsend)'] = benchmark('fp')
+    for fw in ['uvicorn_h11', 'uvicorn_httptools', 'hypercorn']:
+        title = ' '.join(item.title() for item in fw.split('_'))
         with app(fw):
-            results[title] = benchmark("fb")
+            results[title] = benchmark('fb')
     return results
 
 
 def vs_io():
     results = {}
-    benches = {"10ms": ("io10", {}), "100ms": ("io100", {})}
+    benches = {'10ms': ('io10', {}), '100ms': ('io100', {})}
     for fw in [
-        "granian_rsgi",
-        "granian_asgi",
-        "granian_wsgi",
-        "uvicorn_httptools",
-        "hypercorn",
-        "gunicorn_gevent",
-        "uwsgi",
+        'granian_rsgi',
+        'granian_asgi',
+        'granian_wsgi',
+        'uvicorn_httptools',
+        'hypercorn',
+        'gunicorn_gevent',
+        'uwsgi',
     ]:
         for key, bench_data in benches.items():
             route, opts = bench_data
-            fw_app = fw.split("_")[1] if fw.startswith("granian") else fw
-            title = " ".join(item.title() for item in fw.split("_"))
+            fw_app = fw.split('_')[1] if fw.startswith('granian') else fw
+            title = ' '.join(item.title() for item in fw.split('_'))
             with app(fw_app):
-                results[f"{title} {key}"] = benchmark(route, **opts)
+                results[f'{title} {key}'] = benchmark(route, **opts)
     return results
 
 
 def _granian_version():
     import granian
+
     return granian.__version__
 
 
 def run():
+    all_benchmarks = {
+        'rsgi_body': rsgi_body_type,
+        'interfaces': interfaces,
+        'http2': http2,
+        'files': files,
+        'concurrencies': concurrencies,
+        'vs_asgi': vs_asgi,
+        'vs_wsgi': vs_wsgi,
+        'vs_http2': vs_http2,
+        'vs_files': vs_files,
+        'vs_io': vs_io,
+    }
+    inp_benchmarks = sys.argv[1:] or ['base']
+    if 'base' in inp_benchmarks:
+        inp_benchmarks.remove('base')
+        inp_benchmarks.extend(['rsgi_body', 'interfaces', 'http2', 'files'])
+    if 'vs' in inp_benchmarks:
+        inp_benchmarks.remove('vs')
+        inp_benchmarks.extend(['vs_asgi', 'vs_wsgi', 'vs_http2', 'vs_files', 'vs_io'])
+    run_benchmarks = set(inp_benchmarks) & set(all_benchmarks.keys())
+
     now = datetime.datetime.utcnow()
     results = {}
-    if os.environ.get("BENCHMARK_BASE", "true") == "true":
-        results["rsgi_body"] = rsgi_body_type()
-        results["interfaces"] = interfaces()
-        results["http2"] = http2()
-        results["files"] = files()
-    if os.environ.get("BENCHMARK_CONCURRENCIES") == "true":
-        results["concurrencies"] = concurrencies()
-    if os.environ.get("BENCHMARK_VS") == "true":
-        results["vs_asgi"] = vs_asgi()
-        results["vs_wsgi"] = vs_wsgi()
-        results["vs_http2"] = vs_http2()
-        results["vs_files"] = vs_files()
-        results["vs_io"] = vs_io()
-    with open("results/data.json", "w") as f:
+    for benchmark_key in run_benchmarks:
+        runner = all_benchmarks[benchmark_key]
+        results[benchmark_key] = runner()
+
+    with open('results/data.json', 'w') as f:
         pyver = sys.version_info
-        f.write(json.dumps({
-            "cpu": CPU,
-            "run_at": int(now.timestamp()),
-            "pyver": f"{pyver.major}.{pyver.minor}",
-            "results": results,
-            "granian": _granian_version()
-        }))
+        f.write(
+            json.dumps(
+                {
+                    'cpu': CPU,
+                    'run_at': int(now.timestamp()),
+                    'pyver': f'{pyver.major}.{pyver.minor}',
+                    'results': results,
+                    'granian': _granian_version(),
+                }
+            )
+        )
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     run()
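With the rework above, benchmark groups are selected via positional arguments instead of the removed `BENCHMARK_BASE`/`BENCHMARK_VS` environment toggles, which is what lets the CI jobs invoke `python benchmarks.py vs` and `python benchmarks.py interfaces` directly (`BENCHMARK_EXC_PREFIX` remains and only prefixes the server command with a venv `bin` path). The selection logic, isolated here as a sketch for clarity (the helper name is illustrative, not part of the patch):

```python
def select_benchmarks(args, all_keys):
    # Mirrors run(): positional args name groups, 'base' and 'vs' act as aliases.
    inp = list(args) or ['base']
    if 'base' in inp:
        inp.remove('base')
        inp.extend(['rsgi_body', 'interfaces', 'http2', 'files'])
    if 'vs' in inp:
        inp.remove('vs')
        inp.extend(['vs_asgi', 'vs_wsgi', 'vs_http2', 'vs_files', 'vs_io'])
    # Unknown names are silently dropped by the intersection.
    return set(inp) & set(all_keys)


if __name__ == '__main__':
    keys = [
        'rsgi_body', 'interfaces', 'http2', 'files', 'concurrencies',
        'vs_asgi', 'vs_wsgi', 'vs_http2', 'vs_files', 'vs_io',
    ]
    print(select_benchmarks(['vs'], keys))          # the five vs_* groups
    print(select_benchmarks(['interfaces'], keys))  # {'interfaces'}
    print(select_benchmarks([], keys))              # the four base groups
```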
diff --git a/benchmarks/external/tfb/loader.py b/benchmarks/external/tfb/loader.py
index 3d42ab92..af941962 100644
--- a/benchmarks/external/tfb/loader.py
+++ b/benchmarks/external/tfb/loader.py
@@ -5,22 +5,22 @@
 from selectolax.parser import HTMLParser
 
 
-TFB_URL = "https://tfb-status.techempower.com"
+TFB_URL = 'https://tfb-status.techempower.com'
 FW_MAP = {
-    "fastwsgi-asgi": "FastWSGI (ASGI)",
-    "granian": "Granian (ASGI)",
-    "granian-rsgi": "Granian (RSGI)",
-    "granian-wsgi": "Granian (WSGI)",
-    "socketify.py-asgi-python3": "Socketify (ASGI)",
-    "socketify.py-wsgi-python3": "Socketify (WSGI)",
-    "uvicorn": "Uvicorn (httptools)",
-    "uwsgi": "uWSGI",
-    "uwsgi-nginx-uwsgi": "uWSGI + Nginx"
+    'fastwsgi-asgi': 'FastWSGI (ASGI)',
+    'granian': 'Granian (ASGI)',
+    'granian-rsgi': 'Granian (RSGI)',
+    'granian-wsgi': 'Granian (WSGI)',
+    'socketify.py-asgi-python3': 'Socketify (ASGI)',
+    'socketify.py-wsgi-python3': 'Socketify (WSGI)',
+    'uvicorn': 'Uvicorn (httptools)',
+    'uwsgi': 'uWSGI',
+    'uwsgi-nginx-uwsgi': 'uWSGI + Nginx',
 }
 
 
 def get_runs():
-    with urllib.request.urlopen(TFB_URL) as res:
+    with urllib.request.urlopen(TFB_URL) as res:  # noqa: S310
         data = res.read()
     return data
 
@@ -28,48 +28,48 @@ def get_runs():
 
 def find_last_run(data):
     html = HTMLParser(data)
     for node in html.css('tr'):
-        if "estimated" not in node.text():
-            return node.attributes["data-uuid"]
+        if 'estimated' not in node.text():
+            return node.attributes['data-uuid']
 
 
 def get_run_meta(runid):
-    with urllib.request.urlopen(TFB_URL + f"/results/{runid}") as res:
+    with urllib.request.urlopen(TFB_URL + f'/results/{runid}') as res:  # noqa: S310
         data = res.read()
     html = HTMLParser(data)
     target = None
-    visualize_url = html.css_first('a').attributes["href"]
+    visualize_url = html.css_first('a').attributes['href']
     for node in html.css('a'):
-        if 'unzip' in node.attributes["href"]:
-            target = node.attributes["href"]
+        if 'unzip' in node.attributes['href']:
+            target = node.attributes['href']
             break
     assert target is not None
-    with urllib.request.urlopen(TFB_URL + f"{target}/results") as res:
+    with urllib.request.urlopen(TFB_URL + f'{target}/results') as res:  # noqa: S310
         data = res.read()
     node = HTMLParser(data).css_first('.fileName')
     return {
-        "date": datetime.datetime.strptime(node.text()[:-1], "%Y%m%d%H%M%S"),
-        "target": node.attributes["href"],
-        "visualize": visualize_url,
+        'date': datetime.datetime.strptime(node.text()[:-1], '%Y%m%d%H%M%S'),
+        'target': node.attributes['href'],
+        'visualize': visualize_url,
     }
 
 
 def extract_run_data(data: str):
     rps = []
-    for line in filter(lambda s: "Requests/sec:" in s, data.splitlines()):
-        rps.append(float(line.split("Requests/sec:")[-1].strip()))
+    for line in filter(lambda s: 'Requests/sec:' in s, data.splitlines()):
+        rps.append(float(line.split('Requests/sec:')[-1].strip()))
     return round(max(rps))
 
 
 def get_run_results(path, fw):
     rv = {}
-    for bench in ["json", "plaintext"]:
+    for bench in ['json', 'plaintext']:
         try:
-            with urllib.request.urlopen(TFB_URL + f"{path}/{fw}/{bench}/raw.txt") as res:
-                rv[bench] = extract_run_data(res.read().decode("utf-8"))
-        except:
+            with urllib.request.urlopen(TFB_URL + f'{path}/{fw}/{bench}/raw.txt') as res:  # noqa: S310
+                rv[bench] = extract_run_data(res.read().decode('utf-8'))
+        except Exception:  # noqa: S110
             pass
     return rv
 
@@ -79,33 +79,33 @@ def run():
     run_uuid = find_last_run(runs)
     run_meta = get_run_meta(run_uuid)
     res = {
-        "run": run_uuid,
-        "results": {},
-        "labels": {},
-        "run_at": int(run_meta["date"].timestamp()),
-        "url": run_meta["visualize"],
+        'run': run_uuid,
+        'results': {},
+        'labels': {},
+        'run_at': int(run_meta['date'].timestamp()),
+        'url': run_meta['visualize'],
     }
     # NOTE: would be nice to have updated gunicorn, hypercorn
     for fw in [
-        "fastwsgi",
-        "fastwsgi-asgi",
-        "granian",
-        "granian-rsgi",
-        "granian-wsgi",
-        "robyn",
-        "socketify.py-asgi-python3",
-        "socketify.py-wsgi-python3",
-        "uvicorn",
-        "uwsgi",
+        'fastwsgi',
+        'fastwsgi-asgi',
+        'granian',
+        'granian-rsgi',
+        'granian-wsgi',
+        'robyn',
+        'socketify.py-asgi-python3',
+        'socketify.py-wsgi-python3',
+        'uvicorn',
+        'uwsgi',
         # NOTE: wsgi is gunicorn, but is Python 3.6 based with meinheld, no more avail
         # "wsgi",
-        "uwsgi-nginx-uwsgi"
+        'uwsgi-nginx-uwsgi',
     ]:
         fw_label = FW_MAP.get(fw) or fw.title()
-        res["results"][fw] = get_run_results(run_meta["target"], fw)
-        res["labels"][fw] = fw_label
+        res['results'][fw] = get_run_results(run_meta['target'], fw)
+        res['labels'][fw] = fw_label
     print(json.dumps(res))
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     run()
diff --git a/benchmarks/templates/main.md b/benchmarks/templates/main.md
index 70dfe207..457c66d3 100644
--- a/benchmarks/templates/main.md
+++ b/benchmarks/templates/main.md
@@ -23,9 +23,7 @@ Granian version: {{ =data.granian }}
 
 - [Versus 3rd party servers](./vs.md)
 - [Concurrency](./concurrency.md)
-{{ if False: }}
 - [Python versions](./pyver.md)
-{{ pass }}
 
 ### 3rd party benchmarks
 
diff --git a/benchmarks/templates/pyver.md b/benchmarks/templates/pyver.md
new file mode 100644
index 00000000..aa824325
--- /dev/null
+++ b/benchmarks/templates/pyver.md
@@ -0,0 +1,25 @@
+# Granian benchmarks
+
+{{ include './_helpers.tpl' }}
+
+## Python versions
+
+{{ _common_data = globals().get(f"data{pyvb}") }}
+Run at: {{ =datetime.datetime.fromtimestamp(_common_data.run_at).strftime('%a %d %b %Y, %H:%M') }}
+Environment: {{ =benv }} (CPUs: {{ =_common_data.cpu }})
+Granian version: {{ =_common_data.granian }}
+
+Comparison between different Python versions of Granian application protocols using a 4 bytes plain text response.
+Bytes and string responses are reported for every protocol to show the difference with the RSGI protocol.
+ASGI and WSGI responses are always returned as bytes by the application.
+The "echo" request is a 4 bytes POST request responding with the same body.
+
+| Python version | Request | Total requests | RPS | avg latency | max latency |
+| --- | --- | --- | --- | --- | --- |
+{{ for pykey in ["310", "311", "312", "313"]: }}
+{{ _data = globals().get(f"data{pykey}") }}
+{{ for key, runs in _data.results["interfaces"].items(): }}
+{{ max_c, run = get_max_concurrency_run(runs) }}
+| {{ =_data.pyver }} | {{ =key }} (c{{ =max_c }}) | {{ =run["requests"]["total"] }} | {{ =run["requests"]["rps"] }} | {{ =fmt_ms(run["latency"]["avg"]) }} | {{ =fmt_ms(run["latency"]["max"]) }} |
+{{ pass }}
+{{ pass }}
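The `pyver.md` template consumes one `results/py3xx.json` file per interpreter, each with the `cpu`/`run_at`/`pyver`/`results`/`granian` shape written by `benchmarks.py`. A rough, illustrative equivalent of what the table renders, assuming the per-version files produced by the `benchmark-pyver` job and picking the highest-concurrency run (the template delegates that choice to `get_max_concurrency_run` in `_helpers.tpl`):

```python
import json


def summarize(path):
    with open(path) as f:
        data = json.load(f)
    print(f"Python {data['pyver']} (granian {data['granian']}, CPUs: {data['cpu']})")
    for key, runs in data['results']['interfaces'].items():
        # JSON object keys are strings, so cast to int to find the highest concurrency.
        max_c = max(runs, key=int)
        run = runs[max_c]
        print(f"  {key} (c{max_c}): {run['requests']['rps']} rps")


if __name__ == '__main__':
    for ver in ('310', '311', '312', '313'):
        summarize(f'results/py{ver}.json')
```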