
Replace pylint with ruff #756

Merged · 6 commits · Jan 15, 2025
5 changes: 2 additions & 3 deletions .github/workflows/ci.yml
@@ -310,15 +310,14 @@ jobs:
mv .venv/Scripts .venv/bin # venv is placed under `Scripts` on Windows
fi
source .venv/bin/activate
pip3 install maturin black pylint pytest
pip3 install maturin ruff pytest
maturin build -m apis/python/node/Cargo.toml
pip3 install target/wheels/*
dora new test_python_project --lang python --internal-create-with-path-dependencies
cd test_python_project

# Check Compliancy
black . --check
pylint --disable=C,R **/*.py
ruff check .
pip install -e ./*/
pytest

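Note: the old compliance step ran two tools (`black --check` plus `pylint` with the C and R categories disabled), while the new step runs a single `ruff check .`. A sketch of how the rule set could be narrowed or widened explicitly if needed — the `--select` codes below are illustrative assumptions, not part of this PR:

```bash
# Hypothetical: restrict ruff to pyflakes (F) and pycodestyle errors (E),
# roughly the "errors only" spirit of the old `pylint --disable=C,R` invocation.
ruff check . --select E,F
```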
2 changes: 1 addition & 1 deletion .github/workflows/node-hub-ci-cd.yml
@@ -86,7 +86,7 @@ jobs:
run: |
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.local/bin" >> $GITHUB_PATH
pip install black pylint pytest
pip install ruff pytest

- name: Set up Rust
if: runner.os == 'Linux' || github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && startsWith(github.ref, 'refs/tags/'))
3 changes: 1 addition & 2 deletions .github/workflows/node_hub_test.sh
@@ -43,8 +43,7 @@ else
if [ -f "$dir/pyproject.toml" ]; then
echo "Running linting and tests for Python project in $dir..."
pip install .
poetry run black --check .
poetry run pylint --disable=C,R --ignored-modules=cv2,pyrealsense2 **/*.py
ruff check .
poetry run pytest
fi
fi
8 changes: 4 additions & 4 deletions binaries/cli/src/template/python/__node-name__/README.md
@@ -10,16 +10,16 @@ pip install -e .

## Contribution Guide

- Format with [black](https://github.com/psf/black):
- Format with [ruff](https://docs.astral.sh/ruff/):

```bash
black . # Format
ruff check . --fix
```

- Lint with [pylint](https://github.com/pylint-dev/pylint):
- Lint with ruff:

```bash
pylint --disable=C,R --ignored-modules=cv2 . # Lint
ruff check .
```

- Test with [pytest](https://github.com/pytest-dev/pytest)
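Pulled together, the template's updated contributor workflow amounts to the following (a sketch assembled from the diff above):

```bash
pip install -e .      # install the node in editable mode
ruff check . --fix    # apply ruff's safe autofixes
ruff check .          # verify nothing is left to flag
pytest                # run the tests
```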
7 changes: 3 additions & 4 deletions binaries/cli/src/template/python/__node-name__/pyproject.toml
@@ -12,13 +12,12 @@ packages = [{ include = "__node_name__" }]
[tool.poetry.dependencies]
dora-rs = "^0.3.6"
numpy = "< 2.0.0"
pyarrow = ">= 5.0.0"
pyarrow = ">= 15.0.0"
python = "^3.7"

[tool.poetry.dev-dependencies]
pytest = ">= 8.3.4"
pylint = ">= 3.3.2"
black = ">= 24.10"
pytest = ">= 6.3.4"
ruff = ">= 0.9.1"

[tool.poetry.scripts]
__node-name__ = "__node_name__.main:main"
11 changes: 6 additions & 5 deletions binaries/cli/src/template/python/mod.rs
@@ -77,9 +77,10 @@ fn create_custom_node(
.with_context(|| format!("failed to write `{}`", node_path.display()))?;

// tests/tests___node_name__.py
let node_path = root
.join("tests")
.join(format!("test_{}.py", name.replace(" ", "_")));
let node_path = root.join("tests").join(format!(
"test_{}.py",
name.replace(" ", "_").replace("-", "_")
));
let file = replace_space(_TEST_PY, &name);
fs::write(&node_path, file)
.with_context(|| format!("failed to write `{}`", node_path.display()))?;
@@ -90,8 +91,8 @@
);
println!(" cd {}", Path::new(".").join(&root).display());
println!(" pip install -e . # Install",);
println!(" black . # Format");
println!(" pylint --disable=C,R . # Lint",);
println!(" ruff check . --fix # Format");
println!(" ruff check . # Lint",);
println!(" pytest . # Test");

Ok(())
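With the added `.replace("-", "_")`, hyphenated node names now produce importable test module names. An assumed example (the node name and resulting path are illustrative, not taken from this diff):

```bash
dora new my-node --lang python
# expected test file: my-node/tests/test_my_node.py  (hyphen mapped to underscore)
```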
1 change: 0 additions & 1 deletion examples/piper/dummy_inference_2.py
@@ -1,7 +1,6 @@
from dora import Node


import numpy as np
import h5py

f = h5py.File("data/episode_0.hdf5", "r")
1 change: 0 additions & 1 deletion examples/piper/replay.py
@@ -1,7 +1,6 @@
from dora import Node


import numpy as np
import h5py
import os

1 change: 0 additions & 1 deletion examples/python-operator-dataflow/plot.py
@@ -1,6 +1,5 @@
import os
import cv2
import time

from dora import DoraStatus
from utils import LABELS
2 changes: 1 addition & 1 deletion node-hub/dora-argotranslate/dora_argotranslate/__init__.py
@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
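The removal of the redundant `"r"` mode in `open()` here (and in the other node-hub `__init__.py` files below) matches ruff's pyupgrade autofix. An assumed way to apply just that rule across a repository — the rule code is an assumption about how these hunks were produced:

```bash
ruff check . --select UP015 --fix   # UP015: redundant open() mode argument
```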
8 changes: 4 additions & 4 deletions node-hub/dora-argotranslate/dora_argotranslate/main.py
@@ -2,10 +2,10 @@

os.environ["ARGOS_DEVICE_TYPE"] = "auto"

from dora import Node
import pyarrow as pa
import argostranslate.package
import argostranslate.translate
import pyarrow as pa
from dora import Node

from_code = os.getenv("SOURCE_LANGUAGE", "fr")
to_code = os.getenv("TARGET_LANGUAGE", "en")
@@ -15,8 +15,8 @@
available_packages = argostranslate.package.get_available_packages()
package_to_install = next(
filter(
lambda x: x.from_code == from_code and x.to_code == to_code, available_packages
)
lambda x: x.from_code == from_code and x.to_code == to_code, available_packages,
),
)
argostranslate.package.install_from_path(package_to_install.download())

@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
9 changes: 6 additions & 3 deletions node-hub/dora-distil-whisper/dora_distil_whisper/main.py
@@ -47,7 +47,8 @@ def load_model():


def load_model_mlx():
from lightning_whisper_mlx import LightningWhisperMLX # noqa
# noqa: disable: import-error
from lightning_whisper_mlx import LightningWhisperMLX

whisper = LightningWhisperMLX(model="distil-large-v3", batch_size=12, quant=None)
return whisper
@@ -78,7 +79,8 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):
if sum(1 for char in text if "\u4e00" <= char <= "\u9fff") / len(text) > 0.5:
# Chinese text processing
for repeat_length in range(
min_repeat_length, min(max_repeat_length, len(text) // 2)
min_repeat_length,
min(max_repeat_length, len(text) // 2),
):
for i in range(len(text) - repeat_length * 2 + 1):
chunk1 = text[i : i + repeat_length]
@@ -90,7 +92,8 @@ def cut_repetition(text, min_repeat_length=4, max_repeat_length=50):
# Non-Chinese (space-separated) text processing
words = text.split()
for repeat_length in range(
min_repeat_length, min(max_repeat_length, len(words) // 2)
min_repeat_length,
min(max_repeat_length, len(words) // 2),
):
for i in range(len(words) - repeat_length * 2 + 1):
chunk1 = " ".join(words[i : i + repeat_length])
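The trailing commas added to multi-line call arguments in this file and in `dora_argotranslate/main.py` look like flake8-commas style fixes. A hedged guess at an invocation that produces them (the rule code is an assumption; the PR may have applied these edits differently):

```bash
ruff check . --select COM812 --fix   # COM812: missing trailing comma
```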
3 changes: 1 addition & 2 deletions node-hub/dora-distil-whisper/pyproject.toml
@@ -28,8 +28,7 @@ dora-distil-whisper = "dora_distil_whisper.main:main"

[tool.poetry.dev-dependencies]
pytest = ">= 6.3.4"
pylint = ">= 3.3.2"
black = ">= 22.10"
ruff = ">= 0.9.1"

[build-system]
requires = ["poetry-core>=1.8.0"]
1 change: 0 additions & 1 deletion node-hub/dora-distil-whisper/tests/test_distil_whisper.py
@@ -2,7 +2,6 @@


def test_import_main():

from dora_distil_whisper.main import main

# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
2 changes: 1 addition & 1 deletion node-hub/dora-echo/dora_echo/__init__.py
@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
4 changes: 2 additions & 2 deletions node-hub/dora-echo/dora_echo/main.py
@@ -1,12 +1,12 @@
import argparse
import os

from dora import Node

RUNNER_CI = True if os.getenv("CI") == "true" else False


def main():

# Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
parser = argparse.ArgumentParser(description="Simple arrow sender")

@@ -20,7 +20,7 @@ def main():
args = parser.parse_args()

node = Node(
args.name
args.name,
) # provide the name to connect to the dataflow if dynamic node

for event in node:
2 changes: 1 addition & 1 deletion node-hub/dora-internvl/dora_internvl/__init__.py
@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
20 changes: 9 additions & 11 deletions node-hub/dora-internvl/dora_internvl/main.py
@@ -1,9 +1,10 @@
import os
from dora import Node

import numpy as np
import pyarrow as pa
import torch
import torchvision.transforms as T
from dora import Node
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
@@ -20,7 +21,7 @@ def build_transform(input_size):
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
T.ToTensor(),
T.Normalize(mean=MEAN, std=STD),
]
],
)
return transform

@@ -42,7 +43,7 @@ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_


def dynamic_preprocess(
image, min_num=1, max_num=12, image_size=448, use_thumbnail=False
image, min_num=1, max_num=12, image_size=448, use_thumbnail=False,
):
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height
@@ -59,7 +60,7 @@ def dynamic_preprocess(

# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size
aspect_ratio, target_ratios, orig_width, orig_height, image_size,
)

# calculate the target width and height
@@ -91,7 +92,7 @@ def load_image(image_array: np.array, input_size=448, max_num=12):
image = Image.fromarray(image_array).convert("RGB")
transform = build_transform(input_size=input_size)
images = dynamic_preprocess(
image, image_size=input_size, use_thumbnail=True, max_num=max_num
image, image_size=input_size, use_thumbnail=True, max_num=max_num,
)
pixel_values = [transform(image) for image in images]
pixel_values = torch.stack(pixel_values)
@@ -116,7 +117,7 @@ def main():
.to(device)
)
tokenizer = AutoTokenizer.from_pretrained(
model_path, trust_remote_code=True, use_fast=False
model_path, trust_remote_code=True, use_fast=False,
)

node = Node()
@@ -138,10 +139,7 @@ def main():
width = metadata["width"]
height = metadata["height"]

if encoding == "bgr8":
channels = 3
storage_type = np.uint8
elif encoding == "rgb8":
if encoding == "bgr8" or encoding == "rgb8":
channels = 3
storage_type = np.uint8
else:
@@ -168,7 +166,7 @@
)
generation_config = dict(max_new_tokens=1024, do_sample=True)
response = model.chat(
tokenizer, pixel_values, question, generation_config
tokenizer, pixel_values, question, generation_config,
)
node.send_output(
"text",
2 changes: 1 addition & 1 deletion node-hub/dora-keyboard/dora_keyboard/__init__.py
@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
4 changes: 2 additions & 2 deletions node-hub/dora-keyboard/dora_keyboard/main.py
@@ -1,7 +1,7 @@
from pynput import keyboard
from pynput.keyboard import Events
import pyarrow as pa
from dora import Node
from pynput import keyboard
from pynput.keyboard import Events


def main():
2 changes: 1 addition & 1 deletion node-hub/dora-microphone/dora_microphone/__init__.py
@@ -5,7 +5,7 @@

# Read the content of the README file
try:
with open(readme_path, "r", encoding="utf-8") as f:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."
13 changes: 6 additions & 7 deletions node-hub/dora-microphone/dora_microphone/main.py
@@ -1,9 +1,9 @@
import sounddevice as sd
import numpy as np
import pyarrow as pa
import time as tm
import os
import time as tm

import numpy as np
import pyarrow as pa
import sounddevice as sd
from dora import Node

MAX_DURATION = float(os.getenv("MAX_DURATION", "0.1"))
@@ -19,7 +19,6 @@ def main():
always_none = node.next(timeout=0.001) is None
finished = False

# pylint: disable=unused-argument
def callback(indata, frames, time, status):
nonlocal buffer, node, start_recording_time, finished

@@ -36,7 +35,7 @@ def callback(indata, frames, time, status):

# Start recording
with sd.InputStream(
callback=callback, dtype=np.int16, channels=1, samplerate=SAMPLE_RATE
callback=callback, dtype=np.int16, channels=1, samplerate=SAMPLE_RATE,
):
while not finished:
sd.sleep(int(1000))
sd.sleep(1000)