feat: add a schema for the pixi.toml #936

Merged 2 commits on Mar 9, 2024
31 changes: 31 additions & 0 deletions .github/workflows/schema.yml
@@ -0,0 +1,31 @@
name: Test Schema

on:
push:
branches:
- main
paths-ignore:
- "docs/**"
- "mkdocs.yml"
- "*.md"
workflow_dispatch:
pull_request:
paths:
- "**/pixi.toml"
- "schema/**"
- "**/schema.yml"

jobs:
test-schema:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

- uses: prefix-dev/setup-pixi@v0.5.1
with:
pixi-version: v0.15.2
cache: true

- name: Test Schema
run: |
pixi run -e default test-schema
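The workflow's only real step runs `pixi run -e default test-schema`; the task definition itself lives in the repository's pixi.toml and is not part of this diff. As a rough sketch only, a jsonschema-based check over the repo's manifests could look like the following (the schema path and the validation approach are assumptions, not the actual task):

```python
# Hypothetical sketch of a `test-schema`-style check; the real task is
# defined in the repository's pixi.toml and may work differently.
import json
import tomllib  # standard library since Python 3.11
from pathlib import Path

from jsonschema import validate  # third-party `jsonschema` package

schema = json.loads(Path("schema/schema.json").read_text())  # assumed path

for manifest in Path(".").rglob("pixi.toml"):
    with manifest.open("rb") as f:
        data = tomllib.load(f)  # tomllib requires a binary file handle
    validate(instance=data, schema=schema)  # raises ValidationError on mismatch
    print(f"{manifest}: OK")
```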
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -1,14 +1,14 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.5.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
# Use ruff for python examples
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.0.274
rev: v0.3.1
hooks:
- id: ruff
args: [ --fix, --exit-non-zero-on-fix ]
@@ -41,7 +41,7 @@ repos:
entry: cargo test
pass_filenames: false
- repo: https://github.com/codespell-project/codespell
rev: v2.2.5
rev: v2.2.6
hooks:
- id: codespell
exclude: ".snap"
6 changes: 6 additions & 0 deletions .ruff.toml
@@ -0,0 +1,6 @@
target-version = "py312"
line-length = 100

[format]
quote-style = "double"
indent-style = "space"
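This formatter config accounts for most of the mechanical churn in the example files below: with `quote-style = "double"`, `ruff format` rewrites single-quoted string literals, and the accompanying import reordering matches ruff's import-sorting fixes (presumably enabled via the pre-commit hook's `--fix` pass; the rule selection is not shown in this diff). A minimal before/after sketch:

```python
# Before `ruff format` with quote-style = "double":
name = 'pixi'
# After formatting, string literals use double quotes:
name = "pixi"
```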
2 changes: 1 addition & 1 deletion docs/configuration.md
@@ -501,5 +501,5 @@ test = ["test"]
[environments]
test = {features = ["test"], solve-group = "test"}
prod = {features = ["prod"], solve-group = "test"}
lint = "lint"
lint = ["lint"]
```
3 changes: 2 additions & 1 deletion examples/ctypes-factorial/src/factorial.py
@@ -1,6 +1,7 @@
import argparse
import ctypes
import sys

from loguru import logger as log

log.remove()
@@ -49,7 +50,7 @@ def c_factorial(n):
parser.add_argument(
"n",
type=int,
nargs='?',
nargs="?",
default=10,
help="Number for which to calculate the factorial."
)
10 changes: 5 additions & 5 deletions examples/lightgbm/main.py
@@ -1,16 +1,16 @@
import pandas as pd
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split

# load data
df = pd.read_csv("Breast_cancer_data.csv")

# Declare feature vector and target variable
X = df[[
'mean_radius','mean_texture','mean_perimeter',
'mean_area','mean_smoothness']]
y = df['diagnosis']
"mean_radius","mean_texture","mean_perimeter",
"mean_area","mean_smoothness"]]
y = df["diagnosis"]

# split the dataset into the training set and test set
X_train, X_test, y_train, y_test = train_test_split(
4 changes: 2 additions & 2 deletions examples/opencv/calibrate.py
@@ -54,9 +54,9 @@
break
elif k % 256 == 32:
# SPACE pressed
img_name = "opencv_frame_{}.png".format(img_counter)
img_name = f"opencv_frame_{img_counter}.png"
cv2.imwrite(img_name, frame_clean)
print("{} written!".format(img_name))
print(f"{img_name} written!")
img_counter += 1

# Convert to grayscale
5 changes: 3 additions & 2 deletions examples/opencv/webcam_capture.py
@@ -1,5 +1,6 @@
import cv2
import os

import cv2
import requests


@@ -35,7 +36,7 @@ def capture_and_grayscale():

# Check if the webcam is opened correctly
if not working_cam.isOpened():
raise IOError("Cannot open webcam")
raise OSError("Cannot open webcam")

while True:
# Read the current frame from the webcam
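The `IOError` to `OSError` swap above is behavior-preserving: `IOError` has been a plain alias of `OSError` since Python 3.3, so modern linters simply prefer the canonical name. A quick demonstration:

```python
# IOError is an alias of OSError in Python 3, so the rename is cosmetic:
assert IOError is OSError

try:
    raise OSError("Cannot open webcam")
except IOError:  # still caught; both names refer to the same class
    pass
```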
4 changes: 3 additions & 1 deletion examples/polarify/tests/test_versions.py
@@ -1,6 +1,8 @@
import polars
import sys

import polars


def test_versions():
print("") # empty line
print(f"Polars version: {polars.__version__}")
1 change: 1 addition & 0 deletions examples/pypi/pycosat_example.py
@@ -1,5 +1,6 @@
# Extremely simple example of using pycosat to show we can run sdist packages
import pycosat

cnf = [[1, -5, 4], [-1, 5, 3, 4], [-3, -4]]
result = pycosat.solve(cnf)
print(result)
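For readers new to SAT input: each inner list in `cnf` is a disjunctive clause of integer literals, where a negative integer means a negated variable, and `pycosat.solve` returns one satisfying assignment as a list of signed literals (or the string "UNSAT"). A small self-check of that convention:

```python
import pycosat

cnf = [[1, -5, 4], [-1, 5, 3, 4], [-3, -4]]
solution = pycosat.solve(cnf)

if solution not in ("UNSAT", "UNKNOWN"):
    truth = {abs(lit): lit > 0 for lit in solution}
    # a CNF is satisfied when every clause has at least one true literal
    assert all(any(truth[abs(l)] == (l > 0) for l in clause) for clause in cnf)
```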
4 changes: 2 additions & 2 deletions examples/qgis/get_data.py
@@ -1,5 +1,5 @@
import requests
import geopandas as gpd
import requests

# URL for USGS data feed for all earthquakes in the last 7 days
url = "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.geojson"
@@ -11,6 +11,6 @@
gdf = gpd.read_file(url)

# Save to local GeoJSON file
gdf.to_file("earthquakes.geojson", driver='GeoJSON')
gdf.to_file("earthquakes.geojson", driver="GeoJSON")

print("Data downloaded and saved to earthquakes.geojson")
5 changes: 3 additions & 2 deletions examples/rerun_example/dna_example.py
@@ -1,8 +1,9 @@
import rerun as rr
from math import tau

import numpy as np
import rerun as rr
from rerun_demo.data import build_color_spiral
from rerun_demo.util import bounce_lerp
from math import tau

NUM_POINTS = 100

23 changes: 12 additions & 11 deletions examples/rerun_example/force_driven_lockfile_graph.py
@@ -1,23 +1,24 @@
import rerun as rr
import networkx as nx
import yaml
import numpy as np
import hashlib
import sys

import networkx as nx
import numpy as np
import rerun as rr
import yaml

# Give relative path or default to local pixi.lock
lockfile_path = sys.argv[1] if len(sys.argv) > 1 else 'pixi.lock'
lockfile_path = sys.argv[1] if len(sys.argv) > 1 else "pixi.lock"

with open(lockfile_path, 'r') as file:
with open(lockfile_path) as file:
lockfile_data = yaml.safe_load(file)

package_data = lockfile_data['package']
package_names = [package['name'] for package in package_data]
package_data = lockfile_data["package"]
package_names = [package["name"] for package in package_data]

graph = nx.DiGraph()
for package in package_data:
package_name = package['name']
dependencies = package.get('dependencies', [])
package_name = package["name"]
dependencies = package.get("dependencies", [])
graph.add_node(package_name)
for i, dep in enumerate(dependencies):
graph.add_edge(package_name, dep.split(" ")[0])
@@ -26,7 +27,7 @@
rr.connect()

def hash_string_to_int(string):
return int(hashlib.sha256(string.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
return int(hashlib.sha256(string.encode("utf-8")).hexdigest(), 16) % (10 ** 8)


# Memoization dictionary
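The file is truncated here right after the memoization comment. As a labeled guess at the intent (not the repository's actual code), `hash_string_to_int` can seed a deterministic per-package color that the dictionary memoizes, so each node keeps the same color across runs:

```python
# Hypothetical continuation; the real memoization code is cut off above.
color_cache = {}

def package_color(name):
    if name not in color_cache:
        rng = np.random.default_rng(hash_string_to_int(name))  # stable seed per name
        color_cache[name] = rng.integers(0, 256, size=3).tolist()  # RGB triple
    return color_cache[name]
```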
2 changes: 2 additions & 0 deletions examples/solve-groups/test_imports.py
@@ -1,7 +1,9 @@
import os
import sys

import pytest


def test_imports():
if os.environ["PIXI_ENVIRONMENT_NAME"] == "min-py38":
# importing pydantic is not possible in this environment
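The test body is cut off above. Given the comment about pydantic, a plausible shape for such an environment-dependent import test (an assumption, not the repository's actual code) is:

```python
# Hypothetical sketch: pydantic should be importable except in the
# minimal py38 environment, keyed off PIXI_ENVIRONMENT_NAME.
if os.environ["PIXI_ENVIRONMENT_NAME"] == "min-py38":
    with pytest.raises(ImportError):
        import pydantic
else:
    import pydantic  # noqa: F401
```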