From 24d5facec5c9a0e0f9b37dc5259d7104cce32e22 Mon Sep 17 00:00:00 2001
From: Raymond Xu
Date: Fri, 22 Nov 2024 08:43:45 -0800
Subject: [PATCH 1/3] Show the link to the All Hands product roadmap (#5192)

Co-authored-by: Graham Neubig
---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 97de3104472c..77633dbbbf72 100644
--- a/README.md
+++ b/README.md
@@ -90,6 +90,8 @@ See more about the community in [COMMUNITY.md](./COMMUNITY.md) or find details o
 
 ## 📈 Progress
 
+See the monthly OpenHands roadmap [here](https://github.com/orgs/All-Hands-AI/projects/1) (updated at the maintainer's meeting at the end of each month).
+
 
 Star History Chart

From 36e3dc5c19c323c45de88aa18906dc560fc99d4b Mon Sep 17 00:00:00 2001
From: mamoodi
Date: Fri, 22 Nov 2024 13:24:33 -0500
Subject: [PATCH 2/3] Add eval workflow that triggers remote eval job (#5108)

---
 .github/workflows/run-eval.yml | 53 ++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 .github/workflows/run-eval.yml

diff --git a/.github/workflows/run-eval.yml b/.github/workflows/run-eval.yml
new file mode 100644
index 000000000000..df79872aec26
--- /dev/null
+++ b/.github/workflows/run-eval.yml
@@ -0,0 +1,53 @@
+# Run evaluation on a PR
+name: Run Eval
+
+# Runs when a PR is labeled with one of the "run-eval-" labels
+on:
+  pull_request:
+    types: [labeled]
+
+jobs:
+  trigger-job:
+    name: Trigger remote eval job
+    if: ${{ github.event.label.name == 'run-eval-xs' || github.event.label.name == 'run-eval-s' || github.event.label.name == 'run-eval-m' }}
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout PR branch
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ github.head_ref }}
+
+      - name: Trigger remote job
+        run: |
+          REPO_URL="https://github.com/${{ github.repository }}"
+          PR_BRANCH="${{ github.head_ref }}"
+          echo "Repository URL: $REPO_URL"
+          echo "PR Branch: $PR_BRANCH"
+
+          if [[ "${{ github.event.label.name }}" == "run-eval-xs" ]]; then
+            EVAL_INSTANCES="1"
+          elif [[ "${{ github.event.label.name }}" == "run-eval-s" ]]; then
+            EVAL_INSTANCES="5"
+          elif [[ "${{ github.event.label.name }}" == "run-eval-m" ]]; then
+            EVAL_INSTANCES="30"
+          fi
+
+          curl -X POST \
+            -H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
+            -H "Accept: application/vnd.github+json" \
+            -d "{\"ref\": \"main\", \"inputs\": {\"github-repo\": \"${REPO_URL}\", \"github-branch\": \"${PR_BRANCH}\", \"pr-number\": \"${{ github.event.pull_request.number }}\", \"eval-instances\": \"${EVAL_INSTANCES}\"}}" \
+            https://api.github.com/repos/All-Hands-AI/evaluation/actions/workflows/create-branch.yml/dispatches
+
+          # Send Slack message
+          PR_URL="https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}"
+          slack_text="PR $PR_URL has triggered evaluation on $EVAL_INSTANCES instances..."
+          curl -X POST -H 'Content-type: application/json' --data '{"text":"'"$slack_text"'"}' \
+            https://hooks.slack.com/services/${{ secrets.SLACK_TOKEN }}
+
+      - name: Comment on PR
+        uses: KeisukeYamashita/create-comment@v1
+        with:
+          unique: false
+          comment: |
+            Running evaluation on the PR. Once eval is done, the results will be posted.
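The "Trigger remote job" step above is an ordinary workflow_dispatch call against the All-Hands-AI/evaluation repository. The Python sketch below shows roughly the same request for anyone who wants to trigger it outside CI; it is illustrative only and not part of the patch. GITHUB_PAT is a hypothetical environment variable standing in for secrets.PAT_TOKEN, and the requests library is assumed to be installed.

import os

import requests

# The workflow_dispatch endpoint targeted by the workflow's curl step.
API_URL = (
    'https://api.github.com/repos/All-Hands-AI/evaluation'
    '/actions/workflows/create-branch.yml/dispatches'
)


def trigger_eval(repo_url: str, branch: str, pr_number: int, instances: int) -> None:
    """Dispatch the remote eval job with the same inputs the Action sends."""
    response = requests.post(
        API_URL,
        headers={
            # GITHUB_PAT is an assumed env var; the workflow itself uses secrets.PAT_TOKEN.
            'Authorization': f'Bearer {os.environ["GITHUB_PAT"]}',
            'Accept': 'application/vnd.github+json',
        },
        json={
            'ref': 'main',
            'inputs': {
                'github-repo': repo_url,
                'github-branch': branch,
                'pr-number': str(pr_number),
                'eval-instances': str(instances),
            },
        },
        timeout=30,
    )
    # A successful workflow_dispatch returns 204 No Content.
    response.raise_for_status()


if __name__ == '__main__':
    # Example values only; a real call uses the PR's repo, branch, and number.
    trigger_eval('https://github.com/All-Hands-AI/OpenHands', 'my-feature-branch', 123, 5)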
From bb8b4a0b18c3ff32ddbcaec5ebb7b87fac860e90 Mon Sep 17 00:00:00 2001
From: Xingyao Wang
Date: Fri, 22 Nov 2024 12:28:32 -0600
Subject: [PATCH 3/3] feat(runtime): add system resource metrics to /server_info endpoint (#5207)

Co-authored-by: openhands
---
 openhands/runtime/action_execution_server.py |  8 ++-
 openhands/runtime/utils/system_stats.py      | 62 ++++++++++++++++++++
 tests/runtime/utils/test_system_stats.py     | 60 +++++++++++++++++++
 3 files changed, 129 insertions(+), 1 deletion(-)
 create mode 100644 openhands/runtime/utils/system_stats.py
 create mode 100644 tests/runtime/utils/test_system_stats.py

diff --git a/openhands/runtime/action_execution_server.py b/openhands/runtime/action_execution_server.py
index 1251aa346838..e8043133d9b5 100644
--- a/openhands/runtime/action_execution_server.py
+++ b/openhands/runtime/action_execution_server.py
@@ -52,6 +52,7 @@
 from openhands.runtime.utils.files import insert_lines, read_lines
 from openhands.runtime.utils.runtime_init import init_user_and_working_directory
 from openhands.runtime.utils.system import check_port_available
+from openhands.runtime.utils.system_stats import get_system_stats
 from openhands.utils.async_utils import call_sync_from_async, wait_all
 
 
@@ -420,7 +421,12 @@ async def get_server_info():
     current_time = time.time()
     uptime = current_time - client.start_time
     idle_time = current_time - client.last_execution_time
-    return {'uptime': uptime, 'idle_time': idle_time}
+
+    return {
+        'uptime': uptime,
+        'idle_time': idle_time,
+        'resources': get_system_stats(),
+    }
 
 @app.post('/execute_action')
 async def execute_action(action_request: ActionRequest):
diff --git a/openhands/runtime/utils/system_stats.py b/openhands/runtime/utils/system_stats.py
new file mode 100644
index 000000000000..d0068c248793
--- /dev/null
+++ b/openhands/runtime/utils/system_stats.py
@@ -0,0 +1,62 @@
+"""Utilities for getting system resource statistics."""
+
+import time
+
+import psutil
+
+
+def get_system_stats() -> dict:
+    """Get current system resource statistics.
+
+    Returns:
+        dict: A dictionary containing:
+            - cpu_percent: CPU usage percentage for the current process
+            - memory: Memory usage stats (rss, vms, percent)
+            - disk: Disk usage stats (total, used, free, percent)
+            - io: I/O statistics (read/write bytes)
+    """
+    process = psutil.Process()
+    # Get initial CPU percentage (this will return 0.0)
+    process.cpu_percent()
+    # Wait a bit and get the actual CPU percentage
+    time.sleep(0.1)
+
+    with process.oneshot():
+        cpu_percent = process.cpu_percent()
+        memory_info = process.memory_info()
+        memory_percent = process.memory_percent()
+
+    disk_usage = psutil.disk_usage('/')
+
+    # Get I/O stats directly from /proc/[pid]/io to avoid psutil's field name assumptions
+    try:
+        with open(f'/proc/{process.pid}/io', 'rb') as f:
+            io_stats = {}
+            for line in f:
+                if line:
+                    try:
+                        name, value = line.strip().split(b': ')
+                        io_stats[name.decode('ascii')] = int(value)
+                    except (ValueError, UnicodeDecodeError):
+                        continue
+    except (FileNotFoundError, PermissionError):
+        io_stats = {'read_bytes': 0, 'write_bytes': 0}
+
+    return {
+        'cpu_percent': cpu_percent,
+        'memory': {
+            'rss': memory_info.rss,
+            'vms': memory_info.vms,
+            'percent': memory_percent,
+        },
+        'disk': {
+            'total': disk_usage.total,
+            'used': disk_usage.used,
+            'free': disk_usage.free,
+            'percent': disk_usage.percent,
+        },
+        'io': {
+            'read_bytes': io_stats.get('read_bytes', 0),
+            'write_bytes': io_stats.get('write_bytes', 0),
+        },
+    }
diff --git a/tests/runtime/utils/test_system_stats.py b/tests/runtime/utils/test_system_stats.py
new file mode 100644
index 000000000000..afb6c00c2942
--- /dev/null
+++ b/tests/runtime/utils/test_system_stats.py
@@ -0,0 +1,60 @@
+"""Tests for system stats utilities."""
+
+import psutil
+
+from openhands.runtime.utils.system_stats import get_system_stats
+
+
+def test_get_system_stats():
+    """Test that get_system_stats returns valid system statistics."""
+    stats = get_system_stats()
+
+    # Test structure
+    assert isinstance(stats, dict)
+    assert set(stats.keys()) == {'cpu_percent', 'memory', 'disk', 'io'}
+
+    # Test CPU stats
+    assert isinstance(stats['cpu_percent'], float)
+    assert 0 <= stats['cpu_percent'] <= 100 * psutil.cpu_count()
+
+    # Test memory stats
+    assert isinstance(stats['memory'], dict)
+    assert set(stats['memory'].keys()) == {'rss', 'vms', 'percent'}
+    assert isinstance(stats['memory']['rss'], int)
+    assert isinstance(stats['memory']['vms'], int)
+    assert isinstance(stats['memory']['percent'], float)
+    assert stats['memory']['rss'] > 0
+    assert stats['memory']['vms'] > 0
+    assert 0 <= stats['memory']['percent'] <= 100
+
+    # Test disk stats
+    assert isinstance(stats['disk'], dict)
+    assert set(stats['disk'].keys()) == {'total', 'used', 'free', 'percent'}
+    assert isinstance(stats['disk']['total'], int)
+    assert isinstance(stats['disk']['used'], int)
+    assert isinstance(stats['disk']['free'], int)
+    assert isinstance(stats['disk']['percent'], float)
+    assert stats['disk']['total'] > 0
+    assert stats['disk']['used'] >= 0
+    assert stats['disk']['free'] >= 0
+    assert 0 <= stats['disk']['percent'] <= 100
+    # Verify that used + free is less than or equal to total
+    # (might not be exactly equal due to filesystem overhead)
+    assert stats['disk']['used'] + stats['disk']['free'] <= stats['disk']['total']
+
+    # Test I/O stats
+    assert isinstance(stats['io'], dict)
+    assert set(stats['io'].keys()) == {'read_bytes', 'write_bytes'}
+    assert isinstance(stats['io']['read_bytes'], int)
+    assert isinstance(stats['io']['write_bytes'], int)
+    assert stats['io']['read_bytes'] >= 0
+    assert stats['io']['write_bytes'] >= 0
+
+
+def test_get_system_stats_stability():
+    """Test that get_system_stats can be called multiple times without errors."""
+    # Call multiple times to ensure stability
+    for _ in range(3):
+        stats = get_system_stats()
+        assert isinstance(stats, dict)
+        assert stats['cpu_percent'] >= 0
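With the third patch applied, GET /server_info on the action execution server returns a 'resources' block alongside 'uptime' and 'idle_time'. The short client sketch below shows how that response might be consumed; it is illustrative only and not part of the patch, and RUNTIME_URL (host and port) is an assumption about where the server is listening.

import requests

# Assumed address of a locally running action execution server.
RUNTIME_URL = 'http://localhost:8000'

info = requests.get(f'{RUNTIME_URL}/server_info', timeout=10).json()
resources = info['resources']

print(f"uptime: {info['uptime']:.1f}s, idle: {info['idle_time']:.1f}s")
print(f"cpu: {resources['cpu_percent']}%")
print(f"memory rss: {resources['memory']['rss']} bytes ({resources['memory']['percent']:.1f}%)")
print(f"disk used: {resources['disk']['percent']}%")
print(f"io read/write: {resources['io']['read_bytes']}/{resources['io']['write_bytes']} bytes")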