Skip to content

Commit

Permalink
[Core] Fix Ascend NPU discovery to support 8+ cards per node (ray-project#48543)
Browse files Browse the repository at this point in the history

Signed-off-by: Xiaoshuang Liu <liuxiaoshuang4@huawei.com>
Signed-off-by: mohitjain2504 <mohit.jain@dream11.com>
  • Loading branch information
liuxsh9 authored and mohitjain2504 committed Nov 15, 2024
1 parent c17584c commit f0222b2
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion python/ray/_private/accelerators/npu.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def get_current_node_num_accelerators() -> int:
logger.debug("Could not import AscendCL: %s", e)

try:
npu_files = glob.glob("/dev/davinci?")
npu_files = glob.glob("/dev/davinci[0-9]*")
return len(npu_files)
except Exception as e:
logger.debug("Failed to detect number of NPUs: %s", e)
Expand Down
4 changes: 2 additions & 2 deletions python/ray/tests/accelerators/test_npu.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@
def test_autodetect_num_npus(mock_glob):
with patch.dict(sys.modules):
sys.modules["acl"] = None
mock_glob.return_value = [f"/dev/davinci{i}" for i in range(4)]
assert Accelerator.get_current_node_num_accelerators() == 4
mock_glob.return_value = [f"/dev/davinci{i}" for i in range(64)]
assert Accelerator.get_current_node_num_accelerators() == 64


@patch("glob.glob")
Expand Down

0 comments on commit f0222b2

Please sign in to comment.