
Commit

Merge branch 'main' into add_calculator_only_output_field
lbluque authored Dec 5, 2024
2 parents 4c7c8dd + 6ab6ad7 commit 15c946b
Showing 4 changed files with 10 additions and 4 deletions.
2 changes: 1 addition & 1 deletion packages/env.cpu.yml
@@ -4,7 +4,7 @@ channels:
 - defaults
 dependencies:
 - cpuonly
-- pytorch>=2.4
+- pytorch==2.4.0
 - ase
 - e3nn>=0.5
 - numpy >=1.26.0,<2.0.0
2 changes: 1 addition & 1 deletion packages/env.gpu.yml
@@ -5,7 +5,7 @@ channels:
 - defaults
 dependencies:
 - pytorch-cuda=12.1
-- pytorch>=2.4
+- pytorch==2.4.0
 - ase
 - e3nn>=0.5
 - numpy >=1.26.0,<2.0.0
2 changes: 1 addition & 1 deletion packages/fairchem-core/pyproject.toml
@@ -9,7 +9,7 @@ license = {text = "MIT License"}
 dynamic = ["version", "readme"]
 requires-python = ">=3.9, <3.13"
 dependencies = [
-    "torch>=2.4",
+    "torch==2.4",
     "numpy >=1.26.0, <2.0.0",
     "lmdb",
     "ase",
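Taken together, the three files above pin the PyTorch dependency to the 2.4 release instead of accepting any version at or above 2.4. As a quick, hypothetical sanity check (not part of this commit), one could confirm the pin took effect after installing the updated environment:

# hypothetical sanity check, not part of this commit: confirm the pinned
# torch 2.4 release actually resolved in the installed environment
import torch

assert torch.__version__.startswith("2.4"), (
    f"expected the pinned torch 2.4 release, got {torch.__version__}"
)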
8 changes: 7 additions & 1 deletion src/fairchem/core/common/distutils.py
@@ -80,7 +80,8 @@ def setup(config) -> None:
             assign_device_for_local_rank(config["cpu"], config["local_rank"])
         else:
             # in the old code, all ranks can see all devices but need to be assigned a device equal to their local rank
-            # this is dangerous and should be deprecated
+            # this is dangerous and should be deprecated, however, FSDP still requires backwards compatibility with
+            # initializing this way for now so we need to keep it
             torch.cuda.set_device(config["local_rank"])
 
         dist.init_process_group(
@@ -123,6 +124,11 @@ def setup(config) -> None:
         config["local_rank"] = int(os.environ.get("LOCAL_RANK"))
         if config.get("use_cuda_visibile_devices"):
             assign_device_for_local_rank(config["cpu"], config["local_rank"])
+        elif torch.cuda.is_available():
+            # in the old code, all ranks can see all devices but need to be assigned a device equal to their local rank
+            # this is dangerous and should be deprecated, however, FSDP still requires backwards compatibility with
+            # initializing this way for now so we need to keep it
+            torch.cuda.set_device(config["local_rank"])
         dist.init_process_group(
             backend=config["distributed_backend"],
             rank=int(os.environ.get("RANK")),
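For context, here is a minimal standalone sketch of the legacy device-assignment pattern the comment describes: every rank can see every GPU and simply binds itself to the device whose index equals its local rank before joining the process group. This is illustrative only, not fairchem's implementation; legacy_setup is a hypothetical name, and the usual torchrun environment variables (RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT) are assumed to be set by the launcher.

# minimal sketch, not the fairchem implementation: the legacy pattern kept
# above for FSDP backwards compatibility
import os

import torch
import torch.distributed as dist


def legacy_setup() -> None:
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    if torch.cuda.is_available():
        # all ranks see all devices; each rank claims the GPU matching its
        # local rank (the "dangerous" initialization the comment refers to)
        torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend="nccl" if torch.cuda.is_available() else "gloo",
        rank=int(os.environ.get("RANK", 0)),
        world_size=int(os.environ.get("WORLD_SIZE", 1)),
    )

The other branch in the diff, guarded by use_cuda_visibile_devices and handled by assign_device_for_local_rank, presumably restricts which devices each rank can see instead of relying on this bind-by-index pattern.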
