forked from SymbioticLab/Fluid
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtune_grid_dcgan.py
65 lines (48 loc) · 1.69 KB
/
tune_grid_dcgan.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
from pathlib import Path
from ray import tune
import workloads.common as com
from fluid.trainer import TorchTrainer
from workloads.common import dcgan as workload
# Dataset root and results root, resolved for the current host by the
# shared workloads helper. NOTE(review): exact semantics live in
# workloads.common.detect_paths — not visible in this chunk.
DATA_PATH, RESULTS_PATH = com.detect_paths()
# Experiment name = this file's stem without the "tune_" prefix,
# i.e. "grid_dcgan" for tune_grid_dcgan.py.
EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")
def create_grid_search_space(exp_no):
    """Build the Tune search space for DCGAN grid experiment *exp_no*.

    Resolves the project-provided factory ``space.create_grid_dcgan_<exp_no>``,
    calls it to obtain the mutation dict, and wraps each value in
    ``tune.sample_from`` so Tune samples it per trial.
    """
    # Resolve the factory by attribute name instead of eval(): identical
    # lookup for valid exp_no values, but no arbitrary-code-execution
    # surface, and a clear AttributeError when the factory is missing.
    # NOTE(review): `space` is not imported in this chunk — presumably
    # provided elsewhere in the package; confirm before relying on this.
    factory = getattr(space, "create_grid_dcgan_" + str(exp_no))
    mutations = factory()
    return {key: tune.sample_from(val) for key, val in mutations.items()}
def setup_tune_scheduler(exp_no):
    """Assemble the keyword arguments passed to ``tune.run`` for *exp_no*.

    Returns a dict with the search space, per-trial GPU request, driver
    sync policy, output directory, and a per-experiment run name.
    """
    # Results already shared via NFS need no driver sync; only sync when
    # writing to a purely local results directory.
    on_nfs = RESULTS_PATH.startswith("/nfs")
    return {
        "config": create_grid_search_space(exp_no),
        "resources_per_trial": {"gpu": 1},
        "sync_to_driver": not on_nfs,
        "local_dir": RESULTS_PATH,
        "name": EXP_NAME + str(exp_no),
    }
def main():
    """Run the DCGAN grid-search experiment and export per-trial metrics.

    Initializes Ray and the DCGAN workload, builds a TorchTrainer-backed
    trainable, runs it under Tune for up to 128 training iterations, and
    writes each trial's metric history as a CSV next to its log directory.
    """
    exp, sd = com.init_ray()
    workload.init_dcgan()

    # Trainable wired to the DCGAN workload's creator functions and
    # custom GAN training operator; config carries the seed plus any
    # workload-static settings.
    MyTrainable = TorchTrainer.as_trainable(
        data_creator=workload.data_creator,
        model_creator=workload.model_creator,
        loss_creator=workload.loss_creator,
        optimizer_creator=workload.optimizer_creator,
        training_operator_cls=workload.GANOperator,
        config={
            "seed": sd,
            "extra_fluid_trial_resources": {},
            **workload.static_config(),
        },
    )

    params = {
        **setup_tune_scheduler(exp),
    }
    analysis = tune.run(MyTrainable, stop={"training_iteration": 128}, **params)

    # Persist each trial's metric dataframe next to its logs.
    for logdir, df in analysis.trial_dataframes.items():
        # Fixed typo in the exported filename: "trail" -> "trial".
        # NOTE(review): if downstream tooling reads the old name
        # "trail_dataframe.csv", update it to match.
        df.to_csv(Path(logdir) / "trial_dataframe.csv")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()