Skip to content

J90.yaml

James Horsfall Thomas edited this page Jul 6, 2020 · 1 revision
%YAML 1.2
---

# Long-term low-LR training on T60 games, starting from the SV3010 net.
# Games starting 03-09-2020; 10M-game window, shifting 50% every 5M games.
# For now the LR is held at 0.0002 (the lr_boundaries value below is never
# reached within total_steps).

name: '384x30.J90.1'  # network id: 384 filters x 30 residual blocks, run J90.1
gpu: 0                # index of the GPU used for training

# Training/test chunk counts and the glob paths the chunks are read from.
dataset:
  num_chunks_train: 10000000  # 10M-game training window (see header comment)
  num_chunks_test: 500000
  input_train: 'E:/T60_training_data/testJ90/train.1/*/'
  input_test: 'E:/T60_training_data/testJ90/test.1/*/'
  
# Optimization schedule and loss weights. Children re-indented to 2 spaces
# to match the dataset/model stanzas; parsed data is unchanged.
training:
  total_steps: 75000              # terminate after this many (total) steps; for batch 4096, <=10k steps per million games

  batch_size: 4096
  num_batch_splits: 32            # each 4096 batch is processed in 32 splits of 128 positions

  mask_legal_moves: true

  renorm: true
  renorm_max_r: 4.0
  renorm_max_d: 5.0
  max_grad_norm: 5.0

  swa: true
  swa_steps: 20
  swa_max_n: 10

  policy_loss_weight: 1.0         # weight of policy loss
  value_loss_weight: 0.7          # weight of value loss
  q_ratio: 0.2

  test_steps: 2000                # eval test set values after this many steps
  train_avg_report_steps: 50      # training reports average values after this many steps
  checkpoint_steps: 2000          # frequency for checkpointing before finish

  shuffle_size: 500000            # size of the shuffle buffer

  lr_values:
    - 0.0002
    - 0.0001
  lr_boundaries:                  # list of boundaries in steps; note 100000 > total_steps,
    - 100000                      # so the LR stays at 0.0002 for this entire run

  path: 'C:/networks/'            # network storage dir

# Network architecture: 384x30 with squeeze-excitation, convolutional policy
# head, WDL (win/draw/loss) value head.
model:
  filters: 384
  residual_blocks: 30
  se_ratio: 12            # squeeze-excitation reduction ratio
  policy: 'convolution'
  value: 'wdl'
...
Clone this wiki locally