diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6e9a77c49b..a43430f772 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -35,26 +35,26 @@ commands:
     description: "Restore the cache with pyspec keys"
     steps:
       - restore_cached_venv:
-          venv_name: v2-pyspec
+          venv_name: v3-pyspec-bump2
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
   save_pyspec_cached_venv:
     description: Save a venv into a cache with pyspec keys"
     steps:
       - save_cached_venv:
-          venv_name: v2-pyspec
+          venv_name: v3-pyspec-bump2
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
           venv_path: ./test_libs/pyspec/venv
   restore_deposit_contract_cached_venv:
     description: "Restore the cache with deposit_contract keys"
     steps:
       - restore_cached_venv:
-          venv_name: v4-deposit-contract
+          venv_name: v6-deposit-contract
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
   save_deposit_contract_cached_venv:
     description: Save a venv into a cache with deposit_contract keys"
     steps:
       - save_cached_venv:
-          venv_name: v4-deposit-contract
+          venv_name: v6-deposit-contract
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
           venv_path: ./deposit_contract/venv
 jobs:
diff --git a/.gitignore b/.gitignore
index 16d39a4342..4dff5fbcb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,9 @@ build/
 output/
 
 eth2.0-spec-tests/
+
 .pytest_cache
+.mypy_cache
 
 # Dynamically built from Markdown spec
 test_libs/pyspec/eth2spec/phase0/spec.py
diff --git a/Makefile b/Makefile
index 4ee757f887..318056689d 100644
--- a/Makefile
+++ b/Makefile
@@ -29,16 +29,21 @@ COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
 
 all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
 
-clean:
+# deletes everything except the venvs
+partial_clean:
 	rm -rf $(YAML_TEST_DIR)
 	rm -rf $(GENERATOR_VENVS)
-	rm -rf $(PY_SPEC_DIR)/venv $(PY_SPEC_DIR)/.pytest_cache
+	rm -rf $(PY_SPEC_DIR)/.pytest_cache
 	rm -rf $(PY_SPEC_ALL_TARGETS)
-	rm -rf $(DEPOSIT_CONTRACT_DIR)/venv $(DEPOSIT_CONTRACT_DIR)/.pytest_cache
+	rm -rf $(DEPOSIT_CONTRACT_DIR)/.pytest_cache
 	rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
 	rm -rf $(PY_SPEC_DIR)/.coverage
 	rm -rf $(PY_SPEC_DIR)/test-reports
 
+clean: partial_clean
+	rm -rf $(PY_SPEC_DIR)/venv
+	rm -rf $(DEPOSIT_CONTRACT_DIR)/venv
+
 # "make gen_yaml_tests" to run generators
 gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS)
 
@@ -48,18 +53,20 @@ install_test:
 
 test: $(PY_SPEC_ALL_TARGETS)
 	cd $(PY_SPEC_DIR); . venv/bin/activate;	export PYTHONPATH="./"; \
-	python -m pytest --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+	python -m pytest -n 4 --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
 
 citest: $(PY_SPEC_ALL_TARGETS)
 	cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; \
-	python -m pytest --junitxml=test-reports/eth2spec/test_results.xml eth2spec
+	python -m pytest -n 4 --junitxml=test-reports/eth2spec/test_results.xml eth2spec
 
 open_cov:
 	((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
 
 lint: $(PY_SPEC_ALL_TARGETS)
 	cd $(PY_SPEC_DIR); . venv/bin/activate; \
-	flake8  --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec;
+	flake8  --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec \
+	&& cd ./eth2spec && mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase0 \
+	&& mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase1;
 
 install_deposit_contract_test: $(PY_SPEC_ALL_TARGETS)
 	cd $(DEPOSIT_CONTRACT_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt
@@ -76,10 +83,10 @@ test_deposit_contract:
 pyspec: $(PY_SPEC_ALL_TARGETS)
 
 $(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS)
-	python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $@
+	python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@
 
 $(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS)
-	python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $@
+	python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/0_fork-choice.md $@
 
 CURRENT_DIR = ${CURDIR}
 
diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml
index dca2a69ba8..10ab26a00a 100644
--- a/configs/constant_presets/mainnet.yaml
+++ b/configs/constant_presets/mainnet.yaml
@@ -10,23 +10,24 @@ SHARD_COUNT: 1024
 # 2**7 (= 128)
 TARGET_COMMITTEE_SIZE: 128
 # 2**12 (= 4,096)
-MAX_INDICES_PER_ATTESTATION: 4096
+MAX_VALIDATORS_PER_COMMITTEE: 4096
 # 2**2 (= 4)
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
-# Normalizes base rewards
-BASE_REWARDS_PER_EPOCH: 5
 # See issue 563
 SHUFFLE_ROUND_COUNT: 90
+# `2**16` (= 65,536)
+MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 65536
+# Jan 3, 2020
+MIN_GENESIS_TIME: 1578009600
+
 
 
 # Deposit contract
 # ---------------------------------------------------------------
 # **TBD**
 DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
-# 2**5 (= 32)
-DEPOSIT_CONTRACT_TREE_DEPTH: 32
 
 
 # Gwei values
@@ -43,20 +44,17 @@ EFFECTIVE_BALANCE_INCREMENT: 1000000000
 
 # Initial values
 # ---------------------------------------------------------------
-GENESIS_FORK_VERSION: 0x00000000
 # 0, GENESIS_EPOCH is derived from this constant
 GENESIS_SLOT: 0
-# 2**64 - 1
-FAR_FUTURE_EPOCH: 18446744073709551615
-BLS_WITHDRAWAL_PREFIX: 0
+BLS_WITHDRAWAL_PREFIX: 0x00
 
 
 # Time parameters
 # ---------------------------------------------------------------
 # 6 seconds 6 seconds
 SECONDS_PER_SLOT: 6
-# 2**2 (= 4) slots 24 seconds
-MIN_ATTESTATION_INCLUSION_DELAY: 4
+# 2**0 (= 1) slots 6 seconds
+MIN_ATTESTATION_INCLUSION_DELAY: 1
 # 2**6 (= 64) slots 6.4 minutes
 SLOTS_PER_EPOCH: 64
 # 2**0 (= 1) epochs 6.4 minutes
@@ -75,24 +73,29 @@ PERSISTENT_COMMITTEE_PERIOD: 2048
 MAX_EPOCHS_PER_CROSSLINK: 64
 # 2**2 (= 4) epochs 25.6 minutes
 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
+# 2**14 (= 16,384) epochs ~73 days
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 16384
+
 
 
-# State list lengths
+# State vector lengths
 # ---------------------------------------------------------------
+# 2**16 (= 65,536) epochs ~0.8 years
+EPOCHS_PER_HISTORICAL_VECTOR: 65536
 # 2**13 (= 8,192) epochs ~36 days
-LATEST_RANDAO_MIXES_LENGTH: 8192
-# 2**13 (= 8,192) epochs ~36 days
-LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192
-# 2**13 (= 8,192) epochs ~36 days
-LATEST_SLASHED_EXIT_LENGTH: 8192
+EPOCHS_PER_SLASHINGS_VECTOR: 8192
+# 2**24 (= 16,777,216) historical roots, ~26,131 years
+HISTORICAL_ROOTS_LIMIT: 16777216
+# 2**40 (= 1,099,511,627,776) validator spots
+VALIDATOR_REGISTRY_LIMIT: 1099511627776
 
 
 # Reward and penalty quotients
 # ---------------------------------------------------------------
-# 2**5 (= 32)
-BASE_REWARD_FACTOR: 32
+# 2**6 (= 64)
+BASE_REWARD_FACTOR: 64
 # 2**9 (= 512)
-WHISTLEBLOWING_REWARD_QUOTIENT: 512
+WHISTLEBLOWER_REWARD_QUOTIENT: 512
 # 2**3 (= 8)
 PROPOSER_REWARD_QUOTIENT: 8
 # 2**25 (= 33,554,432)
@@ -119,9 +122,9 @@ MAX_TRANSFERS: 0
 
 # Signature domains
 # ---------------------------------------------------------------
-DOMAIN_BEACON_PROPOSER: 0
-DOMAIN_RANDAO: 1
-DOMAIN_ATTESTATION: 2
-DOMAIN_DEPOSIT: 3
-DOMAIN_VOLUNTARY_EXIT: 4
-DOMAIN_TRANSFER: 5
+DOMAIN_BEACON_PROPOSER: 0x00000000
+DOMAIN_RANDAO: 0x01000000
+DOMAIN_ATTESTATION: 0x02000000
+DOMAIN_DEPOSIT: 0x03000000
+DOMAIN_VOLUNTARY_EXIT: 0x04000000
+DOMAIN_TRANSFER: 0x05000000
diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml
index fa337cfbf8..b030333ff0 100644
--- a/configs/constant_presets/minimal.yaml
+++ b/configs/constant_presets/minimal.yaml
@@ -9,23 +9,24 @@ SHARD_COUNT: 8
 # [customized] unsecure, but fast
 TARGET_COMMITTEE_SIZE: 4
 # 2**12 (= 4,096)
-MAX_INDICES_PER_ATTESTATION: 4096
+MAX_VALIDATORS_PER_COMMITTEE: 4096
 # 2**2 (= 4)
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
-# Normalizes base rewards
-BASE_REWARDS_PER_EPOCH: 5
 # [customized] Faster, but unsecure.
 SHUFFLE_ROUND_COUNT: 10
+# [customized]
+MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64
+# Jan 3, 2020
+MIN_GENESIS_TIME: 1578009600
+
 
 
 # Deposit contract
 # ---------------------------------------------------------------
 # **TBD**
 DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
-# 2**5 (= 32)
-DEPOSIT_CONTRACT_TREE_DEPTH: 32
 
 
 # Gwei values
@@ -42,20 +43,17 @@ EFFECTIVE_BALANCE_INCREMENT: 1000000000
 
 # Initial values
 # ---------------------------------------------------------------
-GENESIS_FORK_VERSION: 0x00000000
 # 0, GENESIS_EPOCH is derived from this constant
 GENESIS_SLOT: 0
-# 2**64 - 1
-FAR_FUTURE_EPOCH: 18446744073709551615
-BLS_WITHDRAWAL_PREFIX: 0
+BLS_WITHDRAWAL_PREFIX: 0x00
 
 
 # Time parameters
 # ---------------------------------------------------------------
 # 6 seconds 6 seconds
 SECONDS_PER_SLOT: 6
-# [customized] 2 slots
-MIN_ATTESTATION_INCLUSION_DELAY: 2
+# 2**0 (= 1) slots 6 seconds
+MIN_ATTESTATION_INCLUSION_DELAY: 1
 # [customized] fast epochs
 SLOTS_PER_EPOCH: 8
 # 2**0 (= 1) epochs
@@ -70,30 +68,32 @@ SLOTS_PER_HISTORICAL_ROOT: 64
 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
 # 2**11 (= 2,048) epochs
 PERSISTENT_COMMITTEE_PERIOD: 2048
-# 2**6 (= 64) epochs
-MAX_EPOCHS_PER_CROSSLINK: 64
+# [customized] fast catchup crosslinks
+MAX_EPOCHS_PER_CROSSLINK: 4
 # 2**2 (= 4) epochs
 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
 # [customized] 2**12 (= 4,096) epochs
 EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096
 
 
-# State list lengths
+# State vector lengths
 # ---------------------------------------------------------------
 # [customized] smaller state
-LATEST_RANDAO_MIXES_LENGTH: 64
+EPOCHS_PER_HISTORICAL_VECTOR: 64
 # [customized] smaller state
-LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64
-# [customized] smaller state
-LATEST_SLASHED_EXIT_LENGTH: 64
+EPOCHS_PER_SLASHINGS_VECTOR: 64
+# 2**24 (= 16,777,216) historical roots
+HISTORICAL_ROOTS_LIMIT: 16777216
+# 2**40 (= 1,099,511,627,776) validator spots
+VALIDATOR_REGISTRY_LIMIT: 1099511627776
 
 
 # Reward and penalty quotients
 # ---------------------------------------------------------------
-# 2**5 (= 32)
-BASE_REWARD_FACTOR: 32
+# 2**6 (= 64)
+BASE_REWARD_FACTOR: 64
 # 2**9 (= 512)
-WHISTLEBLOWING_REWARD_QUOTIENT: 512
+WHISTLEBLOWER_REWARD_QUOTIENT: 512
 # 2**3 (= 8)
 PROPOSER_REWARD_QUOTIENT: 8
 # 2**25 (= 33,554,432)
@@ -120,9 +120,9 @@ MAX_TRANSFERS: 0
 
 # Signature domains
 # ---------------------------------------------------------------
-DOMAIN_BEACON_PROPOSER: 0
-DOMAIN_RANDAO: 1
-DOMAIN_ATTESTATION: 2
-DOMAIN_DEPOSIT: 3
-DOMAIN_VOLUNTARY_EXIT: 4
-DOMAIN_TRANSFER: 5
+DOMAIN_BEACON_PROPOSER: 0x00000000
+DOMAIN_RANDAO: 0x01000000
+DOMAIN_ATTESTATION: 0x02000000
+DOMAIN_DEPOSIT: 0x03000000
+DOMAIN_VOLUNTARY_EXIT: 0x04000000
+DOMAIN_TRANSFER: 0x05000000
\ No newline at end of file
diff --git a/deposit_contract/contracts/validator_registration.json b/deposit_contract/contracts/validator_registration.json
index 08d57f80a5..3a6bfb2d86 100644
--- a/deposit_contract/contracts/validator_registration.json
+++ b/deposit_contract/contracts/validator_registration.json
@@ -1 +1 @@
-{"abi": [{"name": "Deposit", "inputs": [{"type": "bytes", "name": "pubkey", "indexed": false}, {"type": "bytes", "name": "withdrawal_credentials", "indexed": false}, {"type": "bytes", "name": "amount", "indexed": false}, {"type": "bytes", "name": "signature", "indexed": false}, {"type": "bytes", "name": "merkle_tree_index", "indexed": false}], "anonymous": false, "type": "event"}, {"name": "Eth2Genesis", "inputs": [{"type": "bytes32", "name": "deposit_root", "indexed": false}, {"type": "bytes", "name": "deposit_count", "indexed": false}, {"type": "bytes", "name": "time", "indexed": false}], "anonymous": false, "type": "event"}, {"outputs": [], "inputs": [], "constant": false, "payable": false, "type": "constructor"}, {"name": "to_little_endian_64", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [{"type": "uint256", "name": "value"}], "constant": true, "payable": false, "type": "function", "gas": 7077}, {"name": "get_deposit_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 79221}, {"name": "get_deposit_count", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 11026}, {"name": "deposit", "outputs": [], "inputs": [{"type": "bytes", "name": "pubkey"}, {"type": "bytes", "name": "withdrawal_credentials"}, {"type": "bytes", "name": "signature"}], "constant": false, "payable": true, "type": "function", "gas": 445994}, {"name": "chainStarted", "outputs": [{"type": "bool", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 603}], "bytecode": 
"0x600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052341561009e57600080fd5b6101406000601f818352015b600061014051602081106100bd57600080fd5b600060c052602060c020015460208261016001015260208101905061014051602081106100e957600080fd5b600060c052602060c020015460208261016001015260208101905080610160526101609050602060c0825160208401600060025af161012757600080fd5b60c0519050606051600161014051018060405190131561014657600080fd5b809190121561015457600080fd5b6020811061016157600080fd5b600060c052602060c02001555b81516001018083528114156100aa575b505061140756600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a0526380673289600051141561026b57602060046101403734156100b457600080fd5b67ffffffffffffffff6101405111156100cc57600080fd5b60006101605261014051610180526101a060006008818352015b6101605160086000811215610103578060000360020a820461010a565b8060020a82025b905090506101605260ff61018051166101c052610160516101c0516101605101101561013557600080fd5b6101c051610160510161016052610180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8600081121561017e578060000360020a8204610185565b8060020a82025b90509050610180525b81516001018083528114156100e6575b505060186008602082066101e001602082840111156101bc57600080fd5b60208061020082610160600060046015f15050818152809050905090508051602001806102a0828460006004600a8704601201f16101f957600080fd5b50506102a05160206001820306601f82010390506103006102a0516008818352015b8261030051111561022b57610247565b6000610300516102c001535b815160010180835281141561021b575b50505060206102805260406102a0510160206001820306601f8
201039050610280f3005b63c5f2892f60005114156103c357341561028457600080fd5b6000610140526002546101605261018060006020818352015b600160016101605116141561031e57600061018051602081106102bf57600080fd5b600160c052602060c02001546020826102200101526020810190506101405160208261022001015260208101905080610220526102209050602060c0825160208401600060025af161031057600080fd5b60c05190506101405261038c565b6000610140516020826101a0010152602081019050610180516020811061034457600080fd5b600060c052602060c02001546020826101a0010152602081019050806101a0526101a09050602060c0825160208401600060025af161038257600080fd5b60c0519050610140525b610160600261039a57600080fd5b60028151048152505b815160010180835281141561029d575b50506101405160005260206000f3005b63621fd13060005114156104995734156103dc57600080fd5b60606101c060246380673289610140526002546101605261015c6000305af161040457600080fd5b6101e0805160200180610260828460006004600a8704601201f161042757600080fd5b50506102605160206001820306601f82010390506102c0610260516008818352015b826102c051111561045957610475565b60006102c05161028001535b8151600101808352811415610449575b5050506020610240526040610260510160206001820306601f8201039050610240f3005b63c47e300d600051141561125657606060046101403760506004356004016101a03760306004356004013511156104cf57600080fd5b60406024356004016102203760206024356004013511156104ef57600080fd5b608060443560040161028037606060443560040135111561050f57600080fd5b63ffffffff6002541061052157600080fd5b60306101a0511461053157600080fd5b6020610220511461054157600080fd5b6060610280511461055157600080fd5b633b9aca00610340526103405161056757600080fd5b61034051340461032052633b9aca0061032051101561058557600080fd5b6060610440602463806732896103c052610320516103e0526103dc6000305af16105ae57600080fd5b610460805160200180610360828460006004600a8704601201f16105d157600080fd5b50506002546104a05260006104c0526104a05160016104a0510110156105f657600080fd5b60016104a051016104e05261050060006020818352015b600160016104e05116141561062157610674565b6104c060605160018251018060405190131561063c57600080fd5b809190121561064a57600080f
d5b8152506104e0600261065b57600080fd5b60028151048152505b815160010180835281141561060d575b505060006101a06030806020846105e001018260208501600060046016f1505080518201915050600060106020820661056001602082840111156106b757600080fd5b60208061058082610520600060046015f15050818152809050905090506010806020846105e001018260208501600060046013f1505080518201915050806105e0526105e09050602060c0825160208401600060025af161071757600080fd5b60c051905061054052600060006040602082066106800161028051828401111561074057600080fd5b6060806106a0826020602088068803016102800160006004601bf1505081815280905090509050602060c0825160208401600060025af161078057600080fd5b60c051905060208261088001015260208101905060006040602060208206610740016102805182840111156107b457600080fd5b606080610760826020602088068803016102800160006004601bf150508181528090509050905060208060208461080001018260208501600060046015f15050805182019150506105205160208261080001015260208101905080610800526108009050602060c0825160208401600060025af161083157600080fd5b60c051905060208261088001015260208101905080610880526108809050602060c0825160208401600060025af161086857600080fd5b60c051905061066052600060006105405160208261092001015260208101905061022060208060208461092001018260208501600060046015f150508051820191505080610920526109209050602060c0825160208401600060025af16108ce57600080fd5b60c0519050602082610aa00101526020810190506000610360600880602084610a2001018260208501600060046012f150508051820191505060006018602082066109a0016020828401111561092357600080fd5b6020806109c082610520600060046015f1505081815280905090509050601880602084610a2001018260208501600060046014f150508051820191505061066051602082610a2001015260208101905080610a2052610a209050602060c0825160208401600060025af161099657600080fd5b60c0519050602082610aa001015260208101905080610aa052610aa09050602060c0825160208401600060025af16109cd57600080fd5b60c051905061090052610b2060006020818352015b6104c051610b20511215610a62576000610b205160208110610a0357600080fd5b600160c052602060c0200154602082610b4001015260208101905061090051602082610b4001015260208101905
080610b4052610b409050602060c0825160208401600060025af1610a5457600080fd5b60c051905061090052610a67565b610a78565b5b81516001018083528114156109e2575b5050610900516104c05160208110610a8f57600080fd5b600160c052602060c02001556002805460018254011015610aaf57600080fd5b60018154018155506060610c4060246380673289610bc0526104a051610be052610bdc6000305af1610ae057600080fd5b610c60805160200180610ca0828460006004600a8704601201f1610b0357600080fd5b505060a0610d2052610d2051610d60526101a0805160200180610d2051610d6001828460006004600a8704601201f1610b3b57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516040818352015b83610d0051101515610b7957610b96565b6000610d00516020850101535b8151600101808352811415610b68575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610d8052610220805160200180610d2051610d6001828460006004600a8704601201f1610bed57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516020818352015b83610d0051101515610c2b57610c48565b6000610d00516020850101535b8151600101808352811415610c1a575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610da052610360805160200180610d2051610d6001828460006004600a8704601201f1610c9f57600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516020818352015b83610d0051101515610cdd57610cfa565b6000610d00516020850101535b8151600101808352811415610ccc575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610dc052610280805160200180610d2051610d6001828460006004600a8704601201f1610d5157600080fd5b5050610d2051610d60015160206001820306601f8201039050610d2051610d6001610d0081516060818352015b83610d0051101515610d8f57610dac565b6000610d00516020850101535b8151600101808352811415610d7e575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d2052610d2051610de052610ca0805160200180610d2051610d6001828460006004600a8704601201f1610e0357600080fd5b5050610d2051610d60015160206001820306601f820
1039050610d2051610d6001610d0081516020818352015b83610d0051101515610e4157610e5e565b6000610d00516020850101535b8151600101808352811415610e30575b505050506020610d2051610d60015160206001820306601f8201039050610d20510101610d20527fdc5fc95703516abd38fa03c3737ff3b52dc52347055c8028460fdf5bbe2f12ce610d2051610d60a164077359400061032051101515611254576003805460018254011015610ed357600080fd5b60018154018155506201000060035414156112535742610e205242610e405262015180610eff57600080fd5b62015180610e405106610e20511015610f1757600080fd5b42610e405262015180610f2957600080fd5b62015180610e405106610e2051036202a30042610e205242610e405262015180610f5257600080fd5b62015180610e405106610e20511015610f6a57600080fd5b42610e405262015180610f7c57600080fd5b62015180610e405106610e205103011015610f9657600080fd5b6202a30042610e205242610e405262015180610fb157600080fd5b62015180610e405106610e20511015610fc957600080fd5b42610e405262015180610fdb57600080fd5b62015180610e405106610e20510301610e00526020610ee0600463c5f2892f610e8052610e9c6000305af161100f57600080fd5b610ee051610e60526060610f8060246380673289610f0052600254610f2052610f1c6000305af161103f57600080fd5b610fa0805160200180610fe0828460006004600a8704601201f161106257600080fd5b505060606110c06024638067328961104052610e00516110605261105c6000305af161108d57600080fd5b6110e0805160200180611120828460006004600a8704601201f16110b057600080fd5b5050610e60516111e05260606111a0526111a05161120052610fe08051602001806111a0516111e001828460006004600a8704601201f16110f057600080fd5b50506111a0516111e0015160206001820306601f82010390506111a0516111e00161118081516020818352015b836111805110151561112e5761114b565b6000611180516020850101535b815160010180835281141561111d575b5050505060206111a0516111e0015160206001820306601f82010390506111a05101016111a0526111a051611220526111208051602001806111a0516111e001828460006004600a8704601201f16111a257600080fd5b50506111a0516111e0015160206001820306601f82010390506111a0516111e00161118081516020818352015b83611180511015156111e0576111fd565b6000611180516020850101535b81516001018083528114156111cf575b5050505
060206111a0516111e0015160206001820306601f82010390506111a05101016111a0527f08b71ef3f1b58f7a23ffb82e27f12f0888c8403f1ceb0ea7ea26b274e2189d4c6111a0516111e0a160016004555b5b005b63845980e8600051141561127c57341561126f57600080fd5b60045460005260206000f3005b60006000fd5b61018561140703610185600039610185611407036000f3"}
\ No newline at end of file
+{"abi": [{"name": "DepositEvent", "inputs": [{"type": "bytes", "name": "pubkey", "indexed": false}, {"type": "bytes", "name": "withdrawal_credentials", "indexed": false}, {"type": "bytes", "name": "amount", "indexed": false}, {"type": "bytes", "name": "signature", "indexed": false}, {"type": "bytes", "name": "index", "indexed": false}], "anonymous": false, "type": "event"}, {"outputs": [], "inputs": [], "constant": false, "payable": false, "type": "constructor"}, {"name": "get_hash_tree_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 91674}, {"name": "get_deposit_count", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 10433}, {"name": "deposit", "outputs": [], "inputs": [{"type": "bytes", "name": "pubkey"}, {"type": "bytes", "name": "withdrawal_credentials"}, {"type": "bytes", "name": "signature"}], "constant": false, "payable": true, "type": "function", "gas": 1334417}], "bytecode": 
"0x740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052341561009857600080fd5b6101406000601f818352015b600061014051602081106100b757600080fd5b600260c052602060c020015460208261016001015260208101905061014051602081106100e357600080fd5b600260c052602060c020015460208261016001015260208101905080610160526101609050602060c0825160208401600060025af161012157600080fd5b60c0519050606051600161014051018060405190131561014057600080fd5b809190121561014e57600080fd5b6020811061015b57600080fd5b600260c052602060c02001555b81516001018083528114156100a4575b50506112f956600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052600015610277575b6101605261014052600061018052610140516101a0526101c060006008818352015b61018051600860008112156100da578060000360020a82046100e1565b8060020a82025b905090506101805260ff6101a051166101e052610180516101e0516101805101101561010c57600080fd5b6101e0516101805101610180526101a0517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff86000811215610155578060000360020a820461015c565b8060020a82025b905090506101a0525b81516001018083528114156100bd575b50506018600860208206610200016020828401111561019357600080fd5b60208061022082610180600060046015f15050818152809050905090508051602001806102c0828460006004600a8704601201f16101d057600080fd5b50506102c05160206001820306601f82010390506103206102c0516008818352015b826103205111156102025761021e565b6000610320516102e001535b81516001018083528114156101f2575b50505060206102a05260406102c0510160206001820306601f8201039050610280525b6000610280511115156102535761026f565b602061028051036102a00151602061028051036
1028052610241565b610160515650005b63863a311b600051141561050857341561029057600080fd5b6000610140526101405161016052600154610180526101a060006020818352015b60016001610180511614156103325760006101a051602081106102d357600080fd5b600060c052602060c02001546020826102400101526020810190506101605160208261024001015260208101905080610240526102409050602060c0825160208401600060025af161032457600080fd5b60c0519050610160526103a0565b6000610160516020826101c00101526020810190506101a0516020811061035857600080fd5b600260c052602060c02001546020826101c0010152602081019050806101c0526101c09050602060c0825160208401600060025af161039657600080fd5b60c0519050610160525b61018060026103ae57600080fd5b60028151048152505b81516001018083528114156102b1575b505060006101605160208261044001015260208101905061014051610160516101805163806732896102c0526001546102e0526102e0516006580161009b565b506103405260006103a0525b6103405160206001820306601f82010390506103a0511015156104355761044e565b6103a05161036001526103a0516020016103a052610413565b61018052610160526101405261034060088060208461044001018260208501600060046012f150508051820191505060006018602082066103c0016020828401111561049957600080fd5b6020806103e082610140600060046015f150508181528090509050905060188060208461044001018260208501600060046014f150508051820191505080610440526104409050602060c0825160208401600060025af16104f957600080fd5b60c051905060005260206000f3005b63621fd130600051141561061a57341561052157600080fd5b63806732896101405260015461016052610160516006580161009b565b506101c0526000610220525b6101c05160206001820306601f82010390506102205110151561056c57610585565b610220516101e00152610220516020016102205261054a565b6101c0805160200180610280828460006004600a8704601201f16105a857600080fd5b50506102805160206001820306601f82010390506102e0610280516008818352015b826102e05111156105da576105f6565b60006102e0516102a001535b81516001018083528114156105ca575b5050506020610260526040610280510160206001820306601f8201039050610260f3005b63c47e300d600051141561117457606060046101403760506004356004016101a037603060043560040135111561065057600080f
d5b604060243560040161022037602060243560040135111561067057600080fd5b608060443560040161028037606060443560040135111561069057600080fd5b63ffffffff600154106106a257600080fd5b633b9aca0061034052610340516106b857600080fd5b61034051340461032052633b9aca006103205110156106d657600080fd5b60306101a051146106e657600080fd5b602061022051146106f657600080fd5b6060610280511461070657600080fd5b6101a0516101c0516101e05161020051610220516102405161026051610280516102a0516102c0516102e05161030051610320516103405161036051610380516103a05163806732896103c052610320516103e0526103e0516006580161009b565b506104405260006104a0525b6104405160206001820306601f82010390506104a051101515610796576107af565b6104a05161046001526104a0516020016104a052610774565b6103a05261038052610360526103405261032052610300526102e0526102c0526102a05261028052610260526102405261022052610200526101e0526101c0526101a052610440805160200180610360828460006004600a8704601201f161081657600080fd5b50506101a0516101c0516101e05161020051610220516102405161026051610280516102a0516102c0516102e05161030051610320516103405161036051610380516103a0516103c0516103e05161040051610420516104405161046051610480516104a05163806732896104c0526001546104e0526104e0516006580161009b565b506105405260006105a0525b6105405160206001820306601f82010390506105a0511015156108c7576108e0565b6105a05161056001526105a0516020016105a0526108a5565b6104a05261048052610460526104405261042052610400526103e0526103c0526103a05261038052610360526103405261032052610300526102e0526102c0526102a05261028052610260526102405261022052610200526101e0526101c0526101a0526105408051602001806105c0828460006004600a8704601201f161096757600080fd5b505060a06106405261064051610680526101a08051602001806106405161068001828460006004600a8704601201f161099f57600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516040818352015b83610620511015156109dd576109fa565b6000610620516020850101535b81516001018083528114156109cc575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106a052610220805160200180610640516
1068001828460006004600a8704601201f1610a5157600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610a8f57610aac565b6000610620516020850101535b8151600101808352811415610a7e575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106c0526103608051602001806106405161068001828460006004600a8704601201f1610b0357600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610b4157610b5e565b6000610620516020850101535b8151600101808352811415610b30575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106e0526102808051602001806106405161068001828460006004600a8704601201f1610bb557600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516060818352015b8361062051101515610bf357610c10565b6000610620516020850101535b8151600101808352811415610be2575b50505050602061064051610680015160206001820306601f82010390506106405101016106405261064051610700526105c08051602001806106405161068001828460006004600a8704601201f1610c6757600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610ca557610cc2565b6000610620516020850101535b8151600101808352811415610c94575b50505050602061064051610680015160206001820306601f8201039050610640510101610640527f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c561064051610680a160006107205260006101a06030806020846107e001018260208501600060046016f150508051820191505060006010602082066107600160208284011115610d5957600080fd5b60208061078082610720600060046015f15050818152809050905090506010806020846107e001018260208501600060046013f1505080518201915050806107e0526107e09050602060c0825160208401600060025af1610db957600080fd5b60c0519050610740526000600060406020820661088001610280518284011115610de257600080fd5b6060806108a0826020602088068803016102800160006004601bf1505081815280905090509050602060c0825160208401600060025af1610e2257600
080fd5b60c0519050602082610a800101526020810190506000604060206020820661094001610280518284011115610e5657600080fd5b606080610960826020602088068803016102800160006004601bf1505081815280905090509050602080602084610a0001018260208501600060046015f150508051820191505061072051602082610a0001015260208101905080610a0052610a009050602060c0825160208401600060025af1610ed357600080fd5b60c0519050602082610a8001015260208101905080610a8052610a809050602060c0825160208401600060025af1610f0a57600080fd5b60c0519050610860526000600061074051602082610b20010152602081019050610220602080602084610b2001018260208501600060046015f150508051820191505080610b2052610b209050602060c0825160208401600060025af1610f7057600080fd5b60c0519050602082610ca00101526020810190506000610360600880602084610c2001018260208501600060046012f15050805182019150506000601860208206610ba00160208284011115610fc557600080fd5b602080610bc082610720600060046015f1505081815280905090509050601880602084610c2001018260208501600060046014f150508051820191505061086051602082610c2001015260208101905080610c2052610c209050602060c0825160208401600060025af161103857600080fd5b60c0519050602082610ca001015260208101905080610ca052610ca09050602060c0825160208401600060025af161106f57600080fd5b60c0519050610b0052600180546001825401101561108c57600080fd5b6001815401815550600154610d2052610d4060006020818352015b60016001610d20511614156110dc57610b0051610d4051602081106110cb57600080fd5b600060c052602060c0200155611170565b6000610d4051602081106110ef57600080fd5b600060c052602060c0200154602082610d60010152602081019050610b0051602082610d6001015260208101905080610d6052610d609050602060c0825160208401600060025af161114057600080fd5b60c0519050610b0052610d20600261115757600080fd5b60028151048152505b81516001018083528114156110a7575b5050005b60006000fd5b61017f6112f90361017f60003961017f6112f9036000f3"}
\ No newline at end of file
diff --git a/deposit_contract/contracts/validator_registration.v.py b/deposit_contract/contracts/validator_registration.v.py
index 1d475311ae..bad619b076 100644
--- a/deposit_contract/contracts/validator_registration.v.py
+++ b/deposit_contract/contracts/validator_registration.v.py
@@ -1,140 +1,104 @@
 MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000  # Gwei
-FULL_DEPOSIT_AMOUNT: constant(uint256) = 32000000000  # Gwei
-CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant(uint256) = 65536  # 2**16
 DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32
-SECONDS_PER_DAY: constant(uint256) = 86400
-MAX_64_BIT_VALUE: constant(uint256) = 18446744073709551615  # 2**64 - 1
+MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1
 PUBKEY_LENGTH: constant(uint256) = 48  # bytes
 WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32  # bytes
+AMOUNT_LENGTH: constant(uint256) = 8  # bytes
 SIGNATURE_LENGTH: constant(uint256) = 96  # bytes
-MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1
 
-Deposit: event({
+DepositEvent: event({
     pubkey: bytes[48],
     withdrawal_credentials: bytes[32],
     amount: bytes[8],
     signature: bytes[96],
-    merkle_tree_index: bytes[8],
+    index: bytes[8],
 })
-Eth2Genesis: event({deposit_root: bytes32, deposit_count: bytes[8], time: bytes[8]})
 
-zerohashes: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]
 branch: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]
 deposit_count: uint256
-full_deposit_count: uint256
-chainStarted: public(bool)
-
 
+# Compute hashes in empty sparse Merkle tree
+zero_hashes: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]
 @public
 def __init__():
     for i in range(DEPOSIT_CONTRACT_TREE_DEPTH - 1):
-        self.zerohashes[i+1] = sha256(concat(self.zerohashes[i], self.zerohashes[i]))
+        self.zero_hashes[i + 1] = sha256(concat(self.zero_hashes[i], self.zero_hashes[i]))
 
 
-@public
+@private
 @constant
 def to_little_endian_64(value: uint256) -> bytes[8]:
-    assert value <= MAX_64_BIT_VALUE
-
-    # array access for bytes[] not currently supported in vyper so
-    # reversing bytes using bitwise uint256 manipulations
+    # Reversing bytes using bitwise uint256 manipulations
+    # Note: array accesses of bytes[] are not currently supported in Vyper
+    # Note: this function is only called when `value < 2**64`
     y: uint256 = 0
     x: uint256 = value
-    for i in range(8):
+    for _ in range(8):
         y = shift(y, 8)
         y = y + bitwise_and(x, 255)
         x = shift(x, -8)
-
     return slice(convert(y, bytes32), start=24, len=8)
 
 
 @public
 @constant
-def get_deposit_root() -> bytes32:
-    root: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+def get_hash_tree_root() -> bytes32:
+    zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+    node: bytes32 = zero_bytes32
     size: uint256 = self.deposit_count
-    for h in range(DEPOSIT_CONTRACT_TREE_DEPTH):
-        if bitwise_and(size, 1) == 1:
-            root = sha256(concat(self.branch[h], root))
+    for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
+        if bitwise_and(size, 1) == 1:  # More gas efficient than `size % 2 == 1`
+            node = sha256(concat(self.branch[height], node))
         else:
-            root = sha256(concat(root, self.zerohashes[h]))
+            node = sha256(concat(node, self.zero_hashes[height]))
         size /= 2
-    return root
+    return sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
+
 
 @public
 @constant
 def get_deposit_count() -> bytes[8]:
     return self.to_little_endian_64(self.deposit_count)
 
+
 @payable
 @public
 def deposit(pubkey: bytes[PUBKEY_LENGTH],
             withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH],
             signature: bytes[SIGNATURE_LENGTH]):
-    # Prevent edge case in computing `self.branch` when `self.deposit_count == MAX_DEPOSIT_COUNT`
-    # NOTE: reaching this point with the constants as currently defined is impossible due to the
-    # uni-directional nature of transfers from eth1 to eth2 and the total ether supply (< 130M).
+    # Avoid overflowing the Merkle tree (and prevent edge case in computing `self.branch`)
     assert self.deposit_count < MAX_DEPOSIT_COUNT
 
+    # Validate deposit data
+    deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei")
+    assert deposit_amount >= MIN_DEPOSIT_AMOUNT
     assert len(pubkey) == PUBKEY_LENGTH
     assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH
     assert len(signature) == SIGNATURE_LENGTH
 
-    deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei")
-    assert deposit_amount >= MIN_DEPOSIT_AMOUNT
+    # Emit `DepositEvent` log
     amount: bytes[8] = self.to_little_endian_64(deposit_amount)
+    log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))
 
-    index: uint256 = self.deposit_count
-
-    # add deposit to merkle tree
-    i: int128 = 0
-    size: uint256 = index + 1
-    for _ in range(DEPOSIT_CONTRACT_TREE_DEPTH):
-        if bitwise_and(size, 1) == 1:
-            break
-        i += 1
-        size /= 2
-
-    zero_bytes_32: bytes32
-    pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes_32, start=0, len=16)))
+    # Compute `DepositData` hash tree root
+    zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+    pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))
     signature_root: bytes32 = sha256(concat(
         sha256(slice(signature, start=0, len=64)),
-        sha256(concat(slice(signature, start=64, len=32), zero_bytes_32))
+        sha256(concat(slice(signature, start=64, len=SIGNATURE_LENGTH - 64), zero_bytes32)),
     ))
-    value: bytes32 = sha256(concat(
+    node: bytes32 = sha256(concat(
         sha256(concat(pubkey_root, withdrawal_credentials)),
-        sha256(concat(
-            amount,
-            slice(zero_bytes_32, start=0, len=24),
-            signature_root,
-        ))
+        sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),
     ))
-    for j in range(DEPOSIT_CONTRACT_TREE_DEPTH):
-        if j < i:
-            value = sha256(concat(self.branch[j], value))
-        else:
-            break
-    self.branch[i] = value
 
+    # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node)
     self.deposit_count += 1
-    log.Deposit(
-        pubkey,
-        withdrawal_credentials,
-        amount,
-        signature,
-        self.to_little_endian_64(index),
-    )
-
-    if deposit_amount >= FULL_DEPOSIT_AMOUNT:
-        self.full_deposit_count += 1
-        if self.full_deposit_count == CHAIN_START_FULL_DEPOSIT_THRESHOLD:
-            timestamp_day_boundary: uint256 = (
-                as_unitless_number(block.timestamp) -
-                as_unitless_number(block.timestamp) % SECONDS_PER_DAY +
-                2 * SECONDS_PER_DAY
-            )
-            new_deposit_root: bytes32 = self.get_deposit_root()
-            log.Eth2Genesis(new_deposit_root,
-                            self.to_little_endian_64(self.deposit_count),
-                            self.to_little_endian_64(timestamp_day_boundary))
-            self.chainStarted = True
+    size: uint256 = self.deposit_count
+    for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
+        if bitwise_and(size, 1) == 1:  # More gas efficient than `size % 2 == 1`
+            self.branch[height] = node
+            break
+        node = sha256(concat(self.branch[height], node))
+        size /= 2
+
diff --git a/deposit_contract/requirements-testing.txt b/deposit_contract/requirements-testing.txt
index b3a90a88a7..280d7e5279 100644
--- a/deposit_contract/requirements-testing.txt
+++ b/deposit_contract/requirements-testing.txt
@@ -1,5 +1,5 @@
 eth-tester[py-evm]==0.1.0b39
-vyper==0.1.0b9
+vyper==0.1.0b10
 web3==5.0.0b2
 pytest==3.6.1
 ../test_libs/pyspec
diff --git a/deposit_contract/tests/contracts/conftest.py b/deposit_contract/tests/contracts/conftest.py
index 69ece247d6..d4c7da9aac 100644
--- a/deposit_contract/tests/contracts/conftest.py
+++ b/deposit_contract/tests/contracts/conftest.py
@@ -26,7 +26,6 @@
 # Constants
 MIN_DEPOSIT_AMOUNT = 1000000000  # Gwei
 FULL_DEPOSIT_AMOUNT = 32000000000  # Gwei
-CHAIN_START_FULL_DEPOSIT_THRESHOLD = 65536  # 2**16
 DEPOSIT_CONTRACT_TREE_DEPTH = 32
 TWO_TO_POWER_OF_TREE_DEPTH = 2**DEPOSIT_CONTRACT_TREE_DEPTH
 
@@ -63,45 +62,6 @@ def registration_contract(w3, tester):
     return registration_deployed
 
 
-@pytest.fixture(scope="session")
-def chain_start_full_deposit_thresholds():
-    return [randint(1, 5), randint(6, 10), randint(11, 15)]
-
-
-@pytest.fixture(params=[0, 1, 2])
-def modified_registration_contract(
-        request,
-        w3,
-        tester,
-        chain_start_full_deposit_thresholds):
-    # Set CHAIN_START_FULL_DEPOSIT_THRESHOLD to different threshold t
-    registration_code = get_deposit_contract_code()
-    t = str(chain_start_full_deposit_thresholds[request.param])
-    modified_registration_code = re.sub(
-        r'CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant\(uint256\) = [0-9]+',
-        'CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant(uint256) = ' + t,
-        registration_code,
-    )
-    assert modified_registration_code != registration_code
-    contract_bytecode = compiler.compile_code(modified_registration_code)['bytecode']
-    contract_abi = compiler.mk_full_signature(modified_registration_code)
-    registration = w3.eth.contract(
-        abi=contract_abi,
-        bytecode=contract_bytecode)
-    tx_hash = registration.constructor().transact()
-    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
-    registration_deployed = w3.eth.contract(
-        address=tx_receipt.contractAddress,
-        abi=contract_abi
-    )
-    setattr(
-        registration_deployed,
-        'chain_start_full_deposit_threshold',
-        chain_start_full_deposit_thresholds[request.param]
-    )
-    return registration_deployed
-
-
 @pytest.fixture
 def assert_tx_failed(tester):
     def assert_tx_failed(function_to_test, exception=eth_tester.exceptions.TransactionFailed):
diff --git a/deposit_contract/tests/contracts/test_deposit.py b/deposit_contract/tests/contracts/test_deposit.py
index 8492d63478..1c96d074ec 100644
--- a/deposit_contract/tests/contracts/test_deposit.py
+++ b/deposit_contract/tests/contracts/test_deposit.py
@@ -15,26 +15,12 @@
     DepositData,
 )
 from eth2spec.utils.hash_function import hash
+from eth2spec.utils.ssz.ssz_typing import List
 from eth2spec.utils.ssz.ssz_impl import (
     hash_tree_root,
 )
 
 
-def compute_merkle_root(leaf_nodes):
-    assert len(leaf_nodes) >= 1
-    empty_node = b'\x00' * 32
-    child_nodes = leaf_nodes[:]
-    for _ in range(DEPOSIT_CONTRACT_TREE_DEPTH):
-        parent_nodes = []
-        if len(child_nodes) % 2 == 1:
-            child_nodes.append(empty_node)
-        for j in range(0, len(child_nodes), 2):
-            parent_nodes.append(hash(child_nodes[j] + child_nodes[j + 1]))
-        child_nodes = parent_nodes
-        empty_node = hash(empty_node + empty_node)
-    return child_nodes[0]
-
-
 @pytest.fixture
 def deposit_input():
     """
@@ -49,28 +35,6 @@ def deposit_input():
     )
 
 
-@pytest.mark.parametrize(
-    'value,success',
-    [
-        (0, True),
-        (10, True),
-        (55555, True),
-        (2**64 - 1, True),
-        (2**64, False),
-    ]
-)
-def test_to_little_endian_64(registration_contract, value, success, assert_tx_failed):
-    call = registration_contract.functions.to_little_endian_64(value)
-
-    if success:
-        little_endian_64 = call.call()
-        assert little_endian_64 == (value).to_bytes(8, 'little')
-    else:
-        assert_tx_failed(
-            lambda: call.call()
-        )
-
-
 @pytest.mark.parametrize(
     'success,deposit_amount',
     [
@@ -132,8 +96,8 @@ def test_deposit_inputs(registration_contract,
         )
 
 
-def test_deposit_log(registration_contract, a0, w3, deposit_input):
-    log_filter = registration_contract.events.Deposit.createFilter(
+def test_deposit_event_log(registration_contract, a0, w3, deposit_input):
+    log_filter = registration_contract.events.DepositEvent.createFilter(
         fromBlock='latest',
     )
 
@@ -151,16 +115,16 @@ def test_deposit_log(registration_contract, a0, w3, deposit_input):
         assert log['withdrawal_credentials'] == deposit_input[1]
         assert log['amount'] == deposit_amount_list[i].to_bytes(8, 'little')
         assert log['signature'] == deposit_input[2]
-        assert log['merkle_tree_index'] == i.to_bytes(8, 'little')
+        assert log['index'] == i.to_bytes(8, 'little')
 
 
 def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input):
-    log_filter = registration_contract.events.Deposit.createFilter(
+    log_filter = registration_contract.events.DepositEvent.createFilter(
         fromBlock='latest',
     )
 
     deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)]
-    leaf_nodes = []
+    deposit_data_list = []
     for i in range(0, 10):
         tx_hash = registration_contract.functions.deposit(
             *deposit_input,
@@ -172,65 +136,14 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input
         assert len(logs) == 1
         log = logs[0]['args']
 
-        assert log["merkle_tree_index"] == i.to_bytes(8, 'little')
+        assert log["index"] == i.to_bytes(8, 'little')
 
-        deposit_data = DepositData(
+        deposit_data_list.append(DepositData(
             pubkey=deposit_input[0],
             withdrawal_credentials=deposit_input[1],
             amount=deposit_amount_list[i],
             signature=deposit_input[2],
-        )
-        hash_tree_root_result = hash_tree_root(deposit_data)
-        leaf_nodes.append(hash_tree_root_result)
-        root = compute_merkle_root(leaf_nodes)
-        assert root == registration_contract.functions.get_deposit_root().call()
-
-
-def test_chain_start(modified_registration_contract, w3, assert_tx_failed, deposit_input):
-    t = getattr(modified_registration_contract, 'chain_start_full_deposit_threshold')
-    # CHAIN_START_FULL_DEPOSIT_THRESHOLD is set to t
-    min_deposit_amount = MIN_DEPOSIT_AMOUNT * eth_utils.denoms.gwei  # in wei
-    full_deposit_amount = FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei
-    log_filter = modified_registration_contract.events.Eth2Genesis.createFilter(
-        fromBlock='latest',
-    )
+        ))
 
-    index_not_full_deposit = randint(0, t - 1)
-    for i in range(t):
-        if i == index_not_full_deposit:
-            # Deposit with value below FULL_DEPOSIT_AMOUNT
-            modified_registration_contract.functions.deposit(
-                *deposit_input,
-            ).transact({"value": min_deposit_amount})
-            logs = log_filter.get_new_entries()
-            # Eth2Genesis event should not be triggered
-            assert len(logs) == 0
-        else:
-            # Deposit with value FULL_DEPOSIT_AMOUNT
-            modified_registration_contract.functions.deposit(
-                *deposit_input,
-            ).transact({"value": full_deposit_amount})
-            logs = log_filter.get_new_entries()
-            # Eth2Genesis event should not be triggered
-            assert len(logs) == 0
-
-    # Make 1 more deposit with value FULL_DEPOSIT_AMOUNT to trigger Eth2Genesis event
-    modified_registration_contract.functions.deposit(
-        *deposit_input,
-    ).transact({"value": full_deposit_amount})
-    logs = log_filter.get_new_entries()
-    assert len(logs) == 1
-    timestamp = int(w3.eth.getBlock(w3.eth.blockNumber)['timestamp'])
-    timestamp_day_boundary = timestamp + (86400 - timestamp % 86400) + 86400
-    log = logs[0]['args']
-    assert log['deposit_root'] == modified_registration_contract.functions.get_deposit_root().call()
-    assert int.from_bytes(log['time'], byteorder='little') == timestamp_day_boundary
-    assert modified_registration_contract.functions.chainStarted().call() is True
-
-    # Make 1 deposit with value FULL_DEPOSIT_AMOUNT and
-    # check that Eth2Genesis event is not triggered
-    modified_registration_contract.functions.deposit(
-        *deposit_input,
-    ).transact({"value": full_deposit_amount})
-    logs = log_filter.get_new_entries()
-    assert len(logs) == 0
+        root = hash_tree_root(List[DepositData, 2**32](*deposit_data_list))
+        assert root == registration_contract.functions.get_hash_tree_root().call()
diff --git a/scripts/README.md b/scripts/README.md
index 25b46decfe..9d5849053f 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -1,18 +1,18 @@
 # Building pyspecs from specs.md
 
-The benefit of the particular spec design is that the given markdown files can be converted to a `spec.py` file for the purposes of testing and linting. The result of this is that bugs are discovered and patched more quickly.
+The benefit of the particular spec design is that the given Markdown files can be converted to a `spec.py` file for the purposes of testing and linting. As a result, bugs are discovered and patched more quickly.
 
-Specs can be built from either a single markdown document or multiple files that must be combined in a given order. Given 2 spec objects, `build_spec.combine_spec_objects` will combine them into a single spec object which, subsequently, can be converted into a `specs.py`.
+Specs can be built from either a single Markdown document or multiple files that must be combined in a given order. Given 2 spec objects, `build_spec.combine_spec_objects` will combine them into a single spec object which, subsequently, can be converted into a `specs.py`.
 
 ## Usage
 
-For usage of the spec builder run `python3 -m build_spec --help`.
+For usage of the spec builder, run `python3 -m build_spec --help`.
 
 ## `@Labels` and inserts
 
 The functioning of the spec combiner is largely automatic in that given `spec0.md` and `spec1.md`, SSZ Objects will be extended and old functions will be overwritten. Extra functionality is provided for more granular control over how files are combined. In the event that only a small portion of code is to be added to an existing function, insert functionality is provided. This saves having to completely redefine the old function from `spec0.md` in `spec1.md`. This is done by marking where the change is to occur in the old file and marking which code is to be inserted in the new file. This is done as follows:
 
-* In the old file, a label is added as a python comment marking where the code is to be inserted. This would appear as follows in `spec0.md`:
+* In the old file, a label is added as a Python comment marking where the code is to be inserted. This would appear as follows in `spec0.md`:
 
 ```python
 def foo(x):
@@ -21,7 +21,7 @@ def foo(x):
     return x
 ```
 
-* In spec1, the new code could then be inserted by having a code-block that looked as follows:
+* In spec1, the new code can then be inserted by having a code-block that looks as follows:
 
 ```python
 #begin insert @YourLabelHere
@@ -29,4 +29,4 @@ def foo(x):
 #end insert @YourLabelHere
 ```
 
-**Note** that the code to be inserted has the **same level of indentation** as the surrounding code of its destination insert point.
+*Note*: The code to be inserted has the **same level of indentation** as the surrounding code of its destination insert point.
diff --git a/scripts/build_spec.py b/scripts/build_spec.py
index 7a51970e38..8b541ff50c 100644
--- a/scripts/build_spec.py
+++ b/scripts/build_spec.py
@@ -6,17 +6,17 @@
 from argparse import ArgumentParser
 from typing import (
     Dict,
-    List,
     Optional,
 )
 
 
 PHASE0_IMPORTS = '''from typing import (
-    Any,
-    Dict,
-    List,
-    NewType,
-    Tuple,
+    Any, Dict, Set, Sequence, Tuple, Optional
+)
+
+from dataclasses import (
+    dataclass,
+    field,
 )
 
 from eth2spec.utils.ssz.ssz_impl import (
@@ -24,24 +24,25 @@
     signing_root,
 )
 from eth2spec.utils.ssz.ssz_typing import (
-    # unused: uint8, uint16, uint32, uint128, uint256,
-    uint64, Container, Vector, BytesN
+    bit, boolean, Container, List, Vector, uint64,
+    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
 )
 from eth2spec.utils.bls import (
     bls_aggregate_pubkeys,
     bls_verify,
     bls_verify_multiple,
+    bls_sign,
 )
-# Note: 'int' type defaults to being interpreted as a uint64 by SSZ implementation.
 
 from eth2spec.utils.hash_function import hash
 '''
 PHASE1_IMPORTS = '''from typing import (
-    Any,
-    Dict,
-    List,
-    NewType,
-    Tuple,
+    Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple,
+)
+
+from dataclasses import (
+    dataclass,
+    field,
 )
 
 from eth2spec.utils.ssz.ssz_impl import (
@@ -51,8 +52,8 @@
     is_empty,
 )
 from eth2spec.utils.ssz.ssz_typing import (
-    # unused: uint8, uint16, uint32, uint128, uint256,
-    uint64, Container, Vector, BytesN
+    bit, boolean, Container, List, Vector, Bytes, uint64,
+    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
 )
 from eth2spec.utils.bls import (
     bls_aggregate_pubkeys,
@@ -62,89 +63,101 @@
 
 from eth2spec.utils.hash_function import hash
 '''
-NEW_TYPES = {
-    'Slot': 'int',
-    'Epoch': 'int',
-    'Shard': 'int',
-    'ValidatorIndex': 'int',
-    'Gwei': 'int',
-}
-BYTE_TYPES = [4, 32, 48, 96]
 SUNDRY_FUNCTIONS = '''
-def get_ssz_type_by_name(name: str) -> Container:
-    return globals()[name]
+# Monkey patch hash cache
+_hash = hash
+hash_cache: Dict[bytes, Hash] = {}
 
 
-# Monkey patch validator compute committee code
-_compute_committee = compute_committee
-committee_cache = {}
+def get_eth1_data(distance: uint64) -> Hash:
+    return hash(distance)
 
 
-def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
-    param_hash = (hash_tree_root(indices), seed, index, count)
+def hash(x: bytes) -> Hash:
+    if x not in hash_cache:
+        hash_cache[x] = Hash(_hash(x))
+    return hash_cache[x]
 
-    if param_hash in committee_cache:
-        return committee_cache[param_hash]
-    else:
-        ret = _compute_committee(indices, seed, index, count)
-        committee_cache[param_hash] = ret
-        return ret
 
+# Monkey patch validator compute committee code
+_compute_committee = compute_committee
+committee_cache: Dict[Tuple[Hash, Hash, int, int], Sequence[ValidatorIndex]] = {}
 
-# Monkey patch hash cache
-_hash = hash
-hash_cache = {}
 
+def compute_committee(indices: Sequence[ValidatorIndex],  # type: ignore
+                      seed: Hash,
+                      index: int,
+                      count: int) -> Sequence[ValidatorIndex]:
+    param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)
 
-def hash(x):
-    if x in hash_cache:
-        return hash_cache[x]
-    else:
-        ret = _hash(x)
-        hash_cache[x] = ret
-        return ret
+    if param_hash not in committee_cache:
+        committee_cache[param_hash] = _compute_committee(indices, seed, index, count)
+    return committee_cache[param_hash]
 
 
 # Access to overwrite spec constants based on configuration
-def apply_constants_preset(preset: Dict[str, Any]):
+def apply_constants_preset(preset: Dict[str, Any]) -> None:
     global_vars = globals()
     for k, v in preset.items():
-        global_vars[k] = v
+        if k.startswith('DOMAIN_'):
+            global_vars[k] = DomainType(v)  # domain types are defined as bytes in the configs
+        else:
+            global_vars[k] = v
 
     # Deal with derived constants
-    global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
+    global_vars['GENESIS_EPOCH'] = compute_epoch_of_slot(GENESIS_SLOT)
 
     # Initialize SSZ types again, to account for changed lengths
     init_SSZ_types()
 '''
 
 
+def strip_comments(raw: str) -> str:
+    comment_line_regex = re.compile(r'^\s+# ')
+    lines = raw.split('\n')
+    out = []
+    for line in lines:
+        if not comment_line_regex.match(line):
+            if '  #' in line:
+                line = line[:line.index('  #')]
+            out.append(line)
+    return '\n'.join(out)
+
+
 def objects_to_spec(functions: Dict[str, str],
+                    custom_types: Dict[str, str],
                     constants: Dict[str, str],
                     ssz_objects: Dict[str, str],
                     inserts: Dict[str, str],
                     imports: Dict[str, str],
-                    new_types: Dict[str, str],
-                    byte_types: List[int],
                     ) -> str:
     """
     Given all the objects that constitute a spec, combine them into a single pyfile.
     """
-    new_type_definitions = \
-        '\n'.join(['''%s = NewType('%s', %s)''' % (key, key, value) for key, value in new_types.items()])
-    new_type_definitions += '\n' + '\n'.join(['Bytes%s = BytesN[%s]' % (n, n) for n in byte_types])
+    new_type_definitions = (
+        '\n\n'.join(
+            [
+                f"class {key}({value}):\n    pass\n"
+                for key, value in custom_types.items()
+            ]
+        )
+    )
     functions_spec = '\n\n'.join(functions.values())
+    for k in list(constants.keys()):
+        if k.startswith('DOMAIN_'):
+            constants[k] = f"DomainType(({constants[k]}).to_bytes(length=4, byteorder='little'))"
     constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants))
     ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values())
     ssz_objects_reinitialization_spec = (
-        'def init_SSZ_types():\n    global_vars = globals()\n\n    '
-        + '\n\n    '.join([re.sub(r'(?!\n\n)\n', r'\n    ', value[:-1]) for value in ssz_objects.values()])
+        'def init_SSZ_types() -> None:\n    global_vars = globals()\n\n    '
+        + '\n\n    '.join([strip_comments(re.sub(r'(?!\n\n)\n', r'\n    ', value[:-1]))
+                           for value in ssz_objects.values()])
         + '\n\n'
         + '\n'.join(map(lambda x: '    global_vars[\'%s\'] = %s' % (x, x), ssz_objects.keys()))
     )
     spec = (
         imports
-        + '\n' + new_type_definitions
+        + '\n\n' + new_type_definitions
         + '\n\n' + constants_spec
         + '\n\n\n' + ssz_objects_instantiation_spec
         + '\n\n' + functions_spec
@@ -170,23 +183,38 @@ def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, st
     return old_constants
 
 
-def dependency_order_ssz_objects(objects: Dict[str, str]) -> None:
+ignored_dependencies = [
+    'bit', 'boolean', 'Vector', 'List', 'Container', 'Hash', 'BLSPubkey', 'BLSSignature', 'Bytes', 'BytesN'
+    'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
+    'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
+    'bytes'  # to be removed after updating spec doc
+]
+
+
+def dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
     """
     Determines which SSZ Object is depenedent on which other and orders them appropriately
     """
     items = list(objects.items())
     for key, value in items:
-        dependencies = re.findall(r'(: [A-Z][\w[]*)', value)
-        dependencies = map(lambda x: re.sub(r'\W|Vector|List|Container|uint\d+|Bytes\d+|bytes', '', x), dependencies)
+        dependencies = []
+        for line in value.split('\n'):
+            if not re.match(r'\s+\w+: .+', line):
+                continue  # skip whitespace etc.
+            line = line[line.index(':') + 1:]  # strip of field name
+            if '#' in line:
+                line = line[:line.index('#')]  # strip of comment
+            dependencies.extend(re.findall(r'(\w+)', line))  # catch all legible words, potential dependencies
+        dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies)  # filter out constants
+        dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)
+        dependencies = filter(lambda x: x not in custom_types, dependencies)
         for dep in dependencies:
-            if dep in NEW_TYPES or len(dep) == 0:
-                continue
             key_list = list(objects.keys())
             for item in [dep, key] + key_list[key_list.index(dep)+1:]:
                 objects[item] = objects.pop(item)
 
 
-def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str]) -> Dict[str, str]:
+def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:
     """
     Takes in old spec and new spec ssz objects, combines them,
     and returns the newer versions of the objects in dependency order.
@@ -198,7 +226,7 @@ def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str]
             # remove leading variable name
             value = re.sub(r'^class [\w]*\(Container\):\n', '', value)
         old_objects[key] = old_objects.get(key, '') + value
-    dependency_order_ssz_objects(old_objects)
+    dependency_order_ssz_objects(old_objects, custom_types)
     return old_objects
 
 
@@ -210,18 +238,25 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
     """
     Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
     """
-    functions0, constants0, ssz_objects0, inserts0 = spec0
-    functions1, constants1, ssz_objects1, inserts1 = spec1
+    functions0, custom_types0, constants0, ssz_objects0, inserts0 = spec0
+    functions1, custom_types1, constants1, ssz_objects1, inserts1 = spec1
     functions = combine_functions(functions0, functions1)
+    custom_types = combine_constants(custom_types0, custom_types1)
     constants = combine_constants(constants0, constants1)
-    ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1)
+    ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
     inserts = combine_inserts(inserts0, inserts1)
-    return functions, constants, ssz_objects, inserts
+    return functions, custom_types, constants, ssz_objects, inserts
 
 
-def build_phase0_spec(sourcefile: str, outfile: str=None) -> Optional[str]:
-    functions, constants, ssz_objects, inserts = get_spec(sourcefile)
-    spec = objects_to_spec(functions, constants, ssz_objects, inserts, PHASE0_IMPORTS, NEW_TYPES, BYTE_TYPES)
+def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,
+                      v_guide_sourcefile: str, outfile: str=None) -> Optional[str]:
+    phase0_spec = get_spec(phase0_sourcefile)
+    fork_choice_spec = get_spec(fork_choice_sourcefile)
+    v_guide = get_spec(v_guide_sourcefile)
+    spec_objects = phase0_spec
+    for value in [fork_choice_spec, v_guide]:
+        spec_objects = combine_spec_objects(spec_objects, value)
+    spec = objects_to_spec(*spec_objects, PHASE0_IMPORTS)
     if outfile is not None:
         with open(outfile, 'w') as out:
             out.write(spec)
@@ -231,14 +266,16 @@ def build_phase0_spec(sourcefile: str, outfile: str=None) -> Optional[str]:
 def build_phase1_spec(phase0_sourcefile: str,
                       phase1_custody_sourcefile: str,
                       phase1_shard_sourcefile: str,
+                      fork_choice_sourcefile: str,
                       outfile: str=None) -> Optional[str]:
     phase0_spec = get_spec(phase0_sourcefile)
     phase1_custody = get_spec(phase1_custody_sourcefile)
     phase1_shard_data = get_spec(phase1_shard_sourcefile)
+    fork_choice_spec = get_spec(fork_choice_sourcefile)
     spec_objects = phase0_spec
-    for value in [phase1_custody, phase1_shard_data]:
+    for value in [phase1_custody, phase1_shard_data, fork_choice_spec]:
         spec_objects = combine_spec_objects(spec_objects, value)
-    spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS, NEW_TYPES, BYTE_TYPES)
+    spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS)
     if outfile is not None:
         with open(outfile, 'w') as out:
             out.write(spec)
@@ -250,13 +287,16 @@ def build_phase1_spec(phase0_sourcefile: str,
 Build the specs from the md docs.
 If building phase 0:
     1st argument is input spec.md
-    2nd argument is output spec.py
+    2nd argument is input fork_choice.md
+    3rd argument is input validator_guide.md
+    4th argument is output spec.py
 
 If building phase 1:
     1st argument is input spec_phase0.md
     2nd argument is input spec_phase1_custody.md
     3rd argument is input spec_phase1_shard_data.md
-    4th argument is output spec.py
+    4th argument is input fork_choice.md
+    5th argument is output spec.py
 '''
     parser = ArgumentParser(description=description)
     parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #")
@@ -264,14 +304,15 @@ def build_phase1_spec(phase0_sourcefile: str,
 
     args = parser.parse_args()
     if args.phase == 0:
-        if len(args.files) == 2:
+        if len(args.files) == 4:
             build_phase0_spec(*args.files)
         else:
-            print(" Phase 0 requires an output as well as an input file.")
+            print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.")
     elif args.phase == 1:
-        if len(args.files) == 4:
+        if len(args.files) == 5:
             build_phase1_spec(*args.files)
         else:
-            print(" Phase 1 requires an output as well as 3 input files (phase0.md and phase1.md, phase1.md)")
+            print(" Phase 1 requires 4 input files as well as an output file: "
+                  + "(phase0.md and phase1.md, phase1.md, fork_choice.md, output.py)")
     else:
         print("Invalid phase: {0}".format(args.phase))
diff --git a/scripts/function_puller.py b/scripts/function_puller.py
index 303d4ec2f5..26671bafcf 100644
--- a/scripts/function_puller.py
+++ b/scripts/function_puller.py
@@ -29,6 +29,8 @@ def get_spec(file_name: str) -> SpecObject:
     inserts = {}
     function_matcher = re.compile(FUNCTION_REGEX)
     inserts_matcher = re.compile(BEGIN_INSERT_REGEX)
+    is_ssz = False
+    custom_types = {}
     for linenum, line in enumerate(open(file_name).readlines()):
         line = line.rstrip()
         if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
@@ -64,7 +66,7 @@ def get_spec(file_name: str) -> SpecObject:
                     ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
                 else:
                     functions[current_name] = functions.get(current_name, '') + line + '\n'
-            # Handle constant table entries
+            # Handle constant and custom types table entries
             elif pulling_from is None and len(line) > 0 and line[0] == '|':
                 row = line[1:].split('|')
                 if len(row) >= 2:
@@ -72,12 +74,14 @@ def get_spec(file_name: str) -> SpecObject:
                         row[i] = row[i].strip().strip('`')
                         if '`' in row[i]:
                             row[i] = row[i][:row[i].find('`')]
-                    eligible = True
+                    is_constant_def = True
                     if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
-                        eligible = False
+                        is_constant_def = False
                     for c in row[0]:
                         if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
-                            eligible = False
-                    if eligible:
+                            is_constant_def = False
+                    if is_constant_def:
                         constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')
-    return functions, constants, ssz_objects, inserts
+                    elif row[1].startswith('uint') or row[1].startswith('Bytes'):
+                        custom_types[row[0]] = row[1]
+    return functions, custom_types, constants, ssz_objects, inserts
diff --git a/specs/bls_signature.md b/specs/bls_signature.md
index 3fe1bcc0eb..652279cd7f 100644
--- a/specs/bls_signature.md
+++ b/specs/bls_signature.md
@@ -71,10 +71,10 @@ We require:
 G2_cofactor = 305502333931268344200999753193121504214466019254188142667664032982267604182971884026507427359259977847832272839041616661285803823378372096355777062779109
 q = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787
 
-def hash_to_G2(message_hash: Bytes32, domain: uint64) -> [uint384]:
+def hash_to_G2(message_hash: Bytes32, domain: Bytes8) -> Tuple[uint384, uint384]:
     # Initial candidate x coordinate
-    x_re = int.from_bytes(hash(message_hash + bytes8(domain) + b'\x01'), 'big')
-    x_im = int.from_bytes(hash(message_hash + bytes8(domain) + b'\x02'), 'big')
+    x_re = int.from_bytes(hash(message_hash + domain + b'\x01'), 'big')
+    x_im = int.from_bytes(hash(message_hash + domain + b'\x02'), 'big')
     x_coordinate = Fq2([x_re, x_im])  # x = x_re + i * x_im
     
     # Test candidate y coordinates until a one is found
@@ -130,7 +130,7 @@ g = Fq2([g_x, g_y])
 
 ### `bls_verify`
 
-Let `bls_verify(pubkey: Bytes48, message_hash: Bytes32, signature: Bytes96, domain: uint64) -> bool`:
+Let `bls_verify(pubkey: Bytes48, message_hash: Bytes32, signature: Bytes96, domain: Bytes8) -> bool`:
 
 * Verify that `pubkey` is a valid G1 point.
 * Verify that `signature` is a valid G2 point.
@@ -138,7 +138,7 @@ Let `bls_verify(pubkey: Bytes48, message_hash: Bytes32, signature: Bytes96, doma
 
 ### `bls_verify_multiple`
 
-Let `bls_verify_multiple(pubkeys: List[Bytes48], message_hashes: List[Bytes32], signature: Bytes96, domain: uint64) -> bool`:
+Let `bls_verify_multiple(pubkeys: List[Bytes48], message_hashes: List[Bytes32], signature: Bytes96, domain: Bytes8) -> bool`:
 
 * Verify that each `pubkey` in `pubkeys` is a valid G1 point.
 * Verify that `signature` is a valid G2 point.
diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md
index a6d9d23c58..975874d51d 100644
--- a/specs/core/0_beacon-chain.md
+++ b/specs/core/0_beacon-chain.md
@@ -9,20 +9,21 @@
     - [Table of contents](#table-of-contents)
     - [Introduction](#introduction)
     - [Notation](#notation)
-    - [Terminology](#terminology)
+    - [Custom types](#custom-types)
     - [Constants](#constants)
+    - [Configuration](#configuration)
         - [Misc](#misc)
-        - [Deposit contract](#deposit-contract)
         - [Gwei values](#gwei-values)
         - [Initial values](#initial-values)
         - [Time parameters](#time-parameters)
         - [State list lengths](#state-list-lengths)
         - [Rewards and penalties](#rewards-and-penalties)
         - [Max operations per block](#max-operations-per-block)
-        - [Signature domains](#signature-domains)
-    - [Data structures](#data-structures)
+        - [Signature domain types](#signature-domain-types)
+    - [Containers](#containers)
         - [Misc dependencies](#misc-dependencies)
             - [`Fork`](#fork)
+            - [`Checkpoint`](#checkpoint)
             - [`Validator`](#validator)
             - [`Crosslink`](#crosslink)
             - [`AttestationData`](#attestationdata)
@@ -32,6 +33,7 @@
             - [`Eth1Data`](#eth1data)
             - [`HistoricalBatch`](#historicalbatch)
             - [`DepositData`](#depositdata)
+            - [`CompactCommittee`](#compactcommittee)
             - [`BeaconBlockHeader`](#beaconblockheader)
         - [Beacon operations](#beacon-operations)
             - [`ProposerSlashing`](#proposerslashing)
@@ -45,57 +47,59 @@
             - [`BeaconBlock`](#beaconblock)
         - [Beacon state](#beacon-state)
             - [`BeaconState`](#beaconstate)
-    - [Custom types](#custom-types)
     - [Helper functions](#helper-functions)
-        - [`xor`](#xor)
-        - [`hash`](#hash)
-        - [`hash_tree_root`](#hash_tree_root)
-        - [`signing_root`](#signing_root)
-        - [`bls_domain`](#bls_domain)
-        - [`slot_to_epoch`](#slot_to_epoch)
-        - [`get_previous_epoch`](#get_previous_epoch)
-        - [`get_current_epoch`](#get_current_epoch)
-        - [`get_epoch_start_slot`](#get_epoch_start_slot)
-        - [`is_active_validator`](#is_active_validator)
-        - [`is_slashable_validator`](#is_slashable_validator)
-        - [`get_active_validator_indices`](#get_active_validator_indices)
-        - [`increase_balance`](#increase_balance)
-        - [`decrease_balance`](#decrease_balance)
-        - [`get_epoch_committee_count`](#get_epoch_committee_count)
-        - [`get_shard_delta`](#get_shard_delta)
-        - [`get_epoch_start_shard`](#get_epoch_start_shard)
-        - [`get_attestation_data_slot`](#get_attestation_data_slot)
-        - [`get_block_root_at_slot`](#get_block_root_at_slot)
-        - [`get_block_root`](#get_block_root)
-        - [`get_randao_mix`](#get_randao_mix)
-        - [`get_active_index_root`](#get_active_index_root)
-        - [`generate_seed`](#generate_seed)
-        - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
-        - [`verify_merkle_branch`](#verify_merkle_branch)
-        - [`get_shuffled_index`](#get_shuffled_index)
-        - [`compute_committee`](#compute_committee)
-        - [`get_crosslink_committee`](#get_crosslink_committee)
-        - [`get_attesting_indices`](#get_attesting_indices)
-        - [`int_to_bytes`](#int_to_bytes)
-        - [`bytes_to_int`](#bytes_to_int)
-        - [`get_total_balance`](#get_total_balance)
-        - [`get_domain`](#get_domain)
-        - [`get_bitfield_bit`](#get_bitfield_bit)
-        - [`verify_bitfield`](#verify_bitfield)
-        - [`convert_to_indexed`](#convert_to_indexed)
-        - [`validate_indexed_attestation`](#validate_indexed_attestation)
-        - [`is_slashable_attestation_data`](#is_slashable_attestation_data)
-        - [`integer_squareroot`](#integer_squareroot)
-        - [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch)
-        - [`get_churn_limit`](#get_churn_limit)
-        - [`bls_verify`](#bls_verify)
-        - [`bls_verify_multiple`](#bls_verify_multiple)
-        - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys)
-        - [Routines for updating validator status](#routines-for-updating-validator-status)
+        - [Math](#math)
+            - [`int_to_bytes`](#int_to_bytes)
+            - [`integer_squareroot`](#integer_squareroot)
+            - [`xor`](#xor)
+            - [`bytes_to_int`](#bytes_to_int)
+        - [Crypto](#crypto)
+            - [`hash`](#hash)
+            - [`hash_tree_root`](#hash_tree_root)
+            - [`signing_root`](#signing_root)
+            - [`bls_verify`](#bls_verify)
+            - [`bls_verify_multiple`](#bls_verify_multiple)
+            - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys)
+        - [Predicates](#predicates)
+            - [`is_active_validator`](#is_active_validator)
+            - [`is_slashable_validator`](#is_slashable_validator)
+            - [`is_slashable_attestation_data`](#is_slashable_attestation_data)
+            - [`is_valid_indexed_attestation`](#is_valid_indexed_attestation)
+            - [`is_valid_merkle_branch`](#is_valid_merkle_branch)
+        - [Misc](#misc-1)
+            - [`compute_shuffled_index`](#compute_shuffled_index)
+            - [`compute_committee`](#compute_committee)
+            - [`compute_epoch_of_slot`](#compute_epoch_of_slot)
+            - [`compute_start_slot_of_epoch`](#compute_start_slot_of_epoch)
+            - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch)
+            - [`compute_domain`](#compute_domain)
+        - [Beacon state accessors](#beacon-state-accessors)
+            - [`get_current_epoch`](#get_current_epoch)
+            - [`get_previous_epoch`](#get_previous_epoch)
+            - [`get_block_root`](#get_block_root)
+            - [`get_block_root_at_slot`](#get_block_root_at_slot)
+            - [`get_randao_mix`](#get_randao_mix)
+            - [`get_active_validator_indices`](#get_active_validator_indices)
+            - [`get_validator_churn_limit`](#get_validator_churn_limit)
+            - [`get_seed`](#get_seed)
+            - [`get_committee_count`](#get_committee_count)
+            - [`get_crosslink_committee`](#get_crosslink_committee)
+            - [`get_start_shard`](#get_start_shard)
+            - [`get_shard_delta`](#get_shard_delta)
+            - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
+            - [`get_attestation_data_slot`](#get_attestation_data_slot)
+            - [`get_compact_committees_root`](#get_compact_committees_root)
+            - [`get_total_balance`](#get_total_balance)
+            - [`get_total_active_balance`](#get_total_active_balance)
+            - [`get_domain`](#get_domain)
+            - [`get_indexed_attestation`](#get_indexed_attestation)
+            - [`get_attesting_indices`](#get_attesting_indices)
+        - [Beacon state mutators](#beacon-state-mutators)
+            - [`increase_balance`](#increase_balance)
+            - [`decrease_balance`](#decrease_balance)
             - [`initiate_validator_exit`](#initiate_validator_exit)
             - [`slash_validator`](#slash_validator)
     - [Genesis](#genesis)
-        - [`Eth2Genesis`](#eth2genesis)
         - [Genesis state](#genesis-state)
         - [Genesis block](#genesis-block)
     - [Beacon chain state transition function](#beacon-chain-state-transition-function)
@@ -125,36 +129,47 @@
 
 This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain.
 
-At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0, the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior.
-
+At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of validators. In the initial deployment phases of Ethereum 2.0, the only mechanism to become a validator is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a validator happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior.
 The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block and proof-of-stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication.
 
 ## Notation
 
-Code snippets appearing in `this style` are to be interpreted as Python code.
-
-## Terminology
-
-* **Validator**<a id="dfn-validator"></a>—a registered participant in the beacon chain. You can become one by sending ether into the Ethereum 1.0 deposit contract.
-* **Active validator**<a id="dfn-active-validator"></a>—an active participant in the Ethereum 2.0 consensus invited to, among other things, propose and attest to blocks and vote for crosslinks.
-* **Committee**—a (pseudo-) randomly sampled subset of [active validators](#dfn-active-validator). When a committee is referred to collectively, as in "this committee attests to X", this is assumed to mean "some subset of that committee that contains enough [validators](#dfn-validator) that the protocol recognizes it as representing the committee".
-* **Proposer**—the [validator](#dfn-validator) that creates a beacon chain block.
-* **Attester**—a [validator](#dfn-validator) that is part of a committee that needs to sign off on a beacon chain block while simultaneously creating a link (crosslink) to a recent shard block on a particular shard chain.
-* **Beacon chain**—the central PoS chain that is the base of the sharding system.
-* **Shard chain**—one of the chains on which user transactions take place and account data is stored.
-* **Block root**—a 32-byte Merkle root of a beacon chain block or shard chain block. Previously called "block hash".
-* **Crosslink**—a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains.
-* **Slot**—a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations.
-* **Epoch**—an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation.
-* **Finalized**, **justified**—see the [Casper FFG paper](https://arxiv.org/abs/1710.09437).
-* **Withdrawal period**—the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable.
-* **Genesis time**—the Unix time of the genesis beacon chain block at slot 0.
+Code snippets appearing in `this style` are to be interpreted as Python 3 code.
+
+## Custom types
+
+We define the following Python custom types for type hinting and readability:
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `Slot` | `uint64` | a slot number |
+| `Epoch` | `uint64` | an epoch number |
+| `Shard` | `uint64` | a shard number |
+| `ValidatorIndex` | `uint64` | a validator registry index |
+| `Gwei` | `uint64` | an amount in Gwei |
+| `Hash` | `Bytes32` | a hash |
+| `Version` | `Bytes4` | a fork version number |
+| `DomainType` | `Bytes4` | a signature domain type |
+| `Domain` | `Bytes8` | a signature domain |
+| `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
+| `BLSSignature` | `Bytes96` | a BLS12-381 signature |
 
 ## Constants
 
-*Note*: The default mainnet values for the constants are included here for spec-design purposes.
-The different configurations for mainnet, testnets, and YAML-based testing can be found in the `configs/constant_presets/` directory.
-These configurations are updated for releases, but may be out of sync during `dev` changes.
+The following values are (non-configurable) constants used throughout the specification.
+
+| Name | Value |
+| - | - |
+| `FAR_FUTURE_EPOCH` | `Epoch(2**64 - 1)` |
+| `BASE_REWARDS_PER_EPOCH` | `5` |
+| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
+| `SECONDS_PER_DAY` | `86400` |
+| `JUSTIFICATION_BITS_LENGTH` | `4` |
+| `ENDIANNESS` | `'little'` |
+
+## Configuration
+
+*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs/constant_presets) directory. These configurations are updated for releases and may be out of sync during `dev` changes.
 
 ### Misc
 
@@ -162,76 +177,67 @@ These configurations are updated for releases, but may be out of sync during `de
 | - | - |
 | `SHARD_COUNT` | `2**10` (= 1,024) |
 | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
-| `MAX_INDICES_PER_ATTESTATION` | `2**12` (= 4,096) |
+| `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) |
 | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) |
 | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) |
-| `BASE_REWARDS_PER_EPOCH` | `5` |
 | `SHUFFLE_ROUND_COUNT` | `90` |
+| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**16` (= 65,536) |
+| `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
 
-* For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
+- For the safety of crosslinks, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
 
-### Deposit contract
+### Gwei values
 
 | Name | Value |
 | - | - |
-| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
-
-### Gwei values
-
-| Name | Value | Unit |
-| - | - | :-: |
-| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
-| `MAX_EFFECTIVE_BALANCE` | `2**5 * 10**9` (= 32,000,000,000) | Gwei |
-| `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei |
-| `EFFECTIVE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
+| `MIN_DEPOSIT_AMOUNT` | `Gwei(2**0 * 10**9)` (= 1,000,000,000) |
+| `MAX_EFFECTIVE_BALANCE` | `Gwei(2**5 * 10**9)` (= 32,000,000,000) |
+| `EJECTION_BALANCE` | `Gwei(2**4 * 10**9)` (= 16,000,000,000) |
+| `EFFECTIVE_BALANCE_INCREMENT` | `Gwei(2**0 * 10**9)` (= 1,000,000,000) |
 
 ### Initial values
 
 | Name | Value |
 | - | - |
-| `GENESIS_SLOT` | `0` |
-| `GENESIS_EPOCH` | `0` |
-| `FAR_FUTURE_EPOCH` | `2**64 - 1` |
-| `ZERO_HASH` | `b'\x00' * 32` |
-| `BLS_WITHDRAWAL_PREFIX` | `0` |
+| `GENESIS_SLOT` | `Slot(0)` |
+| `GENESIS_EPOCH` | `Epoch(0)` |
+| `BLS_WITHDRAWAL_PREFIX` | `Bytes1(b'\x00')` |
 
 ### Time parameters
 
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**2` (= 4) | slots | 24 seconds |
+| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 6 seconds |
 | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes |
 | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
 | `ACTIVATION_EXIT_DELAY` | `2**2` (= 4) | epochs | 25.6 minutes |
 | `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours |
 | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours |
 | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
-| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048)  | epochs | 9 days  |
+| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days |
 | `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours |
 | `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes |
 
-* `MAX_EPOCHS_PER_CROSSLINK` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH`
-
 ### State list lengths
 
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `LATEST_RANDAO_MIXES_LENGTH` | `2**13` (= 8,192) | epochs | ~36 days |
-| `LATEST_ACTIVE_INDEX_ROOTS_LENGTH` | `2**13` (= 8,192) | epochs | ~36 days |
-| `LATEST_SLASHED_EXIT_LENGTH` | `2**13` (= 8,192) | epochs | ~36 days |
+| `EPOCHS_PER_HISTORICAL_VECTOR` | `2**16` (= 65,536) | epochs | ~0.8 years |
+| `EPOCHS_PER_SLASHINGS_VECTOR` | `2**13` (= 8,192) | epochs | ~36 days |
+| `HISTORICAL_ROOTS_LIMIT` | `2**24` (= 16,777,216) | historical roots | ~26,131 years |
+| `VALIDATOR_REGISTRY_LIMIT` | `2**40` (= 1,099,511,627,776) | validator spots | |
 
 ### Rewards and penalties
 
 | Name | Value |
 | - | - |
-| `BASE_REWARD_FACTOR` | `2**5` (= 32) |
-| `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) |
+| `BASE_REWARD_FACTOR` | `2**6` (= 64) |
+| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
 | `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
 | `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) |
 | `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
 
-* **The `BASE_REWARD_FACTOR` is NOT final. Once all other protocol details are finalized, it will be adjusted to target a theoretical maximum total issuance of `2**21` ETH per year if `2**27` ETH is validating (and therefore `2**20` per year if `2**25` ETH is validating, etc.)**
-* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
+- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
 
 ### Max operations per block
 
@@ -244,7 +250,9 @@ These configurations are updated for releases, but may be out of sync during `de
 | `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
 | `MAX_TRANSFERS` | `0` |
 
-### Signature domains
+### Signature domain types
+
+The following types are defined, mapping into `DomainType` (little endian):
 
 | Name | Value |
 | - | - |
@@ -255,11 +263,13 @@ These configurations are updated for releases, but may be out of sync during `de
 | `DOMAIN_VOLUNTARY_EXIT` | `4` |
 | `DOMAIN_TRANSFER` | `5` |
 
-## Data structures
+## Containers
 
-The following data structures are defined as [SimpleSerialize (SSZ)](../simple-serialize.md) objects.
+The following types are [SimpleSerialize (SSZ)](../simple-serialize.md) containers.
 
-The types are defined topologically to aid in facilitating an executable version of the spec.
+*Note*: The definitions are ordered topologically to facilitate execution of the spec.
+
+*Note*: Fields missing in container instantiations default to their zero value.
 
 ### Misc dependencies
 
@@ -267,49 +277,44 @@ The types are defined topologically to aid in facilitating an executable version
 
 ```python
 class Fork(Container):
-    # Previous fork version
-    previous_version: Bytes4
-    # Current fork version
-    current_version: Bytes4
-    # Fork epoch number
-    epoch: uint64
+    previous_version: Version
+    current_version: Version
+    epoch: Epoch  # Epoch of latest fork
+```
+
+#### `Checkpoint`
+
+```python
+class Checkpoint(Container):
+    epoch: Epoch
+    root: Hash
 ```
 
 #### `Validator`
 
 ```python
 class Validator(Container):
-    # BLS public key
-    pubkey: Bytes48
-    # Withdrawal credentials
-    withdrawal_credentials: Bytes32
-    # Epoch when became eligible for activation
-    activation_eligibility_epoch: uint64
-    # Epoch when validator activated
-    activation_epoch: uint64
-    # Epoch when validator exited
-    exit_epoch: uint64
-    # Epoch when validator is eligible to withdraw
-    withdrawable_epoch: uint64
-    # Was the validator slashed
-    slashed: bool
-    # Effective balance
-    effective_balance: uint64
+    pubkey: BLSPubkey
+    withdrawal_credentials: Hash  # Commitment to pubkey for withdrawals and transfers
+    effective_balance: Gwei  # Balance at stake
+    slashed: boolean
+    # Status epochs
+    activation_eligibility_epoch: Epoch  # When criteria for activation were met
+    activation_epoch: Epoch
+    exit_epoch: Epoch
+    withdrawable_epoch: Epoch  # When validator can withdraw or transfer funds
 ```
 
 #### `Crosslink`
 
 ```python
 class Crosslink(Container):
-    # Shard number
-    shard: uint64
-    # Crosslinking data from epochs [start....end-1]
-    start_epoch: uint64
-    end_epoch: uint64
-    # Root of the previous crosslink
-    parent_root: Bytes32
-    # Root of the crosslinked shard data since the previous crosslink
-    data_root: Bytes32
+    shard: Shard
+    parent_root: Hash
+    # Crosslinking data
+    start_epoch: Epoch
+    end_epoch: Epoch
+    data_root: Hash
 ```
 
 #### `AttestationData`
@@ -317,14 +322,10 @@ class Crosslink(Container):
 ```python
 class AttestationData(Container):
     # LMD GHOST vote
-    beacon_block_root: Bytes32
-
+    beacon_block_root: Hash
     # FFG vote
-    source_epoch: uint64
-    source_root: Bytes32
-    target_epoch: uint64
-    target_root: Bytes32
-
+    source: Checkpoint
+    target: Checkpoint
     # Crosslink vote
     crosslink: Crosslink
 ```
@@ -333,84 +334,74 @@ class AttestationData(Container):
 
 ```python
 class AttestationDataAndCustodyBit(Container):
-    # Attestation data
     data: AttestationData
-    # Custody bit
-    custody_bit: bool
+    custody_bit: bit  # Challengeable bit (SSZ-bool, 1 byte) for the custody of crosslink data
 ```
 
 #### `IndexedAttestation`
 
 ```python
 class IndexedAttestation(Container):
-    # Validator indices
-    custody_bit_0_indices: List[uint64]
-    custody_bit_1_indices: List[uint64]
-    # Attestation data
+    custody_bit_0_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]  # Indices with custody bit equal to 0
+    custody_bit_1_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]  # Indices with custody bit equal to 1
     data: AttestationData
-    # Aggregate signature
-    signature: Bytes96
+    signature: BLSSignature
 ```
 
 #### `PendingAttestation`
 
 ```python
 class PendingAttestation(Container):
-    # Attester aggregation bitfield
-    aggregation_bitfield: bytes
-    # Attestation data
+    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     data: AttestationData
-    # Inclusion delay
-    inclusion_delay: uint64
-    # Proposer index
-    proposer_index: uint64
+    inclusion_delay: Slot
+    proposer_index: ValidatorIndex
 ```
 
 #### `Eth1Data`
 
 ```python
 class Eth1Data(Container):
-    # Root of the deposit tree
-    deposit_root: Bytes32
-    # Total number of deposits
+    deposit_root: Hash
     deposit_count: uint64
-    # Block hash
-    block_hash: Bytes32
+    block_hash: Hash
 ```
 
 #### `HistoricalBatch`
 
 ```python
 class HistoricalBatch(Container):
-    # Block roots
-    block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
-    # State roots
-    state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
+    block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
 ```
 
 #### `DepositData`
 
 ```python
 class DepositData(Container):
-    # BLS pubkey
-    pubkey: Bytes48
-    # Withdrawal credentials
-    withdrawal_credentials: Bytes32
-    # Amount in Gwei
-    amount: uint64
-    # Container self-signature
-    signature: Bytes96
+    pubkey: BLSPubkey
+    withdrawal_credentials: Hash
+    amount: Gwei
+    signature: BLSSignature
+```
+
+#### `CompactCommittee`
+
+```python
+class CompactCommittee(Container):
+    pubkeys: List[Bytes48, MAX_VALIDATORS_PER_COMMITTEE]
+    compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
 ```
 
 #### `BeaconBlockHeader`
 
 ```python
 class BeaconBlockHeader(Container):
-    slot: uint64
-    parent_root: Bytes32
-    state_root: Bytes32
-    body_root: Bytes32
-    signature: Bytes96
+    slot: Slot
+    parent_root: Hash
+    state_root: Hash
+    body_root: Hash
+    signature: BLSSignature
 ```
 
 ### Beacon operations
@@ -419,11 +410,8 @@ class BeaconBlockHeader(Container):
 
 ```python
 class ProposerSlashing(Container):
-    # Proposer index
-    proposer_index: uint64
-    # First block header
+    proposer_index: ValidatorIndex
     header_1: BeaconBlockHeader
-    # Second block header
     header_2: BeaconBlockHeader
 ```
 
@@ -431,9 +419,7 @@ class ProposerSlashing(Container):
 
 ```python
 class AttesterSlashing(Container):
-    # First attestation
     attestation_1: IndexedAttestation
-    # Second attestation
     attestation_2: IndexedAttestation
 ```
 
@@ -441,23 +427,17 @@ class AttesterSlashing(Container):
 
 ```python
 class Attestation(Container):
-    # Attester aggregation bitfield
-    aggregation_bitfield: bytes
-    # Attestation data
+    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     data: AttestationData
-    # Custody bitfield
-    custody_bitfield: bytes
-    # BLS aggregate signature
-    signature: Bytes96
+    custody_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
+    signature: BLSSignature
 ```
 
 #### `Deposit`
 
 ```python
 class Deposit(Container):
-    # Branch in the deposit tree
-    proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH]
-    # Data
+    proof: Vector[Hash, DEPOSIT_CONTRACT_TREE_DEPTH + 1]  # Merkle path to deposit data list root
     data: DepositData
 ```
 
@@ -465,32 +445,22 @@ class Deposit(Container):
 
 ```python
 class VoluntaryExit(Container):
-    # Minimum epoch for processing exit
-    epoch: uint64
-    # Index of the exiting validator
-    validator_index: uint64
-    # Validator signature
-    signature: Bytes96
+    epoch: Epoch  # Earliest epoch when voluntary exit can be processed
+    validator_index: ValidatorIndex
+    signature: BLSSignature
 ```
 
 #### `Transfer`
 
 ```python
 class Transfer(Container):
-    # Sender index
-    sender: uint64
-    # Recipient index
-    recipient: uint64
-    # Amount in Gwei
-    amount: uint64
-    # Fee in Gwei for block proposer
-    fee: uint64
-    # Inclusion slot
-    slot: uint64
-    # Sender withdrawal pubkey
-    pubkey: Bytes48
-    # Sender signature
-    signature: Bytes96
+    sender: ValidatorIndex
+    recipient: ValidatorIndex
+    amount: Gwei
+    fee: Gwei
+    slot: Slot  # Slot at which transfer must be processed
+    pubkey: BLSPubkey  # Withdrawal pubkey
+    signature: BLSSignature  # Signature checked against withdrawal pubkey
 ```
 
 ### Beacon blocks
@@ -499,27 +469,27 @@ class Transfer(Container):
 
 ```python
 class BeaconBlockBody(Container):
-    randao_reveal: Bytes96
-    eth1_data: Eth1Data
-    graffiti: Bytes32
-    proposer_slashings: List[ProposerSlashing]
-    attester_slashings: List[AttesterSlashing]
-    attestations: List[Attestation]
-    deposits: List[Deposit]
-    voluntary_exits: List[VoluntaryExit]
-    transfers: List[Transfer]
+    randao_reveal: BLSSignature
+    eth1_data: Eth1Data  # Eth1 data vote
+    graffiti: Bytes32  # Arbitrary data
+    # Operations
+    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
+    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
+    attestations: List[Attestation, MAX_ATTESTATIONS]
+    deposits: List[Deposit, MAX_DEPOSITS]
+    voluntary_exits: List[VoluntaryExit, MAX_VOLUNTARY_EXITS]
+    transfers: List[Transfer, MAX_TRANSFERS]
 ```
 
 #### `BeaconBlock`
 
 ```python
 class BeaconBlock(Container):
-    # Header
-    slot: uint64
-    parent_root: Bytes32
-    state_root: Bytes32
+    slot: Slot
+    parent_root: Hash
+    state_root: Hash
     body: BeaconBlockBody
-    signature: Bytes96
+    signature: BLSSignature
 ```
 
 ### Beacon state
@@ -528,133 +498,122 @@ class BeaconBlock(Container):
 
 ```python
 class BeaconState(Container):
-    # Misc
-    slot: uint64
+    # Versioning
     genesis_time: uint64
-    fork: Fork  # For versioning hard forks
-    # Validator registry
-    validator_registry: List[Validator]
-    balances: List[uint64]
-    # Randomness and committees
-    latest_randao_mixes: Vector[Bytes32, LATEST_RANDAO_MIXES_LENGTH]
-    latest_start_shard: uint64
-    # Finality
-    previous_epoch_attestations: List[PendingAttestation]
-    current_epoch_attestations: List[PendingAttestation]
-    previous_justified_epoch: uint64
-    current_justified_epoch: uint64
-    previous_justified_root: Bytes32
-    current_justified_root: Bytes32
-    justification_bitfield: uint64
-    finalized_epoch: uint64
-    finalized_root: Bytes32
-    # Recent state
-    current_crosslinks: Vector[Crosslink, SHARD_COUNT]
-    previous_crosslinks: Vector[Crosslink, SHARD_COUNT]
-    latest_block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
-    latest_state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
-    latest_active_index_roots: Vector[Bytes32, LATEST_ACTIVE_INDEX_ROOTS_LENGTH]
-    latest_slashed_balances: Vector[uint64, LATEST_SLASHED_EXIT_LENGTH]
+    slot: Slot
+    fork: Fork
+    # History
     latest_block_header: BeaconBlockHeader
-    historical_roots: List[Bytes32]
-    # Ethereum 1.0 chain data
-    latest_eth1_data: Eth1Data
-    eth1_data_votes: List[Eth1Data]
-    deposit_index: uint64
+    block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
+    historical_roots: List[Hash, HISTORICAL_ROOTS_LIMIT]
+    # Eth1
+    eth1_data: Eth1Data
+    eth1_data_votes: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
+    eth1_deposit_index: uint64
+    # Registry
+    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
+    # Shuffling
+    start_shard: Shard
+    randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]
+    active_index_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]  # Active index digests for light clients
+    compact_committees_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]  # Committee digests for light clients
+    # Slashings
+    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
+    # Attestations
+    previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
+    current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
+    # Crosslinks
+    previous_crosslinks: Vector[Crosslink, SHARD_COUNT]  # Previous epoch snapshot
+    current_crosslinks: Vector[Crosslink, SHARD_COUNT]
+    # Finality
+    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
+    previous_justified_checkpoint: Checkpoint  # Previous epoch snapshot
+    current_justified_checkpoint: Checkpoint
+    finalized_checkpoint: Checkpoint
 ```
 
-## Custom types
-
-We define the following Python custom types for type hinting and readability:
-
-| Name | SSZ equivalent | Description |
-| - | - | - |
-| `Slot` | `uint64` | a slot number |
-| `Epoch` | `uint64` | an epoch number |
-| `Shard` | `uint64` | a shard number |
-| `ValidatorIndex` | `uint64` | a validator registry index |
-| `Gwei` | `uint64` | an amount in Gwei |
-| `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
-| `BLSSignature` | `Bytes96` | a BLS12-381 signature |
-
 ## Helper functions
 
 *Note*: The definitions below are for specification purposes and are not necessarily optimal implementations.
 
-### `xor`
-
-```python
-def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32:
-    return Bytes32(a ^ b for a, b in zip(bytes1, bytes2))
-```
-
-### `hash`
+### Math
 
-The `hash` function is SHA256.
+#### `int_to_bytes`
 
-*Note*: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethereum 2.0 deployment phase.
-
-### `hash_tree_root`
-
-`def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](../simple-serialize.md#merkleization).
-
-### `signing_root`
-
-`def signing_root(object: Container) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages.
-
-### `bls_domain`
+#### `integer_squareroot`
 
 ```python
-def bls_domain(domain_type: int, fork_version: bytes=b'\x00\x00\x00\x00') -> int:
+def integer_squareroot(n: uint64) -> uint64:
     """
-    Return the bls domain given by the ``domain_type`` and optional 4 byte ``fork_version`` (defaults to zero).
+    Return the largest integer ``x`` such that ``x**2 <= n``.
     """
-    return bytes_to_int(int_to_bytes(domain_type, length=4) + fork_version)
+    x = n
+    y = (x + 1) // 2
+    while y < x:
+        x = y
+        y = (x + n // x) // 2
+    return x
 ```
 
-### `slot_to_epoch`
+#### `xor`
 
 ```python
-def slot_to_epoch(slot: Slot) -> Epoch:
+def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32:
     """
-    Return the epoch number of the given ``slot``.
+    Return the exclusive-or of two 32-byte strings.
     """
-    return slot // SLOTS_PER_EPOCH
+    return Bytes32(a ^ b for a, b in zip(bytes1, bytes2))
 ```
 
-### `get_previous_epoch`
-
 ```python
-def get_previous_epoch(state: BeaconState) -> Epoch:
-    """`
-    Return the previous epoch of the given ``state``.
-    Return the current epoch if it's genesis epoch.
+def int_to_bytes(n: uint64, length: uint64) -> bytes:
     """
-    current_epoch = get_current_epoch(state)
-    return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else current_epoch - 1
+    Return the ``length``-byte serialization of ``n``.
+    """
+    return n.to_bytes(length, ENDIANNESS)
 ```
 
-### `get_current_epoch`
+#### `bytes_to_int`
 
 ```python
-def get_current_epoch(state: BeaconState) -> Epoch:
+def bytes_to_int(data: bytes) -> uint64:
     """
-    Return the current epoch of the given ``state``.
+    Return the integer deserialization of ``data``.
     """
-    return slot_to_epoch(state.slot)
+    return int.from_bytes(data, ENDIANNESS)
 ```
 
-### `get_epoch_start_slot`
+### Crypto
 
-```python
-def get_epoch_start_slot(epoch: Epoch) -> Slot:
-    """
-    Return the starting slot of the given ``epoch``.
-    """
-    return epoch * SLOTS_PER_EPOCH
-```
+#### `hash`
+
+`def hash(data: bytes) -> Hash` is SHA256.
+
+#### `hash_tree_root`
+
+`def hash_tree_root(object: SSZSerializable) -> Hash` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
+
+#### `signing_root`
+
+`def signing_root(object: Container) -> Hash` is a function for computing signing messages, as defined in the [SSZ spec](../simple-serialize.md#self-signed-containers).
+
+#### `bls_verify`
+
+`bls_verify` is a function for verifying a BLS signature, as defined in the [BLS Signature spec](../bls_signature.md#bls_verify).
+
+#### `bls_verify_multiple`
+
+`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, as defined in the [BLS Signature spec](../bls_signature.md#bls_verify_multiple).
+
+#### `bls_aggregate_pubkeys`
+
+`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, as defined in the [BLS Signature spec](../bls_signature.md#bls_aggregate_pubkeys).
 
-### `is_active_validator`
+### Predicates
+
+#### `is_active_validator`
 
 ```python
 def is_active_validator(validator: Validator, epoch: Epoch) -> bool:
@@ -664,468 +623,474 @@ def is_active_validator(validator: Validator, epoch: Epoch) -> bool:
     return validator.activation_epoch <= epoch < validator.exit_epoch
 ```
 
-### `is_slashable_validator`
+#### `is_slashable_validator`
 
 ```python
 def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
     """
     Check if ``validator`` is slashable.
     """
-    return validator.slashed is False and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
+    return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
 ```
 
-### `get_active_validator_indices`
+#### `is_slashable_attestation_data`
 
 ```python
-def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]:
+def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
     """
-    Get active validator indices at ``epoch``.
+    Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
     """
-    return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)]
+    return (
+        # Double vote
+        (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or
+        # Surround vote
+        (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
+    )
 ```
 
-### `increase_balance`
+#### `is_valid_indexed_attestation`
 
 ```python
-def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
+def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
     """
-    Increase validator balance by ``delta``.
+    Verify validity of ``indexed_attestation``.
     """
-    state.balances[index] += delta
+    bit_0_indices = indexed_attestation.custody_bit_0_indices
+    bit_1_indices = indexed_attestation.custody_bit_1_indices
+
+    # Verify no index has custody bit equal to 1 [to be removed in phase 1]
+    if not len(bit_1_indices) == 0:
+        return False
+    # Verify max number of indices
+    if not len(bit_0_indices) + len(bit_1_indices) <= MAX_VALIDATORS_PER_COMMITTEE:
+        return False
+    # Verify index sets are disjoint
+    if not len(set(bit_0_indices).intersection(bit_1_indices)) == 0:
+        return False
+    # Verify indices are sorted
+    if not (bit_0_indices == sorted(bit_0_indices) and bit_1_indices == sorted(bit_1_indices)):
+        return False
+    # Verify aggregate signature
+    if not bls_verify_multiple(
+        pubkeys=[
+            bls_aggregate_pubkeys([state.validators[i].pubkey for i in bit_0_indices]),
+            bls_aggregate_pubkeys([state.validators[i].pubkey for i in bit_1_indices]),
+        ],
+        message_hashes=[
+            hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)),
+            hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)),
+        ],
+        signature=indexed_attestation.signature,
+        domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target.epoch),
+    ):
+        return False
+    return True
 ```
 
-### `decrease_balance`
+#### `is_valid_merkle_branch`
 
 ```python
-def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
+def is_valid_merkle_branch(leaf: Hash, branch: Sequence[Hash], depth: uint64, index: uint64, root: Hash) -> bool:
     """
-    Decrease validator balance by ``delta`` with underflow protection.
+    Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``.
     """
-    state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
+    value = leaf
+    for i in range(depth):
+        if index // (2**i) % 2:
+            value = hash(branch[i] + value)
+        else:
+            value = hash(value + branch[i])
+    return value == root
 ```
 
-### `get_epoch_committee_count`
+### Misc
+
+#### `compute_shuffled_index`
 
 ```python
-def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int:
+def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Hash) -> ValidatorIndex:
     """
-    Return the number of committees at ``epoch``.
+    Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
     """
-    active_validator_indices = get_active_validator_indices(state, epoch)
-    return max(
-        1,
-        min(
-            SHARD_COUNT // SLOTS_PER_EPOCH,
-            len(active_validator_indices) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
-        )
-    ) * SLOTS_PER_EPOCH
+    assert index < index_count
+
+    # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
+    # See the 'generalized domain' algorithm on page 3
+    for current_round in range(SHUFFLE_ROUND_COUNT):
+        pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
+        flip = ValidatorIndex((pivot + index_count - index) % index_count)
+        position = max(index, flip)
+        source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
+        byte = source[(position % 256) // 8]
+        bit = (byte >> (position % 8)) % 2
+        index = flip if bit else index
+
+    return ValidatorIndex(index)
 ```
 
-### `get_shard_delta`
+#### `compute_committee`
 
 ```python
-def get_shard_delta(state: BeaconState, epoch: Epoch) -> int:
+def compute_committee(indices: Sequence[ValidatorIndex],
+                      seed: Hash,
+                      index: uint64,
+                      count: uint64) -> Sequence[ValidatorIndex]:
     """
-    Return the number of shards to increment ``state.latest_start_shard`` during ``epoch``.
+    Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
     """
-    return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH)
+    start = (len(indices) * index) // count
+    end = (len(indices) * (index + 1)) // count
+    return [indices[compute_shuffled_index(ValidatorIndex(i), len(indices), seed)] for i in range(start, end)]
 ```
 
-### `get_epoch_start_shard`
+#### `compute_epoch_of_slot`
 
 ```python
-def get_epoch_start_shard(state: BeaconState, epoch: Epoch) -> Shard:
-    assert epoch <= get_current_epoch(state) + 1
-    check_epoch = get_current_epoch(state) + 1
-    shard = (state.latest_start_shard + get_shard_delta(state, get_current_epoch(state))) % SHARD_COUNT
-    while check_epoch > epoch:
-        check_epoch -= 1
-        shard = (shard + SHARD_COUNT - get_shard_delta(state, check_epoch)) % SHARD_COUNT
-    return shard
+def compute_epoch_of_slot(slot: Slot) -> Epoch:
+    """
+    Return the epoch number of ``slot``.
+    """
+    return Epoch(slot // SLOTS_PER_EPOCH)
 ```
 
-### `get_attestation_data_slot`
+#### `compute_start_slot_of_epoch`
 
 ```python
-def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot:
-    committee_count = get_epoch_committee_count(state, data.target_epoch)
-    offset = (data.crosslink.shard + SHARD_COUNT - get_epoch_start_shard(state, data.target_epoch)) % SHARD_COUNT
-    return get_epoch_start_slot(data.target_epoch) + offset // (committee_count // SLOTS_PER_EPOCH)
+def compute_start_slot_of_epoch(epoch: Epoch) -> Slot:
+    """
+    Return the start slot of ``epoch``.
+    """
+    return Slot(epoch * SLOTS_PER_EPOCH)
 ```
 
-### `get_block_root_at_slot`
+#### `compute_activation_exit_epoch`
 
 ```python
-def get_block_root_at_slot(state: BeaconState,
-                           slot: Slot) -> Bytes32:
+def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
     """
-    Return the block root at a recent ``slot``.
+    Return the epoch during which validator activations and exits initiated in ``epoch`` take effect.
     """
-    assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT
-    return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT]
+    return Epoch(epoch + 1 + ACTIVATION_EXIT_DELAY)
 ```
 
-### `get_block_root`
+#### `compute_domain`
 
 ```python
-def get_block_root(state: BeaconState,
-                   epoch: Epoch) -> Bytes32:
+def compute_domain(domain_type: DomainType, fork_version: Version=Version()) -> Domain:
     """
-    Return the block root at a recent ``epoch``.
+    Return the domain for the ``domain_type`` and ``fork_version``.
     """
-    return get_block_root_at_slot(state, get_epoch_start_slot(epoch))
+    return Domain(domain_type + fork_version)
 ```
 
-### `get_randao_mix`
+### Beacon state accessors
+
+#### `get_current_epoch`
 
 ```python
-def get_randao_mix(state: BeaconState,
-                   epoch: Epoch) -> Bytes32:
+def get_current_epoch(state: BeaconState) -> Epoch:
     """
-    Return the randao mix at a recent ``epoch``.
-    ``epoch`` expected to be between (current_epoch - LATEST_RANDAO_MIXES_LENGTH, current_epoch].
+    Return the current epoch.
     """
-    return state.latest_randao_mixes[epoch % LATEST_RANDAO_MIXES_LENGTH]
+    return compute_epoch_of_slot(state.slot)
 ```
 
-### `get_active_index_root`
+#### `get_previous_epoch`
 
 ```python
-def get_active_index_root(state: BeaconState,
-                          epoch: Epoch) -> Bytes32:
-    """
-    Return the index root at a recent ``epoch``.
-    ``epoch`` expected to be between
-    (current_epoch - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY, current_epoch + ACTIVATION_EXIT_DELAY].
+def get_previous_epoch(state: BeaconState) -> Epoch:
+    """
+    Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
     """
-    return state.latest_active_index_roots[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH]
+    current_epoch = get_current_epoch(state)
+    return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1)
 ```
 
-### `generate_seed`
+#### `get_block_root`
 
 ```python
-def generate_seed(state: BeaconState,
-                  epoch: Epoch) -> Bytes32:
+def get_block_root(state: BeaconState, epoch: Epoch) -> Hash:
     """
-    Generate a seed for the given ``epoch``.
+    Return the block root at the start of a recent ``epoch``.
     """
-    return hash(
-        get_randao_mix(state, epoch + LATEST_RANDAO_MIXES_LENGTH - MIN_SEED_LOOKAHEAD) +
-        get_active_index_root(state, epoch) +
-        int_to_bytes(epoch, length=32)
-    )
+    return get_block_root_at_slot(state, compute_start_slot_of_epoch(epoch))
 ```
 
-### `get_beacon_proposer_index`
+#### `get_block_root_at_slot`
 
 ```python
-def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
+def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Hash:
     """
-    Return the current beacon proposer index.
+    Return the block root at a recent ``slot``.
     """
-    epoch = get_current_epoch(state)
-    committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
-    offset = committees_per_slot * (state.slot % SLOTS_PER_EPOCH)
-    shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
-    first_committee = get_crosslink_committee(state, epoch, shard)
-    MAX_RANDOM_BYTE = 2**8 - 1
-    seed = generate_seed(state, epoch)
-    i = 0
-    while True:
-        candidate_index = first_committee[(epoch + i) % len(first_committee)]
-        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
-        effective_balance = state.validator_registry[candidate_index].effective_balance
-        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
-            return candidate_index
-        i += 1
+    assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT
+    return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT]
 ```
 
-### `verify_merkle_branch`
+#### `get_randao_mix`
 
 ```python
-def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool:
+def get_randao_mix(state: BeaconState, epoch: Epoch) -> Hash:
     """
-    Verify that the given ``leaf`` is on the merkle branch ``proof``
-    starting with the given ``root``.
+    Return the randao mix at a recent ``epoch``.
     """
-    value = leaf
-    for i in range(depth):
-        if index // (2**i) % 2:
-            value = hash(proof[i] + value)
-        else:
-            value = hash(value + proof[i])
-    return value == root
+    return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
 ```
 
-### `get_shuffled_index`
+#### `get_active_validator_indices`
 
 ```python
-def get_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -> ValidatorIndex:
+def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
     """
-    Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
+    Return the sequence of active validator indices at ``epoch``.
     """
-    assert index < index_count
-    assert index_count <= 2**40
-
-    # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
-    # See the 'generalized domain' algorithm on page 3
-    for round in range(SHUFFLE_ROUND_COUNT):
-        pivot = bytes_to_int(hash(seed + int_to_bytes(round, length=1))[0:8]) % index_count
-        flip = (pivot + index_count - index) % index_count
-        position = max(index, flip)
-        source = hash(seed + int_to_bytes(round, length=1) + int_to_bytes(position // 256, length=4))
-        byte = source[(position % 256) // 8]
-        bit = (byte >> (position % 8)) % 2
-        index = flip if bit else index
-
-    return index
+    return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)]
 ```
 
-### `compute_committee`
+#### `get_validator_churn_limit`
 
 ```python
-def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
-    start = (len(indices) * index) // count
-    end = (len(indices) * (index + 1)) // count
-    return [indices[get_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
+def get_validator_churn_limit(state: BeaconState) -> uint64:
+    """
+    Return the validator churn limit for the current epoch.
+    """
+    active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
+    return max(MIN_PER_EPOCH_CHURN_LIMIT, len(active_validator_indices) // CHURN_LIMIT_QUOTIENT)
 ```
 
-### `get_crosslink_committee`
+#### `get_seed`
 
 ```python
-def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> List[ValidatorIndex]:
-    return compute_committee(
-        indices=get_active_validator_indices(state, epoch),
-        seed=generate_seed(state, epoch),
-        index=(shard + SHARD_COUNT - get_epoch_start_shard(state, epoch)) % SHARD_COUNT,
-        count=get_epoch_committee_count(state, epoch),
-    )
+def get_seed(state: BeaconState, epoch: Epoch) -> Hash:
+    """
+    Return the seed at ``epoch``.
+    """
+    mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD))  # Avoid underflow
+    active_index_root = state.active_index_roots[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
+    return hash(mix + active_index_root + int_to_bytes(epoch, length=32))
 ```
 
-### `get_attesting_indices`
+#### `get_committee_count`
 
 ```python
-def get_attesting_indices(state: BeaconState,
-                          attestation_data: AttestationData,
-                          bitfield: bytes) -> List[ValidatorIndex]:
+def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64:
     """
-    Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``.
+    Return the number of committees at ``epoch``.
     """
-    committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.crosslink.shard)
-    assert verify_bitfield(bitfield, len(committee))
-    return sorted([index for i, index in enumerate(committee) if get_bitfield_bit(bitfield, i) == 0b1])
+    committees_per_slot = max(1, min(
+        SHARD_COUNT // SLOTS_PER_EPOCH,
+        len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
+    ))
+    return committees_per_slot * SLOTS_PER_EPOCH
 ```
 
-### `int_to_bytes`
+#### `get_crosslink_committee`
 
 ```python
-def int_to_bytes(integer: int, length: int) -> bytes:
-    return integer.to_bytes(length, 'little')
+def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
+    """
+    Return the crosslink committee at ``epoch`` for ``shard``.
+    """
+    return compute_committee(
+        indices=get_active_validator_indices(state, epoch),
+        seed=get_seed(state, epoch),
+        index=(shard + SHARD_COUNT - get_start_shard(state, epoch)) % SHARD_COUNT,
+        count=get_committee_count(state, epoch),
+    )
 ```
 
-### `bytes_to_int`
+#### `get_start_shard`
 
 ```python
-def bytes_to_int(data: bytes) -> int:
-    return int.from_bytes(data, 'little')
+def get_start_shard(state: BeaconState, epoch: Epoch) -> Shard:
+    """
+    Return the start shard of the 0th committee at ``epoch``.
+    """
+    assert epoch <= get_current_epoch(state) + 1
+    check_epoch = Epoch(get_current_epoch(state) + 1)
+    shard = Shard((state.start_shard + get_shard_delta(state, get_current_epoch(state))) % SHARD_COUNT)
+    while check_epoch > epoch:
+        check_epoch -= Epoch(1)
+        shard = Shard((shard + SHARD_COUNT - get_shard_delta(state, check_epoch)) % SHARD_COUNT)
+    return shard
 ```
 
-### `get_total_balance`
+#### `get_shard_delta`
 
 ```python
-def get_total_balance(state: BeaconState, indices: List[ValidatorIndex]) -> Gwei:
+def get_shard_delta(state: BeaconState, epoch: Epoch) -> uint64:
     """
-    Return the combined effective balance of the ``indices``. (1 Gwei minimum to avoid divisions by zero.)
+    Return the number of shards to increment ``state.start_shard`` at ``epoch``.
     """
-    return max(sum([state.validator_registry[index].effective_balance for index in indices]), 1)
+    return min(get_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH)
 ```
 
-### `get_domain`
+#### `get_beacon_proposer_index`
 
 ```python
-def get_domain(state: BeaconState,
-               domain_type: int,
-               message_epoch: int=None) -> int:
+def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
     """
-    Return the signature domain (fork version concatenated with domain type) of a message.
+    Return the beacon proposer index at the current slot.
     """
-    epoch = get_current_epoch(state) if message_epoch is None else message_epoch
-    fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
-    return bls_domain(domain_type, fork_version)
+    epoch = get_current_epoch(state)
+    committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH
+    offset = committees_per_slot * (state.slot % SLOTS_PER_EPOCH)
+    shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT)
+    first_committee = get_crosslink_committee(state, epoch, shard)
+    MAX_RANDOM_BYTE = 2**8 - 1
+    seed = get_seed(state, epoch)
+    i = 0
+    while True:
+        candidate_index = first_committee[(epoch + i) % len(first_committee)]
+        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
+        effective_balance = state.validators[candidate_index].effective_balance
+        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
+            return ValidatorIndex(candidate_index)
+        i += 1
 ```
 
-### `get_bitfield_bit`
+#### `get_attestation_data_slot`
 
 ```python
-def get_bitfield_bit(bitfield: bytes, i: int) -> int:
+def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot:
     """
-    Extract the bit in ``bitfield`` at position ``i``.
+    Return the slot corresponding to the attestation ``data``.
     """
-    return (bitfield[i // 8] >> (i % 8)) % 2
+    committee_count = get_committee_count(state, data.target.epoch)
+    offset = (data.crosslink.shard + SHARD_COUNT - get_start_shard(state, data.target.epoch)) % SHARD_COUNT
+    return Slot(compute_start_slot_of_epoch(data.target.epoch) + offset // (committee_count // SLOTS_PER_EPOCH))
 ```
 
-### `verify_bitfield`
+#### `get_compact_committees_root`
 
 ```python
-def verify_bitfield(bitfield: bytes, committee_size: int) -> bool:
+def get_compact_committees_root(state: BeaconState, epoch: Epoch) -> Hash:
     """
-    Verify ``bitfield`` against the ``committee_size``.
+    Return the compact committee root at ``epoch``.
     """
-    if len(bitfield) != (committee_size + 7) // 8:
-        return False
+    committees = [CompactCommittee() for _ in range(SHARD_COUNT)]
+    start_shard = get_start_shard(state, epoch)
+    for committee_number in range(get_committee_count(state, epoch)):
+        shard = Shard((start_shard + committee_number) % SHARD_COUNT)
+        for index in get_crosslink_committee(state, epoch, shard):
+            validator = state.validators[index]
+            committees[shard].pubkeys.append(validator.pubkey)
+            compact_balance = validator.effective_balance // EFFECTIVE_BALANCE_INCREMENT
+            # `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits)
+            compact_validator = uint64((index << 16) + (validator.slashed << 15) + compact_balance)
+            committees[shard].compact_validators.append(compact_validator)
+    return hash_tree_root(Vector[CompactCommittee, SHARD_COUNT](committees))
+```
 
-    # Check `bitfield` is padded with zero bits only
-    for i in range(committee_size, len(bitfield) * 8):
-        if get_bitfield_bit(bitfield, i) == 0b1:
-            return False
+#### `get_total_balance`
 
-    return True
+```python
+def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
+    """
+    Return the combined effective balance of the ``indices``. (1 Gwei minimum to avoid divisions by zero.)
+    """
+    return Gwei(max(sum([state.validators[index].effective_balance for index in indices]), 1))
 ```
 
-### `convert_to_indexed`
+#### `get_total_active_balance`
 
 ```python
-def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedAttestation:
+def get_total_active_balance(state: BeaconState) -> Gwei:
     """
-    Convert ``attestation`` to (almost) indexed-verifiable form.
+    Return the combined effective balance of the active validators.
     """
-    attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)
-    custody_bit_1_indices = get_attesting_indices(state, attestation.data, attestation.custody_bitfield)
-    custody_bit_0_indices = [index for index in attesting_indices if index not in custody_bit_1_indices]
-
-    return IndexedAttestation(
-        custody_bit_0_indices=custody_bit_0_indices,
-        custody_bit_1_indices=custody_bit_1_indices,
-        data=attestation.data,
-        signature=attestation.signature,
-    )
+    return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
 ```
 
-### `validate_indexed_attestation`
+#### `get_domain`
 
 ```python
-def validate_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> None:
+def get_domain(state: BeaconState, domain_type: DomainType, message_epoch: Epoch=None) -> Domain:
     """
-    Verify validity of ``indexed_attestation``.
+    Return the signature domain (fork version concatenated with domain type) of a message.
     """
-    bit_0_indices = indexed_attestation.custody_bit_0_indices
-    bit_1_indices = indexed_attestation.custody_bit_1_indices
-
-    # Verify no index has custody bit equal to 1 [to be removed in phase 1]
-    assert len(bit_1_indices) == 0
-    # Verify max number of indices
-    assert len(bit_0_indices) + len(bit_1_indices) <= MAX_INDICES_PER_ATTESTATION
-    # Verify index sets are disjoint
-    assert len(set(bit_0_indices).intersection(bit_1_indices)) == 0
-    # Verify indices are sorted
-    assert bit_0_indices == sorted(bit_0_indices) and bit_1_indices == sorted(bit_1_indices)
-    # Verify aggregate signature
-    assert bls_verify_multiple(
-        pubkeys=[
-            bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_0_indices]),
-            bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_1_indices]),
-        ],
-        message_hashes=[
-            hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)),
-            hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)),
-        ],
-        signature=indexed_attestation.signature,
-        domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target_epoch),
-    )
+    epoch = get_current_epoch(state) if message_epoch is None else message_epoch
+    fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
+    return compute_domain(domain_type, fork_version)
 ```
 
-### `is_slashable_attestation_data`
+#### `get_indexed_attestation`
 
 ```python
-def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
+def get_indexed_attestation(state: BeaconState, attestation: Attestation) -> IndexedAttestation:
     """
-    Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
+    Return the indexed attestation corresponding to ``attestation``.
     """
-    return (
-        # Double vote
-        (data_1 != data_2 and data_1.target_epoch == data_2.target_epoch) or
-        # Surround vote
-        (data_1.source_epoch < data_2.source_epoch and data_2.target_epoch < data_1.target_epoch)
+    attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
+    custody_bit_1_indices = get_attesting_indices(state, attestation.data, attestation.custody_bits)
+    assert custody_bit_1_indices.issubset(attesting_indices)
+    custody_bit_0_indices = attesting_indices.difference(custody_bit_1_indices)
+
+    return IndexedAttestation(
+        custody_bit_0_indices=sorted(custody_bit_0_indices),
+        custody_bit_1_indices=sorted(custody_bit_1_indices),
+        data=attestation.data,
+        signature=attestation.signature,
     )
 ```
 
-### `integer_squareroot`
+#### `get_attesting_indices`
 
 ```python
-def integer_squareroot(n: int) -> int:
+def get_attesting_indices(state: BeaconState,
+                          data: AttestationData,
+                          bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Set[ValidatorIndex]:
     """
-    The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``.
+    Return the set of attesting indices corresponding to ``data`` and ``bits``.
     """
-    assert n >= 0
-    x = n
-    y = (x + 1) // 2
-    while y < x:
-        x = y
-        y = (x + n // x) // 2
-    return x
+    committee = get_crosslink_committee(state, data.target.epoch, data.crosslink.shard)
+    return set(index for i, index in enumerate(committee) if bits[i])
 ```
 
-### `get_delayed_activation_exit_epoch`
+### Beacon state mutators
+
+#### `increase_balance`
 
 ```python
-def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch:
+def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
     """
-    Return the epoch at which an activation or exit triggered in ``epoch`` takes effect.
+    Increase the validator balance at index ``index`` by ``delta``.
     """
-    return epoch + 1 + ACTIVATION_EXIT_DELAY
+    state.balances[index] += delta
 ```
 
-### `get_churn_limit`
+#### `decrease_balance`
 
 ```python
-def get_churn_limit(state: BeaconState) -> int:
+def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
     """
-    Return the churn limit based on the active validator count.
+    Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
     """
-    return max(
-        MIN_PER_EPOCH_CHURN_LIMIT,
-        len(get_active_validator_indices(state, get_current_epoch(state))) // CHURN_LIMIT_QUOTIENT
-    )
+    state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
 ```
 
-### `bls_verify`
-
-`bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](../bls_signature.md#bls_verify).
-
-### `bls_verify_multiple`
-
-`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, defined in the [BLS Signature spec](../bls_signature.md#bls_verify_multiple).
-
-### `bls_aggregate_pubkeys`
-
-`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](../bls_signature.md#bls_aggregate_pubkeys).
-
-### Routines for updating validator status
-
-*Note*: All functions in this section mutate `state`.
-
 #### `initiate_validator_exit`
 
 ```python
 def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
     """
-    Initiate the exit of the validator of the given ``index``.
+    Initiate the exit of the validator with index ``index``.
     """
     # Return if validator already initiated exit
-    validator = state.validator_registry[index]
+    validator = state.validators[index]
     if validator.exit_epoch != FAR_FUTURE_EPOCH:
         return
 
     # Compute exit queue epoch
-    exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH]
-    exit_queue_epoch = max(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])
-    exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch])
-    if exit_queue_churn >= get_churn_limit(state):
-        exit_queue_epoch += 1
+    exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
+    exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
+    exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
+    if exit_queue_churn >= get_validator_churn_limit(state):
+        exit_queue_epoch += Epoch(1)
 
     # Set validator exit epoch and withdrawable epoch
     validator.exit_epoch = exit_queue_epoch
-    validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+    validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
 ```
 
 #### `slash_validator`
@@ -1137,73 +1102,89 @@ def slash_validator(state: BeaconState,
     """
     Slash the validator with index ``slashed_index``.
     """
-    current_epoch = get_current_epoch(state)
+    epoch = get_current_epoch(state)
     initiate_validator_exit(state, slashed_index)
-    state.validator_registry[slashed_index].slashed = True
-    state.validator_registry[slashed_index].withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH
-    slashed_balance = state.validator_registry[slashed_index].effective_balance
-    state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance
+    validator = state.validators[slashed_index]
+    validator.slashed = True
+    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
+    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
+    decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT)
 
+    # Apply proposer and whistleblower rewards
     proposer_index = get_beacon_proposer_index(state)
     if whistleblower_index is None:
         whistleblower_index = proposer_index
-    whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT
-    proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT
+    whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
+    proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
     increase_balance(state, proposer_index, proposer_reward)
-    increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward)
-    decrease_balance(state, slashed_index, whistleblowing_reward)
+    increase_balance(state, whistleblower_index, whistleblower_reward - proposer_reward)
 ```
 
 ## Genesis
 
-### `Eth2Genesis`
-
-When enough deposits of size `MAX_EFFECTIVE_BALANCE` have been made to the deposit contract an `Eth2Genesis` log is emitted triggering the genesis of the beacon chain. Let:
-
-* `eth2genesis` be the object corresponding to `Eth2Genesis`
-* `genesis_eth1_data` be object of type `Eth1Data` where
-    * `genesis_eth1_data.deposit_root = eth2genesis.deposit_root`
-    * `genesis_eth1_data.deposit_count = eth2genesis.deposit_count`
-    * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log
-* `genesis_deposits` be the object of type `List[Deposit]` with deposits ordered chronologically up to and including the deposit that triggered the `Eth2Genesis` log
-
-### Genesis state
+Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 block, let `candidate_state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)` where:
 
-Let `genesis_state = get_genesis_beacon_state(genesis_deposits, eth2genesis.genesis_time, genesis_eth1_data)`.
+- `eth1_block_hash` is the hash of the Ethereum 1.0 block
+- `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash`
+- `deposits` is the sequence of all deposits, ordered chronologically, up to the block with hash `eth1_block_hash`
 
 ```python
-def get_genesis_beacon_state(deposits: List[Deposit], genesis_time: int, genesis_eth1_data: Eth1Data) -> BeaconState:
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash,
+                                      eth1_timestamp: uint64,
+                                      deposits: Sequence[Deposit]) -> BeaconState:
     state = BeaconState(
-        genesis_time=genesis_time,
-        latest_eth1_data=genesis_eth1_data,
+        genesis_time=eth1_timestamp - eth1_timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY,
+        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
         latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
     )
 
-    # Process genesis deposits
-    for deposit in deposits:
+    # Process deposits
+    leaves = list(map(lambda deposit: deposit.data, deposits))
+    for index, deposit in enumerate(deposits):
+        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
+        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
         process_deposit(state, deposit)
 
-    # Process genesis activations
-    for validator in state.validator_registry:
-        if validator.effective_balance >= MAX_EFFECTIVE_BALANCE:
+    # Process activations
+    for index, validator in enumerate(state.validators):
+        balance = state.balances[index]
+        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
             validator.activation_eligibility_epoch = GENESIS_EPOCH
             validator.activation_epoch = GENESIS_EPOCH
 
-    # Populate latest_active_index_roots
-    genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH))
-    for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH):
-        state.latest_active_index_roots[index] = genesis_active_index_root
-
+    # Populate active_index_roots and compact_committees_roots
+    indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, GENESIS_EPOCH))
+    active_index_root = hash_tree_root(indices_list)
+    committee_root = get_compact_committees_root(state, GENESIS_EPOCH)
+    for index in range(EPOCHS_PER_HISTORICAL_VECTOR):
+        state.active_index_roots[index] = active_index_root
+        state.compact_committees_roots[index] = committee_root
     return state
 ```
 
+### Genesis state
+
+Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time.
+
+```python
+def is_valid_genesis_state(state: BeaconState) -> bool:
+    if state.genesis_time < MIN_GENESIS_TIME:
+        return False
+    if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
+        return False
+    return True
+```
+
+*Note*: The `is_valid_genesis_state` function (including `MIN_GENESIS_TIME` and `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT`) is a placeholder for testing. It has yet to be finalized by the community, and can be updated as necessary.
+
 ### Genesis block
 
 Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
 
 ## Beacon chain state transition function
 
-The post-state corresponding to a pre-state `state` and a block `block` is defined as `state_transition(state, block)`. State transitions that trigger an unhandled excpetion (e.g. a failed `assert` or an out-of-range list access) are considered invalid.
+The post-state corresponding to a pre-state `state` and a block `block` is defined as `state_transition(state, block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid.
 
 ```python
 def state_transition(state: BeaconState, block: BeaconBlock, validate_state_root: bool=False) -> BeaconState:
@@ -1223,30 +1204,28 @@ def process_slots(state: BeaconState, slot: Slot) -> None:
     assert state.slot <= slot
     while state.slot < slot:
         process_slot(state)
-        # Process epoch on the first slot of the next epoch
+        # Process epoch on the start slot of the next epoch
         if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
             process_epoch(state)
-        state.slot += 1
+        state.slot += Slot(1)
 ```
 
 ```python
 def process_slot(state: BeaconState) -> None:
     # Cache state root
     previous_state_root = hash_tree_root(state)
-    state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root
-
+    state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root
     # Cache latest block header state root
-    if state.latest_block_header.state_root == ZERO_HASH:
+    if state.latest_block_header.state_root == Hash():
         state.latest_block_header.state_root = previous_state_root
-
     # Cache block root
     previous_block_root = signing_root(state.latest_block_header)
-    state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root
+    state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root
 ```
 
 ### Epoch processing
 
-Note: the `# @LabelHere` lines below are placeholders to show that code will be inserted here in a future phase.
+*Note*: The `# @LabelHere` lines below are placeholders to show that code will be inserted here in a future phase.
 
 ```python
 def process_epoch(state: BeaconState) -> None:
@@ -1264,26 +1243,21 @@ def process_epoch(state: BeaconState) -> None:
 #### Helper functions
 
 ```python
-def get_total_active_balance(state: BeaconState) -> Gwei:
-    return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state)))
-```
-
-```python
-def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
-    assert epoch in (get_current_epoch(state), get_previous_epoch(state))
+def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
+    assert epoch in (get_previous_epoch(state), get_current_epoch(state))
     return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations
 ```
 
 ```python
-def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
+def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
     return [
         a for a in get_matching_source_attestations(state, epoch)
-        if a.data.target_root == get_block_root(state, epoch)
+        if a.data.target.root == get_block_root(state, epoch)
     ]
 ```
 
 ```python
-def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
+def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
     return [
         a for a in get_matching_source_attestations(state, epoch)
         if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data))
@@ -1292,22 +1266,22 @@ def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[Pen
 
 ```python
 def get_unslashed_attesting_indices(state: BeaconState,
-                                    attestations: List[PendingAttestation]) -> List[ValidatorIndex]:
-    output = set()
+                                    attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
+    output = set()  # type: Set[ValidatorIndex]
     for a in attestations:
-        output = output.union(get_attesting_indices(state, a.data, a.aggregation_bitfield))
-    return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output)))
+        output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
+    return set(filter(lambda index: not state.validators[index].slashed, list(output)))
 ```
 
 ```python
-def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei:
+def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
     return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
 ```
 
 ```python
 def get_winning_crosslink_and_attesting_indices(state: BeaconState,
                                                 epoch: Epoch,
-                                                shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]:
+                                                shard: Shard) -> Tuple[Crosslink, Set[ValidatorIndex]]:
     attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard]
     crosslinks = list(filter(
         lambda c: hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)),
@@ -1330,46 +1304,38 @@ def process_justification_and_finalization(state: BeaconState) -> None:
 
     previous_epoch = get_previous_epoch(state)
     current_epoch = get_current_epoch(state)
-    old_previous_justified_epoch = state.previous_justified_epoch
-    old_current_justified_epoch = state.current_justified_epoch
+    old_previous_justified_checkpoint = state.previous_justified_checkpoint
+    old_current_justified_checkpoint = state.current_justified_checkpoint
 
     # Process justifications
-    state.previous_justified_epoch = state.current_justified_epoch
-    state.previous_justified_root = state.current_justified_root
-    state.justification_bitfield = (state.justification_bitfield << 1) % 2**64
-    previous_epoch_matching_target_balance = get_attesting_balance(
-        state, get_matching_target_attestations(state, previous_epoch)
-    )
-    if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2:
-        state.current_justified_epoch = previous_epoch
-        state.current_justified_root = get_block_root(state, state.current_justified_epoch)
-        state.justification_bitfield |= (1 << 1)
-    current_epoch_matching_target_balance = get_attesting_balance(
-        state, get_matching_target_attestations(state, current_epoch)
-    )
-    if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2:
-        state.current_justified_epoch = current_epoch
-        state.current_justified_root = get_block_root(state, state.current_justified_epoch)
-        state.justification_bitfield |= (1 << 0)
+    state.previous_justified_checkpoint = state.current_justified_checkpoint
+    state.justification_bits[1:] = state.justification_bits[:-1]
+    state.justification_bits[0] = 0b0
+    matching_target_attestations = get_matching_target_attestations(state, previous_epoch)  # Previous epoch
+    if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
+        state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
+                                                        root=get_block_root(state, previous_epoch))
+        state.justification_bits[1] = 0b1
+    matching_target_attestations = get_matching_target_attestations(state, current_epoch)  # Current epoch
+    if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
+        state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
+                                                        root=get_block_root(state, current_epoch))
+        state.justification_bits[0] = 0b1
 
     # Process finalizations
-    bitfield = state.justification_bitfield
+    bits = state.justification_bits
     # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
-    if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch + 3 == current_epoch:
-        state.finalized_epoch = old_previous_justified_epoch
-        state.finalized_root = get_block_root(state, state.finalized_epoch)
+    if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
+        state.finalized_checkpoint = old_previous_justified_checkpoint
     # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
-    if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch + 2 == current_epoch:
-        state.finalized_epoch = old_previous_justified_epoch
-        state.finalized_root = get_block_root(state, state.finalized_epoch)
+    if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
+        state.finalized_checkpoint = old_previous_justified_checkpoint
     # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
-    if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch + 2 == current_epoch:
-        state.finalized_epoch = old_current_justified_epoch
-        state.finalized_root = get_block_root(state, state.finalized_epoch)
+    if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
+        state.finalized_checkpoint = old_current_justified_checkpoint
     # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
-    if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch + 1 == current_epoch:
-        state.finalized_epoch = old_current_justified_epoch
-        state.finalized_root = get_block_root(state, state.finalized_epoch)
+    if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
+        state.finalized_checkpoint = old_current_justified_checkpoint
 ```
 
 #### Crosslinks
@@ -1378,9 +1344,9 @@ def process_justification_and_finalization(state: BeaconState) -> None:
 def process_crosslinks(state: BeaconState) -> None:
     state.previous_crosslinks = [c for c in state.current_crosslinks]
     for epoch in (get_previous_epoch(state), get_current_epoch(state)):
-        for offset in range(get_epoch_committee_count(state, epoch)):
-            shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
-            crosslink_committee = get_crosslink_committee(state, epoch, shard)
+        for offset in range(get_committee_count(state, epoch)):
+            shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT)
+            crosslink_committee = set(get_crosslink_committee(state, epoch, shard))
             winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
             if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee):
                 state.current_crosslinks[shard] = winning_crosslink
@@ -1391,18 +1357,18 @@ def process_crosslinks(state: BeaconState) -> None:
 ```python
 def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
     total_balance = get_total_active_balance(state)
-    effective_balance = state.validator_registry[index].effective_balance
-    return effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH
+    effective_balance = state.validators[index].effective_balance
+    return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
 ```
 
 ```python
-def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
+def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
     previous_epoch = get_previous_epoch(state)
     total_balance = get_total_active_balance(state)
-    rewards = [0 for _ in range(len(state.validator_registry))]
-    penalties = [0 for _ in range(len(state.validator_registry))]
+    rewards = [Gwei(0) for _ in range(len(state.validators))]
+    penalties = [Gwei(0) for _ in range(len(state.validators))]
     eligible_validator_indices = [
-        index for index, v in enumerate(state.validator_registry)
+        ValidatorIndex(index) for index, v in enumerate(state.validators)
         if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
     ]
 
@@ -1423,33 +1389,39 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
     for index in get_unslashed_attesting_indices(state, matching_source_attestations):
         attestation = min([
             a for a in matching_source_attestations
-            if index in get_attesting_indices(state, a.data, a.aggregation_bitfield)
+            if index in get_attesting_indices(state, a.data, a.aggregation_bits)
         ], key=lambda a: a.inclusion_delay)
-        rewards[attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT
-        rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // attestation.inclusion_delay
+        proposer_reward = Gwei(get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT)
+        rewards[attestation.proposer_index] += proposer_reward
+        max_attester_reward = get_base_reward(state, index) - proposer_reward
+        rewards[index] += Gwei(
+            max_attester_reward
+            * (SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY - attestation.inclusion_delay)
+            // SLOTS_PER_EPOCH
+        )
 
     # Inactivity penalty
-    finality_delay = previous_epoch - state.finalized_epoch
+    finality_delay = previous_epoch - state.finalized_checkpoint.epoch
     if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
         matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
         for index in eligible_validator_indices:
-            penalties[index] += BASE_REWARDS_PER_EPOCH * get_base_reward(state, index)
+            penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * get_base_reward(state, index))
             if index not in matching_target_attesting_indices:
-                penalties[index] += (
-                    state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
+                penalties[index] += Gwei(
+                    state.validators[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
                 )
 
     return rewards, penalties
 ```
 
 ```python
-def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
-    rewards = [0 for index in range(len(state.validator_registry))]
-    penalties = [0 for index in range(len(state.validator_registry))]
+def get_crosslink_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
+    rewards = [Gwei(0) for _ in range(len(state.validators))]
+    penalties = [Gwei(0) for _ in range(len(state.validators))]
     epoch = get_previous_epoch(state)
-    for offset in range(get_epoch_committee_count(state, epoch)):
-        shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
-        crosslink_committee = get_crosslink_committee(state, epoch, shard)
+    for offset in range(get_committee_count(state, epoch)):
+        shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT)
+        crosslink_committee = set(get_crosslink_committee(state, epoch, shard))
         winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard)
         attesting_balance = get_total_balance(state, attesting_indices)
         committee_balance = get_total_balance(state, crosslink_committee)
@@ -1469,9 +1441,9 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
 
     rewards1, penalties1 = get_attestation_deltas(state)
     rewards2, penalties2 = get_crosslink_deltas(state)
-    for i in range(len(state.validator_registry)):
-        increase_balance(state, i, rewards1[i] + rewards2[i])
-        decrease_balance(state, i, penalties1[i] + penalties2[i])
+    for index in range(len(state.validators)):
+        increase_balance(state, ValidatorIndex(index), rewards1[index] + rewards2[index])
+        decrease_balance(state, ValidatorIndex(index), penalties1[index] + penalties2[index])
 ```
 
 #### Registry updates
@@ -1479,48 +1451,39 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
 ```python
 def process_registry_updates(state: BeaconState) -> None:
     # Process activation eligibility and ejections
-    for index, validator in enumerate(state.validator_registry):
+    for index, validator in enumerate(state.validators):
         if (
-            validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
-            validator.effective_balance >= MAX_EFFECTIVE_BALANCE
+            validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
+            and validator.effective_balance == MAX_EFFECTIVE_BALANCE
         ):
             validator.activation_eligibility_epoch = get_current_epoch(state)
 
         if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
-            initiate_validator_exit(state, index)
+            initiate_validator_exit(state, ValidatorIndex(index))
 
     # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch
     activation_queue = sorted([
-        index for index, validator in enumerate(state.validator_registry) if
-        validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
-        validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
-    ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
+        index for index, validator in enumerate(state.validators)
+        if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH
+        and validator.activation_epoch >= compute_activation_exit_epoch(state.finalized_checkpoint.epoch)
+    ], key=lambda index: state.validators[index].activation_eligibility_epoch)
     # Dequeued validators for activation up to churn limit (without resetting activation epoch)
-    for index in activation_queue[:get_churn_limit(state)]:
-        validator = state.validator_registry[index]
+    for index in activation_queue[:get_validator_churn_limit(state)]:
+        validator = state.validators[index]
         if validator.activation_epoch == FAR_FUTURE_EPOCH:
-            validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
+            validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
 ```
 
 #### Slashings
 
 ```python
 def process_slashings(state: BeaconState) -> None:
-    current_epoch = get_current_epoch(state)
+    epoch = get_current_epoch(state)
     total_balance = get_total_active_balance(state)
-
-    # Compute slashed balances in the current epoch
-    total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH]
-    total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]
-    total_penalties = total_at_end - total_at_start
-
-    for index, validator in enumerate(state.validator_registry):
-        if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2:
-            penalty = max(
-                validator.effective_balance * min(total_penalties * 3, total_balance) // total_balance,
-                validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT
-            )
-            decrease_balance(state, index, penalty)
+    for index, validator in enumerate(state.validators):
+        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
+            penalty = validator.effective_balance * min(sum(state.slashings) * 3, total_balance) // total_balance
+            decrease_balance(state, ValidatorIndex(index), penalty)
 ```
 
 #### Final updates
@@ -1528,35 +1491,33 @@ def process_slashings(state: BeaconState) -> None:
 ```python
 def process_final_updates(state: BeaconState) -> None:
     current_epoch = get_current_epoch(state)
-    next_epoch = current_epoch + 1
+    next_epoch = Epoch(current_epoch + 1)
     # Reset eth1 data votes
     if (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0:
         state.eth1_data_votes = []
     # Update effective balances with hysteresis
-    for index, validator in enumerate(state.validator_registry):
+    for index, validator in enumerate(state.validators):
         balance = state.balances[index]
         HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2
         if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance:
             validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
     # Update start shard
-    state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT
+    state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT)
     # Set active index root
-    index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH
-    state.latest_active_index_roots[index_root_position] = hash_tree_root(
-        get_active_validator_indices(state, next_epoch + ACTIVATION_EXIT_DELAY)
-    )
-    # Set total slashed balances
-    state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = (
-        state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]
-    )
+    index_epoch = Epoch(next_epoch + ACTIVATION_EXIT_DELAY)
+    index_root_position = index_epoch % EPOCHS_PER_HISTORICAL_VECTOR
+    indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, index_epoch))
+    state.active_index_roots[index_root_position] = hash_tree_root(indices_list)
+    # Set committees root
+    committee_root_position = next_epoch % EPOCHS_PER_HISTORICAL_VECTOR
+    state.compact_committees_roots[committee_root_position] = get_compact_committees_root(state, next_epoch)
+    # Reset slashings
+    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
     # Set randao mix
-    state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch)
+    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
     # Set historical root accumulator
     if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
-        historical_batch = HistoricalBatch(
-            block_roots=state.latest_block_roots,
-            state_roots=state.latest_state_roots,
-        )
+        historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
         state.historical_roots.append(hash_tree_root(historical_batch))
     # Rotate current/previous epoch attestations
     state.previous_epoch_attestations = state.current_epoch_attestations
@@ -1585,10 +1546,11 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
     state.latest_block_header = BeaconBlockHeader(
         slot=block.slot,
         parent_root=block.parent_root,
+        state_root=Hash(),  # Overwritten in the next `process_slot` call
         body_root=hash_tree_root(block.body),
     )
     # Verify proposer is not slashed
-    proposer = state.validator_registry[get_beacon_proposer_index(state)]
+    proposer = state.validators[get_beacon_proposer_index(state)]
     assert not proposer.slashed
     # Verify proposer signature
     assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
@@ -1598,19 +1560,13 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
 
 ```python
 def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
-    proposer = state.validator_registry[get_beacon_proposer_index(state)]
-    # Verify that the provided randao value is valid
-    assert bls_verify(
-        proposer.pubkey,
-        hash_tree_root(get_current_epoch(state)),
-        body.randao_reveal,
-        get_domain(state, DOMAIN_RANDAO),
-    )
-    # Mix it in
-    state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = (
-        xor(get_randao_mix(state, get_current_epoch(state)),
-            hash(body.randao_reveal))
-    )
+    epoch = get_current_epoch(state)
+    # Verify RANDAO reveal
+    proposer = state.validators[get_beacon_proposer_index(state)]
+    assert bls_verify(proposer.pubkey, hash_tree_root(epoch), body.randao_reveal, get_domain(state, DOMAIN_RANDAO))
+    # Mix in RANDAO reveal
+    mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
+    state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
 ```
 
 #### Eth1 data
@@ -1619,7 +1575,7 @@ def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
 def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
     state.eth1_data_votes.append(body.eth1_data)
     if state.eth1_data_votes.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
-        state.latest_eth1_data = body.eth1_data
+        state.eth1_data = body.eth1_data
 ```
 
 #### Operations
@@ -1627,19 +1583,18 @@ def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
 ```python
 def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
     # Verify that outstanding deposits are processed up to the maximum number of deposits
-    assert len(body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)
+    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
     # Verify that there are no duplicate transfers
     assert len(body.transfers) == len(set(body.transfers))
 
-    for operations, max_operations, function in (
-        (body.proposer_slashings, MAX_PROPOSER_SLASHINGS, process_proposer_slashing),
-        (body.attester_slashings, MAX_ATTESTER_SLASHINGS, process_attester_slashing),
-        (body.attestations, MAX_ATTESTATIONS, process_attestation),
-        (body.deposits, MAX_DEPOSITS, process_deposit),
-        (body.voluntary_exits, MAX_VOLUNTARY_EXITS, process_voluntary_exit),
-        (body.transfers, MAX_TRANSFERS, process_transfer),
+    for operations, function in (
+        (body.proposer_slashings, process_proposer_slashing),
+        (body.attester_slashings, process_attester_slashing),
+        (body.attestations, process_attestation),
+        (body.deposits, process_deposit),
+        (body.voluntary_exits, process_voluntary_exit),
+        (body.transfers, process_transfer),
     ):
-        assert len(operations) <= max_operations
         for operation in operations:
             function(state, operation)
 ```
@@ -1648,19 +1603,17 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
 
 ```python
 def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
-    """
-    Process ``ProposerSlashing`` operation.
-    """
-    proposer = state.validator_registry[proposer_slashing.proposer_index]
+    proposer = state.validators[proposer_slashing.proposer_index]
     # Verify that the epoch is the same
-    assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot)
+    assert (compute_epoch_of_slot(proposer_slashing.header_1.slot)
+            == compute_epoch_of_slot(proposer_slashing.header_2.slot))
     # But the headers are different
     assert proposer_slashing.header_1 != proposer_slashing.header_2
     # Check proposer is slashable
     assert is_slashable_validator(proposer, get_current_epoch(state))
     # Signatures are valid
     for header in (proposer_slashing.header_1, proposer_slashing.header_2):
-        domain = get_domain(state, DOMAIN_BEACON_PROPOSER, slot_to_epoch(header.slot))
+        domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_of_slot(header.slot))
         assert bls_verify(proposer.pubkey, signing_root(header), header.signature, domain)
 
     slash_validator(state, proposer_slashing.proposer_index)
@@ -1670,20 +1623,17 @@ def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSla
 
 ```python
 def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
-    """
-    Process ``AttesterSlashing`` operation.
-    """
     attestation_1 = attester_slashing.attestation_1
     attestation_2 = attester_slashing.attestation_2
     assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
-    validate_indexed_attestation(state, attestation_1)
-    validate_indexed_attestation(state, attestation_2)
+    assert is_valid_indexed_attestation(state, attestation_1)
+    assert is_valid_indexed_attestation(state, attestation_2)
 
     slashed_any = False
     attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
     attesting_indices_2 = attestation_2.custody_bit_0_indices + attestation_2.custody_bit_1_indices
     for index in sorted(set(attesting_indices_1).intersection(attesting_indices_2)):
-        if is_slashable_validator(state.validator_registry[index], get_current_epoch(state)):
+        if is_slashable_validator(state.validators[index], get_current_epoch(state)):
             slash_validator(state, index)
             slashed_any = True
     assert slashed_any
@@ -1693,85 +1643,80 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla
 
 ```python
 def process_attestation(state: BeaconState, attestation: Attestation) -> None:
-    """
-    Process ``Attestation`` operation.
-    """
     data = attestation.data
+    assert data.crosslink.shard < SHARD_COUNT
+    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
+
     attestation_slot = get_attestation_data_slot(state, data)
     assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH
 
     pending_attestation = PendingAttestation(
         data=data,
-        aggregation_bitfield=attestation.aggregation_bitfield,
+        aggregation_bits=attestation.aggregation_bits,
         inclusion_delay=state.slot - attestation_slot,
         proposer_index=get_beacon_proposer_index(state),
     )
 
-    assert data.target_epoch in (get_previous_epoch(state), get_current_epoch(state))
-    if data.target_epoch == get_current_epoch(state):
-        ffg_data = (state.current_justified_epoch, state.current_justified_root, get_current_epoch(state))
+    if data.target.epoch == get_current_epoch(state):
+        assert data.source == state.current_justified_checkpoint
         parent_crosslink = state.current_crosslinks[data.crosslink.shard]
         state.current_epoch_attestations.append(pending_attestation)
     else:
-        ffg_data = (state.previous_justified_epoch, state.previous_justified_root, get_previous_epoch(state))
+        assert data.source == state.previous_justified_checkpoint
         parent_crosslink = state.previous_crosslinks[data.crosslink.shard]
         state.previous_epoch_attestations.append(pending_attestation)
 
-    # Check FFG data, crosslink data, and signature
-    assert ffg_data == (data.source_epoch, data.source_root, data.target_epoch)
-    assert data.crosslink.start_epoch == parent_crosslink.end_epoch
-    assert data.crosslink.end_epoch == min(data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
+    # Check crosslink against expected parent crosslink
     assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
-    assert data.crosslink.data_root == ZERO_HASH  # [to be removed in phase 1]
-    validate_indexed_attestation(state, convert_to_indexed(state, attestation))
+    assert data.crosslink.start_epoch == parent_crosslink.end_epoch
+    assert data.crosslink.end_epoch == min(data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
+    assert data.crosslink.data_root == Hash()  # [to be removed in phase 1]
+
+    # Check signature
+    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
 ```
 
 ##### Deposits
 
 ```python
 def process_deposit(state: BeaconState, deposit: Deposit) -> None:
-    """
-    Process an Eth1 deposit, registering a validator or increasing its balance.
-    """
     # Verify the Merkle branch
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=hash_tree_root(deposit.data),
-        proof=deposit.proof,
-        depth=DEPOSIT_CONTRACT_TREE_DEPTH,
-        index=state.deposit_index,
-        root=state.latest_eth1_data.deposit_root,
+        branch=deposit.proof,
+        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the `List` length mix-in
+        index=state.eth1_deposit_index,
+        root=state.eth1_data.deposit_root,
     )
 
     # Deposits must be processed in order
-    state.deposit_index += 1
+    state.eth1_deposit_index += 1
 
     pubkey = deposit.data.pubkey
     amount = deposit.data.amount
-    validator_pubkeys = [v.pubkey for v in state.validator_registry]
+    validator_pubkeys = [v.pubkey for v in state.validators]
     if pubkey not in validator_pubkeys:
-        # Verify the deposit signature (proof of possession).
-        # Invalid signatures are allowed by the deposit contract,
-        # and hence included on-chain, but must not be processed.
-        # Note: deposits are valid across forks, hence the deposit domain is retrieved directly from `bls_domain`
-        if not bls_verify(
-            pubkey, signing_root(deposit.data), deposit.data.signature, bls_domain(DOMAIN_DEPOSIT)
-        ):
+        # Verify the deposit signature (proof of possession) for new validators.
+        # Note: The deposit contract does not check signatures.
+        # Note: Deposits are valid across forks, thus the deposit domain is retrieved directly from `compute_domain`.
+        domain = compute_domain(DOMAIN_DEPOSIT)
+        if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, domain):
             return
 
         # Add validator and balance entries
-        state.validator_registry.append(Validator(
+        state.validators.append(Validator(
             pubkey=pubkey,
             withdrawal_credentials=deposit.data.withdrawal_credentials,
             activation_eligibility_epoch=FAR_FUTURE_EPOCH,
             activation_epoch=FAR_FUTURE_EPOCH,
             exit_epoch=FAR_FUTURE_EPOCH,
             withdrawable_epoch=FAR_FUTURE_EPOCH,
-            effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+            effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE),
         ))
         state.balances.append(amount)
     else:
         # Increase balance by deposit amount
-        index = validator_pubkeys.index(pubkey)
+        index = ValidatorIndex(validator_pubkeys.index(pubkey))
         increase_balance(state, index, amount)
 ```
 
@@ -1779,10 +1724,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
 
 ```python
 def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
-    """
-    Process ``VoluntaryExit`` operation.
-    """
-    validator = state.validator_registry[exit.validator_index]
+    validator = state.validators[exit.validator_index]
     # Verify the validator is active
     assert is_active_validator(validator, get_current_epoch(state))
     # Verify the validator has not yet exited
@@ -1802,24 +1744,21 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
 
 ```python
 def process_transfer(state: BeaconState, transfer: Transfer) -> None:
-    """
-    Process ``Transfer`` operation.
-    """
-    # Verify the amount and fee are not individually too big (for anti-overflow purposes)
-    assert state.balances[transfer.sender] >= max(transfer.amount, transfer.fee)
+    # Verify the balance the covers amount and fee (with overflow protection)
+    assert state.balances[transfer.sender] >= max(transfer.amount + transfer.fee, transfer.amount, transfer.fee)
     # A transfer is valid in only one slot
     assert state.slot == transfer.slot
-    # Sender must be not yet eligible for activation, withdrawn, or transfer balance over MAX_EFFECTIVE_BALANCE
+    # Sender must satisfy at least one of the following:
     assert (
-        state.validator_registry[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or
-        get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or
-        transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= state.balances[transfer.sender]
+        # 1) Never have been eligible for activation
+        state.validators[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or
+        # 2) Be withdrawable
+        get_current_epoch(state) >= state.validators[transfer.sender].withdrawable_epoch or
+        # 3) Have a balance of at least MAX_EFFECTIVE_BALANCE after the transfer
+        state.balances[transfer.sender] >= transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE
     )
     # Verify that the pubkey is valid
-    assert (
-        state.validator_registry[transfer.sender].withdrawal_credentials ==
-        int_to_bytes(BLS_WITHDRAWAL_PREFIX, length=1) + hash(transfer.pubkey)[1:]
-    )
+    assert state.validators[transfer.sender].withdrawal_credentials == BLS_WITHDRAWAL_PREFIX + hash(transfer.pubkey)[1:]
     # Verify that the signature is valid
     assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER))
     # Process the transfer
diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md
index e80dad1c50..af81c6bec7 100644
--- a/specs/core/0_deposit-contract.md
+++ b/specs/core/0_deposit-contract.md
@@ -9,15 +9,12 @@
     - [Table of contents](#table-of-contents)
     - [Introduction](#introduction)
     - [Constants](#constants)
-        - [Gwei values](#gwei-values)
         - [Contract](#contract)
     - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
-        - [Arguments](#arguments)
+        - [`deposit` function](#deposit-function)
+            - [Deposit amount](#deposit-amount)
             - [Withdrawal credentials](#withdrawal-credentials)
-            - [Amount](#amount)
-    - [Event logs](#event-logs)
-        - [`Deposit` logs](#deposit-logs)
-        - [`Eth2Genesis` log](#eth2genesis-log)
+            - [`DepositEvent` log](#depositevent-log)
     - [Vyper code](#vyper-code)
 
 <!-- /TOC -->
@@ -28,66 +25,40 @@ This document represents the specification for the beacon chain deposit contract
 
 ## Constants
 
-### Gwei values
-
-| Name | Value | Unit |
-| - | - | - |
-| `FULL_DEPOSIT_AMOUNT` | `32 * 10**9` | Gwei |
-
 ### Contract
 
 | Name | Value |
 | - | - |
 | `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
 | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
-| `CHAIN_START_FULL_DEPOSIT_THRESHOLD` | `2**16` (= 65,536) |
 
 ## Ethereum 1.0 deposit contract
 
 The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2 (i.e. when the EVM 2.0 is deployed and the shards have state).
 
-### Arguments
+### `deposit` function
+
+The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a `DepositData` object.
 
-The deposit contract has a `deposit` function which takes the amount in Ethereum 1.0 transaction, and arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to `DepositData`.
+#### Deposit amount
+
+The amount of ETH (rounded down to the closest Gwei) sent to the deposit contract is the deposit amount, which must be of size at least `MIN_DEPOSIT_AMOUNT` Gwei. Note that ETH consumed by the deposit contract is no longer usable on Ethereum 1.0.
 
 #### Withdrawal credentials
 
-One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now, the only expected format is as follows:
+One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawing validator balance (e.g. to another validator, or to shards). The first byte of `withdrawal_credentials` is a version number. As of now, the only expected format is as follows:
 
-* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE`
+* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
 * `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey
 
 The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
 
-#### Amount
-
-* A valid deposit amount should be at least `MIN_DEPOSIT_AMOUNT` in Gwei.
-* A deposit with an amount greater than or equal to `FULL_DEPOSIT_AMOUNT` in Gwei is considered as a full deposit.
-
-## Event logs
+#### `DepositEvent` log
 
-### `Deposit` logs
-
-Every Ethereum 1.0 deposit, of size at least `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.
-
-### `Eth2Genesis` log
-
-When `CHAIN_START_FULL_DEPOSIT_THRESHOLD` of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined [here](./0_beacon-chain.md#genesis-state)) where:
-
-* `genesis_time` equals `time` in the `Eth2Genesis` log
-* `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log
-* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log
-* `latest_eth1_data.block_hash` equals the hash of the block that included the log
-* `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest)
+Every Ethereum 1.0 deposit emits a `DepositEvent` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.
 
 ## Vyper code
 
-The source for the Vyper contract lives [here](./../../deposit_contract/contracts/validator_registration.v.py).
-
-*Note*: To save ~10x on gas, this contract uses a somewhat unintuitive progressive Merkle root calculation algo that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algo in Python tested for correctness.
-
-For convenience, we provide the interface to the contract here:
+The deposit contract source code, written in Vyper, is available [here](../../deposit_contract/contracts/validator_registration.v.py).
 
-* `__init__()`: initializes the contract
-* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree
-* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. *Note*: The amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants are specified in units of Gwei.
+*Note*: To save on gas, the deposit contract uses a progressive Merkle root calculation algorithm that requires only O(log(n)) storage. See [here](https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py) for a Python implementation, and [here](https://github.com/runtimeverification/verified-smart-contracts/blob/master/deposit/formal-incremental-merkle-tree-algorithm.pdf) for a formal correctness proof.
diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md
index 91c3e27ee4..9fd8ab53ec 100644
--- a/specs/core/0_fork-choice.md
+++ b/specs/core/0_fork-choice.md
@@ -8,25 +8,28 @@
 - [Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice](#ethereum-20-phase-0----beacon-chain-fork-choice)
     - [Table of contents](#table-of-contents)
     - [Introduction](#introduction)
-    - [Prerequisites](#prerequisites)
-    - [Constants](#constants)
+    - [Configuration](#configuration)
         - [Time parameters](#time-parameters)
-    - [Beacon chain processing](#beacon-chain-processing)
-        - [Beacon chain fork choice rule](#beacon-chain-fork-choice-rule)
-    - [Implementation notes](#implementation-notes)
-        - [Justification and finality at genesis](#justification-and-finality-at-genesis)
+    - [Fork choice](#fork-choice)
+        - [Helpers](#helpers)
+            - [`LatestMessage`](#latestmessage)
+            - [`Store`](#store)
+            - [`get_genesis_store`](#get_genesis_store)
+            - [`get_ancestor`](#get_ancestor)
+            - [`get_latest_attesting_balance`](#get_latest_attesting_balance)
+            - [`get_head`](#get_head)
+        - [Handlers](#handlers)
+            - [`on_tick`](#on_tick)
+            - [`on_block`](#on_block)
+            - [`on_attestation`](#on_attestation)
 
 <!-- /TOC -->
 
 ## Introduction
 
-This document represents the specification for the beacon chain fork choice rule, part of Ethereum 2.0 Phase 0.
+This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0. It assumes the [beacon chain state transition function spec](./0_beacon-chain.md).
 
-## Prerequisites
-
-All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
-
-## Constants
+## Configuration
 
 ### Time parameters
 
@@ -34,76 +37,176 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph
 | - | - | :-: | :-: |
 | `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds |
 
-## Beacon chain processing
+## Fork choice
+
+The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_genesis_store(genesis_state)` and update `store` by running:
+
+- `on_tick(time)` whenever `time > store.time` where `time` is the current Unix time
+- `on_block(block)` whenever a block `block` is received
+- `on_attestation(attestation)` whenever an attestation `attestation` is received
+
+*Notes*:
+
+1) **Leap seconds**: Slots will last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds around leap seconds. This is automatically handled by [UNIX time](https://en.wikipedia.org/wiki/Unix_time).
+2) **Honest clocks**: Honest nodes are assumed to have clocks synchronized within `SECONDS_PER_SLOT` seconds of each other.
+3) **Eth1 data**: The large `ETH1_FOLLOW_DISTANCE` specified in the [honest validator document](../validator/0_beacon-chain-validator.md) should ensure that `state.latest_eth1_data` of the canonical Ethereum 2.0 chain remains consistent with the canonical Ethereum 1.0 chain. If not, emergency manual intervention will be required.
+4) **Manual forks**: Manual forks may arbitrarily change the fork choice rule but are expected to be enacted at epoch transitions, with the fork details reflected in `state.fork`.
+5) **Implementation**: The implementation found in this specification is constructed for ease of understanding rather than for optimization in computation, space, or any other resource. A number of optimized alternatives can be found [here](https://github.com/protolambda/lmd-ghost).
+
+### Helpers
 
-Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". For a beacon block, `block`, to be processed by a node, the following conditions must be met:
+#### `LatestMessage`
 
-* The parent block with root `block.parent_root` has been processed and accepted.
-* An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted.
-* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`.
+```python
+@dataclass(eq=True, frozen=True)
+class LatestMessage(object):
+    epoch: Epoch
+    root: Hash
+```
 
-*Note*: Leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.
+#### `Store`
 
-*Note*: Nodes needs to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes.
+```python
+@dataclass
+class Store(object):
+    time: uint64
+    justified_checkpoint: Checkpoint
+    finalized_checkpoint: Checkpoint
+    blocks: Dict[Hash, BeaconBlock] = field(default_factory=dict)
+    block_states: Dict[Hash, BeaconState] = field(default_factory=dict)
+    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
+    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
+```
 
-### Beacon chain fork choice rule
+#### `get_genesis_store`
 
-The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time, a validator `v` subjectively calculates the beacon chain head as follows.
+```python
+def get_genesis_store(genesis_state: BeaconState) -> Store:
+    genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
+    root = signing_root(genesis_block)
+    justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
+    finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
+    return Store(
+        time=genesis_state.genesis_time,
+        justified_checkpoint=justified_checkpoint,
+        finalized_checkpoint=finalized_checkpoint,
+        blocks={root: genesis_block},
+        block_states={root: genesis_state.copy()},
+        checkpoint_states={justified_checkpoint: genesis_state.copy()},
+    )
+```
 
-* Abstractly define `Store` as the type of storage object for the chain data, and let `store` be the set of attestations and blocks that the validator `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`.
-* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store`, the processing of which sets `B` as finalized.)
-* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists, set `justified_head` to `finalized_head`.
-* Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as:
+#### `get_ancestor`
 
 ```python
-def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock:
-    """
-    Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found.
-    """
-    if block.slot == slot:
-        return block
-    elif block.slot < slot:
-        return None
-    else:
-        return get_ancestor(store, store.get_parent(block), slot)
+def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
+    block = store.blocks[root]
+    assert block.slot >= slot
+    return root if block.slot == slot else get_ancestor(store, block.parent_root, slot)
 ```
 
-* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the validator `v` observed first.
-* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`.
-* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` return the child blocks of the given `block`.
-* Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`.
-* The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count.
+#### `get_latest_attesting_balance`
 
 ```python
-def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock:
-    """
-    Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``.
-    """
-    validators = start_state.validator_registry
-    active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot))
-    attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices]
-
-    # Use the rounded-balance-with-hysteresis supplied by the protocol for fork
-    # choice voting. This reduces the number of recomputations that need to be
-    # made for optimized implementations that precompute and save data
-    def get_vote_count(block: BeaconBlock) -> int:
-        return sum(
-            start_state.validator_registry[validator_index].effective_balance
-            for validator_index, target in attestation_targets
-            if get_ancestor(store, target, block.slot) == block
-        )
-
-    head = start_block
-    while 1:
-        children = get_children(store, head)
+def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
+    state = store.checkpoint_states[store.justified_checkpoint]
+    active_indices = get_active_validator_indices(state, get_current_epoch(state))
+    return Gwei(sum(
+        state.validators[i].effective_balance for i in active_indices
+        if (i in store.latest_messages 
+            and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
+    ))
+```
+
+#### `get_head`
+
+```python
+def get_head(store: Store) -> Hash:
+    # Execute the LMD-GHOST fork choice
+    head = store.justified_checkpoint.root
+    justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch)
+    while True:
+        children = [
+            root for root in store.blocks.keys()
+            if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot
+        ]
         if len(children) == 0:
             return head
-        # Ties broken by favoring block with lexicographically higher root
-        head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x)))
+        # Sort by latest attesting balance with ties broken lexicographically
+        head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
 ```
 
-## Implementation notes
+### Handlers
+
+#### `on_tick`
+
+```python
+def on_tick(store: Store, time: uint64) -> None:
+    store.time = time
+```
 
-### Justification and finality at genesis
+#### `on_block`
 
-During genesis, justification and finality root fields within the `BeaconState` reference `ZERO_HASH` rather than a known block. `ZERO_HASH` in `previous_justified_root`, `current_justified_root`, and `finalized_root` should be considered as an alias to the root of the genesis block.
+```python
+def on_block(store: Store, block: BeaconBlock) -> None:
+    # Make a copy of the state to avoid mutability issues
+    assert block.parent_root in store.block_states
+    pre_state = store.block_states[block.parent_root].copy()
+    # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
+    assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
+    # Add new block to the store
+    store.blocks[signing_root(block)] = block
+    # Check block is a descendant of the finalized block
+    assert (
+        get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
+        store.finalized_checkpoint.root
+    )
+    # Check that block is later than the finalized epoch slot
+    assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
+    # Check the block is valid and compute the post-state
+    state = state_transition(pre_state, block)
+    # Add new state for this block to the store
+    store.block_states[signing_root(block)] = state
+
+    # Update justified checkpoint
+    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
+        store.justified_checkpoint = state.current_justified_checkpoint
+
+    # Update finalized checkpoint
+    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
+        store.finalized_checkpoint = state.finalized_checkpoint
+```
+
+#### `on_attestation`
+
+```python
+def on_attestation(store: Store, attestation: Attestation) -> None:
+    target = attestation.data.target
+
+    # Cannot calculate the current shuffling if have not seen the target
+    assert target.root in store.blocks
+
+    # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
+    base_state = store.block_states[target.root].copy()
+    assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT
+
+    # Store target checkpoint state if not yet seen
+    if target not in store.checkpoint_states:
+        process_slots(base_state, compute_start_slot_of_epoch(target.epoch))
+        store.checkpoint_states[target] = base_state
+    target_state = store.checkpoint_states[target]
+
+    # Attestations can only affect the fork choice of subsequent slots.
+    # Delay consideration in the fork choice until their slot is in the past.
+    attestation_slot = get_attestation_data_slot(target_state, attestation.data)
+    assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT
+
+    # Get state at the `target` to validate attestation and calculate the committees
+    indexed_attestation = get_indexed_attestation(target_state, attestation)
+    assert is_valid_indexed_attestation(target_state, indexed_attestation)
+
+    # Update latest messages
+    for i in indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices:
+        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
+            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
+```
diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md
index 6c89ef8531..48100e7bae 100644
--- a/specs/core/1_custody-game.md
+++ b/specs/core/1_custody-game.md
@@ -16,6 +16,7 @@
         - [Max operations per block](#max-operations-per-block)
         - [Reward and penalty quotients](#reward-and-penalty-quotients)
         - [Signature domains](#signature-domains)
+        - [TODO PLACEHOLDER](#todo-placeholder)
     - [Data structures](#data-structures)
         - [Custody objects](#custody-objects)
             - [`CustodyChunkChallenge`](#custodychunkchallenge)
@@ -33,10 +34,11 @@
     - [Helpers](#helpers)
         - [`ceillog2`](#ceillog2)
         - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
+        - [`get_bit`](#get_bit)
         - [`get_custody_chunk_bit`](#get_custody_chunk_bit)
         - [`get_chunk_bits_root`](#get_chunk_bits_root)
         - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
-        - [`get_validators_custody_reveal_period`](#get_validators_custody_reveal_period)
+        - [`get_reveal_period`](#get_reveal_period)
         - [`replace_empty_or_append`](#replace_empty_or_append)
     - [Per-block processing](#per-block-processing)
         - [Operations](#operations)
@@ -56,18 +58,18 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
 
 ## Terminology
 
-* **Custody game**—
-* **Custody period**—
-* **Custody chunk**—
-* **Custody chunk bit**—
-* **Custody chunk challenge**—
-* **Custody bit**—
-* **Custody bit challenge**—
-* **Custody key**—
-* **Custody key reveal**—
-* **Custody key mask**—
-* **Custody response**—
-* **Custody response deadline**—
+- **Custody game**—
+- **Custody period**—
+- **Custody chunk**—
+- **Custody chunk bit**—
+- **Custody chunk challenge**—
+- **Custody bit**—
+- **Custody bit challenge**—
+- **Custody key**—
+- **Custody key reveal**—
+- **Custody key mask**—
+- **Custody response**—
+- **Custody response deadline**—
 
 ## Constants
 
@@ -107,12 +109,20 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
 | - | - |
 | `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) |
 
-### Signature domains
+### Signature domain types
+
+The following types are defined, mapping into `DomainType` (little endian):
 
 | Name | Value |
 | - | - |
 | `DOMAIN_CUSTODY_BIT_CHALLENGE` | `6` |
 
+### TODO PLACEHOLDER
+
+| Name | Value |
+| - | - |
+| `PLACEHOLDER` | `2**32` |
+
 ## Data structures
 
 ### Custody objects
@@ -133,9 +143,9 @@ class CustodyBitChallenge(Container):
     responder_index: ValidatorIndex
     attestation: Attestation
     challenger_index: ValidatorIndex
-    responder_key: Bytes96
-    chunk_bits: bytes
-    signature: Bytes96
+    responder_key: BLSSignature
+    chunk_bits: Bytes[PLACEHOLDER]
+    signature: BLSSignature
 ```
 
 #### `CustodyChunkChallengeRecord`
@@ -162,7 +172,7 @@ class CustodyBitChallengeRecord(Container):
     data_root: Bytes32
     chunk_count: uint64
     chunk_bits_merkle_root: Bytes32
-    responder_key: Bytes96
+    responder_key: BLSSignature
 ```
 
 #### `CustodyResponse`
@@ -171,9 +181,9 @@ class CustodyBitChallengeRecord(Container):
 class CustodyResponse(Container):
     challenge_index: uint64
     chunk_index: uint64
-    chunk: Vector[bytes, BYTES_PER_CUSTODY_CHUNK]
-    data_branch: List[Bytes32]
-    chunk_bits_branch: List[Bytes32]
+    chunk: Vector[Bytes[PLACEHOLDER], BYTES_PER_CUSTODY_CHUNK]
+    data_branch: List[Bytes32, PLACEHOLDER]
+    chunk_bits_branch: List[Bytes32, PLACEHOLDER]
     chunk_bits_leaf: Bytes32
 ```
 
@@ -184,9 +194,9 @@ class CustodyResponse(Container):
 ```python
 class CustodyKeyReveal(Container):
     # Index of the validator whose key is being revealed
-    revealer_index: uint64
+    revealer_index: ValidatorIndex
     # Reveal (masked signature)
-    reveal: Bytes96
+    reveal: BLSSignature
 ```
 
 #### `EarlyDerivedSecretReveal`
@@ -196,15 +206,15 @@ Represents an early (punishable) reveal of one of the derived secrets, where der
 ```python
 class EarlyDerivedSecretReveal(Container):
     # Index of the validator whose key is being revealed
-    revealed_index: uint64
+    revealed_index: ValidatorIndex
     # RANDAO epoch of the key that is being revealed
-    epoch: uint64
+    epoch: Epoch
     # Reveal (masked signature)
-    reveal: Bytes96
+    reveal: BLSSignature
     # Index of the validator who revealed (whistleblower)
-    masker_index: uint64
+    masker_index: ValidatorIndex
     # Mask used to hide the actual reveal signature (prevent reveal from being stolen)
-    mask: Bytes32
+    mask: Hash
 ```
 
 ### Phase 0 container updates
@@ -217,7 +227,7 @@ Add the following fields to the end of the specified container objects. Fields w
 class Validator(Container):
     # next_custody_reveal_period is initialised to the custody period
     # (of the particular validator) in which the validator is activated
-    # = get_validators_custody_reveal_period(...)
+    # = get_reveal_period(...)
     next_custody_reveal_period: uint64
     max_reveal_lateness: uint64
 ```
@@ -226,24 +236,25 @@ class Validator(Container):
 
 ```python
 class BeaconState(Container):
-    custody_chunk_challenge_records: List[CustodyChunkChallengeRecord]
-    custody_bit_challenge_records: List[CustodyBitChallengeRecord]
+    custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, PLACEHOLDER]
+    custody_bit_challenge_records: List[CustodyBitChallengeRecord, PLACEHOLDER]
     custody_challenge_index: uint64
 
     # Future derived secrets already exposed; contains the indices of the exposed validator
     # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
-    exposed_derived_secrets: Vector[List[uint64], EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
+    exposed_derived_secrets: Vector[List[ValidatorIndex, PLACEHOLDER],
+                                    EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
 ```
 
 #### `BeaconBlockBody`
 
 ```python
 class BeaconBlockBody(Container):
-    custody_chunk_challenges: List[CustodyChunkChallenge]
-    custody_bit_challenges: List[CustodyBitChallenge]
-    custody_responses: List[CustodyResponse]
-    custody_key_reveals: List[CustodyKeyReveal]
-    early_derived_secret_reveals: List[EarlyDerivedSecretReveal]
+    custody_chunk_challenges: List[CustodyChunkChallenge, PLACEHOLDER]
+    custody_bit_challenges: List[CustodyBitChallenge, PLACEHOLDER]
+    custody_responses: List[CustodyResponse, PLACEHOLDER]
+    custody_key_reveals: List[CustodyKeyReveal, PLACEHOLDER]
+    early_derived_secret_reveals: List[EarlyDerivedSecretReveal, PLACEHOLDER]
 ```
 
 ## Helpers
@@ -251,7 +262,7 @@ class BeaconBlockBody(Container):
 ### `ceillog2`
 
 ```python
-def ceillog2(x):
+def ceillog2(x: uint64) -> int:
     return x.bit_length()
 ```
 
@@ -264,44 +275,49 @@ def get_custody_chunk_count(crosslink: Crosslink) -> int:
     return crosslink_length * chunks_per_epoch
 ```
 
+### `get_bit`
+
+```python
+def get_bit(serialization: bytes, i: uint64) -> int:
+    """
+    Extract the bit in ``serialization`` at position ``i``.
+    """
+    return (serialization[i // 8] >> (i % 8)) % 2
+```
+
 ### `get_custody_chunk_bit`
 
 ```python
-def get_custody_chunk_bit(key: Bytes96, chunk: bytes) -> bool:
+def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool:
     # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol
-    return get_bitfield_bit(hash(key + chunk), 0)
+    return bool(get_bit(hash(key + chunk), 0))
 ```
 
 ### `get_chunk_bits_root`
 
 ```python
-def get_chunk_bits_root(chunk_bitfield: bytes) -> Bytes32:
+def get_chunk_bits_root(chunk_bits: bytes) -> Bytes32:
     aggregated_bits = bytearray([0] * 32)
-    for i in range(0, len(chunk_bitfield), 32):
+    for i in range(0, len(chunk_bits), 32):
         for j in range(32):
-            aggregated_bits[j] ^= chunk_bitfield[i + j]
+            aggregated_bits[j] ^= chunk_bits[i + j]
     return hash(aggregated_bits)
 ```
 
 ### `get_randao_epoch_for_custody_period`
 
 ```python
-def get_randao_epoch_for_custody_period(period: int, validator_index: ValidatorIndex) -> Epoch:
+def get_randao_epoch_for_custody_period(period: uint64, validator_index: ValidatorIndex) -> Epoch:
     next_period_start = (period + 1) * EPOCHS_PER_CUSTODY_PERIOD - validator_index % EPOCHS_PER_CUSTODY_PERIOD
-    return next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING
+    return Epoch(next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING)
 ```
 
-### `get_validators_custody_reveal_period`
+### `get_reveal_period`
 
 ```python
-def get_validators_custody_reveal_period(state: BeaconState,
-                                         validator_index: ValidatorIndex,
-                                         epoch: Epoch=None) -> int:
+def get_reveal_period(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int:
     '''
-    This function returns the reveal period for a given validator.
-    If no epoch is supplied, the current epoch is assumed.
-    Note: This function implicitly requires that validators are not removed from the
-    validator set in fewer than EPOCHS_PER_CUSTODY_PERIOD epochs
+    Return the reveal period for a given validator.
     '''
     epoch = get_current_epoch(state) if epoch is None else epoch
     return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD
@@ -310,7 +326,7 @@ def get_validators_custody_reveal_period(state: BeaconState,
 ### `replace_empty_or_append`
 
 ```python
-def replace_empty_or_append(list: List[Any], new_element: Any) -> int:
+def replace_empty_or_append(list: MutableSequence[Any], new_element: Any) -> int:
     for i in range(len(list)):
         if is_empty(list[i]):
             list[i] = new_element
@@ -332,17 +348,15 @@ Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`.
 For each `reveal` in `block.body.custody_key_reveals`, run the following function:
 
 ```python
-def process_custody_key_reveal(state: BeaconState,
-                               reveal: CustodyKeyReveal) -> None:
+def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> None:
     """
     Process ``CustodyKeyReveal`` operation.
     Note that this function mutates ``state``.
     """
-
-    revealer = state.validator_registry[reveal.revealer_index]
+    revealer = state.validators[reveal.revealer_index]
     epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_reveal_period, reveal.revealed_index)
 
-    assert revealer.next_custody_reveal_period < get_validators_custody_reveal_period(state, reveal.revealed_index)
+    assert revealer.next_custody_reveal_period < get_reveal_period(state, reveal.revealed_index)
 
     # Revealed validator is active or exited, but not withdrawn
     assert is_slashable_validator(revealer, get_current_epoch(state))
@@ -360,11 +374,11 @@ def process_custody_key_reveal(state: BeaconState,
     )
 
     # Decrement max reveal lateness if response is timely
-    if revealer.next_custody_reveal_period == get_validators_custody_reveal_period(state, reveal.revealer_index) - 2:
+    if revealer.next_custody_reveal_period == get_reveal_period(state, reveal.revealer_index) - 2:
         revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT
     revealer.max_reveal_lateness = max(
         revealer.max_reveal_lateness,
-        get_validators_custody_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period
+        get_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period
     )
 
     # Process reveal
@@ -372,7 +386,11 @@ def process_custody_key_reveal(state: BeaconState,
 
     # Reward Block Preposer
     proposer_index = get_beacon_proposer_index(state)
-    increase_balance(state, proposer_index, get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT)
+    increase_balance(
+        state,
+        proposer_index,
+        Gwei(get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT)
+    )
 ```
 
 #### Early derived secret reveals
@@ -382,24 +400,21 @@ Verify that `len(block.body.early_derived_secret_reveals) <= MAX_EARLY_DERIVED_S
 For each `reveal` in `block.body.early_derived_secret_reveals`, run the following function:
 
 ```python
-def process_early_derived_secret_reveal(state: BeaconState,
-                                        reveal: EarlyDerivedSecretReveal) -> None:
+def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerivedSecretReveal) -> None:
     """
     Process ``EarlyDerivedSecretReveal`` operation.
     Note that this function mutates ``state``.
     """
-
-    revealed_validator = state.validator_registry[reveal.revealed_index]
-    masker = state.validator_registry[reveal.masker_index]
+    revealed_validator = state.validators[reveal.revealed_index]
     derived_secret_location = reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
 
     assert reveal.epoch >= get_current_epoch(state) + RANDAO_PENALTY_EPOCHS
     assert reveal.epoch < get_current_epoch(state) + EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
-    assert revealed_validator.slashed is False
+    assert not revealed_validator.slashed
     assert reveal.revealed_index not in state.exposed_derived_secrets[derived_secret_location]
 
     # Verify signature correctness
-    masker = state.validator_registry[reveal.masker_index]
+    masker = state.validators[reveal.masker_index]
     pubkeys = [revealed_validator.pubkey, masker.pubkey]
     message_hashes = [
         hash_tree_root(reveal.epoch),
@@ -433,7 +448,7 @@ def process_early_derived_secret_reveal(state: BeaconState,
             // len(get_active_validator_indices(state, get_current_epoch(state)))
             // PROPOSER_REWARD_QUOTIENT
         )
-        penalty = (
+        penalty = Gwei(
             max_proposer_slot_reward
             * EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE
             * (len(state.exposed_derived_secrets[derived_secret_location]) + 1)
@@ -442,8 +457,8 @@ def process_early_derived_secret_reveal(state: BeaconState,
         # Apply penalty
         proposer_index = get_beacon_proposer_index(state)
         whistleblower_index = reveal.masker_index
-        whistleblowing_reward = penalty // WHISTLEBLOWING_REWARD_QUOTIENT
-        proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT
+        whistleblowing_reward = Gwei(penalty // WHISTLEBLOWER_REWARD_QUOTIENT)
+        proposer_reward = Gwei(whistleblowing_reward // PROPOSER_REWARD_QUOTIENT)
         increase_balance(state, proposer_index, proposer_reward)
         increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward)
         decrease_balance(state, reveal.revealed_index, penalty)
@@ -459,16 +474,16 @@ Verify that `len(block.body.custody_chunk_challenges) <= MAX_CUSTODY_CHUNK_CHALL
 For each `challenge` in `block.body.custody_chunk_challenges`, run the following function:
 
 ```python
-def process_chunk_challenge(state: BeaconState,
-                            challenge: CustodyChunkChallenge) -> None:
+def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None:
     # Verify the attestation
-    validate_indexed_attestation(state, convert_to_indexed(state, challenge.attestation))
+    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation))
     # Verify it is not too late to challenge
-    assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
-    responder = state.validator_registry[challenge.responder_index]
+    assert (compute_epoch_of_slot(challenge.attestation.data.slot)
+            >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY)
+    responder = state.validators[challenge.responder_index]
     assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
     # Verify the responder participated in the attestation
-    attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield)
+    attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bits)
     assert challenge.responder_index in attesters
     # Verify the challenge is not a duplicate
     for record in state.custody_chunk_challenge_records:
@@ -503,60 +518,42 @@ Verify that `len(block.body.custody_bit_challenges) <= MAX_CUSTODY_BIT_CHALLENGE
 For each `challenge` in `block.body.custody_bit_challenges`, run the following function:
 
 ```python
-def process_bit_challenge(state: BeaconState,
-                          challenge: CustodyBitChallenge) -> None:
+def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None:
+    attestation = challenge.attestation
+    epoch = compute_epoch_of_slot(attestation.data.slot)
+    shard = attestation.data.crosslink.shard
 
     # Verify challenge signature
-    challenger = state.validator_registry[challenge.challenger_index]
-    assert bls_verify(
-        pubkey=challenger.pubkey,
-        message_hash=signing_root(challenge),
-        signature=challenge.signature,
-        domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE),
-    )
+    challenger = state.validators[challenge.challenger_index]
+    domain = get_domain(state, DOMAIN_CUSTODY_BIT_CHALLENGE, get_current_epoch(state))
+    assert bls_verify(challenger.pubkey, signing_root(challenge), challenge.signature, domain)
+    # Verify challenger is slashable
     assert is_slashable_validator(challenger, get_current_epoch(state))
-
-    # Verify the attestation
-    attestation = challenge.attestation
-    validate_indexed_attestation(state, convert_to_indexed(state, attestation))
-    # Verify the attestation is eligible for challenging
-    responder = state.validator_registry[challenge.responder_index]
-    assert (slot_to_epoch(attestation.data.slot) + responder.max_reveal_lateness <=
-            get_validators_custody_reveal_period(state, challenge.responder_index))
+    # Verify attestation
+    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
+    # Verify attestation is eligible for challenging
+    responder = state.validators[challenge.responder_index]
+    assert epoch + responder.max_reveal_lateness <= get_reveal_period(state, challenge.responder_index)
 
     # Verify the responder participated in the attestation
-    attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)
+    attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
     assert challenge.responder_index in attesters
-
-    # A validator can be the challenger for at most one challenge at a time
+    # Verify challenger is not already challenging
     for record in state.custody_bit_challenge_records:
         assert record.challenger_index != challenge.challenger_index
-
-    # Verify the responder is a valid custody key
+    # Verify the responder custody key
     epoch_to_sign = get_randao_epoch_for_custody_period(
-        get_validators_custody_reveal_period(
-            state=state,
-            index=challenge.responder_index,
-            epoch=slot_to_epoch(attestation.data.slot)),
-        challenge.responder_index
-    )
-    assert bls_verify(
-        pubkey=responder.pubkey,
-        message_hash=hash_tree_root(epoch_to_sign),
-        signature=challenge.responder_key,
-        domain=get_domain(
-            state=state,
-            domain_type=DOMAIN_RANDAO,
-            message_epoch=epoch_to_sign,
-        ),
+        get_reveal_period(state, challenge.responder_index, epoch),
+        challenge.responder_index,
     )
-
+    domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
+    assert bls_verify(responder.pubkey, hash_tree_root(epoch_to_sign), challenge.responder_key, domain)
     # Verify the chunk count
     chunk_count = get_custody_chunk_count(attestation.data.crosslink)
-    assert verify_bitfield(challenge.chunk_bits, chunk_count)
     # Verify the first bit of the hash of the chunk bits does not equal the custody bit
-    custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(challenge.responder_index))
-    assert custody_bit != get_bitfield_bit(get_chunk_bits_root(challenge.chunk_bits), 0)
+    committee = get_crosslink_committee(state, epoch, shard)
+    custody_bit = attestation.custody_bits[committee.index(challenge.responder_index)]
+    assert custody_bit != get_bit(get_chunk_bits_root(challenge.chunk_bits), 0)
     # Add new bit challenge record
     new_record = CustodyBitChallengeRecord(
         challenge_index=state.custody_challenge_index,
@@ -570,7 +567,6 @@ def process_bit_challenge(state: BeaconState,
     )
     replace_empty_or_append(state.custody_bit_challenge_records, new_record)
     state.custody_challenge_index += 1
-
     # Postpone responder withdrawability
     responder.withdrawable_epoch = FAR_FUTURE_EPOCH
 ```
@@ -582,8 +578,7 @@ Verify that `len(block.body.custody_responses) <= MAX_CUSTODY_RESPONSES`.
 For each `response` in `block.body.custody_responses`, run the following function:
 
 ```python
-def process_custody_response(state: BeaconState,
-                             response: CustodyResponse) -> None:
+def process_custody_response(state: BeaconState, response: CustodyResponse) -> None:
     chunk_challenge = next((record for record in state.custody_chunk_challenge_records
                             if record.challenge_index == response.challenge_index), None)
     if chunk_challenge is not None:
@@ -604,11 +599,11 @@ def process_chunk_challenge_response(state: BeaconState,
     # Verify chunk index
     assert response.chunk_index == challenge.chunk_index
     # Verify bit challenge data is null
-    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == ZERO_HASH
+    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash()
     # Verify minimum delay
     assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY
     # Verify the chunk matches the crosslink data root
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=hash_tree_root(response.chunk),
         branch=response.data_branch,
         depth=challenge.depth,
@@ -620,7 +615,7 @@ def process_chunk_challenge_response(state: BeaconState,
     records[records.index(challenge)] = CustodyChunkChallengeRecord()
     # Reward the proposer
     proposer_index = get_beacon_proposer_index(state)
-    increase_balance(state, proposer_index, get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT)
+    increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
 ```
 
 ```python
@@ -630,10 +625,10 @@ def process_bit_challenge_response(state: BeaconState,
     # Verify chunk index
     assert response.chunk_index < challenge.chunk_count
     # Verify responder has not been slashed
-    responder = state.validator_registry[challenge.responder_index]
+    responder = state.validators[challenge.responder_index]
     assert not responder.slashed
     # Verify the chunk matches the crosslink data root
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=hash_tree_root(response.chunk),
         branch=response.data_branch,
         depth=ceillog2(challenge.chunk_count),
@@ -641,7 +636,7 @@ def process_bit_challenge_response(state: BeaconState,
         root=challenge.data_root,
     )
     # Verify the chunk bit leaf matches the challenge data
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=response.chunk_bits_leaf,
         branch=response.chunk_bits_branch,
         depth=ceillog2(challenge.chunk_count) >> 8,
@@ -650,7 +645,7 @@ def process_bit_challenge_response(state: BeaconState,
     )
     # Verify the chunk bit does not match the challenge chunk bit
     assert (get_custody_chunk_bit(challenge.responder_key, response.chunk)
-            != get_bitfield_bit(challenge.chunk_bits_leaf, response.chunk_index % 256))
+            != get_bit(challenge.chunk_bits_leaf, response.chunk_index % 256))
     # Clear the challenge
     records = state.custody_bit_challenge_records
     records[records.index(challenge)] = CustodyBitChallengeRecord()
@@ -669,10 +664,10 @@ Run `process_reveal_deadlines(state)` immediately after `process_registry_update
     process_reveal_deadlines(state)
 # end insert @process_reveal_deadlines
 def process_reveal_deadlines(state: BeaconState) -> None:
-    for index, validator in enumerate(state.validator_registry):
+    for index, validator in enumerate(state.validators):
         deadline = validator.next_custody_reveal_period + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD)
-        if get_validators_custody_reveal_period(state, index) > deadline:
-            slash_validator(state, index)
+        if get_reveal_period(state, ValidatorIndex(index)) > deadline:
+            slash_validator(state, ValidatorIndex(index))
 ```
 
 Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadlines(state)`:
@@ -682,17 +677,17 @@ Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadl
     process_challenge_deadlines(state)
 # end insert @process_challenge_deadlines
 def process_challenge_deadlines(state: BeaconState) -> None:
-    for challenge in state.custody_chunk_challenge_records:
-        if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
-            slash_validator(state, challenge.responder_index, challenge.challenger_index)
-            records = state.custody_chunk_challenge_records
-            records[records.index(challenge)] = CustodyChunkChallengeRecord()
-
-    for challenge in state.custody_bit_challenge_records:
-        if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
-            slash_validator(state, challenge.responder_index, challenge.challenger_index)
+    for custody_chunk_challenge in state.custody_chunk_challenge_records:
+        if get_current_epoch(state) > custody_chunk_challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
+            slash_validator(state, custody_chunk_challenge.responder_index, custody_chunk_challenge.challenger_index)
+            records = state.custody_chunk_challenge_records
+            records[records.index(custody_chunk_challenge)] = CustodyChunkChallengeRecord()
+
+    for custody_bit_challenge in state.custody_bit_challenge_records:
+        if get_current_epoch(state) > custody_bit_challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
+            slash_validator(state, custody_bit_challenge.responder_index, custody_bit_challenge.challenger_index)
             records = state.custody_bit_challenge_records
-            records[records.index(challenge)] = CustodyBitChallengeRecord()
+            records[records.index(custody_bit_challenge)] = CustodyBitChallengeRecord()
 ```
 
 Append this to `process_final_updates(state)`:
@@ -710,8 +705,8 @@ def after_process_final_updates(state: BeaconState) -> None:
     validator_indices_in_records = set(
         [record.challenger_index for record in records] + [record.responder_index for record in records]
     )
-    for index, validator in enumerate(state.validator_registry):
+    for index, validator in enumerate(state.validators):
         if index not in validator_indices_in_records:
             if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
-                validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+                validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
 ```
diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md
index 21e08e7c96..613b4c4c22 100644
--- a/specs/core/1_shard-data-chains.md
+++ b/specs/core/1_shard-data-chains.md
@@ -9,10 +9,12 @@
 - [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains)
     - [Table of contents](#table-of-contents)
     - [Introduction](#introduction)
-    - [Constants](#constants)
+    - [Configuration](#configuration)
         - [Misc](#misc)
+        - [Initial values](#initial-values)
         - [Time parameters](#time-parameters)
         - [Signature domains](#signature-domains)
+        - [TODO PLACEHOLDER](#todo-placeholder)
     - [Data structures](#data-structures)
         - [`ShardBlockBody`](#shardblockbody)
         - [`ShardAttestation`](#shardattestation)
@@ -38,7 +40,7 @@
 
 This document describes the shard data layer and the shard fork choice rule in Phase 1 of Ethereum 2.0.
 
-## Constants
+## Configuration
 
 ### Misc
 
@@ -46,6 +48,11 @@ This document describes the shard data layer and the shard fork choice rule in P
 | - | - |
 | `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) |
 | `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) |
+
+### Initial values
+
+| Name | Value |
+| - | - |
 | `PHASE_1_FORK_EPOCH` | **TBD** |
 | `PHASE_1_FORK_SLOT` | **TBD** |
 | `GENESIS_SHARD_SLOT` | 0 |
@@ -56,22 +62,29 @@ This document describes the shard data layer and the shard fork choice rule in P
 | - | - | :-: | :-: |
 | `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs  | 6.2 minutes |
 | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days |
-| `SECONDS_PER_SLOT` | `2**1 * 3**1` (= 6) | 6 seconds |
 
-### Signature domains
+### Signature domain types
+
+The following types are defined, mapping into `DomainType` (little endian):
 
 | Name | Value |
 | - | - |
 | `DOMAIN_SHARD_PROPOSER` | `128` |
 | `DOMAIN_SHARD_ATTESTER` | `129` |
 
+### TODO PLACEHOLDER
+
+| Name | Value |
+| - | - |
+| `PLACEHOLDER` | `2**32` |
+
 ## Data structures
 
 ### `ShardBlockBody`
 
 ```python
 class ShardBlockBody(Container):
-    data: Vector[bytes, BYTES_PER_SHARD_BLOCK_BODY]
+    data: Vector[Bytes[PLACEHOLDER], BYTES_PER_SHARD_BLOCK_BODY]
 ```
 
 ### `ShardAttestation`
@@ -79,39 +92,39 @@ class ShardBlockBody(Container):
 ```python
 class ShardAttestation(Container):
     class data(Container):
-        slot: uint64
-        shard: uint64
+        slot: Slot
+        shard: Shard
         shard_block_root: Bytes32
-    aggregation_bitfield: bytes
-    aggregate_signature: Bytes96
+    aggregation_bits: Bitlist[PLACEHOLDER]
+    aggregate_signature: BLSSignature
 ```
 
 ### `ShardBlock`
 
 ```python
 class ShardBlock(Container):
-    slot: uint64
-    shard: uint64
+    slot: Slot
+    shard: Shard
     beacon_chain_root: Bytes32
     parent_root: Bytes32
     data: ShardBlockBody
     state_root: Bytes32
-    attestations: List[ShardAttestation]
-    signature: Bytes96
+    attestations: List[ShardAttestation, PLACEHOLDER]
+    signature: BLSSignature
 ```
 
 ### `ShardBlockHeader`
 
 ```python
 class ShardBlockHeader(Container):
-    slot: uint64
-    shard: uint64
+    slot: Slot
+    shard: Shard
     beacon_chain_root: Bytes32
     parent_root: Bytes32
     body_root: Bytes32
     state_root: Bytes32
-    attestations: List[ShardAttestation]
-    signature: Bytes96
+    attestations: List[ShardAttestation, PLACEHOLDER]
+    signature: BLSSignature
 ```
 
 ## Helper functions
@@ -122,14 +135,14 @@ class ShardBlockHeader(Container):
 def get_period_committee(state: BeaconState,
                          epoch: Epoch,
                          shard: Shard,
-                         index: int,
-                         count: int) -> List[ValidatorIndex]:
+                         index: uint64,
+                         count: uint64) -> Sequence[ValidatorIndex]:
     """
     Return committee for a period. Used to construct persistent committees.
     """
     return compute_committee(
         indices=get_active_validator_indices(state, epoch),
-        seed=generate_seed(state, epoch),
+        seed=get_seed(state, epoch),
         index=shard * count + index,
         count=SHARD_COUNT * count,
     )
@@ -138,9 +151,9 @@ def get_period_committee(state: BeaconState,
 ### `get_switchover_epoch`
 
 ```python
-def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex):
-    earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2
-    return (bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8]))
+def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex) -> int:
+    earlier_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2)
+    return (bytes_to_int(hash(get_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8]))
             % PERSISTENT_COMMITTEE_PERIOD)
 ```
 
@@ -149,30 +162,30 @@ def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex
 ```python
 def get_persistent_committee(state: BeaconState,
                              shard: Shard,
-                             slot: Slot) -> List[ValidatorIndex]:
+                             slot: Slot) -> Sequence[ValidatorIndex]:
     """
     Return the persistent committee for the given ``shard`` at the given ``slot``.
     """
-    epoch = slot_to_epoch(slot)
-    earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2
-    later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD
+    epoch = compute_epoch_of_slot(slot)
+    earlier_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2)
+    later_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD)
 
     committee_count = max(
-        len(get_active_validator_indices(state.validator_registry, earlier_start_epoch)) //
+        len(get_active_validator_indices(state, earlier_start_epoch)) //
         (SHARD_COUNT * TARGET_COMMITTEE_SIZE),
-        len(get_active_validator_indices(state.validator_registry, later_start_epoch)) //
+        len(get_active_validator_indices(state, later_start_epoch)) //
         (SHARD_COUNT * TARGET_COMMITTEE_SIZE),
     ) + 1
 
     index = slot % committee_count
-    earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count)
-    later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count)
+    earlier_committee = get_period_committee(state, earlier_start_epoch, shard, index, committee_count)
+    later_committee = get_period_committee(state, later_start_epoch, shard, index, committee_count)
 
     # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from
     # later committee; return a sorted list of the union of the two, deduplicated
     return sorted(list(set(
-        [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(state, epoch, i)] +
-        [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(state, epoch, i)]
+        [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(state, epoch, i)]
+        + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(state, epoch, i)]
     )))
 ```
 
@@ -181,16 +194,16 @@ def get_persistent_committee(state: BeaconState,
 ```python
 def get_shard_proposer_index(state: BeaconState,
                              shard: Shard,
-                             slot: Slot) -> ValidatorIndex:
+                             slot: Slot) -> Optional[ValidatorIndex]:
     # Randomly shift persistent committee
-    persistent_committee = get_persistent_committee(state, shard, slot)
+    persistent_committee = list(get_persistent_committee(state, shard, slot))
     seed = hash(state.current_shuffling_seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8))
     random_index = bytes_to_int(seed[0:8]) % len(persistent_committee)
     persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index]
 
     # Search for an active proposer
     for index in persistent_committee:
-        if is_active_validator(state.validator_registry[index], get_current_epoch(state)):
+        if is_active_validator(state.validators[index], get_current_epoch(state)):
             return index
 
     # No block can be proposed if no validator is active
@@ -220,40 +233,41 @@ def verify_shard_attestation_signature(state: BeaconState,
                                        attestation: ShardAttestation) -> None:
     data = attestation.data
     persistent_committee = get_persistent_committee(state, data.shard, data.slot)
-    assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee))
     pubkeys = []
     for i, index in enumerate(persistent_committee):
-        if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1:
-            validator = state.validator_registry[index]
+        if attestation.aggregation_bits[i]:
+            validator = state.validators[index]
             assert is_active_validator(validator, get_current_epoch(state))
             pubkeys.append(validator.pubkey)
     assert bls_verify(
         pubkey=bls_aggregate_pubkeys(pubkeys),
         message_hash=data.shard_block_root,
         signature=attestation.aggregate_signature,
-        domain=get_domain(state, slot_to_epoch(data.slot), DOMAIN_SHARD_ATTESTER)
+        domain=get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_slot(data.slot))
     )
 ```
 
 ### `compute_crosslink_data_root`
 
 ```python
-def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Bytes32:
-    def is_power_of_two(value: int) -> bool:
+def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Bytes32:
+    def is_power_of_two(value: uint64) -> bool:
         return (value > 0) and (value & (value - 1) == 0)
 
-    def pad_to_power_of_2(values: List[bytes]) -> List[bytes]:
+    def pad_to_power_of_2(values: MutableSequence[bytes]) -> Sequence[bytes]:
         while not is_power_of_two(len(values)):
-            values += [b'\x00' * BYTES_PER_SHARD_BLOCK_BODY]
+            values.append(b'\x00' * BYTES_PER_SHARD_BLOCK_BODY)
         return values
 
     def hash_tree_root_of_bytes(data: bytes) -> bytes:
         return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)])
 
-    def zpad(data: bytes, length: int) -> bytes:
+    def zpad(data: bytes, length: uint64) -> bytes:
         return data + b'\x00' * (length - len(data))
 
     return hash(
+        # TODO untested code.
+        #  Need to either pass a typed list to hash-tree-root, or merkleize_chunks(values, pad_to=2**x)
         hash_tree_root(pad_to_power_of_2([
             hash_tree_root_of_bytes(
                 zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY)
@@ -271,15 +285,15 @@ def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Bytes32:
 
 Let:
 
-* `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot`
-* `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]`
-* `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined
-* `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block`
+- `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot`
+- `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]`
+- `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined
+- `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block`
 
 ```python
-def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
+def is_valid_shard_block(beacon_blocks: Sequence[BeaconBlock],
                          beacon_state: BeaconState,
-                         valid_shard_blocks: List[ShardBlock],
+                         valid_shard_blocks: Sequence[ShardBlock],
                          candidate: ShardBlock) -> bool:
     # Check if block is already determined valid
     for _, block in enumerate(valid_shard_blocks):
@@ -298,11 +312,11 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
     assert beacon_block.slot <= candidate.slot
 
     # Check state root
-    assert candidate.state_root == ZERO_HASH  # [to be removed in phase 2]
+    assert candidate.state_root == Hash()  # [to be removed in phase 2]
 
     # Check parent block
     if candidate.slot == PHASE_1_FORK_SLOT:
-        assert candidate.parent_root == ZERO_HASH
+        assert candidate.parent_root == Hash()
     else:
         parent_block = next(
             (block for block in valid_shard_blocks if signing_root(block) == candidate.parent_root),
@@ -325,10 +339,10 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
     proposer_index = get_shard_proposer_index(beacon_state, candidate.shard, candidate.slot)
     assert proposer_index is not None
     assert bls_verify(
-        pubkey=beacon_state.validator_registry[proposer_index].pubkey,
-        message_hash=signing_root(block),
+        pubkey=beacon_state.validators[proposer_index].pubkey,
+        message_hash=signing_root(candidate),
         signature=candidate.signature,
-        domain=get_domain(beacon_state, slot_to_epoch(candidate.slot), DOMAIN_SHARD_PROPOSER),
+        domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_slot(candidate.slot)),
     )
 
     return True
@@ -338,12 +352,12 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
 
 Let:
 
-* `valid_shard_blocks` be the list of valid `ShardBlock`
-* `beacon_state` be the canonical `BeaconState`
-* `candidate` be a candidate `ShardAttestation` for which validity is to be determined by running `is_valid_shard_attestation`
+- `valid_shard_blocks` be the list of valid `ShardBlock`
+- `beacon_state` be the canonical `BeaconState`
+- `candidate` be a candidate `ShardAttestation` for which validity is to be determined by running `is_valid_shard_attestation`
 
 ```python
-def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock],
+def is_valid_shard_attestation(valid_shard_blocks: Sequence[ShardBlock],
                                beacon_state: BeaconState,
                                candidate: ShardAttestation) -> bool:
     # Check shard block
@@ -365,38 +379,39 @@ def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock],
 
 Let:
 
-* `shard` be a valid `Shard`
-* `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot`
-* `beacon_state` be the canonical `BeaconState`
-* `valid_attestations` be the list of valid `Attestation`, recursively defined
-* `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation`
+- `shard` be a valid `Shard`
+- `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot`
+- `beacon_state` be the canonical `BeaconState`
+- `valid_attestations` be the set of valid `Attestation` objects, recursively defined
+- `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation`
 
 ```python
 def is_valid_beacon_attestation(shard: Shard,
-                                shard_blocks: List[ShardBlock],
+                                shard_blocks: Sequence[ShardBlock],
                                 beacon_state: BeaconState,
-                                valid_attestations: List[Attestation],
+                                valid_attestations: Set[Attestation],
                                 candidate: Attestation) -> bool:
     # Check if attestation is already determined valid
-    for _, attestation in enumerate(valid_attestations):
+    for attestation in valid_attestations:
         if candidate == attestation:
             return True
 
     # Check previous attestation
     if candidate.data.previous_crosslink.epoch <= PHASE_1_FORK_EPOCH:
-        assert candidate.data.previous_crosslink.data_root == ZERO_HASH
+        assert candidate.data.previous_crosslink.data_root == Hash()
     else:
         previous_attestation = next(
-            (attestation for attestation in valid_attestations if
-                attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root),
+            (attestation for attestation in valid_attestations
+             if attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root),
             None,
         )
         assert previous_attestation is not None
-        assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot)
+        assert candidate.data.previous_attestation.epoch < compute_epoch_of_slot(candidate.data.slot)
 
     # Check crosslink data root
-    start_epoch = beacon_state.latest_crosslinks[shard].epoch
-    end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_EPOCHS_PER_CROSSLINK)
+    start_epoch = beacon_state.crosslinks[shard].epoch
+    end_epoch = min(compute_epoch_of_slot(candidate.data.slot) - CROSSLINK_LOOKBACK,
+                    start_epoch + MAX_EPOCHS_PER_CROSSLINK)
     blocks = []
     for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH):
         blocks.append(shard_blocks[slot])
diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md
index 8501c58690..580b669f27 100644
--- a/specs/light_client/sync_protocol.md
+++ b/specs/light_client/sync_protocol.md
@@ -31,7 +31,7 @@ We define an "expansion" of an object as an object where a field in an object th
 
 We define two expansions:
 
-* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])`.
+* `ExtendedBeaconState`, which is identical to a `BeaconState` except `compact_committees_roots: List[Bytes32]` is replaced by `active_indices: List[List[ValidatorIndex]]`, where `BeaconState.compact_committees_roots[i] = hash_tree_root(ExtendedBeaconState.active_indices[i])`.
 * `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`.
 
 ### `get_active_validator_indices`
@@ -40,10 +40,10 @@ Note that there is now a new way to compute `get_active_validator_indices`:
 
 ```python
 def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]:
-    return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH]
+    return state.active_indices[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
 ```
 
-Note that it takes `state` instead of `state.validator_registry` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments.
+Note that it takes `state` instead of `state.validators` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments.
 
 
 ### `MerklePartial`
@@ -84,8 +84,8 @@ def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) ->
     indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count)
     return PeriodData(
         validator_count,
-        generate_seed(block.state, period_start),
-        [block.state.validator_registry[i] for i in indices],
+        get_seed(block.state, period_start),
+        [block.state.validators[i] for i in indices],
     )
 ```
 
@@ -124,7 +124,7 @@ def compute_committee(header: BeaconBlockHeader,
     maximal_later_committee = validator_memory.later_period_data.committee
     earlier_start_epoch = get_earlier_start_epoch(header.slot)
     later_start_epoch = get_later_start_epoch(header.slot)
-    epoch = slot_to_epoch(header.slot)
+    epoch = compute_epoch_of_slot(header.slot)
 
     committee_count = max(
         earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE),
@@ -153,8 +153,8 @@ def compute_committee(header: BeaconBlockHeader,
     # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from
     # later committee; return a sorted list of the union of the two, deduplicated
     return sorted(list(set(
-        [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] +
-        [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)]
+        [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)]
+        + [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)]
     )))
 ```
 
@@ -167,8 +167,8 @@ If a client wants to update its `finalized_header` it asks the network for a `Bl
 ```python
 {
     'header': BeaconBlockHeader,
-    'shard_aggregate_signature': 'bytes96',
-    'shard_bitfield': 'bytes',
+    'shard_aggregate_signature': BLSSignature,
+    'shard_bits': Bitlist[PLACEHOLDER],
     'shard_parent_block': ShardBlock,
 }
 ```
@@ -180,20 +180,20 @@ def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: Val
     assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header)
     committee = compute_committee(proof.header, validator_memory)
     # Verify that we have >=50% support
-    support_balance = sum([v.effective_balance for i, v in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True])
+    support_balance = sum([v.effective_balance for i, v in enumerate(committee) if proof.shard_bits[i]])
     total_balance = sum([v.effective_balance for i, v in enumerate(committee)])
     assert support_balance * 2 > total_balance
     # Verify shard attestations
     group_public_key = bls_aggregate_pubkeys([
         v.pubkey for v, index in enumerate(committee)
-        if get_bitfield_bit(proof.shard_bitfield, index) is True
+        if proof.shard_bits[index]
     ])
     assert bls_verify(
         pubkey=group_public_key,
         message_hash=hash_tree_root(shard_parent_block),
         signature=proof.shard_aggregate_signature,
-        domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER),
+        domain=get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_slot(shard_block.slot)),
     )
 ```
 
-The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes.
+The size of this proof is only 200 (header) + 96 (signature) + 16 (bits) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes.
diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md
index b81f78408f..be154075c1 100644
--- a/specs/networking/rpc-interface.md
+++ b/specs/networking/rpc-interface.md
@@ -95,8 +95,8 @@ Since some clients are waiting for `libp2p` implementations in their respective
 (
     network_id: uint8
     chain_id: uint64
-    latest_finalized_root: bytes32
-    latest_finalized_epoch: uint64
+    finalized_root: bytes32
+    finalized_epoch: uint64
     best_root: bytes32
     best_slot: uint64
 )
@@ -107,7 +107,7 @@ Clients exchange `hello` messages upon connection, forming a two-phase handshake
 Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions:
 
 1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this client.
-2. If the `latest_finalized_root` shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3:
+2. If the `finalized_root` shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3:
 
 ```
               Root A
@@ -136,7 +136,7 @@ Root B          ^
        +---+
 ```
 
-Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e. RPC method `10`).
+Once the handshake completes, the client with the higher `finalized_epoch` or `best_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e. RPC method `10`).
 
 ### Goodbye
 
diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md
index 2adff23888..8d9c331033 100644
--- a/specs/simple-serialize.md
+++ b/specs/simple-serialize.md
@@ -11,16 +11,22 @@
     - [Typing](#typing)
         - [Basic types](#basic-types)
         - [Composite types](#composite-types)
+        - [Variable-size and fixed-size](#variable-size-and-fixed-size)
         - [Aliases](#aliases)
         - [Default values](#default-values)
+            - [`is_empty`](#is_empty)
         - [Illegal types](#illegal-types)
     - [Serialization](#serialization)
-        - [`"uintN"`](#uintn)
-        - [`"bool"`](#bool)
-        - [`"null`](#null)
+        - [`uintN`](#uintn)
+        - [`boolean`](#boolean)
+        - [`null`](#null)
+        - [`Bitvector[N]`](#bitvectorn)
+        - [`Bitlist[N]`](#bitlistn)
         - [Vectors, containers, lists, unions](#vectors-containers-lists-unions)
     - [Deserialization](#deserialization)
     - [Merkleization](#merkleization)
+        - [Merkleization of `Bitvector[N]`](#merkleization-of-bitvectorn)
+        - [`Bitlist[N]`](#bitlistn-1)
     - [Self-signed containers](#self-signed-containers)
     - [Implementations](#implementations)
 
@@ -37,71 +43,96 @@
 ## Typing
 ### Basic types
 
-* `"uintN"`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
-* `"bool"`: `True` or `False`
+* `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
+* `boolean`: `True` or `False`
 
 ### Composite types
 
 * **container**: ordered heterogeneous collection of values
-    * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}`
-* **vector**: ordered fixed-length homogeneous collection of values
-    * angle bracket notation `[type, N]`, e.g. `["uint64", N]`
-* **list**: ordered variable-length homogeneous collection of values
-    * angle bracket notation `[type]`, e.g. `["uint64"]`
+    * python dataclass notation with key-type pairs, e.g.
+```python
+class ContainerExample(Container):
+    foo: uint64
+    bar: boolean
+```
+* **vector**: ordered fixed-length homogeneous collection, with `N` values
+    * notation `Vector[type, N]`, e.g. `Vector[uint64, N]`
+* **list**: ordered variable-length homogeneous collection, limited to `N` values
+    * notation `List[type, N]`, e.g. `List[uint64, N]`
+* **bitvector**: ordered fixed-length collection of `boolean` values, with `N` bits
+    * notation `Bitvector[N]`
+* **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits
+    * notation `Bitlist[N]`
 * **union**: union type containing one of the given subtypes
-    * round bracket notation `(type_1, type_2, ...)`, e.g. `("null", "uint64")`
+    * notation `Union[type_1, type_2, ...]`, e.g. `Union[null, uint64]`
 
 ### Variable-size and fixed-size
 
-We recursively define "variable-size" types to be lists and unions and all types that contain a variable-size type. All other types are said to be "fixed-size".
+We recursively define "variable-size" types to be lists, unions, `Bitlist` and all types that contain a variable-size type. All other types are said to be "fixed-size".
 
 ### Aliases
 
 For convenience we alias:
 
-* `"byte"` to `"uint8"` (this is a basic type)
-* `"bytes"` to `["byte"]` (this is *not* a basic type)
-* `"bytesN"` to `["byte", N]` (this is *not* a basic type)
-* `"null"`: `{}`, i.e. the empty container
+* `bit` to `boolean`
+* `byte` to `uint8` (this is a basic type)
+* `BytesN` to `Vector[byte, N]` (this is *not* a basic type)
+* `null`: `{}`, i.e. the empty container
 
 ### Default values
 
-The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. Unions default to the first type in the union (with type index zero), which is `"null"` if present in the union.
+The default value of a type upon initialization is recursively defined using `0` for `uintN`, `False` for `boolean` and the elements of `Bitvector`, and `[]` for lists and `Bitlist`. Unions default to the first type in the union (with type index zero), which is `null` if present in the union.
 
 #### `is_empty`
 
-An SSZ object is called empty (and thus `is_empty(object)` returns true) if it is equal to the default value for that type.
+An SSZ object is called empty (and thus, `is_empty(object)` returns true) if it is equal to the default value for that type.
 
 ### Illegal types
 
-Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `"null"` type is only legal as the first type in a union subtype (i.e., with type index zero).
+Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `null` type is only legal as the first type in a union subtype (i.e. with type index zero).
 
 ## Serialization
 
-We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`.
+We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `bytes`.
 
-> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) objects implicitly carry their type.
+*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) objects implicitly carry their type.
 
-### `"uintN"`
+### `uintN`
 
 ```python
 assert N in [8, 16, 32, 64, 128, 256]
 return value.to_bytes(N // 8, "little")
 ```
 
-### `"bool"`
+### `boolean`
 
 ```python
 assert value in (True, False)
 return b"\x01" if value is True else b"\x00"
 ```
 
-### `"null"`
+### `null`
 
 ```python
 return b""
 ```
 
+### `Bitvector[N]`
+
+```python
+as_integer = sum([value[i] << i for i in range(len(value))])
+return as_integer.to_bytes((N + 7) // 8, "little")
+```
+
+### `Bitlist[N]`
+
+Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional leading `1` bit is added so that the length in bits will also be known.
+
+```python
+as_integer = (1 << len(value)) + sum([value[i] << i for i in range(len(value))])
+return as_integer.to_bytes((as_integer.bit_length() + 7) // 8, "little")
+```
+
 ### Vectors, containers, lists, unions
 
 ```python
@@ -136,23 +167,47 @@ return serialized_type_index + serialized_bytes
 
 Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations).
 
+Note that deserialization requires hardening against invalid inputs. A non-exhaustive list:
+- Offsets: out of order, out of range, mismatching minimum element size
+- Scope: Extra unused bytes, not aligned with element size.
+- More elements than a list limit allows. Part of enforcing consensus.
+
 ## Merkleization
 
 We first define helper functions:
 
 * `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
-* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root. Note that `merkleize` on a single chunk is simply that chunk, i.e. the identity when the number of chunks is one.
+* `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
+* `merkleize(data, pad_for)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root.
+  The merkleization depends on the effective input, which can be padded: if `pad_for=L`, then pad the `data` with zeroed chunks to `next_pow_of_two(L)` (virtually for memory efficiency).
+  Then, merkleize the chunks (empty input is padded to 1 zero chunk):
+    - If `1` chunk: A single chunk is simply that chunk, i.e. the identity when the number of chunks is one.
+    - If `> 1` chunks: pad to `next_pow_of_two(len(chunks))`, merkleize as binary tree.
 * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
 * `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
 
 We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
 
 * `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects
-* `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects
+* `mix_in_length(merkleize(pack(value), pad_for=(N * elem_size / BYTES_PER_CHUNK)), len(value))` if `value` is a list of basic objects.
 * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container
-* `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects
+* `mix_in_length(merkleize([hash_tree_root(element) for element in value], pad_for=N), len(value))` if `value` is a list of composite objects.
 * `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type
 
+### Merkleization of `Bitvector[N]`
+
+```python
+as_integer = sum([value[i] << i for i in range(len(value))])
+return merkleize(as_integer.to_bytes((N + 7) // 8, "little"))
+```
+
+### `Bitlist[N]`
+
+```python
+as_integer = sum([value[i] << i for i in range(len(value))])
+return mix_in_length(merkleize(as_integer.to_bytes((N + 7) // 8, "little")), len(value))
+```
+
 ## Self-signed containers
 
 Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
@@ -164,7 +219,7 @@ Let `value` be a self-signed container object. The convention is that the signat
 | Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
 | Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
 | Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
-| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) |
+| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
 | TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) |
 | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
 | Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) |
diff --git a/specs/test_formats/bls/msg_hash_g2_compressed.md b/specs/test_formats/bls/msg_hash_g2_compressed.md
index 2feeb92ba5..bbc1b82fef 100644
--- a/specs/test_formats/bls/msg_hash_g2_compressed.md
+++ b/specs/test_formats/bls/msg_hash_g2_compressed.md
@@ -6,8 +6,8 @@ A BLS compressed-hash to G2.
 
 ```yaml
 input: 
-  message: bytes32,
-  domain: bytes -- any number
+  message: bytes32
+  domain: bytes8   -- the BLS domain
 output: List[bytes48] -- length of two
 ```
 
diff --git a/specs/test_formats/bls/msg_hash_g2_uncompressed.md b/specs/test_formats/bls/msg_hash_g2_uncompressed.md
index 792fe1f038..c79afa94cb 100644
--- a/specs/test_formats/bls/msg_hash_g2_uncompressed.md
+++ b/specs/test_formats/bls/msg_hash_g2_uncompressed.md
@@ -1,13 +1,13 @@
 # Test format: BLS hash-uncompressed
 
-A BLS uncompressed-hash to G2. 
+A BLS uncompressed-hash to G2.
 
 ## Test case format
 
 ```yaml
-input: 
-  message: bytes32,
-  domain: bytes -- any number
+input:
+  message: bytes32
+  domain: bytes8   -- the BLS domain
 output: List[List[bytes48]] -- 3 lists, each a length of two
 ```
 
diff --git a/specs/test_formats/bls/sign_msg.md b/specs/test_formats/bls/sign_msg.md
index 9916f2cc2c..46f9f16970 100644
--- a/specs/test_formats/bls/sign_msg.md
+++ b/specs/test_formats/bls/sign_msg.md
@@ -1,6 +1,6 @@
 # Test format: BLS sign message
 
-Message signing with BLS should produce a signature. 
+Message signing with BLS should produce a signature.
 
 ## Test case format
 
@@ -8,7 +8,7 @@ Message signing with BLS should produce a signature.
 input:
   privkey: bytes32 -- the private key used for signing
   message: bytes32 -- input message to sign (a hash)
-  domain: bytes    -- BLS domain
+  domain: bytes8   -- the BLS domain
 output: bytes96    -- expected signature
 ```
 
diff --git a/specs/test_formats/epoch_processing/README.md b/specs/test_formats/epoch_processing/README.md
index 6384a0eda9..dbd4ca639f 100644
--- a/specs/test_formats/epoch_processing/README.md
+++ b/specs/test_formats/epoch_processing/README.md
@@ -17,13 +17,17 @@ post: BeaconState      -- state after applying the epoch sub-transition.
 ## Condition
 
 A handler of the `epoch_processing` test-runner should process these cases, 
- calling the corresponding processing implementation.
+ calling the corresponding processing implementation (same name, prefixed with `process_`).
+This excludes the other parts of the epoch-transition.
+The provided pre-state is already transitioned to just before the specific sub-transition of focus of the handler.
 
 Sub-transitions:
 
-| *`sub-transition-name`* | *`processing call`*               |
-|-------------------------|-----------------------------------|
-| `crosslinks`            | `process_crosslinks(state)`       |
-| `registry_updates`      | `process_registry_updates(state)` |
+- `justification_and_finalization`
+- `crosslinks`
+- *`rewards_and_penalties` - planned testing extension*
+- `registry_updates`
+- `slashings`
+- `final_updates`
 
 The resulting state should match the expected `post` state.
diff --git a/specs/test_formats/genesis/README.md b/specs/test_formats/genesis/README.md
new file mode 100644
index 0000000000..25761e2f6a
--- /dev/null
+++ b/specs/test_formats/genesis/README.md
@@ -0,0 +1,8 @@
+# Genesis tests
+
+The aim of the genesis tests is to provide a baseline to test genesis-state initialization and test
+ if the proposed genesis-validity conditions are working.
+
+There are two handlers, documented individually:
+- [`validity`](./validity.md): Tests if a genesis state is valid, i.e. if it counts as trigger to launch.
+- [`initialization`](./initialization.md): Tests the initialization of a genesis state based on Eth1 data.
diff --git a/specs/test_formats/genesis/initialization.md b/specs/test_formats/genesis/initialization.md
new file mode 100644
index 0000000000..437dd91a33
--- /dev/null
+++ b/specs/test_formats/genesis/initialization.md
@@ -0,0 +1,22 @@
+# Genesis creation testing
+
+Tests the initialization of a genesis state based on Eth1 data.
+
+## Test case format
+
+```yaml
+description: string            -- description of test case, purely for debugging purposes
+bls_setting: int               -- see general test-format spec.
+eth1_block_hash: Bytes32       -- the root of the Eth-1 block, hex encoded, with prefix 0x
+eth1_timestamp: int            -- the timestamp of the block, in seconds.
+deposits: [Deposit]            -- list of deposits to build the genesis state with
+state: BeaconState             -- the expected genesis state.
+```
+
+To process this test, build a genesis state with the provided `eth1_block_hash`, `eth1_timestamp` and `deposits`:
+`initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)`,
+ as described in the Beacon Chain specification.
+
+## Condition
+
+The resulting state should match the expected `state`.
diff --git a/specs/test_formats/genesis/validity.md b/specs/test_formats/genesis/validity.md
new file mode 100644
index 0000000000..792923e3a0
--- /dev/null
+++ b/specs/test_formats/genesis/validity.md
@@ -0,0 +1,19 @@
+# Genesis validity testing
+
+Tests if a genesis state is valid, i.e. if it counts as trigger to launch.
+
+## Test case format
+
+```yaml
+description: string    -- description of test case, purely for debugging purposes
+bls_setting: int       -- see general test-format spec.
+genesis: BeaconState   -- state to validate.
+is_valid: bool         -- true if the genesis state is deemed valid as to launch with, false otherwise.
+```
+
+To process the data, call `is_valid_genesis_state(genesis)`.
+
+
+## Condition
+
+The result of calling `is_valid_genesis_state(genesis)` should match the expected `is_valid` boolean.
diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md
index 32cf880b36..37c5df498b 100644
--- a/specs/test_formats/operations/README.md
+++ b/specs/test_formats/operations/README.md
@@ -14,22 +14,23 @@ post: BeaconState                      -- state after applying the operation. No
 
 ## Condition
 
-A handler of the `operations` test-runner should process these cases, 
+A handler of the `operations` test-runner should process these cases,
  calling the corresponding processing implementation.
+This excludes the other parts of the block-transition.
 
 Operations:
 
 | *`operation-name`*      | *`operation-object`* | *`input name`*       | *`processing call`*                                    |
 |-------------------------|----------------------|----------------------|--------------------------------------------------------|
-| `attestation`           | `Attestation`        | `attestation`        | `process_attestation(state, attestation)`                  |
-| `attester_slashing`     | `AttesterSlashing`   | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`            |
+| `attestation`           | `Attestation`        | `attestation`        | `process_attestation(state, attestation)`              |
+| `attester_slashing`     | `AttesterSlashing`   | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`  |
 | `block_header`          | `Block`              | `block`              | `process_block_header(state, block)`                   |
 | `deposit`               | `Deposit`            | `deposit`            | `process_deposit(state, deposit)`                      |
 | `proposer_slashing`     | `ProposerSlashing`   | `proposer_slashing`  | `process_proposer_slashing(state, proposer_slashing)`  |
 | `transfer`              | `Transfer`           | `transfer`           | `process_transfer(state, transfer)`                    |
 | `voluntary_exit`        | `VoluntaryExit`      | `voluntary_exit`     | `process_voluntary_exit(state, voluntary_exit)`        |
 
-Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. 
+Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here.
 
 The resulting state should match the expected `post` state, or if the `post` state is left blank,
  the handler should reject the input operation as invalid.
diff --git a/specs/test_formats/sanity/slots.md b/specs/test_formats/sanity/slots.md
index 81866d47b9..04fecd1867 100644
--- a/specs/test_formats/sanity/slots.md
+++ b/specs/test_formats/sanity/slots.md
@@ -8,15 +8,14 @@ Sanity tests to cover a series of one or more empty-slot transitions being proce
 description: string    -- description of test case, purely for debugging purposes
 bls_setting: int       -- see general test-format spec.
 pre: BeaconState       -- state before running through the transitions.
-slots: N               -- amount of slots to process, N being a positive numer.
+slots: N               -- amount of slots to process, N being a positive number.
 post: BeaconState      -- state after applying all the transitions.
 ```
 
-The transition with pure time, no blocks, is known as `state_transition_to(state, slot)` in the spec.
+The transition with pure time, no blocks, is known as `process_slots(state, slot)` in the spec.
 This runs state-caching (pure slot transition) and epoch processing (every E slots).
 
-To process the data, call `state_transition_to(pre, pre.slot + N)`. And see if `pre` mutated into the equivalent of `post`.
-
+To process the data, call `process_slots(pre, pre.slot + N)`.
 
 ## Condition
 
diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md
index 29724da514..58d87d4500 100644
--- a/specs/validator/0_beacon-chain-validator.md
+++ b/specs/validator/0_beacon-chain-validator.md
@@ -44,8 +44,8 @@
                 - [Crosslink vote](#crosslink-vote)
             - [Construct attestation](#construct-attestation)
                 - [Data](#data)
-                - [Aggregation bitfield](#aggregation-bitfield)
-                - [Custody bitfield](#custody-bitfield)
+                - [Aggregation bits](#aggregation-bits)
+                - [Custody bits](#custody-bits)
                 - [Aggregate signature](#aggregate-signature)
     - [How to avoid slashing](#how-to-avoid-slashing)
         - [Proposer slashing](#proposer-slashing)
@@ -57,7 +57,7 @@
 
 This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
 
-A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof of work networks in which a miner provides collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
+A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof-of-work networks in which miners provide collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
 
 ## Prerequisites
 
@@ -86,75 +86,78 @@ Validator public keys are [G1 points](../bls_signature.md#g1-points) on the [BLS
 A secondary withdrawal private key, `withdrawal_privkey`, must also be securely generated along with the resultant `withdrawal_pubkey`. This `withdrawal_privkey` does not have to be available for signing during the normal lifetime of a validator and can live in "cold storage".
 
 The validator constructs their `withdrawal_credentials` via the following:
-* Set `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE`.
+
+* Set `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`.
 * Set `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]`.
 
 ### Submit deposit
 
-In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
+In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proof-of-work chain. Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
 
 To submit a deposit:
 
-* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
-* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE`.
-* Set `deposit_data.amount = amount`.
-* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=bls_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `bls_domain` will default to zeroes there).
-* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei.
+- Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
+- Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE`.
+- Set `deposit_data.amount = amount`.
+- Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=compute_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `compute_domain` will default to zeroes there).
+- Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei.
 
-*Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`.
+*Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validators` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`.
 
 ### Process deposit
 
-Deposits cannot be processed into the beacon chain until the Eth 1.0 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth 1.0 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite Eth 1.0 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validator_registry` within an epoch or two. The validator is then in a queue to be activated.
+Deposits cannot be processed into the beacon chain until the Eth 1.0 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth 1.0 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite Eth 1.0 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated.
 
 ### Validator index
 
-Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
+Once a validator has been processed and added to the beacon state's `validators`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
 
 ### Activation
 
-In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes).
+In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes).
 
 The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows:
 
 ```python
-validator = state.validator_registry[validator_index]
-is_active = is_active_validator(validator, get_current_epoch(state))
+def check_if_validator_active(state: BeaconState, validator_index: ValidatorIndex) -> bool:
+    validator = state.validators[validator_index]
+    return is_active_validator(validator, get_current_epoch(state))
 ```
 
 Once a validator is activated, the validator is assigned [responsibilities](#beacon-chain-responsibilities) until exited.
 
-*Note*: There is a maximum validator churn per finalized epoch so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated.
+*Note*: There is a maximum validator churn per finalized epoch, so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated.
 
 ## Validator assignments
 
 A validator can get committee assignments for a given epoch using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`.
 
 ```python
-def get_committee_assignment(
-        state: BeaconState,
-        epoch: Epoch,
-        validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
+def get_committee_assignment(state: BeaconState,
+                             epoch: Epoch,
+                             validator_index: ValidatorIndex) -> Optional[Tuple[Sequence[ValidatorIndex], Shard, Slot]]:
     """
     Return the committee assignment in the ``epoch`` for ``validator_index``.
     ``assignment`` returned is a tuple of the following form:
         * ``assignment[0]`` is the list of validators in the committee
         * ``assignment[1]`` is the shard to which the committee is assigned
         * ``assignment[2]`` is the slot at which the committee is assigned
+    Return None if no assignment.
     """
     next_epoch = get_current_epoch(state) + 1
     assert epoch <= next_epoch
 
-    committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
-    epoch_start_slot = get_epoch_start_slot(epoch)
-    for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
+    committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH
+    start_slot = compute_start_slot_of_epoch(epoch)
+    for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
         offset = committees_per_slot * (slot % SLOTS_PER_EPOCH)
-        slot_start_shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
+        slot_start_shard = (get_start_shard(state, epoch) + offset) % SHARD_COUNT
         for i in range(committees_per_slot):
-            shard = (slot_start_shard + i) % SHARD_COUNT
+            shard = Shard((slot_start_shard + i) % SHARD_COUNT)
             committee = get_crosslink_committee(state, epoch, shard)
             if validator_index in committee:
-                return committee, shard, slot
+                return committee, shard, Slot(slot)
+    return None
 ```
 
 A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
@@ -183,7 +186,7 @@ A validator has two primary responsibilities to the beacon chain: [proposing blo
 
 A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
 
-There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
+There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312,500 validators = 10 million ETH, that's once per ~3 weeks).
 
 #### Block header
 
@@ -205,88 +208,84 @@ Set `block.state_root = hash_tree_root(state)` of the resulting `state` of the `
 
 ##### Randao reveal
 
-Set `block.randao_reveal = epoch_signature` where `epoch_signature` is defined as:
+Set `block.randao_reveal = epoch_signature` where `epoch_signature` is obtained from:
 
 ```python
-epoch_signature = bls_sign(
-    privkey=validator.privkey,  # privkey stored locally, not in state
-    message_hash=hash_tree_root(slot_to_epoch(block.slot)),
-    domain=get_domain(
-        fork=fork,  # `fork` is the fork object at the slot `block.slot`
-        epoch=slot_to_epoch(block.slot),
-        domain_type=DOMAIN_RANDAO,
-    )
-)
+def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature:
+    domain = get_domain(state, DOMAIN_RANDAO, compute_epoch_of_slot(block.slot))
+    return bls_sign(privkey, hash_tree_root(compute_epoch_of_slot(block.slot)), domain)
 ```
 
 ##### Eth1 Data
 
-`block.eth1_data` is a mechanism used by block proposers vote on a recent Ethereum 1.0 block hash and an associated deposit root found in the Ethereum 1.0 deposit contract. When consensus is formed, `state.latest_eth1_data` is updated, and validator deposits up to this root can be processed. The deposit root can be calculated by calling the `get_deposit_root()` function of the deposit contract using the post-state of the block hash.
+The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_hash_tree_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.deposit_root`.
+
+Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where:
 
-* Let `D` be the list of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where:
-    * `vote.eth1_data.block_hash` is the hash of an Eth 1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_hash`.
-    * `vote.eth1_data.deposit_count` is the deposit count of the Eth 1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`.
-    * `vote.eth1_data.deposit_root` is the deposit root of the Eth 1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`.
-* If `D` is empty:
-    * Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical Eth 1.0 chain.
-    * Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the Eth 1.0 deposit contract in the post-state of the block referenced by `block_hash`
-    * Let `best_vote_data = Eth1Data(block_hash=block_hash, deposit_root=deposit_root, deposit_count=deposit_count)`.
-* If `D` is nonempty:
-    * Let `best_vote_data` be the `eth1_data` member of `D` that has the highest vote count (`D.count(eth1_data)`), breaking ties by favoring block hashes with higher associated block height.
-* Set `block.eth1_data = best_vote_data`.
+```python
+def get_eth1_vote(state: BeaconState, previous_eth1_distance: uint64) -> Eth1Data:
+    new_eth1_data = [get_eth1_data(distance) for distance in range(ETH1_FOLLOW_DISTANCE, 2 * ETH1_FOLLOW_DISTANCE)]
+    all_eth1_data = [get_eth1_data(distance) for distance in range(ETH1_FOLLOW_DISTANCE, previous_eth1_distance)]
+
+    valid_votes = []
+    for slot, vote in enumerate(state.eth1_data_votes):
+        period_tail = slot % SLOTS_PER_ETH1_VOTING_PERIOD >= integer_squareroot(SLOTS_PER_ETH1_VOTING_PERIOD)
+        if vote in new_eth1_data or (period_tail and vote in all_eth1_data):
+            valid_votes.append(vote)
+
+    return max(
+        valid_votes,
+        key=lambda v: (valid_votes.count(v), -all_eth1_data.index(v)),  # Tiebreak by smallest distance
+        default=get_eth1_data(ETH1_FOLLOW_DISTANCE),
+    )
+```
 
 ##### Signature
 
-Set `block.signature = block_signature` where `block_signature` is defined as:
+Set `header.signature = block_signature` where `block_signature` is obtained from:
 
 ```python
-block_signature = bls_sign(
-    privkey=validator.privkey,  # privkey store locally, not in state
-    message_hash=signing_root(block),
-    domain=get_domain(
-        fork=fork,  # `fork` is the fork object at the slot `block.slot`
-        epoch=slot_to_epoch(block.slot),
-        domain_type=DOMAIN_BEACON_BLOCK,
-    )
-)
+def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey: int) -> BLSSignature:
+    domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_of_slot(header.slot))
+    return bls_sign(privkey, signing_root(header), domain)
 ```
 
 #### Block body
 
 ##### Proposer slashings
 
-Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
+Up to `MAX_PROPOSER_SLASHINGS`, [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
 
 ##### Attester slashings
 
-Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included.
+Up to `MAX_ATTESTER_SLASHINGS`, [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included.
 
 ##### Attestations
 
-Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
+Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
 
 ##### Deposits
 
-If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`.  These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
+If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`.  These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
 
-The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
+The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
 
 ##### Voluntary exits
 
-Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits).
+Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits).
 
 ### Attestations
 
-A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `shard`, and assigned `slot` for which the validator performs this role during an epoch is defined by `get_committee_assignment(state, epoch, validator_index)`.
+A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `shard`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`.
 
-A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned ― that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`.
+A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned―that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`.
 
 #### Attestation data
 
-First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
+First, the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
 
-* Let `head_block` be the result of running the fork choice during the assigned slot.
-* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
+- Let `head_block` be the result of running the fork choice during the assigned slot.
+- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
 
 ##### LMD GHOST vote
 
@@ -294,77 +293,65 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`.
 
 ##### FFG vote
 
-* Set `attestation_data.source_epoch = head_state.current_justified_epoch`.
-* Set `attestation_data.source_root = head_state.current_justified_root`.
-* Set `attestation_data.target_epoch = get_current_epoch(head_state)`
-* Set `attestation_data.target_root = epoch_boundary_block_root` where `epoch_boundary_block_root` is the root of block at the most recent epoch boundary.
+- Set `attestation_data.source = head_state.current_justified_checkpoint`.
+- Set `attestation_data.target = Checkpoint(epoch=get_current_epoch(head_state), root=epoch_boundary_block_root)` where `epoch_boundary_block_root` is the root of block at the most recent epoch boundary.
 
 *Note*: `epoch_boundary_block_root` can be looked up in the state using:
-* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`.
-* Let `epoch_boundary_block_root = signing_root(head_block) if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
+
+- Let `start_slot = compute_start_slot_of_epoch(get_current_epoch(head_state))`.
+- Let `epoch_boundary_block_root = signing_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`.
 
 ##### Crosslink vote
 
 Construct `attestation_data.crosslink` via the following.
 
-* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee.
-* Let `parent_crosslink = head_state.current_crosslinks[shard]`.
-* Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`.
-* Set `attestation_data.crosslink.end_epoch = min(attestation_data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`.
-* Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`.
-* Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0.
+- Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee.
+- Let `parent_crosslink = head_state.current_crosslinks[shard]`.
+- Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`.
+- Set `attestation_data.crosslink.end_epoch = min(attestation_data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`.
+- Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`.
+- Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0.
 
 #### Construct attestation
 
-Next the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object.
+Next, the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object.
 
 ##### Data
 
 Set `attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object defined in the previous section, [attestation data](#attestation-data).
 
-##### Aggregation bitfield
+##### Aggregation bits
 
-* Let `aggregation_bitfield` be a byte array filled with zeros of length `(len(committee) + 7) // 8`.
-* Let `index_into_committee` be the index into the validator's `committee` at which `validator_index` is located.
-* Set `aggregation_bitfield[index_into_committee // 8] |= 2 ** (index_into_committee % 8)`.
-* Set `attestation.aggregation_bitfield = aggregation_bitfield`.
+- Let `attestation.aggregation_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` where the bit at the index in the aggregated validator's `committee` is set to `0b1`.
 
-*Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`.
+*Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bits)` should return a list of length equal to 1, containing `validator_index`.
 
-##### Custody bitfield
+##### Custody bits
 
-* Let `custody_bitfield` be a byte array filled with zeros of length `(len(committee) + 7) // 8`.
-* Set `attestation.custody_bitfield = custody_bitfield`.
+- Let `attestation.custody_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` filled with zeros of length `len(committee)`.
 
 *Note*: This is a stub for Phase 0.
 
 ##### Aggregate signature
 
-Set `attestation.aggregate_signature = signed_attestation_data` where `signed_attestation_data` is defined as:
+Set `attestation.signature = signed_attestation_data` where `signed_attestation_data` is obtained from:
 
 ```python
-attestation_data_and_custody_bit = AttestationDataAndCustodyBit(
-    data=attestation.data,
-    custody_bit=0b0,
-)
-attestation_message = hash_tree_root(attestation_data_and_custody_bit)
-
-signed_attestation_data = bls_sign(
-    privkey=validator.privkey,  # privkey stored locally, not in state
-    message_hash=attestation_message,
-    domain=get_domain(
-        fork=fork,  # `fork` is the fork object at the slot, `attestation_data.slot`
-        epoch=slot_to_epoch(attestation_data.slot),
-        domain_type=DOMAIN_ATTESTATION,
+def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestation, privkey: int) -> BLSSignature:
+    attestation_data_and_custody_bit = AttestationDataAndCustodyBit(
+        data=attestation.data,
+        custody_bit=0b0,
     )
-)
+
+    domain = get_domain(state, DOMAIN_ATTESTATION, attestation.data.target.epoch)
+    return bls_sign(privkey, hash_tree_root(attestation_data_and_custody_bit), domain)
 ```
 
 ## How to avoid slashing
 
-"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed -- [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed.
+"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed.
 
-*Note*: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102 and vice versa.
+*Note*: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can be slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102, and vice versa.
 
 ### Proposer slashing
 
@@ -373,17 +360,19 @@ To avoid "proposer slashings", a validator must not sign two conflicting [`Beaco
 *In Phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings.*
 
 Specifically, when signing a `BeaconBlock`, a validator should perform the following steps in the following order:
-1. Save a record to hard disk that a beacon block has been signed for the `epoch=slot_to_epoch(block.slot)`.
+
+1. Save a record to hard disk that a beacon block has been signed for the `epoch=compute_epoch_of_slot(block.slot)`.
 2. Generate and broadcast the block.
 
-If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing.
+If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing.
 
 ### Attester slashing
 
 To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects, i.e. two attestations that satisfy [`is_slashable_attestation_data`](../core/0_beacon-chain.md#is_slashable_attestation_data).
 
 Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order:
-1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`.
+
+1. Save a record to hard disk that an attestation has been signed for source (i.e. `attestation_data.source.epoch`) and target (i.e. `attestation_data.target.epoch`).
 2. Generate and broadcast attestation.
 
-If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing.
+If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing.
diff --git a/specs/validator/0_beacon-node-validator-api.md b/specs/validator/0_beacon-node-validator-api.md
index 2a5fe7fcda..e87d367019 100644
--- a/specs/validator/0_beacon-node-validator-api.md
+++ b/specs/validator/0_beacon-node-validator-api.md
@@ -1,28 +1,27 @@
 # Ethereum 2.0 Phase 0 -- Beacon Node API for Validator
 
-__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- Honest Validator](0_beacon-chain-validator.md) that describes an API exposed by the beacon node, which enables the validator client to participate in the Ethereum 2.0 protocol.
+**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- Honest Validator](0_beacon-chain-validator.md) that describes an API exposed by the beacon node, which enables the validator client to participate in the Ethereum 2.0 protocol.
 
 ## Outline
 
-This document outlines a minimal application programming interface (API) which is exposed by a beacon node for use by a validator client implementation which aims to facilitate [_phase 0_](../../README.md#phase-0) of Ethereum 2.0.
+This document outlines a minimal application programming interface (API) which is exposed by a beacon node for use by a validator client implementation which aims to facilitate [Phase 0](../../README.md#phase-0) of Ethereum 2.0.
 
-The API is a REST interface, accessed via HTTP, designed for use as a local communications protocol between binaries. The only supported return data type is currently JSON.
+The API is a REST interface, accessed via HTTP, designed for use as a local communications protocol between binaries. Currently, the only supported return data type is JSON.
 
-###  Background
-The beacon node maintains the state of the beacon chain by communicating with other beacon nodes in the Ethereum Serenity network. Conceptually, it does not maintain keypairs that participate with the beacon chain.
+## Background
 
-The validator client is a conceptually separate entity which utilizes private keys to perform validator related tasks on the beacon chain, which we call validator "duties". These duties include the production of beacon blocks and signing of attestations.
+The beacon node maintains the state of the beacon chain by communicating with other beacon nodes in the Ethereum 2.0 network. Conceptually, it does not maintain keypairs that participate with the beacon chain.
 
-Since it is recommended to separate these concerns in the client implementations, we must clearly define the communication between them.
+The validator client is a conceptually separate entity which utilizes private keys to perform validator related tasks, called "duties", on the beacon chain. These duties include the production of beacon blocks and signing of attestations.
 
-The goal of this specification is to promote interoperability between beacon nodes and validator clients derived from different projects and to encourage innovation in validator client implementations, independently from beacon node development. For example, the validator client from Lighthouse could communicate with a running instance of the beacon node from Prysm, or a staking pool might create a decentrally managed validator client which utilises the same API.
+Since it is recommended to separate these concerns in the client implementations, we must clearly define the communication between them.
 
-This specification is derived from a proposal and discussion on Issues [#1011](https://github.com/ethereum/eth2.0-specs/issues/1011) and [#1012](https://github.com/ethereum/eth2.0-specs/issues/1012)
+The goal of this specification is to promote interoperability between beacon nodes and validator clients derived from different projects and to encourage innovation in validator client implementations, independently from beacon node development. For example, the validator client from [Lighthouse](https://github.com/sigp/lighthouse) could communicate with a running instance of the beacon node from [Prysm](https://github.com/prysmaticlabs/prysm), or a staking pool might create a decentrally managed validator client which utilizes the same API.
 
+This specification is derived from a proposal and discussion on Issues [#1011](https://github.com/ethereum/eth2.0-specs/issues/1011) and [#1012](https://github.com/ethereum/eth2.0-specs/issues/1012).
 
-## Specification 
+## Specification
 
 The API specification has been written in [OpenAPI 3.0](https://swagger.io/docs/specification/about/) and is provided in the [beacon_node_oapi.yaml](beacon_node_oapi.yaml) file alongside this document.
 
-For convenience, this specification has been uploaded to [SwaggerHub](https://swagger.io/tools/swaggerhub/) at the following URL:
-[https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator](https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator)
+For convenience, this specification has been uploaded to SwaggerHub [here](https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator).
diff --git a/specs/validator/beacon_node_oapi.yaml b/specs/validator/beacon_node_oapi.yaml
index 74be21fac8..4da8f7933e 100644
--- a/specs/validator/beacon_node_oapi.yaml
+++ b/specs/validator/beacon_node_oapi.yaml
@@ -415,16 +415,16 @@ components:
             type: object
             description: "The [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object from the Eth2.0 spec."
             properties:
-              aggregation_bitfield:
+              aggregation_bits:
                 type: string
                 format: byte
                 pattern: "^0x[a-fA-F0-9]+$"
-                description: "Attester aggregation bitfield."
-              custody_bitfield:
+                description: "Attester aggregation bits."
+              custody_bits:
                 type: string
                 format: byte
                 pattern: "^0x[a-fA-F0-9]+$"
-                description: "Custody bitfield."
+                description: "Custody bits."
               signature:
                 type: string
                 format: byte
diff --git a/test_generators/bls/README.md b/test_generators/bls/README.md
index a21ad16d94..2bf46e9ea1 100644
--- a/test_generators/bls/README.md
+++ b/test_generators/bls/README.md
@@ -9,7 +9,7 @@ The base unit is bytes48 of which only 381 bits are used
 
 ## Resources
 
-- [Eth2.0 spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md)
+- [Eth2.0 spec](../../specs/bls_signature.md)
 - [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf)
 - Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone 
 - [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381)
diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py
index 8a6a7dafec..284cf68b0e 100644
--- a/test_generators/bls/main.py
+++ b/test_generators/bls/main.py
@@ -27,9 +27,6 @@ def hex_to_int(x: str) -> int:
     return int(x, 16)
 
 
-# Note: even though a domain is only an uint64,
-# To avoid issues with YAML parsers that are limited to 53-bit (JS language limit)
-# It is serialized as an hex string as well.
 DOMAINS = [
     0,
     1,
@@ -92,7 +89,7 @@ def case01_message_hash_G2_uncompressed():
             yield {
                 'input': {
                     'message': '0x' + msg.hex(),
-                    'domain': int_to_hex(domain)
+                    'domain': int_to_hex(domain, byte_length=8)
                 },
                 'output': hash_message(msg, domain)
             }
@@ -104,7 +101,7 @@ def case02_message_hash_G2_compressed():
             yield {
                 'input': {
                     'message': '0x' + msg.hex(),
-                    'domain': int_to_hex(domain)
+                    'domain': int_to_hex(domain, byte_length=8)
                 },
                 'output': hash_message_compressed(msg, domain)
             }
@@ -129,7 +126,7 @@ def case04_sign_messages():
                     'input': {
                         'privkey': int_to_hex(privkey),
                         'message': '0x' + message.hex(),
-                        'domain': int_to_hex(domain)
+                        'domain': int_to_hex(domain, byte_length=8)
                     },
                     'output': '0x' + sig.hex()
                 }
diff --git a/test_generators/epoch_processing/main.py b/test_generators/epoch_processing/main.py
index 846f463a18..6a578c598e 100644
--- a/test_generators/epoch_processing/main.py
+++ b/test_generators/epoch_processing/main.py
@@ -4,7 +4,10 @@
 from eth2spec.phase1 import spec as spec_phase1
 from eth2spec.test.phase_0.epoch_processing import (
     test_process_crosslinks,
-    test_process_registry_updates
+    test_process_final_updates,
+    test_process_justification_and_finalization,
+    test_process_registry_updates,
+    test_process_slashings
 )
 from gen_base import gen_runner, gen_suite, gen_typing
 from gen_from_tests.gen import generate_from_tests
@@ -35,8 +38,16 @@ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
     gen_runner.run_generator("epoch_processing", [
         create_suite('crosslinks', 'minimal', lambda: generate_from_tests(test_process_crosslinks, 'phase0')),
         create_suite('crosslinks', 'mainnet', lambda: generate_from_tests(test_process_crosslinks, 'phase0')),
+        create_suite('final_updates', 'minimal', lambda: generate_from_tests(test_process_final_updates, 'phase0')),
+        create_suite('final_updates', 'mainnet', lambda: generate_from_tests(test_process_final_updates, 'phase0')),
+        create_suite('justification_and_finalization', 'minimal',
+                     lambda: generate_from_tests(test_process_justification_and_finalization, 'phase0')),
+        create_suite('justification_and_finalization', 'mainnet',
+                     lambda: generate_from_tests(test_process_justification_and_finalization, 'phase0')),
         create_suite('registry_updates', 'minimal',
                      lambda: generate_from_tests(test_process_registry_updates, 'phase0')),
         create_suite('registry_updates', 'mainnet',
                      lambda: generate_from_tests(test_process_registry_updates, 'phase0')),
+        create_suite('slashings', 'minimal', lambda: generate_from_tests(test_process_slashings, 'phase0')),
+        create_suite('slashings', 'mainnet', lambda: generate_from_tests(test_process_slashings, 'phase0')),
     ])
diff --git a/test_generators/genesis/README.md b/test_generators/genesis/README.md
new file mode 100644
index 0000000000..8a2b01c626
--- /dev/null
+++ b/test_generators/genesis/README.md
@@ -0,0 +1,8 @@
+# Genesis test generator
+
+Genesis tests cover the initialization and validity-based launch trigger for the Beacon Chain genesis state.
+
+Information on the format of the tests can be found in the [genesis test formats documentation](../../specs/test_formats/genesis/README.md).
+
+
+
diff --git a/test_generators/genesis/main.py b/test_generators/genesis/main.py
new file mode 100644
index 0000000000..82899b967f
--- /dev/null
+++ b/test_generators/genesis/main.py
@@ -0,0 +1,33 @@
+from typing import Callable, Iterable
+
+from eth2spec.test.genesis import test_initialization, test_validity
+
+from gen_base import gen_runner, gen_suite, gen_typing
+from gen_from_tests.gen import generate_from_tests
+from preset_loader import loader
+from eth2spec.phase0 import spec as spec
+
+
+def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
+        -> Callable[[str], gen_typing.TestSuiteOutput]:
+    def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
+        presets = loader.load_presets(configs_path, config_name)
+        spec.apply_constants_preset(presets)
+
+        return ("genesis_%s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite(
+            title="genesis testing",
+            summary="Genesis test suite, %s type, generated from pytests" % handler_name,
+            forks_timeline="testing",
+            forks=["phase0"],
+            config=config_name,
+            runner="genesis",
+            handler=handler_name,
+            test_cases=get_cases()))
+    return suite_definition
+
+
+if __name__ == "__main__":
+    gen_runner.run_generator("genesis", [
+        create_suite('initialization', 'minimal', lambda: generate_from_tests(test_initialization, 'phase0')),
+        create_suite('validity', 'minimal', lambda: generate_from_tests(test_validity, 'phase0')),
+    ])
diff --git a/test_generators/genesis/requirements.txt b/test_generators/genesis/requirements.txt
new file mode 100644
index 0000000000..595cee69cd
--- /dev/null
+++ b/test_generators/genesis/requirements.txt
@@ -0,0 +1,4 @@
+eth-utils==1.6.0
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py
index 38fa42f689..b61e98526f 100644
--- a/test_generators/operations/main.py
+++ b/test_generators/operations/main.py
@@ -49,7 +49,9 @@ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
         create_suite('proposer_slashing', 'minimal', lambda: generate_from_tests(test_process_proposer_slashing, 'phase0')),
         create_suite('proposer_slashing', 'mainnet', lambda: generate_from_tests(test_process_proposer_slashing, 'phase0')),
         create_suite('transfer',          'minimal', lambda: generate_from_tests(test_process_transfer, 'phase0')),
-        create_suite('transfer',          'mainnet', lambda: generate_from_tests(test_process_transfer, 'phase0')),
+        # Disabled: due to the high number of different transfer tests, this produces an enormous amount of test data.
+        # Unnecessarily so, as transfers are currently disabled, so this is not a priority.
+        # create_suite('transfer',          'mainnet', lambda: generate_from_tests(test_process_transfer, 'phase0')),
         create_suite('voluntary_exit',    'minimal', lambda: generate_from_tests(test_process_voluntary_exit, 'phase0')),
         create_suite('voluntary_exit',    'mainnet', lambda: generate_from_tests(test_process_voluntary_exit, 'phase0')),
     ])
diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py
index 291aa2c47d..adfab8cfb3 100644
--- a/test_generators/shuffling/main.py
+++ b/test_generators/shuffling/main.py
@@ -10,7 +10,7 @@
 def shuffling_case(seed, count):
     yield 'seed', '0x' + seed.hex()
     yield 'count', count
-    yield 'shuffled', [spec.get_shuffled_index(i, count, seed) for i in range(count)]
+    yield 'shuffled', [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)]
 
 
 @to_tuple
diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py
index 9d9af8c5e5..0dfdebf5dc 100644
--- a/test_generators/ssz_static/main.py
+++ b/test_generators/ssz_static/main.py
@@ -21,8 +21,8 @@
 
 
 @to_dict
-def create_test_case_contents(value, typ):
-    yield "value", encode.encode(value, typ)
+def create_test_case_contents(value):
+    yield "value", encode.encode(value)
     yield "serialized", '0x' + serialize(value).hex()
     yield "root", '0x' + hash_tree_root(value).hex()
     if hasattr(value, "signature"):
@@ -32,7 +32,7 @@ def create_test_case_contents(value, typ):
 @to_dict
 def create_test_case(rng: Random, name: str, typ, mode: random_value.RandomizationMode, chaos: bool):
     value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos)
-    yield name, create_test_case_contents(value, typ)
+    yield name, create_test_case_contents(value)
 
 
 def get_spec_ssz_types():
diff --git a/test_libs/pyspec/eth2spec/debug/decode.py b/test_libs/pyspec/eth2spec/debug/decode.py
index 5ce1160258..c0b977ab34 100644
--- a/test_libs/pyspec/eth2spec/debug/decode.py
+++ b/test_libs/pyspec/eth2spec/debug/decode.py
@@ -1,39 +1,29 @@
+from typing import Any
 from eth2spec.utils.ssz.ssz_impl import hash_tree_root
 from eth2spec.utils.ssz.ssz_typing import (
-    is_uint_type, is_bool_type, is_list_type,
-    is_vector_type, is_bytes_type, is_bytesn_type, is_container_type,
-    read_vector_elem_type, read_list_elem_type,
+    SSZType, SSZValue, uint, Container, Bytes, List, boolean,
     Vector, BytesN
 )
 
 
-def decode(data, typ):
-    if is_uint_type(typ):
-        return data
-    elif is_bool_type(typ):
-        assert data in (True, False)
-        return data
-    elif is_list_type(typ):
-        elem_typ = read_list_elem_type(typ)
-        return [decode(element, elem_typ) for element in data]
-    elif is_vector_type(typ):
-        elem_typ = read_vector_elem_type(typ)
-        return Vector(decode(element, elem_typ) for element in data)
-    elif is_bytes_type(typ):
-        return bytes.fromhex(data[2:])
-    elif is_bytesn_type(typ):
-        return BytesN(bytes.fromhex(data[2:]))
-    elif is_container_type(typ):
+def decode(data: Any, typ: SSZType) -> SSZValue:
+    if issubclass(typ, (uint, boolean)):
+        return typ(data)
+    elif issubclass(typ, (List, Vector)):
+        return typ(decode(element, typ.elem_type) for element in data)
+    elif issubclass(typ, (Bytes, BytesN)):
+        return typ(bytes.fromhex(data[2:]))
+    elif issubclass(typ, Container):
         temp = {}
-        for field, subtype in typ.get_fields():
-            temp[field] = decode(data[field], subtype)
-            if field + "_hash_tree_root" in data:
-                assert(data[field + "_hash_tree_root"][2:] ==
-                       hash_tree_root(temp[field], subtype).hex())
+        for field_name, field_type in typ.get_fields().items():
+            temp[field_name] = decode(data[field_name], field_type)
+            if field_name + "_hash_tree_root" in data:
+                assert (data[field_name + "_hash_tree_root"][2:] ==
+                        hash_tree_root(temp[field_name]).hex())
         ret = typ(**temp)
         if "hash_tree_root" in data:
-            assert(data["hash_tree_root"][2:] ==
-                   hash_tree_root(ret, typ).hex())
+            assert (data["hash_tree_root"][2:] ==
+                    hash_tree_root(ret).hex())
         return ret
     else:
         raise Exception(f"Type not recognized: data={data}, typ={typ}")
diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py
index 61dd87928b..ac4bd9df22 100644
--- a/test_libs/pyspec/eth2spec/debug/encode.py
+++ b/test_libs/pyspec/eth2spec/debug/encode.py
@@ -1,36 +1,32 @@
-from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize
 from eth2spec.utils.ssz.ssz_typing import (
-    is_uint_type, is_bool_type, is_list_type, is_vector_type, is_container_type,
-    read_elem_type,
-    uint
+    uint, boolean,
+    Bitlist, Bitvector, Container
 )
 
 
-def encode(value, typ, include_hash_tree_roots=False):
-    if is_uint_type(typ):
-        if hasattr(typ, '__supertype__'):
-            typ = typ.__supertype__
+def encode(value, include_hash_tree_roots=False):
+    if isinstance(value, uint):
         # Larger uints are boxed and the class declares their byte length
-        if issubclass(typ, uint) and typ.byte_len > 8:
-            return str(value)
-        return value
-    elif is_bool_type(typ):
-        assert value in (True, False)
-        return value
-    elif is_list_type(typ) or is_vector_type(typ):
-        elem_typ = read_elem_type(typ)
-        return [encode(element, elem_typ, include_hash_tree_roots) for element in value]
-    elif isinstance(typ, type) and issubclass(typ, bytes):  # both bytes and BytesN
+        if value.type().byte_len > 8:
+            return str(int(value))
+        return int(value)
+    elif isinstance(value, boolean):
+        return value == 1
+    elif isinstance(value, (Bitlist, Bitvector)):
+        return '0x' + serialize(value).hex()
+    elif isinstance(value, list):  # normal python lists, ssz-List, Vector
+        return [encode(element, include_hash_tree_roots) for element in value]
+    elif isinstance(value, bytes):  # both bytes and BytesN
         return '0x' + value.hex()
-    elif is_container_type(typ):
+    elif isinstance(value, Container):
         ret = {}
-        for field, subtype in typ.get_fields():
-            field_value = getattr(value, field)
-            ret[field] = encode(field_value, subtype, include_hash_tree_roots)
+        for field_value, field_name in zip(value, value.get_fields().keys()):
+            ret[field_name] = encode(field_value, include_hash_tree_roots)
             if include_hash_tree_roots:
-                ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(field_value, subtype).hex()
+                ret[field_name + "_hash_tree_root"] = '0x' + hash_tree_root(field_value).hex()
         if include_hash_tree_roots:
-            ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex()
+            ret["hash_tree_root"] = '0x' + hash_tree_root(value).hex()
         return ret
     else:
-        raise Exception(f"Type not recognized: value={value}, typ={typ}")
+        raise Exception(f"Type not recognized: value={value}, typ={value.type()}")
diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py
index 3edcc88084..95a3ae9707 100644
--- a/test_libs/pyspec/eth2spec/debug/random_value.py
+++ b/test_libs/pyspec/eth2spec/debug/random_value.py
@@ -1,18 +1,13 @@
 from random import Random
-from typing import Any
 from enum import Enum
 
-from eth2spec.utils.ssz.ssz_impl import is_basic_type
-
 from eth2spec.utils.ssz.ssz_typing import (
-    is_uint_type, is_bool_type, is_list_type,
-    is_vector_type, is_bytes_type, is_bytesn_type, is_container_type,
-    read_vector_elem_type, read_list_elem_type,
-    uint_byte_size
+    SSZType, SSZValue, BasicValue, BasicType, uint, Container, Bytes, List, boolean,
+    Vector, BytesN, Bitlist, Bitvector
 )
 
 # in bytes
-UINT_SIZES = (1, 2, 4, 8, 16, 32)
+UINT_BYTE_SIZES = (1, 2, 4, 8, 16, 32)
 
 random_mode_names = ("random", "zero", "max", "nil", "one", "lengthy")
 
@@ -39,11 +34,11 @@ def is_changing(self):
 
 
 def get_random_ssz_object(rng: Random,
-                          typ: Any,
+                          typ: SSZType,
                           max_bytes_length: int,
                           max_list_length: int,
                           mode: RandomizationMode,
-                          chaos: bool) -> Any:
+                          chaos: bool) -> SSZValue:
     """
     Create an object for a given type, filled with random data.
     :param rng: The random number generator to use.
@@ -56,33 +51,31 @@ def get_random_ssz_object(rng: Random,
     """
     if chaos:
         mode = rng.choice(list(RandomizationMode))
-    if is_bytes_type(typ):
+    if issubclass(typ, Bytes):
         # Bytes array
         if mode == RandomizationMode.mode_nil_count:
-            return b''
+            return typ(b'')
         elif mode == RandomizationMode.mode_max_count:
-            return get_random_bytes_list(rng, max_bytes_length)
+            return typ(get_random_bytes_list(rng, max_bytes_length))
         elif mode == RandomizationMode.mode_one_count:
-            return get_random_bytes_list(rng, 1)
+            return typ(get_random_bytes_list(rng, 1))
         elif mode == RandomizationMode.mode_zero:
-            return b'\x00'
+            return typ(b'\x00')
         elif mode == RandomizationMode.mode_max:
-            return b'\xff'
+            return typ(b'\xff')
         else:
-            return get_random_bytes_list(rng, rng.randint(0, max_bytes_length))
-    elif is_bytesn_type(typ):
-        # BytesN
-        length = typ.length
+            return typ(get_random_bytes_list(rng, rng.randint(0, max_bytes_length)))
+    elif issubclass(typ, BytesN):
         # Sanity, don't generate absurdly big random values
         # If a client is aiming to performance-test, they should create a benchmark suite.
-        assert length <= max_bytes_length
+        assert typ.length <= max_bytes_length
         if mode == RandomizationMode.mode_zero:
-            return b'\x00' * length
+            return typ(b'\x00' * typ.length)
         elif mode == RandomizationMode.mode_max:
-            return b'\xff' * length
+            return typ(b'\xff' * typ.length)
         else:
-            return get_random_bytes_list(rng, length)
-    elif is_basic_type(typ):
+            return typ(get_random_bytes_list(rng, typ.length))
+    elif issubclass(typ, BasicValue):
         # Basic types
         if mode == RandomizationMode.mode_zero:
             return get_min_basic_value(typ)
@@ -90,32 +83,31 @@ def get_random_ssz_object(rng: Random,
             return get_max_basic_value(typ)
         else:
             return get_random_basic_value(rng, typ)
-    elif is_vector_type(typ):
-        # Vector
-        elem_typ = read_vector_elem_type(typ)
-        return [
-            get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos)
+    elif issubclass(typ, Vector) or issubclass(typ, Bitvector):
+        return typ(
+            get_random_ssz_object(rng, typ.elem_type, max_bytes_length, max_list_length, mode, chaos)
             for _ in range(typ.length)
-        ]
-    elif is_list_type(typ):
-        # List
-        elem_typ = read_list_elem_type(typ)
-        length = rng.randint(0, max_list_length)
+        )
+    elif issubclass(typ, List) or issubclass(typ, Bitlist):
+        length = rng.randint(0, min(typ.length, max_list_length))
         if mode == RandomizationMode.mode_one_count:
             length = 1
         elif mode == RandomizationMode.mode_max_count:
             length = max_list_length
 
-        return [
-            get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos)
+        if typ.length < length:  # SSZ imposes a hard limit on lists, we can't put in more than that
+            length = typ.length
+
+        return typ(
+            get_random_ssz_object(rng, typ.elem_type, max_bytes_length, max_list_length, mode, chaos)
             for _ in range(length)
-        ]
-    elif is_container_type(typ):
+        )
+    elif issubclass(typ, Container):
         # Container
         return typ(**{
-            field:
-                get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos)
-                for field, subtype in typ.get_fields()
+            field_name:
+                get_random_ssz_object(rng, field_type, max_bytes_length, max_list_length, mode, chaos)
+            for field_name, field_type in typ.get_fields().items()
         })
     else:
         raise Exception(f"Type not recognized: typ={typ}")
@@ -125,34 +117,31 @@ def get_random_bytes_list(rng: Random, length: int) -> bytes:
     return bytes(rng.getrandbits(8) for _ in range(length))
 
 
-def get_random_basic_value(rng: Random, typ) -> Any:
-    if is_bool_type(typ):
-        return rng.choice((True, False))
-    elif is_uint_type(typ):
-        size = uint_byte_size(typ)
-        assert size in UINT_SIZES
-        return rng.randint(0, 256**size - 1)
+def get_random_basic_value(rng: Random, typ: BasicType) -> BasicValue:
+    if issubclass(typ, boolean):
+        return typ(rng.choice((True, False)))
+    elif issubclass(typ, uint):
+        assert typ.byte_len in UINT_BYTE_SIZES
+        return typ(rng.randint(0, 256 ** typ.byte_len - 1))
     else:
         raise ValueError(f"Not a basic type: typ={typ}")
 
 
-def get_min_basic_value(typ) -> Any:
-    if is_bool_type(typ):
-        return False
-    elif is_uint_type(typ):
-        size = uint_byte_size(typ)
-        assert size in UINT_SIZES
-        return 0
+def get_min_basic_value(typ: BasicType) -> BasicValue:
+    if issubclass(typ, boolean):
+        return typ(False)
+    elif issubclass(typ, uint):
+        assert typ.byte_len in UINT_BYTE_SIZES
+        return typ(0)
     else:
         raise ValueError(f"Not a basic type: typ={typ}")
 
 
-def get_max_basic_value(typ) -> Any:
-    if is_bool_type(typ):
-        return True
-    elif is_uint_type(typ):
-        size = uint_byte_size(typ)
-        assert size in UINT_SIZES
-        return 256**size - 1
+def get_max_basic_value(typ: BasicType) -> BasicValue:
+    if issubclass(typ, boolean):
+        return typ(True)
+    elif issubclass(typ, uint):
+        assert typ.byte_len in UINT_BYTE_SIZES
+        return typ(256 ** typ.byte_len - 1)
     else:
         raise ValueError(f"Not a basic type: typ={typ}")
diff --git a/test_libs/pyspec/eth2spec/fuzzing/decoder.py b/test_libs/pyspec/eth2spec/fuzzing/decoder.py
index a5d3dfd97a..ccca17385d 100644
--- a/test_libs/pyspec/eth2spec/fuzzing/decoder.py
+++ b/test_libs/pyspec/eth2spec/fuzzing/decoder.py
@@ -8,32 +8,38 @@ def translate_typ(typ) -> ssz.BaseSedes:
     :param typ: The spec type, a class.
     :return: The Py-SSZ equivalent.
     """
-    if spec_ssz.is_container_type(typ):
+    if issubclass(typ, spec_ssz.Container):
         return ssz.Container(
-            [translate_typ(field_typ) for (field_name, field_typ) in typ.get_fields()])
-    elif spec_ssz.is_bytesn_type(typ):
+            [translate_typ(field_typ) for field_name, field_typ in typ.get_fields().items()])
+    elif issubclass(typ, spec_ssz.BytesN):
         return ssz.ByteVector(typ.length)
-    elif spec_ssz.is_bytes_type(typ):
+    elif issubclass(typ, spec_ssz.Bytes):
         return ssz.ByteList()
-    elif spec_ssz.is_vector_type(typ):
-        return ssz.Vector(translate_typ(spec_ssz.read_vector_elem_type(typ)), typ.length)
-    elif spec_ssz.is_list_type(typ):
-        return ssz.List(translate_typ(spec_ssz.read_list_elem_type(typ)))
-    elif spec_ssz.is_bool_type(typ):
+    elif issubclass(typ, spec_ssz.Vector):
+        return ssz.Vector(translate_typ(typ.elem_type), typ.length)
+    elif issubclass(typ, spec_ssz.List):
+        # TODO: Make py-ssz List support the new fixed length list
+        return ssz.List(translate_typ(typ.elem_type))
+    elif issubclass(typ, spec_ssz.Bitlist):
+        # TODO: Once Bitlist implemented in py-ssz, use appropriate type
+        return ssz.List(translate_typ(typ.elem_type))
+    elif issubclass(typ, spec_ssz.Bitvector):
+        # TODO: Once Bitvector implemented in py-ssz, use appropriate type
+        return ssz.Vector(translate_typ(typ.elem_type), typ.length)
+    elif issubclass(typ, spec_ssz.boolean):
         return ssz.boolean
-    elif spec_ssz.is_uint_type(typ):
-        size = spec_ssz.uint_byte_size(typ)
-        if size == 1:
+    elif issubclass(typ, spec_ssz.uint):
+        if typ.byte_len == 1:
             return ssz.uint8
-        elif size == 2:
+        elif typ.byte_len == 2:
             return ssz.uint16
-        elif size == 4:
+        elif typ.byte_len == 4:
             return ssz.uint32
-        elif size == 8:
+        elif typ.byte_len == 8:
             return ssz.uint64
-        elif size == 16:
+        elif typ.byte_len == 16:
             return ssz.uint128
-        elif size == 32:
+        elif typ.byte_len == 32:
             return ssz.uint256
         else:
             raise TypeError("invalid uint size")
@@ -48,37 +54,37 @@ def translate_value(value, typ):
     :param typ: The type from the spec to translate into
     :return: the translated value
     """
-    if spec_ssz.is_uint_type(typ):
-        size = spec_ssz.uint_byte_size(typ)
-        if size == 1:
+    if issubclass(typ, spec_ssz.uint):
+        if typ.byte_len == 1:
             return spec_ssz.uint8(value)
-        elif size == 2:
+        elif typ.byte_len == 2:
             return spec_ssz.uint16(value)
-        elif size == 4:
+        elif typ.byte_len == 4:
             return spec_ssz.uint32(value)
-        elif size == 8:
-            # uint64 is default (TODO this is changing soon)
-            return value
-        elif size == 16:
+        elif typ.byte_len == 8:
+            return spec_ssz.uint64(value)
+        elif typ.byte_len == 16:
             return spec_ssz.uint128(value)
-        elif size == 32:
+        elif typ.byte_len == 32:
             return spec_ssz.uint256(value)
         else:
             raise TypeError("invalid uint size")
-    elif spec_ssz.is_list_type(typ):
-        elem_typ = spec_ssz.read_elem_type(typ)
-        return [translate_value(elem, elem_typ) for elem in value]
-    elif spec_ssz.is_bool_type(typ):
+    elif issubclass(typ, spec_ssz.List):
+        return [translate_value(elem, typ.elem_type) for elem in value]
+    elif issubclass(typ, spec_ssz.boolean):
         return value
-    elif spec_ssz.is_vector_type(typ):
-        elem_typ = spec_ssz.read_elem_type(typ)
-        return typ(*(translate_value(elem, elem_typ) for elem in value))
-    elif spec_ssz.is_bytesn_type(typ):
+    elif issubclass(typ, spec_ssz.Vector):
+        return typ(*(translate_value(elem, typ.elem_type) for elem in value))
+    elif issubclass(typ, spec_ssz.Bitlist):
         return typ(value)
-    elif spec_ssz.is_bytes_type(typ):
+    elif issubclass(typ, spec_ssz.Bitvector):
+        return typ(value)
+    elif issubclass(typ, spec_ssz.BytesN):
+        return typ(value)
+    elif issubclass(typ, spec_ssz.Bytes):
         return value
-    elif spec_ssz.is_container_type(typ):
-        return typ(**{f_name: translate_value(f_val, f_typ) for (f_name, f_val, f_typ)
-                      in zip(typ.get_field_names(), value, typ.get_field_types())})
+    if issubclass(typ, spec_ssz.Container):
+        return typ(**{f_name: translate_value(f_val, f_typ) for (f_val, (f_name, f_typ))
+                      in zip(value, typ.get_fields().items())})
     else:
         raise TypeError("Type not supported: {}".format(typ))
diff --git a/test_libs/pyspec/eth2spec/fuzzing/test_decoder.py b/test_libs/pyspec/eth2spec/fuzzing/test_decoder.py
index 26ee6e913a..ea1f1d47f8 100644
--- a/test_libs/pyspec/eth2spec/fuzzing/test_decoder.py
+++ b/test_libs/pyspec/eth2spec/fuzzing/test_decoder.py
@@ -9,7 +9,9 @@ def test_decoder():
     rng = Random(123)
 
     # check these types only, Block covers a lot of operation types already.
-    for typ in [spec.BeaconBlock, spec.BeaconState, spec.IndexedAttestation, spec.AttestationDataAndCustodyBit]:
+    # TODO: Once has Bitlists and Bitvectors, add back
+    #       spec.BeaconState and spec.BeaconBlock
+    for typ in [spec.IndexedAttestation, spec.AttestationDataAndCustodyBit]:
         # create a random pyspec value
         original = random_value.get_random_ssz_object(rng, typ, 100, 10,
                                                       mode=random_value.RandomizationMode.mode_random,
diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py
index 97266acf2c..e7560afc6d 100644
--- a/test_libs/pyspec/eth2spec/test/context.py
+++ b/test_libs/pyspec/eth2spec/test/context.py
@@ -27,9 +27,13 @@ def entry(*args, **kw):
 DEFAULT_BLS_ACTIVE = False
 
 
+def spectest_with_bls_switch(fn):
+    return bls_switch(spectest()(fn))
+
+
 # shorthand for decorating @with_state @spectest()
 def spec_state_test(fn):
-    return with_state(bls_switch(spectest()(fn)))
+    return with_state(spectest_with_bls_switch(fn))
 
 
 def expect_assertion_error(fn):
diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py
new file mode 100644
index 0000000000..6ac46ba6c2
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py
@@ -0,0 +1,118 @@
+from eth2spec.test.context import with_all_phases, with_state, bls_switch
+from eth2spec.test.helpers.attestations import get_valid_attestation
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot
+from eth2spec.test.helpers.state import state_transition_and_sign_block
+
+
+def add_block_to_store(spec, store, block):
+    pre_state = store.block_states[block.parent_root]
+    block_time = pre_state.genesis_time + block.slot * spec.SECONDS_PER_SLOT
+
+    if store.time < block_time:
+        spec.on_tick(store, block_time)
+
+    spec.on_block(store, block)
+
+
+def add_attestation_to_store(spec, store, attestation):
+    parent_block = store.blocks[attestation.data.beacon_block_root]
+    pre_state = store.block_states[spec.signing_root(parent_block)]
+    block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
+    next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.SECONDS_PER_SLOT
+
+    if store.time < next_epoch_time:
+        spec.on_tick(store, next_epoch_time)
+
+    spec.on_attestation(store, attestation)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_genesis(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
+    assert spec.get_head(store) == spec.signing_root(genesis_block)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_chain_no_attestations(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
+    assert spec.get_head(store) == spec.signing_root(genesis_block)
+
+    # On receiving a block of `GENESIS_SLOT + 1` slot
+    block_1 = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, block_1)
+    add_block_to_store(spec, store, block_1)
+
+    # On receiving a block of next epoch
+    block_2 = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, block_2)
+    add_block_to_store(spec, store, block_2)
+
+    assert spec.get_head(store) == spec.signing_root(block_2)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_split_tie_breaker_no_attestations(spec, state):
+    genesis_state = state.copy()
+
+    # Initialization
+    store = spec.get_genesis_store(state)
+    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
+    assert spec.get_head(store) == spec.signing_root(genesis_block)
+
+    # block at slot 1
+    block_1_state = genesis_state.copy()
+    block_1 = build_empty_block_for_next_slot(spec, block_1_state)
+    state_transition_and_sign_block(spec, block_1_state, block_1)
+    add_block_to_store(spec, store, block_1)
+
+    # additional block at slot 1
+    block_2_state = genesis_state.copy()
+    block_2 = build_empty_block_for_next_slot(spec, block_2_state)
+    block_2.body.graffiti = b'\x42' * 32
+    state_transition_and_sign_block(spec, block_2_state, block_2)
+    add_block_to_store(spec, store, block_2)
+
+    highest_root = max(spec.signing_root(block_1), spec.signing_root(block_2))
+
+    assert spec.get_head(store) == highest_root
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_shorter_chain_but_heavier_weight(spec, state):
+    genesis_state = state.copy()
+
+    # Initialization
+    store = spec.get_genesis_store(state)
+    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
+    assert spec.get_head(store) == spec.signing_root(genesis_block)
+
+    # build longer tree
+    long_state = genesis_state.copy()
+    for i in range(3):
+        long_block = build_empty_block_for_next_slot(spec, long_state)
+        state_transition_and_sign_block(spec, long_state, long_block)
+        add_block_to_store(spec, store, long_block)
+
+    # build short tree
+    short_state = genesis_state.copy()
+    short_block = build_empty_block_for_next_slot(spec, short_state)
+    short_block.body.graffiti = b'\x42' * 32
+    state_transition_and_sign_block(spec, short_state, short_block)
+    add_block_to_store(spec, store, short_block)
+
+    short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True)
+    add_attestation_to_store(spec, store, short_attestation)
+
+    assert spec.get_head(store) == spec.signing_root(short_block)
diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py
new file mode 100644
index 0000000000..4006758880
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py
@@ -0,0 +1,122 @@
+from eth2spec.test.context import with_all_phases, with_state, bls_switch
+
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot
+from eth2spec.test.helpers.attestations import get_valid_attestation
+from eth2spec.test.helpers.state import next_slot
+
+
+def run_on_attestation(spec, state, store, attestation, valid=True):
+    if not valid:
+        try:
+            spec.on_attestation(store, attestation)
+        except AssertionError:
+            return
+        else:
+            assert False
+
+    indexed_attestation = spec.get_indexed_attestation(state, attestation)
+    spec.on_attestation(store, attestation)
+    assert (
+        store.latest_messages[indexed_attestation.custody_bit_0_indices[0]] ==
+        spec.LatestMessage(
+            epoch=attestation.data.target.epoch,
+            root=attestation.data.beacon_block_root,
+        )
+    )
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_attestation(spec, state):
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+
+    # store block in store
+    spec.on_block(store, block)
+
+    next_slot(spec, state)
+
+    attestation = get_valid_attestation(spec, state, slot=block.slot)
+    run_on_attestation(spec, state, store, attestation)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_attestation_target_not_in_store(spec, state):
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+
+    # move to next epoch to make block new target
+    state.slot += spec.SLOTS_PER_EPOCH
+
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+
+    # do not add block to store
+
+    next_slot(spec, state)
+    attestation = get_valid_attestation(spec, state, slot=block.slot)
+    run_on_attestation(spec, state, store, attestation, False)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_attestation_future_epoch(spec, state):
+    store = spec.get_genesis_store(state)
+    time = 3 * spec.SECONDS_PER_SLOT
+    spec.on_tick(store, time)
+
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+
+    # store block in store
+    spec.on_block(store, block)
+    next_slot(spec, state)
+
+    # move state forward but not store
+    attestation_slot = block.slot + spec.SLOTS_PER_EPOCH
+    state.slot = attestation_slot
+
+    attestation = get_valid_attestation(spec, state, slot=state.slot)
+    run_on_attestation(spec, state, store, attestation, False)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_attestation_same_slot(spec, state):
+    store = spec.get_genesis_store(state)
+    time = 1 * spec.SECONDS_PER_SLOT
+    spec.on_tick(store, time)
+
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+
+    spec.on_block(store, block)
+    next_slot(spec, state)
+
+    attestation = get_valid_attestation(spec, state, slot=block.slot)
+    run_on_attestation(spec, state, store, attestation, False)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_attestation_invalid_attestation(spec, state):
+    store = spec.get_genesis_store(state)
+    time = 3 * spec.SECONDS_PER_SLOT
+    spec.on_tick(store, time)
+
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+
+    spec.on_block(store, block)
+    next_slot(spec, state)
+
+    attestation = get_valid_attestation(spec, state, slot=block.slot)
+    # make attestation invalid
+    attestation.custody_bits[0:8] = [0, 0, 0, 0, 1, 1, 1, 1]
+    run_on_attestation(spec, state, store, attestation, False)
diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py
new file mode 100644
index 0000000000..90f161fa22
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py
@@ -0,0 +1,127 @@
+from eth2spec.utils.ssz.ssz_impl import signing_root
+
+from eth2spec.test.context import with_all_phases, with_state, bls_switch
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot
+from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations
+
+
+def run_on_block(spec, state, store, block, valid=True):
+    if not valid:
+        try:
+            spec.on_block(store, block)
+        except AssertionError:
+            return
+        else:
+            assert False
+
+    spec.on_block(store, block)
+    assert store.blocks[signing_root(block)] == block
+
+
+def apply_next_epoch_with_attestations(spec, state, store):
+    _, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
+    for block in new_blocks:
+        block_root = signing_root(block)
+        store.blocks[block_root] = block
+        store.block_states[block_root] = state
+        last_block = block
+    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
+    return state, store, last_block
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_basic(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+    assert store.time == time
+
+    # On receiving a block of `GENESIS_SLOT + 1` slot
+    block = build_empty_block_for_next_slot(spec, state)
+    run_on_block(spec, state, store, block)
+
+    # On receiving a block of next epoch
+    store.time = time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
+    block = build_empty_block_for_next_slot(spec, state)
+    block.slot += spec.SLOTS_PER_EPOCH
+
+    run_on_block(spec, state, store, block)
+
+    # TODO: add tests for justified_root and finalized_root
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_block_checkpoints(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+
+    next_epoch(spec, state)
+    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
+    state, store, last_block = apply_next_epoch_with_attestations(spec, state, store)
+    next_epoch(spec, state)
+    spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
+    last_block_root = signing_root(last_block)
+
+    # Mock the finalized_checkpoint
+    store.block_states[last_block_root].finalized_checkpoint = (
+        store.block_states[last_block_root].current_justified_checkpoint
+    )
+
+    # On receiving a block of `GENESIS_SLOT + 1` slot
+    block = build_empty_block_for_next_slot(spec, state)
+    run_on_block(spec, state, store, block)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_block_future_block(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+
+    # do not tick time
+
+    # Fail receiving block of `GENESIS_SLOT + 1` slot
+    block = build_empty_block_for_next_slot(spec, state)
+    run_on_block(spec, state, store, block, False)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_block_bad_parent_root(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+
+    # Fail receiving block of `GENESIS_SLOT + 1` slot
+    block = build_empty_block_for_next_slot(spec, state)
+    block.parent_root = b'\x45' * 32
+    run_on_block(spec, state, store, block, False)
+
+
+@with_all_phases
+@with_state
+@bls_switch
+def test_on_block_before_finalized(spec, state):
+    # Initialization
+    store = spec.get_genesis_store(state)
+    time = 100
+    spec.on_tick(store, time)
+
+    store.finalized_checkpoint = spec.Checkpoint(
+        epoch=store.finalized_checkpoint.epoch + 2,
+        root=store.finalized_checkpoint.root
+    )
+
+    # Fail receiving block of `GENESIS_SLOT + 1` slot
+    block = build_empty_block_for_next_slot(spec, state)
+    run_on_block(spec, state, store, block, False)
diff --git a/test_libs/pyspec/eth2spec/test/genesis/__init__.py b/test_libs/pyspec/eth2spec/test/genesis/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/eth2spec/test/genesis/test_initialization.py b/test_libs/pyspec/eth2spec/test/genesis/test_initialization.py
new file mode 100644
index 0000000000..b95b70feff
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/genesis/test_initialization.py
@@ -0,0 +1,30 @@
+from eth2spec.test.context import spectest_with_bls_switch, with_phases
+from eth2spec.test.helpers.deposits import (
+    prepare_genesis_deposits,
+)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_initialize_beacon_state_from_eth1(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
+    deposits, deposit_root = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+
+    yield 'eth1_block_hash', eth1_block_hash
+    yield 'eth1_timestamp', eth1_timestamp
+    yield 'deposits', deposits
+
+    # initialize beacon_state
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.SECONDS_PER_DAY + 2 * spec.SECONDS_PER_DAY
+    assert len(state.validators) == deposit_count
+    assert state.eth1_data.deposit_root == deposit_root
+    assert state.eth1_data.deposit_count == deposit_count
+    assert state.eth1_data.block_hash == eth1_block_hash
+
+    # yield state
+    yield 'state', state
diff --git a/test_libs/pyspec/eth2spec/test/genesis/test_validity.py b/test_libs/pyspec/eth2spec/test/genesis/test_validity.py
new file mode 100644
index 0000000000..bb95bb2b0f
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/genesis/test_validity.py
@@ -0,0 +1,87 @@
+from eth2spec.test.context import spectest_with_bls_switch, with_phases
+from eth2spec.test.helpers.deposits import (
+    prepare_genesis_deposits,
+)
+
+
+def create_valid_beacon_state(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    return spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+
+def run_is_valid_genesis_state(spec, state, valid=True):
+    """
+    Run ``is_valid_genesis_state``, yielding:
+      - genesis ('state')
+      - is_valid ('is_valid')
+    """
+    yield 'genesis', state
+    is_valid = spec.is_valid_genesis_state(state)
+    yield 'is_valid', is_valid
+    assert is_valid == valid
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true(spec):
+    state = create_valid_beacon_state(spec)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_false_invalid_timestamp(spec):
+    state = create_valid_beacon_state(spec)
+    state.genesis_time = spec.MIN_GENESIS_TIME - 1
+
+    yield from run_is_valid_genesis_state(spec, state, valid=False)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true_more_balance(spec):
+    state = create_valid_beacon_state(spec)
+    state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+# TODO: not part of the genesis function yet. Erroneously merged.
+# @with_phases(['phase0'])
+# @spectest_with_bls_switch
+# def test_is_valid_genesis_state_false_not_enough_balance(spec):
+#     state = create_valid_beacon_state(spec)
+#     state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1
+#
+#     yield from run_is_valid_genesis_state(spec, state, valid=False)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true_one_more_validator(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_false_not_enough_validator(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=False)
diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py
index 4c8b5c7ebb..8685170180 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py
@@ -1,10 +1,10 @@
 from typing import List
 
-from eth2spec.test.helpers.bitfields import set_bitfield_bit
 from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
 from eth2spec.test.helpers.keys import privkeys
 from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures
 from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+from eth2spec.utils.ssz.ssz_typing import Bitlist
 
 
 def build_attestation_data(spec, state, slot, shard):
@@ -15,7 +15,7 @@ def build_attestation_data(spec, state, slot, shard):
     else:
         block_root = spec.get_block_root_at_slot(state, slot)
 
-    current_epoch_start_slot = spec.get_epoch_start_slot(spec.get_current_epoch(state))
+    current_epoch_start_slot = spec.compute_start_slot_of_epoch(spec.get_current_epoch(state))
     if slot < current_epoch_start_slot:
         epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state))
     elif slot == current_epoch_start_slot:
@@ -24,28 +24,26 @@ def build_attestation_data(spec, state, slot, shard):
         epoch_boundary_root = spec.get_block_root(state, spec.get_current_epoch(state))
 
     if slot < current_epoch_start_slot:
-        justified_epoch = state.previous_justified_epoch
-        justified_block_root = state.previous_justified_root
+        source_epoch = state.previous_justified_checkpoint.epoch
+        source_root = state.previous_justified_checkpoint.root
     else:
-        justified_epoch = state.current_justified_epoch
-        justified_block_root = state.current_justified_root
+        source_epoch = state.current_justified_checkpoint.epoch
+        source_root = state.current_justified_checkpoint.root
 
-    if spec.slot_to_epoch(slot) == spec.get_current_epoch(state):
+    if spec.compute_epoch_of_slot(slot) == spec.get_current_epoch(state):
         parent_crosslink = state.current_crosslinks[shard]
     else:
         parent_crosslink = state.previous_crosslinks[shard]
 
     return spec.AttestationData(
         beacon_block_root=block_root,
-        source_epoch=justified_epoch,
-        source_root=justified_block_root,
-        target_epoch=spec.slot_to_epoch(slot),
-        target_root=epoch_boundary_root,
+        source=spec.Checkpoint(epoch=source_epoch, root=source_root),
+        target=spec.Checkpoint(epoch=spec.compute_epoch_of_slot(slot), root=epoch_boundary_root),
         crosslink=spec.Crosslink(
             shard=shard,
             start_epoch=parent_crosslink.end_epoch,
-            end_epoch=min(spec.slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
-            data_root=spec.ZERO_HASH,
+            end_epoch=min(spec.compute_epoch_of_slot(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
+            data_root=spec.Hash(),
             parent_root=hash_tree_root(parent_crosslink),
         ),
     )
@@ -55,27 +53,26 @@ def get_valid_attestation(spec, state, slot=None, signed=False):
     if slot is None:
         slot = state.slot
 
-    epoch = spec.slot_to_epoch(slot)
-    epoch_start_shard = spec.get_epoch_start_shard(state, epoch)
-    committees_per_slot = spec.get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+    epoch = spec.compute_epoch_of_slot(slot)
+    epoch_start_shard = spec.get_start_shard(state, epoch)
+    committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
     shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT
 
     attestation_data = build_attestation_data(spec, state, slot, shard)
 
     crosslink_committee = spec.get_crosslink_committee(
         state,
-        attestation_data.target_epoch,
-        attestation_data.crosslink.shard
+        attestation_data.target.epoch,
+        attestation_data.crosslink.shard,
     )
 
     committee_size = len(crosslink_committee)
-    bitfield_length = (committee_size + 7) // 8
-    aggregation_bitfield = b'\x00' * bitfield_length
-    custody_bitfield = b'\x00' * bitfield_length
+    aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
+    custody_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
     attestation = spec.Attestation(
-        aggregation_bitfield=aggregation_bitfield,
+        aggregation_bits=aggregation_bits,
         data=attestation_data,
-        custody_bitfield=custody_bitfield,
+        custody_bits=custody_bits,
     )
     fill_aggregate_attestation(spec, state, attestation)
     if signed:
@@ -108,7 +105,7 @@ def sign_attestation(spec, state, attestation):
     participants = spec.get_attesting_indices(
         state,
         attestation.data,
-        attestation.aggregation_bitfield,
+        attestation.aggregation_bits,
     )
 
     attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants)
@@ -126,7 +123,7 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi
         domain=spec.get_domain(
             state=state,
             domain_type=spec.DOMAIN_ATTESTATION,
-            message_epoch=attestation_data.target_epoch,
+            message_epoch=attestation_data.target.epoch,
         )
     )
 
@@ -134,11 +131,11 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi
 def fill_aggregate_attestation(spec, state, attestation):
     crosslink_committee = spec.get_crosslink_committee(
         state,
-        attestation.data.target_epoch,
+        attestation.data.target.epoch,
         attestation.data.crosslink.shard,
     )
     for i in range(len(crosslink_committee)):
-        attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)
+        attestation.aggregation_bits[i] = True
 
 
 def add_attestation_to_state(spec, state, attestation, slot):
diff --git a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py
index 9fd34520c2..20abcacfb8 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py
@@ -7,12 +7,12 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
     attestation_1 = get_valid_attestation(spec, state, signed=signed_1)
 
     attestation_2 = deepcopy(attestation_1)
-    attestation_2.data.target_root = b'\x01' * 32
+    attestation_2.data.target.root = b'\x01' * 32
 
     if signed_2:
         sign_attestation(spec, state, attestation_2)
 
     return spec.AttesterSlashing(
-        attestation_1=spec.convert_to_indexed(state, attestation_1),
-        attestation_2=spec.convert_to_indexed(state, attestation_2),
+        attestation_1=spec.get_indexed_attestation(state, attestation_1),
+        attestation_2=spec.get_indexed_attestation(state, attestation_2),
     )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py
deleted file mode 100644
index 50e5b6cbad..0000000000
--- a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py
+++ /dev/null
@@ -1,11 +0,0 @@
-def set_bitfield_bit(bitfield, i):
-    """
-    Set the bit in ``bitfield`` at position ``i`` to ``1``.
-    """
-    byte_index = i // 8
-    bit_index = i % 8
-    return (
-        bitfield[:byte_index] +
-        bytes([bitfield[byte_index] | (1 << bit_index)]) +
-        bitfield[byte_index + 1:]
-    )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py
index 5c7cb02a0f..2682a0c82a 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/block.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/block.py
@@ -14,7 +14,7 @@ def sign_block(spec, state, block, proposer_index=None):
         if block.slot == state.slot:
             proposer_index = spec.get_beacon_proposer_index(state)
         else:
-            if spec.slot_to_epoch(state.slot) + 1 > spec.slot_to_epoch(block.slot):
+            if spec.compute_epoch_of_slot(state.slot) + 1 > spec.compute_epoch_of_slot(block.slot):
                 print("warning: block slot far away, and no proposer index manually given."
                       " Signing block is slow due to transition for proposer index calculation.")
             # use stub state to get proposer index of future slot
@@ -26,10 +26,10 @@ def sign_block(spec, state, block, proposer_index=None):
 
     block.body.randao_reveal = bls_sign(
         privkey=privkey,
-        message_hash=hash_tree_root(spec.slot_to_epoch(block.slot)),
+        message_hash=hash_tree_root(spec.compute_epoch_of_slot(block.slot)),
         domain=spec.get_domain(
             state,
-            message_epoch=spec.slot_to_epoch(block.slot),
+            message_epoch=spec.compute_epoch_of_slot(block.slot),
             domain_type=spec.DOMAIN_RANDAO,
         )
     )
@@ -39,7 +39,7 @@ def sign_block(spec, state, block, proposer_index=None):
         domain=spec.get_domain(
             state,
             spec.DOMAIN_BEACON_PROPOSER,
-            spec.slot_to_epoch(block.slot)))
+            spec.compute_epoch_of_slot(block.slot)))
 
 
 def apply_empty_block(spec, state):
@@ -57,9 +57,9 @@ def build_empty_block(spec, state, slot=None, signed=False):
         slot = state.slot
     empty_block = spec.BeaconBlock()
     empty_block.slot = slot
-    empty_block.body.eth1_data.deposit_count = state.deposit_index
+    empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
     previous_block_header = deepcopy(state.latest_block_header)
-    if previous_block_header.state_root == spec.ZERO_HASH:
+    if previous_block_header.state_root == spec.Hash():
         previous_block_header.state_root = state.hash_tree_root()
     empty_block.parent_root = signing_root(previous_block_header)
 
diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py
index 67df12fcdb..36f23ad1c7 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/custody.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py
@@ -1,5 +1,6 @@
 from eth2spec.test.helpers.keys import privkeys
-from eth2spec.utils.bls import bls_sign
+from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures
+from eth2spec.utils.hash_function import hash
 
 
 def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
@@ -10,8 +11,9 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
     if epoch is None:
         epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING
 
+    # Generate the secret that is being revealed
     reveal = bls_sign(
-        message_hash=spec.hash_tree_root(epoch),
+        message_hash=spec.hash_tree_root(spec.Epoch(epoch)),
         privkey=privkeys[revealed_index],
         domain=spec.get_domain(
             state=state,
@@ -19,8 +21,11 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
             message_epoch=epoch,
         ),
     )
-    mask = bls_sign(
-        message_hash=spec.hash_tree_root(epoch),
+    # Generate the mask (any random 32 bytes that don't reveal the masker's secret will do)
+    mask = hash(reveal)
+    # Generate masker's signature on the mask
+    masker_signature = bls_sign(
+        message_hash=mask,
         privkey=privkeys[masker_index],
         domain=spec.get_domain(
             state=state,
@@ -28,11 +33,12 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
             message_epoch=epoch,
         ),
     )
+    masked_reveal = bls_aggregate_signatures([reveal, masker_signature])
 
     return spec.EarlyDerivedSecretReveal(
         revealed_index=revealed_index,
         epoch=epoch,
-        reveal=reveal,
+        reveal=masked_reveal,
         masker_index=masker_index,
         mask=mask,
     )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py
index 1d4761753f..8dc6b3b589 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/deposits.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py
@@ -1,78 +1,100 @@
 from eth2spec.test.helpers.keys import pubkeys, privkeys
 from eth2spec.utils.bls import bls_sign
-from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof
-from eth2spec.utils.ssz.ssz_impl import signing_root
+from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof
+from eth2spec.utils.ssz.ssz_impl import signing_root, hash_tree_root
+from eth2spec.utils.ssz.ssz_typing import List
 
 
-def build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed=False):
+def build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, state=None, signed=False):
     deposit_data = spec.DepositData(
         pubkey=pubkey,
         withdrawal_credentials=withdrawal_credentials,
         amount=amount,
     )
     if signed:
-        sign_deposit_data(spec, state, deposit_data, privkey)
+        sign_deposit_data(spec, deposit_data, privkey, state)
     return deposit_data
 
 
-def sign_deposit_data(spec, state, deposit_data, privkey):
-    signature = bls_sign(
-        message_hash=signing_root(deposit_data),
-        privkey=privkey,
-        domain=spec.get_domain(
+def sign_deposit_data(spec, deposit_data, privkey, state=None):
+    if state is None:
+        # Genesis
+        domain = spec.compute_domain(spec.DOMAIN_DEPOSIT)
+    else:
+        domain = spec.get_domain(
             state,
             spec.DOMAIN_DEPOSIT,
         )
+
+    signature = bls_sign(
+        message_hash=signing_root(deposit_data),
+        privkey=privkey,
+        domain=domain,
     )
     deposit_data.signature = signature
 
 
 def build_deposit(spec,
                   state,
-                  deposit_data_leaves,
+                  deposit_data_list,
                   pubkey,
                   privkey,
                   amount,
                   withdrawal_credentials,
                   signed):
-    deposit_data = build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed)
-
-    item = deposit_data.hash_tree_root()
-    index = len(deposit_data_leaves)
-    deposit_data_leaves.append(item)
-    tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
-    root = get_merkle_root((tuple(deposit_data_leaves)))
-    proof = list(get_merkle_proof(tree, item_index=index))
-    assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
-
-    deposit = spec.Deposit(
-        proof=list(proof),
-        index=index,
-        data=deposit_data,
-    )
+    deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, state=state, signed=signed)
+    index = len(deposit_data_list)
+    deposit_data_list.append(deposit_data)
+    root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
+    tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
+    proof = list(get_merkle_proof(tree, item_index=index)) + [(index + 1).to_bytes(32, 'little')]
+    leaf = deposit_data.hash_tree_root()
+    assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
+    deposit = spec.Deposit(proof=proof, data=deposit_data)
+
+    return deposit, root, deposit_data_list
+
+
+def prepare_genesis_deposits(spec, genesis_validator_count, amount, signed=False):
+    deposit_data_list = []
+    genesis_deposits = []
+    for validator_index in range(genesis_validator_count):
+        pubkey = pubkeys[validator_index]
+        privkey = privkeys[validator_index]
+        # insecurely use pubkey as withdrawal key if no credentials provided
+        withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
+        deposit, root, deposit_data_list = build_deposit(
+            spec,
+            None,
+            deposit_data_list,
+            pubkey,
+            privkey,
+            amount,
+            withdrawal_credentials,
+            signed,
+        )
+        genesis_deposits.append(deposit)
 
-    return deposit, root, deposit_data_leaves
+    return genesis_deposits, root
 
 
 def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_credentials=None, signed=False):
     """
     Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount.
     """
-    pre_validator_count = len(state.validator_registry)
-    # fill previous deposits with zero-hash
-    deposit_data_leaves = [spec.ZERO_HASH] * pre_validator_count
+    deposit_data_list = []
 
     pubkey = pubkeys[validator_index]
     privkey = privkeys[validator_index]
 
     # insecurely use pubkey as withdrawal key if no credentials provided
     if withdrawal_credentials is None:
-        withdrawal_credentials = spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(pubkey)[1:]
+        withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
 
-    deposit, root, deposit_data_leaves = build_deposit(
+    deposit, root, deposit_data_list = build_deposit(
         spec,
         state,
-        deposit_data_leaves,
+        deposit_data_list,
         pubkey,
         privkey,
         amount,
@@ -80,6 +102,7 @@ def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_c
         signed,
     )
 
-    state.latest_eth1_data.deposit_root = root
-    state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
+    state.eth1_deposit_index = 0
+    state.eth1_data.deposit_root = root
+    state.eth1_data.deposit_count = len(deposit_data_list)
     return deposit
diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py
index a3ca915ef5..11ab76b791 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py
@@ -1,11 +1,12 @@
 from eth2spec.test.helpers.keys import pubkeys
 from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+from eth2spec.utils.ssz.ssz_typing import List
 
 
 def build_mock_validator(spec, i: int, balance: int):
     pubkey = pubkeys[i]
     # insecurely use pubkey as withdrawal key as well
-    withdrawal_credentials = spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(pubkey)[1:]
+    withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
     return spec.Validator(
         pubkey=pubkeys[i],
         withdrawal_credentials=withdrawal_credentials,
@@ -22,26 +23,32 @@ def create_genesis_state(spec, num_validators):
 
     state = spec.BeaconState(
         genesis_time=0,
-        deposit_index=num_validators,
-        latest_eth1_data=spec.Eth1Data(
+        eth1_deposit_index=num_validators,
+        eth1_data=spec.Eth1Data(
             deposit_root=deposit_root,
             deposit_count=num_validators,
-            block_hash=spec.ZERO_HASH,
-        ))
+            block_hash=spec.Hash(),
+        ),
+        latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
+    )
 
     # We "hack" in the initial validators,
     #  as it is much faster than creating and processing genesis deposits for every single test case.
     state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators
-    state.validator_registry = [build_mock_validator(spec, i, state.balances[i]) for i in range(num_validators)]
+    state.validators = [build_mock_validator(spec, i, state.balances[i]) for i in range(num_validators)]
 
     # Process genesis activations
-    for validator in state.validator_registry:
+    for validator in state.validators:
         if validator.effective_balance >= spec.MAX_EFFECTIVE_BALANCE:
             validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
             validator.activation_epoch = spec.GENESIS_EPOCH
 
-    genesis_active_index_root = hash_tree_root(spec.get_active_validator_indices(state, spec.GENESIS_EPOCH))
-    for index in range(spec.LATEST_ACTIVE_INDEX_ROOTS_LENGTH):
-        state.latest_active_index_roots[index] = genesis_active_index_root
+    genesis_active_index_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT](
+        spec.get_active_validator_indices(state, spec.GENESIS_EPOCH)))
+    genesis_compact_committees_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT](
+        spec.get_active_validator_indices(state, spec.GENESIS_EPOCH)))
+    for index in range(spec.EPOCHS_PER_HISTORICAL_VECTOR):
+        state.active_index_roots[index] = genesis_active_index_root
+        state.compact_committees_roots[index] = genesis_compact_committees_root
 
     return state
diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py
index 86c6acf47c..d5b7f7b7fd 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py
@@ -7,7 +7,7 @@
 def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False):
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
     slot = state.slot
 
     header_1 = spec.BeaconBlockHeader(
diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py
index 8641d4c0d0..e88fc6adee 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/state.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/state.py
@@ -1,4 +1,6 @@
-from eth2spec.test.helpers.block import sign_block
+from copy import deepcopy
+from eth2spec.test.helpers.attestations import get_valid_attestation
+from eth2spec.test.helpers.block import sign_block, build_empty_block_for_next_slot
 
 
 def get_balance(state, index):
@@ -25,7 +27,7 @@ def get_state_root(spec, state, slot) -> bytes:
     Return the state root at a recent ``slot``.
     """
     assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
-    return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
+    return state.state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
 
 
 def state_transition_and_sign_block(spec, state, block):
@@ -36,3 +38,30 @@ def state_transition_and_sign_block(spec, state, block):
     spec.state_transition(state, block)
     block.state_root = state.hash_tree_root()
     sign_block(spec, state, block)
+
+
+def next_epoch_with_attestations(spec,
+                                 state,
+                                 fill_cur_epoch,
+                                 fill_prev_epoch):
+    assert state.slot % spec.SLOTS_PER_EPOCH == 0
+
+    post_state = deepcopy(state)
+    blocks = []
+    for _ in range(spec.SLOTS_PER_EPOCH):
+        block = build_empty_block_for_next_slot(spec, post_state)
+        if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
+            slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
+            if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)):
+                cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest)
+                block.body.attestations.append(cur_attestation)
+
+        if fill_prev_epoch:
+            slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
+            prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest)
+            block.body.attestations.append(prev_attestation)
+
+        state_transition_and_sign_block(spec, post_state, block)
+        blocks.append(block)
+
+    return state, blocks, post_state
diff --git a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py
index 4d40deee70..3d3b0f4e37 100644
--- a/test_libs/pyspec/eth2spec/test/helpers/transfers.py
+++ b/test_libs/pyspec/eth2spec/test/helpers/transfers.py
@@ -4,13 +4,15 @@
 from eth2spec.utils.ssz.ssz_impl import signing_root
 
 
-def get_valid_transfer(spec, state, slot=None, sender_index=None, amount=None, fee=None, signed=False):
+def get_valid_transfer(spec, state, slot=None, sender_index=None,
+                       recipient_index=None, amount=None, fee=None, signed=False):
     if slot is None:
         slot = state.slot
     current_epoch = spec.get_current_epoch(state)
     if sender_index is None:
         sender_index = spec.get_active_validator_indices(state, current_epoch)[-1]
-    recipient_index = spec.get_active_validator_indices(state, current_epoch)[0]
+    if recipient_index is None:
+        recipient_index = spec.get_active_validator_indices(state, current_epoch)[0]
     transfer_pubkey = pubkeys[-1]
     transfer_privkey = privkeys[-1]
 
@@ -31,8 +33,8 @@ def get_valid_transfer(spec, state, slot=None, sender_index=None, amount=None, f
         sign_transfer(spec, state, transfer, transfer_privkey)
 
     # ensure withdrawal_credentials reproducible
-    state.validator_registry[transfer.sender].withdrawal_credentials = (
-        spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(transfer.pubkey)[1:]
+    state.validators[transfer.sender].withdrawal_credentials = (
+        spec.BLS_WITHDRAWAL_PREFIX + spec.hash(transfer.pubkey)[1:]
     )
 
     return transfer
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py
index 2b34ab405e..ab46a0d8ce 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py
@@ -1,8 +1,7 @@
-from copy import deepcopy
-
 from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
 from eth2spec.test.helpers.attestations import (
     get_valid_attestation,
+    sign_aggregate_attestation,
     sign_attestation,
 )
 from eth2spec.test.helpers.state import (
@@ -10,6 +9,7 @@
     next_slot,
 )
 from eth2spec.test.helpers.block import apply_empty_block
+from eth2spec.utils.ssz.ssz_typing import Bitlist
 
 
 def run_attestation_processing(spec, state, attestation, valid=True):
@@ -38,7 +38,7 @@ def run_attestation_processing(spec, state, attestation, valid=True):
     spec.process_attestation(state, attestation)
 
     # Make sure the attestation has been processed
-    if attestation.data.target_epoch == spec.get_current_epoch(state):
+    if attestation.data.target.epoch == spec.get_current_epoch(state):
         assert len(state.current_epoch_attestations) == current_epoch_count + 1
     else:
         assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
@@ -60,6 +60,7 @@ def test_success(spec, state):
 @spec_state_test
 def test_success_previous_epoch(spec, state):
     attestation = get_valid_attestation(spec, state, signed=True)
+    state.slot = spec.SLOTS_PER_EPOCH - 1
     next_epoch(spec, state)
     apply_empty_block(spec, state)
 
@@ -69,6 +70,9 @@ def test_success_previous_epoch(spec, state):
 @with_all_phases
 @spec_state_test
 def test_success_since_max_epochs_per_crosslink(spec, state):
+    # Do not run mainnet (64 epochs), that would mean the equivalent of ~7 hours chain simulation.
+    if spec.MAX_EPOCHS_PER_CROSSLINK > 4:
+        return
     for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
         next_epoch(spec, state)
     apply_empty_block(spec, state)
@@ -85,6 +89,32 @@ def test_success_since_max_epochs_per_crosslink(spec, state):
     yield from run_attestation_processing(spec, state, attestation)
 
 
+@with_all_phases
+@spec_state_test
+def test_wrong_end_epoch_with_max_epochs_per_crosslink(spec, state):
+    # Do not run mainnet (64 epochs), that would mean the equivalent of ~7 hours chain simulation.
+    if spec.MAX_EPOCHS_PER_CROSSLINK > 4:
+        return
+    for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
+        next_epoch(spec, state)
+    apply_empty_block(spec, state)
+
+    attestation = get_valid_attestation(spec, state)
+    data = attestation.data
+    # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs
+    assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
+    # Now change it to be different
+    data.crosslink.end_epoch += 1
+
+    sign_attestation(spec, state, attestation)
+
+    for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+        next_slot(spec, state)
+    apply_empty_block(spec, state)
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
 @with_all_phases
 @always_bls
 @spec_state_test
@@ -108,8 +138,9 @@ def test_before_inclusion_delay(spec, state):
 @spec_state_test
 def test_after_epoch_slots(spec, state):
     attestation = get_valid_attestation(spec, state, signed=True)
+    state.slot = spec.SLOTS_PER_EPOCH - 1
     # increment past latest inclusion slot
-    spec.process_slots(state, state.slot + spec.SLOTS_PER_EPOCH + 1)
+    spec.process_slots(state, state.slot + 2)
     apply_empty_block(spec, state)
 
     yield from run_attestation_processing(spec, state, attestation, False)
@@ -119,16 +150,16 @@ def test_after_epoch_slots(spec, state):
 @spec_state_test
 def test_old_source_epoch(spec, state):
     state.slot = spec.SLOTS_PER_EPOCH * 5
-    state.finalized_epoch = 2
-    state.previous_justified_epoch = 3
-    state.current_justified_epoch = 4
+    state.finalized_checkpoint.epoch = 2
+    state.previous_justified_checkpoint.epoch = 3
+    state.current_justified_checkpoint.epoch = 4
     attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1)
 
     # test logic sanity check: make sure the attestation is pointing to oldest known source epoch
-    assert attestation.data.source_epoch == state.previous_justified_epoch
+    assert attestation.data.source.epoch == state.previous_justified_checkpoint.epoch
 
     # Now go beyond that, it will be invalid
-    attestation.data.source_epoch -= 1
+    attestation.data.source.epoch -= 1
 
     sign_attestation(spec, state, attestation)
 
@@ -148,13 +179,61 @@ def test_wrong_shard(spec, state):
     yield from run_attestation_processing(spec, state, attestation, False)
 
 
+@with_all_phases
+@spec_state_test
+def test_invalid_shard(spec, state):
+    attestation = get_valid_attestation(spec, state)
+    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+    # off by one (with respect to valid range) on purpose
+    attestation.data.crosslink.shard = spec.SHARD_COUNT
+
+    sign_attestation(spec, state, attestation)
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_old_target_epoch(spec, state):
+    assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
+
+    attestation = get_valid_attestation(spec, state, signed=True)
+
+    state.slot = spec.SLOTS_PER_EPOCH * 2  # target epoch will be too old to handle
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_future_target_epoch(spec, state):
+    assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
+
+    attestation = get_valid_attestation(spec, state)
+
+    participants = spec.get_attesting_indices(
+        state,
+        attestation.data,
+        attestation.aggregation_bits
+    )
+    attestation.data.target.epoch = spec.get_current_epoch(state) + 1  # target epoch will be too new to handle
+
+    # manually add signature for correct participants
+    attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants)
+
+    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
 @with_all_phases
 @spec_state_test
 def test_new_source_epoch(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.data.source_epoch += 1
+    attestation.data.source.epoch += 1
 
     sign_attestation(spec, state, attestation)
 
@@ -167,7 +246,7 @@ def test_source_root_is_target_root(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.data.source_root = attestation.data.target_root
+    attestation.data.source.root = attestation.data.target.root
 
     sign_attestation(spec, state, attestation)
 
@@ -178,23 +257,20 @@ def test_source_root_is_target_root(spec, state):
 @spec_state_test
 def test_invalid_current_source_root(spec, state):
     state.slot = spec.SLOTS_PER_EPOCH * 5
-    state.finalized_epoch = 2
-
-    state.previous_justified_epoch = 3
-    state.previous_justified_root = b'\x01' * 32
+    state.finalized_checkpoint.epoch = 2
 
-    state.current_justified_epoch = 4
-    state.current_justified_root = b'\xff' * 32
+    state.previous_justified_checkpoint = spec.Checkpoint(epoch=3, root=b'\x01' * 32)
+    state.current_justified_checkpoint = spec.Checkpoint(epoch=4, root=b'\x32' * 32)
 
     attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
     # Test logic sanity checks:
-    assert state.current_justified_root != state.previous_justified_root
-    assert attestation.data.source_root == state.previous_justified_root
+    assert state.current_justified_checkpoint.root != state.previous_justified_checkpoint.root
+    assert attestation.data.source.root == state.previous_justified_checkpoint.root
 
     # Make attestation source root invalid: should be previous justified, not current one
-    attestation.data.source_root = state.current_justified_root
+    attestation.data.source.root = state.current_justified_checkpoint.root
 
     sign_attestation(spec, state, attestation)
 
@@ -207,7 +283,7 @@ def test_bad_source_root(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.data.source_root = b'\x42' * 32
+    attestation.data.source.root = b'\x42' * 32
 
     sign_attestation(spec, state, attestation)
 
@@ -230,15 +306,17 @@ def test_non_zero_crosslink_data_root(spec, state):
 @with_all_phases
 @spec_state_test
 def test_bad_parent_crosslink(spec, state):
+    state.slot = spec.SLOTS_PER_EPOCH - 1
     next_epoch(spec, state)
     apply_empty_block(spec, state)
 
-    attestation = get_valid_attestation(spec, state, signed=True)
+    attestation = get_valid_attestation(spec, state, signed=False)
     for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
         next_slot(spec, state)
     apply_empty_block(spec, state)
 
     attestation.data.crosslink.parent_root = b'\x27' * 32
+    sign_attestation(spec, state, attestation)
 
     yield from run_attestation_processing(spec, state, attestation, False)
 
@@ -246,15 +324,17 @@ def test_bad_parent_crosslink(spec, state):
 @with_all_phases
 @spec_state_test
 def test_bad_crosslink_start_epoch(spec, state):
+    state.slot = spec.SLOTS_PER_EPOCH - 1
     next_epoch(spec, state)
     apply_empty_block(spec, state)
 
-    attestation = get_valid_attestation(spec, state, signed=True)
+    attestation = get_valid_attestation(spec, state, signed=False)
     for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
         next_slot(spec, state)
     apply_empty_block(spec, state)
 
     attestation.data.crosslink.start_epoch += 1
+    sign_attestation(spec, state, attestation)
 
     yield from run_attestation_processing(spec, state, attestation, False)
 
@@ -262,26 +342,31 @@ def test_bad_crosslink_start_epoch(spec, state):
 @with_all_phases
 @spec_state_test
 def test_bad_crosslink_end_epoch(spec, state):
+    state.slot = spec.SLOTS_PER_EPOCH - 1
     next_epoch(spec, state)
     apply_empty_block(spec, state)
 
-    attestation = get_valid_attestation(spec, state, signed=True)
+    attestation = get_valid_attestation(spec, state, signed=False)
     for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
         next_slot(spec, state)
     apply_empty_block(spec, state)
 
     attestation.data.crosslink.end_epoch += 1
+    sign_attestation(spec, state, attestation)
 
     yield from run_attestation_processing(spec, state, attestation, False)
 
 
 @with_all_phases
 @spec_state_test
-def test_inconsistent_bitfields(spec, state):
+def test_inconsistent_bits(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00'
+    custody_bits = attestation.aggregation_bits[:]
+    custody_bits.append(False)
+
+    attestation.custody_bits = custody_bits
 
     sign_attestation(spec, state, attestation)
 
@@ -290,11 +375,11 @@ def test_inconsistent_bitfields(spec, state):
 
 @with_phases(['phase0'])
 @spec_state_test
-def test_non_empty_custody_bitfield(spec, state):
+def test_non_empty_custody_bits(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield)
+    attestation.custody_bits = attestation.aggregation_bits[:]
 
     sign_attestation(spec, state, attestation)
 
@@ -303,11 +388,12 @@ def test_non_empty_custody_bitfield(spec, state):
 
 @with_all_phases
 @spec_state_test
-def test_empty_aggregation_bitfield(spec, state):
+def test_empty_aggregation_bits(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
 
-    attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
+    attestation.aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
+        *([0b0] * len(attestation.aggregation_bits)))
 
     sign_attestation(spec, state, attestation)
 
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py
index 6c7637d599..7a60301577 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py
@@ -25,31 +25,56 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
         yield 'post', None
         return
 
-    slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0]
-    pre_slashed_balance = get_balance(state, slashed_index)
+    slashed_indices = (
+        attester_slashing.attestation_1.custody_bit_0_indices
+        + attester_slashing.attestation_1.custody_bit_1_indices
+    )
 
     proposer_index = spec.get_beacon_proposer_index(state)
     pre_proposer_balance = get_balance(state, proposer_index)
+    pre_slashings = {slashed_index: get_balance(state, slashed_index) for slashed_index in slashed_indices}
+    pre_withdrawalable_epochs = {
+        slashed_index: state.validators[slashed_index].withdrawable_epoch
+        for slashed_index in slashed_indices
+    }
+
+    total_proposer_rewards = sum(
+        balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
+        for balance in pre_slashings.values()
+    )
 
     # Process slashing
     spec.process_attester_slashing(state, attester_slashing)
 
-    slashed_validator = state.validator_registry[slashed_index]
-
-    # Check slashing
-    assert slashed_validator.slashed
-    assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
-    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
-
-    if slashed_index != proposer_index:
-        # lost whistleblower reward
-        assert get_balance(state, slashed_index) < pre_slashed_balance
+    for slashed_index in slashed_indices:
+        pre_withdrawalable_epoch = pre_withdrawalable_epochs[slashed_index]
+        slashed_validator = state.validators[slashed_index]
+
+        # Check slashing
+        assert slashed_validator.slashed
+        assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+        if pre_withdrawalable_epoch < spec.FAR_FUTURE_EPOCH:
+            expected_withdrawable_epoch = max(
+                pre_withdrawalable_epoch,
+                spec.get_current_epoch(state) + spec.EPOCHS_PER_SLASHINGS_VECTOR
+            )
+            assert slashed_validator.withdrawable_epoch == expected_withdrawable_epoch
+        else:
+            assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+        assert get_balance(state, slashed_index) < pre_slashings[slashed_index]
+
+    if proposer_index not in slashed_indices:
         # gained whistleblower reward
-        assert get_balance(state, proposer_index) > pre_proposer_balance
+        assert get_balance(state, proposer_index) == pre_proposer_balance + total_proposer_rewards
     else:
         # gained rewards for all slashings, which may include others. And only lost that of themselves.
-        # Netto at least 0, if more people where slashed, a balance increase.
-        assert get_balance(state, slashed_index) >= pre_slashed_balance
+        expected_balance = (
+            pre_proposer_balance
+            + total_proposer_rewards
+            - pre_slashings[proposer_index] // spec.MIN_SLASHING_PENALTY_QUOTIENT
+        )
+
+        assert get_balance(state, proposer_index) == expected_balance
 
     yield 'post', state
 
@@ -68,18 +93,51 @@ def test_success_surround(spec, state):
     next_epoch(spec, state)
     apply_empty_block(spec, state)
 
-    state.current_justified_epoch += 1
+    state.current_justified_checkpoint.epoch += 1
     attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
+    attestation_1 = attester_slashing.attestation_1
+    attestation_2 = attester_slashing.attestation_2
 
     # set attestion1 to surround attestation 2
-    attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1
-    attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1
+    attestation_1.data.source.epoch = attestation_2.data.source.epoch - 1
+    attestation_1.data.target.epoch = attestation_2.data.target.epoch + 1
 
     sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
 
     yield from run_attester_slashing_processing(spec, state, attester_slashing)
 
 
+@with_all_phases
+@always_bls
+@spec_state_test
+def test_success_already_exited_recent(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+    slashed_indices = (
+        attester_slashing.attestation_1.custody_bit_0_indices
+        + attester_slashing.attestation_1.custody_bit_1_indices
+    )
+    for index in slashed_indices:
+        spec.initiate_validator_exit(state, index)
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing)
+
+
+@with_all_phases
+@always_bls
+@spec_state_test
+def test_success_already_exited_long_ago(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+    slashed_indices = (
+        attester_slashing.attestation_1.custody_bit_0_indices
+        + attester_slashing.attestation_1.custody_bit_1_indices
+    )
+    for index in slashed_indices:
+        spec.initiate_validator_exit(state, index)
+        state.validators[index].withdrawable_epoch = spec.get_current_epoch(state) + 2
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing)
+
+
 @with_all_phases
 @always_bls
 @spec_state_test
@@ -120,7 +178,7 @@ def test_same_data(spec, state):
 def test_no_double_or_surround(spec, state):
     attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
 
-    attester_slashing.attestation_1.data.target_epoch += 1
+    attester_slashing.attestation_1.data.target.epoch += 1
     sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
 
     yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@@ -135,19 +193,113 @@ def test_participants_already_slashed(spec, state):
     attestation_1 = attester_slashing.attestation_1
     validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
     for index in validator_indices:
-        state.validator_registry[index].slashed = True
+        state.validators[index].slashed = True
 
     yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
 
 
 @with_all_phases
 @spec_state_test
-def test_custody_bit_0_and_1(spec, state):
+def test_custody_bit_0_and_1_intersect(spec, state):
     attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
 
-    attester_slashing.attestation_1.custody_bit_1_indices = (
-        attester_slashing.attestation_1.custody_bit_0_indices
+    attester_slashing.attestation_1.custody_bit_1_indices.append(
+        attester_slashing.attestation_1.custody_bit_0_indices[0]
     )
+
+    sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att1_bad_extra_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices.append(options[len(options) // 2])  # add random index, not previously in attestation.
+    attester_slashing.attestation_1.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad extra index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att1_bad_replaced_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices[3] = options[len(options) // 2]  # replace with random index, not previously in attestation.
+    attester_slashing.attestation_1.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad replaced index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att2_bad_extra_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices.append(options[len(options) // 2])  # add random index, not previously in attestation.
+    attester_slashing.attestation_2.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad extra index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att2_bad_replaced_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices[3] = options[len(options) // 2]  # replace with random index, not previously in attestation.
+    attester_slashing.attestation_2.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad replaced index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_unsorted_att_1_bit0(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    assert len(indices) >= 3
+    indices[1], indices[2] = indices[2], indices[1]  # unsort second and third index
     sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
 
     yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_unsorted_att_2_bit0(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    assert len(indices) >= 3
+    indices[1], indices[2] = indices[2], indices[1]  # unsort second and third index
+    sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+# note: unsorted indices for custody bit 0 are to be introduced in phase 1 testing.
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py
index f3c017982d..a2306ef4d9 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_block_header.py
@@ -78,7 +78,7 @@ def test_proposer_slashed(spec, state):
     proposer_index = spec.get_beacon_proposer_index(stub_state)
 
     # set proposer to slashed
-    state.validator_registry[proposer_index].slashed = True
+    state.validators[proposer_index].slashed = True
 
     block = build_empty_block_for_next_slot(spec, state, signed=True)
 
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py
index 603a07c3db..3dbbeedf02 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_deposit.py
@@ -16,7 +16,7 @@ def run_deposit_processing(spec, state, deposit, validator_index, valid=True, ef
       - post-state ('post').
     If ``valid == False``, run expecting ``AssertionError``
     """
-    pre_validator_count = len(state.validator_registry)
+    pre_validator_count = len(state.validators)
     pre_balance = 0
     if validator_index < pre_validator_count:
         pre_balance = get_balance(state, validator_index)
@@ -34,41 +34,71 @@ def run_deposit_processing(spec, state, deposit, validator_index, valid=True, ef
     yield 'post', state
 
     if not effective:
-        assert len(state.validator_registry) == pre_validator_count
+        assert len(state.validators) == pre_validator_count
         assert len(state.balances) == pre_validator_count
         if validator_index < pre_validator_count:
             assert get_balance(state, validator_index) == pre_balance
     else:
         if validator_index < pre_validator_count:
             # top-up
-            assert len(state.validator_registry) == pre_validator_count
+            assert len(state.validators) == pre_validator_count
             assert len(state.balances) == pre_validator_count
         else:
             # new validator
-            assert len(state.validator_registry) == pre_validator_count + 1
+            assert len(state.validators) == pre_validator_count + 1
             assert len(state.balances) == pre_validator_count + 1
         assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
 
-    assert state.deposit_index == state.latest_eth1_data.deposit_count
+        effective = min(spec.MAX_EFFECTIVE_BALANCE,
+                        pre_balance + deposit.data.amount)
+        effective -= effective % spec.EFFECTIVE_BALANCE_INCREMENT
+        assert state.validators[validator_index].effective_balance == effective
+
+    assert state.eth1_deposit_index == state.eth1_data.deposit_count
 
 
 @with_all_phases
 @spec_state_test
-def test_new_deposit(spec, state):
+def test_new_deposit_under_max(spec, state):
     # fresh deposit = next validator index = validator appended to registry
-    validator_index = len(state.validator_registry)
+    validator_index = len(state.validators)
+    # effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
+    amount = spec.MAX_EFFECTIVE_BALANCE - 1
+    deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
+
+    yield from run_deposit_processing(spec, state, deposit, validator_index)
+
+
+@with_all_phases
+@spec_state_test
+def test_new_deposit_max(spec, state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    # effective balance will be exactly the same as balance.
     amount = spec.MAX_EFFECTIVE_BALANCE
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
 
     yield from run_deposit_processing(spec, state, deposit, validator_index)
 
 
+@with_all_phases
+@spec_state_test
+def test_new_deposit_over_max(spec, state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    # just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
+    amount = spec.MAX_EFFECTIVE_BALANCE + 1
+    deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
+
+    yield from run_deposit_processing(spec, state, deposit, validator_index)
+
+
 @with_all_phases
 @always_bls
 @spec_state_test
 def test_invalid_sig_new_deposit(spec, state):
     # fresh deposit = next validator index = validator appended to registry
-    validator_index = len(state.validator_registry)
+    validator_index = len(state.validators)
     amount = spec.MAX_EFFECTIVE_BALANCE
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
     yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False)
@@ -101,7 +131,7 @@ def test_invalid_sig_top_up(spec, state):
 def test_invalid_withdrawal_credentials_top_up(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    withdrawal_credentials = spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(b"junk")[1:]
+    withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]
     deposit = prepare_state_and_deposit(
         spec,
         state,
@@ -114,25 +144,10 @@ def test_invalid_withdrawal_credentials_top_up(spec, state):
     yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
 
 
-@with_all_phases
-@spec_state_test
-def test_wrong_index(spec, state):
-    validator_index = len(state.validator_registry)
-    amount = spec.MAX_EFFECTIVE_BALANCE
-    deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
-
-    # mess up deposit_index
-    deposit.index = state.deposit_index + 1
-
-    sign_deposit_data(spec, state, deposit.data, privkeys[validator_index])
-
-    yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False)
-
-
 @with_all_phases
 @spec_state_test
 def test_wrong_deposit_for_deposit_count(spec, state):
-    deposit_data_leaves = [spec.ZERO_HASH] * len(state.validator_registry)
+    deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]
 
     # build root for deposit_1
     index_1 = len(deposit_data_leaves)
@@ -166,25 +181,22 @@ def test_wrong_deposit_for_deposit_count(spec, state):
     )
 
     # state has root for deposit_2 but is at deposit_count for deposit_1
-    state.latest_eth1_data.deposit_root = root_2
-    state.latest_eth1_data.deposit_count = deposit_count_1
+    state.eth1_data.deposit_root = root_2
+    state.eth1_data.deposit_count = deposit_count_1
 
     yield from run_deposit_processing(spec, state, deposit_2, index_2, valid=False)
 
 
-# TODO: test invalid signature
-
-
 @with_all_phases
 @spec_state_test
 def test_bad_merkle_proof(spec, state):
-    validator_index = len(state.validator_registry)
+    validator_index = len(state.validators)
     amount = spec.MAX_EFFECTIVE_BALANCE
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
 
     # mess up merkle branch
-    deposit.proof[-1] = spec.ZERO_HASH
+    deposit.proof[5] = spec.Hash()
 
-    sign_deposit_data(spec, state, deposit.data, privkeys[validator_index])
+    sign_deposit_data(spec, deposit.data, privkeys[validator_index], state=state)
 
     yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False)
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py
index b35241859c..af34ea7099 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py
@@ -28,7 +28,7 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)
     yield 'post', state
 
     # check if slashed
-    slashed_validator = state.validator_registry[proposer_slashing.proposer_index]
+    slashed_validator = state.validators[proposer_slashing.proposer_index]
     assert slashed_validator.slashed
     assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
     assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
@@ -77,7 +77,7 @@ def test_invalid_sig_1_and_2(spec, state):
 def test_invalid_proposer_index(spec, state):
     proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
     # Index just too high (by 1)
-    proposer_slashing.proposer_index = len(state.validator_registry)
+    proposer_slashing.proposer_index = len(state.validators)
 
     yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
 
@@ -111,7 +111,7 @@ def test_proposer_is_not_activated(spec, state):
     proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
 
     # set proposer to be not active yet
-    state.validator_registry[proposer_slashing.proposer_index].activation_epoch = spec.get_current_epoch(state) + 1
+    state.validators[proposer_slashing.proposer_index].activation_epoch = spec.get_current_epoch(state) + 1
 
     yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
 
@@ -122,7 +122,7 @@ def test_proposer_is_slashed(spec, state):
     proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
 
     # set proposer to slashed
-    state.validator_registry[proposer_slashing.proposer_index].slashed = True
+    state.validators[proposer_slashing.proposer_index].slashed = True
 
     yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
 
@@ -137,6 +137,6 @@ def test_proposer_is_withdrawn(spec, state):
     # set proposer withdrawable_epoch in past
     current_epoch = spec.get_current_epoch(state)
     proposer_index = proposer_slashing.proposer_index
-    state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1
+    state.validators[proposer_index].withdrawable_epoch = current_epoch - 1
 
     yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py
index 1294ca84a2..f079ff5781 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py
@@ -1,7 +1,7 @@
 from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
 from eth2spec.test.helpers.state import next_epoch
 from eth2spec.test.helpers.block import apply_empty_block
-from eth2spec.test.helpers.transfers import get_valid_transfer
+from eth2spec.test.helpers.transfers import get_valid_transfer, sign_transfer
 
 
 def run_transfer_processing(spec, state, transfer, valid=True):
@@ -13,11 +13,6 @@ def run_transfer_processing(spec, state, transfer, valid=True):
     If ``valid == False``, run expecting ``AssertionError``
     """
 
-    proposer_index = spec.get_beacon_proposer_index(state)
-    pre_transfer_sender_balance = state.balances[transfer.sender]
-    pre_transfer_recipient_balance = state.balances[transfer.recipient]
-    pre_transfer_proposer_balance = state.balances[proposer_index]
-
     yield 'pre', state
     yield 'transfer', transfer
 
@@ -26,6 +21,11 @@ def run_transfer_processing(spec, state, transfer, valid=True):
         yield 'post', None
         return
 
+    proposer_index = spec.get_beacon_proposer_index(state)
+    pre_transfer_sender_balance = state.balances[transfer.sender]
+    pre_transfer_recipient_balance = state.balances[transfer.recipient]
+    pre_transfer_proposer_balance = state.balances[proposer_index]
+
     spec.process_transfer(state, transfer)
     yield 'post', state
 
@@ -41,7 +41,7 @@ def run_transfer_processing(spec, state, transfer, valid=True):
 def test_success_non_activated(spec, state):
     transfer = get_valid_transfer(spec, state, signed=True)
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer)
 
@@ -55,7 +55,7 @@ def test_success_withdrawable(spec, state):
     transfer = get_valid_transfer(spec, state, signed=True)
 
     # withdrawable_epoch in past so can transfer
-    state.validator_registry[transfer.sender].withdrawable_epoch = spec.get_current_epoch(state) - 1
+    state.validators[transfer.sender].withdrawable_epoch = spec.get_current_epoch(state) - 1
 
     yield from run_transfer_processing(spec, state, transfer)
 
@@ -86,7 +86,7 @@ def test_success_active_above_max_effective_fee(spec, state):
 def test_invalid_signature(spec, state):
     transfer = get_valid_transfer(spec, state)
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
@@ -107,33 +107,195 @@ def test_active_but_transfer_past_effective_balance(spec, state):
 def test_incorrect_slot(spec, state):
     transfer = get_valid_transfer(spec, state, slot=state.slot + 1, signed=True)
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
 
+@with_all_phases
+@spec_state_test
+def test_transfer_clean(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT, fee=0, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer)
+
+
+@with_all_phases
+@spec_state_test
+def test_transfer_clean_split_to_fee(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT // 2, fee=spec.MIN_DEPOSIT_AMOUNT // 2, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer)
+
+
 @with_all_phases
 @spec_state_test
 def test_insufficient_balance_for_fee(spec, state):
     sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
-    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
     transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True)
 
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
 
 @with_all_phases
 @spec_state_test
-def test_insufficient_balance(spec, state):
+def test_insufficient_balance_for_fee_result_full(spec, state):
     sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
-    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=0, fee=state.balances[sender_index] + 1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_amount_result_dust(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
     transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True)
 
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_amount_result_full(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=state.balances[sender_index] + 1, fee=0, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_combined_result_dust(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Enough to pay fee without dust, and amount without dust, but not both.
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_combined_result_full(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Enough to pay fee fully without dust left, and amount fully without dust left, but not both.
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT + 1,
+                                  fee=spec.MIN_DEPOSIT_AMOUNT + 1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_combined_big_amount(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Enough to pay fee fully without dust left, and amount fully without dust left, but not both.
+    # Try to create a dust balance (off by 1) with combination of fee and amount.
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT + 1, fee=1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_combined_big_fee(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Enough to pay fee fully without dust left, and amount fully without dust left, but not both.
+    # Try to create a dust balance (off by 1) with combination of fee and amount.
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=1, fee=spec.MIN_DEPOSIT_AMOUNT + 1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_off_by_1_fee(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Sender balance is left at its default value here; no dust case is involved.
+    # Try to print money by using the full balance as amount, plus 1 for fee.
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=state.balances[sender_index], fee=1, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_off_by_1_amount(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Sender balance is left at its default value here; no dust case is involved.
+    # Try to print money by using the full balance as fee, plus 1 for amount.
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1,
+                                  fee=state.balances[sender_index], signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_duplicate_as_fee_and_amount(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    # Sender balance is left at its default value here; no dust case is involved.
+    # Try to print money by using the full balance, twice.
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=state.balances[sender_index],
+                                  fee=state.balances[sender_index], signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
@@ -153,7 +315,7 @@ def test_no_dust_sender(spec, state):
     )
 
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
@@ -167,7 +329,29 @@ def test_no_dust_recipient(spec, state):
     state.balances[transfer.recipient] = 0
 
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_non_existent_sender(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0)
+    transfer.sender = len(state.validators)
+    sign_transfer(spec, state, transfer, 42)  # mostly valid signature, but sender won't exist, use bogus key.
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_non_existent_recipient(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  recipient_index=len(state.validators), amount=1, fee=0, signed=True)
 
     yield from run_transfer_processing(spec, state, transfer, False)
 
@@ -176,9 +360,9 @@ def test_no_dust_recipient(spec, state):
 @spec_state_test
 def test_invalid_pubkey(spec, state):
     transfer = get_valid_transfer(spec, state, signed=True)
-    state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
+    state.validators[transfer.sender].withdrawal_credentials = spec.Hash()
 
     # un-activate so validator can transfer
-    state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     yield from run_transfer_processing(spec, state, transfer, False)
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py
index 3359c5e789..6c9298eccc 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_voluntary_exit.py
@@ -21,14 +21,14 @@ def run_voluntary_exit_processing(spec, state, voluntary_exit, valid=True):
         yield 'post', None
         return
 
-    pre_exit_epoch = state.validator_registry[validator_index].exit_epoch
+    pre_exit_epoch = state.validators[validator_index].exit_epoch
 
     spec.process_voluntary_exit(state, voluntary_exit)
 
     yield 'post', state
 
     assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
-    assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+    assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
 
 
 @with_all_phases
@@ -39,7 +39,7 @@ def test_success(spec, state):
 
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey, signed=True)
 
@@ -55,7 +55,7 @@ def test_invalid_signature(spec, state):
 
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     voluntary_exit = build_voluntary_exit(spec, state, current_epoch, validator_index, privkey)
 
@@ -71,12 +71,12 @@ def test_success_exit_queue(spec, state):
     current_epoch = spec.get_current_epoch(state)
 
     # exit `MAX_EXITS_PER_EPOCH`
-    initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_churn_limit(state)]
+    initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_validator_churn_limit(state)]
 
     # Prepare a bunch of exits, based on the current state
     exit_queue = []
     for index in initial_indices:
-        privkey = pubkey_to_privkey[state.validator_registry[index].pubkey]
+        privkey = pubkey_to_privkey[state.validators[index].pubkey]
         exit_queue.append(build_voluntary_exit(
             spec,
             state,
@@ -94,7 +94,7 @@ def test_success_exit_queue(spec, state):
 
     # exit an additional validator
     validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
     voluntary_exit = build_voluntary_exit(
         spec,
         state,
@@ -109,8 +109,8 @@ def test_success_exit_queue(spec, state):
     yield from run_voluntary_exit_processing(spec, state, voluntary_exit)
 
     assert (
-        state.validator_registry[validator_index].exit_epoch ==
-        state.validator_registry[initial_indices[0]].exit_epoch + 1
+        state.validators[validator_index].exit_epoch ==
+        state.validators[initial_indices[0]].exit_epoch + 1
     )
 
 
@@ -122,7 +122,7 @@ def test_validator_exit_in_future(spec, state):
 
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     voluntary_exit = build_voluntary_exit(
         spec,
@@ -146,7 +146,7 @@ def test_validator_invalid_validator_index(spec, state):
 
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     voluntary_exit = build_voluntary_exit(
         spec,
@@ -156,7 +156,7 @@ def test_validator_invalid_validator_index(spec, state):
         privkey,
         signed=False,
     )
-    voluntary_exit.validator_index = len(state.validator_registry)
+    voluntary_exit.validator_index = len(state.validators)
     sign_voluntary_exit(spec, state, voluntary_exit, privkey)
 
     yield from run_voluntary_exit_processing(spec, state, voluntary_exit, False)
@@ -167,9 +167,9 @@ def test_validator_invalid_validator_index(spec, state):
 def test_validator_not_active(spec, state):
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
-    state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
 
     # build and test voluntary exit
     voluntary_exit = build_voluntary_exit(
@@ -192,10 +192,10 @@ def test_validator_already_exited(spec, state):
 
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     # but validator already has exited
-    state.validator_registry[validator_index].exit_epoch = current_epoch + 2
+    state.validators[validator_index].exit_epoch = current_epoch + 2
 
     voluntary_exit = build_voluntary_exit(
         spec,
@@ -214,7 +214,7 @@ def test_validator_already_exited(spec, state):
 def test_validator_not_active_long_enough(spec, state):
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
-    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
 
     voluntary_exit = build_voluntary_exit(
         spec,
@@ -226,7 +226,7 @@ def test_validator_not_active_long_enough(spec, state):
     )
 
     assert (
-        current_epoch - state.validator_registry[validator_index].activation_epoch <
+        current_epoch - state.validators[validator_index].activation_epoch <
         spec.PERSISTENT_COMMITTEE_PERIOD
     )
 
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py
new file mode 100644
index 0000000000..5b2a2ece49
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py
@@ -0,0 +1,45 @@
+
+process_calls = [
+    'process_justification_and_finalization',
+    'process_crosslinks',
+    'process_rewards_and_penalties',
+    'process_registry_updates',
+    'process_reveal_deadlines',
+    'process_challenge_deadlines',
+    'process_slashings',
+    'process_final_updates',
+    'after_process_final_updates',
+]
+
+
+def run_epoch_processing_to(spec, state, process_name: str):
+    """
+    Processes to the next epoch transition, up to, but not including, the sub-transition named ``process_name``
+    """
+    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
+
+    # transition state to slot before epoch state transition
+    spec.process_slots(state, slot - 1)
+
+    # start transitioning, do one slot update before the epoch itself.
+    spec.process_slot(state)
+
+    # process components of epoch transition before final-updates
+    for name in process_calls:
+        if name == process_name:
+            break
+        # only run when present. Later phases introduce more to the epoch-processing.
+        if hasattr(spec, name):
+            getattr(spec, name)(state)
+
+
+def run_epoch_processing_with(spec, state, process_name: str):
+    """
+    Processes to the next epoch transition, up to and including the sub-transition named ``process_name``
+      - pre-state ('pre'), state before calling ``process_name``
+      - post-state ('post'), state after calling ``process_name``
+    """
+    run_epoch_processing_to(spec, state, process_name)
+    yield 'pre', state
+    getattr(spec, process_name)(state)
+    yield 'post', state
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py
index d51191efba..41d784c50c 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py
@@ -3,42 +3,20 @@
 from eth2spec.test.context import spec_state_test, with_all_phases
 from eth2spec.test.helpers.state import (
     next_epoch,
-    next_slot,
-    state_transition_and_sign_block,
+    next_slot
 )
-from eth2spec.test.helpers.block import apply_empty_block, sign_block
+from eth2spec.test.helpers.block import apply_empty_block
 from eth2spec.test.helpers.attestations import (
     add_attestation_to_state,
-    build_empty_block_for_next_slot,
     fill_aggregate_attestation,
     get_valid_attestation,
     sign_attestation,
 )
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
 
 
-def run_process_crosslinks(spec, state, valid=True):
-    """
-    Run ``process_crosslinks``, yielding:
-      - pre-state ('pre')
-      - post-state ('post').
-    If ``valid == False``, run expecting ``AssertionError``
-    """
-    # transition state to slot before state transition
-    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
-    block = build_empty_block_for_next_slot(spec, state)
-    block.slot = slot
-    sign_block(spec, state, block)
-    state_transition_and_sign_block(spec, state, block)
-
-    # cache state before epoch transition
-    spec.process_slot(state)
-
-    # process components of epoch transition before processing crosslinks
-    spec.process_justification_and_finalization(state)
-
-    yield 'pre', state
-    spec.process_crosslinks(state)
-    yield 'post', state
+def run_process_crosslinks(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_crosslinks')
 
 
 @with_all_phases
@@ -96,7 +74,7 @@ def test_single_crosslink_update_from_previous_epoch(spec, state):
     # ensure rewarded
     for index in spec.get_crosslink_committee(
             state,
-            attestation.data.target_epoch,
+            attestation.data.target.epoch,
             attestation.data.crosslink.shard):
         assert crosslink_deltas[0][index] > 0
         assert crosslink_deltas[1][index] == 0
@@ -105,7 +83,7 @@ def test_single_crosslink_update_from_previous_epoch(spec, state):
 @with_all_phases
 @spec_state_test
 def test_double_late_crosslink(spec, state):
-    if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
+    if spec.get_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
         print("warning: ignoring test, test-assumptions are incompatible with configuration")
         return
 
@@ -148,7 +126,7 @@ def test_double_late_crosslink(spec, state):
     # ensure no reward, only penalties for the failed crosslink
     for index in spec.get_crosslink_committee(
             state,
-            attestation_2.data.target_epoch,
+            attestation_2.data.target.epoch,
             attestation_2.data.crosslink.shard):
         assert crosslink_deltas[0][index] == 0
         assert crosslink_deltas[1][index] > 0
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py
new file mode 100644
index 0000000000..58882a44f8
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py
@@ -0,0 +1,91 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+    run_epoch_processing_with, run_epoch_processing_to
+)
+
+
+def run_process_final_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_final_updates')
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_no_reset(spec, state):
+    assert spec.SLOTS_PER_ETH1_VOTING_PERIOD > spec.SLOTS_PER_EPOCH
+    # skip ahead to the end of the epoch
+    state.slot = spec.SLOTS_PER_EPOCH - 1
+    for i in range(state.slot + 1):  # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_reset(spec, state):
+    # skip ahead to the end of the voting period
+    state.slot = spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1
+    for i in range(state.slot + 1):  # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.eth1_data_votes) == 0
+
+
+@with_all_phases
+@spec_state_test
+def test_effective_balance_hysteresis(spec, state):
+    # Prepare state up to the final-updates.
+    # Then overwrite the balances; we only want the focus to be on the hysteresis-based changes.
+    run_epoch_processing_to(spec, state, 'process_final_updates')
+    # Set some edge cases for balances
+    max = spec.MAX_EFFECTIVE_BALANCE
+    min = spec.EJECTION_BALANCE
+    inc = spec.EFFECTIVE_BALANCE_INCREMENT
+    half_inc = inc // 2
+    cases = [
+        (max, max, max, "as-is"),
+        (max, max - 1, max - inc, "round down, step lower"),
+        (max, max + 1, max, "round down"),
+        (max, max - inc, max - inc, "exactly 1 step lower"),
+        (max, max - inc - 1, max - (2 * inc), "just 1 over 1 step lower"),
+        (max, max - inc + 1, max - inc, "close to 1 step lower"),
+        (min, min + (half_inc * 3), min, "bigger balance, but not high enough"),
+        (min, min + (half_inc * 3) + 1, min + inc, "bigger balance, high enough, but small step"),
+        (min, min + (half_inc * 4) - 1, min + inc, "bigger balance, high enough, close to double step"),
+        (min, min + (half_inc * 4), min + (2 * inc), "exact two step balance increment"),
+        (min, min + (half_inc * 4) + 1, min + (2 * inc), "over two steps, round down"),
+    ]
+    current_epoch = spec.get_current_epoch(state)
+    for i, (pre_eff, bal, _, _) in enumerate(cases):
+        assert spec.is_active_validator(state.validators[i], current_epoch)
+        state.validators[i].effective_balance = pre_eff
+        state.balances[i] = bal
+
+    yield 'pre', state
+    spec.process_final_updates(state)
+    yield 'post', state
+
+    for i, (_, _, post_eff, name) in enumerate(cases):
+        assert state.validators[i].effective_balance == post_eff, name
+
+
+@with_all_phases
+@spec_state_test
+def test_historical_root_accumulator(spec, state):
+    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
+    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
+    history_len = len(state.historical_roots)
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.historical_roots) == history_len + 1
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py
new file mode 100644
index 0000000000..7dcdb42a4f
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py
@@ -0,0 +1,280 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+    run_epoch_processing_with
+)
+
+
+def run_process_just_and_fin(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_justification_and_finalization')
+
+
+def get_shards_for_slot(spec, state, slot):
+    epoch = spec.compute_epoch_of_slot(slot)
+    epoch_start_shard = spec.get_start_shard(state, epoch)
+    committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+    shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT
+    return [shard + i for i in range(committees_per_slot)]
+
+
+def add_mock_attestations(spec, state, epoch, source, target, sufficient_support=False):
+    # we must be at the end of the epoch
+    assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
+
+    previous_epoch = spec.get_previous_epoch(state)
+    current_epoch = spec.get_current_epoch(state)
+
+    if current_epoch == epoch:
+        attestations = state.current_epoch_attestations
+    elif previous_epoch == epoch:
+        attestations = state.previous_epoch_attestations
+    else:
+        raise Exception(f"cannot include attestations in epoch {epoch} from epoch {current_epoch}")
+
+    total_balance = spec.get_total_active_balance(state)
+    remaining_balance = total_balance * 2 // 3
+
+    start_slot = spec.compute_start_slot_of_epoch(epoch)
+    for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
+        for shard in get_shards_for_slot(spec, state, slot):
+            # Check if we have already accumulated sufficient balance (it is reduced by one attester when not wanted).
+            # If so, do not create more attestations (we do not normally have empty pending attestations anyway).
+            if remaining_balance < 0:
+                return
+
+            committee = spec.get_crosslink_committee(state, spec.compute_epoch_of_slot(slot), shard)
+            # Create a bitfield filled with the given count per attestation,
+            #  exactly on the right-most part of the committee field.
+
+            aggregation_bits = [0] * len(committee)
+            for v in range(len(committee) * 2 // 3 + 1):
+                if remaining_balance > 0:
+                    remaining_balance -= state.validators[v].effective_balance
+                    aggregation_bits[v] = 1
+                else:
+                    break
+
+            # remove just one attester to make the marginal support insufficient
+            if not sufficient_support:
+                aggregation_bits[aggregation_bits.index(1)] = 0
+
+            attestations.append(spec.PendingAttestation(
+                aggregation_bits=aggregation_bits,
+                data=spec.AttestationData(
+                    beacon_block_root=b'\xff' * 32,  # irrelevant to testing
+                    source=source,
+                    target=target,
+                    crosslink=spec.Crosslink(shard=shard)
+                ),
+                inclusion_delay=1,
+            ))
+
+
+def get_checkpoints(spec, epoch):
+    c1 = None if epoch < 1 else spec.Checkpoint(epoch=epoch - 1, root=b'\xaa' * 32)
+    c2 = None if epoch < 2 else spec.Checkpoint(epoch=epoch - 2, root=b'\xbb' * 32)
+    c3 = None if epoch < 3 else spec.Checkpoint(epoch=epoch - 3, root=b'\xcc' * 32)
+    c4 = None if epoch < 4 else spec.Checkpoint(epoch=epoch - 4, root=b'\xdd' * 32)
+    c5 = None if epoch < 5 else spec.Checkpoint(epoch=epoch - 5, root=b'\xee' * 32)
+    return c1, c2, c3, c4, c5
+
+
+def put_checkpoints_in_block_roots(spec, state, checkpoints):
+    for c in checkpoints:
+        state.block_roots[spec.compute_start_slot_of_epoch(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root
+
+
+def finalize_on_234(spec, state, epoch, sufficient_support):
+    assert epoch > 4
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 3210x -- justification bitfield indices
+    # 11*0. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, c4, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c4
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1:3] = [1, 1]  # mock 3rd and 4th latest epochs as justified (indices are pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 4th as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c4,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c2  # changed to 2nd latest
+        assert state.finalized_checkpoint == c4  # finalized old previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_23(spec, state, epoch, sufficient_support):
+    assert epoch > 3
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx  -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 01*0. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, _, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c3
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1] = 1  # mock 3rd latest epoch as justified (index is pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 3rd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c3,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c2  # changed to 2nd latest
+        assert state.finalized_checkpoint == c3  # finalized old previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_123(spec, state, epoch, sufficient_support):
+    assert epoch > 5
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx  -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 011*. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, c4, c5 = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4, c5])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c5
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1] = 1  # mock 3rd latest epoch as justified (index is pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 5th as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c5,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+    # mock the 1st latest epoch as justifiable, with 3rd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 1,
+                          source=c3,
+                          target=c1,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c1  # changed to 1st latest
+        assert state.finalized_checkpoint == c3  # finalized old current
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_12(spec, state, epoch, sufficient_support):
+    assert epoch > 2
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx  -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 001*. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, _, _, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c2
+    state.current_justified_checkpoint = c2
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[0] = 1  # mock 2nd latest epoch as justified (this is pre-shift)
+    # mock the 1st latest epoch as justifiable, with 2nd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 1,
+                          source=c2,
+                          target=c1,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c2  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c1  # changed to 1st latest
+        assert state.finalized_checkpoint == c2  # finalized previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c2  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+@with_all_phases
+@spec_state_test
+def test_234_ok_support(spec, state):
+    yield from finalize_on_234(spec, state, 5, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_234_poor_support(spec, state):
+    yield from finalize_on_234(spec, state, 5, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_23_ok_support(spec, state):
+    yield from finalize_on_23(spec, state, 4, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_23_poor_support(spec, state):
+    yield from finalize_on_23(spec, state, 4, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_123_ok_support(spec, state):
+    yield from finalize_on_123(spec, state, 6, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_123_poor_support(spec, state):
+    yield from finalize_on_123(spec, state, 6, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_12_ok_support(spec, state):
+    yield from finalize_on_12(spec, state, 3, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_12_poor_support(spec, state):
+    yield from finalize_on_12(spec, state, 3, False)
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py
index 4f6d700b7b..ab6a74a704 100644
--- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py
@@ -1,77 +1,85 @@
-from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
-from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block
+from eth2spec.test.helpers.state import next_epoch
 from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
 
 
-def run_process_registry_updates(spec, state, valid=True):
-    """
-    Run ``process_crosslinks``, yielding:
-      - pre-state ('pre')
-      - post-state ('post').
-    If ``valid == False``, run expecting ``AssertionError``
-    """
-    # transition state to slot before state transition
-    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
-    block = build_empty_block_for_next_slot(spec, state)
-    block.slot = slot
-    sign_block(spec, state, block)
-    state_transition_and_sign_block(spec, state, block)
+def run_process_registry_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_registry_updates')
 
-    # cache state before epoch transition
-    spec.process_slot(state)
 
-    # process components of epoch transition before registry update
-    spec.process_justification_and_finalization(state)
-    spec.process_crosslinks(state)
-    spec.process_rewards_and_penalties(state)
-
-    yield 'pre', state
-    spec.process_registry_updates(state)
-    yield 'post', state
+def mock_deposit(spec, state, index):
+    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+    state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
+    assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
 
 
 @with_all_phases
 @spec_state_test
 def test_activation(spec, state):
     index = 0
-    assert spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state))
-
-    # Mock a new deposit
-    state.validator_registry[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
-    state.validator_registry[index].activation_epoch = spec.FAR_FUTURE_EPOCH
-    state.validator_registry[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
-    assert not spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state))
+    mock_deposit(spec, state, index)
 
     for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
         next_epoch(spec, state)
 
     yield from run_process_registry_updates(spec, state)
 
-    assert state.validator_registry[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
-    assert state.validator_registry[index].activation_epoch != spec.FAR_FUTURE_EPOCH
-    assert spec.is_active_validator(
-        state.validator_registry[index],
-        spec.get_current_epoch(state),
-    )
+    assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
+    assert state.validators[index].activation_epoch != spec.FAR_FUTURE_EPOCH
+    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_sorting(spec, state):
+    mock_activations = 10
+
+    epoch = spec.get_current_epoch(state)
+    for i in range(mock_activations):
+        mock_deposit(spec, state, i)
+        state.validators[i].activation_eligibility_epoch = epoch + 1
+
+    # give the last validator priority over the others
+    state.validators[mock_activations - 1].activation_eligibility_epoch = epoch
+
+    # make sure we are hitting the churn
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert mock_activations > churn_limit
+
+    yield from run_process_registry_updates(spec, state)
+
+    # the first got in as second
+    assert state.validators[0].activation_epoch != spec.FAR_FUTURE_EPOCH
+    # the prioritized got in as first
+    assert state.validators[mock_activations - 1].activation_epoch != spec.FAR_FUTURE_EPOCH
+    # the second last is at the end of the queue, and did not make the churn,
+    #  hence is not assigned an activation_epoch yet.
+    assert state.validators[mock_activations - 2].activation_epoch == spec.FAR_FUTURE_EPOCH
+    # the one at churn_limit - 1 did not make it, it was out-prioritized
+    assert state.validators[churn_limit - 1].activation_epoch == spec.FAR_FUTURE_EPOCH
+    # but the one in front of the above did
+    assert state.validators[churn_limit - 2].activation_epoch != spec.FAR_FUTURE_EPOCH
 
 
 @with_all_phases
 @spec_state_test
 def test_ejection(spec, state):
     index = 0
-    assert spec.is_active_validator(state.validator_registry[index], spec.get_current_epoch(state))
-    assert state.validator_registry[index].exit_epoch == spec.FAR_FUTURE_EPOCH
+    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+    assert state.validators[index].exit_epoch == spec.FAR_FUTURE_EPOCH
 
     # Mock an ejection
-    state.validator_registry[index].effective_balance = spec.EJECTION_BALANCE
+    state.validators[index].effective_balance = spec.EJECTION_BALANCE
 
     for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
         next_epoch(spec, state)
 
     yield from run_process_registry_updates(spec, state)
 
-    assert state.validator_registry[index].exit_epoch != spec.FAR_FUTURE_EPOCH
+    assert state.validators[index].exit_epoch != spec.FAR_FUTURE_EPOCH
     assert not spec.is_active_validator(
-        state.validator_registry[index],
+        state.validators[index],
         spec.get_current_epoch(state),
     )
diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py
new file mode 100644
index 0000000000..7be23a04d0
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py
@@ -0,0 +1,125 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+    run_epoch_processing_with, run_epoch_processing_to
+)
+
+
+def run_process_slashings(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_slashings')
+
+
+def slash_validators(spec, state, indices, out_epochs):
+    total_slashed_balance = 0
+    for i, out_epoch in zip(indices, out_epochs):
+        v = state.validators[i]
+        v.slashed = True
+        spec.initiate_validator_exit(state, i)
+        v.withdrawable_epoch = out_epoch
+        total_slashed_balance += v.effective_balance
+
+    state.slashings[
+        spec.get_current_epoch(state) % spec.EPOCHS_PER_SLASHINGS_VECTOR
+    ] = total_slashed_balance
+
+
+@with_all_phases
+@spec_state_test
+def test_max_penalties(spec, state):
+    slashed_count = (len(state.validators) // 3) + 1
+    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
+
+    slashed_indices = list(range(slashed_count))
+    slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
+
+    total_balance = spec.get_total_active_balance(state)
+    total_penalties = sum(state.slashings)
+
+    assert total_balance // 3 <= total_penalties
+
+    yield from run_process_slashings(spec, state)
+
+    for i in slashed_indices:
+        assert state.balances[i] == 0
+
+
+@with_all_phases
+@spec_state_test
+def test_small_penalty(spec, state):
+    # Just the bare minimum for this one validator
+    state.balances[0] = state.validators[0].effective_balance = spec.EJECTION_BALANCE
+    # All the other validators get the maximum.
+    for i in range(1, len(state.validators)):
+        state.validators[i].effective_balance = state.balances[i] = spec.MAX_EFFECTIVE_BALANCE
+
+    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
+
+    slash_validators(spec, state, [0], [out_epoch])
+
+    total_balance = spec.get_total_active_balance(state)
+    total_penalties = sum(state.slashings)
+
+    assert total_balance // 3 > total_penalties
+
+    run_epoch_processing_to(spec, state, 'process_slashings')
+    pre_slash_balances = list(state.balances)
+    yield 'pre', state
+    spec.process_slashings(state)
+    yield 'post', state
+
+    assert state.balances[0] == pre_slash_balances[0] - (state.validators[0].effective_balance
+                                                         * 3 * total_penalties // total_balance)
+
+
+@with_all_phases
+@spec_state_test
+def test_scaled_penalties(spec, state):
+    # skip to next epoch
+    state.slot = spec.SLOTS_PER_EPOCH
+
+    # Also mock some previous slashings, so that we test to have the delta in the penalties computation.
+    base = spec.EJECTION_BALANCE
+    incr = spec.EFFECTIVE_BALANCE_INCREMENT
+    # Just add some random slashings. non-zero slashings are at least the minimal effective balance.
+    state.slashings[0] = base + (incr * 12)
+    state.slashings[4] = base + (incr * 3)
+    state.slashings[5] = base + (incr * 6)
+    state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)
+
+    slashed_count = len(state.validators) // 4
+
+    assert slashed_count > 10
+
+    # make the balances non-uniform.
+    # Otherwise it would just be a simple 3/4 balance slashing. Test the per-validator scaled penalties.
+    diff = spec.MAX_EFFECTIVE_BALANCE - base
+    increments = diff // incr
+    for i in range(10):
+        state.validators[i].effective_balance = base + (incr * (i % increments))
+        assert state.validators[i].effective_balance <= spec.MAX_EFFECTIVE_BALANCE
+        # add/remove some, see if balances different than the effective balances are picked up
+        state.balances[i] = state.validators[i].effective_balance + i - 5
+
+    total_balance = spec.get_total_active_balance(state)
+
+    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
+
+    slashed_indices = list(range(slashed_count))
+
+    # Process up to the sub-transition, then hijack and get the balances.
+    # We just want to test the slashings.
+    # But we are not interested in the other balance changes during the same epoch transition.
+    run_epoch_processing_to(spec, state, 'process_slashings')
+    pre_slash_balances = list(state.balances)
+
+    slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
+
+    yield 'pre', state
+    spec.process_slashings(state)
+    yield 'post', state
+
+    total_penalties = sum(state.slashings)
+
+    for i in slashed_indices:
+        v = state.validators[i]
+        penalty = v.effective_balance * total_penalties * 3 // total_balance
+        assert state.balances[i] == pre_slash_balances[i] - penalty
diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py
index 110231d77e..831ad35a55 100644
--- a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py
+++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py
@@ -1,7 +1,13 @@
 from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
 from eth2spec.test.helpers.block import apply_empty_block
 from eth2spec.test.helpers.state import next_epoch, get_balance
-from eth2spec.test.context import with_all_phases_except, spec_state_test, expect_assertion_error
+from eth2spec.test.context import (
+    with_all_phases_except,
+    spec_state_test,
+    expect_assertion_error,
+    always_bls,
+    never_bls,
+)
 
 
 def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, valid=True):
@@ -24,7 +30,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
 
     spec.process_early_derived_secret_reveal(state, randao_key_reveal)
 
-    slashed_validator = state.validator_registry[randao_key_reveal.revealed_index]
+    slashed_validator = state.validators[randao_key_reveal.revealed_index]
 
     if randao_key_reveal.epoch >= spec.get_current_epoch(state) + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING:
         assert slashed_validator.slashed
@@ -36,6 +42,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
 
 
 @with_all_phases_except(['phase0'])
+@always_bls
 @spec_state_test
 def test_success(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state)
@@ -44,6 +51,7 @@ def test_success(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@never_bls
 @spec_state_test
 def test_reveal_from_current_epoch(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state))
@@ -52,6 +60,7 @@ def test_reveal_from_current_epoch(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@never_bls
 @spec_state_test
 def test_reveal_from_past_epoch(spec, state):
     next_epoch(spec, state)
@@ -62,6 +71,7 @@ def test_reveal_from_past_epoch(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@always_bls
 @spec_state_test
 def test_reveal_with_custody_padding(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(
@@ -73,6 +83,7 @@ def test_reveal_with_custody_padding(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@always_bls
 @spec_state_test
 def test_reveal_with_custody_padding_minus_one(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(
@@ -84,6 +95,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@never_bls
 @spec_state_test
 def test_double_reveal(spec, state):
     randao_key_reveal1 = get_valid_early_derived_secret_reveal(
@@ -108,15 +120,17 @@ def test_double_reveal(spec, state):
 
 
 @with_all_phases_except(['phase0'])
+@never_bls
 @spec_state_test
 def test_revealer_is_slashed(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state))
-    state.validator_registry[randao_key_reveal.revealed_index].slashed = True
+    state.validators[randao_key_reveal.revealed_index].slashed = True
 
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
 
 
 @with_all_phases_except(['phase0'])
+@never_bls
 @spec_state_test
 def test_far_future_epoch(spec, state):
     randao_key_reveal = get_valid_early_derived_secret_reveal(
diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py
index e19fbc97c6..886f9bf6ab 100644
--- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py
+++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py
@@ -1,19 +1,49 @@
 from copy import deepcopy
-from typing import List
 
 from eth2spec.utils.ssz.ssz_impl import signing_root
 from eth2spec.utils.bls import bls_sign
 
 from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block
-# from eth2spec.test.helpers.transfers import get_valid_transfer
-from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
 from eth2spec.test.helpers.keys import privkeys, pubkeys
 from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
 from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
 from eth2spec.test.helpers.attestations import get_valid_attestation
 from eth2spec.test.helpers.deposits import prepare_state_and_deposit
 
-from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error
+
+
+@with_all_phases
+@spec_state_test
+def test_prev_slot_block_transition(spec, state):
+    # Go to clean slot
+    spec.process_slots(state, state.slot + 1)
+    # Make a block for it
+    block = build_empty_block(spec, state, slot=state.slot, signed=True)
+    # Transition to next slot; the above block will be invalid on top of the new state.
+    spec.process_slots(state, state.slot + 1)
+
+    yield 'pre', state
+    expect_assertion_error(lambda: state_transition_and_sign_block(spec, state, block))
+    yield 'blocks', [block]
+    yield 'post', None
+
+
+@with_all_phases
+@spec_state_test
+def test_same_slot_block_transition(spec, state):
+    # Same slot on top of pre-state, but move out of slot 0 first.
+    spec.process_slots(state, state.slot + 1)
+
+    block = build_empty_block(spec, state, slot=state.slot, signed=True)
+
+    yield 'pre', state
+
+    state_transition_and_sign_block(spec, state, block)
+
+    yield 'blocks', [block]
+    yield 'post', state
 
 
 @with_all_phases
@@ -28,12 +58,28 @@ def test_empty_block_transition(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
     assert len(state.eth1_data_votes) == pre_eth1_votes + 1
     assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.ZERO_HASH
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
+
+
+@with_all_phases
+@spec_state_test
+def test_invalid_state_root(spec, state):
+    yield 'pre', state
+
+    block = build_empty_block_for_next_slot(spec, state)
+    block.state_root = b"\xaa" * 32
+    sign_block(spec, state, block)
+
+    expect_assertion_error(
+        lambda: spec.state_transition(state, block, validate_state_root=True))
+
+    yield 'blocks', [block]
+    yield 'post', None
 
 
 @with_all_phases
@@ -48,11 +94,11 @@ def test_skipped_slots(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
     assert state.slot == block.slot
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.ZERO_HASH
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
     for slot in range(pre_slot, state.slot):
         assert spec.get_block_root_at_slot(state, slot) == block.parent_root
 
@@ -69,7 +115,7 @@ def test_empty_epoch_transition(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
     assert state.slot == block.slot
@@ -77,26 +123,29 @@ def test_empty_epoch_transition(spec, state):
         assert spec.get_block_root_at_slot(state, slot) == block.parent_root
 
 
-# @with_all_phases
-# @spec_state_test
-# def test_empty_epoch_transition_not_finalizing(spec, state):
-#     # copy for later balance lookups.
-#     pre_state = deepcopy(state)
-#     yield 'pre', state
+@with_all_phases
+@spec_state_test
+def test_empty_epoch_transition_not_finalizing(spec, state):
+    # Don't run for non-minimal configs, it takes very long, and the effect
+    # of calling finalization/justification is just the same as with the minimal configuration.
+    if spec.SLOTS_PER_EPOCH > 8:
+        return
 
-#     block = build_empty_block_for_next_slot(spec, state)
-#     block.slot += spec.SLOTS_PER_EPOCH * 5
-#     sign_block(spec, state, block, proposer_index=0)
+    # copy for later balance lookups.
+    pre_balances = list(state.balances)
+    yield 'pre', state
 
-#     state_transition_and_sign_block(spec, state, block)
+    spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH * 5))
+    block = build_empty_block_for_next_slot(spec, state, signed=True)
+    state_transition_and_sign_block(spec, state, block)
 
-#     yield 'blocks', [block], List[spec.BeaconBlock]
-#     yield 'post', state
+    yield 'blocks', [block]
+    yield 'post', state
 
-#     assert state.slot == block.slot
-#     assert state.finalized_epoch < spec.get_current_epoch(state) - 4
-#     for index in range(len(state.validator_registry)):
-#         assert get_balance(state, index) < get_balance(pre_state, index)
+    assert state.slot == block.slot
+    assert state.finalized_checkpoint.epoch < spec.get_current_epoch(state) - 4
+    for index in range(len(state.validators)):
+        assert state.balances[index] < pre_balances[index]
 
 
 @with_all_phases
@@ -107,7 +156,7 @@ def test_proposer_slashing(spec, state):
     proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
     validator_index = proposer_slashing.proposer_index
 
-    assert not state.validator_registry[validator_index].slashed
+    assert not state.validators[validator_index].slashed
 
     yield 'pre', state
 
@@ -120,11 +169,11 @@ def test_proposer_slashing(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
     # check if slashed
-    slashed_validator = state.validator_registry[validator_index]
+    slashed_validator = state.validators[validator_index]
     assert slashed_validator.slashed
     assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
     assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
@@ -139,10 +188,10 @@ def test_attester_slashing(spec, state):
     pre_state = deepcopy(state)
 
     attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
-    validator_index = (attester_slashing.attestation_1.custody_bit_0_indices +
-                       attester_slashing.attestation_1.custody_bit_1_indices)[0]
+    validator_index = (attester_slashing.attestation_1.custody_bit_0_indices
+                       + attester_slashing.attestation_1.custody_bit_1_indices)[0]
 
-    assert not state.validator_registry[validator_index].slashed
+    assert not state.validators[validator_index].slashed
 
     yield 'pre', state
 
@@ -155,10 +204,10 @@ def test_attester_slashing(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
-    slashed_validator = state.validator_registry[validator_index]
+    slashed_validator = state.validators[validator_index]
     assert slashed_validator.slashed
     assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
     assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
@@ -173,15 +222,35 @@ def test_attester_slashing(spec, state):
     )
 
 
-# TODO update functions below to be like above, i.e. with @spec_state_test and yielding data to put into the test vector
+@with_all_phases
+@spec_state_test
+def test_expected_deposit_in_block(spec, state):
+    # Make the state expect a deposit, then don't provide it.
+    state.eth1_data.deposit_count += 1
+    yield 'pre', state
+
+    block = build_empty_block_for_next_slot(spec, state)
+    sign_block(spec, state, block)
+    bad = False
+    try:
+        state_transition_and_sign_block(spec, state, block)
+        bad = True
+    except AssertionError:
+        pass
+    if bad:
+        raise AssertionError("expected deposit was not enforced")
+
+    yield 'blocks', [block]
+    yield 'post', None
+
 
 @with_all_phases
 @spec_state_test
 def test_deposit_in_block(spec, state):
-    initial_registry_len = len(state.validator_registry)
+    initial_registry_len = len(state.validators)
     initial_balances_len = len(state.balances)
 
-    validator_index = len(state.validator_registry)
+    validator_index = len(state.validators)
     amount = spec.MAX_EFFECTIVE_BALANCE
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
 
@@ -193,13 +262,13 @@ def test_deposit_in_block(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
-    assert len(state.validator_registry) == initial_registry_len + 1
+    assert len(state.validators) == initial_registry_len + 1
     assert len(state.balances) == initial_balances_len + 1
     assert get_balance(state, validator_index) == spec.MAX_EFFECTIVE_BALANCE
-    assert state.validator_registry[validator_index].pubkey == pubkeys[validator_index]
+    assert state.validators[validator_index].pubkey == pubkeys[validator_index]
 
 
 @with_all_phases
@@ -209,7 +278,7 @@ def test_deposit_top_up(spec, state):
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
 
-    initial_registry_len = len(state.validator_registry)
+    initial_registry_len = len(state.validators)
     initial_balances_len = len(state.balances)
     validator_pre_balance = get_balance(state, validator_index)
 
@@ -221,10 +290,10 @@ def test_deposit_top_up(spec, state):
 
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
-    assert len(state.validator_registry) == initial_registry_len
+    assert len(state.validators) == initial_registry_len
     assert len(state.balances) == initial_balances_len
     assert get_balance(state, validator_index) == validator_pre_balance + amount
 
@@ -256,7 +325,7 @@ def test_attestation(spec, state):
     sign_block(spec, state, epoch_block)
     state_transition_and_sign_block(spec, state, epoch_block)
 
-    yield 'blocks', [attestation_block, epoch_block], List[spec.BeaconBlock]
+    yield 'blocks', [attestation_block, epoch_block]
     yield 'post', state
 
     assert len(state.current_epoch_attestations) == 0
@@ -295,7 +364,7 @@ def test_voluntary_exit(spec, state):
     sign_block(spec, state, initiate_exit_block)
     state_transition_and_sign_block(spec, state, initiate_exit_block)
 
-    assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+    assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
 
     # Process within epoch transition
     exit_block = build_empty_block_for_next_slot(spec, state)
@@ -303,10 +372,10 @@ def test_voluntary_exit(spec, state):
     sign_block(spec, state, exit_block)
     state_transition_and_sign_block(spec, state, exit_block)
 
-    yield 'blocks', [initiate_exit_block, exit_block], List[spec.BeaconBlock]
+    yield 'blocks', [initiate_exit_block, exit_block]
     yield 'post', state
 
-    assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+    assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
 
 
 # @with_all_phases
@@ -323,7 +392,7 @@ def test_voluntary_exit(spec, state):
     # pre_transfer_recipient_balance = get_balance(state, recipient_index)
 
     # un-activate so validator can transfer
-    # state.validator_registry[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+    # state.validators[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
 
     # yield 'pre', state
 
@@ -334,7 +403,7 @@ def test_voluntary_exit(spec, state):
 
     # state_transition_and_sign_block(spec, state, block)
 
-    # yield 'blocks', [block], List[spec.BeaconBlock]
+    # yield 'blocks', [block]
     # yield 'post', state
 
     # sender_balance = get_balance(state, sender_index)
@@ -349,10 +418,10 @@ def test_balance_driven_status_transitions(spec, state):
     current_epoch = spec.get_current_epoch(state)
     validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
 
-    assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
+    assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
 
     # set validator balance to below ejection threshold
-    state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE
+    state.validators[validator_index].effective_balance = spec.EJECTION_BALANCE
 
     yield 'pre', state
 
@@ -362,10 +431,10 @@ def test_balance_driven_status_transitions(spec, state):
     sign_block(spec, state, block)
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
-    assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+    assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
 
 
 @with_all_phases
@@ -377,9 +446,10 @@ def test_historical_batch(spec, state):
     yield 'pre', state
 
     block = build_empty_block_for_next_slot(spec, state, signed=True)
+    sign_block(spec, state, block)
     state_transition_and_sign_block(spec, state, block)
 
-    yield 'blocks', [block], List[spec.BeaconBlock]
+    yield 'blocks', [block]
     yield 'post', state
 
     assert state.slot == block.slot
@@ -387,29 +457,78 @@ def test_historical_batch(spec, state):
     assert len(state.historical_roots) == pre_historical_roots_len + 1
 
 
-# @with_all_phases
-# @spec_state_test
-# def test_eth1_data_votes(spec, state):
-#     yield 'pre', state
+@with_all_phases
+@spec_state_test
+def test_eth1_data_votes_consensus(spec, state):
+    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
+    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
+        return
+
+    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
+    sign_block(spec, state, offset_block)
+    state_transition_and_sign_block(spec, state, offset_block)
+    yield 'pre', state
 
-#     expected_votes = 0
-#     assert len(state.eth1_data_votes) == expected_votes
+    a = b'\xaa' * 32
+    b = b'\xbb' * 32
+    c = b'\xcc' * 32
 
-#     blocks = []
-#     for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1):
-#         block = build_empty_block_for_next_slot(spec, state)
-#         state_transition_and_sign_block(spec, state, block)
-#         expected_votes += 1
-#         assert len(state.eth1_data_votes) == expected_votes
-#         blocks.append(block)
+    blocks = []
 
-#     block = build_empty_block_for_next_slot(spec, state)
-#     blocks.append(block)
+    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
+        block = build_empty_block_for_next_slot(spec, state)
+        # wait for over 50% for A, then start voting B
+        block.body.eth1_data.block_hash = b if i * 2 > spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
+        sign_block(spec, state, block)
+        state_transition_and_sign_block(spec, state, block)
+        blocks.append(block)
 
-#     state_transition_and_sign_block(spec, state, block)
+    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
+    assert state.eth1_data.block_hash == a
 
-#     yield 'blocks', [block], List[spec.BeaconBlock]
-#     yield 'post', state
+    # transition to next eth1 voting period
+    block = build_empty_block_for_next_slot(spec, state)
+    block.body.eth1_data.block_hash = c
+    sign_block(spec, state, block)
+    state_transition_and_sign_block(spec, state, block)
+    blocks.append(block)
+
+    yield 'blocks', blocks
+    yield 'post', state
+
+    assert state.eth1_data.block_hash == a
+    assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
+    assert len(state.eth1_data_votes) == 1
+    assert state.eth1_data_votes[0].block_hash == c
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_data_votes_no_consensus(spec, state):
+    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
+    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
+        return
+
+    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
+    sign_block(spec, state, offset_block)
+    state_transition_and_sign_block(spec, state, offset_block)
+    yield 'pre', state
+
+    a = b'\xaa' * 32
+    b = b'\xbb' * 32
 
-#     assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
-#     assert len(state.eth1_data_votes) == 1
+    blocks = []
+
+    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
+        block = build_empty_block_for_next_slot(spec, state)
+        # wait for precisely 50% for A, then start voting B for other 50%
+        block.body.eth1_data.block_hash = b if i * 2 >= spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
+        sign_block(spec, state, block)
+        state_transition_and_sign_block(spec, state, block)
+        blocks.append(block)
+
+    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
+    assert state.eth1_data.block_hash == b'\x00' * 32
+
+    yield 'blocks', blocks
+    yield 'post', state
diff --git a/test_libs/pyspec/eth2spec/test/test_finality.py b/test_libs/pyspec/eth2spec/test/test_finality.py
index 5e81f52c88..6250a685d7 100644
--- a/test_libs/pyspec/eth2spec/test/test_finality.py
+++ b/test_libs/pyspec/eth2spec/test/test_finality.py
@@ -1,10 +1,6 @@
-from copy import deepcopy
-from typing import List
-
 from eth2spec.test.context import spec_state_test, never_bls, with_all_phases
-from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block
-from eth2spec.test.helpers.block import build_empty_block_for_next_slot, apply_empty_block
-from eth2spec.test.helpers.attestations import get_valid_attestation
+from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations
+from eth2spec.test.helpers.block import apply_empty_block
 
 
 def check_finality(spec,
@@ -14,78 +10,73 @@ def check_finality(spec,
                    previous_justified_changed,
                    finalized_changed):
     if current_justified_changed:
-        assert state.current_justified_epoch > prev_state.current_justified_epoch
-        assert state.current_justified_root != prev_state.current_justified_root
+        assert state.current_justified_checkpoint.epoch > prev_state.current_justified_checkpoint.epoch
+        assert state.current_justified_checkpoint.root != prev_state.current_justified_checkpoint.root
     else:
-        assert state.current_justified_epoch == prev_state.current_justified_epoch
-        assert state.current_justified_root == prev_state.current_justified_root
+        assert state.current_justified_checkpoint == prev_state.current_justified_checkpoint
 
     if previous_justified_changed:
-        assert state.previous_justified_epoch > prev_state.previous_justified_epoch
-        assert state.previous_justified_root != prev_state.previous_justified_root
+        assert state.previous_justified_checkpoint.epoch > prev_state.previous_justified_checkpoint.epoch
+        assert state.previous_justified_checkpoint.root != prev_state.previous_justified_checkpoint.root
     else:
-        assert state.previous_justified_epoch == prev_state.previous_justified_epoch
-        assert state.previous_justified_root == prev_state.previous_justified_root
+        assert state.previous_justified_checkpoint == prev_state.previous_justified_checkpoint
 
     if finalized_changed:
-        assert state.finalized_epoch > prev_state.finalized_epoch
-        assert state.finalized_root != prev_state.finalized_root
+        assert state.finalized_checkpoint.epoch > prev_state.finalized_checkpoint.epoch
+        assert state.finalized_checkpoint.root != prev_state.finalized_checkpoint.root
     else:
-        assert state.finalized_epoch == prev_state.finalized_epoch
-        assert state.finalized_root == prev_state.finalized_root
+        assert state.finalized_checkpoint == prev_state.finalized_checkpoint
 
 
-def next_epoch_with_attestations(spec,
-                                 state,
-                                 fill_cur_epoch,
-                                 fill_prev_epoch):
-    post_state = deepcopy(state)
-    blocks = []
-    for _ in range(spec.SLOTS_PER_EPOCH):
-        block = build_empty_block_for_next_slot(spec, post_state)
-        if fill_cur_epoch:
-            slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
-            if slot_to_attest >= spec.get_epoch_start_slot(spec.get_current_epoch(post_state)):
-                cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest)
-                block.body.attestations.append(cur_attestation)
+@with_all_phases
+@never_bls
+@spec_state_test
+def test_finality_no_updates_at_genesis(spec, state):
+    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
 
-        if fill_prev_epoch:
-            slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
-            prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest)
-            block.body.attestations.append(prev_attestation)
+    yield 'pre', state
 
-        state_transition_and_sign_block(spec, post_state, block)
-        blocks.append(block)
+    blocks = []
+    for epoch in range(2):
+        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
+        blocks += new_blocks
+
+        # justification/finalization skipped at GENESIS_EPOCH
+        if epoch == 0:
+            check_finality(spec, state, prev_state, False, False, False)
+        # justification/finalization skipped at GENESIS_EPOCH + 1
+        elif epoch == 1:
+            check_finality(spec, state, prev_state, False, False, False)
 
-    return state, blocks, post_state
+    yield 'blocks', blocks
+    yield 'post', state
 
 
 @with_all_phases
 @never_bls
 @spec_state_test
 def test_finality_rule_4(spec, state):
+    # get past first two epochs that finality does not run on
+    next_epoch(spec, state)
+    apply_empty_block(spec, state)
+    next_epoch(spec, state)
+    apply_empty_block(spec, state)
+
     yield 'pre', state
 
     blocks = []
-    for epoch in range(4):
+    for epoch in range(2):
         prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
         blocks += new_blocks
 
-        # justification/finalization skipped at GENESIS_EPOCH
         if epoch == 0:
-            check_finality(spec, state, prev_state, False, False, False)
-        # justification/finalization skipped at GENESIS_EPOCH + 1
-        elif epoch == 1:
-            check_finality(spec, state, prev_state, False, False, False)
-        elif epoch == 2:
             check_finality(spec, state, prev_state, True, False, False)
-        elif epoch >= 3:
+        elif epoch == 1:
             # rule 4 of finality
             check_finality(spec, state, prev_state, True, True, True)
-            assert state.finalized_epoch == prev_state.current_justified_epoch
-            assert state.finalized_root == prev_state.current_justified_root
+            assert state.finalized_checkpoint == prev_state.current_justified_checkpoint
 
-    yield 'blocks', blocks, List[spec.BeaconBlock]
+    yield 'blocks', blocks
     yield 'post', state
 
 
@@ -113,10 +104,9 @@ def test_finality_rule_1(spec, state):
         elif epoch == 2:
             # finalized by rule 1
             check_finality(spec, state, prev_state, True, True, True)
-            assert state.finalized_epoch == prev_state.previous_justified_epoch
-            assert state.finalized_root == prev_state.previous_justified_root
+            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint
 
-    yield 'blocks', blocks, List[spec.BeaconBlock]
+    yield 'blocks', blocks
     yield 'post', state
 
 
@@ -144,12 +134,11 @@ def test_finality_rule_2(spec, state):
             prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True)
             # finalized by rule 2
             check_finality(spec, state, prev_state, True, False, True)
-            assert state.finalized_epoch == prev_state.previous_justified_epoch
-            assert state.finalized_root == prev_state.previous_justified_root
+            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint
 
         blocks += new_blocks
 
-    yield 'blocks', blocks, List[spec.BeaconBlock]
+    yield 'blocks', blocks
     yield 'post', state
 
 
@@ -196,8 +185,7 @@ def test_finality_rule_3(spec, state):
     blocks += new_blocks
     # rule 3
     check_finality(spec, state, prev_state, True, True, True)
-    assert state.finalized_epoch == prev_state.current_justified_epoch
-    assert state.finalized_root == prev_state.current_justified_root
+    assert state.finalized_checkpoint == prev_state.current_justified_checkpoint
 
-    yield 'blocks', blocks, List[spec.BeaconBlock]
+    yield 'blocks', blocks
     yield 'post', state
diff --git a/test_libs/pyspec/eth2spec/test/utils.py b/test_libs/pyspec/eth2spec/test/utils.py
index 817c952b7e..253691764f 100644
--- a/test_libs/pyspec/eth2spec/test/utils.py
+++ b/test_libs/pyspec/eth2spec/test/utils.py
@@ -1,6 +1,6 @@
 from typing import Dict, Any, Callable, Iterable
 from eth2spec.debug.encode import encode
-from eth2spec.utils.ssz.ssz_typing import Container
+from eth2spec.utils.ssz.ssz_typing import SSZValue
 
 
 def spectest(description: str = None):
@@ -29,10 +29,12 @@ def entry(*args, **kw):
                         (key, value, typ) = data
                         out[key] = encode(value, typ)
                     else:
-                        # Otherwise, try to infer the type, but keep it as-is if it's not a SSZ container.
+                        # Otherwise, try to infer the type, but keep it as-is if it's not a SSZ type or bytes.
                         (key, value) = data
-                        if isinstance(value, Container):
-                            out[key] = encode(value, value.__class__)
+                        if isinstance(value, (SSZValue, bytes)):
+                            out[key] = encode(value)
+                        elif isinstance(value, list) and all([isinstance(el, (SSZValue, bytes)) for el in value]):
+                            out[key] = [encode(el) for el in value]
                         else:
                             # not a ssz value.
                             # It could be vector or bytes still, but it is a rare case,
diff --git a/test_libs/pyspec/eth2spec/utils/bls.py b/test_libs/pyspec/eth2spec/utils/bls.py
index 52f1fed632..ab2327f438 100644
--- a/test_libs/pyspec/eth2spec/utils/bls.py
+++ b/test_libs/pyspec/eth2spec/utils/bls.py
@@ -23,12 +23,14 @@ def entry(*args, **kw):
 
 @only_with_bls(alt_return=True)
 def bls_verify(pubkey, message_hash, signature, domain):
-    return bls.verify(message_hash=message_hash, pubkey=pubkey, signature=signature, domain=domain)
+    return bls.verify(message_hash=message_hash, pubkey=pubkey,
+                      signature=signature, domain=int.from_bytes(domain, byteorder='little'))
 
 
 @only_with_bls(alt_return=True)
 def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
-    return bls.verify_multiple(pubkeys, message_hashes, signature, domain)
+    return bls.verify_multiple(pubkeys=pubkeys, message_hashes=message_hashes,
+                               signature=signature, domain=int.from_bytes(domain, byteorder='little'))
 
 
 @only_with_bls(alt_return=STUB_PUBKEY)
@@ -43,4 +45,5 @@ def bls_aggregate_signatures(signatures):
 
 @only_with_bls(alt_return=STUB_SIGNATURE)
 def bls_sign(message_hash, privkey, domain):
-    return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain)
+    return bls.sign(message_hash=message_hash, privkey=privkey,
+                    domain=int.from_bytes(domain, byteorder='little'))
diff --git a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py
index c508f0df29..e9416ea051 100644
--- a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py
+++ b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py
@@ -1,18 +1,18 @@
 from .hash_function import hash
+from math import log2
 
 
 ZERO_BYTES32 = b'\x00' * 32
 
 zerohashes = [ZERO_BYTES32]
-for layer in range(1, 32):
+for layer in range(1, 100):
     zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1]))
 
 
-# Compute a Merkle root of a right-zerobyte-padded 2**32 sized tree
-def calc_merkle_tree_from_leaves(values):
+def calc_merkle_tree_from_leaves(values, layer_count=32):
     values = list(values)
     tree = [values[::]]
-    for h in range(32):
+    for h in range(layer_count):
         if len(values) % 2 == 1:
             values.append(zerohashes[h])
         values = [hash(values[i] + values[i + 1]) for i in range(0, len(values), 2)]
@@ -20,8 +20,11 @@ def calc_merkle_tree_from_leaves(values):
     return tree
 
 
-def get_merkle_root(values):
-    return calc_merkle_tree_from_leaves(values)[-1][0]
+def get_merkle_root(values, pad_to=1):
+    layer_count = int(log2(pad_to))
+    if len(values) == 0:
+        return zerohashes[layer_count]
+    return calc_merkle_tree_from_leaves(values, layer_count)[-1][0]
 
 
 def get_merkle_proof(tree, item_index):
@@ -32,23 +35,35 @@ def get_merkle_proof(tree, item_index):
     return proof
 
 
-def next_power_of_two(v: int) -> int:
-    """
-    Get the next power of 2. (for 64 bit range ints).
-    0 is a special case, to have non-empty defaults.
-    Examples:
-    0 -> 1, 1 -> 1, 2 -> 2, 3 -> 4, 32 -> 32, 33 -> 64
-    """
-    if v == 0:
-        return 1
-    return 1 << (v - 1).bit_length()
-
-
-def merkleize_chunks(chunks):
-    tree = chunks[::]
-    margin = next_power_of_two(len(chunks)) - len(chunks)
-    tree.extend([ZERO_BYTES32] * margin)
-    tree = [ZERO_BYTES32] * len(tree) + tree
-    for i in range(len(tree) // 2 - 1, 0, -1):
-        tree[i] = hash(tree[i * 2] + tree[i * 2 + 1])
-    return tree[1]
+def merkleize_chunks(chunks, pad_to: int=1):
+    count = len(chunks)
+    depth = max(count - 1, 0).bit_length()
+    max_depth = max(depth, (pad_to - 1).bit_length())
+    tmp = [None for _ in range(max_depth + 1)]
+
+    def merge(h, i):
+        j = 0
+        while True:
+            if i & (1 << j) == 0:
+                if i == count and j < depth:
+                    h = hash(h + zerohashes[j])  # keep going if we are complementing the void to the next power of 2
+                else:
+                    break
+            else:
+                h = hash(tmp[j] + h)
+            j += 1
+        tmp[j] = h
+
+    # merge in leaf by leaf.
+    for i in range(count):
+        merge(chunks[i], i)
+
+    # complement with 0 if empty, or if not the right power of 2
+    if 1 << depth != count:
+        merge(zerohashes[0], count)
+
+    # the next power of two may be smaller than the ultimate virtual size, complement with zero-hashes at each depth.
+    for j in range(depth, max_depth):
+        tmp[j + 1] = hash(tmp[j] + zerohashes[j])
+
+    return tmp[max_depth]
diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py
index b3c877d484..d5855a755f 100644
--- a/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py
+++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_impl.py
@@ -1,11 +1,8 @@
-from ..merkle_minimal import merkleize_chunks, hash
-from eth2spec.utils.ssz.ssz_typing import (
-    is_uint_type, is_bool_type, is_container_type,
-    is_list_kind, is_vector_kind,
-    read_vector_elem_type, read_elem_type,
-    uint_byte_size,
-    infer_input_type,
-    get_zero_value,
+from ..merkle_minimal import merkleize_chunks
+from ..hash_function import hash
+from .ssz_typing import (
+    SSZValue, SSZType, BasicValue, BasicType, Series, Elements, Bits, boolean, Container, List, Bytes,
+    Bitlist, Bitvector, uint,
 )
 
 # SSZ Serialization
@@ -14,68 +11,53 @@
 BYTES_PER_LENGTH_OFFSET = 4
 
 
-def is_basic_type(typ):
-    return is_uint_type(typ) or is_bool_type(typ)
-
-
-def serialize_basic(value, typ):
-    if is_uint_type(typ):
-        return value.to_bytes(uint_byte_size(typ), 'little')
-    elif is_bool_type(typ):
+def serialize_basic(value: SSZValue):
+    if isinstance(value, uint):
+        return value.to_bytes(value.type().byte_len, 'little')
+    elif isinstance(value, boolean):
         if value:
             return b'\x01'
         else:
             return b'\x00'
     else:
-        raise Exception("Type not supported: {}".format(typ))
+        raise Exception(f"Type not supported: {type(value)}")
 
 
-def deserialize_basic(value, typ):
-    if is_uint_type(typ):
+def deserialize_basic(value, typ: BasicType):
+    if issubclass(typ, uint):
         return typ(int.from_bytes(value, 'little'))
-    elif is_bool_type(typ):
+    elif issubclass(typ, boolean):
         assert value in (b'\x00', b'\x01')
-        return True if value == b'\x01' else False
-    else:
-        raise Exception("Type not supported: {}".format(typ))
-
-
-def is_fixed_size(typ):
-    if is_basic_type(typ):
-        return True
-    elif is_list_kind(typ):
-        return False
-    elif is_vector_kind(typ):
-        return is_fixed_size(read_vector_elem_type(typ))
-    elif is_container_type(typ):
-        return all(is_fixed_size(t) for t in typ.get_field_types())
+        return typ(value == b'\x01')
     else:
-        raise Exception("Type not supported: {}".format(typ))
+        raise Exception(f"Type not supported: {typ}")
 
 
-def is_empty(obj):
-    return get_zero_value(type(obj)) == obj
+def is_empty(obj: SSZValue):
+    return type(obj).default() == obj
 
 
-@infer_input_type
-def serialize(obj, typ=None):
-    if is_basic_type(typ):
-        return serialize_basic(obj, typ)
-    elif is_list_kind(typ) or is_vector_kind(typ):
-        return encode_series(obj, [read_elem_type(typ)] * len(obj))
-    elif is_container_type(typ):
-        return encode_series(obj.get_field_values(), typ.get_field_types())
+def serialize(obj: SSZValue):
+    if isinstance(obj, BasicValue):
+        return serialize_basic(obj)
+    elif isinstance(obj, Bitvector):
+        as_integer = sum([obj[i] << i for i in range(len(obj))])
+        return as_integer.to_bytes((len(obj) + 7) // 8, "little")
+    elif isinstance(obj, Bitlist):
+        as_integer = (1 << len(obj)) + sum([obj[i] << i for i in range(len(obj))])
+        return as_integer.to_bytes((as_integer.bit_length() + 7) // 8, "little")
+    elif isinstance(obj, Series):
+        return encode_series(obj)
     else:
-        raise Exception("Type not supported: {}".format(typ))
+        raise Exception(f"Type not supported: {type(obj)}")
 
 
-def encode_series(values, types):
-    # bytes and bytesN are already in the right format.
-    if isinstance(values, bytes):
+def encode_series(values: Series):
+    if isinstance(values, bytes):  # Bytes and BytesN are already like serialized output
         return values
 
     # Recursively serialize
-    parts = [(is_fixed_size(types[i]), serialize(values[i], typ=types[i])) for i in range(len(values))]
+    parts = [(v.type().is_fixed_size(), serialize(v)) for v in values]
 
     # Compute and check lengths
     fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET
@@ -107,10 +89,16 @@ def encode_series(values, types):
 # -----------------------------
 
 
-def pack(values, subtype):
-    if isinstance(values, bytes):
+def pack(values: Series):
+    if isinstance(values, bytes):  # Bytes and BytesN are already packed
         return values
-    return b''.join([serialize_basic(value, subtype) for value in values])
+    elif isinstance(values, Bitvector):
+        as_integer = sum([values[i] << i for i in range(len(values))])
+        return as_integer.to_bytes((values.length + 7) // 8, "little")
+    elif isinstance(values, Bitlist):
+        as_integer = sum([values[i] << i for i in range(len(values))])
+        return as_integer.to_bytes((values.length + 7) // 8, "little")
+    return b''.join([serialize_basic(value) for value in values])
 
 
 def chunkify(bytez):
@@ -123,41 +111,52 @@ def mix_in_length(root, length):
     return hash(root + length.to_bytes(32, 'little'))
 
 
-def is_bottom_layer_kind(typ):
+def is_bottom_layer_kind(typ: SSZType):
     return (
-        is_basic_type(typ) or
-        (is_list_kind(typ) or is_vector_kind(typ)) and is_basic_type(read_elem_type(typ))
+        isinstance(typ, BasicType) or
+        (issubclass(typ, Elements) and isinstance(typ.elem_type, BasicType))
     )
 
 
-@infer_input_type
-def get_typed_values(obj, typ=None):
-    if is_container_type(typ):
-        return obj.get_typed_values()
-    elif is_list_kind(typ) or is_vector_kind(typ):
-        elem_type = read_elem_type(typ)
-        return list(zip(obj, [elem_type] * len(obj)))
+def item_length(typ: SSZType) -> int:
+    if issubclass(typ, BasicValue):
+        return typ.byte_len
     else:
-        raise Exception("Invalid type")
+        return 32
+
+
+def chunk_count(typ: SSZType) -> int:
+    if isinstance(typ, BasicType):
+        return 1
+    elif issubclass(typ, Bits):
+        return (typ.length + 255) // 256
+    elif issubclass(typ, Elements):
+        return (typ.length * item_length(typ.elem_type) + 31) // 32
+    elif issubclass(typ, Container):
+        return len(typ.get_fields())
+    else:
+        raise Exception(f"Type not supported: {typ}")
 
 
-@infer_input_type
-def hash_tree_root(obj, typ=None):
-    if is_bottom_layer_kind(typ):
-        data = serialize_basic(obj, typ) if is_basic_type(typ) else pack(obj, read_elem_type(typ))
-        leaves = chunkify(data)
+def hash_tree_root(obj: SSZValue):
+    if isinstance(obj, Series):
+        if is_bottom_layer_kind(obj.type()):
+            leaves = chunkify(pack(obj))
+        else:
+            leaves = [hash_tree_root(value) for value in obj]
+    elif isinstance(obj, BasicValue):
+        leaves = chunkify(serialize_basic(obj))
     else:
-        fields = get_typed_values(obj, typ=typ)
-        leaves = [hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in fields]
-    if is_list_kind(typ):
-        return mix_in_length(merkleize_chunks(leaves), len(obj))
+        raise Exception(f"Type not supported: {type(obj)}")
+
+    if isinstance(obj, (List, Bytes, Bitlist)):
+        return mix_in_length(merkleize_chunks(leaves, pad_to=chunk_count(obj.type())), len(obj))
     else:
         return merkleize_chunks(leaves)
 
 
-@infer_input_type
-def signing_root(obj, typ):
-    assert is_container_type(typ)
+def signing_root(obj: Container):
     # ignore last field
-    leaves = [hash_tree_root(field_value, typ=field_typ) for field_value, field_typ in obj.get_typed_values()[:-1]]
+    fields = [field for field in obj][:-1]
+    leaves = [hash_tree_root(f) for f in fields]
     return merkleize_chunks(chunkify(b''.join(leaves)))
diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py
index bb98fb0846..2ec4b5ce29 100644
--- a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py
+++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py
@@ -1,137 +1,183 @@
+from typing import Dict, Iterator
+import copy
 from types import GeneratorType
-from typing import List, Iterable, TypeVar, Type, NewType
-from typing import Union
-from typing_inspect import get_origin
 
-# SSZ integers
-# -----------------------------
 
+class DefaultingTypeMeta(type):
+    def default(cls):
+        raise Exception("Not implemented")
 
-class uint(int):
+
+class SSZType(DefaultingTypeMeta):
+
+    def is_fixed_size(cls):
+        raise Exception("Not implemented")
+
+
+class SSZValue(object, metaclass=SSZType):
+
+    def type(self):
+        return self.__class__
+
+
+class BasicType(SSZType):
     byte_len = 0
 
-    def __new__(cls, value, *args, **kwargs):
+    def is_fixed_size(cls):
+        return True
+
+
+class BasicValue(int, SSZValue, metaclass=BasicType):
+    pass
+
+
+class boolean(BasicValue):  # can't subclass bool.
+    byte_len = 1
+
+    def __new__(cls, value: int):  # int value, but can be any subclass of int (bool, bit, boolean, etc...)
+        if value < 0 or value > 1:
+            raise ValueError(f"value {value} out of bounds for bit")
+        return super().__new__(cls, value)
+
+    @classmethod
+    def default(cls):
+        return cls(0)
+
+    def __bool__(self):
+        return self > 0
+
+
+# Alias for Bool
+class bit(boolean):
+    pass
+
+
+class uint(BasicValue, metaclass=BasicType):
+
+    def __new__(cls, value: int):
         if value < 0:
             raise ValueError("unsigned types must not be negative")
+        if cls.byte_len and value.bit_length() > (cls.byte_len << 3):
+            raise ValueError("value out of bounds for uint{}".format(cls.byte_len * 8))
         return super().__new__(cls, value)
 
+    def __add__(self, other):
+        return self.__class__(super().__add__(coerce_type_maybe(other, self.__class__, strict=True)))
+
+    def __sub__(self, other):
+        return self.__class__(super().__sub__(coerce_type_maybe(other, self.__class__, strict=True)))
+
+    @classmethod
+    def default(cls):
+        return cls(0)
+
 
 class uint8(uint):
     byte_len = 1
 
-    def __new__(cls, value, *args, **kwargs):
-        if value.bit_length() > 8:
-            raise ValueError("value out of bounds for uint8")
-        return super().__new__(cls, value)
-
 
 # Alias for uint8
-byte = NewType('byte', uint8)
+class byte(uint8):
+    pass
 
 
 class uint16(uint):
     byte_len = 2
 
-    def __new__(cls, value, *args, **kwargs):
-        if value.bit_length() > 16:
-            raise ValueError("value out of bounds for uint16")
-        return super().__new__(cls, value)
-
 
 class uint32(uint):
     byte_len = 4
 
-    def __new__(cls, value, *args, **kwargs):
-        if value.bit_length() > 32:
-            raise ValueError("value out of bounds for uint16")
-        return super().__new__(cls, value)
-
 
-# We simply default to uint64. But do give it a name, for readability
-uint64 = NewType('uint64', int)
+class uint64(uint):
+    byte_len = 8
 
 
 class uint128(uint):
     byte_len = 16
 
-    def __new__(cls, value, *args, **kwargs):
-        if value.bit_length() > 128:
-            raise ValueError("value out of bounds for uint128")
-        return super().__new__(cls, value)
-
 
 class uint256(uint):
     byte_len = 32
 
-    def __new__(cls, value, *args, **kwargs):
-        if value.bit_length() > 256:
-            raise ValueError("value out of bounds for uint256")
-        return super().__new__(cls, value)
-
 
-def is_uint_type(typ):
-    # All integers are uint in the scope of the spec here.
-    # Since we default to uint64. Bounds can be checked elsewhere.
-    # However, some are wrapped in a NewType
-    if hasattr(typ, '__supertype__'):
-        # get the type that the NewType is wrapping
-        typ = typ.__supertype__
-
-    return isinstance(typ, type) and issubclass(typ, int) and not issubclass(typ, bool)
+def coerce_type_maybe(v, typ: SSZType, strict: bool = False):
+    v_typ = type(v)
+    # shortcut if it's already the type we are looking for
+    if v_typ == typ:
+        return v
+    elif isinstance(v, int):
+        if isinstance(v, uint):  # do not coerce from one uintX to another uintY
+            if issubclass(typ, uint) and v.type().byte_len == typ.byte_len:
+                return typ(v)
+            # fall through to the default behavior below the if-else (strict ValueError, or return as-is)
+        else:
+            return typ(v)
+    elif isinstance(v, (list, tuple)):
+        return typ(*v)
+    elif isinstance(v, (bytes, BytesN, Bytes)):
+        return typ(v)
+    elif isinstance(v, GeneratorType):
+        return typ(v)
 
+    # just return as-is, Value-checkers will take care of it not being coerced, if we are not strict.
+    if strict and not isinstance(v, typ):
+        raise ValueError("Type coercion of {} to {} failed".format(v, typ))
+    return v
 
-def uint_byte_size(typ):
-    if hasattr(typ, '__supertype__'):
-        typ = typ.__supertype__
 
-    if isinstance(typ, type):
-        if issubclass(typ, uint):
-            return typ.byte_len
-        elif issubclass(typ, int):
-            # Default to uint64
-            return 8
-    else:
-        raise TypeError("Type %s is not an uint (or int-default uint64) type" % typ)
+class Series(SSZValue):
 
+    def __iter__(self) -> Iterator[SSZValue]:
+        raise Exception("Not implemented")
 
-# SSZ Container base class
-# -----------------------------
 
 # Note: importing ssz functionality locally, to avoid import loop
 
-class Container(object):
+class Container(Series, metaclass=SSZType):
 
     def __init__(self, **kwargs):
         cls = self.__class__
-        for f, t in cls.get_fields():
+        for f, t in cls.get_fields().items():
             if f not in kwargs:
-                setattr(self, f, get_zero_value(t))
+                setattr(self, f, t.default())
             else:
-                setattr(self, f, kwargs[f])
+                value = coerce_type_maybe(kwargs[f], t)
+                if not isinstance(value, t):
+                    raise ValueError(f"Bad input for class {self.__class__}:"
+                                     f" field: {f} type: {t} value: {value} value type: {type(value)}")
+                setattr(self, f, value)
 
     def serialize(self):
         from .ssz_impl import serialize
-        return serialize(self, self.__class__)
+        return serialize(self)
 
     def hash_tree_root(self):
         from .ssz_impl import hash_tree_root
-        return hash_tree_root(self, self.__class__)
+        return hash_tree_root(self)
 
     def signing_root(self):
         from .ssz_impl import signing_root
-        return signing_root(self, self.__class__)
-
-    def get_field_values(self):
-        cls = self.__class__
-        return [getattr(self, field) for field in cls.get_field_names()]
+        return signing_root(self)
+
+    def __setattr__(self, name, value):
+        if name not in self.__class__.__annotations__:
+            raise AttributeError("Cannot change non-existing SSZ-container attribute")
+        field_typ = self.__class__.__annotations__[name]
+        value = coerce_type_maybe(value, field_typ)
+        if not isinstance(value, field_typ):
+            raise ValueError(f"Cannot set field of {self.__class__}:"
+                             f" field: {name} type: {field_typ} value: {value} value type: {type(value)}")
+        super().__setattr__(name, value)
 
     def __repr__(self):
-        return repr({field: getattr(self, field) for field in self.get_field_names()})
+        return repr({field: (getattr(self, field) if hasattr(self, field) else 'unset')
+                     for field in self.get_fields().keys()})
 
     def __str__(self):
-        output = []
-        for field in self.get_field_names():
-            output.append(f'{field}: {getattr(self, field)}')
+        output = [f'{self.__class__.__name__}']
+        for field in self.get_fields().keys():
+            output.append(f'  {field}: {getattr(self, field)}')
         return "\n".join(output)
 
     def __eq__(self, other):
@@ -140,387 +186,319 @@ def __eq__(self, other):
     def __hash__(self):
         return hash(self.hash_tree_root())
 
-    @classmethod
-    def get_fields_dict(cls):
-        return dict(cls.__annotations__)
+    def copy(self):
+        return copy.deepcopy(self)
 
     @classmethod
-    def get_fields(cls):
-        return list(dict(cls.__annotations__).items())
-
-    def get_typed_values(self):
-        return list(zip(self.get_field_values(), self.get_field_types()))
+    def get_fields(cls) -> Dict[str, SSZType]:
+        if not hasattr(cls, '__annotations__'):  # no container fields
+            return {}
+        return dict(cls.__annotations__)
 
     @classmethod
-    def get_field_names(cls):
-        return list(cls.__annotations__.keys())
+    def default(cls):
+        return cls(**{f: t.default() for f, t in cls.get_fields().items()})
 
     @classmethod
-    def get_field_types(cls):
-        # values of annotations are the types corresponding to the fields, not instance values.
-        return list(cls.__annotations__.values())
+    def is_fixed_size(cls):
+        return all(t.is_fixed_size() for t in cls.get_fields().values())
 
+    def __iter__(self) -> Iterator[SSZValue]:
+        return iter([getattr(self, field) for field in self.get_fields().keys()])
 
-# SSZ vector
-# -----------------------------
 
+class ParamsBase(Series):
+    _has_params = False
 
-def _is_vector_instance_of(a, b):
-    # Other must not be a BytesN
-    if issubclass(b, bytes):
-        return False
-    elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'):
-        # Vector (b) is not an instance of Vector[X, Y] (a)
-        return False
-    elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'):
-        # Vector[X, Y] (b) is an instance of Vector (a)
-        return True
-    else:
-        # Vector[X, Y] (a) is an instance of Vector[X, Y] (b)
-        return a.elem_type == b.elem_type and a.length == b.length
-
+    def __new__(cls, *args, **kwargs):
+        if not cls._has_params:
+            raise Exception("cannot init bare type without params")
+        return super().__new__(cls, **kwargs)
 
-def _is_equal_vector_type(a, b):
-    # Other must not be a BytesN
-    if issubclass(b, bytes):
-        return False
-    elif not hasattr(a, 'elem_type') or not hasattr(a, 'length'):
-        if not hasattr(b, 'elem_type') or not hasattr(b, 'length'):
-            # Vector == Vector
-            return True
-        else:
-            # Vector != Vector[X, Y]
-            return False
-    elif not hasattr(b, 'elem_type') or not hasattr(b, 'length'):
-        # Vector[X, Y] != Vector
-        return False
-    else:
-        # Vector[X, Y] == Vector[X, Y]
-        return a.elem_type == b.elem_type and a.length == b.length
 
+class ParamsMeta(SSZType):
 
-class VectorMeta(type):
     def __new__(cls, class_name, parents, attrs):
         out = type.__new__(cls, class_name, parents, attrs)
-        if 'elem_type' in attrs and 'length' in attrs:
-            setattr(out, 'elem_type', attrs['elem_type'])
-            setattr(out, 'length', attrs['length'])
+        if hasattr(out, "_has_params") and getattr(out, "_has_params"):
+            for k, v in attrs.items():
+                setattr(out, k, v)
         return out
 
     def __getitem__(self, params):
-        if not isinstance(params, tuple) or len(params) != 2:
-            raise Exception("Vector must be instantiated with two args: elem type and length")
-        o = self.__class__(self.__name__, (Vector,), {'elem_type': params[0], 'length': params[1]})
-        o._name = 'Vector'
+        o = self.__class__(self.__name__, (self,), self.attr_from_params(params))
         return o
 
-    def __subclasscheck__(self, sub):
-        return _is_vector_instance_of(self, sub)
+    def __str__(self):
+        return f"{self.__name__}~{self.__class__.__name__}"
 
-    def __instancecheck__(self, other):
-        return _is_vector_instance_of(self, other.__class__)
+    def __repr__(self):
+        return f"{self.__name__}~{self.__class__.__name__}"
+
+    def attr_from_params(self, p):
+        # single key params are valid too. Wrap them in a tuple.
+        params = p if isinstance(p, tuple) else (p,)
+        res = {'_has_params': True}
+        i = 0
+        for (name, typ) in self.__annotations__.items():
+            if hasattr(self.__class__, name):
+                res[name] = getattr(self.__class__, name)
+            else:
+                if i >= len(params):
+                    i += 1
+                    continue
+                param = params[i]
+                if not isinstance(param, typ):
+                    raise TypeError(
+                        "cannot create parametrized class with param {} as {} of type {}".format(param, name, typ))
+                res[name] = param
+                i += 1
+        if len(params) != i:
+            raise TypeError("provided parameters {} mismatch required parameter count {}".format(params, i))
+        return res
+
+    def __subclasscheck__(self, subclass):
+        # check regular class system if we can, solves a lot of the normal cases.
+        if super().__subclasscheck__(subclass):
+            return True
+        # if they are not normal subclasses, they are of the same class.
+        # then they should have the same name
+        if subclass.__name__ != self.__name__:
+            return False
+        # If they do have the same name, they should also have the same params.
+        for name, typ in self.__annotations__.items():
+            if hasattr(self, name) and hasattr(subclass, name) \
+                    and getattr(subclass, name) != getattr(self, name):
+                return False
+        return True
 
-    def __eq__(self, other):
-        return _is_equal_vector_type(self, other)
+    def __instancecheck__(self, obj):
+        return self.__subclasscheck__(obj.__class__)
 
-    def __ne__(self, other):
-        return not _is_equal_vector_type(self, other)
 
-    def __hash__(self):
-        return hash(self.__class__)
+class ElementsType(ParamsMeta):
+    elem_type: SSZType
+    length: int
 
 
-class Vector(metaclass=VectorMeta):
+class Elements(ParamsBase, metaclass=ElementsType):
+    pass
 
-    def __init__(self, *args: Iterable):
-        cls = self.__class__
-        if not hasattr(cls, 'elem_type'):
-            raise TypeError("Type Vector without elem_type data cannot be instantiated")
-        elif not hasattr(cls, 'length'):
-            raise TypeError("Type Vector without length data cannot be instantiated")
-
-        if len(args) != cls.length:
-            if len(args) == 0:
-                args = [get_zero_value(cls.elem_type) for _ in range(cls.length)]
-            else:
-                raise TypeError("Typed vector with length %d cannot hold %d items" % (cls.length, len(args)))
 
-        self.items = list(args)
+class BaseList(list, Elements):
 
-        # cannot check non-type objects, or parametrized types
-        if isinstance(cls.elem_type, type) and not hasattr(cls.elem_type, '__args__'):
-            for i, item in enumerate(self.items):
-                if not issubclass(type(item), cls.elem_type):
-                    raise TypeError("Typed vector cannot hold differently typed value"
-                                    " at index %d. Got type: %s, expected type: %s" % (i, type(item), cls.elem_type))
+    def __init__(self, *args):
+        items = self.extract_args(*args)
 
-    def serialize(self):
-        from .ssz_impl import serialize
-        return serialize(self, self.__class__)
+        if not self.value_check(items):
+            raise ValueError(f"Bad input for class {self.__class__}: {items}")
+        super().__init__(items)
 
-    def hash_tree_root(self):
-        from .ssz_impl import hash_tree_root
-        return hash_tree_root(self, self.__class__)
+    @classmethod
+    def value_check(cls, value):
+        return all(isinstance(v, cls.elem_type) for v in value) and len(value) <= cls.length
 
-    def __repr__(self):
-        return repr({'length': self.__class__.length, 'items': self.items})
+    @classmethod
+    def extract_args(cls, *args):
+        x = list(args)
+        if len(x) == 1 and isinstance(x[0], (GeneratorType, list, tuple)):
+            x = list(x[0])
+        x = [coerce_type_maybe(v, cls.elem_type) for v in x]
+        return x
 
-    def __getitem__(self, key):
-        return self.items[key]
+    def __str__(self):
+        cls = self.__class__
+        return f"{cls.__name__}[{cls.elem_type.__name__}, {cls.length}]({', '.join(str(v) for v in self)})"
 
-    def __setitem__(self, key, value):
-        self.items[key] = value
+    def __repr__(self):
+        cls = self.__class__
+        return f"{cls.__name__}[{cls.elem_type.__name__}, {cls.length}]({', '.join(str(v) for v in self)})"
+
+    def __getitem__(self, k) -> SSZValue:
+        if isinstance(k, int):  # check if we are just doing a lookup, and not slicing
+            if k < 0:
+                raise IndexError(f"cannot get item in type {self.__class__} at negative index {k}")
+            if k > len(self):
+                raise IndexError(f"cannot get item in type {self.__class__}"
+                                 f" at out of bounds index {k}")
+        return super().__getitem__(k)
+
+    def __setitem__(self, k, v):
+        if type(k) == slice:
+            if (k.start is not None and k.start < 0) or (k.stop is not None and k.stop > len(self)):
+                raise IndexError(f"cannot set item in type {self.__class__}"
+                                 f" at out of bounds slice {k} (to {v}, bound: {len(self)})")
+            super().__setitem__(k, [coerce_type_maybe(x, self.__class__.elem_type) for x in v])
+        else:
+            if k < 0:
+                raise IndexError(f"cannot set item in type {self.__class__} at negative index {k} (to {v})")
+            if k > len(self):
+                raise IndexError(f"cannot set item in type {self.__class__}"
+                                 f" at out of bounds index {k} (to {v}, bound: {len(self)})")
+            super().__setitem__(k, coerce_type_maybe(v, self.__class__.elem_type, strict=True))
 
-    def __iter__(self):
-        return iter(self.items)
+    def append(self, v):
+        super().append(coerce_type_maybe(v, self.__class__.elem_type, strict=True))
 
-    def __len__(self):
-        return len(self.items)
+    def __iter__(self) -> Iterator[SSZValue]:
+        return super().__iter__()
 
-    def __eq__(self, other):
-        return self.hash_tree_root() == other.hash_tree_root()
+    def last(self):
+        # be explicit about getting the last item, for the non-python readers, and negative-index safety
+        return self[len(self) - 1]
 
 
-# SSZ BytesN
-# -----------------------------
+class BitElementsType(ElementsType):
+    elem_type: SSZType = boolean
+    length: int
 
 
-def _is_bytes_n_instance_of(a, b):
-    # Other has to be a Bytes derivative class to be a BytesN
-    if not issubclass(b, bytes):
-        return False
-    elif not hasattr(b, 'length'):
-        # BytesN (b) is not an instance of BytesN[X] (a)
-        return False
-    elif not hasattr(a, 'length'):
-        # BytesN[X] (b) is an instance of BytesN (a)
-        return True
-    else:
-        # BytesN[X] (a) is an instance of BytesN[X] (b)
-        return a.length == b.length
+class Bits(BaseList, metaclass=BitElementsType):
+    pass
 
 
-def _is_equal_bytes_n_type(a, b):
-    # Other has to be a Bytes derivative class to be a BytesN
-    if not issubclass(b, bytes):
-        return False
-    elif not hasattr(a, 'length'):
-        if not hasattr(b, 'length'):
-            # BytesN == BytesN
-            return True
-        else:
-            # BytesN != BytesN[X]
-            return False
-    elif not hasattr(b, 'length'):
-        # BytesN[X] != BytesN
+class Bitlist(Bits):
+    @classmethod
+    def is_fixed_size(cls):
         return False
-    else:
-        # BytesN[X] == BytesN[X]
-        return a.length == b.length
 
+    @classmethod
+    def default(cls):
+        return cls()
 
-class BytesNMeta(type):
-    def __new__(cls, class_name, parents, attrs):
-        out = type.__new__(cls, class_name, parents, attrs)
-        if 'length' in attrs:
-            setattr(out, 'length', attrs['length'])
-        out._name = 'BytesN'
-        out.elem_type = byte
-        return out
-
-    def __getitem__(self, n):
-        return self.__class__(self.__name__, (BytesN,), {'length': n})
 
-    def __subclasscheck__(self, sub):
-        return _is_bytes_n_instance_of(self, sub)
+class Bitvector(Bits):
 
-    def __instancecheck__(self, other):
-        return _is_bytes_n_instance_of(self, other.__class__)
+    @classmethod
+    def extract_args(cls, *args):
+        if len(args) == 0:
+            return cls.default()
+        else:
+            return super().extract_args(*args)
 
-    def __eq__(self, other):
-        return _is_equal_bytes_n_type(self, other)
+    @classmethod
+    def value_check(cls, value):
+        # check length limit strictly
+        return len(value) == cls.length and super().value_check(value)
 
-    def __ne__(self, other):
-        return not _is_equal_bytes_n_type(self, other)
+    @classmethod
+    def is_fixed_size(cls):
+        return True
 
-    def __hash__(self):
-        return hash(self.__class__)
-
-
-def parse_bytes(val):
-    if val is None:
-        return None
-    elif isinstance(val, str):
-        # TODO: import from eth-utils instead, and do: hexstr_if_str(to_bytes, val)
-        return None
-    elif isinstance(val, bytes):
-        return val
-    elif isinstance(val, int):
-        return bytes([val])
-    elif isinstance(val, (list, GeneratorType)):
-        return bytes(val)
-    else:
-        return None
-
-
-class BytesN(bytes, metaclass=BytesNMeta):
-    def __new__(cls, *args):
-        if not hasattr(cls, 'length'):
-            return
-        bytesval = None
-        if len(args) == 1:
-            val: Union[bytes, int, str] = args[0]
-            bytesval = parse_bytes(val)
-        elif len(args) > 1:
-            # TODO: each int is 1 byte, check size, create bytesval
-            bytesval = bytes(args)
-
-        if bytesval is None:
-            if cls.length == 0:
-                bytesval = b''
-            else:
-                bytesval = b'\x00' * cls.length
-        if len(bytesval) != cls.length:
-            raise TypeError("BytesN[%d] cannot be initialized with value of %d bytes" % (cls.length, len(bytesval)))
-        return super().__new__(cls, bytesval)
+    @classmethod
+    def default(cls):
+        return cls(0 for _ in range(cls.length))
 
-    def serialize(self):
-        from .ssz_impl import serialize
-        return serialize(self, self.__class__)
 
-    def hash_tree_root(self):
-        from .ssz_impl import hash_tree_root
-        return hash_tree_root(self, self.__class__)
+class List(BaseList):
 
+    @classmethod
+    def default(cls):
+        return cls()
 
-# SSZ Defaults
-# -----------------------------
-def get_zero_value(typ):
-    if is_uint_type(typ):
-        return 0
-    elif is_list_type(typ):
-        return []
-    elif is_bool_type(typ):
+    @classmethod
+    def is_fixed_size(cls):
         return False
-    elif is_vector_type(typ):
-        return typ()
-    elif is_bytesn_type(typ):
-        return typ()
-    elif is_bytes_type(typ):
-        return b''
-    elif is_container_type(typ):
-        return typ(**{f: get_zero_value(t) for f, t in typ.get_fields()})
-    else:
-        raise Exception("Type not supported: {}".format(typ))
-
-
-# Type helpers
-# -----------------------------
-
 
-def infer_type(obj):
-    if is_uint_type(obj.__class__):
-        return obj.__class__
-    elif isinstance(obj, int):
-        return uint64
-    elif isinstance(obj, list):
-        return List[infer_type(obj[0])]
-    elif isinstance(obj, (Vector, Container, bool, BytesN, bytes)):
-        return obj.__class__
-    else:
-        raise Exception("Unknown type for {}".format(obj))
 
+class Vector(BaseList):
 
-def infer_input_type(fn):
-    """
-    Decorator to run infer_type on the obj if typ argument is None
-    """
-    def infer_helper(obj, typ=None, **kwargs):
-        if typ is None:
-            typ = infer_type(obj)
-        return fn(obj, typ=typ, **kwargs)
-    return infer_helper
-
+    @classmethod
+    def value_check(cls, value):
+        # check length limit strictly
+        return len(value) == cls.length and super().value_check(value)
 
-def is_bool_type(typ):
-    """
-    Check if the given type is a bool.
-    """
-    if hasattr(typ, '__supertype__'):
-        typ = typ.__supertype__
-    return isinstance(typ, type) and issubclass(typ, bool)
+    @classmethod
+    def default(cls):
+        return cls(cls.elem_type.default() for _ in range(cls.length))
 
+    @classmethod
+    def is_fixed_size(cls):
+        return cls.elem_type.is_fixed_size()
+
+    def append(self, v):
+        # Deep-copy and other utils like to change the internals during work.
+        # Only complain if we had the right size.
+        if len(self) == self.__class__.length:
+            raise Exception("cannot modify vector length")
+        else:
+            super().append(v)
 
-def is_list_type(typ):
-    """
-    Check if the given type is a list.
-    """
-    return get_origin(typ) is List or get_origin(typ) is list
+    def pop(self, *args):
+        raise Exception("cannot modify vector length")
 
 
-def is_bytes_type(typ):
-    """
-    Check if the given type is a ``bytes``.
-    """
-    # Do not accept subclasses of bytes here, to avoid confusion with BytesN
-    return typ == bytes
+class BytesType(ElementsType):
+    elem_type: SSZType = byte
+    length: int
 
 
-def is_bytesn_type(typ):
-    """
-    Check if the given type is a BytesN.
-    """
-    return isinstance(typ, type) and issubclass(typ, BytesN)
+class BaseBytes(bytes, Elements, metaclass=BytesType):
 
+    def __new__(cls, *args) -> "BaseBytes":
+        extracted_val = cls.extract_args(*args)
+        if not cls.value_check(extracted_val):
+            raise ValueError(f"Bad input for class {cls}: {extracted_val}")
+        return super().__new__(cls, extracted_val)
 
-def is_list_kind(typ):
-    """
-    Check if the given type is a kind of list. Can be bytes.
-    """
-    return is_list_type(typ) or is_bytes_type(typ)
+    @classmethod
+    def extract_args(cls, *args):
+        x = args
+        if len(x) == 1 and isinstance(x[0], (GeneratorType, bytes)):
+            x = x[0]
+        if isinstance(x, bytes):  # Includes BytesLike
+            return x
+        else:
+            return bytes(x)  # E.g. GeneratorType put into bytes.
 
+    @classmethod
+    def value_check(cls, value):
+        # check type and virtual length limit
+        return isinstance(value, bytes) and len(value) <= cls.length
 
-def is_vector_type(typ):
-    """
-    Check if the given type is a vector.
-    """
-    return isinstance(typ, type) and issubclass(typ, Vector)
+    def __str__(self):
+        cls = self.__class__
+        return f"{cls.__name__}[{cls.length}]: {self.hex()}"
 
 
-def is_vector_kind(typ):
-    """
-    Check if the given type is a kind of vector. Can be BytesN.
-    """
-    return is_vector_type(typ) or is_bytesn_type(typ)
+class Bytes(BaseBytes):
 
+    @classmethod
+    def default(cls):
+        return b''
 
-def is_container_type(typ):
-    """
-    Check if the given type is a container.
-    """
-    return isinstance(typ, type) and issubclass(typ, Container)
+    @classmethod
+    def is_fixed_size(cls):
+        return False
 
 
-T = TypeVar('T')
-L = TypeVar('L')
+class BytesN(BaseBytes):
 
+    @classmethod
+    def extract_args(cls, *args):
+        if len(args) == 0:
+            return cls.default()
+        else:
+            return super().extract_args(*args)
 
-def read_list_elem_type(list_typ: Type[List[T]]) -> T:
-    if list_typ.__args__ is None or len(list_typ.__args__) != 1:
-        raise TypeError("Supplied list-type is invalid, no element type found.")
-    return list_typ.__args__[0]
+    @classmethod
+    def default(cls):
+        return b'\x00' * cls.length
 
+    @classmethod
+    def value_check(cls, value):
+        # check length limit strictly
+        return len(value) == cls.length and super().value_check(value)
 
-def read_vector_elem_type(vector_typ: Type[Vector[T, L]]) -> T:
-    return vector_typ.elem_type
+    @classmethod
+    def is_fixed_size(cls):
+        return True
 
 
-def read_elem_type(typ):
-    if typ == bytes or (isinstance(typ, type) and issubclass(typ, bytes)):  # bytes or bytesN
-        return byte
-    elif is_list_type(typ):
-        return read_list_elem_type(typ)
-    elif is_vector_type(typ):
-        return read_vector_elem_type(typ)
-    else:
-        raise TypeError("Unexpected type: {}".format(typ))
+# Helpers for common BytesN types
+Bytes1: BytesType = BytesN[1]
+Bytes4: BytesType = BytesN[4]
+Bytes8: BytesType = BytesN[8]
+Bytes32: BytesType = BytesN[32]
+Bytes48: BytesType = BytesN[48]
+Bytes96: BytesType = BytesN[96]
diff --git a/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_impl.py b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_impl.py
new file mode 100644
index 0000000000..637d9c5c4a
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_impl.py
@@ -0,0 +1,264 @@
+from typing import Iterable
+from .ssz_impl import serialize, hash_tree_root
+from .ssz_typing import (
+    bit, boolean, Container, List, Vector, Bytes, BytesN,
+    Bitlist, Bitvector,
+    uint8, uint16, uint32, uint64, uint256, byte
+)
+from ..hash_function import hash as bytes_hash
+
+import pytest
+
+
+class EmptyTestStruct(Container):
+    pass
+
+
+class SingleFieldTestStruct(Container):
+    A: byte
+
+
+class SmallTestStruct(Container):
+    A: uint16
+    B: uint16
+
+
+class FixedTestStruct(Container):
+    A: uint8
+    B: uint64
+    C: uint32
+
+
+class VarTestStruct(Container):
+    A: uint16
+    B: List[uint16, 1024]
+    C: uint8
+
+
+class ComplexTestStruct(Container):
+    A: uint16
+    B: List[uint16, 128]
+    C: uint8
+    D: Bytes[256]
+    E: VarTestStruct
+    F: Vector[FixedTestStruct, 4]
+    G: Vector[VarTestStruct, 2]
+
+
+sig_test_data = [0 for i in range(96)]
+for k, v in {0: 1, 32: 2, 64: 3, 95: 0xff}.items():
+    sig_test_data[k] = v
+
+
+def chunk(hex: str) -> str:
+    return (hex + ("00" * 32))[:64]  # just pad on the right, to 32 bytes (64 hex chars)
+
+
+def h(a: str, b: str) -> str:
+    return bytes_hash(bytes.fromhex(a) + bytes.fromhex(b)).hex()
+
+
+# zero hashes, as hex strings, for padding merkle branches to the expected depth
+zero_hashes = [chunk("")]
+for layer in range(1, 32):
+    zero_hashes.append(h(zero_hashes[layer - 1], zero_hashes[layer - 1]))
+
+
+def merge(a: str, branch: Iterable[str]) -> str:
+    """
+    Merge (out on left, branch on right) leaf a with branch items, branch is from bottom to top.
+    """
+    out = a
+    for b in branch:
+        out = h(out, b)
+    return out
+
+
+test_data = [
+    ("bit F", bit(False), "00", chunk("00")),
+    ("bit T", bit(True), "01", chunk("01")),
+    ("boolean F", boolean(False), "00", chunk("00")),
+    ("boolean T", boolean(True), "01", chunk("01")),
+    ("bitvector TTFTFTFF", Bitvector[8](1, 1, 0, 1, 0, 1, 0, 0), "2b", chunk("2b")),
+    ("bitlist TTFTFTFF", Bitlist[8](1, 1, 0, 1, 0, 1, 0, 0), "2b01", h(chunk("2b"), chunk("08"))),
+    ("bitvector FTFT", Bitvector[4](0, 1, 0, 1), "0a", chunk("0a")),
+    ("bitlist FTFT", Bitlist[4](0, 1, 0, 1), "1a", h(chunk("0a"), chunk("04"))),
+    ("bitvector FTF", Bitvector[3](0, 1, 0), "02", chunk("02")),
+    ("bitlist FTF", Bitlist[3](0, 1, 0), "0a", h(chunk("02"), chunk("03"))),
+    ("bitvector TFTFFFTTFT", Bitvector[10](1, 0, 1, 0, 0, 0, 1, 1, 0, 1), "c502", chunk("c502")),
+    ("bitlist TFTFFFTTFT", Bitlist[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1), "c506", h(chunk("c502"), chunk("0A"))),
+    ("bitvector TFTFFFTTFTFFFFTT", Bitvector[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1),
+     "c5c2", chunk("c5c2")),
+    ("bitlist TFTFFFTTFTFFFFTT", Bitlist[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1),
+     "c5c201", h(chunk("c5c2"), chunk("10"))),
+    ("long bitvector", Bitvector[512](1 for i in range(512)),
+     "ff" * 64, h("ff" * 32, "ff" * 32)),
+    ("long bitlist", Bitlist[512](1),
+     "03", h(h(chunk("01"), chunk("")), chunk("01"))),
+    ("long bitlist", Bitlist[512](1 for i in range(512)),
+     "ff" * 64 + "01", h(h("ff" * 32, "ff" * 32), chunk("0002"))),
+    ("odd bitvector", Bitvector[513](1 for i in range(513)),
+     "ff" * 64 + "01", h(h("ff" * 32, "ff" * 32), h(chunk("01"), chunk("")))),
+    ("odd bitlist", Bitlist[513](1 for i in range(513)),
+     "ff" * 64 + "03", h(h(h("ff" * 32, "ff" * 32), h(chunk("01"), chunk(""))), chunk("0102"))),
+    ("uint8 00", uint8(0x00), "00", chunk("00")),
+    ("uint8 01", uint8(0x01), "01", chunk("01")),
+    ("uint8 ab", uint8(0xab), "ab", chunk("ab")),
+    ("byte 00", byte(0x00), "00", chunk("00")),
+    ("byte 01", byte(0x01), "01", chunk("01")),
+    ("byte ab", byte(0xab), "ab", chunk("ab")),
+    ("uint16 0000", uint16(0x0000), "0000", chunk("0000")),
+    ("uint16 abcd", uint16(0xabcd), "cdab", chunk("cdab")),
+    ("uint32 00000000", uint32(0x00000000), "00000000", chunk("00000000")),
+    ("uint32 01234567", uint32(0x01234567), "67452301", chunk("67452301")),
+    ("small (4567, 0123)", SmallTestStruct(A=0x4567, B=0x0123), "67452301", h(chunk("6745"), chunk("2301"))),
+    ("small [4567, 0123]::2", Vector[uint16, 2](uint16(0x4567), uint16(0x0123)), "67452301", chunk("67452301")),
+    ("uint32 01234567", uint32(0x01234567), "67452301", chunk("67452301")),
+    ("uint64 0000000000000000", uint64(0x00000000), "0000000000000000", chunk("0000000000000000")),
+    ("uint64 0123456789abcdef", uint64(0x0123456789abcdef), "efcdab8967452301", chunk("efcdab8967452301")),
+    ("sig", BytesN[96](*sig_test_data),
+     "0100000000000000000000000000000000000000000000000000000000000000"
+     "0200000000000000000000000000000000000000000000000000000000000000"
+     "03000000000000000000000000000000000000000000000000000000000000ff",
+     h(h(chunk("01"), chunk("02")),
+       h("03000000000000000000000000000000000000000000000000000000000000ff", chunk("")))),
+    ("emptyTestStruct", EmptyTestStruct(), "", chunk("")),
+    ("singleFieldTestStruct", SingleFieldTestStruct(A=0xab), "ab", chunk("ab")),
+    ("uint16 list", List[uint16, 32](uint16(0xaabb), uint16(0xc0ad), uint16(0xeeff)), "bbaaadc0ffee",
+     h(h(chunk("bbaaadc0ffee"), chunk("")), chunk("03000000"))  # max length: 32 * 2 = 64 bytes = 2 chunks
+     ),
+    ("uint32 list", List[uint32, 128](uint32(0xaabb), uint32(0xc0ad), uint32(0xeeff)), "bbaa0000adc00000ffee0000",
+     # max length: 128 * 4 = 512 bytes = 16 chunks
+     h(merge(chunk("bbaa0000adc00000ffee0000"), zero_hashes[0:4]), chunk("03000000"))
+     ),
+    ("uint256 list", List[uint256, 32](uint256(0xaabb), uint256(0xc0ad), uint256(0xeeff)),
+     "bbaa000000000000000000000000000000000000000000000000000000000000"
+     "adc0000000000000000000000000000000000000000000000000000000000000"
+     "ffee000000000000000000000000000000000000000000000000000000000000",
+     h(merge(h(h(chunk("bbaa"), chunk("adc0")), h(chunk("ffee"), chunk(""))), zero_hashes[2:5]), chunk("03000000"))
+     ),
+    ("uint256 list long", List[uint256, 128](i for i in range(1, 20)),
+     "".join([i.to_bytes(length=32, byteorder='little').hex() for i in range(1, 20)]),
+     h(merge(
+         h(
+             h(
+                 h(
+                     h(h(chunk("01"), chunk("02")), h(chunk("03"), chunk("04"))),
+                     h(h(chunk("05"), chunk("06")), h(chunk("07"), chunk("08"))),
+                 ),
+                 h(
+                     h(h(chunk("09"), chunk("0a")), h(chunk("0b"), chunk("0c"))),
+                     h(h(chunk("0d"), chunk("0e")), h(chunk("0f"), chunk("10"))),
+                 )
+             ),
+             h(
+                 h(
+                     h(h(chunk("11"), chunk("12")), h(chunk("13"), chunk(""))),
+                     zero_hashes[2]
+                 ),
+                 zero_hashes[3]
+             )
+         ),
+         zero_hashes[5:7]), chunk("13000000"))  # 128 chunks = 7 deep
+     ),
+    ("fixedTestStruct", FixedTestStruct(A=0xab, B=0xaabbccdd00112233, C=0x12345678), "ab33221100ddccbbaa78563412",
+     h(h(chunk("ab"), chunk("33221100ddccbbaa")), h(chunk("78563412"), chunk("")))),
+    ("varTestStruct nil", VarTestStruct(A=0xabcd, C=0xff), "cdab07000000ff",
+     h(h(chunk("cdab"), h(zero_hashes[6], chunk("00000000"))), h(chunk("ff"), chunk("")))),
+    ("varTestStruct empty", VarTestStruct(A=0xabcd, B=List[uint16, 1024](), C=0xff), "cdab07000000ff",
+     h(h(chunk("cdab"), h(zero_hashes[6], chunk("00000000"))), h(chunk("ff"), chunk("")))),  # log2(1024*2/32)= 6 deep
+    ("varTestStruct some", VarTestStruct(A=0xabcd, B=List[uint16, 1024](1, 2, 3), C=0xff),
+     "cdab07000000ff010002000300",
+     h(
+         h(
+             chunk("cdab"),
+             h(
+                 merge(
+                     chunk("010002000300"),
+                     zero_hashes[0:6]
+                 ),
+                 chunk("03000000")  # length mix in
+             )
+         ),
+         h(chunk("ff"), chunk(""))
+    )),
+    ("complexTestStruct",
+     ComplexTestStruct(
+         A=0xaabb,
+         B=List[uint16, 128](0x1122, 0x3344),
+         C=0xff,
+         D=Bytes[256](b"foobar"),
+         E=VarTestStruct(A=0xabcd, B=List[uint16, 1024](1, 2, 3), C=0xff),
+         F=Vector[FixedTestStruct, 4](
+             FixedTestStruct(A=0xcc, B=0x4242424242424242, C=0x13371337),
+             FixedTestStruct(A=0xdd, B=0x3333333333333333, C=0xabcdabcd),
+             FixedTestStruct(A=0xee, B=0x4444444444444444, C=0x00112233),
+             FixedTestStruct(A=0xff, B=0x5555555555555555, C=0x44556677)),
+         G=Vector[VarTestStruct, 2](
+             VarTestStruct(A=0xdead, B=List[uint16, 1024](1, 2, 3), C=0x11),
+             VarTestStruct(A=0xbeef, B=List[uint16, 1024](4, 5, 6), C=0x22)),
+     ),
+     "bbaa"
+     "47000000"  # offset of B, []uint16
+     "ff"
+     "4b000000"  # offset of foobar
+     "51000000"  # offset of E
+     "cc424242424242424237133713"
+     "dd3333333333333333cdabcdab"
+     "ee444444444444444433221100"
+     "ff555555555555555577665544"
+     "5e000000"  # pointer to G
+     "22114433"  # contents of B
+     "666f6f626172"  # foobar
+     "cdab07000000ff010002000300"  # contents of E
+     "08000000" "15000000"  # [start G]: local offsets of [2]varTestStruct
+     "adde0700000011010002000300"
+     "efbe0700000022040005000600",
+     h(
+         h(
+             h(  # A and B
+                 chunk("bbaa"),
+                 h(merge(chunk("22114433"), zero_hashes[0:3]), chunk("02000000"))  # 2*128/32 = 8 chunks
+             ),
+             h(  # C and D
+                 chunk("ff"),
+                 h(merge(chunk("666f6f626172"), zero_hashes[0:3]), chunk("06000000"))  # 256/32 = 8 chunks
+             )
+         ),
+         h(
+             h(  # E and F
+                 h(h(chunk("cdab"), h(merge(chunk("010002000300"), zero_hashes[0:6]), chunk("03000000"))),
+                   h(chunk("ff"), chunk(""))),
+                 h(
+                     h(
+                         h(h(chunk("cc"), chunk("4242424242424242")), h(chunk("37133713"), chunk(""))),
+                         h(h(chunk("dd"), chunk("3333333333333333")), h(chunk("cdabcdab"), chunk(""))),
+                     ),
+                     h(
+                         h(h(chunk("ee"), chunk("4444444444444444")), h(chunk("33221100"), chunk(""))),
+                         h(h(chunk("ff"), chunk("5555555555555555")), h(chunk("77665544"), chunk(""))),
+                     ),
+                 )
+             ),
+             h(  # G and padding
+                 h(
+                     h(h(chunk("adde"), h(merge(chunk("010002000300"), zero_hashes[0:6]), chunk("03000000"))),
+                       h(chunk("11"), chunk(""))),
+                     h(h(chunk("efbe"), h(merge(chunk("040005000600"), zero_hashes[0:6]), chunk("03000000"))),
+                       h(chunk("22"), chunk(""))),
+                 ),
+                 chunk("")
+             )
+         )
+     ))
+]
+
+
+@pytest.mark.parametrize("name, value, serialized, _", test_data)
+def test_serialize(name, value, serialized, _):
+    assert serialize(value) == bytes.fromhex(serialized)
+
+
+@pytest.mark.parametrize("name, value, _, root", test_data)
+def test_hash_tree_root(name, value, _, root):
+    assert hash_tree_root(value) == bytes.fromhex(root)
diff --git a/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py
new file mode 100644
index 0000000000..f746a29c9b
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py
@@ -0,0 +1,233 @@
+from .ssz_typing import (
+    SSZValue, SSZType, BasicValue, BasicType, Series, ElementsType,
+    Elements, bit, boolean, Container, List, Vector, Bytes, BytesN,
+    byte, uint, uint8, uint16, uint32, uint64, uint128, uint256,
+    Bytes32, Bytes48
+)
+
+
+def expect_value_error(fn, msg):
+    try:
+        fn()
+        raise AssertionError(msg)
+    except ValueError:
+        pass
+
+
+def test_subclasses():
+    for u in [uint, uint8, uint16, uint32, uint64, uint128, uint256]:
+        assert issubclass(u, uint)
+        assert issubclass(u, int)
+        assert issubclass(u, BasicValue)
+        assert issubclass(u, SSZValue)
+        assert isinstance(u, SSZType)
+        assert isinstance(u, BasicType)
+    assert issubclass(boolean, BasicValue)
+    assert isinstance(boolean, BasicType)
+
+    for c in [Container, List, Vector, Bytes, BytesN]:
+        assert issubclass(c, Series)
+        assert issubclass(c, SSZValue)
+        assert isinstance(c, SSZType)
+        assert not issubclass(c, BasicValue)
+        assert not isinstance(c, BasicType)
+
+    for c in [List, Vector, Bytes, BytesN]:
+        assert issubclass(c, Elements)
+        assert isinstance(c, ElementsType)
+
+
+def test_basic_instances():
+    for u in [uint, uint8, byte, uint16, uint32, uint64, uint128, uint256]:
+        v = u(123)
+        assert isinstance(v, uint)
+        assert isinstance(v, int)
+        assert isinstance(v, BasicValue)
+        assert isinstance(v, SSZValue)
+
+    assert isinstance(boolean(True), BasicValue)
+    assert isinstance(boolean(False), BasicValue)
+    assert isinstance(bit(True), boolean)
+    assert isinstance(bit(False), boolean)
+
+
+def test_basic_value_bounds():
+    max = {
+        boolean: 2 ** 1,
+        bit: 2 ** 1,
+        uint8: 2 ** (8 * 1),
+        byte: 2 ** (8 * 1),
+        uint16: 2 ** (8 * 2),
+        uint32: 2 ** (8 * 4),
+        uint64: 2 ** (8 * 8),
+        uint128: 2 ** (8 * 16),
+        uint256: 2 ** (8 * 32),
+    }
+    for k, v in max.items():
+        # this should work
+        assert k(v - 1) == v - 1
+        # but we do not allow overflows
+        expect_value_error(lambda: k(v), "no overflows allowed")
+
+    for k, _ in max.items():
+        # this should work
+        assert k(0) == 0
+        # but we do not allow underflows
+        expect_value_error(lambda: k(-1), "no underflows allowed")
+
+
+def test_container():
+    class Foo(Container):
+        a: uint8
+        b: uint32
+
+    empty = Foo()
+    assert empty.a == uint8(0)
+    assert empty.b == uint32(0)
+
+    assert issubclass(Foo, Container)
+    assert issubclass(Foo, SSZValue)
+    assert issubclass(Foo, Series)
+
+    assert Foo.is_fixed_size()
+    x = Foo(a=uint8(123), b=uint32(45))
+    assert x.a == 123
+    assert x.b == 45
+    assert isinstance(x.a, uint8)
+    assert isinstance(x.b, uint32)
+    assert x.type().is_fixed_size()
+
+    class Bar(Container):
+        a: uint8
+        b: List[uint8, 1024]
+
+    assert not Bar.is_fixed_size()
+
+    y = Bar(a=123, b=List[uint8, 1024](uint8(1), uint8(2)))
+    assert y.a == 123
+    assert isinstance(y.a, uint8)
+    assert len(y.b) == 2
+    assert isinstance(y.a, uint8)
+    assert isinstance(y.b, List[uint8, 1024])
+    assert not y.type().is_fixed_size()
+    assert y.b[0] == 1
+    v: List = y.b
+    assert v.type().elem_type == uint8
+    assert v.type().length == 1024
+
+    y.a = 42
+    try:
+        y.a = 256  # out of bounds
+        assert False
+    except ValueError:
+        pass
+
+    try:
+        y.a = uint16(255)  # within bounds, wrong type
+        assert False
+    except ValueError:
+        pass
+
+    try:
+        y.not_here = 5
+        assert False
+    except AttributeError:
+        pass
+
+
+def test_list():
+    typ = List[uint64, 128]
+    assert issubclass(typ, List)
+    assert issubclass(typ, SSZValue)
+    assert issubclass(typ, Series)
+    assert issubclass(typ, Elements)
+    assert isinstance(typ, ElementsType)
+
+    assert not typ.is_fixed_size()
+
+    assert len(typ()) == 0  # empty
+    assert len(typ(uint64(0))) == 1  # single arg
+    assert len(typ(uint64(i) for i in range(10))) == 10  # generator
+    assert len(typ(uint64(0), uint64(1), uint64(2))) == 3  # args
+    assert isinstance(typ(1, 2, 3, 4, 5)[4], uint64)  # coercion
+    assert isinstance(typ(i for i in range(10))[9], uint64)  # coercion in generator
+
+    v = typ(uint64(0))
+    v[0] = uint64(123)
+    assert v[0] == 123
+    assert isinstance(v[0], uint64)
+
+    assert isinstance(v, List)
+    assert isinstance(v, List[uint64, 128])
+    assert isinstance(v, typ)
+    assert isinstance(v, SSZValue)
+    assert isinstance(v, Series)
+    assert issubclass(v.type(), Elements)
+    assert isinstance(v.type(), ElementsType)
+
+    assert len(typ([i for i in range(10)])) == 10  # cast py list to SSZ list
+
+    foo = List[uint32, 128](0 for i in range(128))
+    foo[0] = 123
+    foo[1] = 654
+    foo[127] = 222
+    assert sum(foo) == 999
+    try:
+        foo[3] = 2 ** 32  # out of bounds
+    except ValueError:
+        pass
+
+    try:
+        foo[3] = uint64(2 ** 32 - 1)  # within bounds, wrong type
+        assert False
+    except ValueError:
+        pass
+
+    try:
+        foo[128] = 100
+        assert False
+    except IndexError:
+        pass
+
+    try:
+        foo[-1] = 100  # valid in normal python lists
+        assert False
+    except IndexError:
+        pass
+
+    try:
+        foo[128] = 100  # out of bounds
+        assert False
+    except IndexError:
+        pass
+
+
+def test_bytesn_subclass():
+    assert isinstance(BytesN[32](b'\xab' * 32), Bytes32)
+    assert not isinstance(BytesN[32](b'\xab' * 32), Bytes48)
+    assert issubclass(BytesN[32](b'\xab' * 32).type(), Bytes32)
+    assert issubclass(BytesN[32], Bytes32)
+
+    class Hash(Bytes32):
+        pass
+
+    assert isinstance(Hash(b'\xab' * 32), Bytes32)
+    assert not isinstance(Hash(b'\xab' * 32), Bytes48)
+    assert issubclass(Hash(b'\xab' * 32).type(), Bytes32)
+    assert issubclass(Hash, Bytes32)
+
+    assert not issubclass(Bytes48, Bytes32)
+
+    assert len(Bytes32() + Bytes48()) == 80
+
+
+def test_uint_math():
+    assert uint8(0) + uint8(uint32(16)) == uint8(16)  # allow explicit casting to make invalid addition valid
+
+    expect_value_error(lambda: uint8(0) - uint8(1), "no underflows allowed")
+    expect_value_error(lambda: uint8(1) + uint8(255), "no overflows allowed")
+    expect_value_error(lambda: uint8(0) + 256, "no overflows allowed")
+    expect_value_error(lambda: uint8(42) + uint32(123), "no mixed types")
+    expect_value_error(lambda: uint32(42) + uint8(123), "no mixed types")
+
+    assert type(uint32(1234) + 56) == uint32
diff --git a/test_libs/pyspec/eth2spec/utils/test_merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/test_merkle_minimal.py
new file mode 100644
index 0000000000..f1ed768e63
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/utils/test_merkle_minimal.py
@@ -0,0 +1,59 @@
+import pytest
+from .merkle_minimal import zerohashes, merkleize_chunks, get_merkle_root
+from .hash_function import hash
+
+
+def h(a: bytes, b: bytes) -> bytes:
+    return hash(a + b)
+
+
+def e(v: int) -> bytes:
+    return v.to_bytes(length=32, byteorder='little')
+
+
+def z(i: int) -> bytes:
+    return zerohashes[i]
+
+
+cases = [
+    (0, 0, 1, z(0)),
+    (0, 1, 1, e(0)),
+    (1, 0, 2, h(z(0), z(0))),
+    (1, 1, 2, h(e(0), z(0))),
+    (1, 2, 2, h(e(0), e(1))),
+    (2, 0, 4, h(h(z(0), z(0)), z(1))),
+    (2, 1, 4, h(h(e(0), z(0)), z(1))),
+    (2, 2, 4, h(h(e(0), e(1)), z(1))),
+    (2, 3, 4, h(h(e(0), e(1)), h(e(2), z(0)))),
+    (2, 4, 4, h(h(e(0), e(1)), h(e(2), e(3)))),
+    (3, 0, 8, h(h(h(z(0), z(0)), z(1)), z(2))),
+    (3, 1, 8, h(h(h(e(0), z(0)), z(1)), z(2))),
+    (3, 2, 8, h(h(h(e(0), e(1)), z(1)), z(2))),
+    (3, 3, 8, h(h(h(e(0), e(1)), h(e(2), z(0))), z(2))),
+    (3, 4, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), z(2))),
+    (3, 5, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1)))),
+    (3, 6, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0))))),
+    (3, 7, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0))))),
+    (3, 8, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7))))),
+    (4, 0, 16, h(h(h(h(z(0), z(0)), z(1)), z(2)), z(3))),
+    (4, 1, 16, h(h(h(h(e(0), z(0)), z(1)), z(2)), z(3))),
+    (4, 2, 16, h(h(h(h(e(0), e(1)), z(1)), z(2)), z(3))),
+    (4, 3, 16, h(h(h(h(e(0), e(1)), h(e(2), z(0))), z(2)), z(3))),
+    (4, 4, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), z(2)), z(3))),
+    (4, 5, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1))), z(3))),
+    (4, 6, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0)))), z(3))),
+    (4, 7, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0)))), z(3))),
+    (4, 8, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), z(3))),
+    (4, 9, 16,
+     h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), h(h(h(e(8), z(0)), z(1)), z(2)))),
+]
+
+
+@pytest.mark.parametrize(
+    'depth,count,pow2,value',
+    cases,
+)
+def test_merkleize_chunks_and_get_merkle_root(depth, count, pow2, value):
+    chunks = [e(i) for i in range(count)]
+    assert merkleize_chunks(chunks, pad_to=pow2) == value
+    assert get_merkle_root(chunks, pad_to=pow2) == value
diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt
index edd1416504..b5229ae20f 100644
--- a/test_libs/pyspec/requirements-testing.txt
+++ b/test_libs/pyspec/requirements-testing.txt
@@ -1,5 +1,7 @@
 -r requirements.txt
-pytest>=3.6,<3.7
+pytest>=4.4
 ../config_helpers
 flake8==3.7.7
+mypy==0.701
 pytest-cov
+pytest-xdist
diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt
index 2de2aa84bb..83197af9c8 100644
--- a/test_libs/pyspec/requirements.txt
+++ b/test_libs/pyspec/requirements.txt
@@ -2,5 +2,5 @@ eth-utils>=1.3.0,<2
 eth-typing>=2.1.0,<3.0.0
 pycryptodome==3.7.3
 py_ecc>=1.6.0
-typing_inspect==0.4.0
+dataclasses==0.6
 ssz==0.1.0a10
diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py
index 3856640abf..d8d54eab74 100644
--- a/test_libs/pyspec/setup.py
+++ b/test_libs/pyspec/setup.py
@@ -9,7 +9,7 @@
         "eth-typing>=2.1.0,<3.0.0",
         "pycryptodome==3.7.3",
         "py_ecc>=1.6.0",
-        "typing_inspect==0.4.0",
-        "ssz==0.1.0a10"
+        "ssz==0.1.0a10",
+        "dataclasses==0.6",
     ]
 )