forked from stellar/stellar-core
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathstellar-core_example.cfg
670 lines (581 loc) · 28.2 KB
/
stellar-core_example.cfg
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
#
# This file gives details of the various configuration parameters you can set
# when running stellar-core. You will need to edit to fit your own set up.
#
# This is a TOML file. See https://github.com/toml-lang/toml for syntax.
###########################
## General admin settings
# LOG_FILE_PATH (string) default "stellar-core-{datetime:%Y-%m-%d_%H-%M-%S}.log"
# Path to the file you want stellar-core to write its log to.
# You can set to "" for no log file.
LOG_FILE_PATH="stellar-core-{datetime:%Y-%m-%d_%H-%M-%S}.log"
# LOG_COLOR (boolean) default false
# Whether to highlight stdout log messages with ANSI terminal colors.
LOG_COLOR=false
# BUCKET_DIR_PATH (string) default "buckets"
# Specifies the directory where stellar-core should store the bucket list.
# This will get written to a lot and will grow as the size of the ledger grows.
BUCKET_DIR_PATH="buckets"
# DATABASE (string) default "sqlite3://:memory:"
# Sets the DB connection string for SOCI.
# Defaults to an in memory database.
# If using sqlite, a string like:
#
# "sqlite3://path/to/dbname.db"
#
# alternatively, if using postgresql, a string like:
#
# "postgresql://dbname=stellar user=xxxx password=yyyy host=10.0.x.y"
#
# taking any combination of parameters from:
#
# http://www.postgresql.org/docs/devel/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
#
DATABASE="sqlite3://stellar.db"
# Data layer cache configuration
# - ENTRY_CACHE_SIZE controls the maximum number of LedgerEntry objects
# that will be stored in the cache (default 4096)
# - PREFETCH_BATCH_SIZE determines batch size for bulk loads used for
# prefetching
ENTRY_CACHE_SIZE=100000
PREFETCH_BATCH_SIZE=1000
# HTTP_PORT (integer) default 11626
# What port stellar-core listens for commands on.
# If set to 0, disable HTTP interface entirely
HTTP_PORT=11626
# PUBLIC_HTTP_PORT (true or false) default false
# If false you only accept stellar commands from localhost.
# Do not set to true and expose the port to the open internet. This will allow
# random people to run stellar commands on your server. (such as `stop`)
PUBLIC_HTTP_PORT=false
# Maximum number of simultaneous HTTP clients
HTTP_MAX_CLIENT=128
# COMMANDS (list of strings) default is empty
# List of commands to run on startup.
# Right now only setting log levels really makes sense.
COMMANDS=[
"ll?level=info&partition=Herder"
]
# convenience mapping of common names to node IDs. The common names can be used
# in the .cfg. `$common_name`. If set, they will also appear in your logs
# instead of the less friendly nodeID.
NODE_NAMES=[
"GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza",
"GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000"
]
###########################
## Configure which network this instance should talk to
NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015"
###########################
## Overlay configuration
# PEER_PORT (Integer) defaults to 11625
# The port other instances of stellar-core can connect to you on.
PEER_PORT=11625
# TARGET_PEER_CONNECTIONS (Integer) default 8
# This controls how aggressively the server will connect to other peers.
# It will send outbound connection attempts until it is at this
# number of outbound peer connections.
TARGET_PEER_CONNECTIONS=8
# MAX_ADDITIONAL_PEER_CONNECTIONS (Integer) default -1
# Numbers of peers allowed to make inbound connection to this instance
# Setting this too low will result in peers stranded out of the network
# -1: use TARGET_PEER_CONNECTIONS*8 as value for this field
MAX_ADDITIONAL_PEER_CONNECTIONS=-1
# MAX_PENDING_CONNECTIONS (Integer) default 500
# Maximum number of pending (non authenticated) connections to this server.
# This value is split between inbound and outbound connections in the same
# proportion as MAX_ADDITIONAL_PEER_CONNECTIONS is to TARGET_PEER_CONNECTIONS.
# This value may be additionally capped by OS limits of open connections.
# Additionally, 2 more inbound connections are allowed if coming from
# preferred peers.
MAX_PENDING_CONNECTIONS=500
# PEER_AUTHENTICATION_TIMEOUT (Integer) default 2
# This server will drop a peer that does not authenticate itself during that
# time.
PEER_AUTHENTICATION_TIMEOUT=2
# PEER_TIMEOUT (Integer) default 30
# This server will drop a peer that does not send or receive anything during that
# time when authenticated.
PEER_TIMEOUT=30
# PEER_STRAGGLER_TIMEOUT (Integer) default 120
# This server will drop a peer that does not drain its outgoing queue during that
# time when authenticated.
PEER_STRAGGLER_TIMEOUT=120
# MAX_BATCH_WRITE_COUNT (Integer) default 1024
# How many messages can this server send at once to a peer
MAX_BATCH_WRITE_COUNT=1024
# MAX_BATCH_WRITE_BYTES (Integer) default 1048576 (1 Megabyte)
# How many bytes can this server send at once to a peer
MAX_BATCH_WRITE_BYTES=1048576
# FLOOD_OP_RATE_PER_LEDGER (Floating point) default 1.0
# Used to derive how many operations get flooded per ledger
# FLOOD_OP_RATE_PER_LEDGER*<maximum number of operations per ledger>
FLOOD_OP_RATE_PER_LEDGER = 1.0
# FLOOD_TX_PERIOD_MS (Integer) default 200
# Time in milliseconds between transaction flood events
# Transaction flooding is delayed and governed by
# FLOOD_OP_RATE_PER_LEDGER so that the target rate is met on
# a per ledger basis
FLOOD_TX_PERIOD_MS=200
# FLOOD_ARB_TX_BASE_ALLOWANCE (Integer) default 5
# Number of cyclical path-payments (arbitrage attempts) to flood per
# asset pair, per flood period, before applying the damping function.
# Set to -1 to disable traffic damping on arbitrage traffic.
FLOOD_ARB_TX_BASE_ALLOWANCE = 5
# FLOOD_ARB_TX_DAMPING_FACTOR (floating point) default 0.8
# Parameter > 0.0 and <= 1.0 that controls intensity of geometric
# damping of cyclical path-payments (arbitrage attempts). Higher
# numbers make for more forceful damping.
FLOOD_ARB_TX_DAMPING_FACTOR = 0.8
# PREFERRED_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# This server will try to always stay connected to the other peers on this list.
PREFERRED_PEERS=["127.0.0.1:7000","127.0.0.1:8000"]
# PREFERRED_PEER_KEYS (list of strings) default is empty
# These are public key identities that this server will treat as preferred
# when connecting, similar to the PREFERRED_PEERS list.
# can use a name already defined in the .cfg
PREFERRED_PEER_KEYS=[
"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
"GBDOAYUPGQCPLJCP2HYJQ4W3ADODJFZISHRBQTQB7SFVR4BRUX46RYIP optional_common_name",
"$eliza"]
# PREFERRED_PEERS_ONLY (boolean) default is false
# When set to true, this peer will only connect to PREFERRED_PEERS and will
# only accept connections from PREFERRED_PEERS or PREFERRED_PEER_KEYS
PREFERRED_PEERS_ONLY=false
# SURVEYOR_KEYS (list of strings) default is empty
# These are public key identities. If empty, this node will relay/respond to survey messages
# originating from a node in this node's transitive quorum. If this list is NOT empty,
# this node will only relay/respond to messages that originate from nodes in this list
# can use a name already defined in the .cfg
SURVEYOR_KEYS=[
"GBKXI3TVIFHD6QDSNMUOTJFDWHDYDVRRPWIHN4IM2YFXIUEWDTY7DSSI",
"$eliza"]
# Percentage, between 0 and 100, of system activity (measured in terms
# of both event-loop cycles and database time) below which the system
# will consider itself "loaded" and attempt to shed load. Set this
# number low and the system will be tolerant of overloading. Set it
# high and the system will be intolerant. By default it is 0, meaning
# totally insensitive to overloading.
MINIMUM_IDLE_PERCENT=0
# KNOWN_PEERS (list of strings) default is empty
# These are IP:port strings that this server will add to its DB of peers.
# It will try to connect to these when it is below TARGET_PEER_CONNECTIONS.
KNOWN_PEERS=[
"core-testnet1.stellar.org",
"core-testnet2.stellar.org",
"core-testnet3.stellar.org"]
# KNOWN_CURSORS (list of strings) default is empty
# Set of cursors added at each startup with value '1'.
KNOWN_CURSORS=["HORIZON"]
#######################
## SCP settings
# NODE_SEED (string) default random, regenerated each run.
# The seed used for generating the public key this node will
# be identified with in SCP.
# Your seed should be unique. Protect your seed. Treat it like a password.
# If you don't set a NODE_SEED one will be generated for you randomly
# on each startup.
#
# To generate a new, stable seed (and associated public key), run:
#
# stellar-core gen-seed
#
# You only need to keep the seed from this; you can always recover the
# public key from the seed by running:
#
# stellar-core convert-id <seed>
#
# This example also adds a common name to NODE_NAMES list named `self` with the
# public key associated to this seed
NODE_SEED="SBI3CZU7XZEWVXU7OZLW5MMUQAP334JFOPXSLTPOH43IRTEQ2QYXU5RG self"
# NODE_IS_VALIDATOR (boolean) default false.
# Only nodes that want to participate in SCP should set NODE_IS_VALIDATOR=true.
# Most instances should operate in observer mode with NODE_IS_VALIDATOR=false.
# See QUORUM_SET below.
NODE_IS_VALIDATOR=false
# NODE_HOME_DOMAIN (string) default empty.
# HOME_DOMAIN for this validator
# Required when NODE_IS_VALIDATOR=true
# When set, this validator will be grouped with other validators with the
# same HOME_DOMAIN (as defined in VALIDATORS/HOME_DOMAINS)
NODE_HOME_DOMAIN=""
###########################
# Consensus settings
# FAILURE_SAFETY (integer) default -1
# Most people should leave this to -1
# This is the maximum number of validator failures from your QUORUM_SET that
# you want to be able to tolerate.
# Typically, you will need at least 3f+1 nodes in your quorum set.
# If you don't have enough nodes in your quorum set to tolerate the level you
# set here stellar-core won't run as a precaution.
# A value of -1 indicates to use (n-1)/3 (n being the number of nodes
# and groups from the top level of your QUORUM_SET)
# A value of 0 is only allowed if UNSAFE_QUORUM is set
# Note: The value of 1 below is the maximum number derived from the value of
# QUORUM_SET in this configuration file
FAILURE_SAFETY=-1
# UNSAFE_QUORUM (true or false) default false
# Most people should leave this to false.
# If set to true allows to specify a potentially unsafe quorum set.
# Otherwise it won't start if
# a threshold % is set too low (threshold below 66% for the top level,
# 51% for other levels)
# FAILURE_SAFETY at 0 or above the number of failures that can occur
# You might want to set this if you are running your own network and
# aren't concerned with byzantine failures or if you fully understand how the
# quorum sets of other nodes relate to yours when it comes to
# quorum intersection.
UNSAFE_QUORUM=false
#########################
## History
# CATCHUP_COMPLETE (true or false) defaults to false
# if true will catchup to the network "completely" (replaying all history)
# if false will look for CATCHUP_RECENT for catchup settings
CATCHUP_COMPLETE=false
# CATCHUP_RECENT (integer) default to 0
# if CATCHUP_COMPLETE is true this option is ignored
# if set to 0 will catchup "minimally", using deltas to the most recent
# snapshot.
# if set to any other number, will catchup "minimally" to some past snapshot,
# then will replay history from that point to current snapshot, ensuring that
# at least CATCHUP_RECENT number of ledger entries will be present in database
# if "some past snapshot" is already present in database, it just replays all
# new history
CATCHUP_RECENT=0
# WORKER_THREADS (integer) default 11
# Number of threads available for doing long durations jobs, like bucket
# merging and verification.
WORKER_THREADS=11
# QUORUM_INTERSECTION_CHECKER (boolean) default true
# Enable/disable computation of quorum intersection monitoring
QUORUM_INTERSECTION_CHECKER=true
# MAX_CONCURRENT_SUBPROCESSES (integer) default 16
# History catchup can potentially spawn a bunch of sub-processes.
# This limits the number that will be active at a time.
MAX_CONCURRENT_SUBPROCESSES=16
# AUTOMATIC_MAINTENANCE_PERIOD (integer, seconds) default 359
# Interval between automatic maintenance executions
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_PERIOD=359
# AUTOMATIC_MAINTENANCE_COUNT (integer) default 400
# Number of unneeded ledgers in each table that will be removed during one
# maintenance run.
# NB: make sure that enough ledgers are deleted as to offset the growth of
# data accumulated by closing ledgers (catchup and normal operation)
# Set to 0 to disable automatic maintenance
AUTOMATIC_MAINTENANCE_COUNT=400
# AUTOMATIC_SELF_CHECK_PERIOD (integer, seconds) default 10800
# Interval between automatic self-checks, including connectivity
# and consistency checking against configured history archives.
# Set to zero to disable automatic self-checks.
AUTOMATIC_SELF_CHECK_PERIOD=10800
###############################
## The following options should probably never be set. They are used primarily
## for testing.
# RUN_STANDALONE (true or false) defaults to false
# This is a mode for testing. It prevents you from trying to connect
# to other peers
RUN_STANDALONE=false
# INVARIANT_CHECKS (list of strings) default is empty
# Setting this will cause specified invariants to be checked on ledger close and
# on bucket apply.
# Strings specified are matched (as regex) against the list of invariants.
# For example, to enable all invariants use ".*"
# List of invariants:
# - "AccountSubEntriesCountIsValid"
# Setting this will cause additional work on each operation apply - it
# checks if the change in the number of subentries of account (signers +
# offers + data + trustlines) equals the change in the value numsubentries
# stored in the account. This check is only performed for accounts modified in
# any way in given ledger.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "BucketListIsConsistentWithDatabase"
# Setting this will cause additional work on each bucket apply - it checks a
# variety of properties that should be satisfied by an applied bucket, for
# detailed information about what is checked see the comment in the header
# invariant/BucketListIsConsistentWithDatabase.h.
# The overhead may cause a system to catch-up more than once before being
# in sync with the network.
# - "CacheIsConsistentWithDatabase"
# Setting this will cause additional work on each operation apply - it
# checks if internal cache of ledger entries is consistent with content of
# database. It is equivalent to PARANOID_MODE from older versions of
# stellar-core.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "ConservationOfLumens"
# Setting this will cause additional work on each operation apply - it
# checks that the total number of lumens only changes during inflation.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "LedgerEntryIsValid"
# Setting this will cause additional work on each operation apply - it
# checks a variety of properties that must be true for a LedgerEntry to be
# valid.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
# - "LiabilitiesMatchOffers"
# Setting this will cause additional work on each operation apply - it
# checks that accounts, trust lines, and offers satisfy all constraints
# associated with liabilities. For additional information, see the comment
# in the header invariant/LiabilitiesMatchOffers.h.
# The overhead may cause slower systems to not perform as fast as the rest
# of the network, caution is advised when using this.
INVARIANT_CHECKS = []
# MANUAL_CLOSE (true or false) defaults to false
# Mode for testing. Ledger will only close when stellar-core gets
# the `manualclose` command
MANUAL_CLOSE=false
# ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING (true or false) defaults to false
# Enables synthetic load generation on demand.
# The load is triggered by the `generateload` runtime command.
# This option only exists for stress-testing and should not be enabled in
# production networks.
ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=false
# ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING (true or false) defaults to false
# Reduces ledger close time to 1s and checkpoint frequency to every 8 ledgers.
# Do not ever set this in production, as it will make your history archives
# incompatible with those of anyone else.
ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=false
# ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING (in seconds), defaults to no override
# Overrides the close time to the specified value but does not change checkpoint
# frequency - this may cause network instability.
# Do not use in production.
ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING=0
# ALLOW_LOCALHOST_FOR_TESTING defaults to false
# Allows to connect to localhost, should not be enabled on production systems
# as this is a security threat.
ALLOW_LOCALHOST_FOR_TESTING=false
# CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING defaults to false
# When set, during catchup, waits for bucket merges to complete
# before applying transactions.
CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING=false
# MAXIMUM_LEDGER_CLOSETIME_DRIFT (in seconds) defaults to 50
# Maximum drift between the local clock and the network time.
# When joining the network for the first time, ignore SCP messages that are
# unlikely to be for the latest ledger.
MAXIMUM_LEDGER_CLOSETIME_DRIFT=50
# DISABLE_XDR_FSYNC (true or false) defaults to false.
# If set to true, writing an XDR file (a bucket or a checkpoint) will not
# be followed by an fsync on the file. This in turn means that XDR files
# (which hold the canonical state of the ledger) may be corrupted if the
# operating system suddenly crashes or loses power, causing the node to
# diverge and get stuck on restart, or potentially even publish bad
# history. This option only exists as an escape hatch if the local
# filesystem is so unusably slow that you prefer operating without
# durability guarantees. Do not set it to true unless you're very certain
# you want to make that trade.
DISABLE_XDR_FSYNC=false
# MAX_SLOTS_TO_REMEMBER (in ledgers) defaults to 12
# Most people should leave this to 12
# Number of most recent ledgers to keep in memory. Storing more ledgers allows other
# nodes to join the network without catching up. This is useful for simulation
# testing purposes.
MAX_SLOTS_TO_REMEMBER=12
# METADATA_OUTPUT_STREAM defaults to "", disabling it.
# A string specifying a stream to write fine-grained metadata to for each ledger
# close while running. This will be opened at startup and synchronously
# streamed-to during both catchup and live ledger-closing.
#
# Streams may be specified either as a pathname (typically a named FIFO on POSIX
# or a named pipe on Windows, though plain files also work) or a string of the
# form "fd:N" for some integer N which, on POSIX, specifies the existing open
# file descriptor N inherited by the process (for example to write to an
# anonymous pipe).
#
# As a further safety check, this option is mutually exclusive with
# NODE_IS_VALIDATOR, as its typical use writing to a pipe with a reader process
# on the other end introduces a potentially-unbounded synchronous delay in
# closing a ledger, and should not be used on a node participating in consensus,
# only a passive "watcher" node.
METADATA_OUTPUT_STREAM=""
# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a stateless node
# which is streaming meta to delay streaming the meta for a given ledger until
# it closes the next ledger. This ensures that if a local bug had corrupted the
# given ledger, then the meta for the corrupted ledger will never be emitted, as
# the node will not be able to reach consensus with the network on the next
# ledger.
#
# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true in combination with a
# non-empty METADATA_OUTPUT_STREAM (which can be configured on the command line
# as well as in the config file) requires an in-memory database (specified by
# using --in-memory on the command line).
EXPERIMENTAL_PRECAUTION_DELAY_META=false
# Number of ledgers worth of transaction metadata to preserve on disk for
# debugging purposes. These records are automatically maintained and rotated
# during processing, and are helpful for recovery in case of a serious error;
# they should only be reduced or disabled if disk space is at a premium.
METADATA_DEBUG_LEDGERS=0
# EXCLUDE_TRANSACTIONS_CONTAINING_OPERATION_TYPE (list of strings) default is empty
# Setting this will cause the node to reject transactions that it receives if
# they contain any operation in this list. It will not, however, stop the node
# from voting for or applying such transactions.
#
# Strings specified are matched against the names of values for OperationType,
# such as "CREATE_ACCOUNT" or "PATH_PAYMENT_STRICT_SEND".
EXCLUDE_TRANSACTIONS_CONTAINING_OPERATION_TYPE=[]
#####################
## Tables must come at the end. (TOML you are almost perfect!)
# HOME_DOMAINS
# list of properties for home domains
# HOME_DOMAIN: string (required) home domain identifier
# QUALITY: string (required) quality of all validators in HOME_DOMAIN
# CRITICAL, HIGH, MEDIUM, LOW
# HIGH quality validators must have archives and redundancy
# CRITICAL quality validators must have archive and redundancy like HIGH,
# but also enforces that this HOME_DOMAIN must be included to achieve consensus
#
[[HOME_DOMAINS]]
HOME_DOMAIN="testnet.stellar.org"
QUALITY="HIGH"
# [[HOME_DOMAINS]]
# HOME_DOMAIN="some-other-domain"
# QUALITY="LOW"
# VALIDATORS
# List of validators used to automatically generate quorum sets
#
# NB: you need to either depend on exactly one entity OR
# have at least 4 entities to have a "safe" configuration
# see below rules for detail.
#
# The quorum set is generated using the following rules:
# validators with the same home domain (representing an entity) are automatically
# grouped together; the threshold used assumes a simple majority (2f+1)
# entities are grouped by QUALITY
# groups are nested such that the group for the quality that precedes a given group
# is added as a backup for the higher quality group.
# ie: at top level group contains HIGH quality entities and the group that
# contains MEDIUM quality entities
# heterogeneous groups use a threshold assuming byzantine failure (3f+1)
#
#
# Individual validators can be added in standard form
# NAME: string (required) unique identifier to use to identify a validator
# NAME is added as an alias for PUBLIC_KEY
# QUALITY: string (required*) quality of validator
# all validators must have a quality, either directly (as set by this property)
# or indirectly via HOME_DOMAINS (see HOME_DOMAINS for more detail on QUALITY)
# HOME_DOMAIN: string (required) home domain for validator
# PUBLIC_KEY: string (required) public key associated with a validator
# ADDRESS: string (optional) peer:port associated with a validator
# ADDRESS will be added to the KNOWN_PEERS list
# HISTORY: string (optional) history archive GET command associated
# with a validator
# HISTORY will be added to the list of known archives that can be downloaded from
# Stellar Testnet validators
[[VALIDATORS]]
NAME="sdftest1"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y"
ADDRESS="core-testnet1.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
[[VALIDATORS]]
NAME="sdftest2"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP"
ADDRESS="core-testnet2.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
[[VALIDATORS]]
NAME="sdftest3"
HOME_DOMAIN="testnet.stellar.org"
PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z"
ADDRESS="core-testnet3.stellar.org"
HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
# HISTORY
# Used to specify where to fetch and store the history archives.
# Fetching and storing history is kept as general as possible.
# Any place you can save and load static files from should be usable by the
# stellar-core history system. s3, the file system, http, etc
# stellar-core will call any external process you specify and will pass it the
# name of the file to save or load.
# Simply use template parameters `{0}` and `{1}` in place of the files being transmitted or retrieved.
# You can specify multiple places to store and fetch from. stellar-core will
# use multiple fetching locations as backup in case there is a failure fetching from one.
#
# Note: for any archive you *put* to, you must run `$ stellar-core new-hist <historyarchive>`
# once before you start.
# for example, with this config you would run: $ stellar-core new-hist local
# this creates a `local` archive on the local drive
# NB: this is an example, in general you should probably not do this as
# archives grow indefinitely
[HISTORY.local]
get="cp /var/lib/stellar-core/history/vs/{0} {1}"
put="cp {0} /var/lib/stellar-core/history/vs/{1}"
mkdir="mkdir -p /var/lib/stellar-core/history/vs/{0}"
# other examples:
# [HISTORY.stellar]
# get="curl http://history.stellar.org/{0} -o {1}"
# put="aws s3 cp {0} s3://history.stellar.org/{1}"
# [HISTORY.backup]
# get="curl http://backupstore.blob.core.windows.net/backupstore/{0} -o {1}"
# put="azure storage blob upload {0} backupstore {1}"
#The history store of the Stellar testnet
#[HISTORY.h1]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"
#[HISTORY.h2]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"
#[HISTORY.h3]
#get="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"
# QUORUM_SET (optional)
# This is how you specify this server's quorum set manually
#
# *** this section is for advanced users and exists mostly for historical reasons ***
# the preferred way to configure your quorum set is to use instead [[VALIDATORS]]
#
# It can be nested up to 2 levels: {A,B,C,{D,E,F},{G,H,{I,J,K,L}}}
# THRESHOLD_PERCENT is how many have to agree (1-100%) within a given set.
# Each set is treated as one vote.
# So for example in the above there are 5 things that can vote:
# individual validators: A,B,C, and the sets {D,E,F} and {G,H with subset {I,J,K,L}}
# the sets each have their own threshold.
# For example, with {100% G,H with subset {50% I,J,K,L}}
# means that quorum will be met with G, H and any 2 (50%) of {I, J, K, L}
#
# a [QUORUM_SET.path] section is constructed as
# THRESHOLD_PERCENT: how many have to agree, defaults to 67 (rounds up).
# VALIDATORS: array of node IDs
# additional subsets [QUORUM_SET.path.item_number]
# a QUORUM_SET
# must not contain duplicate entries {{A,B},{A,C}} is invalid for example
# The key for "self" is implicitly added at the top level, so the effective
# quorum set is [t:2, self, QUORUM_SET]. Note that "self" is always agreeing
# with the instance (if QUORUM_SET includes it)
#
# The following setup is equivalent to the example given above.
#
# Note on naming: you can add common names to the NAMED_NODES list here as
# shown in the first 3 validators or use common names that have been
# previously defined.
# [QUORUM_SET]
# THRESHOLD_PERCENT=66
# VALIDATORS=[
# "GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above",
# "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above",
# "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above"
# ]
#
# [QUORUM_SET.1]
# THRESHOLD_PERCENT=67
# VALIDATORS=[
# "$self", # 'D' from above is this node
# "GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above",
# "GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above"
# ]
#
# [QUORUM_SET.2]
# THRESHOLD_PERCENT=100
# VALIDATORS=[
# "GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above",
# "GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above"
# ]
#
# [QUORUM_SET.2.1]
# THRESHOLD_PERCENT=50
# VALIDATORS=[
# "GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above",
# "GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above",
# "GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above",
# "GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above"
# ]