// Smoldot
// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! Background runtime download service.
//!
//! This service plugs on top of a [`sync_service`], listens for new best blocks and checks
//! whether the runtime has changed in any way. Its objective is to always provide an up-to-date
//! [`executor::host::HostVmPrototype`] ready to be called by other services.
//!
//! # Usage
//!
//! The runtime service lets user subscribe to block updates, similar to the [`sync_service`].
//! These subscriptions are implemented by subscribing to the underlying [`sync_service`] and,
//! for each notification, checking whether the runtime has changed (thanks to the presence or
//! absence of a header digest item), and downloading the runtime code if necessary. Therefore,
//! these notifications might come with a delay compared to directly using the [`sync_service`].
//!
//! If it isn't possible to download the runtime code of a block (for example because peers refuse
//! to answer or have already pruned the block) or if the runtime service already has too many
//! pending downloads, this block is simply not reported on the subscriptions. The download will
//! be repeatedly tried until it succeeds.
//!
//! Consequently, you are strongly encouraged to not use both the [`sync_service`] *and* the
//! [`RuntimeService`] of the same chain. They each provide a consistent view of the chain, but
//! this view isn't necessarily the same on both services.
//!
//! The main functionality offered by the runtime service is [`RuntimeService::subscribe_all`],
//! which notifies about new blocks once their runtime is known.
//!
//! # Blocks pinning
//!
//! Blocks that are reported through [`RuntimeService::subscribe_all`] are automatically *pinned*.
//! If multiple subscriptions exist, each block is pinned once per subscription.
//!
//! As long as a block is pinned, the [`RuntimeService`] is guaranteed to keep in its internal
//! state the runtime of this block and its properties.
//!
//! Blocks must be manually unpinned by calling [`Subscription::unpin_block`].
//! Failing to do so is effectively a memory leak. If the number of pinned blocks becomes too
//! large, the subscription is force-killed by the [`RuntimeService`].
//!
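//!
//! # Example
//!
//! A minimal sketch of a subscriber loop (assuming an already-initialized
//! `runtime_service: Arc<RuntimeService<TPlat>>` is in scope); illustrative only, not a
//! doctest:
//!
//! ```ignore
//! let subscription = runtime_service
//!     .subscribe_all("example-subscription", 32, NonZeroUsize::new(32).unwrap())
//!     .await;
//! let mut new_blocks = subscription.new_blocks;
//!
//! while let Some(notification) = new_blocks.next().await {
//!     match notification {
//!         Notification::Block(block) => {
//!             // Process the block, then unpin it to avoid leaking memory.
//!             let hash = header::hash_from_scale_encoded_header(&block.scale_encoded_header);
//!             new_blocks.unpin_block(&hash).await;
//!         }
//!         Notification::Finalized { .. } | Notification::BestBlockChanged { .. } => {}
//!     }
//! }
//! ```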
use crate::{network_service, platform::Platform, sync_service};
use alloc::{
boxed::Box,
collections::BTreeMap,
format,
string::{String, ToString as _},
sync::{Arc, Weak},
vec,
vec::Vec,
};
use core::{
iter, mem,
num::{NonZeroU32, NonZeroUsize},
pin::Pin,
time::Duration,
};
use futures::{
channel::mpsc,
lock::{Mutex, MutexGuard},
prelude::*,
};
use itertools::Itertools as _;
use smoldot::{
chain::async_tree,
executor, header,
informant::{BytesDisplay, HashDisplay},
network::protocol,
trie::{self, proof_verify},
};
/// Configuration for a runtime service.
pub struct Config<TPlat: Platform> {
/// Name of the chain, for logging purposes.
///
/// > **Note**: This name will be directly printed out. Any special character should already
/// > have been filtered out from this name.
pub log_name: String,
/// Closure that spawns background tasks.
pub tasks_executor: Box<dyn FnMut(String, future::BoxFuture<'static, ()>) + Send>,
/// Service responsible for synchronizing the chain.
pub sync_service: Arc<sync_service::SyncService<TPlat>>,
/// Header of the genesis block of the chain, in SCALE encoding.
pub genesis_block_scale_encoded_header: Vec<u8>,
}
/// Identifies a runtime currently pinned within a [`RuntimeService`].
#[derive(Clone)]
pub struct PinnedRuntimeId(Arc<Runtime>);
/// See [the module-level documentation](..).
pub struct RuntimeService<TPlat: Platform> {
/// See [`Config::sync_service`].
sync_service: Arc<sync_service::SyncService<TPlat>>,
/// Fields behind a `Mutex`. Should only be locked for short-lived operations.
guarded: Arc<Mutex<Guarded<TPlat>>>,
/// Handle to abort the background task.
background_task_abort: future::AbortHandle,
}
impl<TPlat: Platform> RuntimeService<TPlat> {
/// Initializes a new runtime service.
///
/// The future returned by this function is expected to finish relatively quickly and is
/// necessary only for locking purposes.
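///
/// A construction sketch (illustrative; `spawner`, `sync_service`, and `genesis_header` are
/// assumed to exist in the caller's context):
///
/// ```ignore
/// let runtime_service = RuntimeService::new(Config {
///     log_name: "my-chain".to_string(),
///     tasks_executor: Box::new(move |name, task| spawner(name, task)),
///     sync_service: sync_service.clone(),
///     genesis_block_scale_encoded_header: genesis_header,
/// })
/// .await;
/// ```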
pub async fn new(mut config: Config<TPlat>) -> Self {
// Target to use for all the logs of this service.
let log_target = format!("runtime-{}", config.log_name);
let best_near_head_of_chain = config.sync_service.is_near_head_of_chain_heuristic().await;
let tree = {
let mut tree = async_tree::AsyncTree::new(async_tree::Config {
finalized_async_user_data: None,
retry_after_failed: Duration::from_secs(10),
blocks_capacity: 32,
});
let node_index = tree.input_insert_block(
Block {
hash: header::hash_from_scale_encoded_header(
&config.genesis_block_scale_encoded_header,
),
scale_encoded_header: config.genesis_block_scale_encoded_header,
},
None,
false,
true,
);
tree.input_finalize(node_index, node_index);
GuardedInner::FinalizedBlockRuntimeUnknown {
tree,
when_known: event_listener::Event::new(),
}
};
let guarded = Arc::new(Mutex::new(Guarded {
next_subscription_id: 0,
best_near_head_of_chain,
tree,
runtimes: slab::Slab::with_capacity(2),
}));
// Spawns a task that runs in the background and updates the content of the mutex.
let background_task_abort;
(config.tasks_executor)(log_target.clone(), {
let sync_service = config.sync_service.clone();
let guarded = guarded.clone();
let (abortable, abort) = future::abortable(async move {
run_background(log_target, sync_service, guarded).await;
});
background_task_abort = abort;
abortable.map(|_| ()).boxed()
});
RuntimeService {
sync_service: config.sync_service,
guarded,
background_task_abort,
}
}
/// Calls [`sync_service::SyncService::block_number_bytes`] on the sync service associated to
/// this runtime service.
pub fn block_number_bytes(&self) -> usize {
self.sync_service.block_number_bytes()
}
/// Subscribes to the state of the chain: the current state and the new blocks.
///
/// This function only returns once the runtime of the current finalized block is known. This
/// might take a long time.
///
/// A name must be passed to be used for debugging purposes. At the time of writing of this
/// comment, the `#[must_use]` attribute doesn't work on asynchronous functions, making a name
/// extremely useful. If `#[must_use]` ever works on asynchronous functions, this `name` might
/// be removed.
///
/// Only up to `buffer_size` block notifications are buffered in the channel. If the channel
/// is full when a new notification needs to be pushed, the channel gets closed.
///
/// A maximum number of finalized or non-canonical (i.e. not part of the finalized chain)
/// pinned blocks must be passed, indicating the maximum number of blocks that are finalized
/// or non-canonical that the runtime service will pin at the same time for this subscription.
/// If this maximum is reached, the channel will get closed. In situations where the subscriber
/// is guaranteed to always properly unpin blocks,
/// `NonZeroUsize::new(usize::max_value()).unwrap()` can be passed in order to effectively
/// ignore this maximum.
///
/// The channel also gets closed if a gap in the finality happens, such as after a Grandpa
/// warp sync.
///
/// See [`SubscribeAll`] for information about the return value.
pub async fn subscribe_all(
&self,
subscription_name: &'static str,
buffer_size: usize,
max_pinned_blocks: NonZeroUsize,
) -> SubscribeAll<TPlat> {
// First, lock `guarded` and wait for the tree to be in `FinalizedBlockRuntimeKnown` mode.
// This can take a long time.
let mut guarded_lock = loop {
let guarded_lock = self.guarded.lock().await;
match &guarded_lock.tree {
GuardedInner::FinalizedBlockRuntimeKnown { .. } => break guarded_lock,
GuardedInner::FinalizedBlockRuntimeUnknown { when_known, .. } => {
let wait_fut = when_known.listen();
drop(guarded_lock);
wait_fut.await;
}
}
};
let mut guarded_lock = &mut *guarded_lock;
// Extract the components of the `FinalizedBlockRuntimeKnown`. We are guaranteed by the
// loop above to be in this state.
let (tree, finalized_block, pinned_blocks, all_blocks_subscriptions) =
match &mut guarded_lock.tree {
GuardedInner::FinalizedBlockRuntimeKnown {
tree,
finalized_block,
pinned_blocks,
all_blocks_subscriptions,
} => (
tree,
finalized_block,
pinned_blocks,
all_blocks_subscriptions,
),
_ => unreachable!(),
};
let (tx, new_blocks_channel) = mpsc::channel(buffer_size);
let subscription_id = guarded_lock.next_subscription_id;
debug_assert_eq!(
pinned_blocks
.range((subscription_id, [0; 32])..=(subscription_id, [0xff; 32]))
.count(),
0
);
guarded_lock.next_subscription_id += 1;
let decoded_finalized_block = header::decode(
&finalized_block.scale_encoded_header,
self.sync_service.block_number_bytes(),
)
.unwrap();
let _prev_value = pinned_blocks.insert(
(subscription_id, finalized_block.hash),
PinnedBlock {
runtime: tree.finalized_async_user_data().clone(),
state_trie_root_hash: *decoded_finalized_block.state_root,
block_number: decoded_finalized_block.number,
block_ignores_limit: false,
},
);
debug_assert!(_prev_value.is_none());
let mut non_finalized_blocks_ancestry_order =
Vec::with_capacity(tree.num_input_non_finalized_blocks());
for block in tree.input_iter_ancestry_order() {
let runtime = match block.async_op_user_data {
Some(rt) => rt.clone(),
None => continue, // Runtime of that block not known yet, so it shouldn't be reported.
};
let block_hash = block.user_data.hash;
let parent_runtime = tree
.parent(block.id)
.map_or(tree.finalized_async_user_data().clone(), |parent_idx| {
tree.block_async_user_data(parent_idx).unwrap().clone()
});
let parent_hash = *header::decode(
&block.user_data.scale_encoded_header,
self.sync_service.block_number_bytes(),
)
.unwrap()
.parent_hash; // TODO: correct? if yes, document
debug_assert!(
parent_hash == finalized_block.hash
|| tree
.input_iter_ancestry_order()
.any(|b| parent_hash == b.user_data.hash && b.async_op_user_data.is_some())
);
let decoded_header = header::decode(
&block.user_data.scale_encoded_header,
self.sync_service.block_number_bytes(),
)
.unwrap();
let _prev_value = pinned_blocks.insert(
(subscription_id, block_hash),
PinnedBlock {
runtime: runtime.clone(),
state_trie_root_hash: *decoded_header.state_root,
block_number: decoded_header.number,
block_ignores_limit: true,
},
);
debug_assert!(_prev_value.is_none());
non_finalized_blocks_ancestry_order.push(BlockNotification {
is_new_best: block.is_output_best,
parent_hash,
scale_encoded_header: block.user_data.scale_encoded_header.clone(),
new_runtime: if !Arc::ptr_eq(&runtime, &parent_runtime) {
Some(
runtime
.runtime
.as_ref()
.map(|rt| rt.runtime_spec.clone())
.map_err(|err| err.clone()),
)
} else {
None
},
});
}
debug_assert!(matches!(
non_finalized_blocks_ancestry_order
.iter()
.filter(|b| b.is_new_best)
.count(),
0 | 1
));
all_blocks_subscriptions.insert(
subscription_id,
(subscription_name, tx, max_pinned_blocks.get() - 1),
);
SubscribeAll {
finalized_block_scale_encoded_header: finalized_block.scale_encoded_header.clone(),
finalized_block_runtime: tree
.finalized_async_user_data()
.runtime
.as_ref()
.map(|rt| rt.runtime_spec.clone())
.map_err(|err| err.clone()),
non_finalized_blocks_ancestry_order,
new_blocks: Subscription {
subscription_id,
channel: new_blocks_channel,
guarded: self.guarded.clone(),
},
}
}
/// Unpins a block after it has been reported by a subscription.
///
/// Has no effect if the [`SubscriptionId`] is not or no longer valid (as the runtime service
/// can kill any subscription at any moment).
///
/// # Panic
///
/// Panics if the block hash has not been reported or has already been unpinned.
///
#[track_caller]
pub async fn unpin_block(&self, subscription_id: SubscriptionId, block_hash: &[u8; 32]) {
Self::unpin_block_inner(&self.guarded, subscription_id, block_hash).await
}
#[track_caller]
async fn unpin_block_inner(
guarded: &Arc<Mutex<Guarded<TPlat>>>,
subscription_id: SubscriptionId,
block_hash: &[u8; 32],
) {
let mut guarded_lock = guarded.lock().await;
let guarded_lock = &mut *guarded_lock;
if let GuardedInner::FinalizedBlockRuntimeKnown {
all_blocks_subscriptions,
pinned_blocks,
..
} = &mut guarded_lock.tree
{
let block_ignores_limit = match pinned_blocks.remove(&(subscription_id.0, *block_hash))
{
Some(b) => b.block_ignores_limit,
None => {
// Cold path.
if let Some((sub_name, _, _)) = all_blocks_subscriptions.get(&subscription_id.0)
{
panic!("block already unpinned for {} subscription", sub_name);
} else {
return;
}
}
};
guarded_lock.runtimes.retain(|_, rt| rt.strong_count() > 0);
if !block_ignores_limit {
let (_name, _, finalized_pinned_remaining) = all_blocks_subscriptions
.get_mut(&subscription_id.0)
.unwrap();
*finalized_pinned_remaining += 1;
}
}
}
/// Lock the runtime service and prepare a call to a runtime entry point.
///
/// The hash of the block passed as parameter corresponds to the block whose runtime to use
/// to make the call. The block must be currently pinned in the context of the provided
/// [`SubscriptionId`].
///
/// Returns an error if the subscription is stale, meaning that it has been reset by the
/// runtime service.
///
/// # Panic
///
/// Panics if the given block isn't currently pinned by the given subscription.
///
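/// A sketch of a complete call flow (illustrative; `subscription_id`, `block_hash`, and the
/// call parameters are assumptions, and errors are simply propagated with `?`):
///
/// ```ignore
/// let runtime_lock = runtime_service
///     .pinned_block_runtime_lock(subscription_id, &block_hash)
///     .await?;
/// let (call_lock, vm) = runtime_lock
///     .start(
///         "Core_version",                 // runtime entry point
///         iter::empty::<&[u8]>(),         // no parameters
///         3,                              // total_attempts
///         Duration::from_secs(10),        // timeout_per_request
///         NonZeroU32::new(1).unwrap(),    // max_parallel
///     )
///     .await?;
/// // ... execute the call using `vm`, then hand the prototype back:
/// call_lock.unlock(vm);
/// ```
///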
pub async fn pinned_block_runtime_lock<'a>(
&'a self,
subscription_id: SubscriptionId,
block_hash: &[u8; 32],
) -> Result<RuntimeLock<'a, TPlat>, PinnedBlockRuntimeLockError> {
// Note: copying the hash ahead of time fixes some weird intermittent borrow checker
// issue.
let block_hash = *block_hash;
let mut guarded = self.guarded.lock().await;
let guarded = &mut *guarded;
let pinned_block = {
if let GuardedInner::FinalizedBlockRuntimeKnown {
all_blocks_subscriptions,
pinned_blocks,
..
} = &mut guarded.tree
{
match pinned_blocks.get(&(subscription_id.0, block_hash)) {
Some(v) => v.clone(),
None => {
// Cold path.
if let Some((sub_name, _, _)) =
all_blocks_subscriptions.get(&subscription_id.0)
{
panic!("block already unpinned for subscription {}", sub_name);
} else {
return Err(PinnedBlockRuntimeLockError::ObsoleteSubscription);
}
}
}
} else {
return Err(PinnedBlockRuntimeLockError::ObsoleteSubscription);
}
};
Ok(RuntimeLock {
service: self,
hash: block_hash,
runtime: pinned_block.runtime,
block_number: pinned_block.block_number,
block_state_root_hash: pinned_block.state_trie_root_hash,
})
}
/// Lock the runtime service and prepare a call to a runtime entry point.
///
/// The block hash, number, and state trie root hash passed as parameters identify the block
/// against which the call will be made; the runtime used for the call is the one designated
/// by the provided [`PinnedRuntimeId`].
///
/// # Panic
///
/// Panics if the provided [`PinnedRuntimeId`] is stale or invalid.
///
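/// A minimal sketch (illustrative; `pinned_runtime_id` comes from
/// [`RuntimeService::compile_and_pin_runtime`], and the block parameters are assumed to have
/// been obtained elsewhere, for example by decoding a block header):
///
/// ```ignore
/// let runtime_lock = runtime_service
///     .pinned_runtime_lock(pinned_runtime_id, block_hash, block_number, state_trie_root)
///     .await;
/// ```
///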
pub async fn pinned_runtime_lock<'a>(
&'a self,
pinned_runtime_id: PinnedRuntimeId,
block_hash: [u8; 32],
block_number: u64,
block_state_trie_root_hash: [u8; 32],
) -> RuntimeLock<'a, TPlat> {
RuntimeLock {
service: self,
hash: block_hash,
runtime: pinned_runtime_id.0.clone(),
block_number,
block_state_root_hash: block_state_trie_root_hash,
}
}
/// Tries to find a runtime within the [`RuntimeService`] that has the given storage code and
/// heap pages. If none is found, compiles the runtime and stores it within the
/// [`RuntimeService`]. In both cases, it is kept pinned until it is unpinned with
/// [`RuntimeService::unpin_runtime`].
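///
/// A minimal sketch (illustrative; `code` and `heap_pages` are assumed to have been read
/// from the `:code` and `:heappages` storage keys of some block):
///
/// ```ignore
/// let pinned_runtime_id = runtime_service
///     .compile_and_pin_runtime(Some(code), heap_pages)
///     .await;
/// // ... use the runtime, for example through `pinned_runtime_lock` ...
/// runtime_service.unpin_runtime(pinned_runtime_id).await;
/// ```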
pub async fn compile_and_pin_runtime(
&self,
storage_code: Option<Vec<u8>>,
storage_heap_pages: Option<Vec<u8>>,
) -> PinnedRuntimeId {
let mut guarded = self.guarded.lock().await;
// Try to find an existing identical runtime.
let existing_runtime = guarded
.runtimes
.iter()
.filter_map(|(_, rt)| rt.upgrade())
.find(|rt| rt.runtime_code == storage_code && rt.heap_pages == storage_heap_pages);
let runtime = if let Some(existing_runtime) = existing_runtime {
existing_runtime
} else {
// No identical runtime was found. Try compiling the new runtime.
let runtime = SuccessfulRuntime::from_storage(&storage_code, &storage_heap_pages).await;
let runtime = Arc::new(Runtime {
heap_pages: storage_heap_pages,
runtime_code: storage_code,
runtime,
});
guarded.runtimes.insert(Arc::downgrade(&runtime));
runtime
};
PinnedRuntimeId(runtime)
}
/// Un-pins a previously-pinned runtime.
///
/// # Panic
///
/// Panics if the provided [`PinnedRuntimeId`] is stale or invalid.
///
pub async fn unpin_runtime(&self, id: PinnedRuntimeId) {
// Nothing to do.
// TODO: doesn't check whether id is stale
drop(id);
}
/// Returns true if it is believed that we are near the head of the chain.
///
/// The way this method is implemented is opaque and cannot be relied on. The return value
/// should only ever be shown to the user and not used for any meaningful logic.
pub async fn is_near_head_of_chain_heuristic(&self) -> bool {
is_near_head_of_chain_heuristic(&self.sync_service, &self.guarded).await
}
}
impl<TPlat: Platform> Drop for RuntimeService<TPlat> {
fn drop(&mut self) {
self.background_task_abort.abort();
}
}
/// Return value of [`RuntimeService::subscribe_all`].
pub struct SubscribeAll<TPlat: Platform> {
/// SCALE-encoded header of the finalized block at the time of the subscription.
pub finalized_block_scale_encoded_header: Vec<u8>,
/// If the runtime of the finalized block is known, contains the information about it.
pub finalized_block_runtime: Result<executor::CoreVersion, RuntimeError>,
/// List of all known non-finalized blocks at the time of subscription.
///
/// At most one element in this list has [`BlockNotification::is_new_best`] equal to `true`.
///
/// The blocks are guaranteed to be ordered so that parents are always found before their
/// children.
pub non_finalized_blocks_ancestry_order: Vec<BlockNotification>,
/// Channel onto which new blocks are sent. The channel gets closed if it is full when a new
/// block needs to be reported.
pub new_blocks: Subscription<TPlat>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SubscriptionId(u64);
pub struct Subscription<TPlat: Platform> {
subscription_id: u64,
channel: mpsc::Receiver<Notification>,
guarded: Arc<Mutex<Guarded<TPlat>>>,
}
impl<TPlat: Platform> Subscription<TPlat> {
pub async fn next(&mut self) -> Option<Notification> {
self.channel.next().await
}
/// Returns an opaque identifier that can be used to call [`RuntimeService::unpin_block`].
pub fn id(&self) -> SubscriptionId {
SubscriptionId(self.subscription_id)
}
/// Unpins a block after it has been reported.
///
/// # Panic
///
/// Panics if the block hash has not been reported or has already been unpinned.
///
pub async fn unpin_block(&self, block_hash: &[u8; 32]) {
RuntimeService::unpin_block_inner(
&self.guarded,
SubscriptionId(self.subscription_id),
block_hash,
)
.await
}
}
/// Notification about a new block or a new finalized block.
///
/// See [`RuntimeService::subscribe_all`].
#[derive(Debug, Clone)]
pub enum Notification {
/// A non-finalized block has been finalized.
Finalized {
/// BLAKE2 hash of the header of the block that has been finalized.
///
/// A block with this hash is guaranteed to have earlier been reported in a
/// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`]
/// or in a [`Notification::Block`].
///
/// It is also guaranteed that this block is a child of the previously-finalized block. In
/// other words, if multiple blocks are finalized at the same time, only one
/// [`Notification::Finalized`] is generated and contains the highest finalized block.
///
/// If it is not possible for the [`RuntimeService`] to avoid a gap in the list of
/// finalized blocks, then the [`SubscribeAll::new_blocks`] channel is force-closed.
hash: [u8; 32],
/// Hash of the header of the best block after the finalization.
///
/// If the newly-finalized block is an ancestor of the current best block, then this field
/// contains the hash of this current best block. Otherwise, the best block is now
/// the non-finalized block with the given hash.
///
/// A block with this hash is guaranteed to have earlier been reported in a
/// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`]
/// or in a [`Notification::Block`].
best_block_hash: [u8; 32],
/// List of BLAKE2 hashes of the headers of the blocks that have been discarded because
/// they're not descendants of the newly-finalized block.
///
/// This list contains all the siblings of the newly-finalized block and all their
/// descendants.
pruned_blocks: Vec<[u8; 32]>,
},
/// A new block has been added to the list of unfinalized blocks.
Block(BlockNotification),
/// The best block has changed to a different one.
BestBlockChanged {
/// Hash of the new best block.
///
/// This can be either the hash of the latest finalized block or the hash of a
/// non-finalized block.
hash: [u8; 32],
},
}
/// Notification about a new block.
///
/// See [`RuntimeService::subscribe_all`].
#[derive(Debug, Clone)]
pub struct BlockNotification {
/// True if this block is considered as the best block of the chain.
pub is_new_best: bool,
/// SCALE-encoded header of the block.
pub scale_encoded_header: Vec<u8>,
/// BLAKE2 hash of the header of the parent of this block.
///
/// A block with this hash is guaranteed to have earlier been reported in a
/// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`] or
/// in a [`Notification::Block`].
///
/// > **Note**: The header of a block contains the hash of its parent. When it comes to
/// > consensus algorithms such as Babe or Aura, the syncing code verifies that this
/// > hash, stored in the header, actually corresponds to a valid block. However,
/// > when it comes to parachain consensus, no such verification is performed.
/// > Contrary to the hash stored in the header, the value of this field is
/// > guaranteed to refer to a block that is known by the syncing service. This
/// > allows a subscriber of the state of the chain to precisely track the hierarchy
/// > of blocks, without risking to run into a problem in case of a block with an
/// > invalid header.
pub parent_hash: [u8; 32],
/// If the runtime of the block is different from its parent, contains the information about
/// the new runtime.
pub new_runtime: Option<Result<executor::CoreVersion, RuntimeError>>,
}
async fn is_near_head_of_chain_heuristic<TPlat: Platform>(
sync_service: &sync_service::SyncService<TPlat>,
guarded: &Mutex<Guarded<TPlat>>,
) -> bool {
// The runtime service adds a delay between the moment a best block is reported by the
// sync service and the moment it is reported by the runtime service.
// Because of this, any "far from head of chain" to "near head of chain" transition
// must take that delay into account. The other way around ("near" to "far") is
// unaffected.
// If the sync service is far from the head, the runtime service is also far.
if !sync_service.is_near_head_of_chain_heuristic().await {
return false;
}
// If the sync service is near, report the result of `is_near_head_of_chain_heuristic()`
// when called at the latest best block that the runtime service reported through its API,
// to make sure that we don't report "near" while having reported only blocks that were
// far.
guarded.lock().await.best_near_head_of_chain
}
/// See [`RuntimeService::pinned_block_runtime_lock`].
#[derive(Debug, derive_more::Display, Clone)]
pub enum PinnedBlockRuntimeLockError {
/// Subscription is dead.
ObsoleteSubscription,
}
/// See [`RuntimeService::pinned_block_runtime_lock`].
// TODO: rename, as it doesn't lock anything anymore
#[must_use]
pub struct RuntimeLock<'a, TPlat: Platform> {
service: &'a RuntimeService<TPlat>,
block_number: u64,
block_state_root_hash: [u8; 32],
hash: [u8; 32],
runtime: Arc<Runtime>,
}
impl<'a, TPlat: Platform> RuntimeLock<'a, TPlat> {
/// Returns the hash of the block the call is being made against.
pub fn block_hash(&self) -> &[u8; 32] {
&self.hash
}
/// Returns the specification of the given runtime.
pub fn specification(&self) -> Result<executor::CoreVersion, RuntimeError> {
match self.runtime.runtime.as_ref() {
Ok(r) => Ok(r.runtime_spec.clone()),
Err(err) => Err(err.clone()),
}
}
pub async fn start<'b>(
&'a self,
method: &'b str,
parameter_vectored: impl Iterator<Item = impl AsRef<[u8]>> + Clone + 'b,
total_attempts: u32,
timeout_per_request: Duration,
max_parallel: NonZeroU32,
) -> Result<(RuntimeCallLock<'a>, executor::host::HostVmPrototype), RuntimeCallError> {
// TODO: DRY :-/ this whole thing is messy
// Perform the call proof request.
// Note that `guarded` is not locked.
// TODO: there's no way to verify that the call proof is actually correct; we have to ban the peer and restart the whole call process if it turns out that it's not
// TODO: also, an empty proof will be reported as an error right now, which is weird
let call_proof = self
.service
.sync_service
.clone()
.call_proof_query(
self.block_number,
protocol::CallProofRequestConfig {
block_hash: self.hash,
method,
parameter_vectored: parameter_vectored.clone(),
},
total_attempts,
timeout_per_request,
max_parallel,
)
.await
.map_err(RuntimeCallError::CallProof);
let (guarded, virtual_machine) = match self.runtime.runtime.as_ref() {
Ok(r) => {
let mut lock = r.virtual_machine.lock().await;
let vm = lock.take().unwrap();
(lock, vm)
}
Err(err) => {
return Err(RuntimeCallError::InvalidRuntime(err.clone()));
}
};
let lock = RuntimeCallLock {
guarded,
block_state_root_hash: self.block_state_root_hash,
call_proof,
};
Ok((lock, virtual_machine))
}
}
/// See [`RuntimeService::pinned_block_runtime_lock`].
#[must_use]
pub struct RuntimeCallLock<'a> {
guarded: MutexGuard<'a, Option<executor::host::HostVmPrototype>>,
block_state_root_hash: [u8; 32],
call_proof: Result<network_service::EncodedMerkleProof, RuntimeCallError>,
}
impl<'a> RuntimeCallLock<'a> {
/// Returns the storage root of the block the call is being made against.
pub fn block_storage_root(&self) -> &[u8; 32] {
&self.block_state_root_hash
}
/// Finds the given key in the call proof and returns the associated storage value.
///
/// Returns an error if the key couldn't be found in the proof, meaning that the proof is
/// invalid.
// TODO: if proof is invalid, we should give the option to fetch another call proof
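/// A minimal illustration, using the well-known `:heappages` key (the surrounding call flow
/// is assumed; `call_lock` is the [`RuntimeCallLock`] returned by [`RuntimeLock::start`]):
///
/// ```ignore
/// let heap_pages: Option<&[u8]> = call_lock.storage_entry(b":heappages")?;
/// ```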
pub fn storage_entry(&self, requested_key: &[u8]) -> Result<Option<&[u8]>, RuntimeCallError> {
let call_proof = match &self.call_proof {
Ok(p) => p.decode(),
Err(err) => return Err(err.clone()),
};
match proof_verify::verify_proof(proof_verify::VerifyProofConfig {
requested_key,
trie_root_hash: self.block_storage_root(),
proof: call_proof.iter().map(|v| &v[..]),
}) {
Ok(v) => Ok(v),
Err(err) => Err(RuntimeCallError::StorageRetrieval(err)),
}
}
/// Finds in the call proof the list of keys that match a certain prefix.
///
/// Returns an error if not all the keys could be found in the proof, meaning that the proof
/// is invalid.
///
/// The keys returned are ordered lexicographically.
// TODO: if proof is invalid, we should give the option to fetch another call proof
pub fn storage_prefix_keys_ordered(
&'_ self,
prefix: &[u8],
) -> Result<impl Iterator<Item = impl AsRef<[u8]> + '_>, RuntimeCallError> {
// TODO: this is sub-optimal as we iterate over the proof multiple times and do a lot of Vec allocations
let mut to_find = vec![trie::bytes_to_nibbles(prefix.iter().copied()).collect::<Vec<_>>()];
let mut output = Vec::new();
let call_proof = match &self.call_proof {
Ok(p) => p.decode(),
Err(err) => return Err(err.clone()),
};
for key in mem::take(&mut to_find) {
let node_info = proof_verify::trie_node_info(proof_verify::TrieNodeInfoConfig {
requested_key: key.iter().cloned(),
trie_root_hash: self.block_storage_root(),
proof: call_proof.iter().map(|v| &v[..]),
})
.map_err(RuntimeCallError::StorageRetrieval)?;
if matches!(
node_info.storage_value,
proof_verify::StorageValue::Known(_)
| proof_verify::StorageValue::HashKnownValueMissing(_)
) {
assert_eq!(key.len() % 2, 0);
output.push(
trie::nibbles_to_bytes_suffix_extend(key.iter().copied()).collect::<Vec<_>>(),
);
}
match node_info.children {
proof_verify::Children::None => {}
proof_verify::Children::One(nibble) => {
let mut child = key.clone();
child.push(nibble);
to_find.push(child);
}
proof_verify::Children::Multiple { children_bitmap } => {
for nibble in trie::all_nibbles() {
if (children_bitmap & (1 << u8::from(nibble))) == 0 {
continue;
}
let mut child = key.clone();
child.push(nibble);
to_find.push(child);
}
}
}
}
// TODO: maybe we could iterate over the proof in an ordered way rather than sorting at the end
output.sort();
Ok(output.into_iter())
}
/// End the runtime call.
///
/// This method **must** be called.
pub fn unlock(mut self, vm: executor::host::HostVmPrototype) {
debug_assert!(self.guarded.is_none());
*self.guarded = Some(vm);
}
}
impl<'a> Drop for RuntimeCallLock<'a> {
fn drop(&mut self) {
if self.guarded.is_none() {
// The [`RuntimeCallLock`] has been destroyed without being properly unlocked.
panic!()
}
}
}
/// Error that can happen when calling a runtime function.
// TODO: clean up these errors
#[derive(Debug, Clone, derive_more::Display)]
pub enum RuntimeCallError {
/// Runtime of the block isn't valid.
#[display(fmt = "Runtime of the block isn't valid: {}", _0)]
InvalidRuntime(RuntimeError),
/// Error while retrieving the storage item from other nodes.
// TODO: change error type?
#[display(fmt = "Error in call proof: {}", _0)]
StorageRetrieval(proof_verify::Error),
/// Error while retrieving the call proof from the network.
#[display(fmt = "Error when retrieving the call proof: {}", _0)]
CallProof(sync_service::CallProofQueryError),
/// Error while querying the storage of the block.
#[display(fmt = "Error while querying block storage: {}", _0)]
StorageQuery(sync_service::StorageQueryError),
}
impl RuntimeCallError {
/// Returns `true` if this is caused by networking issues, as opposed to a consensus-related
/// issue.
pub fn is_network_problem(&self) -> bool {
match self {
RuntimeCallError::InvalidRuntime(_) => false,
// TODO: as a temporary hack, we consider `TrieRootNotFound` as the remote not knowing about the requested block; see https://github.com/paritytech/substrate/pull/8046
RuntimeCallError::StorageRetrieval(proof_verify::Error::TrieRootNotFound) => true,
RuntimeCallError::StorageRetrieval(_) => false,
RuntimeCallError::CallProof(err) => err.is_network_problem(),
RuntimeCallError::StorageQuery(err) => err.is_network_problem(),
}
}
}
/// Error when analyzing the runtime.
#[derive(Debug, derive_more::Display, Clone)]
pub enum RuntimeError {
/// The `:code` key of the storage is empty.
CodeNotFound,
/// Error while parsing the `:heappages` storage value.
#[display(fmt = "Failed to parse `:heappages` storage value: {}", _0)]
InvalidHeapPages(executor::InvalidHeapPagesError),
/// Error while compiling the runtime.
#[display(fmt = "{}", _0)]
Build(executor::host::NewErr),
}
struct Guarded<TPlat: Platform> {
/// Identifier of the next subscription for
/// [`GuardedInner::FinalizedBlockRuntimeKnown::all_blocks_subscriptions`].
///
/// To avoid race conditions, subscription IDs are never reused, even if we switch back to
/// [`GuardedInner::FinalizedBlockRuntimeUnknown`].
next_subscription_id: u64,
/// Return value of calling [`sync_service::SyncService::is_near_head_of_chain_heuristic`]
/// after the latest best block update.