Fix some clippy lints
paulhauner committed Sep 21, 2018
1 parent 091379f commit 616cc61
Showing 13 changed files with 85 additions and 111 deletions.
10 changes: 5 additions & 5 deletions lighthouse/client/mod.rs
@@ -25,8 +25,8 @@ impl Client {
     ///
     /// Presently, this means starting network and sync threads
     /// and plumbing them together.
-    pub fn new(config: LighthouseConfig,
-               log: Logger)
+    pub fn new(config: &LighthouseConfig,
+               log: &Logger)
         -> Self
     {
         // Open the local db
@@ -65,8 +65,8 @@ impl Client {
            sync_db,
            network_tx.clone(),
            network_rx,
-           sync_out_sender,
-           sync_in_receiver,
+           &sync_out_sender,
+           &sync_in_receiver,
            sync_log,
        );
    });
@@ -75,7 +75,7 @@

        // Return the client struct
        Self {
-           db: db,
+           db,
            network_thread,
            sync_thread,
        }
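
A side note on the lints behind this hunk (an inference, since the commit message only says "some clippy lints"): taking `&LighthouseConfig` and `&Logger` matches clippy's needless_pass_by_value suggestion, which flags arguments that are moved in but only read, and `db: db,` becoming `db` is the redundant_field_names shorthand. A minimal, standalone sketch of the by-reference pattern, with a placeholder Config type rather than the real LighthouseConfig:

    // Hypothetical stand-ins; not types from this repository.
    #[derive(Debug)]
    struct Config {
        data_dir: String,
    }

    struct Client {
        data_dir: String,
    }

    impl Client {
        // Borrowing `config` lets the caller keep using it after construction.
        fn new(config: &Config) -> Self {
            Client { data_dir: config.data_dir.clone() }
        }
    }

    fn main() {
        let config = Config { data_dir: "/tmp/lighthouse".to_string() };
        let _client = Client::new(&config);
        println!("config is still usable: {:?}", config);
    }
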
2 changes: 1 addition & 1 deletion lighthouse/config/mod.rs
@@ -23,7 +23,7 @@ impl LighthouseConfig {
             home.join(DEFAULT_LIGHTHOUSE_DIR)
         };
         fs::create_dir_all(&data_dir)
-            .expect(&format!("Unable to create {:?}", &data_dir));
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
         let p2p_listen_port = 0;
         Self {
             data_dir,
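
This hunk (and the identical one in disk_db.rs below) has the shape of clippy's expect_fun_call lint: `.expect(&format!(..))` builds the panic message even when the call succeeds, whereas the closure given to `unwrap_or_else` only runs on the error path. A small self-contained comparison, using a temp directory instead of the real data_dir:

    use std::fs;

    fn main() {
        let data_dir = std::env::temp_dir().join("lighthouse_sketch");

        // Eager: the message String is formatted even though create_dir_all succeeds.
        fs::create_dir_all(&data_dir)
            .expect(&format!("Unable to create {:?}", &data_dir));

        // Lazy: formatting and panicking only happen if the Result is an Err.
        fs::create_dir_all(&data_dir)
            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
    }
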
2 changes: 1 addition & 1 deletion lighthouse/db/disk_db.rs
@@ -40,7 +40,7 @@ impl DiskDB {
          * Initialise the path
          */
         fs::create_dir_all(&path)
-            .expect(&format!("Unable to create {:?}", &path));
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &path));
         let db_path = path.join("database");

         /*
24 changes: 11 additions & 13 deletions lighthouse/db/memory_db.rs
@@ -53,12 +53,11 @@ impl ClientDB for MemoryDB {
         let db = self.db.read().unwrap();
         let known_columns = self.known_columns.read().unwrap();

-        match known_columns.contains(&col.to_string()) {
-            false => Err(DBError{ message: "Unknown column".to_string() }),
-            true => {
-                let column_key = MemoryDB::get_key_for_col(col, key);
-                Ok(db.get(&column_key).and_then(|val| Some(val.clone())))
-            }
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            Ok(db.get(&column_key).and_then(|val| Some(val.clone())))
+        } else {
+            Err(DBError{ message: "Unknown column".to_string() })
         }
     }

@@ -70,13 +69,12 @@
         let mut db = self.db.write().unwrap();
         let known_columns = self.known_columns.read().unwrap();

-        match known_columns.contains(&col.to_string()) {
-            false => Err(DBError{ message: "Unknown column".to_string() }),
-            true => {
-                let column_key = MemoryDB::get_key_for_col(col, key);
-                db.insert(column_key, val.to_vec());
-                Ok(())
-            }
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            db.insert(column_key, val.to_vec());
+            Ok(())
+        } else {
+            Err(DBError{ message: "Unknown column".to_string() })
         }
     }
 }
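
Both hunks follow clippy's match_bool suggestion: a `match` on a boolean with literal `true`/`false` arms reads more directly as `if`/`else`. A reduced, runnable sketch of the same control flow, with a toy column map standing in for MemoryDB:

    use std::collections::{HashMap, HashSet};

    // Toy lookup mirroring the known-column check; not the real ClientDB trait.
    fn get(
        known_columns: &HashSet<String>,
        db: &HashMap<String, Vec<u8>>,
        col: &str,
        key: &str,
    ) -> Result<Option<Vec<u8>>, String> {
        if known_columns.contains(col) {
            let column_key = format!("{}/{}", col, key);
            Ok(db.get(&column_key).cloned())
        } else {
            Err("Unknown column".to_string())
        }
    }

    fn main() {
        let mut known_columns = HashSet::new();
        known_columns.insert("blocks".to_string());
        let db: HashMap<String, Vec<u8>> = HashMap::new();

        assert_eq!(get(&known_columns, &db, "blocks", "0"), Ok(None));
        assert!(get(&known_columns, &db, "state", "0").is_err());
    }
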
2 changes: 1 addition & 1 deletion lighthouse/main.rs
@@ -64,7 +64,7 @@ fn main() {
         "data_dir" => &config.data_dir.to_str(),
         "port" => &config.p2p_listen_port);

-    let client = Client::new(config, log.new(o!()));
+    let client = Client::new(&config, &log);
     client.sync_thread.join().unwrap();

     info!(log, "Exiting.");
50 changes: 25 additions & 25 deletions lighthouse/state/transition/attestation_parent_hashes.rs
@@ -13,16 +13,16 @@ use super::TransitionError;
 /// See this slide for more information:
 /// https://tinyurl.com/ybzn2spw
 pub fn attestation_parent_hashes(
-    cycle_length: &u8,
-    block_slot: &u64,
-    attestation_slot: &u64,
-    current_hashes: &Vec<Hash256>,
-    oblique_hashes: &Vec<Hash256>)
+    cycle_length: u8,
+    block_slot: u64,
+    attestation_slot: u64,
+    current_hashes: &[Hash256],
+    oblique_hashes: &[Hash256])
     -> Result<Vec<Hash256>, TransitionError>
 {
     // This cast places a limit on cycle_length. If you change it, check math
     // for overflow.
-    let cycle_length: u64 = *cycle_length as u64;
+    let cycle_length: u64 = u64::from(cycle_length);

     if current_hashes.len() as u64 != (cycle_length * 2) {
         return Err(TransitionError::InvalidInput(String::from(
@@ -69,7 +69,7 @@ pub fn attestation_parent_hashes(
     let mut hashes = Vec::new();
     hashes.extend_from_slice(
         &current_hashes[(start as usize)..(end as usize)]);
-    hashes.append(&mut oblique_hashes.clone());
+    hashes.extend_from_slice(oblique_hashes);

     Ok(hashes)
 }
@@ -98,9 +98,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = get_range_of_hashes(100, 102);
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -123,9 +123,9 @@
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = get_range_of_hashes(100, 108);
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -148,9 +148,9 @@
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -171,9 +171,9 @@
         let current_hashes = get_range_of_hashes(0, 16);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -194,9 +194,9 @@
         let current_hashes = get_range_of_hashes(0, 16);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_err());
@@ -213,9 +213,9 @@
         let current_hashes = get_range_of_hashes(0, 15);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_err());
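
The signature change bundles a few routine clippy suggestions (again inferred from the diff, not named in the commit): small Copy scalars like u8/u64 are cheaper passed by value than by reference (trivially_copy_pass_by_ref), `&[Hash256]` accepts vectors, arrays, and sub-slices where `&Vec<Hash256>` only accepts a Vec (ptr_arg), `u64::from(x)` is an explicitly lossless widening where `x as u64` would also silently permit narrowing in other contexts (cast_lossless), and `extend_from_slice` avoids cloning the whole oblique_hashes vector just to append it. A reduced sketch with plain u64 values standing in for Hash256:

    // Copy scalars by value, slice parameters, and lossless widening via From.
    fn parent_window(cycle_length: u8, start: u64, hashes: &[u64]) -> Result<Vec<u64>, String> {
        let cycle_length = u64::from(cycle_length); // lossless, unlike `as`
        if (hashes.len() as u64) < cycle_length {
            return Err("not enough hashes".to_string());
        }
        let start = start as usize;
        let end = (start + cycle_length as usize).min(hashes.len());
        Ok(hashes[start..end].to_vec())
    }

    fn main() {
        let hashes: Vec<u64> = (0..16).collect();
        // A Vec and a sub-slice both coerce to &[u64] at the call site.
        assert_eq!(parent_window(4, 0, &hashes).unwrap(), vec![0, 1, 2, 3]);
        assert_eq!(parent_window(4, 2, &hashes[..8]).unwrap(), vec![2, 3, 4, 5]);
    }
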
27 changes: 12 additions & 15 deletions lighthouse/state/transition/shuffling/rng.rs
@@ -2,7 +2,7 @@ use super::blake2_rfc::blake2s::{ Blake2s, Blake2sResult };

 const SEED_SIZE_BYTES: usize = 32;
 const RAND_BYTES: usize = 3; // 24 / 8
-const RAND_MAX: u32 = 16777216; // 2**24
+const RAND_MAX: u32 = 16_777_216; // 2**24

 /// A pseudo-random number generator which given a seed
 /// uses successive blake2s hashing to generate "entropy".
@@ -31,17 +31,14 @@ impl ShuffleRng {
     /// Extracts 3 bytes from the `seed`. Rehashes seed if required.
     fn rand(&mut self) -> u32 {
         self.idx += RAND_BYTES;
-        match self.idx >= SEED_SIZE_BYTES {
-            true => {
-                self.rehash_seed();
-                self.rand()
-            }
-            false => {
-                int_from_byte_slice(
-                    self.seed.as_bytes(),
-                    self.idx - RAND_BYTES,
-                )
-            }
+        if self.idx >= SEED_SIZE_BYTES {
+            self.rehash_seed();
+            self.rand()
+        } else {
+            int_from_byte_slice(
+                self.seed.as_bytes(),
+                self.idx - RAND_BYTES,
+            )
         }
     }

@@ -65,9 +62,9 @@ impl ShuffleRng {
 /// Returns that integer.
 fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
     (
-        source[offset + 2] as u32) |
-        ((source[offset + 1] as u32) << 8) |
-        ((source[offset ] as u32) << 16
+        u32::from(source[offset + 2])) |
+        (u32::from(source[offset + 1]) << 8) |
+        (u32::from(source[offset ]) << 16
     )
 }
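
Two more mechanical fixes show up here: unreadable_literal wants digit separators in long constants, and cast_lossless prefers `u32::from(byte)` over `byte as u32` for widening that can never lose bits. A standalone version of the 3-byte big-endian assembly, detached from ShuffleRng:

    const RAND_MAX: u32 = 16_777_216; // 2**24, separators per unreadable_literal

    // Build a u32 from three big-endian bytes using infallible From conversions.
    fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
        u32::from(source[offset + 2])
            | (u32::from(source[offset + 1]) << 8)
            | (u32::from(source[offset]) << 16)
    }

    fn main() {
        let bytes: [u8; 3] = [0x01, 0x02, 0x03];
        let n = int_from_byte_slice(&bytes, 0);
        assert_eq!(n, 0x0001_0203);
        assert!(n < RAND_MAX);
    }
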
24 changes: 0 additions & 24 deletions lighthouse/sync/block.rs

This file was deleted.

1 change: 0 additions & 1 deletion lighthouse/sync/mod.rs
@@ -3,7 +3,6 @@ extern crate slog;
 extern crate tokio;
 extern crate network_libp2p;

-pub mod block;
 pub mod network;
 pub mod sync_future;
 pub mod wire_protocol;
37 changes: 21 additions & 16 deletions lighthouse/sync/network.rs
@@ -8,8 +8,6 @@ use super::network_libp2p::message::{
     NetworkEventType,
 };

-use super::block::process_unverified_blocks;
-
 use super::wire_protocol::{
     WireMessage,
     WireMessageHeader,
@@ -25,9 +23,9 @@ use super::futures::sync::mpsc::{
 /// (e.g., libp2p) has an event to push up to the sync process.
 pub fn handle_network_event(
     event: NetworkEvent,
-    db: Arc<ClientDB>,
-    network_tx: UnboundedSender<OutgoingMessage>,
-    log: Logger)
+    db: &Arc<ClientDB>,
+    network_tx: &UnboundedSender<OutgoingMessage>,
+    log: &Logger)
     -> Result<(), ()>
 {
     debug!(&log, "";
@@ -38,10 +36,10 @@ pub fn handle_network_event(
         NetworkEventType::Message => {
             if let Some(data) = event.data {
                 handle_network_message(
-                    data,
-                    db,
-                    network_tx,
-                    log)
+                    &data,
+                    &db,
+                    &network_tx,
+                    &log)
             } else {
                 Ok(())
             }
@@ -55,10 +53,10 @@
 /// This function should be called whenever a peer from a network
 /// (e.g., libp2p) has sent a message to us.
 fn handle_network_message(
-    message: Vec<u8>,
-    db: Arc<ClientDB>,
-    _network_tx: UnboundedSender<OutgoingMessage>,
-    log: Logger)
+    message: &[u8],
+    db: &Arc<ClientDB>,
+    _network_tx: &UnboundedSender<OutgoingMessage>,
+    log: &Logger)
     -> Result<(), ()>
 {
     match WireMessage::decode(&message) {
@@ -67,16 +65,23 @@ fn handle_network_message(
                 WireMessageHeader::Blocks => {
                     process_unverified_blocks(
                         msg.body,
-                        db,
-                        log
+                        &db,
+                        &log
                     );
                     Ok(())
                 }
                 _ => Ok(())
             }
         }
         Err(_) => {
-            return Ok(()) // No need to pass the error back
+            Ok(()) // No need to pass the error back
         }
     }
 }
+
+fn process_unverified_blocks(_message: &[u8],
+    _db: &Arc<ClientDB>,
+    _log: &Logger)
+{
+    //
+}
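
Alongside the borrowed parameters, the Err arm loses its explicit `return`: clippy's needless_return lint points out that the final expression of a block is already its value. A trivial standalone example of the same rewrite, unrelated to the wire protocol itself:

    fn first_byte(bytes: &[u8]) -> Result<u8, ()> {
        match bytes.split_first() {
            Some((header, _body)) => Ok(*header),
            // Trailing expression; `return Err(());` here would trip needless_return.
            None => Err(()),
        }
    }

    fn main() {
        assert_eq!(first_byte(&[42, 1, 2]), Ok(42));
        assert_eq!(first_byte(&[]), Err(()));
    }
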
10 changes: 5 additions & 5 deletions lighthouse/sync/sync_future.rs
@@ -28,18 +28,18 @@ pub fn run_sync_future(
     db: Arc<ClientDB>,
     network_tx: NetworkSender,
     network_rx: NetworkReceiver,
-    _sync_tx: SyncSender,
-    _sync_rx: SyncReceiver,
+    _sync_tx: &SyncSender,
+    _sync_rx: &SyncReceiver,
     log: Logger)
 {
     let network_future = {
         network_rx
             .for_each(move |event| {
                 handle_network_event(
                     event,
-                    db.clone(),
-                    network_tx.clone(),
-                    log.clone())
+                    &db.clone(),
+                    &network_tx.clone(),
+                    &log.clone())
             })
             .map_err(|_| panic!("rx failed"))
     };
2 changes: 1 addition & 1 deletion lighthouse/sync/wire_protocol.rs
@@ -19,7 +19,7 @@ pub struct WireMessage<'a> {
 }

 impl<'a> WireMessage<'a> {
-    pub fn decode(bytes: &'a Vec<u8>)
+    pub fn decode(bytes: &'a [u8])
         -> Result<Self, WireMessageDecodeError>
     {
         if let Some((header_byte, body)) = bytes.split_first() {
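
Changing the parameter from `&'a Vec<u8>` to `&'a [u8]` is clippy's ptr_arg suggestion: a slice borrow is strictly more general, so callers can pass a Vec, a fixed-size array, or a sub-slice without the function caring which. A minimal sketch with a made-up one-byte header, not the real WireMessage layout:

    // Hypothetical decoder: first byte is a header, the rest is the body.
    fn split_header(bytes: &[u8]) -> Result<(u8, &[u8]), String> {
        match bytes.split_first() {
            Some((header, body)) => Ok((*header, body)),
            None => Err("empty message".to_string()),
        }
    }

    fn main() {
        let from_vec: Vec<u8> = vec![0x01, 0xAA, 0xBB];
        let from_array: [u8; 2] = [0x01, 0xCC];

        // A Vec and an array both coerce to &[u8] at the call site.
        assert_eq!(split_header(&from_vec).unwrap().0, 0x01);
        assert_eq!(split_header(&from_array).unwrap().1.len(), 1);
    }
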