Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve pruning #4125

Merged
merged 7 commits into from
Nov 7, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion graph/src/components/store/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1106,7 +1106,8 @@ pub trait PruneReporter: Send + 'static {

fn start_switch(&mut self) {}
fn copy_nonfinal_start(&mut self, table: &str) {}
fn copy_nonfinal_finish(&mut self, table: &str, rows: usize) {}
/// Called after a batch of rows of a non-final table has been copied
/// during pruning. `total_rows` is the cumulative number of rows copied
/// for `table` so far, and `finished` is `true` once the table has been
/// fully copied (implementors use it to finish the progress line and
/// reset their per-table timer). `rows` is presumably the size of this
/// batch — the CLI implementor ignores it; confirm against the caller.
/// The default implementation does nothing.
fn copy_nonfinal_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) {
}
fn finish_switch(&mut self) {}

fn finish_prune(&mut self) {}
Expand Down
25 changes: 17 additions & 8 deletions node/src/manager/commands/prune.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ struct Progress {
start: Instant,
analyze_start: Instant,
switch_start: Instant,
table_start: Instant,
final_start: Instant,
final_table_start: Instant,
nonfinal_start: Instant,
}

Expand All @@ -34,7 +34,7 @@ impl Progress {
analyze_start: Instant::now(),
switch_start: Instant::now(),
final_start: Instant::now(),
final_table_start: Instant::now(),
table_start: Instant::now(),
nonfinal_start: Instant::now(),
}
}
Expand Down Expand Up @@ -82,14 +82,14 @@ impl PruneReporter for Progress {
print_copy_header();

self.final_start = Instant::now();
self.final_table_start = self.final_start;
self.table_start = self.final_start;
}

/// Progress callback for a batch copied from a final table: redraw the
/// progress row for `table` with the cumulative count, and once the
/// table is done, terminate the line and restart the per-table timer.
fn copy_final_batch(&mut self, table: &str, _rows: usize, total_rows: usize, finished: bool) {
    let elapsed = self.table_start.elapsed();
    print_copy_row(table, total_rows, elapsed);
    if finished {
        // End this table's progress line; time the next table from now
        println!();
        self.table_start = Instant::now();
    }
    let _ = std::io::stdout().flush();
}
Expand Down Expand Up @@ -119,9 +119,18 @@ impl PruneReporter for Progress {
self.nonfinal_start = Instant::now();
}

fn copy_nonfinal_finish(&mut self, table: &str, rows: usize) {
print_copy_row(table, rows, self.nonfinal_start.elapsed());
println!("");
/// Progress callback for a batch copied from a non-final table; mirrors
/// the behavior of `copy_final_batch` for the non-final copy phase.
fn copy_nonfinal_batch(
    &mut self,
    table: &str,
    _rows: usize,
    total_rows: usize,
    finished: bool,
) {
    print_copy_row(table, total_rows, self.table_start.elapsed());
    if !finished {
        std::io::stdout().flush().ok();
        return;
    }
    // Table fully copied: end the progress line and restart the timer
    println!();
    self.table_start = Instant::now();
    std::io::stdout().flush().ok();
}

Expand Down
31 changes: 29 additions & 2 deletions store/postgres/src/advisory_lock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
//! * 2, n: to lock the deployment with id n to make sure only one write
//! happens to it

use diesel::sql_types::Bool;
use diesel::{sql_query, PgConnection, RunQueryDsl};
use graph::prelude::StoreError;

Expand Down Expand Up @@ -47,8 +48,34 @@ pub(crate) fn unlock_copying(conn: &PgConnection, dst: &Site) -> Result<(), Stor
.map_err(StoreError::from)
}

pub(crate) fn lock_deployment_xact(conn: &PgConnection, site: &Site) -> Result<(), StoreError> {
sql_query(&format!("select pg_advisory_xact_lock(2, {})", site.id))
/// Attempt to take the session-level advisory lock for deployment
/// `site`. Returns `true` when we acquired the lock and `false` when
/// somebody else holds it. You don't want to use this directly;
/// instead, use `deployment::with_lock`.
pub(crate) fn lock_deployment_session(
    conn: &PgConnection,
    site: &Site,
) -> Result<bool, StoreError> {
    #[derive(QueryableByName)]
    struct Locked {
        #[sql_type = "Bool"]
        locked: bool,
    }

    let query = format!("select pg_try_advisory_lock(2, {}) as locked", site.id);
    let row: Locked = sql_query(&query).get_result(conn)?;
    Ok(row.locked)
}

/// Release the lock acquired with `lock_deployment_session`.
pub(crate) fn unlock_deployment_session(
conn: &PgConnection,
site: &Site,
) -> Result<(), StoreError> {
sql_query(&format!("select pg_advisory_unlock(2, {})", site.id))
.execute(conn)
.map(|_| ())
.map_err(StoreError::from)
Expand Down
49 changes: 6 additions & 43 deletions store/postgres/src/catalog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -202,49 +202,6 @@ pub fn supports_proof_of_indexing(
table_exists(conn, namespace.as_str(), &POI_TABLE_NAME)
}

/// Whether the given table has an exclusion constraint. When we create
/// tables, they either have an exclusion constraint on `(id, block_range)`,
/// or just a GIST index on those columns. If this returns `true`, there is
/// an exclusion constraint on the table, if it returns `false` we only have
/// an index.
///
/// This function only checks whether there is some exclusion constraint on
/// the table, since fully checking that it is exactly the constraint we
/// expect is a bit more complex. But if the table is part of a deployment
/// that we created, the conclusions in the previous paragraph hold.
pub fn has_exclusion_constraint(
    conn: &PgConnection,
    namespace: &Namespace,
    table: &SqlName,
) -> Result<bool, StoreError> {
    #[derive(Debug, QueryableByName)]
    struct Row {
        #[sql_type = "Bool"]
        // NOTE(review): `uses_excl` is read in the `map` below, so this
        // allow looks unnecessary — confirm before removing it.
        #[allow(dead_code)]
        uses_excl: bool,
    }

    // Any constraint of contype 'x' (exclusion) on `namespace.table`
    // in the system catalogs counts.
    let query = "
select count(*) > 0 as uses_excl
from pg_catalog.pg_constraint con,
pg_catalog.pg_class rel,
pg_catalog.pg_namespace nsp
where rel.oid = con.conrelid
and nsp.oid = con.connamespace
and con.contype = 'x'
and nsp.nspname = $1
and rel.relname = $2;
";

    sql_query(query)
        .bind::<Text, _>(namespace)
        .bind::<Text, _>(table.as_str())
        .get_result::<Row>(conn)
        .map_err(StoreError::from)
        .map(|row| row.uses_excl)
}

pub fn current_servers(conn: &PgConnection) -> Result<Vec<String>, StoreError> {
#[derive(QueryableByName)]
struct Srv {
Expand Down Expand Up @@ -323,6 +280,12 @@ pub fn recreate_schema(conn: &PgConnection, nsp: &str) -> Result<(), StoreError>
Ok(conn.batch_execute(&query)?)
}

/// Drop the schema `nsp` and everything in it if the schema exists
pub fn drop_schema(conn: &PgConnection, nsp: &str) -> Result<(), StoreError> {
    // `cascade` removes all contained objects along with the schema
    let query = format!("drop schema if exists {} cascade;", nsp);
    conn.batch_execute(&query)?;
    Ok(())
}

pub fn migration_count(conn: &PgConnection) -> Result<i64, StoreError> {
use __diesel_schema_migrations as m;

Expand Down
34 changes: 23 additions & 11 deletions store/postgres/src/deployment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,20 @@ use diesel::{
sql_query,
sql_types::{Nullable, Text},
};
use graph::data::subgraph::{
schema::{DeploymentCreate, SubgraphManifestEntity},
SubgraphFeature,
};
use graph::prelude::{
anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr,
DeploymentHash, DeploymentState, Schema, StoreError,
};
use graph::{blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError};
use graph::{
data::subgraph::{
schema::{DeploymentCreate, SubgraphManifestEntity},
SubgraphFeature,
},
util::backoff::ExponentialBackoff,
};
use stable_hash_legacy::crypto::SetHasher;
use std::{collections::BTreeSet, convert::TryFrom, ops::Bound};
use std::{collections::BTreeSet, convert::TryFrom, ops::Bound, time::Duration};
use std::{str::FromStr, sync::Arc};

use crate::connection_pool::ForeignServer;
Expand Down Expand Up @@ -1047,11 +1050,20 @@ pub fn set_earliest_block(
Ok(())
}

/// Lock the deployment `site` for writes for the remainder of the current
/// transaction. This lock is used to coordinate the changes that the
/// subgraph writer makes with changes that other parts of the system, in
/// particular, pruning make
/// Lock the deployment `site` for writes while `f` is running. The lock can
/// cross transactions, and `f` can therefore execute multiple transactions
/// while other write activity for that deployment is locked out. Block the
/// current thread until we can acquire the lock.
// see also: deployment-lock-for-update
pub fn lock(conn: &PgConnection, site: &Site) -> Result<(), StoreError> {
advisory_lock::lock_deployment_xact(conn, site)
/// Lock the deployment `site` for writes while `f` is running. The lock
/// can cross transactions, and `f` can therefore execute multiple
/// transactions while other write activity for that deployment is locked
/// out. Blocks the current thread, retrying with exponential backoff
/// (from 100ms up to 15s between attempts), until the lock is acquired.
///
/// # Errors
/// If both `f` and the subsequent unlock fail, the error from `f` is
/// returned since it is the root cause; an unlock failure is only
/// surfaced when `f` itself succeeded.
pub fn with_lock<F, R>(conn: &PgConnection, site: &Site, f: F) -> Result<R, StoreError>
where
    F: FnOnce() -> Result<R, StoreError>,
{
    let mut backoff = ExponentialBackoff::new(Duration::from_millis(100), Duration::from_secs(15));
    while !advisory_lock::lock_deployment_session(conn, site)? {
        backoff.sleep();
    }

    let res = f();
    let unlocked = advisory_lock::unlock_deployment_session(conn, site);
    // NOTE(review): if `f` panics we never reach the unlock; the session
    // advisory lock is then held until the connection is closed — confirm
    // that is acceptable for callers.
    match res {
        // Don't let an unlock failure mask the error from `f`.
        Ok(r) => unlocked.map(|_| r),
        Err(e) => Err(e),
    }
}
Loading