Skip to content

Clean up some dead_code annotations #6103

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions graph/src/components/subgraph/proof_of_indexing/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ impl SharedProofOfIndexing {
#[cfg(test)]
mod tests {
use super::*;
use crate::util::stable_hash_glue::{impl_stable_hash, AsBytes};
use crate::{
data::store::Id,
prelude::{BlockPtr, DeploymentHash, Value},
Expand All @@ -97,6 +98,33 @@ mod tests {
use std::convert::TryInto;
use web3::types::{Address, H256};

/// The PoI is the StableHash of this struct. This reference implementation is
/// mostly here just to make sure that the online implementation is
/// well-implemented (without conflicting sequence numbers, or other oddities).
/// It's just way easier to check that this works, and serves as a kind of
/// documentation as a side-benefit.
pub struct PoI<'a> {
    /// Per-causality-region PoI fragments, keyed by region name.
    pub causality_regions: HashMap<String, PoICausalityRegion<'a>>,
    pub subgraph_id: DeploymentHash,
    pub block_hash: H256,
    /// Optional indexer address folded into the hash.
    pub indexer: Option<Address>,
}

/// Adapt an `H256` to the `AsBytes` wrapper expected by `impl_stable_hash!`.
fn h256_as_bytes(val: &H256) -> AsBytes<&[u8]> {
    let raw: &[u8] = val.as_bytes();
    AsBytes(raw)
}

/// Adapt an optional indexer `Address` to an optional `AsBytes` wrapper,
/// preserving `None`.
fn indexer_opt_as_bytes(val: &Option<Address>) -> Option<AsBytes<&[u8]>> {
    match val {
        Some(addr) => Some(AsBytes(addr.as_bytes())),
        None => None,
    }
}

// Wire `StableHash` for the reference `PoI`: plain fields hash directly,
// while the H256/Address fields go through the byte adapters above.
impl_stable_hash!(PoI<'_> {
    causality_regions,
    subgraph_id,
    block_hash: h256_as_bytes,
    indexer: indexer_opt_as_bytes
});

/// Verify that the stable hash of a reference and online implementation match
fn check(case: Case, cache: &mut HashMap<String, &str>) {
let logger = Logger::root(Discard, o!());
Expand Down
35 changes: 1 addition & 34 deletions graph/src/components/subgraph/proof_of_indexing/reference.rs
Original file line number Diff line number Diff line change
@@ -1,38 +1,5 @@
use super::ProofOfIndexingEvent;
use crate::prelude::DeploymentHash;
use crate::util::stable_hash_glue::{impl_stable_hash, AsBytes};
use std::collections::HashMap;
use web3::types::{Address, H256};

/// The PoI is the StableHash of this struct. This reference implementation is
/// mostly here just to make sure that the online implementation is
/// well-implemented (without conflicting sequence numbers, or other oddities).
/// It's just way easier to check that this works, and serves as a kind of
/// documentation as a side-benefit.
#[allow(dead_code)]
pub struct PoI<'a> {
pub causality_regions: HashMap<String, PoICausalityRegion<'a>>,
pub subgraph_id: DeploymentHash,
pub block_hash: H256,
pub indexer: Option<Address>,
}

#[allow(dead_code)]
fn h256_as_bytes(val: &H256) -> AsBytes<&[u8]> {
AsBytes(val.as_bytes())
}

#[allow(dead_code)]
fn indexer_opt_as_bytes(val: &Option<Address>) -> Option<AsBytes<&[u8]>> {
val.as_ref().map(|v| AsBytes(v.as_bytes()))
}

impl_stable_hash!(PoI<'_> {
causality_regions,
subgraph_id,
block_hash: h256_as_bytes,
indexer: indexer_opt_as_bytes
});
use crate::util::stable_hash_glue::impl_stable_hash;

pub struct PoICausalityRegion<'a> {
pub blocks: Vec<Block<'a>>,
Expand Down
33 changes: 0 additions & 33 deletions graph/src/data_source/offchain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -374,39 +374,6 @@ pub struct UnresolvedMapping {
pub entities: Vec<String>,
}

impl UnresolvedDataSource {
    /// Resolve this unresolved offchain data source into a `DataSource`.
    ///
    /// Parses the manifest `kind` string, derives the `source` from the
    /// bytes of the file link, and resolves the mapping via `resolver`.
    /// The returned `DataSource` starts with an empty context, no creation
    /// block, and `done_at` initialized to `NOT_DONE_VALUE`.
    #[allow(dead_code)]
    pub(super) async fn resolve(
        self,
        resolver: &Arc<dyn LinkResolver>,
        logger: &Logger,
        manifest_idx: u32,
        causality_region: CausalityRegion,
        schema: &InputSchema,
    ) -> Result<DataSource, Error> {
        info!(logger, "Resolve offchain data source";
            "name" => &self.name,
            "kind" => &self.kind,
            "source" => format_args!("{:?}", &self.source),
        );

        // The manifest `kind` string selects the offchain flavor; unknown
        // kinds fail here.
        let kind = OffchainDataSourceKind::from_str(self.kind.as_str())?;
        // NOTE(review): the source is parsed from the bytes of the *link
        // string* itself, not from fetched file contents — confirm intent.
        let source = kind.try_parse_source(Bytes::from(self.source.file.link.as_bytes()))?;

        Ok(DataSource {
            manifest_idx,
            kind,
            name: self.name,
            source,
            mapping: self.mapping.resolve(resolver, schema, logger).await?,
            context: Arc::new(None),
            creation_block: None,
            done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)),
            causality_region,
        })
    }
}

impl UnresolvedMapping {
pub async fn resolve(
self,
Expand Down
1 change: 0 additions & 1 deletion graph/src/data_source/subgraph.rs
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,6 @@ impl UnresolvedDataSource {
Ok(())
}

#[allow(dead_code)]
pub(super) async fn resolve<C: Blockchain>(
self,
resolver: &Arc<dyn LinkResolver>,
Expand Down
1 change: 0 additions & 1 deletion graph/src/schema/input/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1702,7 +1702,6 @@ mod validations {

/// Helper struct for validations
struct Schema<'a> {
#[allow(dead_code)]
spec_version: &'a Version,
schema: &'a BaseSchema,
subgraph_schema_type: Option<&'a s::ObjectType>,
Expand Down
1 change: 0 additions & 1 deletion graphql/src/store/resolver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ use crate::query::ext::BlockConstraint;
/// A resolver that fetches entities from a `Store`.
#[derive(Clone, CheapClone)]
pub struct StoreResolver {
#[allow(dead_code)]
logger: Logger,
pub(crate) store: Arc<dyn QueryStore>,
pub(crate) block_ptr: Option<BlockPtr>,
Expand Down
1 change: 0 additions & 1 deletion runtime/wasm/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ pub enum DeterminismLevel {
Deterministic,

/// This error is known to be non-deterministic. For example, an intermittent http failure.
#[allow(dead_code)]
NonDeterministic,

/// The runtime is processing a given block, but there is an indication that the blockchain client
Expand Down
9 changes: 0 additions & 9 deletions runtime/wasm/src/to_from/external.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ use graph::runtime::{
use graph::{data::store, runtime::DeterministicHostError};
use graph::{prelude::serde_json, runtime::FromAscObj};
use graph::{prelude::web3::types as web3, runtime::AscHeap};
use graph_runtime_derive::AscType;

use crate::asc_abi::class::*;

Expand Down Expand Up @@ -465,14 +464,6 @@ where
}
}

/// Entity operation variants as represented on the AssemblyScript side
/// (via the derived `AscType` encoding).
#[derive(Debug, Clone, Eq, PartialEq, AscType)]
#[allow(dead_code)]
pub enum AscSubgraphEntityOp {
    Create,
    Modify,
    Delete,
}

impl ToAscObj<AscEnum<YamlValueKind>> for serde_yaml::Value {
fn to_asc_obj<H: AscHeap + ?Sized>(
&self,
Expand Down
1 change: 0 additions & 1 deletion server/index-node/src/resolver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@ pub struct IndexNodeResolver<S: Store> {
logger: Logger,
blockchain_map: Arc<BlockchainMap>,
store: Arc<S>,
#[allow(dead_code)]
link_resolver: Arc<dyn LinkResolver>,
bearer_token: Option<String>,
}
Expand Down
1 change: 0 additions & 1 deletion store/postgres/src/chain_head_listener.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ impl Watcher {
}
}

#[allow(dead_code)]
fn send(&self) {
// Unwrap: `self` holds a receiver.
self.sender.send(()).unwrap()
Expand Down
27 changes: 3 additions & 24 deletions store/postgres/src/deployment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,12 @@ use graph::{
schema::InputSchema,
};
use graph::{
data::subgraph::{
schema::{DeploymentCreate, SubgraphManifestEntity},
SubgraphFeature,
},
data::subgraph::schema::{DeploymentCreate, SubgraphManifestEntity},
util::backoff::ExponentialBackoff,
};
use stable_hash_legacy::crypto::SetHasher;
use std::{collections::BTreeSet, convert::TryFrom, ops::Bound, time::Duration};
use std::{str::FromStr, sync::Arc};
use std::sync::Arc;
use std::{convert::TryFrom, ops::Bound, time::Duration};

use crate::ForeignServer;
use crate::{block_range::BLOCK_RANGE_COLUMN, primary::Site};
Expand Down Expand Up @@ -403,24 +400,6 @@ pub fn set_history_blocks(
.map_err(StoreError::from)
}

/// Load the set of `SubgraphFeature`s declared in the manifest of the
/// deployment at `site`.
///
/// Returns a `StoreError` if the database query fails or if a stored
/// feature string does not parse.
#[allow(dead_code)]
pub fn features(
    conn: &mut PgConnection,
    site: &Site,
) -> Result<BTreeSet<SubgraphFeature>, StoreError> {
    use subgraph_manifest as sm;

    // Propagate database errors instead of panicking on them; this function
    // already returns `Result`, so an `unwrap` here was a latent panic on
    // any transient DB failure.
    let features: Vec<String> = sm::table
        .select(sm::features)
        .filter(sm::id.eq(site.id))
        .first(conn)
        .map_err(StoreError::from)?;
    features
        .iter()
        .map(|f| SubgraphFeature::from_str(f).map_err(StoreError::from))
        .collect()
}

/// This migrates subgraphs that existed before the raw_yaml column was added.
pub fn set_manifest_raw_yaml(
conn: &mut PgConnection,
Expand Down
9 changes: 0 additions & 9 deletions store/postgres/src/relational_queries.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4973,15 +4973,6 @@ impl<'a> Query for CountCurrentVersionsQuery<'a> {

impl<'a, Conn> RunQueryDsl<Conn> for CountCurrentVersionsQuery<'a> {}

/// Helper struct for returning the id's touched by the RevertRemove and
/// RevertExtend queries
// NOTE(review): the name `CopyVid` suggests use by copy queries rather than
// the Revert* queries named above — confirm which queries actually return it.
#[derive(QueryableByName, PartialEq, Eq, Hash)]
#[allow(dead_code)]
pub struct CopyVid {
    // Row `vid` mapped from a Postgres BIGINT column.
    #[diesel(sql_type = BigInt)]
    pub vid: i64,
}

fn write_column_names(
column_names: &AttributeNames,
table: dsl::Table<'_>,
Expand Down