Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,036 changes: 525 additions & 511 deletions Cargo.lock

Large diffs are not rendered by default.

7 changes: 6 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic
polkavm = "0.9.3"
polkavm-linker = "0.9.2"
polkavm-derive = "0.9.1"
derive_more = "0.99"
async-trait = "0.1.71"
hex = { version = "0.4.3", default-features = false }
log = { version = "0.4.20", default-features = false }
quote = { version = "1.0.33" }
serde = { version = "1.0.197", default-features = false }
Expand All @@ -59,12 +62,13 @@ similar-asserts = "1.1.0"
smallvec = "1.11.0"
hex-literal = { version = "0.4.1"}
color-print = "0.3.4"
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [
codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
"derive",
] }
scale-info = { version = "2.10.0", default-features = false, features = [
"derive",
] }
rand = { version = "0.8.5", default-features = false }

# Substrate / FRAME
frame-benchmarking = { version = "31.0.0", default-features = false}
Expand Down Expand Up @@ -170,6 +174,7 @@ cumulus-relay-chain-interface = { version = "0.10.0" }

# Local
popsicle-runtime = { path = "./runtime", default-features = false }
pallet-randomness = { path = "./pallets/randomness", default-features = false }
pallet-sequencer-grouping ={ path = "./pallets/sequencer-grouping", default-features = false }
pallet-sequencer-staking ={ path = "./pallets/sequencer-staking", default-features = false }
pallet-container = { path = "pallets/container", default-features = false }
Expand Down
1 change: 1 addition & 0 deletions node/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ color-print = { workspace = true }

# Local
popsicle-runtime = { workspace = true }
pallet-randomness = { workspace = true }

[build-dependencies]
substrate-build-script-utils = { workspace = true }
Expand Down
2 changes: 1 addition & 1 deletion node/src/chain_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ fn testnet_genesis(
"sequencerStaking": {
"candidates": vec![alice, bob],
"sequencerCommission": Perbill::from_percent(5),
"blocksPerRound": 1440,
"blocksPerRound": 10,
// "numSelectedCandidates": 2,
"delegations": Vec::<(AccountId, AccountId, u128)>::new(),
}
Expand Down
19 changes: 13 additions & 6 deletions node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
// Substrate Imports
use crate::container_task::spawn_container_task;
use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
use primitives_container::ContainerRuntimeApi;
use sc_client_api::Backend;
use sc_consensus::ImportQueue;
use sc_executor::{
Expand All @@ -35,12 +34,12 @@ use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_core::Pair;
// use sp_core::Pair;
use sp_keystore::KeystorePtr;
use sp_runtime::{app_crypto::AppCrypto, traits::BlakeTwo256};
// use sp_runtime::{app_crypto::AppCrypto, traits::BlakeTwo256};
use substrate_prometheus_endpoint::Registry;

use sp_runtime::AccountId32;
// use sp_runtime::AccountId32;
/// Native executor type.
pub struct ParachainNativeExecutor;

Expand Down Expand Up @@ -364,7 +363,8 @@ fn build_import_queue(
block_import,
move |_, _| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
Ok(timestamp)
let randomness = pallet_randomness::inherent::InherentDataProvider;;
Ok((timestamp, randomness))
},
slot_duration,
&task_manager.spawn_essential_handle(),
Expand Down Expand Up @@ -416,7 +416,14 @@ fn start_consensus(
);

let params = BasicAuraParams {
create_inherent_data_providers: move |_, ()| async move { Ok(()) },
create_inherent_data_providers: move |parent_header, inherent_data| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let randomness = pallet_randomness::inherent::InherentDataProvider {
client: client.clone(),
keystore: keystore.clone(),
};;
Ok((timestamp, randomness))
},
block_import,
para_client: client,
relay_client: relay_chain_interface,
Expand Down
1 change: 1 addition & 0 deletions pallets/container/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ sp-core = { workspace = true }

[dev-dependencies]
serde = { workspace = true }
rand = { workspace = true }

# Substrate
sp-core = { workspace = true }
Expand Down
28 changes: 25 additions & 3 deletions pallets/container/src/mock.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
use core::marker::PhantomData;
use frame_system::pallet_prelude::BlockNumberFor;
use crate::Config;

use frame_support::{derive_impl, parameter_types, traits::Everything};
use frame_system as system;
use pallet_sequencer_grouping::SimpleRandomness;
use sp_core::{ConstU32, H256};
use sp_runtime::{
traits::{BlakeTwo256, IdentityLookup},
traits::{BlakeTwo256, IdentityLookup, Hash},
BuildStorage,
};
type Block = frame_system::mocking::MockBlock<Test>;
Expand Down Expand Up @@ -49,12 +52,31 @@ impl frame_system::Config for Test {
type MaxConsumers = frame_support::traits::ConstU32<16>;
}

// Test-only implementation of the `Randomness` trait, seeded from OS entropy.
pub struct TestRandomness<T> {
    _marker: PhantomData<T>,
}

impl<T: Config> frame_support::traits::Randomness<T::Hash, BlockNumberFor<T>>
    for TestRandomness<T>
{
    /// Hash 32 bytes of OS entropy followed by `subject`, and pair the
    /// result with block number 0.
    fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor<T>) {
        use rand::{rngs::OsRng, RngCore};
        // 32 random bytes first, then the caller-supplied domain separator.
        let mut material = vec![0u8; 32];
        OsRng.fill_bytes(&mut material);
        material.extend_from_slice(subject);
        let randomness = T::Hashing::hash(&material);
        // NOTE: Test randomness is always "fresh" assuming block_number is > DrawingFreezeout
        let block_number = 0u32.into();
        (randomness, block_number)
    }
}

impl pallet_sequencer_grouping::Config for Test {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = ();
type MaxGroupSize = ConstU32<100>;
type MaxGroupNumber = ConstU32<100>;
type Randomness = SimpleRandomness<Self>;
type RandomnessSource = TestRandomness<Test>;
}

parameter_types! {
Expand Down
59 changes: 59 additions & 0 deletions pallets/randomness/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
[package]
authors.workspace = true
description = "Provides on-chain randomness"
edition.workspace = true
homepage.workspace = true
license.workspace = true
name = 'pallet-randomness'
repository.workspace = true
version = "0.1.0"


[dependencies]
async-trait = { workspace = true }
codec = { workspace = true }
frame-benchmarking = { workspace = true, optional = true }
frame-support = { workspace = true }
frame-system = { workspace = true }
hex = { workspace = true }
log = { workspace = true }
scale-info = { workspace = true }
serde = { workspace = true, optional = true }
sc-executor = { workspace = true }
sc-service = { workspace = true }
sp-core = { workspace = true }
sp-io = { workspace = true }
sp-keystore = { workspace = true }
sp-runtime = { workspace = true }
sp-std = { workspace = true }
sp-inherents = { workspace = true }

[dev-dependencies]
derive_more = { workspace = true }
pallet-balances = { workspace = true, features = ["std"] }

[features]
default = ["std"]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
]
std = [
"frame-support/std",
"frame-system/std",
"hex/std",
"codec/std",
"scale-info/std",
"serde",
"sp-core/std",
"sp-io/std",
"sp-runtime/std",
"sp-std/std",
"sp-inherents/std",
"log/std",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
]
115 changes: 115 additions & 0 deletions pallets/randomness/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# Randomness Solutions Tradeoff Analysis

This pallet provides access to 1 source of randomness:

1. The **BABE epoch randomness** is produced by the relay chain per relay chain epoch. It is based on **all the VRFs produced** by the relay chain validators **during** a complete **epoch** (~600 blocks on Kusama, ~2400 blocks on Polkadot). At the beginning of a new Epoch, those VRFs are **mixed together** and **hashed** in order to produce a **pseudo-random word**.

## CAP Theorem

The CAP Theorem says we can have at most 2 of the following properties: Consistency, Availability, and Partition Tolerance.

### Consistency

At any block, the random word is consistent between the nodes.

In practice it means that there **is only 1 possible random word generated** and so the node cannot choose between multiple ones.

### Example

Having multiple actors **sending a VRF proof** of the randomly generated word, and **revealing** their random words after all the proofs have been published. (the random word would be the hash of all the secrets)

By doing so, once the VRF proofs have all been published, there can only be 1 generated word.
_(This example however doesn't have the "Availability" property, as actors can "fail" to reveal their secret, preventing the random word from being available)_

### Availability

Every request to generate a random word receives a response.

In practice, it means a node **cannot withhold** any information that would **prevent** the random word request from receiving **a response**.

#### Example

Using the mandatory VRF of the current collator as a pseudo-random word makes it "available" as it is always present when the block is produced.

If the collator skips the block production, the next collator producing the block will be able to include its VRF to the block, allowing to provide the pseudo-random word.
_(This example however breaks the "consistency" because, for the given block, there was the possibility of 2 different pseudo-random words)_

### Partition Tolerance

The network continues to operate, even if an arbitrary number of nodes are failing.

In practice, it means that the randomness process cannot rely on a designated node but must, like the blockchain consensus, continue to work with a subset of the collators.

#### Example

Ex: Using the current block collator to produce the randomness output is partition tolerant.
If the current block collator fails to produce the block, the consensus will pick another collator allowing to produce the pseudo-random word.

## Breaking down in 2 categories

Because we can't get rid of the partition tolerance, we can only provide solutions that are compromising the Consistency or the Availability.

### Category 1: Availability Over Consistency

The solutions in category 1 provide a pseudo-random process that is guaranteed to provide a pseudo-random word but cannot ensure it hasn't been tampered before being revealed.

This is the case for the [Babe Epoch Randomness], which ensures each epoch provides a pseudo-random word but also allows the last validator of an epoch to know and pick between 2 different pseudo-random words by skipping the block production (at the cost of a relay chain block reward).

### Category 2: Consistency over Availability

The solutions in this category provide a pseudo-random word that cannot be tampered with and that is unique, but cannot provide the guarantee that it will always be possible to retrieve it.

This is the case of the [Mixed Delayed Secret] (not yet described), which will require collators to provide a VRF proof of locally generated secret, and to reveal it later once all the VRF proofs have been published. Such a solution will guarantee that, once the VRF proofs are published, it is impossible to provide a different pseudo-random word. It also guarantees that if at least 1 collator is a good actor, it will be impossible to know the pseudo-random word until all the secrets are revealed. However such solution cannot guarantee that a node will always be able to provide its secret (it can be lost, it can be attacked, or the node can be malicious and refuses to publish it).

## Babe Epoch Randomness

The Babe epoch randomness is based on **all the VRFs produced** by the validators **during** a complete **epoch** (~600 blocks on Kusama, ~2400 blocks on Polkadot).
At the beginning of a new Epoch, those VRFs are **mixed together** and **hashed** in order to produce a **pseudo-random word**.
To ensure each pseudo-random word generated during an epoch is different, the Smart Contract must provide a unique salt each time.

### Properties

- This randomness is totally **independent of the parachain**, preventing a malicious actor on the parachain to influence the randomness value.
- This randomness is **constant during a full epoch range** (~250 blocks on Kusama, ~2300 blocks on Polkadot) making it **resilient enough against censorship**. If a collator prevents fulfillment at a given block, another collator can fulfill it at the next block with the same random value.
- This randomness **requires** at least 1 epoch after the current epoch (**~1h30** on Kusama, **~6h** on Polkadot) to ensure the pseudo-random word cannot be predicted at the time of the request.

### Risks

The **danger** in this process comes from the knowledge that the **last validator** (Validator Y in the schema) has when producing the last block of an Epoch. The process being deterministic and all the material to generate the pseudo random word being known, the validator can decide to **skip producing the block** in order to not include its VRF, which would result in a different pseudo-random word.

Because epochs are time-based, if the block is skipped, there won't be any additional block produced for that epoch. So the last validator of the epoch knows both possible outputs:

1. When **producing the block** including its VRF => pseudo-random word **AAAA**
2. When **skipping the block** and using already known previous VRFs => pseudo-random word **BBBB**

The only **incentive** to prevent the validator from skipping the block is the **block rewards**. So the randomness value is only **economically safe if the value at stake is lower than a block reward**.

```sequence
note over Validator: Validator A
note over Relay: Epoch 1: Block #2399
Relay->Para: (Relay Block #2399)
note over Para: Block #111\nRequest Randomness (@Epoch 3)
note left of Relay: No knowledge of epoch 2 randomness\nexists yet
Validator->Relay: (Relay Block #2400)
note over Relay: Epoch 2: Block #2400\n(random epoch 1: 0xAAAAAA...)
note over Relay: .\n.\n.
note over Para: .\n.\n.
note over Validator: Validator X
Validator->Relay: Produces #4798\n(influences Epoch 2 Randomness\nbut doesn't know the result)
note over Validator: Validator Y
Validator->Relay: Produces #4799\n(knows/influences Epoch 2 Randomness)\ncan choose 0xBBBBBB... or 0xCCCCCC...
note over Relay: Epoch 3: Block #4800\n(random epoch 2: 0xBBBBBB...or 0xCCCCCC...)
Relay->Para: (Relay Block #4800)
note over Para: Block #222\nFulFill Randomness using\n0xBBBBBB...or 0xCCCCCC...
```

_In this schema, we can see that validator Y can decide the epoch 2 randomness by producing or skipping its block._

### Multiple slot leaders

Additionally, the Babe consensus can sometimes allow multiple validators to produce a block at the same slot. If that is the last slot of an Epoch, the selected validators can coordinate to decide which one produces the block, offering the choice of even more pseudo-random words.

### Asynchronous Backing

This solution is **safe** even after the asynchronous backing is supported as the pseudo-random is not dependent on which relay block the parachain block is referencing.
A collator being able to choose the relay block on top of which it builds the parachain block will not influence the pseudo-random word.
58 changes: 58 additions & 0 deletions pallets/randomness/src/benchmarks.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#![cfg(feature = "runtime-benchmarks")]

//! Benchmarking
use crate::{
Call, Config, InherentIncluded, Pallet, RandomnessResult, RandomnessResults, RelayEpoch,
RequestType,
};
use frame_benchmarking::{benchmarks, impl_benchmark_test_suite};
use frame_system::RawOrigin;

// Weight benchmarks for pallet-randomness, written with the
// `frame_benchmarking::benchmarks!` macro DSL: the block before `}: _(...)`
// is setup, the call itself is what gets timed, and `verify` checks state.
benchmarks! {
// Benchmark for inherent included in every block
set_babe_randomness_results {
// set the current relay epoch as 9, `get_epoch_index` configured to return 10
const BENCHMARKING_OLD_EPOCH: u64 = 9u64;
RelayEpoch::<T>::put(BENCHMARKING_OLD_EPOCH);
// Default hash stands in for the BABE output the call is expected to record.
let benchmarking_babe_output = T::Hash::default();
let benchmarking_new_epoch = BENCHMARKING_OLD_EPOCH.saturating_add(1u64);
// Pre-insert an empty result slot for the new epoch so the inherent has a
// request to fulfill.
RandomnessResults::<T>::insert(
RequestType::BabeEpoch(benchmarking_new_epoch),
RandomnessResult::new()
);
}: _(RawOrigin::None)
verify {
// verify randomness result
assert_eq!(
RandomnessResults::<T>::get(
RequestType::BabeEpoch(benchmarking_new_epoch)
).unwrap().randomness,
Some(benchmarking_babe_output)
);
// The inherent must mark itself as included for the block to be valid.
assert!(InherentIncluded::<T>::get().is_some());
// The stored relay epoch advances to the one reported by `get_epoch_index`.
assert_eq!(
RelayEpoch::<T>::get(),
benchmarking_new_epoch
);
}
}

#[cfg(test)]
mod tests {
    use crate::mock::Test;
    use sp_io::TestExternalities;
    use sp_runtime::BuildStorage;

    /// Externalities built from the mock runtime's default genesis state,
    /// used as the environment for the generated benchmark tests.
    pub fn new_test_ext() -> TestExternalities {
        let storage = frame_system::GenesisConfig::<Test>::default()
            .build_storage()
            .unwrap();
        TestExternalities::new(storage)
    }
}

// Generate a #[test] per benchmark above, executed against the mock runtime.
impl_benchmark_test_suite!(
Pallet,
crate::benchmarks::tests::new_test_ext(),
crate::mock::Test
);
Loading