fix: Prevent issues with fnames being associated with multiple fids #442

Open · wants to merge 5 commits into main · Changes from 4 commits
25 changes: 25 additions & 0 deletions src/mempool/mempool.rs
@@ -488,6 +488,31 @@ impl Mempool {
.message_router
.route_fid(fid, self.read_node_mempool.num_shards);

self.insert_into_shard(shard_id, message.clone(), source.clone())
.await;

// Fname transfers are mirrored to both the sender and receiver shard.
if let MempoolMessage::ValidatorMessage(inner_message) = &message {
if let Some(fname_transfer) = &inner_message.fname_transfer {
let mirror_shard_id = self
.read_node_mempool
.message_router
.route_fid(fname_transfer.from_fid, self.read_node_mempool.num_shards);

if mirror_shard_id != shard_id {
self.insert_into_shard(mirror_shard_id, message, source)
.await;
}
}
}
}

async fn insert_into_shard(
&mut self,
shard_id: u32,
message: MempoolMessage,
source: MempoolSource,
) {
match self.messages.get_mut(&shard_id) {
Some(shard_messages) => {
if shard_messages.contains_key(&message.mempool_key()) {
Review thread (on the insert_into_shard(mirror_shard_id, ...) call):

Contributor: Hmm, this technically affects consensus. But we might be OK if nodes upgrade at different times, since this logic is only hit in the proposer? Other nodes will behave the same regardless of version, as long as the transfer is in the transactions list.

Contributor (Author): Not sure it will affect consensus in practice, since it only changes where the message is stored.
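For context on the mirror, a minimal sketch under an assumed modulo-based routing rule (hypothetical; the real MessageRouter may hash fids instead): any deterministic fid-to-shard mapping can put the two sides of a transfer on different shards, so a message routed only by the receiving fid would never reach the shard that still holds the sender's fname.

    // Hypothetical routing rule for illustration only; the actual
    // MessageRouter implementation may differ.
    fn route_fid(fid: u64, num_shards: u32) -> u32 {
        (fid % num_shards as u64) as u32 + 1
    }

    fn main() {
        let num_shards = 2;
        // Illustrative fids that land on different shards under this rule.
        let (from_fid, to_fid) = (3u64, 6u64);
        assert_ne!(route_fid(from_fid, num_shards), route_fid(to_fid, num_shards));
        // Without the mirror, only the receiver's shard would see the
        // transfer, leaving a stale fname association on the sender's shard.
    }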
7 changes: 7 additions & 0 deletions src/storage/store/account/name_registry_events.rs
@@ -73,6 +73,7 @@ pub fn get_fname_proof_by_fid(db: &RocksDB, fid: u64) -> Result<Option<UserNameP
pub fn put_username_proof_transaction(
txn: &mut RocksDbTransactionBatch,
username_proof: &UserNameProof,
existing_fid: Option<u64>,
) {
let buf = username_proof.encode_to_vec();

@@ -81,6 +82,12 @@ pub fn put_username_proof_transaction(

let secondary_key = make_fname_username_proof_by_fid_key(username_proof.fid);
txn.put(secondary_key, primary_key);

// If a username is being transferred, remove the existing secondary key.
if let Some(existing_fid) = existing_fid {
let secondary_key = make_fname_username_proof_by_fid_key(existing_fid);
txn.delete(secondary_key);
}
}

Review thread (on the txn.delete(secondary_key) line):

Contributor: Hmm, we have to consider how to handle existing bad data. This doesn't break consensus, since it's just an index change, but we might need a migration to fix it, and we don't have any migration harness code. I'll ticket this; let me know if you're interested in taking that on.

Contributor (Author): Feels like it's probably something to deal with outside of this PR.
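To make the stale-index failure mode concrete, here is a minimal in-memory sketch. The HashMaps are a hypothetical stand-in for the real store, which keeps a name-keyed primary record plus a fid-keyed secondary key in RocksDB via put_username_proof_transaction:

    use std::collections::HashMap;

    // Illustrative stand-in for the fname index: a primary record keyed by
    // name and a secondary index keyed by fid.
    #[derive(Default)]
    struct FnameIndex {
        by_name: HashMap<String, u64>,
        by_fid: HashMap<u64, String>,
    }

    impl FnameIndex {
        // Before this PR: writing the new proof without deleting the old
        // secondary key left the previous owner's fid still pointing at the
        // fname, so the name appeared associated with two fids at once.
        fn transfer(&mut self, fname: &str, existing_fid: Option<u64>, new_fid: u64) {
            self.by_name.insert(fname.to_string(), new_fid);
            self.by_fid.insert(new_fid, fname.to_string());
            // The fix: drop the stale secondary key for the previous owner.
            if let Some(old_fid) = existing_fid {
                self.by_fid.remove(&old_fid);
            }
        }
    }

    fn main() {
        let mut index = FnameIndex::default();
        index.transfer("erica", None, 211428); // initial registration
        index.transfer("erica", Some(211428), 20270); // transfer to new fid
        assert_eq!(index.by_name.get("erica"), Some(&20270));
        assert!(index.by_fid.get(&211428).is_none()); // old owner no longer indexed
        assert_eq!(index.by_fid.get(&20270).map(String::as_str), Some("erica"));
    }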
2 changes: 1 addition & 1 deletion src/storage/store/account/user_data_store.rs
@@ -278,7 +278,7 @@ impl UserDataStore {
if username_proof.fid == 0 {
delete_username_proof_transaction(txn, username_proof, existing_fid);
} else {
put_username_proof_transaction(txn, username_proof);
put_username_proof_transaction(txn, username_proof, existing_fid);
}

let mut hub_event = HubEvent::from(
108 changes: 106 additions & 2 deletions tests/consensus_test.rs
@@ -16,7 +16,8 @@ use snapchain::node::snapchain_read_node::SnapchainReadNode;
use snapchain::proto::hub_service_server::HubServiceServer;
use snapchain::proto::{self, Height};
use snapchain::proto::{Block, FarcasterNetwork, IdRegisterEventType, SignerEventType};
use snapchain::storage::db::{PageOptions, RocksDB};
use snapchain::storage::db::{PageOptions, RocksDB, RocksDbTransactionBatch};
use snapchain::storage::store::account::UserDataStore;
use snapchain::storage::store::engine::MempoolMessage;
use snapchain::storage::store::node_local_state::LocalStateStore;
use snapchain::storage::store::stores::Stores;
@@ -37,7 +38,7 @@ const HOST_FOR_TEST: &str = "127.0.0.1";
const BASE_PORT_FOR_TEST: u32 = 9482;

struct NodeForTest {
node: SnapchainNode,
pub node: SnapchainNode,
db: Arc<RocksDB>,
block_store: BlockStore,
mempool_tx: mpsc::Sender<MempoolRequest>,
@@ -570,6 +571,19 @@ async fn register_fid(fid: u64, messages_tx: Sender<MempoolRequest>) -> SigningK
signer
}

async fn transfer_fname(transfer: proto::FnameTransfer, messages_tx: Sender<MempoolRequest>) {
messages_tx
.send(MempoolRequest::AddMessage(
MempoolMessage::ValidatorMessage(proto::ValidatorMessage {
on_chain_event: None,
fname_transfer: Some(transfer),
}),
MempoolSource::Local,
))
.await
.unwrap();
}

async fn send_messages(messages_tx: mpsc::Sender<MempoolRequest>) {
let mut i: i32 = 0;
let prefix = vec![0, 0, 0, 0, 0, 0];
@@ -770,3 +784,93 @@ async fn test_read_node() {
wait_for_read_node_blocks(read_node, 1).await;
}
}

#[tokio::test]
#[serial]
async fn test_cross_shard_interactions() {
let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("warn"));
let _ = tracing_subscriber::fmt()
.with_env_filter(env_filter)
.try_init();

let num_shards = 2;
let mut network = TestNetwork::create(3, num_shards, 3380).await;
network.start_validators().await;

let messages_tx = network.nodes[0].mempool_tx.clone();

register_fid(20270, messages_tx.clone()).await;
register_fid(211428, messages_tx.clone()).await;

let fname = "erica";

transfer_fname(
proto::FnameTransfer {
id: 43782,
from_fid: 0,
proof: Some(proto::UserNameProof {
timestamp: 1741384226,
name: fname.as_bytes().to_vec(),
fid: 211428,
owner: hex::decode("2b4d92e7626c5fc56cb4641f6f758563de1f6bdc").unwrap(),
signature: hex::decode("050b42fdda7b0a7309a1fb8a2cbc9a5f4bbf241aec74f53191f9665d9b9f572d4f452ac807911af7b6980219482d6f7fda7f99f23ab19c961b4701b9934fa2f91b").unwrap(),
r#type: proto::UserNameType::UsernameTypeFname as i32,
}),
},
messages_tx.clone(),
)
.await;

transfer_fname(
proto::FnameTransfer {
id: 829595,
from_fid: 211428,
proof: Some(proto::UserNameProof {
timestamp: 1741384226,
name: fname.as_bytes().to_vec(),
fid: 20270,
owner: hex::decode("92ce59c18a97646e9a7e011653d8417d3a08bb2b").unwrap(),
signature: hex::decode("00c3601c515edffe208e7128f47f89c2fb7b8e0beaaf615158305ddf02818a71679a8e7062503be59a19d241bd0b47396a3c294cfafd0d5478db1ae8249463bd1c").unwrap(),
r#type: proto::UserNameType::UsernameTypeFname as i32,
}),
},
messages_tx.clone(),
)
.await;

tokio::time::sleep(time::Duration::from_millis(200)).await;

network.produce_blocks(1).await;

for i in 0..network.nodes.len() {
assert!(
network.nodes[i].num_blocks().await > 0,
"Node {} should have confirmed blocks",
i
);

let node = &network.nodes[i].node;
node.shard_stores.iter().for_each(|(_, stores)| {
// After both transfers, the fname must resolve to the final recipient
// on every shard, including the shard the transfer was mirrored to.
let proof = UserDataStore::get_username_proof(
&stores.user_data_store,
&mut RocksDbTransactionBatch::new(),
&fname.as_bytes().to_vec(),
)
.unwrap()
.unwrap();

assert_eq!(proof.fid, 20270);
});
}
}
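Worth noting: the assertion above reads through the name-keyed lookup only. A stronger check for the exact bug this PR fixes would also exercise the fid-keyed secondary index, along these lines. This is a hedged sketch: the import path and the way the test reaches a shard's RocksDB handle are assumptions, not taken from this diff; only the get_fname_proof_by_fid signature comes from name_registry_events.rs above.

    // Assumed import paths; the real module layout may differ.
    use snapchain::storage::db::RocksDB;
    use snapchain::storage::store::account::get_fname_proof_by_fid;

    fn assert_fname_index(db: &RocksDB) {
        // The previous owner should no longer appear in the fid index...
        assert!(get_fname_proof_by_fid(db, 211428).unwrap().is_none());
        // ...and the new owner should resolve to the transferred fname.
        let proof = get_fname_proof_by_fid(db, 20270).unwrap().unwrap();
        assert_eq!(proof.name, "erica".as_bytes().to_vec());
    }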