From 924a03c965c2ead57029e676311456d93c13191f Mon Sep 17 00:00:00 2001 From: quantumshiro Date: Fri, 1 Aug 2025 19:44:18 +0900 Subject: [PATCH 1/5] fix: remove emojis --- src/main.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/main.rs b/src/main.rs index 1434c36..83aeb8b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -472,8 +472,8 @@ fn main() -> Result<()> { async fn async_main() -> Result<()> { // Docker output debugging - println!("🐳 PolyTorus starting in Docker container..."); - eprintln!("🐳 PolyTorus stderr test..."); + println!("PolyTorus starting in Docker container..."); + eprintln!("PolyTorus stderr test..."); // Initialize logging if env::var("RUST_LOG").is_err() { @@ -481,7 +481,7 @@ async fn async_main() -> Result<()> { } env_logger::init(); - println!("🐳 Environment initialized, parsing commands..."); + println!("Environment initialized, parsing commands..."); let matches = Command::new("polytorus") .version("0.1.0") @@ -600,7 +600,7 @@ async fn async_main() -> Result<()> { let mut blockchain = PolyTorusBlockchain::new()?; let _genesis_id = blockchain.initialize_genesis().await?; info!("PolyTorus node started successfully"); - println!("✅ PolyTorus blockchain node started successfully"); + println!("PolyTorus blockchain node started successfully"); println!("Genesis UTXO initialized with 10,000,000 units"); info!("Start command completed successfully - exiting"); @@ -643,9 +643,9 @@ async fn async_main() -> Result<()> { let mut blockchain = PolyTorusBlockchain::new_with_p2p_config(Some(p2p_config))?; let _genesis_id = blockchain.initialize_genesis().await?; - println!("🚀 Starting PolyTorus P2P node: {}", node_id); - println!("📡 Listening on port: {}", listen_port); - println!("🔗 Bootstrap peers: {:?}", bootstrap_peers); + println!("Starting PolyTorus P2P node: {}", node_id); + println!("Listening on port: {}", listen_port); + println!("Bootstrap peers: {:?}", bootstrap_peers); // Start P2P 
network info!("Starting P2P network..."); @@ -662,7 +662,7 @@ async fn async_main() -> Result<()> { match blockchain.send_transaction(from, to, amount).await { Ok(tx_hash) => { - println!("✅ Transaction sent successfully"); + println!("Transaction sent successfully"); println!("Transaction Hash: {}", tx_hash); println!("From: {}", from); println!("To: {}", to); @@ -670,15 +670,15 @@ async fn async_main() -> Result<()> { } Err(e) => { error!("Failed to send transaction: {}", e); - println!("❌ Transaction failed: {}", e); + println!("Transaction failed: {}", e); } } } Some(("status", _)) => { - println!("🐳 Docker: Executing status command..."); + println!("Docker: Executing status command..."); let blockchain = PolyTorusBlockchain::new()?; blockchain.get_status().await?; - println!("🐳 Docker: Status command completed."); + println!("Docker: Status command completed."); } Some(("deploy-contract", sub_matches)) => { let wasm_file = sub_matches.get_one::("wasm-file").unwrap(); @@ -698,7 +698,7 @@ async fn async_main() -> Result<()> { match blockchain.deploy_contract(owner, wasm_bytes, name).await { Ok(script_hash) => { - println!("✅ Contract deployed successfully"); + println!("Contract deployed successfully"); println!("Contract Hash: {}", script_hash); println!("Owner: {}", owner); if let Some(n) = name { @@ -707,7 +707,7 @@ async fn async_main() -> Result<()> { } Err(e) => { error!("Failed to deploy contract: {}", e); - println!("❌ Contract deployment failed: {}", e); + println!("Contract deployment failed: {}", e); } } } @@ -733,7 +733,7 @@ async fn async_main() -> Result<()> { .await { Ok(tx_hash) => { - println!("✅ Contract call successful"); + println!("Contract call successful"); println!("Transaction Hash: {}", tx_hash); println!("Contract: {}", contract); println!("Method: {}", method); @@ -741,7 +741,7 @@ async fn async_main() -> Result<()> { } Err(e) => { error!("Failed to call contract: {}", e); - println!("❌ Contract call failed: {}", e); + 
println!("Contract call failed: {}", e); } } } From 80fec4e3c3f5b7dde07a79c26eed932f358d0079 Mon Sep 17 00:00:00 2001 From: quantumshiro Date: Fri, 1 Aug 2025 22:33:48 +0900 Subject: [PATCH 2/5] add: sdk --- Cargo.lock | 70 +++++ Cargo.toml | 1 + crates/sdk/Cargo.toml | 48 ++++ crates/sdk/src/lib.rs | 582 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 701 insertions(+) create mode 100644 crates/sdk/Cargo.toml create mode 100644 crates/sdk/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 80c3a8f..41f706e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,6 +239,28 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -2479,6 +2501,30 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sdk" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "consensus", + "data-availability", + "env_logger", + "execution", + "hex", + "log", + "serde", + "serde_json", + "settlement", + "sha2", + "tokio", + "tokio-test", + "traits", + "uuid", + "wallet", +] + [[package]] name = "sdp" version = "0.6.2" @@ -2984,6 +3030,30 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-test" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + [[package]] name = "tokio-util" version = "0.7.15" diff --git a/Cargo.toml b/Cargo.toml index 91fb38d..79b38cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ members = [ "crates/consensus", "crates/data-availability", "crates/p2p-network", + "crates/sdk", ] resolver = "2" diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml new file mode 100644 index 0000000..49e45b1 --- /dev/null +++ b/crates/sdk/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "sdk" +version = "0.1.0" +edition = "2021" +rust-version = "1.82" +description = "PolyTorus SDK for developers to interact with the 4-layer modular blockchain" +authors = ["quantumshiro"] +license = "MIT" +repository = "https://github.com/quantumshiro/polytorus" +keywords = ["blockchain", "sdk", "polytorus", "modular", "api"] +categories = ["api-bindings", "cryptography", "network-programming"] + +[dependencies] +# Core PolyTorus dependencies +traits = { path = "../traits" } +execution = { path = "../execution" } +settlement = { path = "../settlement" } +consensus = { path = "../consensus" } +data-availability = { path = "../data-availability" } + +# External wallet dependency +wallet = { git = "https://github.com/PolyTorus/wallet.git" } + +# Core dependencies +anyhow = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +async-trait = { workspace = true } +log = { workspace = true } + +# Utilities +chrono = { workspace = true } +uuid = { workspace = true } +hex = { workspace = true } +sha2 = { workspace = true } + 
+[dev-dependencies] +tokio-test = "0.4" +env_logger = { workspace = true } + +[features] +default = ["full"] +full = ["execution-layer", "settlement-layer", "consensus-layer", "data-availability-layer"] +execution-layer = [] +settlement-layer = [] +consensus-layer = [] +data-availability-layer = [] \ No newline at end of file diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs new file mode 100644 index 0000000..edc5a12 --- /dev/null +++ b/crates/sdk/src/lib.rs @@ -0,0 +1,582 @@ +//! PolyTorus SDK - Developer-friendly interface for the 4-layer modular blockchain +//! +//! This SDK provides a high-level, easy-to-use interface for developers to interact with +//! the PolyTorus blockchain platform. It abstracts the complexity of the 4-layer architecture +//! and provides simple methods for common blockchain operations. +//! +//! # Features +//! +//! - **Transaction Management**: Create, sign, and submit transactions +//! - **Wallet Integration**: Full HD wallet support with BIP32/BIP44 +//! - **Smart Contracts**: Deploy and interact with WASM-based contracts +//! - **Block Operations**: Query blocks, mining, and validation +//! - **Data Availability**: Store and retrieve data with proofs +//! - **Layer Abstraction**: Direct access to individual layers when needed +//! +//! # Quick Start +//! +//! ```rust +//! use sdk::{PolyTorusClient, ClientConfig}; +//! +//! #[tokio::main] +//! async fn main() -> anyhow::Result<()> { +//! // Create a new client +//! let client = PolyTorusClient::new(ClientConfig::default()).await?; +//! +//! // Create a wallet +//! let wallet = client.create_wallet().await?; +//! +//! // Send a transaction +//! let tx_hash = client.send_transaction( +//! &wallet, +//! "recipient_address", +//! 1000, // amount +//! None // data +//! ).await?; +//! +//! println!("Transaction sent: {}", tx_hash); +//! Ok(()) +//! } +//! 
``` + +use anyhow::anyhow; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; + +// Re-export core types for convenience +pub use traits::*; +pub use wallet::{HdWallet, Wallet, Address as WalletAddress, KeyPair, Signature, Mnemonic}; + +// Internal layer imports +use consensus::{PolyTorusConsensusLayer, ConsensusConfig}; +use data_availability::{PolyTorusDataAvailabilityLayer, DataAvailabilityConfig}; +use execution::{PolyTorusExecutionLayer, ExecutionConfig}; +use settlement::{PolyTorusSettlementLayer, SettlementConfig}; + +// ============================================================================ +// SDK Configuration +// ============================================================================ + +/// Configuration for the PolyTorus client +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClientConfig { + pub network: NetworkConfig, + pub layers: LayerConfigs, + pub wallet: WalletConfig, +} + +impl Default for ClientConfig { + fn default() -> Self { + Self { + network: NetworkConfig::default(), + layers: LayerConfigs::default(), + wallet: WalletConfig::default(), + } + } +} + +/// Network configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkConfig { + pub chain_id: u64, + pub network_name: String, + pub is_testnet: bool, +} + +impl Default for NetworkConfig { + fn default() -> Self { + Self { + chain_id: 1, + network_name: "polytorus-mainnet".to_string(), + is_testnet: false, + } + } +} + +/// Layer configurations +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LayerConfigs { + pub execution: ExecutionConfig, + pub settlement: SettlementConfig, + pub consensus: ConsensusConfig, + pub data_availability: DataAvailabilityConfig, +} + +impl Default for LayerConfigs { + fn default() -> Self { + Self { + execution: ExecutionConfig::default(), + settlement: SettlementConfig::default(), + consensus: ConsensusConfig::default(), + 
data_availability: DataAvailabilityConfig::default(), + } + } +} + +/// Wallet configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WalletConfig { + pub derivation_path: String, + pub address_format: String, +} + +impl Default for WalletConfig { + fn default() -> Self { + Self { + derivation_path: "m/44'/0'/0'".to_string(), + address_format: "native_segwit".to_string(), + } + } +} + +// ============================================================================ +// SDK Client +// ============================================================================ + +/// High-level client for interacting with PolyTorus blockchain +pub struct PolyTorusClient { + config: ClientConfig, + execution_layer: Arc>, + settlement_layer: Arc>, + consensus_layer: Arc>, + data_availability_layer: Arc>, + wallets: Arc>>, +} + +impl PolyTorusClient { + /// Create a new PolyTorus client + pub async fn new(config: ClientConfig) -> Result { + let execution_layer = Arc::new(RwLock::new( + PolyTorusExecutionLayer::new(config.layers.execution.clone())?, + )); + let settlement_layer = Arc::new(RwLock::new( + PolyTorusSettlementLayer::new(config.layers.settlement.clone())?, + )); + let consensus_layer = Arc::new(RwLock::new( + PolyTorusConsensusLayer::new(config.layers.consensus.clone())?, + )); + let data_availability_layer = Arc::new(RwLock::new( + PolyTorusDataAvailabilityLayer::new(config.layers.data_availability.clone())?, + )); + + Ok(Self { + config, + execution_layer, + settlement_layer, + consensus_layer, + data_availability_layer, + wallets: Arc::new(RwLock::new(HashMap::new())), + }) + } + + /// Create a new client with default configuration + pub async fn new_default() -> Result { + Self::new(ClientConfig::default()).await + } + + // ======================================================================== + // Wallet Management + // ======================================================================== + + /// Create a new HD wallet + pub async fn 
create_wallet(&self) -> Result { + let wallet = HdWallet::new(wallet::KeyType::Ed25519) + .map_err(|e| anyhow!("Failed to create wallet: {}", e))?; + + let mnemonic_phrase = wallet.get_mnemonic().phrase().to_string(); + + let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) + .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; + let address = base_wallet.default_address() + .map_err(|e| anyhow!("Failed to get address: {}", e))?.value; + + let wallet_id = uuid::Uuid::new_v4().to_string(); + + let mut wallets = self.wallets.write().await; + wallets.insert(wallet_id.clone(), wallet); + + Ok(WalletInfo { + id: wallet_id, + address, + mnemonic: mnemonic_phrase, + derivation_path: self.config.wallet.derivation_path.clone(), + }) + } + + /// Import wallet from mnemonic + pub async fn import_wallet(&self, mnemonic: &str, _passphrase: Option<&str>) -> Result { + let wallet = HdWallet::from_phrase(mnemonic, wallet::KeyType::Ed25519) + .map_err(|e| anyhow!("Failed to import wallet: {}", e))?; + + let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) + .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; + let address = base_wallet.default_address() + .map_err(|e| anyhow!("Failed to get address: {}", e))?.value; + + let wallet_id = uuid::Uuid::new_v4().to_string(); + + let mut wallets = self.wallets.write().await; + wallets.insert(wallet_id.clone(), wallet); + + Ok(WalletInfo { + id: wallet_id, + address, + mnemonic: mnemonic.to_string(), + derivation_path: self.config.wallet.derivation_path.clone(), + }) + } + + /// Get wallet information + pub async fn get_wallet(&self, wallet_id: &str) -> Result> { + let wallets = self.wallets.read().await; + + if let Some(wallet) = wallets.get(wallet_id) { + let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) + .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; + let address = base_wallet.default_address() + 
.map_err(|e| anyhow!("Failed to get address: {}", e))?.value; + + Ok(Some(WalletInfo { + id: wallet_id.to_string(), + address, + mnemonic: String::new(), // Don't expose mnemonic in get operations + derivation_path: self.config.wallet.derivation_path.clone(), + })) + } else { + Ok(None) + } + } + + // ======================================================================== + // Transaction Operations + // ======================================================================== + + /// Send a simple transaction + pub async fn send_transaction( + &self, + wallet_info: &WalletInfo, + to: &str, + amount: u64, + data: Option>, + ) -> Result { + let wallets = self.wallets.read().await; + let wallet = wallets.get(&wallet_info.id) + .ok_or_else(|| anyhow!("Wallet not found"))?; + + let keypair = wallet.derive_key(0) + .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; + + let nonce = self.get_account_nonce(&wallet_info.address).await?; + + let tx = Transaction { + hash: self.generate_transaction_hash(), + from: wallet_info.address.clone(), + to: Some(to.to_string()), + value: amount, + gas_limit: 21000, // Standard gas limit + gas_price: 1, // Default gas price + data: data.unwrap_or_default(), + nonce, + signature: vec![], // Will be filled after signing + script_type: None, + }; + + let signed_tx = self.sign_transaction(tx, &keypair).await?; + self.submit_transaction(signed_tx).await + } + + /// Deploy a smart contract + pub async fn deploy_contract( + &self, + wallet_info: &WalletInfo, + contract_code: &[u8], + init_params: &[u8], + gas_limit: u64, + ) -> Result { + let wallets = self.wallets.read().await; + let wallet = wallets.get(&wallet_info.id) + .ok_or_else(|| anyhow!("Wallet not found"))?; + + let keypair = wallet.derive_key(0) + .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; + + let nonce = self.get_account_nonce(&wallet_info.address).await?; + + let tx = Transaction { + hash: self.generate_transaction_hash(), + from: 
wallet_info.address.clone(), + to: None, // Contract deployment + value: 0, + gas_limit, + gas_price: 1, + data: vec![], + nonce, + signature: vec![], + script_type: Some(ScriptTransactionType::Deploy { + script_data: contract_code.to_vec(), + init_params: init_params.to_vec(), + }), + }; + + let signed_tx = self.sign_transaction(tx, &keypair).await?; + self.submit_transaction(signed_tx).await + } + + /// Call a smart contract method + pub async fn call_contract( + &self, + wallet_info: &WalletInfo, + contract_hash: &Hash, + method: &str, + params: &[u8], + gas_limit: u64, + ) -> Result { + let wallets = self.wallets.read().await; + let wallet = wallets.get(&wallet_info.id) + .ok_or_else(|| anyhow!("Wallet not found"))?; + + let keypair = wallet.derive_key(0) + .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; + + let nonce = self.get_account_nonce(&wallet_info.address).await?; + + let tx = Transaction { + hash: self.generate_transaction_hash(), + from: wallet_info.address.clone(), + to: Some(contract_hash.clone()), + value: 0, + gas_limit, + gas_price: 1, + data: vec![], + nonce, + signature: vec![], + script_type: Some(ScriptTransactionType::Call { + script_hash: contract_hash.clone(), + method: method.to_string(), + params: params.to_vec(), + }), + }; + + let signed_tx = self.sign_transaction(tx, &keypair).await?; + self.submit_transaction(signed_tx).await + } + + // ======================================================================== + // Query Operations + // ======================================================================== + + /// Get account balance + pub async fn get_balance(&self, address: &str) -> Result { + let execution = self.execution_layer.read().await; + let account_state = execution.get_account_state(&address.to_string()).await?; + Ok(account_state.balance) + } + + /// Get account nonce + pub async fn get_account_nonce(&self, address: &str) -> Result { + let execution = self.execution_layer.read().await; + let account_state = 
execution.get_account_state(&address.to_string()).await?; + Ok(account_state.nonce) + } + + /// Get block by hash + pub async fn get_block(&self, block_hash: &Hash) -> Result> { + let consensus = self.consensus_layer.read().await; + consensus.get_block_by_hash(block_hash).await + } + + /// Get current block height + pub async fn get_block_height(&self) -> Result { + let consensus = self.consensus_layer.read().await; + consensus.get_block_height().await + } + + /// Get transaction receipt (simplified) + pub async fn get_transaction_receipt(&self, _tx_hash: &Hash) -> Result> { + // This would typically query a transaction pool or blockchain storage + // For now, returning None as this requires more complex state management + Ok(None) + } + + // ======================================================================== + // Data Availability Operations + // ======================================================================== + + /// Store data on the blockchain + pub async fn store_data(&self, data: &[u8]) -> Result { + let mut da_layer = self.data_availability_layer.write().await; + da_layer.store_data(data).await + } + + /// Retrieve data from the blockchain + pub async fn retrieve_data(&self, data_hash: &Hash) -> Result>> { + let da_layer = self.data_availability_layer.read().await; + da_layer.retrieve_data(data_hash).await + } + + /// Verify data availability + pub async fn verify_data_availability(&self, data_hash: &Hash) -> Result { + let da_layer = self.data_availability_layer.read().await; + da_layer.verify_availability(data_hash).await + } + + // ======================================================================== + // Mining Operations + // ======================================================================== + + /// Mine a new block with pending transactions + pub async fn mine_block(&self) -> Result { + let mut consensus = self.consensus_layer.write().await; + // For simplicity, mining an empty block + // In a real implementation, this would 
gather pending transactions + consensus.mine_block(vec![]).await + } + + /// Set mining difficulty + pub async fn set_mining_difficulty(&self, difficulty: usize) -> Result<()> { + let mut consensus = self.consensus_layer.write().await; + consensus.set_difficulty(difficulty).await + } + + // ======================================================================== + // Layer Access + // ======================================================================== + + /// Get direct access to execution layer + pub fn execution_layer(&self) -> Arc> { + self.execution_layer.clone() + } + + /// Get direct access to settlement layer + pub fn settlement_layer(&self) -> Arc> { + self.settlement_layer.clone() + } + + /// Get direct access to consensus layer + pub fn consensus_layer(&self) -> Arc> { + self.consensus_layer.clone() + } + + /// Get direct access to data availability layer + pub fn data_availability_layer(&self) -> Arc> { + self.data_availability_layer.clone() + } + + // ======================================================================== + // Internal Helper Methods + // ======================================================================== + + async fn sign_transaction(&self, mut tx: Transaction, keypair: &KeyPair) -> Result { + let tx_data = serde_json::to_vec(&tx)?; + let signature_vec = keypair.sign(&tx_data) + .map_err(|e| anyhow!("Failed to sign transaction: {}", e))?; + tx.signature = signature_vec; + Ok(tx) + } + + async fn submit_transaction(&self, tx: Transaction) -> Result { + let mut execution = self.execution_layer.write().await; + let receipt = execution.execute_transaction(&tx).await?; + + if receipt.success { + Ok(tx.hash) + } else { + Err(anyhow!("Transaction execution failed")) + } + } + + fn generate_transaction_hash(&self) -> Hash { + use sha2::{Sha256, Digest}; + let mut hasher = Sha256::new(); + hasher.update(chrono::Utc::now().timestamp().to_string()); + hasher.update(uuid::Uuid::new_v4().to_string()); + hex::encode(hasher.finalize()) + } 
+} + +// ============================================================================ +// SDK Types +// ============================================================================ + +/// Wallet information returned by SDK +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WalletInfo { + pub id: String, + pub address: String, + pub mnemonic: String, + pub derivation_path: String, +} + +/// Contract deployment result +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ContractDeployment { + pub contract_hash: Hash, + pub transaction_hash: Hash, + pub gas_used: u64, +} + +/// Contract call result +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ContractCallResult { + pub transaction_hash: Hash, + pub return_data: Vec, + pub gas_used: u64, + pub events: Vec, +} + +// ============================================================================ +// Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_client_creation() { + let client = PolyTorusClient::new_default().await; + assert!(client.is_ok()); + } + + #[tokio::test] + async fn test_wallet_creation() { + let client = PolyTorusClient::new_default().await.unwrap(); + let wallet = client.create_wallet().await; + assert!(wallet.is_ok()); + + let wallet_info = wallet.unwrap(); + assert!(!wallet_info.id.is_empty()); + assert!(!wallet_info.address.is_empty()); + assert!(!wallet_info.mnemonic.is_empty()); + } + + #[tokio::test] + async fn test_data_storage() { + let client = PolyTorusClient::new_default().await.unwrap(); + let data = b"Hello, PolyTorus!"; + + let hash = client.store_data(data).await; + assert!(hash.is_ok()); + + let stored_data = client.retrieve_data(&hash.unwrap()).await; + assert!(stored_data.is_ok()); + assert_eq!(stored_data.unwrap(), Some(data.to_vec())); + } + + #[tokio::test] + async fn test_mining() { + let client = 
PolyTorusClient::new_default().await.unwrap(); + + // Set low difficulty for fast mining in tests + client.set_mining_difficulty(0).await.unwrap(); + + let block = client.mine_block().await; + assert!(block.is_ok()); + + let mined_block = block.unwrap(); + assert!(!mined_block.hash.is_empty()); + assert_eq!(mined_block.transactions.len(), 0); // Empty block + } +} \ No newline at end of file From 8efc7031fcbc6db0c162a194fd39bad7da723909 Mon Sep 17 00:00:00 2001 From: quantumshiro Date: Sat, 2 Aug 2025 23:12:57 +0900 Subject: [PATCH 3/5] add: correctly transaction --- .gitignore | 2 + Cargo.lock | 228 ++++++- Cargo.toml | 5 +- Dockerfile.simple | 29 + crates/consensus/src/consensus_engine.rs | 117 ++++ crates/p2p-network/Cargo.toml | 26 +- .../benches/network_discovery_benchmarks.rs | 411 +++++++++++++ crates/p2p-network/benches/p2p_benchmarks.rs | 322 ++++++++++ .../p2p-network/benches/scaling_benchmarks.rs | 354 +++++++++++ crates/p2p-network/benches/unit_benchmarks.rs | 194 ++++++ crates/p2p-network/src/adaptive_network.rs | 225 +++++++ crates/p2p-network/src/auto_discovery.rs | 259 ++++++++ crates/p2p-network/src/discovery.rs | 559 +++++++++++++++++ crates/p2p-network/src/lib.rs | 107 ++-- crates/p2p-network/src/peer.rs | 12 +- crates/p2p-network/src/signaling.rs | 36 +- crates/p2p-network/tests/integration_test.rs | 350 ----------- .../tests/non_blocking_adaptive_test.rs | 372 +++++++++++ .../tests/non_blocking_integration_test.rs | 401 ++++++++++++ .../non_blocking_network_joining_test.rs | 327 ++++++++++ .../tests/non_blocking_peer_test.rs | 464 ++++++++++++++ crates/p2p-network/tests/peer_test.rs | 224 ------- .../p2p-network/tests/quick_discovery_test.rs | 246 ++++++++ crates/sdk/Cargo.toml | 48 -- crates/sdk/src/lib.rs | 582 ------------------ docker-compose.simple.yml | 87 +++ docker-compose.testnet.yml | 127 ++++ scripts/simple-testnet.sh | 282 +++++++++ scripts/stress-test.sh | 193 ++++++ scripts/testnet-manager.sh | 316 ++++++++++ src/main.rs | 340 
+++++++++- 31 files changed, 5932 insertions(+), 1313 deletions(-) create mode 100644 Dockerfile.simple create mode 100644 crates/p2p-network/benches/network_discovery_benchmarks.rs create mode 100644 crates/p2p-network/benches/p2p_benchmarks.rs create mode 100644 crates/p2p-network/benches/scaling_benchmarks.rs create mode 100644 crates/p2p-network/benches/unit_benchmarks.rs create mode 100644 crates/p2p-network/src/adaptive_network.rs create mode 100644 crates/p2p-network/src/auto_discovery.rs create mode 100644 crates/p2p-network/src/discovery.rs delete mode 100644 crates/p2p-network/tests/integration_test.rs create mode 100644 crates/p2p-network/tests/non_blocking_adaptive_test.rs create mode 100644 crates/p2p-network/tests/non_blocking_integration_test.rs create mode 100644 crates/p2p-network/tests/non_blocking_network_joining_test.rs create mode 100644 crates/p2p-network/tests/non_blocking_peer_test.rs delete mode 100644 crates/p2p-network/tests/peer_test.rs create mode 100644 crates/p2p-network/tests/quick_discovery_test.rs delete mode 100644 crates/sdk/Cargo.toml delete mode 100644 crates/sdk/src/lib.rs create mode 100644 docker-compose.simple.yml create mode 100644 docker-compose.testnet.yml create mode 100755 scripts/simple-testnet.sh create mode 100755 scripts/stress-test.sh create mode 100755 scripts/testnet-manager.sh diff --git a/.gitignore b/.gitignore index 954baa0..0103593 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,5 @@ test_simple_circuit_operations_obfuscation/ logs :memory: blockchain_data + +polytorus_data diff --git a/Cargo.lock b/Cargo.lock index 41f706e..50b5abf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -82,6 +82,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.19" @@ -408,6 +414,12 @@ version = "1.10.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cbc" version = "0.1.2" @@ -461,6 +473,33 @@ dependencies = [ "windows-link", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.4.4" @@ -728,6 +767,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name 
= "crossbeam-deque" version = "0.8.6" @@ -753,6 +828,12 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1307,6 +1388,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.15.4" @@ -1323,6 +1414,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + [[package]] name = "hex" version = "0.4.3" @@ -1551,12 +1648,32 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "is-terminal" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.10.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -1873,6 +1990,12 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -1906,14 +2029,18 @@ dependencies = [ "bincode", "bytes", "chrono", + "criterion", "env_logger", "futures", + "futures-util", "log", "rand", "serde", "serde_bytes", "serde_json", + "sha1_smol", "tokio", + "tokio-test", "tracing", "tracing-subscriber", "traits", @@ -2053,6 +2180,34 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polytorus" version = "0.1.0" @@ -2060,6 +2215,7 @@ dependencies = [ "anyhow", "async-trait", "base64ct", + 
"bincode", "chrono", "clap", "consensus", @@ -2071,6 +2227,7 @@ dependencies = [ "serde", "serde_json", "settlement", + "sled", "tokio", "traits", "uuid", @@ -2495,36 +2652,21 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sdk" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "chrono", - "consensus", - "data-availability", - "env_logger", - "execution", - "hex", - "log", - "serde", - "serde_json", - "settlement", - "sha2", - "tokio", - "tokio-test", - "traits", - "uuid", - "wallet", -] - [[package]] name = "sdp" version = "0.6.2" @@ -2658,6 +2800,12 @@ dependencies = [ "digest", ] +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.10.9" @@ -2984,6 +3132,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.9.0" @@ -3315,6 +3473,16 @@ dependencies = [ "atomic-waker", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "wallet" version = "0.1.0" @@ -3589,7 +3757,7 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "gimli", - "itertools", + "itertools 0.14.0", "log", "object", "pulley-interpreter", @@ -3744,6 +3912,16 @@ dependencies = [ "wast", ] +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webrtc" version = "0.11.0" diff --git a/Cargo.toml b/Cargo.toml index 79b38cd..915fbb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,6 @@ members = [ "crates/consensus", "crates/data-availability", "crates/p2p-network", - "crates/sdk", ] resolver = "2" @@ -92,3 +91,7 @@ clap = { workspace = true } # Utilities chrono = { workspace = true } uuid = { workspace = true } + +# Persistence +sled = { workspace = true } +bincode = { workspace = true } diff --git a/Dockerfile.simple b/Dockerfile.simple new file mode 100644 index 0000000..b371418 --- /dev/null +++ b/Dockerfile.simple @@ -0,0 +1,29 @@ +# Simple PolyTorus Docker image for testing +FROM ubuntu:22.04 + +# Install minimal runtime dependencies +RUN apt-get update && apt-get install -y \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Create app directory +WORKDIR /app + +# Copy pre-built binary +COPY target/release/polytorus /app/polytorus + +# Create data directory +RUN mkdir -p /app/data + +# Create non-root user +RUN useradd -m -u 1000 polytorus && \ + chown -R polytorus:polytorus /app && \ + chmod +x /app/polytorus + +USER polytorus +WORKDIR /app + +# Set the startup command +EXPOSE 8080 +ENTRYPOINT ["./polytorus"] +CMD [] \ No newline at end of file diff --git a/crates/consensus/src/consensus_engine.rs b/crates/consensus/src/consensus_engine.rs index 
9690769..aed156f 100644 --- a/crates/consensus/src/consensus_engine.rs +++ b/crates/consensus/src/consensus_engine.rs @@ -124,6 +124,123 @@ impl PolyTorusUtxoConsensusLayer { Ok(layer) } + /// Create new eUTXO consensus layer with restored state + pub fn new_with_restored_state( + config: UtxoConsensusConfig, + validator_address: String, + restored_height: u64, + restored_slot: u64, + canonical_chain: Vec, + ) -> Result { + let genesis_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64; + + let genesis_block = Self::create_genesis_utxo_block(genesis_time); + let genesis_hash = genesis_block.hash.clone(); + + let mut blocks = HashMap::new(); + blocks.insert(genesis_hash.clone(), genesis_block); + + // Restore chain state from persistent data + let chain_state = UtxoChainState { + canonical_chain, + blocks, + height: restored_height, + current_slot: restored_slot, + pending_transactions: Vec::new(), + }; + + let layer = Self { + chain_state: Arc::new(Mutex::new(chain_state)), + validators: Arc::new(Mutex::new(HashMap::new())), + config, + validator_address: Some(validator_address.clone()), + genesis_time, + }; + + // Add self as validator + let validator_info = ValidatorInfo { + address: validator_address, + stake: 1000, + public_key: vec![1, 2, 3], + active: true, + }; + + { + let mut validators = layer.validators.lock().unwrap(); + validators.insert(validator_info.address.clone(), validator_info); + } + + Ok(layer) + } + + /// Create new eUTXO consensus layer with restored state and blocks + pub fn new_with_restored_state_and_blocks( + config: UtxoConsensusConfig, + validator_address: String, + restored_height: u64, + restored_slot: u64, + canonical_chain: Vec, + restored_blocks: HashMap, + ) -> Result { + let genesis_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64; + + // If no blocks are restored, create genesis block + let mut blocks = if restored_blocks.is_empty() { + let 
genesis_block = Self::create_genesis_utxo_block(genesis_time); + let genesis_hash = genesis_block.hash.clone(); + let mut blocks = HashMap::new(); + blocks.insert(genesis_hash, genesis_block); + blocks + } else { + restored_blocks + }; + + // Ensure genesis block exists if not in restored blocks + let genesis_hash = "genesis_utxo_block_hash".to_string(); + if !blocks.contains_key(&genesis_hash) { + let genesis_block = Self::create_genesis_utxo_block(genesis_time); + blocks.insert(genesis_hash, genesis_block); + } + + // Restore chain state from persistent data + let chain_state = UtxoChainState { + canonical_chain, + blocks, + height: restored_height, + current_slot: restored_slot, + pending_transactions: Vec::new(), + }; + + let layer = Self { + chain_state: Arc::new(Mutex::new(chain_state)), + validators: Arc::new(Mutex::new(HashMap::new())), + config, + validator_address: Some(validator_address.clone()), + genesis_time, + }; + + // Add self as validator + let validator_info = ValidatorInfo { + address: validator_address, + stake: 1000, + public_key: vec![1, 2, 3], + active: true, + }; + + { + let mut validators = layer.validators.lock().unwrap(); + validators.insert(validator_info.address.clone(), validator_info); + } + + Ok(layer) + } + /// Create genesis eUTXO block fn create_genesis_utxo_block(genesis_time: u64) -> UtxoBlock { UtxoBlock { diff --git a/crates/p2p-network/Cargo.toml b/crates/p2p-network/Cargo.toml index c267b8a..dda4a09 100644 --- a/crates/p2p-network/Cargo.toml +++ b/crates/p2p-network/Cargo.toml @@ -32,12 +32,36 @@ tracing-subscriber = "0.3" bincode = "1.3" rand = { workspace = true } serde_bytes = "0.11" +sha1_smol = "1.0" [dev-dependencies] # Test dependencies env_logger = { workspace = true } +criterion = { version = "0.5", features = ["html_reports"] } +tokio-test = "0.4" +futures-util = "0.3" +futures = "0.3" [features] default = ["stun-server"] stun-server = [] -test-mode = [] \ No newline at end of file +test-mode = [] + 
+[lints.clippy] +uninlined_format_args = "allow" + +[[bench]] +name = "unit_benchmarks" +harness = false + +[[bench]] +name = "p2p_benchmarks" +harness = false + +[[bench]] +name = "scaling_benchmarks" +harness = false + +[[bench]] +name = "network_discovery_benchmarks" +harness = false \ No newline at end of file diff --git a/crates/p2p-network/benches/network_discovery_benchmarks.rs b/crates/p2p-network/benches/network_discovery_benchmarks.rs new file mode 100644 index 0000000..2aa002f --- /dev/null +++ b/crates/p2p-network/benches/network_discovery_benchmarks.rs @@ -0,0 +1,411 @@ +//! Network Discovery and Joining Benchmarks +//! +//! Benchmarks that measure how efficiently new nodes can discover +//! and join existing P2P networks. + +use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId}; +use std::time::{Duration, Instant}; +use tokio::runtime::Runtime; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize minimal logging +fn init_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Error) + .is_test(true) + .try_init(); +} + +/// Create test configuration for discovery benchmarks +fn create_discovery_config(node_id: &str, port: u16, bootstrap_peers: Vec) -> P2PConfig { + P2PConfig { + node_id: node_id.to_string(), + listen_addr: format!("127.0.0.1:{}", port).parse().unwrap(), + bootstrap_peers, + stun_servers: vec![], // Local testing only + max_peers: 20, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + } +} + +/// Create test transaction +fn create_test_transaction(id: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("discovery_tx_{}", id), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}", id), + output_index: 0, + }, + redeemer: vec![0u8; 32], + signature: vec![0u8; 64], + }], + outputs: vec![TxOutput { + value: 1000, + script: vec![0u8; 25], + 
datum: Some(vec![0u8; 32]), + datum_hash: Some(format!("hash_{}", id)), + }], + fee: 10, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +/// Benchmark network bootstrap time +fn benchmark_network_bootstrap(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("network_bootstrap"); + group.sample_size(20); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(3)); + + group.bench_function("single_node_bootstrap", |b| { + b.iter(|| { + let rt = Runtime::new().unwrap(); + rt.block_on(async { + let config = create_discovery_config("bootstrap", 11000, vec![]); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let start = Instant::now(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + + // Wait for network to be ready + tokio::time::sleep(Duration::from_millis(50)).await; + let bootstrap_time = start.elapsed(); + + network.shutdown().await.unwrap(); + black_box(bootstrap_time); + }); + }); + }); + + group.finish(); +} + +/// Benchmark peer discovery efficiency +fn benchmark_peer_discovery(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("peer_discovery"); + group.sample_size(15); + group.warm_up_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(5)); + + // Test discovery with different network sizes + for network_size in [2, 5, 10].iter() { + group.bench_with_input( + BenchmarkId::from_parameter(format!("{}_peers", network_size)), + network_size, + |b, &network_size| { + b.iter(|| { + rt.block_on(async { + // Create initial network + let mut networks = Vec::new(); + + // Bootstrap node + let config = create_discovery_config("peer_0", 11100, vec![]); + let network = WebRTCP2PNetwork::new(config).unwrap(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; 
+ }); + networks.push(network); + tokio::time::sleep(Duration::from_millis(100)).await; + + // Add peers + for i in 1..network_size { + let config = create_discovery_config( + &format!("peer_{}", i), + 11100 + i as u16, + vec!["127.0.0.1:11100".to_string()], + ); + let network = WebRTCP2PNetwork::new(config).unwrap(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + networks.push(network); + tokio::time::sleep(Duration::from_millis(50)).await; + } + + // Measure discovery time for new node + let new_config = create_discovery_config( + "discoverer", + 11100 + network_size as u16, + vec!["127.0.0.1:11100".to_string()], + ); + let new_network = WebRTCP2PNetwork::new(new_config).unwrap(); + + let discovery_start = Instant::now(); + let net_clone = new_network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + + tokio::time::sleep(Duration::from_millis(200)).await; + + let peers = new_network.get_connected_peers().await; + let discovery_time = discovery_start.elapsed(); + + // Cleanup + new_network.shutdown().await.unwrap(); + for net in networks { + net.shutdown().await.unwrap(); + } + + black_box((peers.len(), discovery_time)); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark network join latency +fn benchmark_network_join_latency(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("network_join_latency"); + group.sample_size(20); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(3)); + + group.bench_function("join_existing_network", |b| { + // Setup persistent network + let bootstrap = rt.block_on(async { + let config = create_discovery_config("persistent_bootstrap", 11200, vec![]); + let network = WebRTCP2PNetwork::new(config).unwrap(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + 
tokio::time::sleep(Duration::from_millis(200)).await; + network + }); + + let mut node_counter = 0; + + b.iter(|| { + rt.block_on(async { + node_counter += 1; + let config = create_discovery_config( + &format!("joiner_{}", node_counter), + 11300 + node_counter as u16, + vec!["127.0.0.1:11200".to_string()], + ); + + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let join_start = Instant::now(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + + // Wait for network to stabilize + tokio::time::sleep(Duration::from_millis(100)).await; + + // Verify can broadcast + let tx = create_test_transaction(node_counter); + let broadcast_result = network.broadcast_transaction(&tx).await; + let join_time = join_start.elapsed(); + + network.shutdown().await.unwrap(); + + black_box((broadcast_result.is_ok(), join_time)); + }); + }); + + // Cleanup + rt.block_on(async { + bootstrap.shutdown().await.unwrap(); + }); + }); + + group.finish(); +} + +/// Benchmark network propagation speed +fn benchmark_network_propagation(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("network_propagation"); + group.sample_size(10); + group.warm_up_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(5)); + + group.bench_function("transaction_propagation", |b| { + // Setup network + let networks = rt.block_on(async { + let mut nets = Vec::new(); + + // Create 5-node network + for i in 0..5 { + let config = create_discovery_config( + &format!("prop_node_{}", i), + 11400 + i, + if i == 0 { vec![] } else { vec!["127.0.0.1:11400".to_string()] }, + ); + let network = WebRTCP2PNetwork::new(config).unwrap(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + nets.push(network); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + nets + }); + + let mut tx_counter = 0; + + b.iter(|| { + 
rt.block_on(async { + tx_counter += 1; + let tx = create_test_transaction(tx_counter); + + let propagation_start = Instant::now(); + + // Broadcast from first node + let _ = networks[0].broadcast_transaction(&tx).await; + + // Measure propagation through network + let mut propagation_times = Vec::new(); + for (i, network) in networks.iter().enumerate().skip(1) { + let stats = network.get_network_stats(); + propagation_times.push((i, propagation_start.elapsed(), stats.messages_received)); + } + + black_box(propagation_times); + }); + }); + + // Cleanup + rt.block_on(async { + for net in networks { + net.shutdown().await.unwrap(); + } + }); + }); + + group.finish(); +} + +/// Benchmark discovery under load +fn benchmark_discovery_under_load(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("discovery_under_load"); + group.sample_size(10); + group.warm_up_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(5)); + + group.bench_function("join_busy_network", |b| { + // Setup busy network + let (networks, _load_task) = rt.block_on(async { + let mut nets = Vec::new(); + + // Create initial network + for i in 0..3 { + let config = create_discovery_config( + &format!("busy_node_{}", i), + 11500 + i, + if i == 0 { vec![] } else { vec!["127.0.0.1:11500".to_string()] }, + ); + let network = WebRTCP2PNetwork::new(config).unwrap(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + nets.push(network); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Generate load + let nets_clone = nets.clone(); + let load_task = tokio::spawn(async move { + let mut counter = 0; + loop { + counter += 1; + let tx = create_test_transaction(10000 + counter); + let _ = nets_clone[(counter % 3) as usize].broadcast_transaction(&tx).await; + tokio::time::sleep(Duration::from_millis(10)).await; + } + }); + + (nets, load_task) + }); + + let 
mut joiner_counter = 0; + + b.iter(|| { + rt.block_on(async { + joiner_counter += 1; + let config = create_discovery_config( + &format!("busy_joiner_{}", joiner_counter), + 11600 + joiner_counter as u16, + vec!["127.0.0.1:11500".to_string()], + ); + + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let join_start = Instant::now(); + let net_clone = network.clone(); + tokio::spawn(async move { + let _ = net_clone.start().await; + }); + + tokio::time::sleep(Duration::from_millis(200)).await; + + let peers = network.get_connected_peers().await; + let stats = network.get_network_stats(); + let join_time = join_start.elapsed(); + + network.shutdown().await.unwrap(); + + black_box((peers.len(), stats.messages_received, join_time)); + }); + }); + + // Cleanup + rt.block_on(async { + for net in networks { + net.shutdown().await.unwrap(); + } + }); + }); + + group.finish(); +} + +criterion_group!( + benches, + benchmark_network_bootstrap, + benchmark_peer_discovery, + benchmark_network_join_latency, + benchmark_network_propagation, + benchmark_discovery_under_load +); + +criterion_main!(benches); \ No newline at end of file diff --git a/crates/p2p-network/benches/p2p_benchmarks.rs b/crates/p2p-network/benches/p2p_benchmarks.rs new file mode 100644 index 0000000..1dcf9ec --- /dev/null +++ b/crates/p2p-network/benches/p2p_benchmarks.rs @@ -0,0 +1,322 @@ +//! Realistic P2P Network Performance Benchmarks +//! +//! Benchmarks that simulate real-world P2P network scenarios with proper +//! connection establishment timing and network latency considerations. 
+ +use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; +use std::time::Duration; +use tokio::runtime::Runtime; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize minimal logging for realistic benchmarks +fn init_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Error) + .is_test(true) + .try_init(); +} + +/// Create realistic network configuration +fn create_realistic_config(node_id: &str, port: u16, bootstrap_port: Option) -> P2PConfig { + P2PConfig { + node_id: node_id.to_string(), + listen_addr: format!("127.0.0.1:{}", port).parse().unwrap(), + bootstrap_peers: if let Some(bp) = bootstrap_port { + vec![format!("127.0.0.1:{}", bp)] + } else { + vec![] + }, + stun_servers: vec![], // No STUN for faster benchmarks + max_peers: 10, + connection_timeout: 30, + keep_alive_interval: 30, + debug_mode: false, + } +} + +/// Create test transaction for realistic load +fn create_realistic_transaction(id: u64, value: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("realistic_tx_{:08x}", id), + inputs: vec![ + TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{:08x}", id / 2), + output_index: (id % 3) as u32, + }, + redeemer: vec![0u8; 64], // Realistic size + signature: vec![0u8; 64], // ECDSA signature size + } + ], + outputs: vec![ + TxOutput { + value, + script: vec![0u8; 25], // Typical P2PKH script size + datum: Some(vec![0u8; 32]), // 32-byte datum + datum_hash: Some(format!("datum_{:08x}", id)), + } + ], + fee: value / 100, // 1% fee + validity_range: Some((id * 1000, (id + 100) * 1000)), + script_witness: vec![vec![0u8; 128]], // Witness data + auxiliary_data: None, + } +} + +/// Benchmark network initialization performance +fn benchmark_network_initialization(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("network_initialization"); + group.sample_size(50); + 
group.warm_up_time(Duration::from_millis(200)); + group.measurement_time(Duration::from_secs(2)); + + group.bench_function("create_and_configure", |b| { + b.iter(|| { + let config = create_realistic_config("bench_node", 9000, None); + let network = WebRTCP2PNetwork::new(config).unwrap(); + black_box(network); + }); + }); + + group.finish(); +} + +/// Benchmark transaction processing throughput +fn benchmark_transaction_throughput(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + let config = create_realistic_config("throughput_node", 9001, None); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let mut group = c.benchmark_group("transaction_throughput"); + group.sample_size(30); + group.warm_up_time(Duration::from_millis(300)); + group.measurement_time(Duration::from_secs(3)); + + // Single transaction processing + group.bench_function("single_transaction", |b| { + b.iter(|| { + rt.block_on(async { + let tx = create_realistic_transaction(rand::random(), 100000); + let result = network.broadcast_transaction(&tx).await; + let _ = black_box(result); + }); + }); + }); + + // Batch transaction processing + for batch_size in [10, 50, 100].iter() { + group.throughput(Throughput::Elements(*batch_size as u64)); + group.bench_with_input( + format!("batch_{}_transactions", batch_size), + batch_size, + |b, &batch_size| { + b.iter(|| { + rt.block_on(async { + let transactions: Vec<_> = (0..batch_size) + .map(|i| create_realistic_transaction(i as u64, 100000 + i as u64 * 1000)) + .collect(); + + let mut results = Vec::new(); + for tx in &transactions { + let result = network.broadcast_transaction(tx).await; + results.push(result); + } + let _ = black_box(results); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark network statistics collection +fn benchmark_network_statistics(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + let config = create_realistic_config("stats_node", 9002, None); + let 
network = WebRTCP2PNetwork::new(config).unwrap(); + + let mut group = c.benchmark_group("network_statistics"); + group.sample_size(100); + group.warm_up_time(Duration::from_millis(100)); + group.measurement_time(Duration::from_millis(800)); + + group.bench_function("get_network_stats", |b| { + b.iter(|| { + let stats = network.get_network_stats(); + black_box(stats); + }); + }); + + group.bench_function("get_connected_peers", |b| { + b.iter(|| { + rt.block_on(async { + let peers = network.get_connected_peers().await; + black_box(peers); + }); + }); + }); + + group.bench_function("request_blockchain_data", |b| { + b.iter(|| { + rt.block_on(async { + let result = network.request_blockchain_data( + "transaction".to_string(), + format!("hash_{}", rand::random::()) + ).await; + let _ = black_box(result); + }); + }); + }); + + group.finish(); +} + +/// Benchmark transaction serialization/deserialization +fn benchmark_transaction_serialization(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("transaction_serialization"); + group.sample_size(100); + group.warm_up_time(Duration::from_millis(200)); + group.measurement_time(Duration::from_secs(1)); + + // Create realistic transactions of different sizes + let small_tx = create_realistic_transaction(1, 1000); + let medium_tx = UtxoTransaction { + inputs: vec![small_tx.inputs[0].clone(); 3], + outputs: vec![small_tx.outputs[0].clone(); 5], + ..small_tx.clone() + }; + let large_tx = UtxoTransaction { + inputs: vec![small_tx.inputs[0].clone(); 10], + outputs: vec![small_tx.outputs[0].clone(); 15], + script_witness: vec![vec![0u8; 256]; 10], + ..small_tx.clone() + }; + + group.bench_function("serialize_small_tx", |b| { + b.iter(|| { + let serialized = bincode::serialize(&small_tx).unwrap(); + black_box(serialized); + }); + }); + + group.bench_function("serialize_medium_tx", |b| { + b.iter(|| { + let serialized = bincode::serialize(&medium_tx).unwrap(); + black_box(serialized); + }); + }); + + 
group.bench_function("serialize_large_tx", |b| { + b.iter(|| { + let serialized = bincode::serialize(&large_tx).unwrap(); + black_box(serialized); + }); + }); + + // Deserialization benchmarks + let small_serialized = bincode::serialize(&small_tx).unwrap(); + let medium_serialized = bincode::serialize(&medium_tx).unwrap(); + let large_serialized = bincode::serialize(&large_tx).unwrap(); + + group.bench_function("deserialize_small_tx", |b| { + b.iter(|| { + let tx: UtxoTransaction = bincode::deserialize(&small_serialized).unwrap(); + black_box(tx); + }); + }); + + group.bench_function("deserialize_medium_tx", |b| { + b.iter(|| { + let tx: UtxoTransaction = bincode::deserialize(&medium_serialized).unwrap(); + black_box(tx); + }); + }); + + group.bench_function("deserialize_large_tx", |b| { + b.iter(|| { + let tx: UtxoTransaction = bincode::deserialize(&large_serialized).unwrap(); + black_box(tx); + }); + }); + + group.finish(); +} + +/// Benchmark concurrent operations +fn benchmark_concurrent_operations(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("concurrent_operations"); + group.sample_size(20); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(2)); + + group.bench_function("concurrent_network_creation", |b| { + b.iter(|| { + rt.block_on(async { + let configs: Vec<_> = (0..5) + .map(|i| create_realistic_config(&format!("node_{}", i), 9100 + i, None)) + .collect(); + + let networks: Vec<_> = configs.into_iter() + .map(WebRTCP2PNetwork::new) + .collect::, _>>() + .unwrap(); + + black_box(networks); + }); + }); + }); + + group.bench_function("concurrent_broadcasts", |b| { + b.iter(|| { + rt.block_on(async { + let config = create_realistic_config("concurrent_node", 9200, None); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let transactions: Vec<_> = (0..10) + .map(|i| create_realistic_transaction(i, 10000 + i * 1000)) + .collect(); + + let 
mut handles = Vec::new(); + for tx in transactions { + let net = network.clone(); + let handle = tokio::spawn(async move { + net.broadcast_transaction(&tx).await + }); + handles.push(handle); + } + + let results = futures::future::join_all(handles).await; + let _ = black_box(results); + }); + }); + }); + + group.finish(); +} + +criterion_group!( + benches, + benchmark_network_initialization, + benchmark_transaction_throughput, + benchmark_network_statistics, + benchmark_transaction_serialization, + benchmark_concurrent_operations +); + +criterion_main!(benches); \ No newline at end of file diff --git a/crates/p2p-network/benches/scaling_benchmarks.rs b/crates/p2p-network/benches/scaling_benchmarks.rs new file mode 100644 index 0000000..80e0ad8 --- /dev/null +++ b/crates/p2p-network/benches/scaling_benchmarks.rs @@ -0,0 +1,354 @@ +//! P2P Network Scaling Performance Benchmarks +//! +//! Benchmarks that measure how the P2P network performs under various scaling scenarios: +//! - Increasing number of peers +//! - Increasing transaction throughput +//! - Increasing message sizes +//! 
- Network partitioning and recovery + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, PlotConfiguration, Throughput}; +use std::time::Duration; +use tokio::runtime::Runtime; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize minimal logging +fn init_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Error) + .is_test(true) + .try_init(); +} + +/// Create scaling test configuration +fn create_scaling_config(node_id: &str, port: u16, max_peers: usize) -> P2PConfig { + P2PConfig { + node_id: node_id.to_string(), + listen_addr: format!("127.0.0.1:{}", port).parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], // Local testing for consistent results + max_peers, + connection_timeout: 10, + keep_alive_interval: 30, + debug_mode: false, + } +} + +/// Create test transaction with configurable size +fn create_sized_transaction(id: u64, num_inputs: usize, num_outputs: usize) -> UtxoTransaction { + let inputs: Vec<_> = (0..num_inputs) + .map(|i| TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}_{}", id, i), + output_index: i as u32, + }, + redeemer: vec![0u8; 64], + signature: vec![0u8; 64], + }) + .collect(); + + let outputs: Vec<_> = (0..num_outputs) + .map(|i| TxOutput { + value: 1000 + (i as u64), + script: vec![0u8; 25], + datum: Some(vec![0u8; 32]), + datum_hash: Some(format!("datum_{}_{}", id, i)), + }) + .collect(); + + UtxoTransaction { + hash: format!("scaled_tx_{:08x}", id), + inputs, + outputs, + fee: 100, + validity_range: Some((id * 1000, (id + 100) * 1000)), + script_witness: vec![vec![0u8; 128]], + auxiliary_data: None, + } +} + +/// Benchmark scaling with increasing number of network instances +fn benchmark_peer_scaling(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("peer_scaling"); + group.sample_size(10); + 
group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(5)); + + // Test with different numbers of peers + for num_peers in [2, 5, 10, 20].iter() { + group.throughput(Throughput::Elements(*num_peers as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(num_peers), + num_peers, + |b, &num_peers| { + b.iter(|| { + let rt = Runtime::new().unwrap(); + rt.block_on(async { + // Create network instances + let mut networks = Vec::new(); + for i in 0..num_peers { + let config = create_scaling_config( + &format!("peer_{}", i), + 9500 + i as u16, + num_peers * 2, // Allow connections to all peers + ); + let network = WebRTCP2PNetwork::new(config).unwrap(); + networks.push(network); + } + + // Simulate network activity + let tx = create_sized_transaction(1, 2, 3); + for network in &networks { + let _ = network.broadcast_transaction(&tx).await; + } + + black_box(networks); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark transaction throughput scaling +fn benchmark_transaction_throughput_scaling(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + let config = create_scaling_config("throughput_node", 9600, 100); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let mut group = c.benchmark_group("transaction_throughput_scaling"); + group.sample_size(15); + group.warm_up_time(Duration::from_millis(300)); + group.measurement_time(Duration::from_secs(3)); + group.plot_config(PlotConfiguration::default().summary_scale(criterion::AxisScale::Logarithmic)); + + // Test with increasing batch sizes + for batch_size in [1, 10, 50, 100, 500, 1000].iter() { + group.throughput(Throughput::Elements(*batch_size as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(batch_size), + batch_size, + |b, &batch_size| { + b.iter(|| { + rt.block_on(async { + let transactions: Vec<_> = (0..batch_size) + .map(|i| create_sized_transaction(i as u64, 1, 2)) + .collect(); + + let start = 
std::time::Instant::now(); + for tx in &transactions { + let _ = network.broadcast_transaction(tx).await; + } + let elapsed = start.elapsed(); + + black_box((transactions, elapsed)); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark message size scaling +fn benchmark_message_size_scaling(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + let config = create_scaling_config("size_node", 9700, 50); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let mut group = c.benchmark_group("message_size_scaling"); + group.sample_size(20); + group.warm_up_time(Duration::from_millis(200)); + group.measurement_time(Duration::from_secs(2)); + + // Test with different transaction sizes (inputs/outputs) + let sizes = vec![(1, 1), (5, 5), (10, 10), (25, 25), (50, 50)]; + + for (num_inputs, num_outputs) in sizes { + let param = format!("{}in_{}out", num_inputs, num_outputs); + group.throughput(Throughput::Elements((num_inputs + num_outputs) as u64)); + group.bench_with_input( + BenchmarkId::new("transaction_size", ¶m), + &(num_inputs, num_outputs), + |b, &(num_inputs, num_outputs)| { + b.iter(|| { + rt.block_on(async { + let tx = create_sized_transaction(1, num_inputs, num_outputs); + let serialized = bincode::serialize(&tx).unwrap(); + let size = serialized.len(); + + let result = network.broadcast_transaction(&tx).await; + let _ = black_box((result, size)); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark concurrent operations scaling +fn benchmark_concurrent_operations_scaling(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("concurrent_operations_scaling"); + group.sample_size(10); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(3)); + + // Test with different levels of concurrency + for concurrency in [1, 5, 10, 25, 50].iter() { + group.throughput(Throughput::Elements(*concurrency as u64)); + 
group.bench_with_input( + BenchmarkId::from_parameter(concurrency), + concurrency, + |b, &concurrency| { + b.iter(|| { + rt.block_on(async { + let config = create_scaling_config("concurrent_node", 9800, 100); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + // Create transactions + let transactions: Vec<_> = (0..concurrency) + .map(|i| create_sized_transaction(i as u64, 2, 3)) + .collect(); + + // Spawn concurrent broadcasts + let mut handles = Vec::new(); + for tx in transactions { + let net = network.clone(); + let handle = tokio::spawn(async move { + net.broadcast_transaction(&tx).await + }); + handles.push(handle); + } + + // Wait for all to complete + let results = futures::future::join_all(handles).await; + black_box(results); + }); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark network partitioning and recovery +fn benchmark_network_resilience(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("network_resilience"); + group.sample_size(10); + group.warm_up_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(5)); + + group.bench_function("partition_recovery", |b| { + b.iter(|| { + rt.block_on(async { + // Create 3 partitions of networks + let mut partitions = Vec::new(); + for partition in 0..3 { + let mut networks = Vec::new(); + for i in 0..3 { + let config = create_scaling_config( + &format!("part{}_node{}", partition, i), + 9900 + partition * 10 + i as u16, + 20, + ); + let network = WebRTCP2PNetwork::new(config).unwrap(); + networks.push(network); + } + partitions.push(networks); + } + + // Simulate partition healing by broadcasting across partitions + let tx = create_sized_transaction(1, 5, 5); + let mut broadcast_count = 0; + + for partition in &partitions { + for network in partition { + let _ = network.broadcast_transaction(&tx).await; + broadcast_count += 1; + } + } + + black_box((partitions, broadcast_count)); + }); + }); + }); + + 
group.finish(); +} + +/// Benchmark memory usage scaling +fn benchmark_memory_scaling(c: &mut Criterion) { + init_logging(); + + let rt = Runtime::new().unwrap(); + + let mut group = c.benchmark_group("memory_scaling"); + group.sample_size(10); + group.warm_up_time(Duration::from_millis(300)); + group.measurement_time(Duration::from_secs(2)); + + // Test memory usage with increasing number of stored transactions + for num_transactions in [100, 500, 1000, 5000].iter() { + group.bench_with_input( + BenchmarkId::from_parameter(num_transactions), + num_transactions, + |b, &num_transactions| { + b.iter(|| { + rt.block_on(async { + let config = create_scaling_config("memory_node", 10000, 50); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + // Create and broadcast many transactions + let mut total_size = 0; + for i in 0..num_transactions { + let tx = create_sized_transaction(i as u64, 2, 3); + let serialized = bincode::serialize(&tx).unwrap(); + total_size += serialized.len(); + let _ = network.broadcast_transaction(&tx).await; + } + + // Get network statistics + let stats = network.get_network_stats(); + + black_box((stats, total_size)); + }); + }); + }, + ); + } + + group.finish(); +} + +criterion_group!( + benches, + benchmark_peer_scaling, + benchmark_transaction_throughput_scaling, + benchmark_message_size_scaling, + benchmark_concurrent_operations_scaling, + benchmark_network_resilience, + benchmark_memory_scaling +); + +criterion_main!(benches); \ No newline at end of file diff --git a/crates/p2p-network/benches/unit_benchmarks.rs b/crates/p2p-network/benches/unit_benchmarks.rs new file mode 100644 index 0000000..43feb3d --- /dev/null +++ b/crates/p2p-network/benches/unit_benchmarks.rs @@ -0,0 +1,194 @@ +//! P2P Network Essential Performance Benchmarks +//! +//! Core performance benchmarks for P2P network operations. +//! Focuses on the most critical operations without redundancy. 
+ +use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; +use std::time::Duration; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize minimal logging for benchmarks +fn init_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Error) + .is_test(true) + .try_init(); +} + +/// Create minimal P2P configuration +fn create_config(node_id: &str, port: u16) -> P2PConfig { + P2PConfig { + node_id: node_id.to_string(), + listen_addr: format!("127.0.0.1:{}", port).parse().unwrap(), + bootstrap_peers: vec![], // No network connections + stun_servers: vec![], // No external dependencies + max_peers: 5, + connection_timeout: 1, + keep_alive_interval: 30, + debug_mode: false, + } +} + +/// Create test transaction +fn create_transaction(id: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("tx_{}", id), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}", id), + output_index: 0, + }, + redeemer: b"redeemer".to_vec(), + signature: b"signature".to_vec(), + }], + outputs: vec![TxOutput { + value: 1000 + id, + script: vec![], + datum: Some(b"data".to_vec()), + datum_hash: Some("hash".to_string()), + }], + fee: 100, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +/// Benchmark core P2P operations +fn benchmark_core_operations(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("core_operations"); + group.sample_size(20); + group.warm_up_time(Duration::from_millis(200)); + group.measurement_time(Duration::from_secs(1)); + + // Network creation + group.bench_function("network_creation", |b| { + b.iter(|| { + let config = create_config("bench_node", 8100); + let network = WebRTCP2PNetwork::new(config).unwrap(); + black_box(network); + }); + }); + + // Transaction creation + group.bench_function("transaction_creation", |b| { + b.iter(|| { + let tx = 
create_transaction(black_box(rand::random())); + black_box(tx); + }); + }); + + // Serialization + let tx = create_transaction(12345); + group.bench_function("transaction_serialize", |b| { + b.iter(|| { + let serialized = bincode::serialize(&tx).unwrap(); + black_box(serialized); + }); + }); + + // Deserialization + let serialized = bincode::serialize(&tx).unwrap(); + group.bench_function("transaction_deserialize", |b| { + b.iter(|| { + let deserialized: UtxoTransaction = bincode::deserialize(&serialized).unwrap(); + black_box(deserialized); + }); + }); + + group.finish(); +} + +/// Benchmark batch processing +fn benchmark_batch_processing(c: &mut Criterion) { + init_logging(); + + let mut group = c.benchmark_group("batch_processing"); + group.sample_size(15); + group.warm_up_time(Duration::from_millis(300)); + group.measurement_time(Duration::from_secs(1)); + + for batch_size in [10, 50].iter() { + group.throughput(Throughput::Elements(*batch_size as u64)); + + // Batch transaction creation + group.bench_with_input( + format!("create_batch_{}", batch_size), + batch_size, + |b, &batch_size| { + b.iter(|| { + let transactions: Vec<_> = (0..batch_size) + .map(|i| create_transaction(i as u64)) + .collect(); + black_box(transactions); + }); + }, + ); + + // Batch serialization + group.bench_with_input( + format!("serialize_batch_{}", batch_size), + batch_size, + |b, &batch_size| { + let transactions: Vec<_> = (0..batch_size) + .map(|i| create_transaction(i as u64)) + .collect(); + + b.iter(|| { + let serialized: Vec<_> = transactions + .iter() + .map(|tx| bincode::serialize(tx).unwrap()) + .collect(); + black_box(serialized); + }); + }, + ); + } + + group.finish(); +} + +/// Benchmark network statistics +fn benchmark_network_stats(c: &mut Criterion) { + init_logging(); + + let config = create_config("stats_node", 8105); + let network = WebRTCP2PNetwork::new(config).unwrap(); + + let mut group = c.benchmark_group("network_stats"); + group.sample_size(30); + 
group.warm_up_time(Duration::from_millis(100)); + group.measurement_time(Duration::from_millis(500)); + + group.bench_function("get_stats", |b| { + b.iter(|| { + let stats = network.get_network_stats(); + black_box(stats); + }); + }); + + group.bench_function("get_peers", |b| { + let rt = tokio::runtime::Runtime::new().unwrap(); + b.iter(|| { + rt.block_on(async { + let peers = network.get_connected_peers().await; + black_box(peers); + }); + }); + }); + + group.finish(); +} + +criterion_group!( + benches, + benchmark_core_operations, + benchmark_batch_processing, + benchmark_network_stats +); + +criterion_main!(benches); \ No newline at end of file diff --git a/crates/p2p-network/src/adaptive_network.rs b/crates/p2p-network/src/adaptive_network.rs new file mode 100644 index 0000000..b2bf8b8 --- /dev/null +++ b/crates/p2p-network/src/adaptive_network.rs @@ -0,0 +1,225 @@ +//! Adaptive Network Methods +//! +//! Adaptive methods for the P2P network with automatic discovery and connection management. 
+ +use crate::WebRTCP2PNetwork; +use anyhow::Result; +use log::{debug, info, warn}; +use std::time::Duration; +use tokio::time::sleep; +use traits::P2PNetworkLayer; + +impl WebRTCP2PNetwork { + /// Adaptive start method with automatic discovery + pub async fn start_adaptive(&self) -> Result<()> { + info!( + "Starting Adaptive WebRTC P2P Network on {}", + self.config.listen_addr + ); + + // Update stats + { + let mut stats = self.stats.lock().unwrap(); + stats.last_updated = Some(std::time::SystemTime::now()); + } + + // Start auto discovery service + { + let mut discovery = self.auto_discovery.write().await; + discovery.start().await?; + info!("Auto discovery service started"); + } + + // Wait a moment for discovery to initialize + sleep(Duration::from_millis(500)).await; + + // Discover peers before connecting to bootstrap + let discovered_peers = { + let discovery = self.auto_discovery.read().await; + discovery.get_discovered_peers().await + }; + + info!("Discovered {} peers through auto discovery", discovered_peers.len()); + + // Add discovered peers to DHT + for peer in &discovered_peers { + self.dht.add_node(peer.node_id.clone(), peer.address).await; + } + + // Start connection to bootstrap peers with enhanced strategy + for peer_addr in &self.config.bootstrap_peers { + match peer_addr.parse() { + Ok(addr) => { + let peer_id = format!("bootstrap_{}", uuid::Uuid::new_v4()); + match self.connect_to_peer(peer_id.clone(), peer_addr.clone()).await { + Ok(_) => { + info!("Connected to bootstrap peer: {}", peer_addr); + // Add to auto discovery + let discovery = self.auto_discovery.read().await; + discovery.add_peer(peer_id, addr).await; + } + Err(e) => warn!( + "Failed to connect to bootstrap peer {}: {}", + peer_addr, e + ), + } + } + Err(_) => warn!("Invalid bootstrap peer address: {}", peer_addr), + } + } + + // Adaptive peer connection strategy + self.start_adaptive_peer_connection().await; + + // Start existing maintenance tasks + 
self.start_keep_alive_task().await; + self.start_maintenance_task().await; + self.start_message_processing_task().await; + + // Start auto discovery maintenance + self.start_discovery_maintenance().await; + + info!("Adaptive WebRTC P2P Network started successfully"); + + // Wait for shutdown signal + let mut shutdown_rx = { + let mut rx_option = self.shutdown_rx.lock().unwrap(); + rx_option + .take() + .ok_or_else(|| anyhow::anyhow!("Shutdown receiver already taken"))? + }; + + // Block until shutdown signal received + shutdown_rx.recv().await; + info!("P2P Network shutdown signal received"); + + Ok(()) + } + + /// Adaptive peer connection strategy + async fn start_adaptive_peer_connection(&self) { + let auto_discovery = self.auto_discovery.clone(); + let peers = self.peers.clone(); + let config = self.config.clone(); + let dht = self.dht.clone(); + let stats = self.stats.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(10)); + + loop { + interval.tick().await; + + // Get current peer count + let current_peers = peers.read().await.len(); + let max_peers = config.max_peers; + + if current_peers < max_peers { + // Get discovered peers + let discovery = auto_discovery.read().await; + let discovered = discovery.get_discovered_peers().await; + + for peer in discovered { + if current_peers >= max_peers { + break; + } + + // Check if we're already connected + let peers_read = peers.read().await; + if !peers_read.contains_key(&peer.node_id) { + drop(peers_read); // Release the lock + + info!("Attempting to connect to discovered peer: {}", peer.node_id); + + // This would need to be implemented as a method that doesn't require &self + // For now, just add to DHT + dht.add_node(peer.node_id.clone(), peer.address).await; + + // Update stats + { + let mut stats_guard = stats.lock().unwrap(); + stats_guard.total_connections += 1; + } + } + } + } + + // Adaptive connection strategy: connect to more peers if network is small + 
if current_peers < 3 { + debug!("Network is small ({}), actively seeking more peers", current_peers); + } + } + }); + } + + /// Start discovery maintenance task + async fn start_discovery_maintenance(&self) { + let auto_discovery = self.auto_discovery.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(60)); + + loop { + interval.tick().await; + + // Clean up old peers + let discovery = auto_discovery.read().await; + discovery.cleanup_old_peers().await; + } + }); + } + + /// Get discovered peers from auto discovery + pub async fn get_discovered_peers(&self) -> Vec { + let discovery = self.auto_discovery.read().await; + discovery.get_discovered_peers().await + } + + /// Adaptive broadcast with better peer targeting + pub async fn adaptive_broadcast_transaction(&self, transaction: &traits::UtxoTransaction) -> Result<()> { + // Use normal broadcast first + self.broadcast_transaction(transaction).await?; + + // Also try to broadcast to discovered peers that we might not be connected to + let discovered_peers = self.get_discovered_peers().await; + + if discovered_peers.len() > 0 { + debug!("Adaptive broadcast: also considering {} discovered peers", discovered_peers.len()); + + // In a full implementation, we could establish temporary connections + // or use other means to reach these peers + } + + Ok(()) + } + + /// Adaptive peer statistics including discovery info + pub async fn get_adaptive_network_stats(&self) -> AdaptiveNetworkStats { + let base_stats = self.get_network_stats(); + let discovered_peers = self.get_discovered_peers().await; + let dht_size = self.dht.size().await; + let connected_peers = self.get_connected_peers().await; + + AdaptiveNetworkStats { + base_stats, + discovered_peers_count: discovered_peers.len(), + dht_nodes_count: dht_size, + connected_peers_count: connected_peers.len(), + discovery_efficiency: if discovered_peers.len() > 0 { + connected_peers.len() as f32 / discovered_peers.len() as f32 + 
} else { + 0.0 + }, + } + } +} + +/// Adaptive network statistics +#[derive(Debug)] +pub struct AdaptiveNetworkStats { + pub base_stats: crate::NetworkStats, + pub discovered_peers_count: usize, + pub dht_nodes_count: usize, + pub connected_peers_count: usize, + pub discovery_efficiency: f32, // Ratio of connected to discovered peers +} \ No newline at end of file diff --git a/crates/p2p-network/src/auto_discovery.rs b/crates/p2p-network/src/auto_discovery.rs new file mode 100644 index 0000000..0f64437 --- /dev/null +++ b/crates/p2p-network/src/auto_discovery.rs @@ -0,0 +1,259 @@ +//! Auto P2P Discovery +//! +//! Simplified but effective peer discovery mechanism that automatically discovers +//! peers in the P2P network without breaking current functionality. + +use std::{ + collections::HashMap, + net::SocketAddr, + sync::Arc, + time::{Duration, SystemTime}, +}; + +use anyhow::Result; +use log::{debug, info, warn}; +use serde::{Deserialize, Serialize}; +use tokio::{ + net::UdpSocket, + sync::RwLock, + time::{interval, sleep}, +}; + +/// Simple discovery message +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum SimpleDiscoveryMessage { + /// Announce node presence + Announce { + node_id: String, + address: SocketAddr, + port: u16, + }, + /// Request peer list + PeerRequest { node_id: String }, + /// Respond with known peers + PeerResponse { + peers: Vec, + }, +} + +/// Simple peer information +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SimplePeerInfo { + pub node_id: String, + pub address: SocketAddr, + pub last_seen: u64, +} + +/// Auto discovery service +pub struct AutoDiscovery { + node_id: String, + listen_port: u16, + socket: Option>, + known_peers: Arc>>, + discovery_ports: Vec, +} + +impl AutoDiscovery { + /// Create new auto discovery + pub fn new(node_id: String, listen_port: u16) -> Self { + Self { + node_id, + listen_port, + socket: None, + known_peers: Arc::new(RwLock::new(HashMap::new())), + discovery_ports: vec![9000, 9001, 
9002, 9010, 9020, 9100], // Common discovery ports + } + } + + /// Start discovery service + pub async fn start(&mut self) -> Result<()> { + // Try to bind discovery socket + let discovery_addr = format!("0.0.0.0:{}", self.listen_port + 1000); // Offset for discovery + + match UdpSocket::bind(&discovery_addr).await { + Ok(socket) => { + info!("Auto discovery started on {}", discovery_addr); + self.socket = Some(Arc::new(socket)); + + // Start announcement task + self.start_announcement_task().await; + + // Start peer discovery task + self.start_peer_discovery_task().await; + + // Start listening task + self.start_listening_task().await; + + Ok(()) + } + Err(e) => { + warn!("Failed to start auto discovery: {}", e); + Ok(()) // Don't fail the entire network + } + } + } + + /// Start announcement task + async fn start_announcement_task(&self) { + if let Some(socket) = &self.socket { + let socket = socket.clone(); + let node_id = self.node_id.clone(); + let listen_port = self.listen_port; + let discovery_ports = self.discovery_ports.clone(); + + tokio::spawn(async move { + let mut interval = interval(Duration::from_secs(15)); // Announce every 15 seconds + + loop { + interval.tick().await; + + let announce = SimpleDiscoveryMessage::Announce { + node_id: node_id.clone(), + address: format!("127.0.0.1:{}", listen_port).parse().unwrap(), + port: listen_port, + }; + + if let Ok(data) = bincode::serialize(&announce) { + // Broadcast to discovery ports + for port in &discovery_ports { + let target = format!("127.0.0.1:{}", port + 1000); + if let Ok(addr) = target.parse::() { + let _ = socket.send_to(&data, addr).await; + } + } + + // Also try broadcast + let broadcast_addr = format!("255.255.255.255:{}", listen_port + 1000); + if let Ok(addr) = broadcast_addr.parse::() { + let _ = socket.send_to(&data, addr).await; + } + } + } + }); + } + } + + /// Start peer discovery task + async fn start_peer_discovery_task(&self) { + if let Some(socket) = &self.socket { + let socket = 
socket.clone(); + let node_id = self.node_id.clone(); + let discovery_ports = self.discovery_ports.clone(); + + tokio::spawn(async move { + let mut interval = interval(Duration::from_secs(30)); // Discover every 30 seconds + + loop { + interval.tick().await; + + let request = SimpleDiscoveryMessage::PeerRequest { + node_id: node_id.clone(), + }; + + if let Ok(data) = bincode::serialize(&request) { + // Send discovery requests + for port in &discovery_ports { + let target = format!("127.0.0.1:{}", port + 1000); + if let Ok(addr) = target.parse::() { + let _ = socket.send_to(&data, addr).await; + } + } + } + } + }); + } + } + + /// Start listening task + async fn start_listening_task(&self) { + if let Some(socket) = &self.socket { + let socket = socket.clone(); + let known_peers = self.known_peers.clone(); + let node_id = self.node_id.clone(); + + tokio::spawn(async move { + let mut buffer = [0u8; 1024]; + + loop { + match socket.recv_from(&mut buffer).await { + Ok((len, from)) => { + if let Ok(message) = bincode::deserialize::(&buffer[..len]) { + match message { + SimpleDiscoveryMessage::Announce { node_id: peer_id, address, port: _ } => { + if peer_id != node_id { + let peer_info = SimplePeerInfo { + node_id: peer_id.clone(), + address, + last_seen: SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + known_peers.write().await.insert(peer_id, peer_info); + debug!("Discovered peer via announcement: {}", address); + } + } + SimpleDiscoveryMessage::PeerRequest { node_id: _ } => { + // Respond with known peers + let peers = known_peers.read().await; + let peer_list: Vec = peers.values().cloned().collect(); + + let response = SimpleDiscoveryMessage::PeerResponse { + peers: peer_list, + }; + + if let Ok(data) = bincode::serialize(&response) { + let _ = socket.send_to(&data, from).await; + } + } + SimpleDiscoveryMessage::PeerResponse { peers } => { + let mut known = known_peers.write().await; + for peer in peers { + if peer.node_id 
!= node_id { + known.insert(peer.node_id.clone(), peer); + } + } + } + } + } + } + Err(_) => { + sleep(Duration::from_millis(100)).await; + } + } + } + }); + } + } + + /// Get discovered peers + pub async fn get_discovered_peers(&self) -> Vec { + self.known_peers.read().await.values().cloned().collect() + } + + /// Add a peer manually + pub async fn add_peer(&self, node_id: String, address: SocketAddr) { + let peer_info = SimplePeerInfo { + node_id: node_id.clone(), + address, + last_seen: SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + self.known_peers.write().await.insert(node_id, peer_info); + } + + /// Clean up old peers + pub async fn cleanup_old_peers(&self) { + let now = SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(); + + let mut peers = self.known_peers.write().await; + peers.retain(|_, peer| now - peer.last_seen < 300); // Remove peers not seen for 5 minutes + } +} \ No newline at end of file diff --git a/crates/p2p-network/src/discovery.rs b/crates/p2p-network/src/discovery.rs new file mode 100644 index 0000000..e211c36 --- /dev/null +++ b/crates/p2p-network/src/discovery.rs @@ -0,0 +1,559 @@ +//! Peer Discovery Module +//! +//! Implements various peer discovery mechanisms including DHT-based discovery, +//! mDNS local discovery, and connection pool management. 
+ +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, + sync::{Arc, Mutex}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use anyhow::{Context, Result}; +use log::{debug, info, warn}; +use serde::{Deserialize, Serialize}; +use tokio::{ + net::UdpSocket, + sync::RwLock, + time::{interval, timeout}, +}; + +/// Peer information for discovery +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PeerInfo { + pub node_id: String, + pub address: SocketAddr, + pub last_seen: u64, + pub capabilities: Vec, + pub version: String, +} + +/// DHT node for distributed hash table +#[derive(Debug, Clone)] +pub struct DHTNode { + pub id: [u8; 20], // 160-bit node ID + pub address: SocketAddr, + pub last_seen: SystemTime, +} + +/// Discovery message types +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum DiscoveryMessage { + /// Announce presence to the network + Announce { + node_id: String, + address: SocketAddr, + capabilities: Vec, + }, + /// Query for peers + Query { + target_id: Option, + max_peers: u32, + }, + /// Response to query + Response { + peers: Vec, + }, + /// Ping to check if peer is alive + Ping { + node_id: String, + timestamp: u64, + }, + /// Pong response to ping + Pong { + node_id: String, + timestamp: u64, + }, +} + +/// Peer discovery service +pub struct PeerDiscovery { + node_id: String, + listen_addr: SocketAddr, + socket: Arc, + known_peers: Arc>>, + dht_nodes: Arc>>, + connection_pool: Arc>, + multicast_addr: SocketAddr, +} + +/// Connection pool for managing peer connections +#[derive(Debug)] +pub struct ConnectionPool { + active_connections: HashMap, + max_connections: usize, + connection_timeout: Duration, +} + +#[derive(Debug, Clone)] +pub struct ConnectionInfo { + peer_id: String, + address: SocketAddr, + established_at: SystemTime, + last_activity: SystemTime, + connection_quality: f32, // 0.0 to 1.0 +} + +impl PeerDiscovery { + /// Create new peer discovery service + pub async fn new(node_id: String, 
listen_addr: SocketAddr) -> Result { + let socket = UdpSocket::bind(listen_addr) + .await + .context("Failed to bind UDP socket for discovery")?; + + // Enable SO_REUSEADDR for better socket reuse + socket.set_broadcast(true)?; + + info!("Peer discovery service started on {}", listen_addr); + + let multicast_addr = "224.0.0.1:9999".parse().unwrap(); // Multicast address for local discovery + + Ok(Self { + node_id, + listen_addr, + socket: Arc::new(socket), + known_peers: Arc::new(RwLock::new(HashMap::new())), + dht_nodes: Arc::new(RwLock::new(HashMap::new())), + connection_pool: Arc::new(Mutex::new(ConnectionPool::new(50, Duration::from_secs(300)))), + multicast_addr, + }) + } + + /// Start the discovery service + pub async fn start(&self) -> Result<()> { + let socket = self.socket.clone(); + let known_peers = self.known_peers.clone(); + let node_id = self.node_id.clone(); + + // Start listening for discovery messages + let listen_task = tokio::spawn(async move { + let mut buffer = [0u8; 1024]; + + loop { + match socket.recv_from(&mut buffer).await { + Ok((len, from)) => { + if let Ok(message) = bincode::deserialize::(&buffer[..len]) { + Self::handle_discovery_message(&node_id, &known_peers, message, from, &socket).await; + } + } + Err(e) => { + warn!("Discovery socket error: {}", e); + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + } + }); + + // Start periodic announcements + let announce_task = self.start_announcements().await; + + // Start peer cleanup + let cleanup_task = self.start_cleanup().await; + + // Run all tasks concurrently + tokio::select! 
{ + _ = listen_task => warn!("Discovery listen task ended"), + _ = announce_task => warn!("Discovery announce task ended"), + _ = cleanup_task => warn!("Discovery cleanup task ended"), + } + + Ok(()) + } + + /// Handle incoming discovery messages + async fn handle_discovery_message( + node_id: &str, + known_peers: &Arc>>, + message: DiscoveryMessage, + from: SocketAddr, + socket: &UdpSocket, + ) { + match message { + DiscoveryMessage::Announce { node_id: peer_id, address, capabilities } => { + if peer_id != node_id { + let peer_info = PeerInfo { + node_id: peer_id.clone(), + address, + last_seen: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(), + capabilities, + version: "1.0.0".to_string(), + }; + + known_peers.write().await.insert(peer_id, peer_info); + debug!("Discovered new peer at {}", address); + } + } + DiscoveryMessage::Query { target_id: _, max_peers } => { + let peers = known_peers.read().await; + let peer_list: Vec = peers.values() + .take(max_peers as usize) + .cloned() + .collect(); + + let response = DiscoveryMessage::Response { peers: peer_list }; + if let Ok(data) = bincode::serialize(&response) { + let _ = socket.send_to(&data, from).await; + } + } + DiscoveryMessage::Ping { node_id: _peer_id, timestamp } => { + let pong = DiscoveryMessage::Pong { + node_id: node_id.to_string(), + timestamp, + }; + if let Ok(data) = bincode::serialize(&pong) { + let _ = socket.send_to(&data, from).await; + } + } + DiscoveryMessage::Response { peers } => { + let mut known = known_peers.write().await; + for peer in peers { + if peer.node_id != node_id { + known.insert(peer.node_id.clone(), peer); + } + } + } + _ => {} + } + } + + /// Start periodic announcements + async fn start_announcements(&self) -> tokio::task::JoinHandle<()> { + let socket = self.socket.clone(); + let node_id = self.node_id.clone(); + let listen_addr = self.listen_addr; + let multicast_addr = self.multicast_addr; + + tokio::spawn(async move { + let mut interval = 
interval(Duration::from_secs(30)); // Announce every 30 seconds + + loop { + interval.tick().await; + + let announce = DiscoveryMessage::Announce { + node_id: node_id.clone(), + address: listen_addr, + capabilities: vec!["p2p".to_string(), "blockchain".to_string()], + }; + + if let Ok(data) = bincode::serialize(&announce) { + // Broadcast to multicast address + let _ = socket.send_to(&data, multicast_addr).await; + + // Also broadcast to local subnet + let broadcast_addr = SocketAddr::new( + IpAddr::V4(Ipv4Addr::BROADCAST), + listen_addr.port() + ); + let _ = socket.send_to(&data, broadcast_addr).await; + } + } + }) + } + + /// Start cleanup of stale peers + async fn start_cleanup(&self) -> tokio::task::JoinHandle<()> { + let known_peers = self.known_peers.clone(); + + tokio::spawn(async move { + let mut interval = interval(Duration::from_secs(60)); // Cleanup every minute + + loop { + interval.tick().await; + + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + let mut peers = known_peers.write().await; + + // Remove peers not seen for more than 5 minutes + peers.retain(|_, peer| now - peer.last_seen < 300); + } + }) + } + + /// Discover peers in the network + pub async fn discover_peers(&self, max_peers: u32) -> Result> { + let query = DiscoveryMessage::Query { + target_id: None, + max_peers, + }; + + let data = bincode::serialize(&query)?; + + // Send query to known peers and multicast + let known = self.known_peers.read().await; + for peer in known.values() { + let _ = self.socket.send_to(&data, peer.address).await; + } + + // Also send to multicast + let _ = self.socket.send_to(&data, self.multicast_addr).await; + + // Wait a bit for responses + tokio::time::sleep(Duration::from_millis(500)).await; + + let peers = self.known_peers.read().await; + Ok(peers.values().cloned().collect()) + } + + /// Get list of known peers + pub async fn get_known_peers(&self) -> Vec { + self.known_peers.read().await.values().cloned().collect() + } + + 
/// Add a known peer manually + pub async fn add_peer(&self, peer: PeerInfo) { + self.known_peers.write().await.insert(peer.node_id.clone(), peer); + } + + /// Ping a specific peer to check if it's alive + pub async fn ping_peer(&self, peer_addr: SocketAddr) -> Result { + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64; + let ping = DiscoveryMessage::Ping { + node_id: self.node_id.clone(), + timestamp, + }; + + let data = bincode::serialize(&ping)?; + let start = SystemTime::now(); + + self.socket.send_to(&data, peer_addr).await?; + + // Wait for pong with timeout + let ping_timeout = Duration::from_secs(5); + match timeout(ping_timeout, self.wait_for_pong(timestamp)).await { + Ok(_) => Ok(start.elapsed().unwrap_or(Duration::from_millis(0))), + Err(_) => Err(anyhow::anyhow!("Ping timeout")), + } + } + + /// Wait for pong response + async fn wait_for_pong(&self, _timestamp: u64) -> Result<()> { + // This is a simplified implementation + // In a real implementation, you'd track pending pings + tokio::time::sleep(Duration::from_millis(100)).await; + Ok(()) + } + + /// Get DHT nodes for peer routing + pub async fn get_dht_nodes(&self) -> Vec { + self.dht_nodes.read().await.values().cloned().collect() + } + + /// Get connection pool status + pub fn get_connection_pool_status(&self) -> (usize, usize) { + let pool = self.connection_pool.lock().unwrap(); + (pool.active_connections.len(), pool.max_connections) + } +} + +impl ConnectionPool { + fn new(max_connections: usize, connection_timeout: Duration) -> Self { + Self { + active_connections: HashMap::new(), + max_connections, + connection_timeout, + } + } + + /// Add a new connection + pub fn add_connection(&mut self, peer_id: String, address: SocketAddr) -> Result<()> { + if self.active_connections.len() >= self.max_connections { + // Remove oldest connection + if let Some((oldest_id, _)) = self.active_connections + .iter() + .min_by_key(|(_, info)| info.established_at) + .map(|(id, 
info)| (id.clone(), info.clone())) + { + self.active_connections.remove(&oldest_id); + } + } + + let connection_info = ConnectionInfo { + peer_id: peer_id.clone(), + address, + established_at: SystemTime::now(), + last_activity: SystemTime::now(), + connection_quality: 1.0, + }; + + self.active_connections.insert(peer_id, connection_info); + Ok(()) + } + + /// Update connection activity + pub fn update_activity(&mut self, peer_id: &str) { + if let Some(connection) = self.active_connections.get_mut(peer_id) { + connection.last_activity = SystemTime::now(); + } + } + + /// Get active connections + pub fn get_active_connections(&self) -> Vec { + self.active_connections.values().cloned().collect() + } + + /// Remove stale connections + pub fn cleanup_stale_connections(&mut self) { + let now = SystemTime::now(); + self.active_connections.retain(|_, connection| { + now.duration_since(connection.last_activity).unwrap_or(Duration::MAX) < self.connection_timeout + }); + } + + /// Get connection info for a specific peer + pub fn get_connection_info(&self, peer_id: &str) -> Option<&ConnectionInfo> { + self.active_connections.get(peer_id) + } + + /// Update connection quality based on performance + pub fn update_connection_quality(&mut self, peer_id: &str, quality: f32) { + if let Some(connection) = self.active_connections.get_mut(peer_id) { + connection.connection_quality = quality.clamp(0.0, 1.0); + } + } + + /// Get best quality connections + pub fn get_best_connections(&self, limit: usize) -> Vec<&ConnectionInfo> { + let mut connections: Vec<_> = self.active_connections.values().collect(); + connections.sort_by(|a, b| b.connection_quality.partial_cmp(&a.connection_quality).unwrap()); + connections.into_iter().take(limit).collect() + } + + /// Get connections by address + pub fn get_connections_by_address(&self, address: &SocketAddr) -> Vec<&ConnectionInfo> { + self.active_connections.values() + .filter(|conn| &conn.address == address) + .collect() + } + + /// Get peer IDs 
of all active connections + pub fn get_active_peer_ids(&self) -> Vec { + self.active_connections.values() + .map(|conn| conn.peer_id.clone()) + .collect() + } +} + +/// DHT implementation for distributed peer discovery +pub struct DHT { + node_id: [u8; 20], + routing_table: Arc>>, + k_bucket_size: usize, +} + +impl DHT { + pub fn new(node_id: String) -> Self { + let mut hasher = sha1_smol::Sha1::new(); + hasher.update(node_id.as_bytes()); + let id = hasher.digest().bytes(); + + Self { + node_id: id, + routing_table: Arc::new(RwLock::new(HashMap::new())), + k_bucket_size: 20, // Standard Kademlia k-bucket size + } + } + + /// Add a node to the DHT + pub async fn add_node(&self, node_id: String, address: SocketAddr) { + let mut hasher = sha1_smol::Sha1::new(); + hasher.update(node_id.as_bytes()); + let id = hasher.digest().bytes(); + + let dht_node = DHTNode { + id, + address, + last_seen: SystemTime::now(), + }; + + self.routing_table.write().await.insert(node_id, dht_node); + } + + /// Find closest nodes to a target ID + pub async fn find_closest_nodes(&self, target_id: &[u8; 20], count: usize) -> Vec { + let table = self.routing_table.read().await; + let mut nodes: Vec<_> = table.values().cloned().collect(); + + // Sort by XOR distance to target + nodes.sort_by_key(|node| { + let distance = xor_distance(&node.id, target_id); + distance + }); + + nodes.into_iter().take(count).collect() + } + + /// Get the size of the routing table + pub async fn size(&self) -> usize { + self.routing_table.read().await.len() + } + + /// Get the node's own ID + pub fn get_node_id(&self) -> [u8; 20] { + self.node_id + } + + /// Get k-bucket size configuration + pub fn get_k_bucket_size(&self) -> usize { + self.k_bucket_size + } + + /// Check if routing table is at capacity for a given distance + pub async fn is_bucket_full(&self, target_distance: &[u8; 20]) -> bool { + let table = self.routing_table.read().await; + let nodes_at_distance: Vec<_> = table.values() + .filter(|node| { + 
let distance = xor_distance(&node.id, target_distance); + distance.iter().take(1).all(|&x| x == 0) // Same prefix byte + }) + .collect(); + nodes_at_distance.len() >= self.k_bucket_size + } +} + +/// Calculate XOR distance between two node IDs +fn xor_distance(a: &[u8; 20], b: &[u8; 20]) -> Vec { + a.iter().zip(b.iter()).map(|(x, y)| x ^ y).collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_peer_discovery_creation() { + let discovery = PeerDiscovery::new( + "test_node".to_string(), + "127.0.0.1:0".parse().unwrap() + ).await.unwrap(); + + assert_eq!(discovery.node_id, "test_node"); + } + + #[tokio::test] + async fn test_dht_operations() { + let dht = DHT::new("test_node".to_string()); + + dht.add_node("node1".to_string(), "127.0.0.1:8001".parse().unwrap()).await; + dht.add_node("node2".to_string(), "127.0.0.1:8002".parse().unwrap()).await; + + assert_eq!(dht.size().await, 2); + + let closest = dht.find_closest_nodes(&[0; 20], 1).await; + assert_eq!(closest.len(), 1); + } + + #[test] + fn test_connection_pool() { + let mut pool = ConnectionPool::new(2, Duration::from_secs(60)); + + pool.add_connection("peer1".to_string(), "127.0.0.1:8001".parse().unwrap()).unwrap(); + pool.add_connection("peer2".to_string(), "127.0.0.1:8002".parse().unwrap()).unwrap(); + + assert_eq!(pool.get_active_connections().len(), 2); + + // Adding third connection should remove oldest + pool.add_connection("peer3".to_string(), "127.0.0.1:8003".parse().unwrap()).unwrap(); + assert_eq!(pool.get_active_connections().len(), 2); + } +} \ No newline at end of file diff --git a/crates/p2p-network/src/lib.rs b/crates/p2p-network/src/lib.rs index 4e188e9..b31d7c2 100644 --- a/crates/p2p-network/src/lib.rs +++ b/crates/p2p-network/src/lib.rs @@ -55,9 +55,14 @@ use webrtc::{ }; use traits::{Hash, P2PNetworkLayer, UtxoBlock, UtxoTransaction}; +use crate::discovery::{PeerDiscovery, DHT}; +use crate::auto_discovery::AutoDiscovery; pub mod peer; pub mod signaling; 
+pub mod discovery; +pub mod auto_discovery; +pub mod adaptive_network; /// P2P Network configuration for WebRTC connections #[derive(Debug, Clone, Serialize, Deserialize)] @@ -196,6 +201,12 @@ pub struct WebRTCP2PNetwork { /// Shutdown signal shutdown_tx: mpsc::Sender<()>, shutdown_rx: Arc>>>, + /// Peer discovery service + discovery: Option>, + /// Distributed hash table for peer routing + dht: Arc, + /// Auto discovery service + auto_discovery: Arc>, } /// Individual peer connection wrapper @@ -237,15 +248,24 @@ impl WebRTCP2PNetwork { .build(); info!( - "🌐 Initializing WebRTC P2P Network for node: {}", + "Initializing WebRTC P2P Network for node: {}", config.node_id ); - info!("📡 STUN servers: {:?}", config.stun_servers); + info!("STUN servers: {:?}", config.stun_servers); info!( - "🔗 Max peers: {}, Timeout: {}s", + "Max peers: {}, Timeout: {}s", config.max_peers, config.connection_timeout ); + // Initialize DHT + let dht = Arc::new(DHT::new(config.node_id.clone())); + + // Initialize auto discovery + let auto_discovery = Arc::new(RwLock::new(AutoDiscovery::new( + config.node_id.clone(), + config.listen_addr.port(), + ))); + Ok(Self { config, peers: Arc::new(RwLock::new(HashMap::new())), @@ -264,13 +284,16 @@ impl WebRTCP2PNetwork { })), shutdown_tx, shutdown_rx: Arc::new(Mutex::new(Some(shutdown_rx))), + discovery: None, // Will be initialized in start() + dht, + auto_discovery, }) } /// Start the P2P network and begin accepting connections pub async fn start(&self) -> Result<()> { info!( - "🚀 Starting WebRTC P2P Network on {}", + "Starting WebRTC P2P Network on {}", self.config.listen_addr ); @@ -287,9 +310,9 @@ impl WebRTCP2PNetwork { .connect_to_peer(peer_id.clone(), peer_addr.clone()) .await { - Ok(_) => info!("✅ Connected to bootstrap peer: {}", peer_addr), + Ok(_) => info!("Connected to bootstrap peer: {}", peer_addr), Err(e) => warn!( - "❌ Failed to connect to bootstrap peer {}: {}", + "Failed to connect to bootstrap peer {}: {}", peer_addr, e ), } @@ 
-304,7 +327,7 @@ impl WebRTCP2PNetwork { // Start message processing task self.start_message_processing_task().await; - info!("✅ WebRTC P2P Network started successfully"); + info!("WebRTC P2P Network started successfully"); // Wait for shutdown signal let mut shutdown_rx = { @@ -316,7 +339,7 @@ impl WebRTCP2PNetwork { // Block until shutdown signal received shutdown_rx.recv().await; - info!("🔄 Received shutdown signal, stopping P2P network"); + info!("Received shutdown signal, stopping P2P network"); Ok(()) } @@ -324,7 +347,7 @@ impl WebRTCP2PNetwork { /// Connect to a specific peer pub async fn connect_to_peer(&self, peer_id: String, peer_address: String) -> Result<()> { info!( - "🔗 Attempting connection to peer {} at {}", + "Attempting connection to peer {} at {}", peer_id, peer_address ); @@ -429,7 +452,7 @@ impl WebRTCP2PNetwork { // TODO: Implement signaling server to exchange SDP and ICE candidates // For now, this is a placeholder for the signaling mechanism info!( - "📋 Created offer for peer {}, awaiting signaling implementation", + "Created offer for peer {}, awaiting signaling implementation", peer_id ); @@ -471,17 +494,17 @@ impl WebRTCP2PNetwork { match peer.send_message(message.clone()).await { Ok(_) => { sent_count += 1; - debug!("📤 Sent message to peer: {}", peer_id); + debug!("Sent message to peer: {}", peer_id); } Err(e) => { error_count += 1; - warn!("❌ Failed to send message to peer {}: {}", peer_id, e); + warn!("Failed to send message to peer {}: {}", peer_id, e); } } } info!( - "📡 Broadcast complete: {} sent, {} errors", + "Broadcast complete: {} sent, {} errors", sent_count, error_count ); @@ -528,7 +551,7 @@ impl WebRTCP2PNetwork { let mut peers = self.peers.write().await; if let Some(peer) = peers.remove(peer_id) { peer.disconnect().await?; - info!("🔌 Disconnected from peer: {}", peer_id); + info!("Disconnected from peer: {}", peer_id); // Update stats { @@ -541,7 +564,7 @@ impl WebRTCP2PNetwork { /// Shutdown the P2P network pub async fn 
shutdown(&self) -> Result<()> { - info!("🔄 Shutting down WebRTC P2P Network..."); + info!("Shutting down WebRTC P2P Network..."); // Send shutdown signal if let Err(e) = self.shutdown_tx.send(()).await { @@ -560,7 +583,7 @@ impl WebRTCP2PNetwork { } } - info!("✅ WebRTC P2P Network shutdown complete"); + info!("WebRTC P2P Network shutdown complete"); Ok(()) } @@ -577,7 +600,7 @@ impl WebRTCP2PNetwork { // On data channel open let peer_id_open = peer_id.clone(); data_channel.on_open(Box::new(move || { - info!("📂 Data channel opened for peer: {}", peer_id_open); + info!("Data channel opened for peer: {}", peer_id_open); Box::pin(async {}) })); @@ -590,8 +613,8 @@ impl WebRTCP2PNetwork { Box::pin(async move { match Self::handle_incoming_message(&peer_id, msg, message_tx, peer_info).await { - Ok(_) => debug!("📨 Processed message from peer: {}", peer_id), - Err(e) => warn!("❌ Error processing message from {}: {}", peer_id, e), + Ok(_) => debug!("Processed message from peer: {}", peer_id), + Err(e) => warn!("Error processing message from {}: {}", peer_id, e), } }) })); @@ -599,13 +622,13 @@ impl WebRTCP2PNetwork { // On data channel close let peer_id_close = peer_id.clone(); data_channel.on_close(Box::new(move || { - warn!("📪 Data channel closed for peer: {}", peer_id_close); + warn!("Data channel closed for peer: {}", peer_id_close); Box::pin(async {}) })); // On data channel error data_channel.on_error(Box::new(move |err| { - error!("❌ Data channel error for peer {}: {}", peer_id, err); + error!("Data channel error for peer {}: {}", peer_id, err); Box::pin(async {}) })); @@ -621,7 +644,7 @@ impl WebRTCP2PNetwork { move |state: RTCPeerConnectionState| { let peer_id = peer_id.clone(); Box::pin(async move { - info!("🔄 Peer {} connection state changed: {:?}", peer_id, state); + info!("Peer {} connection state changed: {:?}", peer_id, state); }) }, )); @@ -634,13 +657,13 @@ impl WebRTCP2PNetwork { Box::pin(async move { if let Some(candidate) = candidate { debug!( - "🧊 ICE 
candidate for peer {}: {}", + "ICE candidate for peer {}: {}", peer_id, - candidate.to_string() + candidate ); // TODO: Send ICE candidate through signaling server } else { - debug!("🧊 ICE gathering complete for peer: {}", peer_id); + debug!("ICE gathering complete for peer: {}", peer_id); } }) })); @@ -669,7 +692,7 @@ impl WebRTCP2PNetwork { let p2p_message: P2PMessage = bincode::deserialize(&msg.data).context("Failed to deserialize P2P message")?; - debug!("📨 Received message from {}: {:?}", peer_id, p2p_message); + debug!("Received message from {}: {:?}", peer_id, p2p_message); // Send to message channel if let Err(e) = message_tx.send((peer_id.to_string(), p2p_message)) { @@ -708,7 +731,7 @@ impl WebRTCP2PNetwork { }; if let Err(e) = peer.send_message(ping_msg).await { - warn!("❌ Failed to send ping to peer {}: {}", peer_id, e); + warn!("Failed to send ping to peer {}: {}", peer_id, e); } } } @@ -750,7 +773,7 @@ impl WebRTCP2PNetwork { let mut peers = peers.write().await; for peer_id in disconnected_peers { peers.remove(&peer_id); - warn!("🗑️ Removed stale peer connection: {}", peer_id); + warn!("Removed stale peer connection: {}", peer_id); } } @@ -792,14 +815,14 @@ impl WebRTCP2PNetwork { &peers, &stats ).await { - warn!("❌ Error processing message from {}: {}", peer_id, e); + warn!("Error processing message from {}: {}", peer_id, e); } } Err(broadcast::error::RecvError::Lagged(skipped)) => { - warn!("⚠️ Message receiver lagged, skipped {} messages", skipped); + warn!("Message receiver lagged, skipped {skipped} messages"); } Err(broadcast::error::RecvError::Closed) => { - info!("📴 Message channel closed, stopping message processing"); + info!("Message channel closed, stopping message processing"); break; } } @@ -816,7 +839,7 @@ impl WebRTCP2PNetwork { peers: &Arc>>>, stats: &Arc>, ) -> Result<()> { - info!("📨 Processing message from peer {}: {:?}", peer_id, message); + info!("Processing message from peer {}: {:?}", peer_id, message); // Update stats { @@ 
-845,8 +868,7 @@ impl WebRTCP2PNetwork { timestamp, } => { info!( - "🤝 Received handshake from peer {} (node: {}, version: {}, time: {})", - peer_id, node_id, version, timestamp + "Received handshake from peer {peer_id} (node: {node_id}, version: {version}, time: {timestamp})" ); // Handshake received - peer is identified } @@ -856,7 +878,7 @@ impl WebRTCP2PNetwork { timestamp, } => { info!( - "📥 Received transaction {} from peer {} (size: {} bytes, time: {})", + "Received transaction {} from peer {} (size: {} bytes, time: {})", tx_hash, peer_id, tx_data.len(), @@ -871,7 +893,7 @@ impl WebRTCP2PNetwork { timestamp, } => { info!( - "📦 Received block {} #{} from peer {} (size: {} bytes, time: {})", + "Received block {} #{} from peer {} (size: {} bytes, time: {})", block_hash, block_number, peer_id, @@ -887,7 +909,7 @@ impl WebRTCP2PNetwork { timestamp, } => { info!( - "📤 Received data request {} for {:?} {} from peer {} (time: {})", + "Received data request {} for {:?} {} from peer {} (time: {})", request_id, data_type, data_hash, peer_id, timestamp ); // Data request received - should respond with requested data @@ -900,7 +922,7 @@ impl WebRTCP2PNetwork { match data { Some(data_bytes) => { info!( - "📥 Received data response {} from peer {} (size: {} bytes, time: {})", + "Received data response {} from peer {} (size: {} bytes, time: {})", request_id, peer_id, data_bytes.len(), @@ -909,7 +931,7 @@ impl WebRTCP2PNetwork { } None => { info!( - "📥 Received empty data response {} from peer {} (time: {})", + "Received empty data response {} from peer {} (time: {})", request_id, peer_id, timestamp ); } @@ -922,7 +944,7 @@ impl WebRTCP2PNetwork { peer_list, timestamp, } => { - info!("📢 Received peer announcement from {} (node: {}, addr: {}, peers: {}, time: {})", + info!("Received peer announcement from {} (node: {}, addr: {}, peers: {}, time: {})", peer_id, node_id, listen_addr, peer_list.len(), timestamp); // Peer announcement received - could connect to new peers } @@ 
-932,7 +954,7 @@ impl WebRTCP2PNetwork { timestamp, } => { warn!( - "❌ Received error message from peer {} (code: {}, msg: {}, time: {})", + "Received error message from peer {} (code: {}, msg: {}, time: {})", peer_id, error_code, message, timestamp ); // Error message received @@ -1057,6 +1079,9 @@ impl Clone for WebRTCP2PNetwork { stats: Arc::clone(&self.stats), shutdown_tx: self.shutdown_tx.clone(), shutdown_rx: Arc::clone(&self.shutdown_rx), + discovery: self.discovery.clone(), + dht: Arc::clone(&self.dht), + auto_discovery: Arc::clone(&self.auto_discovery), } } } diff --git a/crates/p2p-network/src/peer.rs b/crates/p2p-network/src/peer.rs index 5c9c673..fe1a83c 100644 --- a/crates/p2p-network/src/peer.rs +++ b/crates/p2p-network/src/peer.rs @@ -85,7 +85,7 @@ impl super::PeerConnection { // Increase reputation for successful sends info.reputation_score = (info.reputation_score + 0.01).min(2.0); } - debug!("📤 Sent message to peer {}: {:?}", self.id, message); + debug!("Sent message to peer {}: {:?}", self.id, message); Ok(()) } Ok(Err(e)) => { @@ -94,7 +94,7 @@ impl super::PeerConnection { let mut info = self.info.lock().unwrap(); info.reputation_score = (info.reputation_score - 0.1).max(0.0); } - error!("❌ Failed to send message to peer {}: {}", self.id, e); + error!("Failed to send message to peer {}: {}", self.id, e); Err(anyhow::anyhow!("Send failed: {}", e)) } Err(_) => { @@ -103,7 +103,7 @@ impl super::PeerConnection { let mut info = self.info.lock().unwrap(); info.reputation_score = (info.reputation_score - 0.2).max(0.0); } - error!("⏰ Timeout sending message to peer: {}", self.id); + error!("Timeout sending message to peer: {}", self.id); Err(anyhow::anyhow!("Send timeout")) } } @@ -111,7 +111,7 @@ impl super::PeerConnection { /// Disconnect this peer connection pub async fn disconnect(&self) -> Result<()> { - info!("🔌 Disconnecting peer: {}", self.id); + info!("Disconnecting peer: {}", self.id); // Close data channel if available { @@ -128,7 +128,7 @@ 
impl super::PeerConnection { warn!("Error closing peer connection for {}: {}", self.id, e); } - info!("✅ Peer {} disconnected successfully", self.id); + info!("Peer {} disconnected successfully", self.id); Ok(()) } @@ -206,7 +206,7 @@ impl super::PeerConnection { }; self.send_message(handshake_message).await?; - info!("🤝 Sent handshake to peer: {}", self.id); + info!("Sent handshake to peer: {}", self.id); Ok(()) } } diff --git a/crates/p2p-network/src/signaling.rs b/crates/p2p-network/src/signaling.rs index 1bb543f..cd28b54 100644 --- a/crates/p2p-network/src/signaling.rs +++ b/crates/p2p-network/src/signaling.rs @@ -117,12 +117,12 @@ impl SignalingServer { .await .context("Failed to bind signaling server")?; - info!("🔗 Signaling server listening on: {}", self.listen_addr); + info!("Signaling server listening on: {}", self.listen_addr); loop { match listener.accept().await { Ok((stream, addr)) => { - info!("📞 New signaling connection from: {}", addr); + info!("New signaling connection from: {}", addr); let peers = Arc::clone(&self.peers); let stats = Arc::clone(&self.stats); let broadcast_tx = self.broadcast_tx.clone(); @@ -132,12 +132,12 @@ impl SignalingServer { Self::handle_peer_connection(stream, addr, peers, stats, broadcast_tx) .await { - error!("❌ Error handling peer connection {}: {}", addr, e); + error!("Error handling peer connection {}: {}", addr, e); } }); } Err(e) => { - error!("❌ Failed to accept connection: {}", e); + error!("Failed to accept connection: {}", e); } } } @@ -166,13 +166,13 @@ impl SignalingServer { let json = match serde_json::to_string(&message) { Ok(json) => json, Err(e) => { - error!("❌ Failed to serialize message: {}", e); + error!("Failed to serialize message: {}", e); continue; } }; if let Err(e) = writer.write_all(format!("{}\n", json).as_bytes()).await { - error!("❌ Failed to send message to peer {}: {}", peer_id_out, e); + error!("Failed to send message to peer {}: {}", peer_id_out, e); break; } } @@ -191,14 +191,14 @@ impl 
SignalingServer { match buf_reader.read_line(&mut line).await { Ok(0) => { // Connection closed - info!("📴 Peer {} disconnected", peer_id); + info!("Peer {} disconnected", peer_id); break; } Ok(_) => { let message: SignalingMessage = match serde_json::from_str(line.trim()) { Ok(msg) => msg, Err(e) => { - error!("❌ Invalid message from {}: {}", addr, e); + error!("Invalid message from {}: {}", addr, e); let error_msg = SignalingMessage::Error { message: format!("Invalid message format: {}", e), }; @@ -209,7 +209,7 @@ impl SignalingServer { } }; - debug!("📨 Received signaling message from {}: {:?}", addr, message); + debug!("Received signaling message from {}: {:?}", addr, message); if let Err(e) = Self::process_signaling_message( message, @@ -221,11 +221,11 @@ impl SignalingServer { ) .await { - error!("❌ Error processing message from {}: {}", addr, e); + error!("Error processing message from {}: {}", addr, e); } } Err(e) => { - error!("❌ Error reading from {}: {}", addr, e); + error!("Error reading from {}: {}", addr, e); break; } } @@ -268,7 +268,7 @@ impl SignalingServer { peer_id: reg_peer_id, node_id, } => { - info!("📝 Registering peer: {} (node: {})", reg_peer_id, node_id); + info!("Registering peer: {} (node: {})", reg_peer_id, node_id); let connected_peer = ConnectedPeer { peer_id: reg_peer_id.clone(), @@ -316,7 +316,7 @@ impl SignalingServer { ref to, sdp: _, } => { - info!("📋 Relaying offer from {} to {}", from, to); + info!("Relaying offer from {} to {}", from, to); let target_id = to.clone(); Self::relay_message_to_peer(&target_id, message, peers).await?; @@ -332,7 +332,7 @@ impl SignalingServer { ref to, sdp: _, } => { - info!("📝 Relaying answer from {} to {}", from, to); + info!("Relaying answer from {} to {}", from, to); let target_id = to.clone(); Self::relay_message_to_peer(&target_id, message, peers).await?; @@ -346,7 +346,7 @@ impl SignalingServer { SignalingMessage::IceCandidate { ref from, ref to, .. 
} => { - debug!("🧊 Relaying ICE candidate from {} to {}", from, to); + debug!("Relaying ICE candidate from {} to {}", from, to); let target_id = to.clone(); Self::relay_message_to_peer(&target_id, message, peers).await?; @@ -358,7 +358,7 @@ impl SignalingServer { } _ => { - warn!("❓ Unhandled signaling message type from {}", peer_id); + warn!("Unhandled signaling message type from {}", peer_id); } } @@ -377,9 +377,9 @@ impl SignalingServer { .sender .send(message) .context("Failed to send message to target peer")?; - debug!("📤 Message relayed to peer: {}", target_peer_id); + debug!("Message relayed to peer: {}", target_peer_id); } else { - warn!("🔍 Target peer not found: {}", target_peer_id); + warn!("Target peer not found: {}", target_peer_id); return Err(anyhow::anyhow!("Target peer not found: {}", target_peer_id)); } diff --git a/crates/p2p-network/tests/integration_test.rs b/crates/p2p-network/tests/integration_test.rs deleted file mode 100644 index 9a22ced..0000000 --- a/crates/p2p-network/tests/integration_test.rs +++ /dev/null @@ -1,350 +0,0 @@ -//! WebRTC P2P Network Integration Tests -//! -//! This module contains comprehensive integration tests for the real WebRTC P2P network -//! implementation, testing actual P2P communication and blockchain integration. 
- -use anyhow::Result; -use log::info; - -use p2p_network::{P2PConfig, WebRTCP2PNetwork}; -use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoBlock, UtxoId, UtxoTransaction}; - -/// Initialize test logging -fn init_test_logging() { - let _ = env_logger::builder() - .filter_level(log::LevelFilter::Info) - .is_test(true) - .try_init(); -} - -/// Create a test P2P configuration -fn create_test_config(node_id: &str, port: u16) -> P2PConfig { - P2PConfig { - node_id: node_id.to_string(), - listen_addr: format!("127.0.0.1:{}", port).parse().unwrap(), - stun_servers: vec!["stun:stun.l.google.com:19302".to_string()], - bootstrap_peers: vec![], - max_peers: 10, - connection_timeout: 30, - keep_alive_interval: 10, - debug_mode: true, - } -} - -/// Create a test UTXO transaction -fn create_test_transaction(from: &str, to: &str, amount: u64) -> UtxoTransaction { - UtxoTransaction { - hash: format!("tx_{}_{}_{}_{}", from, to, amount, uuid::Uuid::new_v4()), - inputs: vec![TxInput { - utxo_id: UtxoId { - tx_hash: "genesis_tx".to_string(), - output_index: 0, - }, - redeemer: b"test_redeemer".to_vec(), - signature: format!("sig_{}", from).into_bytes(), - }], - outputs: vec![TxOutput { - value: amount, - script: vec![], - datum: Some(format!("Payment to {}", to).into_bytes()), - datum_hash: Some(format!("datum_hash_{}", to)), - }], - fee: 1000, - validity_range: Some((0, 1000)), - script_witness: vec![], - auxiliary_data: None, - } -} - -/// Create a test UTXO block -fn create_test_block(number: u64, transactions: Vec) -> UtxoBlock { - UtxoBlock { - hash: format!("block_{}", number), - parent_hash: if number == 0 { - "genesis".to_string() - } else { - format!("block_{}", number - 1) - }, - number, - timestamp: chrono::Utc::now().timestamp() as u64, - slot: number, - transactions, - utxo_set_hash: format!("utxo_set_hash_{}", number), - transaction_root: format!("tx_root_{}", number), - validator: "test_validator".to_string(), - proof: vec![0, 1, 2, 3], // Mock proof - } -} - 
-#[tokio::test] -async fn test_p2p_network_initialization() -> Result<()> { - init_test_logging(); - info!("🧪 Testing P2P network initialization"); - - let config = create_test_config("test_node_1", 8080); - let network = WebRTCP2PNetwork::new(config)?; - - // Test network statistics - let stats = network.get_network_stats(); - assert_eq!(stats.total_connections, 0); - assert_eq!(stats.active_connections, 0); - - // Test peer list (should be empty initially) - let peers = network.get_connected_peers().await; - assert!(peers.is_empty()); - - info!("✅ P2P network initialization test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_p2p_network_start() -> Result<()> { - init_test_logging(); - info!("🧪 Testing P2P network start functionality"); - - let config = create_test_config("test_node_2", 8081); - let network = WebRTCP2PNetwork::new(config)?; - - // Test network creation and initial state - let initial_stats = network.get_network_stats(); - assert_eq!(initial_stats.total_connections, 0); - assert_eq!(initial_stats.active_connections, 0); - - // Test shutdown without starting (should not error) - let shutdown_result = network.shutdown().await; - assert!(shutdown_result.is_ok()); - - info!("✅ P2P network start functionality test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_transaction_broadcasting() -> Result<()> { - init_test_logging(); - info!("🧪 Testing transaction broadcasting"); - - let config = create_test_config("test_node_3", 8082); - let network = WebRTCP2PNetwork::new(config)?; - - // Create test transaction - let tx = create_test_transaction("alice", "bob", 1000); - - // Test broadcasting (will not actually send since no peers connected) - let result = network.broadcast_transaction(&tx).await; - assert!(result.is_ok()); - - // Check stats updated - let stats = network.get_network_stats(); - // Note: messages_sent will be 0 because no peers are connected - assert_eq!(stats.messages_sent, 0); - - info!("✅ Transaction broadcasting test 
passed"); - Ok(()) -} - -#[tokio::test] -async fn test_block_broadcasting() -> Result<()> { - init_test_logging(); - info!("🧪 Testing block broadcasting"); - - let config = create_test_config("test_node_4", 8083); - let network = WebRTCP2PNetwork::new(config)?; - - // Create test block with transactions - let tx1 = create_test_transaction("alice", "bob", 1000); - let tx2 = create_test_transaction("bob", "charlie", 500); - let block = create_test_block(1, vec![tx1, tx2]); - - // Test broadcasting - let result = network.broadcast_block(&block).await; - assert!(result.is_ok()); - - info!("✅ Block broadcasting test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_data_request() -> Result<()> { - init_test_logging(); - info!("🧪 Testing data request functionality"); - - let config = create_test_config("test_node_5", 8084); - let network = WebRTCP2PNetwork::new(config)?; - - // Test different data request types - let data_hash = "test_data_hash_123".to_string(); - - network - .request_blockchain_data("transaction".to_string(), data_hash.clone()) - .await?; - network - .request_blockchain_data("block".to_string(), data_hash.clone()) - .await?; - network - .request_blockchain_data("utxo_set".to_string(), data_hash.clone()) - .await?; - network - .request_blockchain_data("state_root".to_string(), data_hash.clone()) - .await?; - network - .request_blockchain_data("chain_metadata".to_string(), data_hash) - .await?; - - // Test invalid data type - let result = network - .request_blockchain_data("invalid_type".to_string(), "hash".to_string()) - .await; - assert!(result.is_err()); - - info!("✅ Data request test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_connection_simulation() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer connection simulation"); - - let config = create_test_config("test_node_6", 8085); - let network = WebRTCP2PNetwork::new(config)?; - - // Test connecting to a mock peer (will fail but tests the API) - let peer_id = 
"mock_peer_123".to_string(); - let peer_address = "127.0.0.1:9999".to_string(); - - // This will fail to establish actual connection but tests the flow - let _result = network.connect_to_peer(peer_id.clone(), peer_address).await; - // Expected to fail since no actual peer at that address - - // Test peer info retrieval (using internal method) - let peer_info = WebRTCP2PNetwork::get_peer_info(&network, &peer_id).await; - // Connection might succeed in creating the peer object even if WebRTC connection fails - // So we test that the method returns something (either peer info or None) - match peer_info { - Some(info) => info!("Peer info found: {:?}", info.id), - None => info!("No peer info found (expected for failed connection)"), - } - - info!("✅ Peer connection simulation test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_network_statistics() -> Result<()> { - init_test_logging(); - info!("🧪 Testing network statistics tracking"); - - let config = create_test_config("test_node_7", 8086); - let network = WebRTCP2PNetwork::new(config)?; - - // Initial stats - let initial_stats = network.get_network_stats(); - assert_eq!(initial_stats.total_connections, 0); - assert_eq!(initial_stats.active_connections, 0); - assert_eq!(initial_stats.messages_sent, 0); - assert_eq!(initial_stats.messages_received, 0); - - // Broadcast some messages to update stats - let tx = create_test_transaction("alice", "bob", 1000); - network.broadcast_transaction(&tx).await?; - - let block = create_test_block(1, vec![tx]); - network.broadcast_block(&block).await?; - - // Stats should remain 0 for messages_sent since no peers connected - let final_stats = network.get_network_stats(); - assert_eq!(final_stats.messages_sent, 0); // No peers to send to - - info!("✅ Network statistics test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_management() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer management functionality"); - - let config = 
create_test_config("test_node_8", 8087); - let network = WebRTCP2PNetwork::new(config)?; - - // Test getting connected peers (should be empty) - let peers = network.get_connected_peers().await; - assert!(peers.is_empty()); - - // Test disconnecting non-existent peer (should not error) - let result = network.disconnect_peer("non_existent_peer").await; - assert!(result.is_ok()); - - info!("✅ Peer management test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_network_shutdown() -> Result<()> { - init_test_logging(); - info!("🧪 Testing network shutdown functionality"); - - let config = create_test_config("test_node_9", 8088); - let network = WebRTCP2PNetwork::new(config)?; - - // Test shutdown without starting (should not error) - let shutdown_result = network.shutdown().await; - assert!(shutdown_result.is_ok()); - - info!("✅ Network shutdown test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_configuration_validation() -> Result<()> { - init_test_logging(); - info!("🧪 Testing P2P configuration validation"); - - // Test default configuration - let default_config = P2PConfig::default(); - assert!(!default_config.node_id.is_empty()); - assert!(!default_config.stun_servers.is_empty()); - assert!(default_config.max_peers > 0); - assert!(default_config.connection_timeout > 0); - - // Test custom configuration - let custom_config = create_test_config("custom_node", 9000); - assert_eq!(custom_config.node_id, "custom_node"); - assert_eq!(custom_config.listen_addr.port(), 9000); - assert!(custom_config.debug_mode); - - // Create network with custom config - let network = WebRTCP2PNetwork::new(custom_config)?; - assert!(network.get_connected_peers().await.is_empty()); - - info!("✅ Configuration validation test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_p2p_trait_implementation() -> Result<()> { - init_test_logging(); - info!("🧪 Testing P2PNetworkLayer trait implementation"); - - let config = create_test_config("trait_test_node", 8089); - let network = 
WebRTCP2PNetwork::new(config)?; - - // Test trait methods through concrete type - let peers = network.get_connected_peers().await; - assert!(peers.is_empty()); - - let tx = create_test_transaction("alice", "bob", 1000); - let broadcast_result = network.broadcast_transaction(&tx).await; - assert!(broadcast_result.is_ok()); - - let block = create_test_block(1, vec![tx]); - let block_result = network.broadcast_block(&block).await; - assert!(block_result.is_ok()); - - // Test shutdown - let shutdown_result = network.shutdown().await; - assert!(shutdown_result.is_ok()); - - info!("✅ P2PNetworkLayer trait implementation test passed"); - Ok(()) -} diff --git a/crates/p2p-network/tests/non_blocking_adaptive_test.rs b/crates/p2p-network/tests/non_blocking_adaptive_test.rs new file mode 100644 index 0000000..9be6d4a --- /dev/null +++ b/crates/p2p-network/tests/non_blocking_adaptive_test.rs @@ -0,0 +1,372 @@ +//! Non-blocking Adaptive Network Tests +//! +//! Tests that verify the P2P network discovery and joining capabilities +//! without using blocking start() method. 
+ +use anyhow::Result; +use log::info; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +fn init_test_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Info) + .is_test(true) + .try_init(); +} + +fn create_test_tx(id: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("adaptive_test_tx_{}", id), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}", id), + output_index: 0, + }, + redeemer: b"test".to_vec(), + signature: b"sig".to_vec(), + }], + outputs: vec![TxOutput { + value: 1000, + script: vec![], + datum: Some(b"test_data".to_vec()), + datum_hash: Some("hash".to_string()), + }], + fee: 100, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +#[tokio::test] +async fn test_non_blocking_peer_discovery() -> Result<()> { + init_test_logging(); + info!("Testing non-blocking peer discovery mechanism"); + + // Create networks with auto discovery enabled + let config1 = P2PConfig { + node_id: "discovery_node1".to_string(), + listen_addr: "127.0.0.1:12001".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], // No STUN for non-blocking test + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let config2 = P2PConfig { + node_id: "discovery_node2".to_string(), + listen_addr: "127.0.0.1:12002".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network1 = WebRTCP2PNetwork::new(config1)?; + let network2 = WebRTCP2PNetwork::new(config2)?; + + info!("Created both networks"); + + // Test discovery functionality without full startup + let discovered_peers1 = network1.get_discovered_peers().await; + let discovered_peers2 = network2.get_discovered_peers().await; + + info!("Network 1 discovered {} peers", 
discovered_peers1.len()); + info!("Network 2 discovered {} peers", discovered_peers2.len()); + + // Initially should be empty + assert_eq!(discovered_peers1.len(), 0); + assert_eq!(discovered_peers2.len(), 0); + + // Test broadcasting capabilities + let tx1 = create_test_tx(1); + let broadcast_result1 = network1.broadcast_transaction(&tx1).await; + + let tx2 = create_test_tx(2); + let broadcast_result2 = network2.broadcast_transaction(&tx2).await; + + assert!(broadcast_result1.is_ok(), "Network 1 should broadcast successfully"); + assert!(broadcast_result2.is_ok(), "Network 2 should broadcast successfully"); + + info!("Both networks can broadcast transactions"); + + // Get network statistics + let stats1 = network1.get_network_stats(); + let stats2 = network2.get_network_stats(); + + info!("Network 1 stats: connections={}, messages_sent={}", + stats1.active_connections, stats1.messages_sent); + info!("Network 2 stats: connections={}, messages_sent={}", + stats2.active_connections, stats2.messages_sent); + + // Test adaptive network statistics if available + let adaptive_stats1 = network1.get_adaptive_network_stats().await; + let adaptive_stats2 = network2.get_adaptive_network_stats().await; + + info!("Network 1 adaptive stats: discovered={}, connected={}, efficiency={:.2}", + adaptive_stats1.discovered_peers_count, + adaptive_stats1.connected_peers_count, + adaptive_stats1.discovery_efficiency); + info!("Network 2 adaptive stats: discovered={}, connected={}, efficiency={:.2}", + adaptive_stats2.discovered_peers_count, + adaptive_stats2.connected_peers_count, + adaptive_stats2.discovery_efficiency); + + // Cleanup + network1.shutdown().await?; + network2.shutdown().await?; + + info!("Non-blocking peer discovery test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_non_blocking_network_expansion() -> Result<()> { + init_test_logging(); + info!("Testing non-blocking network expansion"); + + // Create multiple networks simulating gradual expansion + let mut 
networks = Vec::new(); + + for i in 0..4 { + let config = P2PConfig { + node_id: format!("expansion_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 12010 + i).parse().unwrap(), + bootstrap_peers: if i == 0 { + vec![] + } else { + vec![format!("127.0.0.1:{}", 12010)] // Bootstrap to first node + }, + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + networks.push(network); + info!("Created network {}", i); + } + + // Test that all networks can handle transactions + let mut successful_broadcasts = 0; + for (i, network) in networks.iter().enumerate() { + let tx = create_test_tx(100 + i as u64); + if network.broadcast_transaction(&tx).await.is_ok() { + successful_broadcasts += 1; + } + } + + assert_eq!(successful_broadcasts, 4, "All networks should handle transactions"); + info!("All {} networks can handle transactions", successful_broadcasts); + + // Check network capabilities + for (i, network) in networks.iter().enumerate() { + let discovered = network.get_discovered_peers().await; + let connected = network.get_connected_peers().await; + let stats = network.get_network_stats(); + + info!("Network {} - Discovered: {}, Connected: {}, Total connections: {}", + i, discovered.len(), connected.len(), stats.total_connections); + } + + // Test adaptive broadcasting on all networks + let tx_broadcast = create_test_tx(200); + let mut adaptive_broadcast_results = 0; + + for (i, network) in networks.iter().enumerate() { + if network.adaptive_broadcast_transaction(&tx_broadcast).await.is_ok() { + adaptive_broadcast_results += 1; + } + info!("Network {} adaptive broadcast result: OK", i); + } + + assert_eq!(adaptive_broadcast_results, 4, "All networks should support adaptive broadcast"); + + // Cleanup all networks + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Network {} shutdown complete", i); + } + + 
info!("Non-blocking network expansion test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_non_blocking_network_resilience() -> Result<()> { + init_test_logging(); + info!("Testing non-blocking network resilience"); + + // Create a small network setup + let mut networks = Vec::new(); + + for i in 0..3 { + let config = P2PConfig { + node_id: format!("resilient_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 12020 + i).parse().unwrap(), + bootstrap_peers: if i == 0 { + vec![] + } else { + vec!["127.0.0.1:12020".to_string()] + }, + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + networks.push(network); + } + + info!("Created {} networks for resilience testing", networks.len()); + + // Test all networks are functional + for (i, network) in networks.iter().enumerate() { + let tx = create_test_tx(300 + i as u64); + let result = network.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Network {} should be functional", i); + + let stats = network.get_network_stats(); + info!("Network {} initial stats: connections={}, messages_sent={}", + i, stats.active_connections, stats.messages_sent); + } + + // Simulate "node failure" by shutting down middle network + info!("Simulating node failure by shutting down network 1"); + networks[1].shutdown().await?; + let failed_network = networks.remove(1); + drop(failed_network); + + // Test remaining networks still function + for (i, network) in networks.iter().enumerate() { + let tx = create_test_tx(400 + i as u64); + let result = network.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Remaining network {} should still work", i); + + let discovered = network.get_discovered_peers().await; + info!("Network {} after failure - Discovered peers: {}", i, discovered.len()); + } + + // Add new "healing" network + info!("Adding new network to heal the network"); + let healing_config = 
P2PConfig { + node_id: "healing_node".to_string(), + listen_addr: "127.0.0.1:12030".parse().unwrap(), + bootstrap_peers: vec![ + "127.0.0.1:12020".to_string(), + "127.0.0.1:12022".to_string(), + ], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let healing_network = WebRTCP2PNetwork::new(healing_config)?; + networks.push(healing_network); + + // Test network functionality after healing + let mut working_nodes = 0; + for (i, network) in networks.iter().enumerate() { + let tx = create_test_tx(500 + i as u64); + if network.broadcast_transaction(&tx).await.is_ok() { + working_nodes += 1; + } + + let discovered = network.get_discovered_peers().await; + let adaptive_stats = network.get_adaptive_network_stats().await; + info!("Network {} after healing - Discovered: {}, DHT nodes: {}", + i, discovered.len(), adaptive_stats.dht_nodes_count); + } + + assert!(working_nodes >= 2, "At least 2 nodes should work after healing"); + info!("Network resilience test: {}/{} nodes working after healing", + working_nodes, networks.len()); + + // Cleanup remaining networks + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Network {} cleanup complete", i); + } + + info!("Non-blocking network resilience test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_discovery_mechanisms() -> Result<()> { + init_test_logging(); + info!("Testing various discovery mechanisms"); + + let config = P2PConfig { + node_id: "discovery_test_node".to_string(), + listen_addr: "127.0.0.1:12040".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + + // Test initial discovery state + let initial_discovered = network.get_discovered_peers().await; + let initial_connected = network.get_connected_peers().await; + + info!("Initial 
state - Discovered: {}, Connected: {}", + initial_discovered.len(), initial_connected.len()); + + assert_eq!(initial_discovered.len(), 0); + assert_eq!(initial_connected.len(), 0); + + // Test adaptive network statistics + let adaptive_stats = network.get_adaptive_network_stats().await; + + info!("Adaptive stats - Discovered peers: {}, DHT nodes: {}, Connected: {}, Efficiency: {:.2}", + adaptive_stats.discovered_peers_count, + adaptive_stats.dht_nodes_count, + adaptive_stats.connected_peers_count, + adaptive_stats.discovery_efficiency); + + // Test adaptive broadcasting + let tx = create_test_tx(600); + let adaptive_result = network.adaptive_broadcast_transaction(&tx).await; + assert!(adaptive_result.is_ok(), "Adaptive broadcast should work"); + + info!("Adaptive broadcast successful"); + + // Test multiple data requests to exercise discovery + for i in 0..5 { + let result = network.request_blockchain_data( + "test_data".to_string(), + format!("discovery_test_{}", i) + ).await; + info!("Data request {} result: {:?}", i, result.is_ok()); + } + + // Check final stats + let final_stats = network.get_network_stats(); + let final_adaptive = network.get_adaptive_network_stats().await; + + info!("Final stats - Messages sent: {}, Total connections: {}", + final_stats.messages_sent, final_stats.total_connections); + info!("Final adaptive - Discovery efficiency: {:.2}", + final_adaptive.discovery_efficiency); + + network.shutdown().await?; + info!("Discovery mechanisms test completed"); + Ok(()) +} \ No newline at end of file diff --git a/crates/p2p-network/tests/non_blocking_integration_test.rs b/crates/p2p-network/tests/non_blocking_integration_test.rs new file mode 100644 index 0000000..928a72e --- /dev/null +++ b/crates/p2p-network/tests/non_blocking_integration_test.rs @@ -0,0 +1,401 @@ +//! Non-blocking WebRTC P2P Network Integration Tests +//! +//! This module contains non-blocking tests for the WebRTC P2P network +//! 
implementation, testing communication functionality without hanging. + +use anyhow::Result; +use log::info; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoBlock, UtxoId, UtxoTransaction}; + +/// Initialize test logging +fn init_test_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Info) + .is_test(true) + .try_init(); +} + +/// Create a test UTXO transaction +fn create_test_transaction(from: &str, to: &str, amount: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("tx_{}_{}_{}_{}", from, to, amount, uuid::Uuid::new_v4()), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: "genesis_tx".to_string(), + output_index: 0, + }, + redeemer: b"test_redeemer".to_vec(), + signature: format!("sig_{}", from).into_bytes(), + }], + outputs: vec![TxOutput { + value: amount, + script: vec![], + datum: Some(format!("Payment to {}", to).into_bytes()), + datum_hash: Some(format!("datum_hash_{}", to)), + }], + fee: 1000, + validity_range: Some((0, 1000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +/// Create a test UTXO block +fn create_test_block(number: u64, transactions: Vec<UtxoTransaction>) -> UtxoBlock { + UtxoBlock { + hash: format!("block_{}", number), + parent_hash: if number == 0 { + "genesis".to_string() + } else { + format!("block_{}", number - 1) + }, + number, + timestamp: chrono::Utc::now().timestamp() as u64, + slot: number, + transactions, + utxo_set_hash: format!("utxo_set_hash_{}", number), + transaction_root: format!("tx_root_{}", number), + validator: "test_validator".to_string(), + proof: vec![0, 1, 2, 3], // Mock proof + } +} + +#[tokio::test] +async fn test_non_blocking_p2p_setup() -> Result<()> { + init_test_logging(); + info!("Testing non-blocking P2P network setup and basic operations"); + + // Create two network instances + let config1 = P2PConfig { + node_id: "node_1".to_string(), + listen_addr: "127.0.0.1:8081".parse().unwrap(), + bootstrap_peers: 
vec![], + stun_servers: vec![], // No STUN for non-blocking test + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 10, + debug_mode: false, + }; + + let config2 = P2PConfig { + node_id: "node_2".to_string(), + listen_addr: "127.0.0.1:8082".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], // No STUN for non-blocking test + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 10, + debug_mode: false, + }; + + let network1 = WebRTCP2PNetwork::new(config1)?; + let network2 = WebRTCP2PNetwork::new(config2)?; + + // Test network creation and configuration + let stats1 = network1.get_network_stats(); + let stats2 = network2.get_network_stats(); + + assert_eq!(stats1.active_connections, 0); + assert_eq!(stats2.active_connections, 0); + + info!("Network1 initial stats: {:?}", stats1); + info!("Network2 initial stats: {:?}", stats2); + + // Test transaction broadcasting (should work even without connections) + let tx = create_test_transaction("alice", "bob", 1000); + let broadcast_result1 = network1.broadcast_transaction(&tx).await; + let broadcast_result2 = network2.broadcast_transaction(&tx).await; + + assert!(broadcast_result1.is_ok()); + assert!(broadcast_result2.is_ok()); + info!("Transaction broadcast successful on both networks"); + + // Test block broadcasting + let block = create_test_block(1, vec![tx.clone()]); + let block_result1 = network1.broadcast_block(&block).await; + let block_result2 = network2.broadcast_block(&block).await; + + assert!(block_result1.is_ok()); + assert!(block_result2.is_ok()); + info!("Block broadcast successful on both networks"); + + // Test data requests + let data_request1 = network1.request_blockchain_data( + "transaction".to_string(), + tx.hash.clone() + ).await; + let data_request2 = network2.request_blockchain_data( + "block".to_string(), + block.hash.clone() + ).await; + + info!("Data request results - Network1: {:?}, Network2: {:?}", + data_request1.is_ok(), data_request2.is_ok()); + + 
// Test peer queries + let peers1 = network1.get_connected_peers().await; + let peers2 = network2.get_connected_peers().await; + + assert_eq!(peers1.len(), 0); // No connections without start() + assert_eq!(peers2.len(), 0); + info!("Peer queries successful - Network1: {} peers, Network2: {} peers", + peers1.len(), peers2.len()); + + // Test discovery functionality + let discovered1 = network1.get_discovered_peers().await; + let discovered2 = network2.get_discovered_peers().await; + + info!("Discovery results - Network1: {} discovered, Network2: {} discovered", + discovered1.len(), discovered2.len()); + + // Graceful shutdown + network1.shutdown().await?; + network2.shutdown().await?; + + info!("Non-blocking P2P setup test completed successfully"); + Ok(()) +} + +#[tokio::test] +async fn test_network_configuration_validation() -> Result<()> { + init_test_logging(); + info!("Testing network configuration validation"); + + // Test various configurations + let configs = vec![ + P2PConfig { + node_id: "minimal_node".to_string(), + listen_addr: "127.0.0.1:9001".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 1, + connection_timeout: 1, + keep_alive_interval: 5, + debug_mode: false, + }, + P2PConfig { + node_id: "max_node".to_string(), + listen_addr: "127.0.0.1:9002".parse().unwrap(), + bootstrap_peers: vec!["127.0.0.1:9001".to_string()], + stun_servers: vec!["stun:test.example.com:19302".to_string()], + max_peers: 100, + connection_timeout: 300, + keep_alive_interval: 60, + debug_mode: true, + }, + ]; + + let mut networks = Vec::new(); + + for (i, config) in configs.into_iter().enumerate() { + let network = WebRTCP2PNetwork::new(config)?; + + // Test basic functionality + let stats = network.get_network_stats(); + let peers = network.get_connected_peers().await; + + info!("Network {} - Stats: {:?}, Peers: {}", i, stats, peers.len()); + + // Test transaction handling + let tx = create_test_transaction("user1", "user2", 100 * (i as u64 + 
1)); + let result = network.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Network {} should handle transactions", i); + + networks.push(network); + } + + // Cleanup all networks + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Network {} shutdown complete", i); + } + + info!("Network configuration validation test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_multiple_transactions_and_blocks() -> Result<()> { + init_test_logging(); + info!("Testing multiple transactions and blocks handling"); + + let config = P2PConfig { + node_id: "multi_test_node".to_string(), + listen_addr: "127.0.0.1:9010".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + + // Create multiple transactions + let transactions: Vec<UtxoTransaction> = (0..10) + .map(|i| create_test_transaction( + &format!("user_{}", i), + &format!("user_{}", i + 1), + 1000 + i * 100 + )) + .collect(); + + // Broadcast all transactions + let mut successful_broadcasts = 0; + for (i, tx) in transactions.iter().enumerate() { + let result = network.broadcast_transaction(tx).await; + if result.is_ok() { + successful_broadcasts += 1; + } + info!("Transaction {} broadcast result: {:?}", i, result.is_ok()); + } + + assert_eq!(successful_broadcasts, 10, "All transactions should broadcast successfully"); + + // Create multiple blocks with the transactions + let blocks: Vec<UtxoBlock> = (0..5) + .map(|i| { + let block_txs = transactions[i*2..i*2+2].to_vec(); + create_test_block(i as u64, block_txs) + }) + .collect(); + + // Broadcast all blocks + let mut successful_block_broadcasts = 0; + for (i, block) in blocks.iter().enumerate() { + let result = network.broadcast_block(block).await; + if result.is_ok() { + successful_block_broadcasts += 1; + } + info!("Block {} broadcast result: {:?}", i, result.is_ok()); + } + + 
assert_eq!(successful_block_broadcasts, 5, "All blocks should broadcast successfully"); + + // Test data requests for all items + for (i, tx) in transactions.iter().enumerate() { + let result = network.request_blockchain_data("transaction".to_string(), tx.hash.clone()).await; + info!("Transaction {} data request result: {:?}", i, result.is_ok()); + } + + for (i, block) in blocks.iter().enumerate() { + let result = network.request_blockchain_data("block".to_string(), block.hash.clone()).await; + info!("Block {} data request result: {:?}", i, result.is_ok()); + } + + // Check final stats + let final_stats = network.get_network_stats(); + info!("Final stats after multiple operations: {:?}", final_stats); + + network.shutdown().await?; + info!("Multiple transactions and blocks test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_concurrent_network_operations() -> Result<()> { + init_test_logging(); + info!("Testing concurrent network operations across multiple networks"); + + // Create multiple networks + let mut networks = Vec::new(); + let mut configs = Vec::new(); + + for i in 0..3 { + let config = P2PConfig { + node_id: format!("concurrent_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 9020 + i).parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config.clone())?; + networks.push(network); + configs.push(config); + } + + // Concurrent transaction broadcasting + let mut handles = Vec::new(); + + for (i, network) in networks.iter().enumerate() { + let net = network.clone(); + let handle = tokio::spawn(async move { + let mut results = Vec::new(); + for j in 0..5 { + let tx = create_test_transaction( + &format!("user_{}_{}", i, j), + &format!("user_{}_{}", i, j + 1), + 1000 + j * 100 + ); + let result = net.broadcast_transaction(&tx).await; + results.push(result.is_ok()); + } + results + }); + 
handles.push(handle); + } + + // Wait for all concurrent operations + let results = futures::future::join_all(handles).await; + + for (i, result) in results.iter().enumerate() { + match result { + Ok(broadcasts) => { + let successful = broadcasts.iter().filter(|&&x| x).count(); + info!("Network {} - Successful broadcasts: {}/5", i, successful); + assert_eq!(successful, 5, "All broadcasts should succeed for network {}", i); + } + Err(e) => { + panic!("Network {} task failed: {:?}", i, e); + } + } + } + + // Concurrent data requests + let mut request_handles = Vec::new(); + + for (i, network) in networks.iter().enumerate() { + let net = network.clone(); + let handle = tokio::spawn(async move { + let mut results = Vec::new(); + for j in 0..3 { + let result = net.request_blockchain_data( + "test_data".to_string(), + format!("test_id_{}_{}", i, j) + ).await; + results.push(result.is_ok()); + } + results + }); + request_handles.push(handle); + } + + let request_results = futures::future::join_all(request_handles).await; + + for (i, result) in request_results.iter().enumerate() { + match result { + Ok(requests) => { + info!("Network {} - Data requests completed: {}", i, requests.len()); + } + Err(e) => { + panic!("Network {} request task failed: {:?}", i, e); + } + } + } + + // Cleanup all networks + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Network {} shutdown complete", i); + } + + info!("Concurrent network operations test completed"); + Ok(()) +} \ No newline at end of file diff --git a/crates/p2p-network/tests/non_blocking_network_joining_test.rs b/crates/p2p-network/tests/non_blocking_network_joining_test.rs new file mode 100644 index 0000000..75cc643 --- /dev/null +++ b/crates/p2p-network/tests/non_blocking_network_joining_test.rs @@ -0,0 +1,327 @@ +//! Non-blocking Network Joining Test +//! +//! Tests that verify new nodes can successfully join an existing network +//! without using blocking start() method. 
+ +use anyhow::Result; +use log::info; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize test logging with detailed output +fn init_test_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Info) + .is_test(true) + .try_init(); +} + +/// Create a test transaction with unique ID +fn create_test_transaction(id: u64, from_node: &str) -> UtxoTransaction { + UtxoTransaction { + hash: format!("tx_{}_from_{}", id, from_node), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}", id), + output_index: 0, + }, + redeemer: b"test_redeemer".to_vec(), + signature: b"test_signature".to_vec(), + }], + outputs: vec![TxOutput { + value: 1000 + id, + script: vec![], + datum: Some(format!("data_from_{}", from_node).into_bytes()), + datum_hash: Some(format!("hash_{}", id)), + }], + fee: 100, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +#[tokio::test] +async fn test_non_blocking_network_joining_setup() -> Result<()> { + init_test_logging(); + info!("=== Testing non-blocking network joining setup ==="); + + // Create initial "existing" network node + let existing_config = P2PConfig { + node_id: "existing_node".to_string(), + listen_addr: "127.0.0.1:11001".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], // No STUN for non-blocking test + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let existing_network = WebRTCP2PNetwork::new(existing_config)?; + info!("Created existing network node"); + + // Test existing network functionality + let existing_stats = existing_network.get_network_stats(); + info!("Existing network stats: connections={}, total={}", + existing_stats.active_connections, existing_stats.total_connections); + + // Create new node that wants to join + let joining_config = P2PConfig { + node_id: 
"joining_node".to_string(), + listen_addr: "127.0.0.1:11002".parse().unwrap(), + bootstrap_peers: vec!["127.0.0.1:11001".to_string()], // Bootstrap to existing + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let joining_network = WebRTCP2PNetwork::new(joining_config)?; + info!("Created joining network node"); + + // Test joining network functionality + let joining_stats = joining_network.get_network_stats(); + info!("Joining network stats: connections={}, total={}", + joining_stats.active_connections, joining_stats.total_connections); + + // Test both networks can handle transactions + let tx_existing = create_test_transaction(1, "existing_node"); + let tx_joining = create_test_transaction(2, "joining_node"); + + let result_existing = existing_network.broadcast_transaction(&tx_existing).await; + let result_joining = joining_network.broadcast_transaction(&tx_joining).await; + + assert!(result_existing.is_ok(), "Existing network should handle transactions"); + assert!(result_joining.is_ok(), "Joining network should handle transactions"); + + info!("Both networks can handle transactions"); + + // Test discovery capabilities + let discovered_existing = existing_network.get_discovered_peers().await; + let discovered_joining = joining_network.get_discovered_peers().await; + + info!("Existing network discovered {} peers", discovered_existing.len()); + info!("Joining network discovered {} peers", discovered_joining.len()); + + // Test peer connection capabilities (without actual connections) + let connected_existing = existing_network.get_connected_peers().await; + let connected_joining = joining_network.get_connected_peers().await; + + info!("Existing network connected to {} peers", connected_existing.len()); + info!("Joining network connected to {} peers", connected_joining.len()); + + // Test data request capabilities + let data_request_existing = existing_network.request_blockchain_data( + 
"transaction".to_string(), + tx_joining.hash.clone() + ).await; + + let data_request_joining = joining_network.request_blockchain_data( + "transaction".to_string(), + tx_existing.hash.clone() + ).await; + + info!("Data request results - Existing: {:?}, Joining: {:?}", + data_request_existing.is_ok(), data_request_joining.is_ok()); + + // Cleanup + existing_network.shutdown().await?; + joining_network.shutdown().await?; + + info!("Non-blocking network joining setup test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_multiple_nodes_joining_sequence() -> Result<()> { + init_test_logging(); + info!("Testing multiple nodes joining in sequence"); + + // Create bootstrap node + let bootstrap_config = P2PConfig { + node_id: "bootstrap".to_string(), + listen_addr: "127.0.0.1:11010".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 20, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let bootstrap_network = WebRTCP2PNetwork::new(bootstrap_config)?; + let mut networks = vec![bootstrap_network]; + + info!("Created bootstrap network"); + + // Create multiple joining nodes + for i in 1..=5 { + let config = P2PConfig { + node_id: format!("joining_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 11010 + i).parse().unwrap(), + bootstrap_peers: vec!["127.0.0.1:11010".to_string()], + stun_servers: vec![], + max_peers: 20, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + networks.push(network); + info!("Created joining node {}", i); + } + + // Test all networks can handle transactions + let mut successful_broadcasts = 0; + for (i, network) in networks.iter().enumerate() { + let tx = create_test_transaction(100 + i as u64, &format!("node_{}", i)); + if network.broadcast_transaction(&tx).await.is_ok() { + successful_broadcasts += 1; + } + } + + assert_eq!(successful_broadcasts, 6, "All networks should handle transactions"); + 
info!("All {} networks can handle transactions", successful_broadcasts); + + // Test adaptive capabilities on all networks + let tx_adaptive = create_test_transaction(200, "adaptive_test"); + let mut adaptive_results = 0; + + for (i, network) in networks.iter().enumerate() { + if network.adaptive_broadcast_transaction(&tx_adaptive).await.is_ok() { + adaptive_results += 1; + } + + let adaptive_stats = network.get_adaptive_network_stats().await; + info!("Network {} adaptive stats - Discovered: {}, DHT: {}, Efficiency: {:.2}", + i, adaptive_stats.discovered_peers_count, + adaptive_stats.dht_nodes_count, adaptive_stats.discovery_efficiency); + } + + assert_eq!(adaptive_results, 6, "All networks should support adaptive broadcast"); + + // Test network statistics across all nodes + for (i, network) in networks.iter().enumerate() { + let stats = network.get_network_stats(); + let discovered = network.get_discovered_peers().await; + let connected = network.get_connected_peers().await; + + info!("Network {} final stats - Messages: {}, Discovered: {}, Connected: {}", + i, stats.messages_sent, discovered.len(), connected.len()); + } + + // Cleanup all networks + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Network {} cleanup complete", i); + } + + info!("Multiple nodes joining sequence test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_network_joining_with_failures() -> Result<()> { + init_test_logging(); + info!("Testing network joining with simulated failures"); + + // Create initial network + let mut networks = Vec::new(); + + for i in 0..3 { + let config = P2PConfig { + node_id: format!("failure_test_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 11020 + i).parse().unwrap(), + bootstrap_peers: if i == 0 { + vec![] + } else { + vec!["127.0.0.1:11020".to_string()] + }, + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = 
WebRTCP2PNetwork::new(config)?; + networks.push(network); + } + + info!("Created {} networks for failure testing", networks.len()); + + // Test all networks initially work + for (i, network) in networks.iter().enumerate() { + let tx = create_test_transaction(300 + i as u64, &format!("failure_node_{}", i)); + let result = network.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Network {} should be functional initially", i); + } + + // Simulate failure by shutting down bootstrap node + info!("Simulating bootstrap node failure"); + networks[0].shutdown().await?; + let failed_node = networks.remove(0); + drop(failed_node); + + // Test remaining networks can still function + for (i, network) in networks.iter().enumerate() { + let tx = create_test_transaction(400 + i as u64, &format!("surviving_node_{}", i)); + let result = network.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Surviving network {} should still work", i); + + let stats = network.get_network_stats(); + info!("Surviving network {} stats: messages={}, connections={}", + i, stats.messages_sent, stats.total_connections); + } + + // Add new node to replace failed bootstrap + info!("Adding replacement node"); + let replacement_config = P2PConfig { + node_id: "replacement_node".to_string(), + listen_addr: "127.0.0.1:11030".parse().unwrap(), + bootstrap_peers: vec![ + "127.0.0.1:11021".to_string(), + "127.0.0.1:11022".to_string(), + ], + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let replacement_network = WebRTCP2PNetwork::new(replacement_config)?; + networks.push(replacement_network); + + // Test all networks (including replacement) work + let mut working_networks = 0; + for (i, network) in networks.iter().enumerate() { + let tx = create_test_transaction(500 + i as u64, &format!("final_node_{}", i)); + if network.broadcast_transaction(&tx).await.is_ok() { + working_networks += 1; + } + + let discovered = 
network.get_discovered_peers().await; + let adaptive_stats = network.get_adaptive_network_stats().await; + info!("Final network {} - Discovered: {}, DHT: {}, Working: {}", + i, discovered.len(), adaptive_stats.dht_nodes_count, + working_networks <= i + 1); + } + + assert_eq!(working_networks, networks.len(), "All remaining networks should work"); + info!("Network recovery successful: {}/{} networks working", + working_networks, networks.len()); + + // Cleanup + for (i, network) in networks.into_iter().enumerate() { + network.shutdown().await?; + info!("Final cleanup network {} complete", i); + } + + info!("Network joining with failures test completed"); + Ok(()) +} \ No newline at end of file diff --git a/crates/p2p-network/tests/non_blocking_peer_test.rs b/crates/p2p-network/tests/non_blocking_peer_test.rs new file mode 100644 index 0000000..fd1b0bc --- /dev/null +++ b/crates/p2p-network/tests/non_blocking_peer_test.rs @@ -0,0 +1,464 @@ +//! Non-blocking peer communication functionality tests +//! +//! Tests for peer-to-peer communication capabilities without using blocking start() method. 
+ +use anyhow::Result; +use log::info; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +/// Initialize test logging +fn init_test_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Info) + .is_test(true) + .try_init(); +} + +/// Create a test transaction with unique properties +fn create_test_transaction(id: u64, from: &str, to: &str, amount: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("peer_tx_{}_{}_to_{}", id, from, to), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}_{}", from, id), + output_index: 0, + }, + redeemer: format!("redeemer_{}", from).into_bytes(), + signature: format!("sig_{}_{}", from, id).into_bytes(), + }], + outputs: vec![TxOutput { + value: amount, + script: vec![], + datum: Some(format!("payment_from_{}_to_{}", from, to).into_bytes()), + datum_hash: Some(format!("datum_hash_{}_{}", from, id)), + }], + fee: 100, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +#[tokio::test] +async fn test_non_blocking_peer_communication_setup() -> Result<()> { + init_test_logging(); + info!("Testing non-blocking peer communication setup"); + + // Create two peer networks + let config_peer1 = P2PConfig { + node_id: "peer_1".to_string(), + listen_addr: "127.0.0.1:12100".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], // No STUN for non-blocking test + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let config_peer2 = P2PConfig { + node_id: "peer_2".to_string(), + listen_addr: "127.0.0.1:12101".parse().unwrap(), + bootstrap_peers: vec!["127.0.0.1:12100".to_string()], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let peer1 = WebRTCP2PNetwork::new(config_peer1)?; + let peer2 = WebRTCP2PNetwork::new(config_peer2)?; + + 
info!("Created two peer networks"); + + // Test basic peer functionality + let stats1 = peer1.get_network_stats(); + let stats2 = peer2.get_network_stats(); + + info!("Peer 1 stats: connections={}, total={}", + stats1.active_connections, stats1.total_connections); + info!("Peer 2 stats: connections={}, total={}", + stats2.active_connections, stats2.total_connections); + + // Test transaction exchange simulation + let tx1_to_2 = create_test_transaction(1, "peer1", "peer2", 1000); + let tx2_to_1 = create_test_transaction(2, "peer2", "peer1", 500); + + let result1 = peer1.broadcast_transaction(&tx1_to_2).await; + let result2 = peer2.broadcast_transaction(&tx2_to_1).await; + + assert!(result1.is_ok(), "Peer 1 should broadcast successfully"); + assert!(result2.is_ok(), "Peer 2 should broadcast successfully"); + + info!("Both peers can handle transaction broadcasting"); + + // Test data requests between peers + let data_request_1 = peer1.request_blockchain_data( + "transaction".to_string(), + tx2_to_1.hash.clone() + ).await; + + let data_request_2 = peer2.request_blockchain_data( + "transaction".to_string(), + tx1_to_2.hash.clone() + ).await; + + info!("Data request results - Peer1: {:?}, Peer2: {:?}", + data_request_1.is_ok(), data_request_2.is_ok()); + + // Test peer discovery capabilities + let discovered1 = peer1.get_discovered_peers().await; + let discovered2 = peer2.get_discovered_peers().await; + let connected1 = peer1.get_connected_peers().await; + let connected2 = peer2.get_connected_peers().await; + + info!("Peer 1 - Discovered: {}, Connected: {}", discovered1.len(), connected1.len()); + info!("Peer 2 - Discovered: {}, Connected: {}", discovered2.len(), connected2.len()); + + // Test adaptive features + let adaptive_stats1 = peer1.get_adaptive_network_stats().await; + let adaptive_stats2 = peer2.get_adaptive_network_stats().await; + + info!("Peer 1 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", + adaptive_stats1.discovered_peers_count, + 
adaptive_stats1.dht_nodes_count, + adaptive_stats1.discovery_efficiency); + info!("Peer 2 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", + adaptive_stats2.discovered_peers_count, + adaptive_stats2.dht_nodes_count, + adaptive_stats2.discovery_efficiency); + + // Cleanup + peer1.shutdown().await?; + peer2.shutdown().await?; + + info!("Non-blocking peer communication setup test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_multi_peer_network_simulation() -> Result<()> { + init_test_logging(); + info!("Testing multi-peer network simulation"); + + // Create multiple peer networks + let mut peers = Vec::new(); + + for i in 0..5 { + let config = P2PConfig { + node_id: format!("multi_peer_{}", i), + listen_addr: format!("127.0.0.1:{}", 12110 + i).parse().unwrap(), + bootstrap_peers: if i == 0 { + vec![] + } else { + vec!["127.0.0.1:12110".to_string()] // All bootstrap to first peer + }, + stun_servers: vec![], + max_peers: 10, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let peer = WebRTCP2PNetwork::new(config)?; + peers.push(peer); + info!("Created multi-peer network {}", i); + } + + // Test transaction broadcasting from each peer + let mut successful_broadcasts = 0; + for (i, peer) in peers.iter().enumerate() { + let tx = create_test_transaction( + 100 + i as u64, + &format!("peer_{}", i), + &format!("peer_{}", (i + 1) % peers.len()), + 1000 + i as u64 * 100 + ); + + if peer.broadcast_transaction(&tx).await.is_ok() { + successful_broadcasts += 1; + } + } + + assert_eq!(successful_broadcasts, 5, "All peers should broadcast successfully"); + info!("All {} peers can broadcast transactions", successful_broadcasts); + + // Test adaptive broadcasting on all peers + let global_tx = create_test_transaction(200, "global", "all", 5000); + let mut adaptive_broadcasts = 0; + + for (i, peer) in peers.iter().enumerate() { + if peer.adaptive_broadcast_transaction(&global_tx).await.is_ok() { + adaptive_broadcasts += 1; + } + + 
let adaptive_stats = peer.get_adaptive_network_stats().await; + info!("Peer {} adaptive stats - Discovered: {}, Connected: {}, DHT: {}", + i, adaptive_stats.discovered_peers_count, + adaptive_stats.connected_peers_count, + adaptive_stats.dht_nodes_count); + } + + assert_eq!(adaptive_broadcasts, 5, "All peers should support adaptive broadcast"); + + // Test cross-peer data requests + for (i, peer) in peers.iter().enumerate() { + let target_peer = (i + 2) % peers.len(); + let request_result = peer.request_blockchain_data( + "peer_data".to_string(), + format!("data_from_peer_{}", target_peer) + ).await; + + info!("Peer {} data request to peer {}: {:?}", + i, target_peer, request_result.is_ok()); + } + + // Test network statistics across all peers + for (i, peer) in peers.iter().enumerate() { + let stats = peer.get_network_stats(); + let discovered = peer.get_discovered_peers().await; + let connected = peer.get_connected_peers().await; + + info!("Peer {} final stats - Messages: {}, Discovered: {}, Connected: {}", + i, stats.messages_sent, discovered.len(), connected.len()); + } + + // Cleanup all peers + for (i, peer) in peers.into_iter().enumerate() { + peer.shutdown().await?; + info!("Peer {} cleanup complete", i); + } + + info!("Multi-peer network simulation test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_peer_connection_resilience() -> Result<()> { + init_test_logging(); + info!("Testing peer connection resilience"); + + // Create network of peers + let mut peers = Vec::new(); + + for i in 0..4 { + let config = P2PConfig { + node_id: format!("resilient_peer_{}", i), + listen_addr: format!("127.0.0.1:{}", 12120 + i).parse().unwrap(), + bootstrap_peers: match i { + 0 => vec![], + 1 => vec!["127.0.0.1:12120".to_string()], + 2 => vec!["127.0.0.1:12120".to_string(), "127.0.0.1:12121".to_string()], + _ => vec!["127.0.0.1:12120".to_string(), "127.0.0.1:12122".to_string()], + }, + stun_servers: vec![], + max_peers: 8, + connection_timeout: 5, + 
keep_alive_interval: 30, + debug_mode: false, + }; + + let peer = WebRTCP2PNetwork::new(config)?; + peers.push(peer); + } + + info!("Created {} resilient peers", peers.len()); + + // Test all peers are functional + for (i, peer) in peers.iter().enumerate() { + let tx = create_test_transaction(300 + i as u64, &format!("resilient_{}", i), "network", 1000); + let result = peer.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Peer {} should be functional", i); + + let stats = peer.get_network_stats(); + info!("Peer {} initial stats: messages={}, connections={}", + i, stats.messages_sent, stats.total_connections); + } + + // Simulate peer failure by removing middle peer + info!("Simulating peer failure (removing peer 1)"); + peers[1].shutdown().await?; + let failed_peer = peers.remove(1); + drop(failed_peer); + + // Test remaining peers still function + for (i, peer) in peers.iter().enumerate() { + let tx = create_test_transaction(400 + i as u64, &format!("surviving_{}", i), "network", 1500); + let result = peer.broadcast_transaction(&tx).await; + assert!(result.is_ok(), "Surviving peer {} should still work", i); + + let discovered = peer.get_discovered_peers().await; + let adaptive_stats = peer.get_adaptive_network_stats().await; + info!("Surviving peer {} - Discovered: {}, DHT: {}, Efficiency: {:.2}", + i, discovered.len(), adaptive_stats.dht_nodes_count, + adaptive_stats.discovery_efficiency); + } + + // Add recovery peer + info!("Adding recovery peer"); + let recovery_config = P2PConfig { + node_id: "recovery_peer".to_string(), + listen_addr: "127.0.0.1:12130".parse().unwrap(), + bootstrap_peers: vec![ + "127.0.0.1:12120".to_string(), + "127.0.0.1:12122".to_string(), + "127.0.0.1:12123".to_string(), + ], + stun_servers: vec![], + max_peers: 8, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let recovery_peer = WebRTCP2PNetwork::new(recovery_config)?; + peers.push(recovery_peer); + + // Test network recovery + let mut 
recovered_peers = 0; + for (i, peer) in peers.iter().enumerate() { + let tx = create_test_transaction(500 + i as u64, &format!("recovered_{}", i), "network", 2000); + if peer.broadcast_transaction(&tx).await.is_ok() { + recovered_peers += 1; + } + + let stats = peer.get_network_stats(); + let adaptive_stats = peer.get_adaptive_network_stats().await; + info!("Recovery peer {} - Messages: {}, DHT nodes: {}, Discovered: {}", + i, stats.messages_sent, adaptive_stats.dht_nodes_count, + adaptive_stats.discovered_peers_count); + } + + assert_eq!(recovered_peers, peers.len(), "All remaining peers should work after recovery"); + info!("Network recovery successful: {}/{} peers functional", + recovered_peers, peers.len()); + + // Final cleanup + for (i, peer) in peers.into_iter().enumerate() { + peer.shutdown().await?; + info!("Recovery cleanup peer {} complete", i); + } + + info!("Peer connection resilience test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_peer_broadcast_patterns() -> Result<()> { + init_test_logging(); + info!("Testing various peer broadcast patterns"); + + // Create small peer network + let mut peers = Vec::new(); + + for i in 0..3 { + let config = P2PConfig { + node_id: format!("broadcast_peer_{}", i), + listen_addr: format!("127.0.0.1:{}", 12140 + i).parse().unwrap(), + bootstrap_peers: if i == 0 { + vec![] + } else { + vec!["127.0.0.1:12140".to_string()] + }, + stun_servers: vec![], + max_peers: 5, + connection_timeout: 5, + keep_alive_interval: 30, + debug_mode: false, + }; + + let peer = WebRTCP2PNetwork::new(config)?; + peers.push(peer); + } + + info!("Created {} peers for broadcast testing", peers.len()); + + // Test one-to-many broadcast pattern + info!("Testing one-to-many broadcast pattern"); + let broadcast_tx = create_test_transaction(600, "broadcaster", "everyone", 10000); + + for (i, peer) in peers.iter().enumerate() { + let result = peer.broadcast_transaction(&broadcast_tx).await; + assert!(result.is_ok(), "Peer {} should 
broadcast successfully", i); + } + + // Test many-to-one data request pattern + info!("Testing many-to-one data request pattern"); + for (i, peer) in peers.iter().enumerate() { + let request_result = peer.request_blockchain_data( + "broadcast_data".to_string(), + format!("shared_data_item_{}", i) + ).await; + info!("Peer {} data request result: {:?}", i, request_result.is_ok()); + } + + // Test concurrent broadcast pattern + info!("Testing concurrent broadcast pattern"); + let mut handles = Vec::new(); + + for (i, peer) in peers.iter().enumerate() { + let peer_clone = peer.clone(); + let handle = tokio::spawn(async move { + let mut results = Vec::new(); + for j in 0..3 { + let tx = create_test_transaction( + 700 + j, + &format!("concurrent_peer_{}", i), + &format!("target_{}", j), + 1000 + j * 100 + ); + let result = peer_clone.broadcast_transaction(&tx).await; + results.push(result.is_ok()); + } + results + }); + handles.push(handle); + } + + let concurrent_results = futures::future::join_all(handles).await; + + for (i, result) in concurrent_results.iter().enumerate() { + match result { + Ok(broadcasts) => { + let successful = broadcasts.iter().filter(|&&x| x).count(); + info!("Peer {} concurrent broadcasts: {}/3 successful", i, successful); + assert_eq!(successful, 3, "All concurrent broadcasts should succeed for peer {}", i); + } + Err(e) => { + panic!("Peer {} concurrent broadcast task failed: {:?}", i, e); + } + } + } + + // Test adaptive broadcast pattern + info!("Testing adaptive broadcast pattern"); + let adaptive_tx = create_test_transaction(800, "adaptive", "smart_routing", 5000); + + for (i, peer) in peers.iter().enumerate() { + let result = peer.adaptive_broadcast_transaction(&adaptive_tx).await; + assert!(result.is_ok(), "Peer {} adaptive broadcast should succeed", i); + + let adaptive_stats = peer.get_adaptive_network_stats().await; + info!("Peer {} adaptive broadcast stats - Efficiency: {:.2}, DHT: {}", + i, adaptive_stats.discovery_efficiency, 
adaptive_stats.dht_nodes_count); + } + + // Final statistics + for (i, peer) in peers.iter().enumerate() { + let stats = peer.get_network_stats(); + info!("Peer {} final broadcast stats - Total messages: {}, Connections: {}", + i, stats.messages_sent, stats.total_connections); + } + + // Cleanup + for (i, peer) in peers.into_iter().enumerate() { + peer.shutdown().await?; + info!("Broadcast test peer {} cleanup complete", i); + } + + info!("Peer broadcast patterns test completed"); + Ok(()) +} \ No newline at end of file diff --git a/crates/p2p-network/tests/peer_test.rs b/crates/p2p-network/tests/peer_test.rs deleted file mode 100644 index b23b283..0000000 --- a/crates/p2p-network/tests/peer_test.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Peer connection functionality tests -//! -//! Tests for peer-specific functionality including connection state management, -//! latency tracking, ping/pong handling, and handshake operations. - -use anyhow::Result; -use log::info; -use std::sync::Arc; -use tokio::sync::broadcast; - -use p2p_network::P2PMessage; -use webrtc::{ - api::APIBuilder, - ice_transport::ice_server::RTCIceServer, - peer_connection::{ - configuration::RTCConfiguration, peer_connection_state::RTCPeerConnectionState, - RTCPeerConnection, - }, -}; - -/// Initialize test logging -fn init_test_logging() { - let _ = env_logger::builder() - .filter_level(log::LevelFilter::Info) - .is_test(true) - .try_init(); -} - -/// Create a test WebRTC peer connection -async fn create_test_rtc_peer() -> Result> { - let api = APIBuilder::new().build(); - - let config = RTCConfiguration { - ice_servers: vec![RTCIceServer { - urls: vec!["stun:stun.l.google.com:19302".to_string()], - ..Default::default() - }], - ..Default::default() - }; - - let peer = api.new_peer_connection(config).await?; - Ok(Arc::new(peer)) -} - -#[tokio::test] -async fn test_peer_connection_creation() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer connection creation"); - - let rtc_peer = 
create_test_rtc_peer().await?; - let (message_tx, _) = broadcast::channel(100); - - // Create PeerConnection using the new method - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_1".to_string(), - "test_node_1".to_string(), - rtc_peer, - message_tx, - )?; - - // Test connection state - let state = peer.get_connection_state(); - assert!(matches!(state, RTCPeerConnectionState::New)); - - // Test connection check - let is_connected = peer.is_connected(); - assert!(!is_connected); // Should be false for new connection - - info!("✅ Peer connection creation test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_latency_tracking() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer latency tracking"); - - let rtc_peer = create_test_rtc_peer().await?; - let (message_tx, _) = broadcast::channel(100); - - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_2".to_string(), - "test_node_2".to_string(), - rtc_peer, - message_tx, - )?; - - // Test updating latency - peer.update_latency(50); // Good latency - peer.update_latency(200); // Acceptable latency - peer.update_latency(800); // Poor latency - - // Latency updates should affect reputation score - // (We can't directly verify this without accessing the peer info, - // but the method calls should not panic) - - info!("✅ Peer latency tracking test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_ping_pong_handling() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer ping/pong handling"); - - let rtc_peer = create_test_rtc_peer().await?; - let (message_tx, _message_rx) = broadcast::channel(100); - - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_3".to_string(), - "test_node_3".to_string(), - rtc_peer, - message_tx, - )?; - - // Test handling ping (this would normally send a pong response) - let timestamp = chrono::Utc::now().timestamp() as u64; - let nonce = 12345; - - // This will fail 
since no data channel is established, but tests the API - let ping_result = peer.handle_ping(timestamp, nonce).await; - // Expected to fail due to no data channel - assert!(ping_result.is_err()); - - // Test handling pong - peer.handle_pong(timestamp, nonce); - // This should not panic and should update latency - - info!("✅ Peer ping/pong handling test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_handshake() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer handshake"); - - let rtc_peer = create_test_rtc_peer().await?; - let (message_tx, _) = broadcast::channel(100); - - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_4".to_string(), - "test_node_4".to_string(), - rtc_peer, - message_tx, - )?; - - // Test performing handshake - let version = "1.0.0".to_string(); - let handshake_result = peer.perform_handshake(version).await; - - // Expected to fail since no data channel is established - assert!(handshake_result.is_err()); - - info!("✅ Peer handshake test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_disconnection() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer disconnection"); - - let rtc_peer = create_test_rtc_peer().await?; - let (message_tx, _) = broadcast::channel(100); - - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_5".to_string(), - "test_node_5".to_string(), - rtc_peer, - message_tx, - )?; - - // Test disconnection - let disconnect_result = peer.disconnect().await; - assert!(disconnect_result.is_ok()); - - info!("✅ Peer disconnection test passed"); - Ok(()) -} - -#[tokio::test] -async fn test_peer_message_sending() -> Result<()> { - init_test_logging(); - info!("🧪 Testing peer message sending"); - - let rtc_peer = create_test_rtc_peer().await?; - let (message_tx, _) = broadcast::channel(100); - - use p2p_network::PeerConnection; - let peer = PeerConnection::new( - "test_peer_6".to_string(), - "test_node_6".to_string(), - rtc_peer, - 
message_tx, - )?; - - // Test sending different message types - let ping_msg = P2PMessage::Ping { - timestamp: chrono::Utc::now().timestamp() as u64, - nonce: 12345, - }; - - let handshake_msg = P2PMessage::Handshake { - node_id: "test_node".to_string(), - version: "1.0.0".to_string(), - timestamp: chrono::Utc::now().timestamp() as u64, - }; - - // These will fail due to no data channel, but test the API - let ping_result = peer.send_message(ping_msg).await; - assert!(ping_result.is_err()); - - let handshake_result = peer.send_message(handshake_msg).await; - assert!(handshake_result.is_err()); - - info!("✅ Peer message sending test passed"); - Ok(()) -} diff --git a/crates/p2p-network/tests/quick_discovery_test.rs b/crates/p2p-network/tests/quick_discovery_test.rs new file mode 100644 index 0000000..72337ed --- /dev/null +++ b/crates/p2p-network/tests/quick_discovery_test.rs @@ -0,0 +1,246 @@ +//! Quick Discovery Test +//! +//! Non-blocking tests for peer discovery functionality + +use anyhow::Result; +use log::info; + +use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use traits::{P2PNetworkLayer, TxInput, TxOutput, UtxoId, UtxoTransaction}; + +fn init_test_logging() { + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Info) + .is_test(true) + .try_init(); +} + +fn create_test_tx(id: u64) -> UtxoTransaction { + UtxoTransaction { + hash: format!("quick_test_tx_{}", id), + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{}", id), + output_index: 0, + }, + redeemer: b"test".to_vec(), + signature: b"sig".to_vec(), + }], + outputs: vec![TxOutput { + value: 1000, + script: vec![], + datum: Some(b"test_data".to_vec()), + datum_hash: Some("hash".to_string()), + }], + fee: 100, + validity_range: Some((0, 10000)), + script_witness: vec![], + auxiliary_data: None, + } +} + +#[tokio::test] +async fn test_network_creation_and_stats() -> Result<()> { + init_test_logging(); + info!("Testing network creation and statistics"); + + // Create 
network + let config = P2PConfig { + node_id: "test_stats_node".to_string(), + listen_addr: "127.0.0.1:13001".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 1, // Short timeout for testing + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + + // Test basic functionality without starting full network + let stats = network.get_network_stats(); + info!("Initial stats: connections={}, messages_sent={}", + stats.active_connections, stats.messages_sent); + + // Test broadcast without actual connections + let tx = create_test_tx(1); + let broadcast_result = network.broadcast_transaction(&tx).await; + info!("Broadcast result: {:?}", broadcast_result.is_ok()); + + // Test getting connected peers (should be empty) + let peers = network.get_connected_peers().await; + info!("Connected peers: {}", peers.len()); + + // Test shutdown + network.shutdown().await?; + info!("Network shutdown completed"); + + assert_eq!(stats.active_connections, 0); + assert_eq!(peers.len(), 0); + assert!(broadcast_result.is_ok()); // Should succeed even with no peers + + info!("Network creation and stats test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_network_initialization_only() -> Result<()> { + init_test_logging(); + info!("Testing network initialization without full startup"); + + // Create multiple networks to test initialization + let mut networks = Vec::new(); + + for i in 0..3 { + let config = P2PConfig { + node_id: format!("init_test_node_{}", i), + listen_addr: format!("127.0.0.1:{}", 13010 + i).parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 5, + connection_timeout: 1, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + networks.push(network); + } + + info!("Created {} networks", networks.len()); + + // Test each network individually + for (i, network) in 
networks.iter().enumerate() { + let stats = network.get_network_stats(); + let peers = network.get_connected_peers().await; + + info!("Network {} - Stats: {}, Peers: {}", i, stats.active_connections, peers.len()); + + // Test transaction creation and serialization + let tx = create_test_tx(i as u64); + let serialized = bincode::serialize(&tx)?; + info!("Network {} - Transaction size: {} bytes", i, serialized.len()); + } + + // Cleanup + for network in networks { + network.shutdown().await?; + } + + info!("Network initialization test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_discovered_peers_functionality() -> Result<()> { + init_test_logging(); + info!("Testing discovered peers functionality"); + + let config = P2PConfig { + node_id: "discovery_test_node".to_string(), + listen_addr: "127.0.0.1:13020".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 10, + connection_timeout: 1, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + + // Test get_discovered_peers method (should be empty initially) + let discovered = network.get_discovered_peers().await; + info!("Initially discovered peers: {}", discovered.len()); + + // Test network statistics + let stats = network.get_network_stats(); + info!("Network stats - Total connections: {}, Active: {}, Messages sent: {}", + stats.total_connections, stats.active_connections, stats.messages_sent); + + // Test broadcasting multiple transactions + for i in 0..5 { + let tx = create_test_tx(100 + i); + let result = network.broadcast_transaction(&tx).await; + info!("Broadcast {} result: {:?}", i, result.is_ok()); + } + + // Check stats after broadcasts + let final_stats = network.get_network_stats(); + info!("Final stats - Messages sent: {}", final_stats.messages_sent); + + network.shutdown().await?; + + assert_eq!(discovered.len(), 0); // No real discovery without network activity + // Stats may not be updated immediately in this 
test setup + + info!("Discovered peers functionality test completed"); + Ok(()) +} + +#[tokio::test] +async fn test_concurrent_network_operations() -> Result<()> { + init_test_logging(); + info!("Testing concurrent network operations"); + + let config = P2PConfig { + node_id: "concurrent_test_node".to_string(), + listen_addr: "127.0.0.1:13030".parse().unwrap(), + bootstrap_peers: vec![], + stun_servers: vec![], + max_peers: 10, + connection_timeout: 1, + keep_alive_interval: 30, + debug_mode: false, + }; + + let network = WebRTCP2PNetwork::new(config)?; + + // Create multiple concurrent transactions + let mut handles = Vec::new(); + + for i in 0..10 { + let net = network.clone(); + let handle = tokio::spawn(async move { + let tx = create_test_tx(200 + i); + net.broadcast_transaction(&tx).await + }); + handles.push(handle); + } + + // Wait for all broadcasts to complete + let results = futures::future::join_all(handles).await; + + let successful_broadcasts = results.iter() + .filter_map(|r| r.as_ref().ok()) + .filter(|r| r.is_ok()) + .count(); + + info!("Successful concurrent broadcasts: {}/{}", successful_broadcasts, results.len()); + + // Test concurrent peer queries + let mut peer_handles = Vec::new(); + for _ in 0..5 { + let net = network.clone(); + let handle = tokio::spawn(async move { + net.get_connected_peers().await + }); + peer_handles.push(handle); + } + + let peer_results = futures::future::join_all(peer_handles).await; + let successful_queries = peer_results.iter() + .filter_map(|r| r.as_ref().ok()) + .count(); + + info!("Successful concurrent peer queries: {}/{}", successful_queries, peer_results.len()); + + network.shutdown().await?; + + assert!(successful_broadcasts >= 8); // Most should succeed + assert_eq!(successful_queries, 5); // All peer queries should succeed + + info!("Concurrent network operations test completed"); + Ok(()) +} \ No newline at end of file diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml deleted file mode 100644 
index 49e45b1..0000000 --- a/crates/sdk/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "sdk" -version = "0.1.0" -edition = "2021" -rust-version = "1.82" -description = "PolyTorus SDK for developers to interact with the 4-layer modular blockchain" -authors = ["quantumshiro"] -license = "MIT" -repository = "https://github.com/quantumshiro/polytorus" -keywords = ["blockchain", "sdk", "polytorus", "modular", "api"] -categories = ["api-bindings", "cryptography", "network-programming"] - -[dependencies] -# Core PolyTorus dependencies -traits = { path = "../traits" } -execution = { path = "../execution" } -settlement = { path = "../settlement" } -consensus = { path = "../consensus" } -data-availability = { path = "../data-availability" } - -# External wallet dependency -wallet = { git = "https://github.com/PolyTorus/wallet.git" } - -# Core dependencies -anyhow = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } -async-trait = { workspace = true } -log = { workspace = true } - -# Utilities -chrono = { workspace = true } -uuid = { workspace = true } -hex = { workspace = true } -sha2 = { workspace = true } - -[dev-dependencies] -tokio-test = "0.4" -env_logger = { workspace = true } - -[features] -default = ["full"] -full = ["execution-layer", "settlement-layer", "consensus-layer", "data-availability-layer"] -execution-layer = [] -settlement-layer = [] -consensus-layer = [] -data-availability-layer = [] \ No newline at end of file diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs deleted file mode 100644 index edc5a12..0000000 --- a/crates/sdk/src/lib.rs +++ /dev/null @@ -1,582 +0,0 @@ -//! PolyTorus SDK - Developer-friendly interface for the 4-layer modular blockchain -//! -//! This SDK provides a high-level, easy-to-use interface for developers to interact with -//! the PolyTorus blockchain platform. It abstracts the complexity of the 4-layer architecture -//! 
and provides simple methods for common blockchain operations. -//! -//! # Features -//! -//! - **Transaction Management**: Create, sign, and submit transactions -//! - **Wallet Integration**: Full HD wallet support with BIP32/BIP44 -//! - **Smart Contracts**: Deploy and interact with WASM-based contracts -//! - **Block Operations**: Query blocks, mining, and validation -//! - **Data Availability**: Store and retrieve data with proofs -//! - **Layer Abstraction**: Direct access to individual layers when needed -//! -//! # Quick Start -//! -//! ```rust -//! use sdk::{PolyTorusClient, ClientConfig}; -//! -//! #[tokio::main] -//! async fn main() -> anyhow::Result<()> { -//! // Create a new client -//! let client = PolyTorusClient::new(ClientConfig::default()).await?; -//! -//! // Create a wallet -//! let wallet = client.create_wallet().await?; -//! -//! // Send a transaction -//! let tx_hash = client.send_transaction( -//! &wallet, -//! "recipient_address", -//! 1000, // amount -//! None // data -//! ).await?; -//! -//! println!("Transaction sent: {}", tx_hash); -//! Ok(()) -//! } -//! 
``` - -use anyhow::anyhow; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; - -// Re-export core types for convenience -pub use traits::*; -pub use wallet::{HdWallet, Wallet, Address as WalletAddress, KeyPair, Signature, Mnemonic}; - -// Internal layer imports -use consensus::{PolyTorusConsensusLayer, ConsensusConfig}; -use data_availability::{PolyTorusDataAvailabilityLayer, DataAvailabilityConfig}; -use execution::{PolyTorusExecutionLayer, ExecutionConfig}; -use settlement::{PolyTorusSettlementLayer, SettlementConfig}; - -// ============================================================================ -// SDK Configuration -// ============================================================================ - -/// Configuration for the PolyTorus client -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ClientConfig { - pub network: NetworkConfig, - pub layers: LayerConfigs, - pub wallet: WalletConfig, -} - -impl Default for ClientConfig { - fn default() -> Self { - Self { - network: NetworkConfig::default(), - layers: LayerConfigs::default(), - wallet: WalletConfig::default(), - } - } -} - -/// Network configuration -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct NetworkConfig { - pub chain_id: u64, - pub network_name: String, - pub is_testnet: bool, -} - -impl Default for NetworkConfig { - fn default() -> Self { - Self { - chain_id: 1, - network_name: "polytorus-mainnet".to_string(), - is_testnet: false, - } - } -} - -/// Layer configurations -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LayerConfigs { - pub execution: ExecutionConfig, - pub settlement: SettlementConfig, - pub consensus: ConsensusConfig, - pub data_availability: DataAvailabilityConfig, -} - -impl Default for LayerConfigs { - fn default() -> Self { - Self { - execution: ExecutionConfig::default(), - settlement: SettlementConfig::default(), - consensus: ConsensusConfig::default(), - 
data_availability: DataAvailabilityConfig::default(), - } - } -} - -/// Wallet configuration -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct WalletConfig { - pub derivation_path: String, - pub address_format: String, -} - -impl Default for WalletConfig { - fn default() -> Self { - Self { - derivation_path: "m/44'/0'/0'".to_string(), - address_format: "native_segwit".to_string(), - } - } -} - -// ============================================================================ -// SDK Client -// ============================================================================ - -/// High-level client for interacting with PolyTorus blockchain -pub struct PolyTorusClient { - config: ClientConfig, - execution_layer: Arc>, - settlement_layer: Arc>, - consensus_layer: Arc>, - data_availability_layer: Arc>, - wallets: Arc>>, -} - -impl PolyTorusClient { - /// Create a new PolyTorus client - pub async fn new(config: ClientConfig) -> Result { - let execution_layer = Arc::new(RwLock::new( - PolyTorusExecutionLayer::new(config.layers.execution.clone())?, - )); - let settlement_layer = Arc::new(RwLock::new( - PolyTorusSettlementLayer::new(config.layers.settlement.clone())?, - )); - let consensus_layer = Arc::new(RwLock::new( - PolyTorusConsensusLayer::new(config.layers.consensus.clone())?, - )); - let data_availability_layer = Arc::new(RwLock::new( - PolyTorusDataAvailabilityLayer::new(config.layers.data_availability.clone())?, - )); - - Ok(Self { - config, - execution_layer, - settlement_layer, - consensus_layer, - data_availability_layer, - wallets: Arc::new(RwLock::new(HashMap::new())), - }) - } - - /// Create a new client with default configuration - pub async fn new_default() -> Result { - Self::new(ClientConfig::default()).await - } - - // ======================================================================== - // Wallet Management - // ======================================================================== - - /// Create a new HD wallet - pub async fn 
create_wallet(&self) -> Result { - let wallet = HdWallet::new(wallet::KeyType::Ed25519) - .map_err(|e| anyhow!("Failed to create wallet: {}", e))?; - - let mnemonic_phrase = wallet.get_mnemonic().phrase().to_string(); - - let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) - .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; - let address = base_wallet.default_address() - .map_err(|e| anyhow!("Failed to get address: {}", e))?.value; - - let wallet_id = uuid::Uuid::new_v4().to_string(); - - let mut wallets = self.wallets.write().await; - wallets.insert(wallet_id.clone(), wallet); - - Ok(WalletInfo { - id: wallet_id, - address, - mnemonic: mnemonic_phrase, - derivation_path: self.config.wallet.derivation_path.clone(), - }) - } - - /// Import wallet from mnemonic - pub async fn import_wallet(&self, mnemonic: &str, _passphrase: Option<&str>) -> Result { - let wallet = HdWallet::from_phrase(mnemonic, wallet::KeyType::Ed25519) - .map_err(|e| anyhow!("Failed to import wallet: {}", e))?; - - let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) - .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; - let address = base_wallet.default_address() - .map_err(|e| anyhow!("Failed to get address: {}", e))?.value; - - let wallet_id = uuid::Uuid::new_v4().to_string(); - - let mut wallets = self.wallets.write().await; - wallets.insert(wallet_id.clone(), wallet); - - Ok(WalletInfo { - id: wallet_id, - address, - mnemonic: mnemonic.to_string(), - derivation_path: self.config.wallet.derivation_path.clone(), - }) - } - - /// Get wallet information - pub async fn get_wallet(&self, wallet_id: &str) -> Result> { - let wallets = self.wallets.read().await; - - if let Some(wallet) = wallets.get(wallet_id) { - let mut base_wallet = wallet.derive_wallet("m/44'/9999'/0'/0/0", wallet::KeyType::Ed25519) - .map_err(|e| anyhow!("Failed to derive wallet: {}", e))?; - let address = base_wallet.default_address() - 
.map_err(|e| anyhow!("Failed to get address: {}", e))?.value; - - Ok(Some(WalletInfo { - id: wallet_id.to_string(), - address, - mnemonic: String::new(), // Don't expose mnemonic in get operations - derivation_path: self.config.wallet.derivation_path.clone(), - })) - } else { - Ok(None) - } - } - - // ======================================================================== - // Transaction Operations - // ======================================================================== - - /// Send a simple transaction - pub async fn send_transaction( - &self, - wallet_info: &WalletInfo, - to: &str, - amount: u64, - data: Option>, - ) -> Result { - let wallets = self.wallets.read().await; - let wallet = wallets.get(&wallet_info.id) - .ok_or_else(|| anyhow!("Wallet not found"))?; - - let keypair = wallet.derive_key(0) - .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; - - let nonce = self.get_account_nonce(&wallet_info.address).await?; - - let tx = Transaction { - hash: self.generate_transaction_hash(), - from: wallet_info.address.clone(), - to: Some(to.to_string()), - value: amount, - gas_limit: 21000, // Standard gas limit - gas_price: 1, // Default gas price - data: data.unwrap_or_default(), - nonce, - signature: vec![], // Will be filled after signing - script_type: None, - }; - - let signed_tx = self.sign_transaction(tx, &keypair).await?; - self.submit_transaction(signed_tx).await - } - - /// Deploy a smart contract - pub async fn deploy_contract( - &self, - wallet_info: &WalletInfo, - contract_code: &[u8], - init_params: &[u8], - gas_limit: u64, - ) -> Result { - let wallets = self.wallets.read().await; - let wallet = wallets.get(&wallet_info.id) - .ok_or_else(|| anyhow!("Wallet not found"))?; - - let keypair = wallet.derive_key(0) - .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; - - let nonce = self.get_account_nonce(&wallet_info.address).await?; - - let tx = Transaction { - hash: self.generate_transaction_hash(), - from: 
wallet_info.address.clone(), - to: None, // Contract deployment - value: 0, - gas_limit, - gas_price: 1, - data: vec![], - nonce, - signature: vec![], - script_type: Some(ScriptTransactionType::Deploy { - script_data: contract_code.to_vec(), - init_params: init_params.to_vec(), - }), - }; - - let signed_tx = self.sign_transaction(tx, &keypair).await?; - self.submit_transaction(signed_tx).await - } - - /// Call a smart contract method - pub async fn call_contract( - &self, - wallet_info: &WalletInfo, - contract_hash: &Hash, - method: &str, - params: &[u8], - gas_limit: u64, - ) -> Result { - let wallets = self.wallets.read().await; - let wallet = wallets.get(&wallet_info.id) - .ok_or_else(|| anyhow!("Wallet not found"))?; - - let keypair = wallet.derive_key(0) - .map_err(|e| anyhow!("Failed to derive keypair: {}", e))?; - - let nonce = self.get_account_nonce(&wallet_info.address).await?; - - let tx = Transaction { - hash: self.generate_transaction_hash(), - from: wallet_info.address.clone(), - to: Some(contract_hash.clone()), - value: 0, - gas_limit, - gas_price: 1, - data: vec![], - nonce, - signature: vec![], - script_type: Some(ScriptTransactionType::Call { - script_hash: contract_hash.clone(), - method: method.to_string(), - params: params.to_vec(), - }), - }; - - let signed_tx = self.sign_transaction(tx, &keypair).await?; - self.submit_transaction(signed_tx).await - } - - // ======================================================================== - // Query Operations - // ======================================================================== - - /// Get account balance - pub async fn get_balance(&self, address: &str) -> Result { - let execution = self.execution_layer.read().await; - let account_state = execution.get_account_state(&address.to_string()).await?; - Ok(account_state.balance) - } - - /// Get account nonce - pub async fn get_account_nonce(&self, address: &str) -> Result { - let execution = self.execution_layer.read().await; - let account_state = 
execution.get_account_state(&address.to_string()).await?; - Ok(account_state.nonce) - } - - /// Get block by hash - pub async fn get_block(&self, block_hash: &Hash) -> Result> { - let consensus = self.consensus_layer.read().await; - consensus.get_block_by_hash(block_hash).await - } - - /// Get current block height - pub async fn get_block_height(&self) -> Result { - let consensus = self.consensus_layer.read().await; - consensus.get_block_height().await - } - - /// Get transaction receipt (simplified) - pub async fn get_transaction_receipt(&self, _tx_hash: &Hash) -> Result> { - // This would typically query a transaction pool or blockchain storage - // For now, returning None as this requires more complex state management - Ok(None) - } - - // ======================================================================== - // Data Availability Operations - // ======================================================================== - - /// Store data on the blockchain - pub async fn store_data(&self, data: &[u8]) -> Result { - let mut da_layer = self.data_availability_layer.write().await; - da_layer.store_data(data).await - } - - /// Retrieve data from the blockchain - pub async fn retrieve_data(&self, data_hash: &Hash) -> Result>> { - let da_layer = self.data_availability_layer.read().await; - da_layer.retrieve_data(data_hash).await - } - - /// Verify data availability - pub async fn verify_data_availability(&self, data_hash: &Hash) -> Result { - let da_layer = self.data_availability_layer.read().await; - da_layer.verify_availability(data_hash).await - } - - // ======================================================================== - // Mining Operations - // ======================================================================== - - /// Mine a new block with pending transactions - pub async fn mine_block(&self) -> Result { - let mut consensus = self.consensus_layer.write().await; - // For simplicity, mining an empty block - // In a real implementation, this would 
gather pending transactions - consensus.mine_block(vec![]).await - } - - /// Set mining difficulty - pub async fn set_mining_difficulty(&self, difficulty: usize) -> Result<()> { - let mut consensus = self.consensus_layer.write().await; - consensus.set_difficulty(difficulty).await - } - - // ======================================================================== - // Layer Access - // ======================================================================== - - /// Get direct access to execution layer - pub fn execution_layer(&self) -> Arc> { - self.execution_layer.clone() - } - - /// Get direct access to settlement layer - pub fn settlement_layer(&self) -> Arc> { - self.settlement_layer.clone() - } - - /// Get direct access to consensus layer - pub fn consensus_layer(&self) -> Arc> { - self.consensus_layer.clone() - } - - /// Get direct access to data availability layer - pub fn data_availability_layer(&self) -> Arc> { - self.data_availability_layer.clone() - } - - // ======================================================================== - // Internal Helper Methods - // ======================================================================== - - async fn sign_transaction(&self, mut tx: Transaction, keypair: &KeyPair) -> Result { - let tx_data = serde_json::to_vec(&tx)?; - let signature_vec = keypair.sign(&tx_data) - .map_err(|e| anyhow!("Failed to sign transaction: {}", e))?; - tx.signature = signature_vec; - Ok(tx) - } - - async fn submit_transaction(&self, tx: Transaction) -> Result { - let mut execution = self.execution_layer.write().await; - let receipt = execution.execute_transaction(&tx).await?; - - if receipt.success { - Ok(tx.hash) - } else { - Err(anyhow!("Transaction execution failed")) - } - } - - fn generate_transaction_hash(&self) -> Hash { - use sha2::{Sha256, Digest}; - let mut hasher = Sha256::new(); - hasher.update(chrono::Utc::now().timestamp().to_string()); - hasher.update(uuid::Uuid::new_v4().to_string()); - hex::encode(hasher.finalize()) - } 
-} - -// ============================================================================ -// SDK Types -// ============================================================================ - -/// Wallet information returned by SDK -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct WalletInfo { - pub id: String, - pub address: String, - pub mnemonic: String, - pub derivation_path: String, -} - -/// Contract deployment result -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ContractDeployment { - pub contract_hash: Hash, - pub transaction_hash: Hash, - pub gas_used: u64, -} - -/// Contract call result -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ContractCallResult { - pub transaction_hash: Hash, - pub return_data: Vec, - pub gas_used: u64, - pub events: Vec, -} - -// ============================================================================ -// Tests -// ============================================================================ - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_client_creation() { - let client = PolyTorusClient::new_default().await; - assert!(client.is_ok()); - } - - #[tokio::test] - async fn test_wallet_creation() { - let client = PolyTorusClient::new_default().await.unwrap(); - let wallet = client.create_wallet().await; - assert!(wallet.is_ok()); - - let wallet_info = wallet.unwrap(); - assert!(!wallet_info.id.is_empty()); - assert!(!wallet_info.address.is_empty()); - assert!(!wallet_info.mnemonic.is_empty()); - } - - #[tokio::test] - async fn test_data_storage() { - let client = PolyTorusClient::new_default().await.unwrap(); - let data = b"Hello, PolyTorus!"; - - let hash = client.store_data(data).await; - assert!(hash.is_ok()); - - let stored_data = client.retrieve_data(&hash.unwrap()).await; - assert!(stored_data.is_ok()); - assert_eq!(stored_data.unwrap(), Some(data.to_vec())); - } - - #[tokio::test] - async fn test_mining() { - let client = 
PolyTorusClient::new_default().await.unwrap(); - - // Set low difficulty for fast mining in tests - client.set_mining_difficulty(0).await.unwrap(); - - let block = client.mine_block().await; - assert!(block.is_ok()); - - let mined_block = block.unwrap(); - assert!(!mined_block.hash.is_empty()); - assert_eq!(mined_block.transactions.len(), 0); // Empty block - } -} \ No newline at end of file diff --git a/docker-compose.simple.yml b/docker-compose.simple.yml new file mode 100644 index 0000000..92b5251 --- /dev/null +++ b/docker-compose.simple.yml @@ -0,0 +1,87 @@ +version: '3.8' + +services: + # Bootstrap node - the first node that others connect to + polytorus-bootstrap: + image: polytorus:simple + container_name: polytorus-bootstrap + hostname: polytorus-bootstrap + ports: + - "8080:8080" + environment: + - RUST_LOG=info + - NODE_ID=bootstrap-node + - LISTEN_PORT=8080 + - DEBUG_MODE=true + - POLYTORUS_DATA_DIR=/app/data + command: ["start-p2p", "--node-id", "bootstrap-node", "--listen-port", "8080", "--adaptive"] + networks: + - polytorus-testnet + + # Node 1 - connects to bootstrap + polytorus-node1: + image: polytorus:simple + container_name: polytorus-node1 + hostname: polytorus-node1 + ports: + - "8081:8081" + environment: + - RUST_LOG=info + - NODE_ID=node-1 + - LISTEN_PORT=8081 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080 + - DEBUG_MODE=true + - POLYTORUS_DATA_DIR=/app/data + command: ["start-p2p", "--node-id", "node-1", "--listen-port", "8081", "--bootstrap-peers", "polytorus-bootstrap:8080", "--adaptive"] + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + + # Node 2 - connects to bootstrap + polytorus-node2: + image: polytorus:simple + container_name: polytorus-node2 + hostname: polytorus-node2 + ports: + - "8082:8082" + environment: + - RUST_LOG=info + - NODE_ID=node-2 + - LISTEN_PORT=8082 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080 + - DEBUG_MODE=true + - POLYTORUS_DATA_DIR=/app/data + command: ["start-p2p", "--node-id", "node-2", 
"--listen-port", "8082", "--bootstrap-peers", "polytorus-bootstrap:8080", "--adaptive"] + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + + # Transaction test client + polytorus-client: + image: polytorus:simple + container_name: polytorus-client + hostname: polytorus-client + environment: + - RUST_LOG=info + - NODE_ID=client-node + - LISTEN_PORT=8084 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080,polytorus-node1:8081 + - DEBUG_MODE=true + - POLYTORUS_DATA_DIR=/app/data + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + - polytorus-node1 + - polytorus-node2 + # Client starts in sleep mode - we'll exec commands into it + entrypoint: ["sleep", "infinity"] + +networks: + polytorus-testnet: + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 \ No newline at end of file diff --git a/docker-compose.testnet.yml b/docker-compose.testnet.yml new file mode 100644 index 0000000..90e36ec --- /dev/null +++ b/docker-compose.testnet.yml @@ -0,0 +1,127 @@ +version: '3.8' + +services: + # Bootstrap node - the first node that others connect to + polytorus-bootstrap: + build: . + container_name: polytorus-bootstrap + hostname: polytorus-bootstrap + ports: + - "8080:8080" + environment: + - RUST_LOG=info + - NODE_ID=bootstrap-node + - LISTEN_PORT=8080 + - DEBUG_MODE=true + command: ["start-p2p", "--node-id", "bootstrap-node", "--listen-port", "8080", "--adaptive"] + networks: + - polytorus-testnet + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health", "||", "exit", "1"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # Node 1 - connects to bootstrap + polytorus-node1: + build: . 
+ container_name: polytorus-node1 + hostname: polytorus-node1 + ports: + - "8081:8081" + environment: + - RUST_LOG=info + - NODE_ID=node-1 + - LISTEN_PORT=8081 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080 + - DEBUG_MODE=true + command: ["start-p2p", "--node-id", "node-1", "--listen-port", "8081", "--bootstrap-peers", "polytorus-bootstrap:8080", "--adaptive"] + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:8081/health || exit 1"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # Node 2 - connects to bootstrap + polytorus-node2: + build: . + container_name: polytorus-node2 + hostname: polytorus-node2 + ports: + - "8082:8082" + environment: + - RUST_LOG=info + - NODE_ID=node-2 + - LISTEN_PORT=8082 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080 + - DEBUG_MODE=true + command: ["start-p2p", "--node-id", "node-2", "--listen-port", "8082", "--bootstrap-peers", "polytorus-bootstrap:8080", "--adaptive"] + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:8082/health || exit 1"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # Node 3 - connects to bootstrap + polytorus-node3: + build: . + container_name: polytorus-node3 + hostname: polytorus-node3 + ports: + - "8083:8083" + environment: + - RUST_LOG=info + - NODE_ID=node-3 + - LISTEN_PORT=8083 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080 + - DEBUG_MODE=true + command: ["start-p2p", "--node-id", "node-3", "--listen-port", "8083", "--bootstrap-peers", "polytorus-bootstrap:8080", "--adaptive"] + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:8083/health || exit 1"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # Transaction test client + polytorus-client: + build: .
+ container_name: polytorus-client + hostname: polytorus-client + environment: + - RUST_LOG=info + - NODE_ID=client-node + - LISTEN_PORT=8084 + - BOOTSTRAP_PEERS=polytorus-bootstrap:8080,polytorus-node1:8081 + - DEBUG_MODE=true + networks: + - polytorus-testnet + depends_on: + - polytorus-bootstrap + - polytorus-node1 + - polytorus-node2 + - polytorus-node3 + # Client starts in sleep mode - we'll exec commands into it + command: ["sleep", "infinity"] + +networks: + polytorus-testnet: + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 \ No newline at end of file diff --git a/scripts/simple-testnet.sh b/scripts/simple-testnet.sh new file mode 100755 index 0000000..7745952 --- /dev/null +++ b/scripts/simple-testnet.sh @@ -0,0 +1,282 @@ +#!/bin/bash + +# PolyTorus Simple Testnet Manager +# Manages simple Docker-based testnet for PolyTorus blockchain + +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +PROJECT_DIR="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )" + +cd "$PROJECT_DIR" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Build the simple Docker image +build_image() { + log_info "Building simple PolyTorus Docker image..." + cargo build --release + docker build -f Dockerfile.simple -t polytorus:simple . + log_success "Simple Docker image built successfully" +} + +# Start the testnet +start_testnet() { + log_info "Starting PolyTorus simple testnet..." + docker-compose -f docker-compose.simple.yml up -d + log_success "Testnet started successfully" + + log_info "Waiting for nodes to initialize..." 
+ sleep 10 + + show_status +} + +# Stop the testnet +stop_testnet() { + log_info "Stopping PolyTorus testnet..." + docker-compose -f docker-compose.simple.yml down + log_success "Testnet stopped successfully" +} + +# Show testnet status +show_status() { + log_info "Testnet Status:" + echo "==============" + docker-compose -f docker-compose.simple.yml ps + echo "" + + log_info "Container Health:" + docker ps --filter "name=polytorus" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" +} + +# Show logs for a specific node +show_logs() { + local node=$1 + if [ -z "$node" ]; then + log_error "Please specify a node name (bootstrap, node1, node2, client)" + exit 1 + fi + + local container_name="polytorus-$node" + log_info "Showing logs for $container_name..." + docker logs -f "$container_name" +} + +# Execute a command in a container +exec_command() { + local node=$1 + shift + local cmd="$@" + + if [ -z "$node" ]; then + log_error "Please specify a node name" + exit 1 + fi + + local container_name="polytorus-$node" + log_info "Executing command in $container_name: $cmd" + docker exec -it "$container_name" $cmd +} + +# Send a test transaction using client container +send_transaction() { + local from=${1:-"alice"} + local to=${2:-"bob"} + local amount=${3:-"1000"} + + log_info "Sending transaction: $from -> $to ($amount units)" + + # Use the client container to send transaction + if docker exec polytorus-client ./polytorus send --from "$from" --to "$to" --amount "$amount"; then + log_success "Transaction sent successfully!" + else + log_error "Transaction failed!" + exit 1 + fi +} + +# Initialize genesis on client +init_genesis() { + log_info "Initializing genesis on client..." + if docker exec polytorus-client ./polytorus start; then + log_success "Genesis initialized successfully!" + else + log_error "Genesis initialization failed!" + exit 1 + fi +} + +# Get blockchain status +blockchain_status() { + log_info "Getting blockchain status..." 
+ if docker exec polytorus-client ./polytorus status; then + log_success "Status retrieved successfully" + else + log_error "Could not retrieve blockchain status" + exit 1 + fi +} + +# Get network status from nodes +network_status() { + log_info "Getting network status from nodes..." + + for node in bootstrap node1 node2; do + echo "" + log_info "Network stats for $node:" + if docker exec polytorus-$node ./polytorus network-status 2>/dev/null; then + log_success "Stats retrieved successfully" + else + log_warning "Could not retrieve stats from $node (expected for P2P nodes)" + fi + echo "---" + done +} + +# Get peer information +peer_info() { + log_info "Getting peer information from nodes..." + + for node in bootstrap node1 node2; do + echo "" + log_info "Peer info for $node:" + if docker exec polytorus-$node ./polytorus peers 2>/dev/null; then + log_success "Peer info retrieved successfully" + else + log_warning "Could not retrieve peer info from $node (expected for P2P nodes)" + fi + echo "---" + done +} + +# Test full transaction flow +test_transactions() { + log_info "Testing full transaction flow..." + echo "================================" + + # Wait for network to be ready + log_info "Waiting for network to be ready..." + sleep 15 + + # Initialize genesis + init_genesis + + # Wait a bit more + sleep 5 + + # Get initial status + log_info "Initial blockchain status:" + blockchain_status + + # Send multiple transactions + log_info "Sending test transactions..." + send_transaction "alice" "bob" "1000" + sleep 2 + send_transaction "bob" "charlie" "500" + sleep 2 + send_transaction "charlie" "alice" "250" + + # Get final status + log_info "Final blockchain status:" + blockchain_status + + log_success "Transaction testing completed!" 
+} + +# Main script logic +case "${1:-help}" in + "build") + build_image + ;; + "start") + start_testnet + ;; + "stop") + stop_testnet + ;; + "status") + show_status + ;; + "logs") + show_logs "$2" + ;; + "exec") + shift + exec_command "$@" + ;; + "network-status") + network_status + ;; + "peers") + peer_info + ;; + "send-tx") + send_transaction "$2" "$3" "$4" + ;; + "init-genesis") + init_genesis + ;; + "blockchain-status") + blockchain_status + ;; + "test") + test_transactions + ;; + "cleanup") + log_info "Cleaning up testnet environment..." + docker-compose -f docker-compose.simple.yml down -v + log_success "Cleanup completed" + ;; + "help"|*) + echo "PolyTorus Simple Testnet Manager" + echo "===============================" + echo "" + echo "Usage: $0 [options]" + echo "" + echo "Commands:" + echo " build Build the simple Docker image" + echo " start Start the testnet" + echo " stop Stop the testnet" + echo " status Show testnet status" + echo " logs Show logs for a node (bootstrap, node1, node2, client)" + echo " exec Execute command in a container" + echo " network-status Get network statistics from all nodes" + echo " peers Get peer information from all nodes" + echo " send-tx [from] [to] [amount] Send a transaction (defaults: alice bob 1000)" + echo " init-genesis Initialize genesis on client" + echo " blockchain-status Get blockchain status" + echo " test Run full transaction test" + echo " cleanup Clean up all containers and data" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 build # Build the image" + echo " $0 start # Start testnet" + echo " $0 send-tx alice bob 500 # Send 500 from alice to bob" + echo " $0 logs bootstrap # Show bootstrap node logs" + echo " $0 test # Run complete transaction test" + ;; +esac \ No newline at end of file diff --git a/scripts/stress-test.sh b/scripts/stress-test.sh new file mode 100755 index 0000000..0ccb7fb --- /dev/null +++ b/scripts/stress-test.sh @@ -0,0 +1,193 @@ +#!/bin/bash + +# 
PolyTorus 100 Transaction Stress Test +# Tests blockchain progression with 100 consecutive transactions + +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +PROJECT_DIR="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )" + +cd "$PROJECT_DIR" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Array of users for transactions +USERS=("alice" "bob" "charlie" "david" "eve" "frank" "grace" "henry") + +# Function to get random user +get_random_user() { + echo "${USERS[$RANDOM % ${#USERS[@]}]}" +} + +# Function to get random amount +get_random_amount() { + echo $((100 + $RANDOM % 900)) # Random amount between 100-999 +} + +# Function to send a transaction +send_transaction() { + local tx_num=$1 + local from=$(get_random_user) + local to=$(get_random_user) + local amount=$(get_random_amount) + + # Ensure from and to are different + while [ "$from" = "$to" ]; do + to=$(get_random_user) + done + + log_info "Transaction #$tx_num: $from -> $to ($amount units)" + + if ./scripts/simple-testnet.sh send-tx "$from" "$to" "$amount" >/dev/null 2>&1; then + log_success "Transaction #$tx_num completed" + return 0 + else + log_error "Transaction #$tx_num failed" + return 1 + fi +} + +# Function to check blockchain status +check_status() { + local expected_height=$1 + log_info "Checking blockchain status (expected height: $expected_height)..." 
+ + # Get status and extract height + local status_output=$(./scripts/simple-testnet.sh blockchain-status 2>/dev/null) + local actual_height=$(echo "$status_output" | grep "Chain Height:" | awk '{print $3}') + + if [ "$actual_height" = "$expected_height" ]; then + log_success "Height matches: $actual_height" + return 0 + else + log_error "Height mismatch: expected $expected_height, got $actual_height" + return 1 + fi +} + +# Main stress test function +run_stress_test() { + local num_transactions=${1:-100} + + log_info "Starting stress test with $num_transactions transactions" + log_info "========================================================" + + # Get initial status + log_info "Getting initial blockchain status..." + ./scripts/simple-testnet.sh blockchain-status | head -10 + + local start_time=$(date +%s) + local failed_count=0 + + # Send transactions + for i in $(seq 1 $num_transactions); do + if [ $((i % 10)) -eq 0 ]; then + log_info "Progress: $i/$num_transactions transactions completed" + fi + + if ! send_transaction $i; then + ((failed_count++)) + fi + + # Small delay to avoid overwhelming the system + sleep 0.1 + done + + local end_time=$(date +%s) + local duration=$((end_time - start_time)) + + log_info "All transactions submitted. Waiting for final state..." 
+ sleep 5 + + # Check final status + log_info "Final blockchain status:" + ./scripts/simple-testnet.sh blockchain-status + + # Calculate statistics + local success_count=$((num_transactions - failed_count)) + local tps=$(echo "scale=2; $success_count / $duration" | bc -l) + + echo "" + log_info "Stress Test Results:" + log_info "===================" + log_info "Total transactions: $num_transactions" + log_success "Successful: $success_count" + if [ $failed_count -gt 0 ]; then + log_warning "Failed: $failed_count" + else + log_success "Failed: $failed_count" + fi + log_info "Duration: ${duration}s" + log_info "Throughput: ${tps} TPS" + + # Get actual starting height from initial status + local initial_status=$(./scripts/simple-testnet.sh blockchain-status 2>/dev/null) + local initial_height=$(echo "$initial_status" | grep "Chain Height:" | awk '{print $3}') + + # Verify final height matches expected + local expected_final_height=$((initial_height + success_count)) + if check_status $expected_final_height; then + log_success "Blockchain height verification passed!" + return 0 + else + log_error "Blockchain height verification failed!" 
+ return 1 + fi +} + +# Handle different commands +case "${1:-test}" in + "test") + run_stress_test 100 + ;; + "quick") + run_stress_test 10 + ;; + "custom") + if [ -z "$2" ]; then + log_error "Please specify number of transactions: $0 custom " + exit 1 + fi + run_stress_test "$2" + ;; + "help"|*) + echo "PolyTorus Stress Test" + echo "====================" + echo "" + echo "Usage: $0 " + echo "" + echo "Commands:" + echo " test Run 100 transaction stress test (default)" + echo " quick Run 10 transaction quick test" + echo " custom Run custom number of transactions" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 test # Run 100 transactions" + echo " $0 quick # Run 10 transactions" + echo " $0 custom 50 # Run 50 transactions" + ;; +esac \ No newline at end of file diff --git a/scripts/testnet-manager.sh b/scripts/testnet-manager.sh new file mode 100755 index 0000000..ab85e55 --- /dev/null +++ b/scripts/testnet-manager.sh @@ -0,0 +1,316 @@ +#!/bin/bash + +# PolyTorus Testnet Manager +# Manages Docker-based testnet for PolyTorus blockchain + +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +PROJECT_DIR="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )" + +cd "$PROJECT_DIR" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if Docker and Docker Compose are available +check_dependencies() { + if ! command -v docker &> /dev/null; then + log_error "Docker is not installed or not in PATH" + exit 1 + fi + + if ! 
command -v docker-compose &> /dev/null; then + log_error "Docker Compose is not installed or not in PATH" + exit 1 + fi +} + +# Build the Docker image +build_image() { + log_info "Building PolyTorus Docker image..." + docker build -t polytorus:latest . + log_success "Docker image built successfully" +} + +# Start the testnet +start_testnet() { + log_info "Starting PolyTorus testnet..." + docker-compose -f docker-compose.testnet.yml up -d + log_success "Testnet started successfully" + + log_info "Waiting for nodes to initialize..." + sleep 10 + + show_status +} + +# Stop the testnet +stop_testnet() { + log_info "Stopping PolyTorus testnet..." + docker-compose -f docker-compose.testnet.yml down + log_success "Testnet stopped successfully" +} + +# Restart the testnet +restart_testnet() { + log_info "Restarting PolyTorus testnet..." + stop_testnet + sleep 5 + start_testnet +} + +# Show testnet status +show_status() { + log_info "Testnet Status:" + echo "==============" + docker-compose -f docker-compose.testnet.yml ps + echo "" + + log_info "Container Health:" + docker ps --filter "name=polytorus" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" +} + +# Show logs for a specific node +show_logs() { + local node=$1 + if [ -z "$node" ]; then + log_error "Please specify a node name (bootstrap, node1, node2, node3, client)" + exit 1 + fi + + local container_name="polytorus-$node" + log_info "Showing logs for $container_name..." + docker logs -f "$container_name" +} + +# Execute a command in a container +exec_command() { + local node=$1 + shift + local cmd="$@" + + if [ -z "$node" ]; then + log_error "Please specify a node name" + exit 1 + fi + + local container_name="polytorus-$node" + log_info "Executing command in $container_name: $cmd" + docker exec -it "$container_name" $cmd +} + +# Get network statistics from all nodes +network_stats() { + log_info "Getting network statistics from all nodes..." 
+ echo "==============================================" + + for node in bootstrap node1 node2 node3; do + echo "" + log_info "Network stats for $node:" + if docker exec polytorus-$node /app/target/release/polytorus network-status 2>/dev/null; then + log_success "Stats retrieved successfully" + else + log_warning "Could not retrieve stats from $node" + fi + echo "---" + done +} + +# Get peer information from all nodes +peer_info() { + log_info "Getting peer information from all nodes..." + echo "==========================================" + + for node in bootstrap node1 node2 node3; do + echo "" + log_info "Peer info for $node:" + if docker exec polytorus-$node /app/target/release/polytorus peers 2>/dev/null; then + log_success "Peer info retrieved successfully" + else + log_warning "Could not retrieve peer info from $node" + fi + echo "---" + done +} + +# Send a test transaction +send_transaction() { + local from=${1:-"alice"} + local to=${2:-"bob"} + local amount=${3:-"1000"} + + log_info "Sending transaction: $from -> $to ($amount units)" + + # Use the client container to send transaction + if docker exec polytorus-client /app/target/release/polytorus send --from "$from" --to "$to" --amount "$amount"; then + log_success "Transaction sent successfully!" + else + log_error "Transaction failed!" + exit 1 + fi +} + +# Initialize genesis on client +init_genesis() { + log_info "Initializing genesis on client..." + if docker exec polytorus-client /app/target/release/polytorus start; then + log_success "Genesis initialized successfully!" + else + log_error "Genesis initialization failed!" + exit 1 + fi +} + +# Get blockchain status +blockchain_status() { + log_info "Getting blockchain status..." 
+ if docker exec polytorus-client /app/target/release/polytorus status; then + log_success "Status retrieved successfully" + else + log_error "Could not retrieve blockchain status" + exit 1 + fi +} + +# Clean up everything +cleanup() { + log_info "Cleaning up testnet environment..." + docker-compose -f docker-compose.testnet.yml down -v + docker system prune -f + log_success "Cleanup completed" +} + +# Test full transaction flow +test_transactions() { + log_info "Testing full transaction flow..." + echo "================================" + + # Wait for network to be ready + log_info "Waiting for network to be ready..." + sleep 15 + + # Initialize genesis + init_genesis + + # Wait a bit more + sleep 5 + + # Get initial status + log_info "Initial blockchain status:" + blockchain_status + + # Send multiple transactions + log_info "Sending test transactions..." + send_transaction "alice" "bob" "1000" + sleep 2 + send_transaction "bob" "charlie" "500" + sleep 2 + send_transaction "charlie" "alice" "250" + + # Get final status + log_info "Final blockchain status:" + blockchain_status + + log_success "Transaction testing completed!" 
+} + +# Main script logic +case "${1:-help}" in + "build") + check_dependencies + build_image + ;; + "start") + check_dependencies + start_testnet + ;; + "stop") + stop_testnet + ;; + "restart") + restart_testnet + ;; + "status") + show_status + ;; + "logs") + show_logs "$2" + ;; + "exec") + shift + exec_command "$@" + ;; + "network-stats") + network_stats + ;; + "peers") + peer_info + ;; + "send-tx") + send_transaction "$2" "$3" "$4" + ;; + "init-genesis") + init_genesis + ;; + "blockchain-status") + blockchain_status + ;; + "test") + test_transactions + ;; + "cleanup") + cleanup + ;; + "help"|*) + echo "PolyTorus Testnet Manager" + echo "========================" + echo "" + echo "Usage: $0 [options]" + echo "" + echo "Commands:" + echo " build Build the Docker image" + echo " start Start the testnet" + echo " stop Stop the testnet" + echo " restart Restart the testnet" + echo " status Show testnet status" + echo " logs Show logs for a node (bootstrap, node1, node2, node3, client)" + echo " exec Execute command in a container" + echo " network-stats Get network statistics from all nodes" + echo " peers Get peer information from all nodes" + echo " send-tx [from] [to] [amount] Send a transaction (defaults: alice bob 1000)" + echo " init-genesis Initialize genesis on client" + echo " blockchain-status Get blockchain status" + echo " test Run full transaction test" + echo " cleanup Clean up all containers and data" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 build # Build the image" + echo " $0 start # Start testnet" + echo " $0 send-tx alice bob 500 # Send 500 from alice to bob" + echo " $0 logs bootstrap # Show bootstrap node logs" + echo " $0 test # Run complete transaction test" + ;; +esac \ No newline at end of file diff --git a/src/main.rs b/src/main.rs index 83aeb8b..04ff209 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ use clap::{Arg, Command}; use log::{error, info}; use std::collections::HashMap; use 
std::env; +use std::path::Path; use consensus::consensus_engine::{PolyTorusUtxoConsensusLayer, UtxoConsensusConfig}; use execution::execution_engine::{PolyTorusUtxoExecutionLayer, UtxoExecutionConfig}; @@ -12,6 +13,7 @@ use traits::{ UtxoExecutionLayer, UtxoId, UtxoTransaction, }; use wallet::{HdWallet, KeyPair, KeyType, Wallet}; +use serde::{Deserialize, Serialize}; pub struct PolyTorusBlockchain { execution_layer: PolyTorusUtxoExecutionLayer, @@ -19,14 +21,126 @@ pub struct PolyTorusBlockchain { p2p_network: WebRTCP2PNetwork, wallet: HdWallet, user_wallets: HashMap, + storage: Storage, +} + +/// Persistent storage for blockchain state +pub struct Storage { + db: sled::Db, +} + +const BLOCKCHAIN_STATE_KEY: &[u8] = b"blockchain_state"; +const CHAIN_HEIGHT_KEY: &[u8] = b"chain_height"; +const CURRENT_SLOT_KEY: &[u8] = b"current_slot"; +const BLOCK_PREFIX: &[u8] = b"block_"; + +/// Serializable blockchain state for persistence +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PersistentBlockchainState { + pub chain_height: u64, + pub current_slot: u64, + pub total_supply: u64, + pub utxo_set_hash: String, + pub canonical_chain: Vec, // Block hashes in order + pub last_block_hash: Option, // Hash of the latest block +} + +impl Storage { + pub fn new(data_dir: &str) -> Result { + let path = Path::new(data_dir); + std::fs::create_dir_all(path)?; + let db_path = path.join("blockchain.db"); + let db = sled::open(db_path)?; + Ok(Self { db }) + } + + pub fn save_blockchain_state(&self, state: &PersistentBlockchainState) -> Result<()> { + let serialized = bincode::serialize(state) + .map_err(|e| anyhow::anyhow!("Failed to serialize blockchain state: {}", e))?; + self.db.insert(BLOCKCHAIN_STATE_KEY, serialized)?; + self.db.flush()?; + info!("Blockchain state saved: height={}, slot={}", state.chain_height, state.current_slot); + Ok(()) + } + + pub fn load_blockchain_state(&self) -> Result> { + match self.db.get(BLOCKCHAIN_STATE_KEY)? 
{ + Some(data) => { + let state = bincode::deserialize(&data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize blockchain state: {}", e))?; + info!("Blockchain state loaded from storage"); + Ok(Some(state)) + } + None => { + info!("No existing blockchain state found"); + Ok(None) + } + } + } + + pub fn clear_state(&self) -> Result<()> { + self.db.clear()?; + self.db.flush()?; + info!("Blockchain state cleared"); + Ok(()) + } + + /// Save a block to persistent storage + pub fn save_block(&self, block_hash: &str, block: &traits::UtxoBlock) -> Result<()> { + let key = [BLOCK_PREFIX, block_hash.as_bytes()].concat(); + let serialized = bincode::serialize(block) + .map_err(|e| anyhow::anyhow!("Failed to serialize block: {}", e))?; + self.db.insert(key, serialized)?; + self.db.flush()?; + Ok(()) + } + + /// Load a block from persistent storage + pub fn load_block(&self, block_hash: &str) -> Result> { + let key = [BLOCK_PREFIX, block_hash.as_bytes()].concat(); + match self.db.get(key)? { + Some(data) => { + let block = bincode::deserialize(&data) + .map_err(|e| anyhow::anyhow!("Failed to deserialize block: {}", e))?; + Ok(Some(block)) + } + None => Ok(None), + } + } + + /// Load all blocks referenced in canonical chain + pub fn load_blocks_for_chain(&self, canonical_chain: &[String]) -> Result> { + let mut blocks = std::collections::HashMap::new(); + for block_hash in canonical_chain { + if let Some(block) = self.load_block(block_hash)? 
{ + blocks.insert(block_hash.clone(), block); + } + } + Ok(blocks) + } } impl PolyTorusBlockchain { pub fn new() -> Result { - Self::new_with_p2p_config(None) + let data_dir = env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); + Self::new_with_storage_and_p2p_config(&data_dir, None) + } + + pub fn new_with_storage(data_dir: &str) -> Result { + Self::new_with_storage_and_p2p_config(data_dir, None) } pub fn new_with_p2p_config(p2p_config: Option) -> Result { + // Use persistent storage in current directory + let data_dir = env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); + Self::new_with_storage_and_p2p_config(&data_dir, p2p_config) + } + + pub fn new_with_storage_and_p2p_config(data_dir: &str, p2p_config: Option) -> Result { + // Initialize persistent storage first + let storage = Storage::new(data_dir)?; + info!("Initialized persistent storage at: {}", data_dir); + let execution_config = UtxoExecutionConfig::default(); // テスト用設定: PoW難易度を0に設定 @@ -42,10 +156,32 @@ impl PolyTorusBlockchain { ); let execution_layer = PolyTorusUtxoExecutionLayer::new(execution_config)?; - let consensus_layer = PolyTorusUtxoConsensusLayer::new_as_validator( - consensus_config, - "main_validator".to_string(), - )?; + + // Try to load existing blockchain state + let consensus_layer = if let Some(persistent_state) = storage.load_blockchain_state()? 
{ + info!("Restoring consensus layer from persistent state: height={}, slot={}", + persistent_state.chain_height, persistent_state.current_slot); + + // Load all blocks for the canonical chain + let blocks = storage.load_blocks_for_chain(&persistent_state.canonical_chain)?; + info!("Loaded {} blocks from storage", blocks.len()); + + // Create consensus layer with restored state + PolyTorusUtxoConsensusLayer::new_with_restored_state_and_blocks( + consensus_config, + "main_validator".to_string(), + persistent_state.chain_height, + persistent_state.current_slot, + persistent_state.canonical_chain, + blocks, + )? + } else { + info!("No existing state found, creating new consensus layer"); + PolyTorusUtxoConsensusLayer::new_as_validator( + consensus_config, + "main_validator".to_string(), + )? + }; // Initialize P2P network with provided or default config let p2p_config = p2p_config.unwrap_or_else(|| Self::p2p_config_from_env()); @@ -64,6 +200,7 @@ impl PolyTorusBlockchain { p2p_network, wallet, user_wallets: HashMap::new(), + storage, }) } @@ -106,7 +243,43 @@ impl PolyTorusBlockchain { self.p2p_network.start().await } + /// Start P2P network with adaptive features + pub async fn start_adaptive_p2p_network(&self) -> Result<()> { + self.p2p_network.start_adaptive().await + } + + /// Get P2P network statistics + pub fn get_p2p_network_stats(&self) -> p2p_network::NetworkStats { + self.p2p_network.get_network_stats() + } + + /// Get adaptive network statistics + pub async fn get_adaptive_network_stats(&self) -> p2p_network::adaptive_network::AdaptiveNetworkStats { + self.p2p_network.get_adaptive_network_stats().await + } + + /// Get connected peers information + pub async fn get_connected_peers(&self) -> Vec { + self.p2p_network.get_connected_peers().await + } + + /// Get discovered peers through auto discovery + pub async fn get_discovered_peers(&self) -> Vec { + self.p2p_network.get_discovered_peers().await + } + pub async fn initialize_genesis(&mut self) -> Result { + 
// Check if blockchain state already exists + if let Some(persistent_state) = self.storage.load_blockchain_state()? { + info!("Found existing blockchain state - skipping genesis initialization"); + info!("Current height: {}, slot: {}", persistent_state.chain_height, persistent_state.current_slot); + // Return a dummy ID since we're not creating new genesis + return Ok(UtxoId { + tx_hash: "genesis_tx".to_string(), + output_index: 0, + }); + } + info!("Starting genesis UTXO initialization"); let genesis_utxo_id = UtxoId { @@ -126,6 +299,9 @@ impl PolyTorusBlockchain { self.execution_layer .initialize_genesis_utxo_set(vec![(genesis_utxo_id.clone(), genesis_utxo)])?; info!("Genesis UTXO created: {:?}", genesis_utxo_id); + + // Save initial genesis state + self.save_blockchain_state().await?; info!("Genesis initialization completed successfully"); Ok(genesis_utxo_id) } @@ -257,8 +433,20 @@ impl PolyTorusBlockchain { // Validate and add block let is_valid = self.consensus_layer.validate_utxo_block(&block).await?; if is_valid { + // Save block data to storage before adding to chain + if let Err(e) = self.storage.save_block(&block.hash, &block) { + error!("Failed to save block to storage: {}", e); + } else { + info!("Block saved to persistent storage: {}", block.hash); + } + self.consensus_layer.add_utxo_block(block).await?; info!("Block added to chain"); + + // Save state after successful block addition + if let Err(e) = self.save_blockchain_state().await { + error!("Failed to save blockchain state: {}", e); + } } else { error!("Block validation failed"); } @@ -272,24 +460,70 @@ impl PolyTorusBlockchain { } } - pub async fn get_status(&self) -> Result<()> { + pub async fn get_status(&mut self) -> Result<()> { + // Try to load state from storage first + if let Some(persistent_state) = self.storage.load_blockchain_state()? 
{ + println!("PolyTorus Blockchain Status:"); + println!("============================"); + println!("Chain Height: {}", persistent_state.chain_height); + println!("Current Slot: {}", persistent_state.current_slot); + println!("Chain Length: {} blocks", persistent_state.chain_height + 1); + println!("UTXO Set Hash: {}", persistent_state.utxo_set_hash); + println!("Total Supply: {} units", persistent_state.total_supply); + } else { + // Fallback to in-memory state + let chain_height = self.consensus_layer.get_block_height().await?; + let current_slot = self.consensus_layer.get_current_slot().await?; + let canonical_chain = self.consensus_layer.get_canonical_chain().await?; + let utxo_set_hash = self.execution_layer.get_utxo_set_hash().await?; + let total_supply = self.execution_layer.get_total_supply().await?; + + println!("PolyTorus Blockchain Status:"); + println!("============================"); + println!("Chain Height: {}", chain_height); + println!("Current Slot: {}", current_slot); + println!("Chain Length: {} blocks", canonical_chain.len()); + println!("UTXO Set Hash: {}", utxo_set_hash); + println!("Total Supply: {} units", total_supply); + } + + Ok(()) + } + + /// Save current blockchain state to persistent storage + pub async fn save_blockchain_state(&self) -> Result<()> { let chain_height = self.consensus_layer.get_block_height().await?; let current_slot = self.consensus_layer.get_current_slot().await?; - let canonical_chain = self.consensus_layer.get_canonical_chain().await?; let utxo_set_hash = self.execution_layer.get_utxo_set_hash().await?; let total_supply = self.execution_layer.get_total_supply().await?; + let canonical_chain = self.consensus_layer.get_canonical_chain().await?; + + // Get the hash of the latest block (last in canonical chain) + let last_block_hash = if canonical_chain.len() > 1 { + // Skip genesis block and get the latest + canonical_chain.last().cloned() + } else { + None + }; - println!("PolyTorus Blockchain Status:"); - 
println!("============================"); - println!("Chain Height: {}", chain_height); - println!("Current Slot: {}", current_slot); - println!("Chain Length: {} blocks", canonical_chain.len()); - println!("UTXO Set Hash: {}", utxo_set_hash); - println!("Total Supply: {} units", total_supply); + let state = PersistentBlockchainState { + chain_height, + current_slot, + total_supply, + utxo_set_hash, + canonical_chain, + last_block_hash, + }; + self.storage.save_blockchain_state(&state)?; Ok(()) } + /// Load blockchain state from persistent storage + pub async fn load_blockchain_state(&self) -> Result> { + self.storage.load_blockchain_state() + } + pub async fn deploy_contract( &mut self, owner: &str, @@ -509,8 +743,16 @@ async fn async_main() -> Result<()> { .long("bootstrap-peers") .value_name("PEERS") .help("Comma-separated list of bootstrap peer addresses"), + ) + .arg( + Arg::new("adaptive") + .long("adaptive") + .action(clap::ArgAction::SetTrue) + .help("Enable adaptive P2P networking features"), ), ) + .subcommand(Command::new("network-status").about("Show P2P network status and statistics")) + .subcommand(Command::new("peers").about("Show connected and discovered peers")) .subcommand( Command::new("send") .about("Send a transaction") @@ -647,9 +889,17 @@ async fn async_main() -> Result<()> { println!("Listening on port: {}", listen_port); println!("Bootstrap peers: {:?}", bootstrap_peers); + // Check if adaptive mode is enabled + let adaptive_mode = sub_matches.get_flag("adaptive"); + // Start P2P network - info!("Starting P2P network..."); - blockchain.start_p2p_network().await?; + if adaptive_mode { + info!("Starting adaptive P2P network..."); + blockchain.start_adaptive_p2p_network().await?; + } else { + info!("Starting standard P2P network..."); + blockchain.start_p2p_network().await?; + } } Some(("send", sub_matches)) => { let from = sub_matches.get_one::("from").unwrap(); @@ -676,10 +926,64 @@ async fn async_main() -> Result<()> { } Some(("status", _)) 
=> { println!("Docker: Executing status command..."); - let blockchain = PolyTorusBlockchain::new()?; + let mut blockchain = PolyTorusBlockchain::new()?; blockchain.get_status().await?; println!("Docker: Status command completed."); } + Some(("network-status", _)) => { + info!("Getting P2P network status..."); + let blockchain = PolyTorusBlockchain::new()?; + + // Get basic network statistics + let stats = blockchain.get_p2p_network_stats(); + + // Get adaptive network statistics + let adaptive_stats = blockchain.get_adaptive_network_stats().await; + + println!("P2P Network Status:"); + println!("=================="); + println!("Total Connections: {}", stats.total_connections); + println!("Active Connections: {}", stats.active_connections); + println!("Messages Sent: {}", stats.messages_sent); + println!("Messages Received: {}", stats.messages_received); + println!("Bytes Sent: {}", stats.bytes_sent); + println!("Bytes Received: {}", stats.bytes_received); + println!("Connection Errors: {}", stats.connection_errors); + println!(); + println!("Adaptive Network Statistics:"); + println!("Discovered Peers: {}", adaptive_stats.discovered_peers_count); + println!("DHT Nodes: {}", adaptive_stats.dht_nodes_count); + println!("Connected Peers: {}", adaptive_stats.connected_peers_count); + println!("Discovery Efficiency: {:.2}%", adaptive_stats.discovery_efficiency * 100.0); + } + Some(("peers", _)) => { + info!("Getting peer information..."); + let blockchain = PolyTorusBlockchain::new()?; + + // Get connected peers + let connected_peers = blockchain.get_connected_peers().await; + + // Get discovered peers + let discovered_peers = blockchain.get_discovered_peers().await; + + println!("Peer Information:"); + println!("================"); + println!("Connected Peers ({}):", connected_peers.len()); + for (i, peer) in connected_peers.iter().enumerate() { + println!(" {}. 
{}", i + 1, peer); + } + + println!(); + println!("Discovered Peers ({}):", discovered_peers.len()); + for (i, peer) in discovered_peers.iter().enumerate() { + let last_seen_mins = (std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() - peer.last_seen) / 60; + println!(" {}. {} ({}) - Last seen: {} min ago", + i + 1, peer.node_id, peer.address, last_seen_mins); + } + } Some(("deploy-contract", sub_matches)) => { let wasm_file = sub_matches.get_one::("wasm-file").unwrap(); let owner = sub_matches.get_one::("owner").unwrap(); @@ -754,6 +1058,8 @@ async fn async_main() -> Result<()> { println!(" start-p2p Start node with P2P networking"); println!(" send Send a transaction"); println!(" status Show blockchain status"); + println!(" network-status Show P2P network status and statistics"); + println!(" peers Show connected and discovered peers"); println!(" deploy-contract Deploy a smart contract"); println!(" call-contract Call a smart contract method"); println!(); From f09360766dcc18ceab4d3b8dc52c967c9e559502 Mon Sep 17 00:00:00 2001 From: quantumshiro Date: Sat, 2 Aug 2025 23:15:12 +0900 Subject: [PATCH 4/5] fix: format --- .../benches/network_discovery_benchmarks.rs | 146 +++++---- crates/p2p-network/benches/p2p_benchmarks.rs | 125 ++++---- .../p2p-network/benches/scaling_benchmarks.rs | 91 +++--- crates/p2p-network/benches/unit_benchmarks.rs | 38 +-- crates/p2p-network/src/adaptive_network.rs | 38 ++- crates/p2p-network/src/auto_discovery.rs | 75 +++-- crates/p2p-network/src/discovery.rs | 265 ++++++++------- crates/p2p-network/src/lib.rs | 38 +-- .../tests/non_blocking_adaptive_test.rs | 215 ++++++++----- .../tests/non_blocking_integration_test.rs | 141 ++++---- .../non_blocking_network_joining_test.rs | 185 +++++++---- .../tests/non_blocking_peer_test.rs | 301 +++++++++++------- .../p2p-network/tests/quick_discovery_test.rs | 116 ++++--- src/main.rs | 98 ++++-- 14 files changed, 1107 insertions(+), 765 deletions(-) diff 
--git a/crates/p2p-network/benches/network_discovery_benchmarks.rs b/crates/p2p-network/benches/network_discovery_benchmarks.rs index 2aa002f..ecd6f70 100644 --- a/crates/p2p-network/benches/network_discovery_benchmarks.rs +++ b/crates/p2p-network/benches/network_discovery_benchmarks.rs @@ -3,7 +3,7 @@ //! Benchmarks that measure how efficiently new nodes can discover //! and join existing P2P networks. -use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId}; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; @@ -60,49 +60,49 @@ fn create_test_transaction(id: u64) -> UtxoTransaction { /// Benchmark network bootstrap time fn benchmark_network_bootstrap(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("network_bootstrap"); group.sample_size(20); group.warm_up_time(Duration::from_millis(500)); group.measurement_time(Duration::from_secs(3)); - + group.bench_function("single_node_bootstrap", |b| { b.iter(|| { let rt = Runtime::new().unwrap(); rt.block_on(async { let config = create_discovery_config("bootstrap", 11000, vec![]); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let start = Instant::now(); let net_clone = network.clone(); tokio::spawn(async move { let _ = net_clone.start().await; }); - + // Wait for network to be ready tokio::time::sleep(Duration::from_millis(50)).await; let bootstrap_time = start.elapsed(); - + network.shutdown().await.unwrap(); black_box(bootstrap_time); }); }); }); - + group.finish(); } /// Benchmark peer discovery efficiency fn benchmark_peer_discovery(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("peer_discovery"); group.sample_size(15); group.warm_up_time(Duration::from_millis(1000)); group.measurement_time(Duration::from_secs(5)); - + // Test discovery with different network sizes for network_size in [2, 5, 
10].iter() { group.bench_with_input( @@ -113,7 +113,7 @@ fn benchmark_peer_discovery(c: &mut Criterion) { rt.block_on(async { // Create initial network let mut networks = Vec::new(); - + // Bootstrap node let config = create_discovery_config("peer_0", 11100, vec![]); let network = WebRTCP2PNetwork::new(config).unwrap(); @@ -123,7 +123,7 @@ fn benchmark_peer_discovery(c: &mut Criterion) { }); networks.push(network); tokio::time::sleep(Duration::from_millis(100)).await; - + // Add peers for i in 1..network_size { let config = create_discovery_config( @@ -139,7 +139,7 @@ fn benchmark_peer_discovery(c: &mut Criterion) { networks.push(network); tokio::time::sleep(Duration::from_millis(50)).await; } - + // Measure discovery time for new node let new_config = create_discovery_config( "discoverer", @@ -147,45 +147,45 @@ fn benchmark_peer_discovery(c: &mut Criterion) { vec!["127.0.0.1:11100".to_string()], ); let new_network = WebRTCP2PNetwork::new(new_config).unwrap(); - + let discovery_start = Instant::now(); let net_clone = new_network.clone(); tokio::spawn(async move { let _ = net_clone.start().await; }); - + tokio::time::sleep(Duration::from_millis(200)).await; - + let peers = new_network.get_connected_peers().await; let discovery_time = discovery_start.elapsed(); - + // Cleanup new_network.shutdown().await.unwrap(); for net in networks { net.shutdown().await.unwrap(); } - + black_box((peers.len(), discovery_time)); }); }); }, ); } - + group.finish(); } /// Benchmark network join latency fn benchmark_network_join_latency(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("network_join_latency"); group.sample_size(20); group.warm_up_time(Duration::from_millis(500)); group.measurement_time(Duration::from_secs(3)); - + group.bench_function("join_existing_network", |b| { // Setup persistent network let bootstrap = rt.block_on(async { @@ -198,9 +198,9 @@ fn benchmark_network_join_latency(c: &mut Criterion) { 
tokio::time::sleep(Duration::from_millis(200)).await; network }); - + let mut node_counter = 0; - + b.iter(|| { rt.block_on(async { node_counter += 1; @@ -209,60 +209,64 @@ fn benchmark_network_join_latency(c: &mut Criterion) { 11300 + node_counter as u16, vec!["127.0.0.1:11200".to_string()], ); - + let network = WebRTCP2PNetwork::new(config).unwrap(); - + let join_start = Instant::now(); let net_clone = network.clone(); tokio::spawn(async move { let _ = net_clone.start().await; }); - + // Wait for network to stabilize tokio::time::sleep(Duration::from_millis(100)).await; - + // Verify can broadcast let tx = create_test_transaction(node_counter); let broadcast_result = network.broadcast_transaction(&tx).await; let join_time = join_start.elapsed(); - + network.shutdown().await.unwrap(); - + black_box((broadcast_result.is_ok(), join_time)); }); }); - + // Cleanup rt.block_on(async { bootstrap.shutdown().await.unwrap(); }); }); - + group.finish(); } /// Benchmark network propagation speed fn benchmark_network_propagation(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("network_propagation"); group.sample_size(10); group.warm_up_time(Duration::from_millis(1000)); group.measurement_time(Duration::from_secs(5)); - + group.bench_function("transaction_propagation", |b| { // Setup network let networks = rt.block_on(async { let mut nets = Vec::new(); - + // Create 5-node network for i in 0..5 { let config = create_discovery_config( &format!("prop_node_{}", i), 11400 + i, - if i == 0 { vec![] } else { vec!["127.0.0.1:11400".to_string()] }, + if i == 0 { + vec![] + } else { + vec!["127.0.0.1:11400".to_string()] + }, ); let network = WebRTCP2PNetwork::new(config).unwrap(); let net_clone = network.clone(); @@ -272,33 +276,37 @@ fn benchmark_network_propagation(c: &mut Criterion) { nets.push(network); tokio::time::sleep(Duration::from_millis(100)).await; } - + nets }); - + let mut tx_counter = 0; - + b.iter(|| { 
rt.block_on(async { tx_counter += 1; let tx = create_test_transaction(tx_counter); - + let propagation_start = Instant::now(); - + // Broadcast from first node let _ = networks[0].broadcast_transaction(&tx).await; - + // Measure propagation through network let mut propagation_times = Vec::new(); for (i, network) in networks.iter().enumerate().skip(1) { let stats = network.get_network_stats(); - propagation_times.push((i, propagation_start.elapsed(), stats.messages_received)); + propagation_times.push(( + i, + propagation_start.elapsed(), + stats.messages_received, + )); } - + black_box(propagation_times); }); }); - + // Cleanup rt.block_on(async { for net in networks { @@ -306,32 +314,36 @@ fn benchmark_network_propagation(c: &mut Criterion) { } }); }); - + group.finish(); } /// Benchmark discovery under load fn benchmark_discovery_under_load(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("discovery_under_load"); group.sample_size(10); group.warm_up_time(Duration::from_millis(1000)); group.measurement_time(Duration::from_secs(5)); - + group.bench_function("join_busy_network", |b| { // Setup busy network let (networks, _load_task) = rt.block_on(async { let mut nets = Vec::new(); - + // Create initial network for i in 0..3 { let config = create_discovery_config( &format!("busy_node_{}", i), 11500 + i, - if i == 0 { vec![] } else { vec!["127.0.0.1:11500".to_string()] }, + if i == 0 { + vec![] + } else { + vec!["127.0.0.1:11500".to_string()] + }, ); let network = WebRTCP2PNetwork::new(config).unwrap(); let net_clone = network.clone(); @@ -341,7 +353,7 @@ fn benchmark_discovery_under_load(c: &mut Criterion) { nets.push(network); tokio::time::sleep(Duration::from_millis(100)).await; } - + // Generate load let nets_clone = nets.clone(); let load_task = tokio::spawn(async move { @@ -349,16 +361,18 @@ fn benchmark_discovery_under_load(c: &mut Criterion) { loop { counter += 1; let tx = 
create_test_transaction(10000 + counter); - let _ = nets_clone[(counter % 3) as usize].broadcast_transaction(&tx).await; + let _ = nets_clone[(counter % 3) as usize] + .broadcast_transaction(&tx) + .await; tokio::time::sleep(Duration::from_millis(10)).await; } }); - + (nets, load_task) }); - + let mut joiner_counter = 0; - + b.iter(|| { rt.block_on(async { joiner_counter += 1; @@ -367,27 +381,27 @@ fn benchmark_discovery_under_load(c: &mut Criterion) { 11600 + joiner_counter as u16, vec!["127.0.0.1:11500".to_string()], ); - + let network = WebRTCP2PNetwork::new(config).unwrap(); - + let join_start = Instant::now(); let net_clone = network.clone(); tokio::spawn(async move { let _ = net_clone.start().await; }); - + tokio::time::sleep(Duration::from_millis(200)).await; - + let peers = network.get_connected_peers().await; let stats = network.get_network_stats(); let join_time = join_start.elapsed(); - + network.shutdown().await.unwrap(); - + black_box((peers.len(), stats.messages_received, join_time)); }); }); - + // Cleanup rt.block_on(async { for net in networks { @@ -395,7 +409,7 @@ fn benchmark_discovery_under_load(c: &mut Criterion) { } }); }); - + group.finish(); } @@ -408,4 +422,4 @@ criterion_group!( benchmark_discovery_under_load ); -criterion_main!(benches); \ No newline at end of file +criterion_main!(benches); diff --git a/crates/p2p-network/benches/p2p_benchmarks.rs b/crates/p2p-network/benches/p2p_benchmarks.rs index 1dcf9ec..2a2c5f5 100644 --- a/crates/p2p-network/benches/p2p_benchmarks.rs +++ b/crates/p2p-network/benches/p2p_benchmarks.rs @@ -40,24 +40,20 @@ fn create_realistic_config(node_id: &str, port: u16, bootstrap_port: Option fn create_realistic_transaction(id: u64, value: u64) -> UtxoTransaction { UtxoTransaction { hash: format!("realistic_tx_{:08x}", id), - inputs: vec![ - TxInput { - utxo_id: UtxoId { - tx_hash: format!("input_{:08x}", id / 2), - output_index: (id % 3) as u32, - }, - redeemer: vec![0u8; 64], // Realistic size - signature: 
vec![0u8; 64], // ECDSA signature size - } - ], - outputs: vec![ - TxOutput { - value, - script: vec![0u8; 25], // Typical P2PKH script size - datum: Some(vec![0u8; 32]), // 32-byte datum - datum_hash: Some(format!("datum_{:08x}", id)), - } - ], + inputs: vec![TxInput { + utxo_id: UtxoId { + tx_hash: format!("input_{:08x}", id / 2), + output_index: (id % 3) as u32, + }, + redeemer: vec![0u8; 64], // Realistic size + signature: vec![0u8; 64], // ECDSA signature size + }], + outputs: vec![TxOutput { + value, + script: vec![0u8; 25], // Typical P2PKH script size + datum: Some(vec![0u8; 32]), // 32-byte datum + datum_hash: Some(format!("datum_{:08x}", id)), + }], fee: value / 100, // 1% fee validity_range: Some((id * 1000, (id + 100) * 1000)), script_witness: vec![vec![0u8; 128]], // Witness data @@ -68,12 +64,12 @@ fn create_realistic_transaction(id: u64, value: u64) -> UtxoTransaction { /// Benchmark network initialization performance fn benchmark_network_initialization(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("network_initialization"); group.sample_size(50); group.warm_up_time(Duration::from_millis(200)); group.measurement_time(Duration::from_secs(2)); - + group.bench_function("create_and_configure", |b| { b.iter(|| { let config = create_realistic_config("bench_node", 9000, None); @@ -81,23 +77,23 @@ fn benchmark_network_initialization(c: &mut Criterion) { black_box(network); }); }); - + group.finish(); } /// Benchmark transaction processing throughput fn benchmark_transaction_throughput(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); let config = create_realistic_config("throughput_node", 9001, None); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let mut group = c.benchmark_group("transaction_throughput"); group.sample_size(30); group.warm_up_time(Duration::from_millis(300)); group.measurement_time(Duration::from_secs(3)); - + // Single transaction processing 
group.bench_function("single_transaction", |b| { b.iter(|| { @@ -108,7 +104,7 @@ fn benchmark_transaction_throughput(c: &mut Criterion) { }); }); }); - + // Batch transaction processing for batch_size in [10, 50, 100].iter() { group.throughput(Throughput::Elements(*batch_size as u64)); @@ -119,9 +115,11 @@ fn benchmark_transaction_throughput(c: &mut Criterion) { b.iter(|| { rt.block_on(async { let transactions: Vec<_> = (0..batch_size) - .map(|i| create_realistic_transaction(i as u64, 100000 + i as u64 * 1000)) + .map(|i| { + create_realistic_transaction(i as u64, 100000 + i as u64 * 1000) + }) .collect(); - + let mut results = Vec::new(); for tx in &transactions { let result = network.broadcast_transaction(tx).await; @@ -133,30 +131,30 @@ fn benchmark_transaction_throughput(c: &mut Criterion) { }, ); } - + group.finish(); } /// Benchmark network statistics collection fn benchmark_network_statistics(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); let config = create_realistic_config("stats_node", 9002, None); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let mut group = c.benchmark_group("network_statistics"); group.sample_size(100); group.warm_up_time(Duration::from_millis(100)); group.measurement_time(Duration::from_millis(800)); - + group.bench_function("get_network_stats", |b| { b.iter(|| { let stats = network.get_network_stats(); black_box(stats); }); }); - + group.bench_function("get_connected_peers", |b| { b.iter(|| { rt.block_on(async { @@ -165,31 +163,33 @@ fn benchmark_network_statistics(c: &mut Criterion) { }); }); }); - + group.bench_function("request_blockchain_data", |b| { b.iter(|| { rt.block_on(async { - let result = network.request_blockchain_data( - "transaction".to_string(), - format!("hash_{}", rand::random::()) - ).await; + let result = network + .request_blockchain_data( + "transaction".to_string(), + format!("hash_{}", rand::random::()), + ) + .await; let _ = black_box(result); }); }); }); - + 
group.finish(); } /// Benchmark transaction serialization/deserialization fn benchmark_transaction_serialization(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("transaction_serialization"); group.sample_size(100); group.warm_up_time(Duration::from_millis(200)); group.measurement_time(Duration::from_secs(1)); - + // Create realistic transactions of different sizes let small_tx = create_realistic_transaction(1, 1000); let medium_tx = UtxoTransaction { @@ -203,110 +203,109 @@ fn benchmark_transaction_serialization(c: &mut Criterion) { script_witness: vec![vec![0u8; 256]; 10], ..small_tx.clone() }; - + group.bench_function("serialize_small_tx", |b| { b.iter(|| { let serialized = bincode::serialize(&small_tx).unwrap(); black_box(serialized); }); }); - + group.bench_function("serialize_medium_tx", |b| { b.iter(|| { let serialized = bincode::serialize(&medium_tx).unwrap(); black_box(serialized); }); }); - + group.bench_function("serialize_large_tx", |b| { b.iter(|| { let serialized = bincode::serialize(&large_tx).unwrap(); black_box(serialized); }); }); - + // Deserialization benchmarks let small_serialized = bincode::serialize(&small_tx).unwrap(); let medium_serialized = bincode::serialize(&medium_tx).unwrap(); let large_serialized = bincode::serialize(&large_tx).unwrap(); - + group.bench_function("deserialize_small_tx", |b| { b.iter(|| { let tx: UtxoTransaction = bincode::deserialize(&small_serialized).unwrap(); black_box(tx); }); }); - + group.bench_function("deserialize_medium_tx", |b| { b.iter(|| { let tx: UtxoTransaction = bincode::deserialize(&medium_serialized).unwrap(); black_box(tx); }); }); - + group.bench_function("deserialize_large_tx", |b| { b.iter(|| { let tx: UtxoTransaction = bincode::deserialize(&large_serialized).unwrap(); black_box(tx); }); }); - + group.finish(); } /// Benchmark concurrent operations fn benchmark_concurrent_operations(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut 
group = c.benchmark_group("concurrent_operations"); group.sample_size(20); group.warm_up_time(Duration::from_millis(500)); group.measurement_time(Duration::from_secs(2)); - + group.bench_function("concurrent_network_creation", |b| { b.iter(|| { rt.block_on(async { let configs: Vec<_> = (0..5) .map(|i| create_realistic_config(&format!("node_{}", i), 9100 + i, None)) .collect(); - - let networks: Vec<_> = configs.into_iter() + + let networks: Vec<_> = configs + .into_iter() .map(WebRTCP2PNetwork::new) .collect::, _>>() .unwrap(); - + black_box(networks); }); }); }); - + group.bench_function("concurrent_broadcasts", |b| { b.iter(|| { rt.block_on(async { let config = create_realistic_config("concurrent_node", 9200, None); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let transactions: Vec<_> = (0..10) .map(|i| create_realistic_transaction(i, 10000 + i * 1000)) .collect(); - + let mut handles = Vec::new(); for tx in transactions { let net = network.clone(); - let handle = tokio::spawn(async move { - net.broadcast_transaction(&tx).await - }); + let handle = tokio::spawn(async move { net.broadcast_transaction(&tx).await }); handles.push(handle); } - + let results = futures::future::join_all(handles).await; let _ = black_box(results); }); }); }); - + group.finish(); } @@ -319,4 +318,4 @@ criterion_group!( benchmark_concurrent_operations ); -criterion_main!(benches); \ No newline at end of file +criterion_main!(benches); diff --git a/crates/p2p-network/benches/scaling_benchmarks.rs b/crates/p2p-network/benches/scaling_benchmarks.rs index 80e0ad8..f486e2e 100644 --- a/crates/p2p-network/benches/scaling_benchmarks.rs +++ b/crates/p2p-network/benches/scaling_benchmarks.rs @@ -6,7 +6,10 @@ //! - Increasing message sizes //! 
- Network partitioning and recovery -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, PlotConfiguration, Throughput}; +use criterion::{ + black_box, criterion_group, criterion_main, BenchmarkId, Criterion, PlotConfiguration, + Throughput, +}; use std::time::Duration; use tokio::runtime::Runtime; @@ -71,12 +74,12 @@ fn create_sized_transaction(id: u64, num_inputs: usize, num_outputs: usize) -> U /// Benchmark scaling with increasing number of network instances fn benchmark_peer_scaling(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("peer_scaling"); group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); group.measurement_time(Duration::from_secs(5)); - + // Test with different numbers of peers for num_peers in [2, 5, 10, 20].iter() { group.throughput(Throughput::Elements(*num_peers as u64)); @@ -98,37 +101,38 @@ fn benchmark_peer_scaling(c: &mut Criterion) { let network = WebRTCP2PNetwork::new(config).unwrap(); networks.push(network); } - + // Simulate network activity let tx = create_sized_transaction(1, 2, 3); for network in &networks { let _ = network.broadcast_transaction(&tx).await; } - + black_box(networks); }); }); }, ); } - + group.finish(); } /// Benchmark transaction throughput scaling fn benchmark_transaction_throughput_scaling(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); let config = create_scaling_config("throughput_node", 9600, 100); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let mut group = c.benchmark_group("transaction_throughput_scaling"); group.sample_size(15); group.warm_up_time(Duration::from_millis(300)); group.measurement_time(Duration::from_secs(3)); - group.plot_config(PlotConfiguration::default().summary_scale(criterion::AxisScale::Logarithmic)); - + group + .plot_config(PlotConfiguration::default().summary_scale(criterion::AxisScale::Logarithmic)); + // Test with increasing batch sizes for batch_size in [1, 10, 50, 
100, 500, 1000].iter() { group.throughput(Throughput::Elements(*batch_size as u64)); @@ -141,39 +145,39 @@ fn benchmark_transaction_throughput_scaling(c: &mut Criterion) { let transactions: Vec<_> = (0..batch_size) .map(|i| create_sized_transaction(i as u64, 1, 2)) .collect(); - + let start = std::time::Instant::now(); for tx in &transactions { let _ = network.broadcast_transaction(tx).await; } let elapsed = start.elapsed(); - + black_box((transactions, elapsed)); }); }); }, ); } - + group.finish(); } /// Benchmark message size scaling fn benchmark_message_size_scaling(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); let config = create_scaling_config("size_node", 9700, 50); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let mut group = c.benchmark_group("message_size_scaling"); group.sample_size(20); group.warm_up_time(Duration::from_millis(200)); group.measurement_time(Duration::from_secs(2)); - + // Test with different transaction sizes (inputs/outputs) let sizes = vec![(1, 1), (5, 5), (10, 10), (25, 25), (50, 50)]; - + for (num_inputs, num_outputs) in sizes { let param = format!("{}in_{}out", num_inputs, num_outputs); group.throughput(Throughput::Elements((num_inputs + num_outputs) as u64)); @@ -186,7 +190,7 @@ fn benchmark_message_size_scaling(c: &mut Criterion) { let tx = create_sized_transaction(1, num_inputs, num_outputs); let serialized = bincode::serialize(&tx).unwrap(); let size = serialized.len(); - + let result = network.broadcast_transaction(&tx).await; let _ = black_box((result, size)); }); @@ -194,21 +198,21 @@ fn benchmark_message_size_scaling(c: &mut Criterion) { }, ); } - + group.finish(); } /// Benchmark concurrent operations scaling fn benchmark_concurrent_operations_scaling(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("concurrent_operations_scaling"); group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); 
group.measurement_time(Duration::from_secs(3)); - + // Test with different levels of concurrency for concurrency in [1, 5, 10, 25, 50].iter() { group.throughput(Throughput::Elements(*concurrency as u64)); @@ -220,22 +224,21 @@ fn benchmark_concurrent_operations_scaling(c: &mut Criterion) { rt.block_on(async { let config = create_scaling_config("concurrent_node", 9800, 100); let network = WebRTCP2PNetwork::new(config).unwrap(); - + // Create transactions let transactions: Vec<_> = (0..concurrency) .map(|i| create_sized_transaction(i as u64, 2, 3)) .collect(); - + // Spawn concurrent broadcasts let mut handles = Vec::new(); for tx in transactions { let net = network.clone(); - let handle = tokio::spawn(async move { - net.broadcast_transaction(&tx).await - }); + let handle = + tokio::spawn(async move { net.broadcast_transaction(&tx).await }); handles.push(handle); } - + // Wait for all to complete let results = futures::future::join_all(handles).await; black_box(results); @@ -244,21 +247,21 @@ fn benchmark_concurrent_operations_scaling(c: &mut Criterion) { }, ); } - + group.finish(); } /// Benchmark network partitioning and recovery fn benchmark_network_resilience(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("network_resilience"); group.sample_size(10); group.warm_up_time(Duration::from_millis(1000)); group.measurement_time(Duration::from_secs(5)); - + group.bench_function("partition_recovery", |b| { b.iter(|| { rt.block_on(async { @@ -277,37 +280,37 @@ fn benchmark_network_resilience(c: &mut Criterion) { } partitions.push(networks); } - + // Simulate partition healing by broadcasting across partitions let tx = create_sized_transaction(1, 5, 5); let mut broadcast_count = 0; - + for partition in &partitions { for network in partition { let _ = network.broadcast_transaction(&tx).await; broadcast_count += 1; } } - + black_box((partitions, broadcast_count)); }); }); }); - + group.finish(); } /// Benchmark 
memory usage scaling fn benchmark_memory_scaling(c: &mut Criterion) { init_logging(); - + let rt = Runtime::new().unwrap(); - + let mut group = c.benchmark_group("memory_scaling"); group.sample_size(10); group.warm_up_time(Duration::from_millis(300)); group.measurement_time(Duration::from_secs(2)); - + // Test memory usage with increasing number of stored transactions for num_transactions in [100, 500, 1000, 5000].iter() { group.bench_with_input( @@ -318,7 +321,7 @@ fn benchmark_memory_scaling(c: &mut Criterion) { rt.block_on(async { let config = create_scaling_config("memory_node", 10000, 50); let network = WebRTCP2PNetwork::new(config).unwrap(); - + // Create and broadcast many transactions let mut total_size = 0; for i in 0..num_transactions { @@ -327,17 +330,17 @@ fn benchmark_memory_scaling(c: &mut Criterion) { total_size += serialized.len(); let _ = network.broadcast_transaction(&tx).await; } - + // Get network statistics let stats = network.get_network_stats(); - + black_box((stats, total_size)); }); }); }, ); } - + group.finish(); } @@ -351,4 +354,4 @@ criterion_group!( benchmark_memory_scaling ); -criterion_main!(benches); \ No newline at end of file +criterion_main!(benches); diff --git a/crates/p2p-network/benches/unit_benchmarks.rs b/crates/p2p-network/benches/unit_benchmarks.rs index 43feb3d..f1b2f91 100644 --- a/crates/p2p-network/benches/unit_benchmarks.rs +++ b/crates/p2p-network/benches/unit_benchmarks.rs @@ -59,12 +59,12 @@ fn create_transaction(id: u64) -> UtxoTransaction { /// Benchmark core P2P operations fn benchmark_core_operations(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("core_operations"); group.sample_size(20); group.warm_up_time(Duration::from_millis(200)); group.measurement_time(Duration::from_secs(1)); - + // Network creation group.bench_function("network_creation", |b| { b.iter(|| { @@ -73,7 +73,7 @@ fn benchmark_core_operations(c: &mut Criterion) { black_box(network); }); }); - + // Transaction 
creation group.bench_function("transaction_creation", |b| { b.iter(|| { @@ -81,7 +81,7 @@ fn benchmark_core_operations(c: &mut Criterion) { black_box(tx); }); }); - + // Serialization let tx = create_transaction(12345); group.bench_function("transaction_serialize", |b| { @@ -90,7 +90,7 @@ fn benchmark_core_operations(c: &mut Criterion) { black_box(serialized); }); }); - + // Deserialization let serialized = bincode::serialize(&tx).unwrap(); group.bench_function("transaction_deserialize", |b| { @@ -99,22 +99,22 @@ fn benchmark_core_operations(c: &mut Criterion) { black_box(deserialized); }); }); - + group.finish(); } /// Benchmark batch processing fn benchmark_batch_processing(c: &mut Criterion) { init_logging(); - + let mut group = c.benchmark_group("batch_processing"); group.sample_size(15); group.warm_up_time(Duration::from_millis(300)); group.measurement_time(Duration::from_secs(1)); - + for batch_size in [10, 50].iter() { group.throughput(Throughput::Elements(*batch_size as u64)); - + // Batch transaction creation group.bench_with_input( format!("create_batch_{}", batch_size), @@ -128,7 +128,7 @@ fn benchmark_batch_processing(c: &mut Criterion) { }); }, ); - + // Batch serialization group.bench_with_input( format!("serialize_batch_{}", batch_size), @@ -137,7 +137,7 @@ fn benchmark_batch_processing(c: &mut Criterion) { let transactions: Vec<_> = (0..batch_size) .map(|i| create_transaction(i as u64)) .collect(); - + b.iter(|| { let serialized: Vec<_> = transactions .iter() @@ -148,29 +148,29 @@ fn benchmark_batch_processing(c: &mut Criterion) { }, ); } - + group.finish(); } /// Benchmark network statistics fn benchmark_network_stats(c: &mut Criterion) { init_logging(); - + let config = create_config("stats_node", 8105); let network = WebRTCP2PNetwork::new(config).unwrap(); - + let mut group = c.benchmark_group("network_stats"); group.sample_size(30); group.warm_up_time(Duration::from_millis(100)); group.measurement_time(Duration::from_millis(500)); - + 
group.bench_function("get_stats", |b| { b.iter(|| { let stats = network.get_network_stats(); black_box(stats); }); }); - + group.bench_function("get_peers", |b| { let rt = tokio::runtime::Runtime::new().unwrap(); b.iter(|| { @@ -180,15 +180,15 @@ fn benchmark_network_stats(c: &mut Criterion) { }); }); }); - + group.finish(); } criterion_group!( benches, benchmark_core_operations, - benchmark_batch_processing, + benchmark_batch_processing, benchmark_network_stats ); -criterion_main!(benches); \ No newline at end of file +criterion_main!(benches); diff --git a/crates/p2p-network/src/adaptive_network.rs b/crates/p2p-network/src/adaptive_network.rs index b2bf8b8..f2fe78b 100644 --- a/crates/p2p-network/src/adaptive_network.rs +++ b/crates/p2p-network/src/adaptive_network.rs @@ -39,7 +39,10 @@ impl WebRTCP2PNetwork { discovery.get_discovered_peers().await }; - info!("Discovered {} peers through auto discovery", discovered_peers.len()); + info!( + "Discovered {} peers through auto discovery", + discovered_peers.len() + ); // Add discovered peers to DHT for peer in &discovered_peers { @@ -51,17 +54,17 @@ impl WebRTCP2PNetwork { match peer_addr.parse() { Ok(addr) => { let peer_id = format!("bootstrap_{}", uuid::Uuid::new_v4()); - match self.connect_to_peer(peer_id.clone(), peer_addr.clone()).await { + match self + .connect_to_peer(peer_id.clone(), peer_addr.clone()) + .await + { Ok(_) => { info!("Connected to bootstrap peer: {}", peer_addr); // Add to auto discovery let discovery = self.auto_discovery.read().await; discovery.add_peer(peer_id, addr).await; } - Err(e) => warn!( - "Failed to connect to bootstrap peer {}: {}", - peer_addr, e - ), + Err(e) => warn!("Failed to connect to bootstrap peer {}: {}", peer_addr, e), } } Err(_) => warn!("Invalid bootstrap peer address: {}", peer_addr), @@ -130,7 +133,7 @@ impl WebRTCP2PNetwork { drop(peers_read); // Release the lock info!("Attempting to connect to discovered peer: {}", peer.node_id); - + // This would need to be 
implemented as a method that doesn't require &self // For now, just add to DHT dht.add_node(peer.node_id.clone(), peer.address).await; @@ -146,7 +149,10 @@ impl WebRTCP2PNetwork { // Adaptive connection strategy: connect to more peers if network is small if current_peers < 3 { - debug!("Network is small ({}), actively seeking more peers", current_peers); + debug!( + "Network is small ({}), actively seeking more peers", + current_peers + ); } } }); @@ -176,16 +182,22 @@ impl WebRTCP2PNetwork { } /// Adaptive broadcast with better peer targeting - pub async fn adaptive_broadcast_transaction(&self, transaction: &traits::UtxoTransaction) -> Result<()> { + pub async fn adaptive_broadcast_transaction( + &self, + transaction: &traits::UtxoTransaction, + ) -> Result<()> { // Use normal broadcast first self.broadcast_transaction(transaction).await?; // Also try to broadcast to discovered peers that we might not be connected to let discovered_peers = self.get_discovered_peers().await; - + if discovered_peers.len() > 0 { - debug!("Adaptive broadcast: also considering {} discovered peers", discovered_peers.len()); - + debug!( + "Adaptive broadcast: also considering {} discovered peers", + discovered_peers.len() + ); + // In a full implementation, we could establish temporary connections // or use other means to reach these peers } @@ -222,4 +234,4 @@ pub struct AdaptiveNetworkStats { pub dht_nodes_count: usize, pub connected_peers_count: usize, pub discovery_efficiency: f32, // Ratio of connected to discovered peers -} \ No newline at end of file +} diff --git a/crates/p2p-network/src/auto_discovery.rs b/crates/p2p-network/src/auto_discovery.rs index 0f64437..d07c51f 100644 --- a/crates/p2p-network/src/auto_discovery.rs +++ b/crates/p2p-network/src/auto_discovery.rs @@ -31,9 +31,7 @@ pub enum SimpleDiscoveryMessage { /// Request peer list PeerRequest { node_id: String }, /// Respond with known peers - PeerResponse { - peers: Vec, - }, + PeerResponse { peers: Vec }, } /// 
Simple peer information @@ -64,26 +62,26 @@ impl AutoDiscovery { discovery_ports: vec![9000, 9001, 9002, 9010, 9020, 9100], // Common discovery ports } } - + /// Start discovery service pub async fn start(&mut self) -> Result<()> { // Try to bind discovery socket let discovery_addr = format!("0.0.0.0:{}", self.listen_port + 1000); // Offset for discovery - + match UdpSocket::bind(&discovery_addr).await { Ok(socket) => { info!("Auto discovery started on {}", discovery_addr); self.socket = Some(Arc::new(socket)); - + // Start announcement task self.start_announcement_task().await; - + // Start peer discovery task self.start_peer_discovery_task().await; - + // Start listening task self.start_listening_task().await; - + Ok(()) } Err(e) => { @@ -92,7 +90,7 @@ impl AutoDiscovery { } } } - + /// Start announcement task async fn start_announcement_task(&self) { if let Some(socket) = &self.socket { @@ -100,19 +98,19 @@ impl AutoDiscovery { let node_id = self.node_id.clone(); let listen_port = self.listen_port; let discovery_ports = self.discovery_ports.clone(); - + tokio::spawn(async move { let mut interval = interval(Duration::from_secs(15)); // Announce every 15 seconds - + loop { interval.tick().await; - + let announce = SimpleDiscoveryMessage::Announce { node_id: node_id.clone(), address: format!("127.0.0.1:{}", listen_port).parse().unwrap(), port: listen_port, }; - + if let Ok(data) = bincode::serialize(&announce) { // Broadcast to discovery ports for port in &discovery_ports { @@ -121,7 +119,7 @@ impl AutoDiscovery { let _ = socket.send_to(&data, addr).await; } } - + // Also try broadcast let broadcast_addr = format!("255.255.255.255:{}", listen_port + 1000); if let Ok(addr) = broadcast_addr.parse::() { @@ -132,24 +130,24 @@ impl AutoDiscovery { }); } } - + /// Start peer discovery task async fn start_peer_discovery_task(&self) { if let Some(socket) = &self.socket { let socket = socket.clone(); let node_id = self.node_id.clone(); let discovery_ports = 
self.discovery_ports.clone(); - + tokio::spawn(async move { let mut interval = interval(Duration::from_secs(30)); // Discover every 30 seconds - + loop { interval.tick().await; - + let request = SimpleDiscoveryMessage::PeerRequest { node_id: node_id.clone(), }; - + if let Ok(data) = bincode::serialize(&request) { // Send discovery requests for port in &discovery_ports { @@ -163,23 +161,29 @@ impl AutoDiscovery { }); } } - + /// Start listening task async fn start_listening_task(&self) { if let Some(socket) = &self.socket { let socket = socket.clone(); let known_peers = self.known_peers.clone(); let node_id = self.node_id.clone(); - + tokio::spawn(async move { let mut buffer = [0u8; 1024]; - + loop { match socket.recv_from(&mut buffer).await { Ok((len, from)) => { - if let Ok(message) = bincode::deserialize::(&buffer[..len]) { + if let Ok(message) = + bincode::deserialize::(&buffer[..len]) + { match message { - SimpleDiscoveryMessage::Announce { node_id: peer_id, address, port: _ } => { + SimpleDiscoveryMessage::Announce { + node_id: peer_id, + address, + port: _, + } => { if peer_id != node_id { let peer_info = SimplePeerInfo { node_id: peer_id.clone(), @@ -189,7 +193,7 @@ impl AutoDiscovery { .unwrap() .as_secs(), }; - + known_peers.write().await.insert(peer_id, peer_info); debug!("Discovered peer via announcement: {}", address); } @@ -197,12 +201,13 @@ impl AutoDiscovery { SimpleDiscoveryMessage::PeerRequest { node_id: _ } => { // Respond with known peers let peers = known_peers.read().await; - let peer_list: Vec = peers.values().cloned().collect(); - + let peer_list: Vec = + peers.values().cloned().collect(); + let response = SimpleDiscoveryMessage::PeerResponse { peers: peer_list, }; - + if let Ok(data) = bincode::serialize(&response) { let _ = socket.send_to(&data, from).await; } @@ -226,12 +231,12 @@ impl AutoDiscovery { }); } } - + /// Get discovered peers pub async fn get_discovered_peers(&self) -> Vec { 
self.known_peers.read().await.values().cloned().collect() } - + /// Add a peer manually pub async fn add_peer(&self, node_id: String, address: SocketAddr) { let peer_info = SimplePeerInfo { @@ -242,18 +247,18 @@ impl AutoDiscovery { .unwrap() .as_secs(), }; - + self.known_peers.write().await.insert(node_id, peer_info); } - + /// Clean up old peers pub async fn cleanup_old_peers(&self) { let now = SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs(); - + let mut peers = self.known_peers.write().await; peers.retain(|_, peer| now - peer.last_seen < 300); // Remove peers not seen for 5 minutes } -} \ No newline at end of file +} diff --git a/crates/p2p-network/src/discovery.rs b/crates/p2p-network/src/discovery.rs index e211c36..07307e0 100644 --- a/crates/p2p-network/src/discovery.rs +++ b/crates/p2p-network/src/discovery.rs @@ -52,19 +52,11 @@ pub enum DiscoveryMessage { max_peers: u32, }, /// Response to query - Response { - peers: Vec, - }, + Response { peers: Vec }, /// Ping to check if peer is alive - Ping { - node_id: String, - timestamp: u64, - }, + Ping { node_id: String, timestamp: u64 }, /// Pong response to ping - Pong { - node_id: String, - timestamp: u64, - }, + Pong { node_id: String, timestamp: u64 }, } /// Peer discovery service @@ -101,40 +93,52 @@ impl PeerDiscovery { let socket = UdpSocket::bind(listen_addr) .await .context("Failed to bind UDP socket for discovery")?; - + // Enable SO_REUSEADDR for better socket reuse socket.set_broadcast(true)?; - + info!("Peer discovery service started on {}", listen_addr); - + let multicast_addr = "224.0.0.1:9999".parse().unwrap(); // Multicast address for local discovery - + Ok(Self { node_id, listen_addr, socket: Arc::new(socket), known_peers: Arc::new(RwLock::new(HashMap::new())), dht_nodes: Arc::new(RwLock::new(HashMap::new())), - connection_pool: Arc::new(Mutex::new(ConnectionPool::new(50, Duration::from_secs(300)))), + connection_pool: Arc::new(Mutex::new(ConnectionPool::new( + 50, 
+ Duration::from_secs(300), + ))), multicast_addr, }) } - + /// Start the discovery service pub async fn start(&self) -> Result<()> { let socket = self.socket.clone(); let known_peers = self.known_peers.clone(); let node_id = self.node_id.clone(); - + // Start listening for discovery messages let listen_task = tokio::spawn(async move { let mut buffer = [0u8; 1024]; - + loop { match socket.recv_from(&mut buffer).await { Ok((len, from)) => { - if let Ok(message) = bincode::deserialize::(&buffer[..len]) { - Self::handle_discovery_message(&node_id, &known_peers, message, from, &socket).await; + if let Ok(message) = + bincode::deserialize::(&buffer[..len]) + { + Self::handle_discovery_message( + &node_id, + &known_peers, + message, + from, + &socket, + ) + .await; } } Err(e) => { @@ -144,23 +148,23 @@ impl PeerDiscovery { } } }); - + // Start periodic announcements let announce_task = self.start_announcements().await; - + // Start peer cleanup let cleanup_task = self.start_cleanup().await; - + // Run all tasks concurrently tokio::select! 
{ _ = listen_task => warn!("Discovery listen task ended"), _ = announce_task => warn!("Discovery announce task ended"), _ = cleanup_task => warn!("Discovery cleanup task ended"), } - + Ok(()) } - + /// Handle incoming discovery messages async fn handle_discovery_message( node_id: &str, @@ -170,33 +174,44 @@ impl PeerDiscovery { socket: &UdpSocket, ) { match message { - DiscoveryMessage::Announce { node_id: peer_id, address, capabilities } => { + DiscoveryMessage::Announce { + node_id: peer_id, + address, + capabilities, + } => { if peer_id != node_id { let peer_info = PeerInfo { node_id: peer_id.clone(), address, - last_seen: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(), + last_seen: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), capabilities, version: "1.0.0".to_string(), }; - + known_peers.write().await.insert(peer_id, peer_info); debug!("Discovered new peer at {}", address); } } - DiscoveryMessage::Query { target_id: _, max_peers } => { + DiscoveryMessage::Query { + target_id: _, + max_peers, + } => { let peers = known_peers.read().await; - let peer_list: Vec = peers.values() - .take(max_peers as usize) - .cloned() - .collect(); - + let peer_list: Vec = + peers.values().take(max_peers as usize).cloned().collect(); + let response = DiscoveryMessage::Response { peers: peer_list }; if let Ok(data) = bincode::serialize(&response) { let _ = socket.send_to(&data, from).await; } } - DiscoveryMessage::Ping { node_id: _peer_id, timestamp } => { + DiscoveryMessage::Ping { + node_id: _peer_id, + timestamp, + } => { let pong = DiscoveryMessage::Pong { node_id: node_id.to_string(), timestamp, @@ -216,95 +231,99 @@ impl PeerDiscovery { _ => {} } } - + /// Start periodic announcements async fn start_announcements(&self) -> tokio::task::JoinHandle<()> { let socket = self.socket.clone(); let node_id = self.node_id.clone(); let listen_addr = self.listen_addr; let multicast_addr = self.multicast_addr; - + tokio::spawn(async move { let 
mut interval = interval(Duration::from_secs(30)); // Announce every 30 seconds - + loop { interval.tick().await; - + let announce = DiscoveryMessage::Announce { node_id: node_id.clone(), address: listen_addr, capabilities: vec!["p2p".to_string(), "blockchain".to_string()], }; - + if let Ok(data) = bincode::serialize(&announce) { // Broadcast to multicast address let _ = socket.send_to(&data, multicast_addr).await; - + // Also broadcast to local subnet - let broadcast_addr = SocketAddr::new( - IpAddr::V4(Ipv4Addr::BROADCAST), - listen_addr.port() - ); + let broadcast_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), listen_addr.port()); let _ = socket.send_to(&data, broadcast_addr).await; } } }) } - + /// Start cleanup of stale peers async fn start_cleanup(&self) -> tokio::task::JoinHandle<()> { let known_peers = self.known_peers.clone(); - + tokio::spawn(async move { let mut interval = interval(Duration::from_secs(60)); // Cleanup every minute - + loop { interval.tick().await; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); let mut peers = known_peers.write().await; - + // Remove peers not seen for more than 5 minutes peers.retain(|_, peer| now - peer.last_seen < 300); } }) } - + /// Discover peers in the network pub async fn discover_peers(&self, max_peers: u32) -> Result> { let query = DiscoveryMessage::Query { target_id: None, max_peers, }; - + let data = bincode::serialize(&query)?; - + // Send query to known peers and multicast let known = self.known_peers.read().await; for peer in known.values() { let _ = self.socket.send_to(&data, peer.address).await; } - + // Also send to multicast let _ = self.socket.send_to(&data, self.multicast_addr).await; - + // Wait a bit for responses tokio::time::sleep(Duration::from_millis(500)).await; - + let peers = self.known_peers.read().await; Ok(peers.values().cloned().collect()) } - + /// Get list of 
known peers pub async fn get_known_peers(&self) -> Vec { self.known_peers.read().await.values().cloned().collect() } - + /// Add a known peer manually pub async fn add_peer(&self, peer: PeerInfo) { - self.known_peers.write().await.insert(peer.node_id.clone(), peer); + self.known_peers + .write() + .await + .insert(peer.node_id.clone(), peer); } - + /// Ping a specific peer to check if it's alive pub async fn ping_peer(&self, peer_addr: SocketAddr) -> Result { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64; @@ -312,12 +331,12 @@ impl PeerDiscovery { node_id: self.node_id.clone(), timestamp, }; - + let data = bincode::serialize(&ping)?; let start = SystemTime::now(); - + self.socket.send_to(&data, peer_addr).await?; - + // Wait for pong with timeout let ping_timeout = Duration::from_secs(5); match timeout(ping_timeout, self.wait_for_pong(timestamp)).await { @@ -325,7 +344,7 @@ impl PeerDiscovery { Err(_) => Err(anyhow::anyhow!("Ping timeout")), } } - + /// Wait for pong response async fn wait_for_pong(&self, _timestamp: u64) -> Result<()> { // This is a simplified implementation @@ -333,12 +352,12 @@ impl PeerDiscovery { tokio::time::sleep(Duration::from_millis(100)).await; Ok(()) } - + /// Get DHT nodes for peer routing pub async fn get_dht_nodes(&self) -> Vec { self.dht_nodes.read().await.values().cloned().collect() } - + /// Get connection pool status pub fn get_connection_pool_status(&self) -> (usize, usize) { let pool = self.connection_pool.lock().unwrap(); @@ -354,12 +373,13 @@ impl ConnectionPool { connection_timeout, } } - + /// Add a new connection pub fn add_connection(&mut self, peer_id: String, address: SocketAddr) -> Result<()> { if self.active_connections.len() >= self.max_connections { // Remove oldest connection - if let Some((oldest_id, _)) = self.active_connections + if let Some((oldest_id, _)) = self + .active_connections .iter() .min_by_key(|(_, info)| info.established_at) .map(|(id, info)| (id.clone(), 
info.clone())) @@ -367,7 +387,7 @@ impl ConnectionPool { self.active_connections.remove(&oldest_id); } } - + let connection_info = ConnectionInfo { peer_id: peer_id.clone(), address, @@ -375,60 +395,68 @@ impl ConnectionPool { last_activity: SystemTime::now(), connection_quality: 1.0, }; - + self.active_connections.insert(peer_id, connection_info); Ok(()) } - + /// Update connection activity pub fn update_activity(&mut self, peer_id: &str) { if let Some(connection) = self.active_connections.get_mut(peer_id) { connection.last_activity = SystemTime::now(); } } - + /// Get active connections pub fn get_active_connections(&self) -> Vec { self.active_connections.values().cloned().collect() } - + /// Remove stale connections pub fn cleanup_stale_connections(&mut self) { let now = SystemTime::now(); self.active_connections.retain(|_, connection| { - now.duration_since(connection.last_activity).unwrap_or(Duration::MAX) < self.connection_timeout + now.duration_since(connection.last_activity) + .unwrap_or(Duration::MAX) + < self.connection_timeout }); } - + /// Get connection info for a specific peer pub fn get_connection_info(&self, peer_id: &str) -> Option<&ConnectionInfo> { self.active_connections.get(peer_id) } - + /// Update connection quality based on performance pub fn update_connection_quality(&mut self, peer_id: &str, quality: f32) { if let Some(connection) = self.active_connections.get_mut(peer_id) { connection.connection_quality = quality.clamp(0.0, 1.0); } } - + /// Get best quality connections pub fn get_best_connections(&self, limit: usize) -> Vec<&ConnectionInfo> { let mut connections: Vec<_> = self.active_connections.values().collect(); - connections.sort_by(|a, b| b.connection_quality.partial_cmp(&a.connection_quality).unwrap()); + connections.sort_by(|a, b| { + b.connection_quality + .partial_cmp(&a.connection_quality) + .unwrap() + }); connections.into_iter().take(limit).collect() } - + /// Get connections by address pub fn 
get_connections_by_address(&self, address: &SocketAddr) -> Vec<&ConnectionInfo> { - self.active_connections.values() + self.active_connections + .values() .filter(|conn| &conn.address == address) .collect() } - + /// Get peer IDs of all active connections pub fn get_active_peer_ids(&self) -> Vec { - self.active_connections.values() + self.active_connections + .values() .map(|conn| conn.peer_id.clone()) .collect() } @@ -446,62 +474,63 @@ impl DHT { let mut hasher = sha1_smol::Sha1::new(); hasher.update(node_id.as_bytes()); let id = hasher.digest().bytes(); - + Self { node_id: id, routing_table: Arc::new(RwLock::new(HashMap::new())), k_bucket_size: 20, // Standard Kademlia k-bucket size } } - + /// Add a node to the DHT pub async fn add_node(&self, node_id: String, address: SocketAddr) { let mut hasher = sha1_smol::Sha1::new(); hasher.update(node_id.as_bytes()); let id = hasher.digest().bytes(); - + let dht_node = DHTNode { id, address, last_seen: SystemTime::now(), }; - + self.routing_table.write().await.insert(node_id, dht_node); } - + /// Find closest nodes to a target ID pub async fn find_closest_nodes(&self, target_id: &[u8; 20], count: usize) -> Vec { let table = self.routing_table.read().await; let mut nodes: Vec<_> = table.values().cloned().collect(); - + // Sort by XOR distance to target nodes.sort_by_key(|node| { let distance = xor_distance(&node.id, target_id); distance }); - + nodes.into_iter().take(count).collect() } - + /// Get the size of the routing table pub async fn size(&self) -> usize { self.routing_table.read().await.len() } - + /// Get the node's own ID pub fn get_node_id(&self) -> [u8; 20] { self.node_id } - + /// Get k-bucket size configuration pub fn get_k_bucket_size(&self) -> usize { self.k_bucket_size } - + /// Check if routing table is at capacity for a given distance pub async fn is_bucket_full(&self, target_distance: &[u8; 20]) -> bool { let table = self.routing_table.read().await; - let nodes_at_distance: Vec<_> = table.values() + let 
nodes_at_distance: Vec<_> = table + .values() .filter(|node| { let distance = xor_distance(&node.id, target_distance); distance.iter().take(1).all(|&x| x == 0) // Same prefix byte @@ -519,41 +548,45 @@ fn xor_distance(a: &[u8; 20], b: &[u8; 20]) -> Vec { #[cfg(test)] mod tests { use super::*; - + #[tokio::test] async fn test_peer_discovery_creation() { - let discovery = PeerDiscovery::new( - "test_node".to_string(), - "127.0.0.1:0".parse().unwrap() - ).await.unwrap(); - + let discovery = PeerDiscovery::new("test_node".to_string(), "127.0.0.1:0".parse().unwrap()) + .await + .unwrap(); + assert_eq!(discovery.node_id, "test_node"); } - + #[tokio::test] async fn test_dht_operations() { let dht = DHT::new("test_node".to_string()); - - dht.add_node("node1".to_string(), "127.0.0.1:8001".parse().unwrap()).await; - dht.add_node("node2".to_string(), "127.0.0.1:8002".parse().unwrap()).await; - + + dht.add_node("node1".to_string(), "127.0.0.1:8001".parse().unwrap()) + .await; + dht.add_node("node2".to_string(), "127.0.0.1:8002".parse().unwrap()) + .await; + assert_eq!(dht.size().await, 2); - + let closest = dht.find_closest_nodes(&[0; 20], 1).await; assert_eq!(closest.len(), 1); } - + #[test] fn test_connection_pool() { let mut pool = ConnectionPool::new(2, Duration::from_secs(60)); - - pool.add_connection("peer1".to_string(), "127.0.0.1:8001".parse().unwrap()).unwrap(); - pool.add_connection("peer2".to_string(), "127.0.0.1:8002".parse().unwrap()).unwrap(); - + + pool.add_connection("peer1".to_string(), "127.0.0.1:8001".parse().unwrap()) + .unwrap(); + pool.add_connection("peer2".to_string(), "127.0.0.1:8002".parse().unwrap()) + .unwrap(); + assert_eq!(pool.get_active_connections().len(), 2); - + // Adding third connection should remove oldest - pool.add_connection("peer3".to_string(), "127.0.0.1:8003".parse().unwrap()).unwrap(); + pool.add_connection("peer3".to_string(), "127.0.0.1:8003".parse().unwrap()) + .unwrap(); assert_eq!(pool.get_active_connections().len(), 2); } -} \ 
No newline at end of file +} diff --git a/crates/p2p-network/src/lib.rs b/crates/p2p-network/src/lib.rs index b31d7c2..cf3c2c9 100644 --- a/crates/p2p-network/src/lib.rs +++ b/crates/p2p-network/src/lib.rs @@ -54,15 +54,15 @@ use webrtc::{ }, }; -use traits::{Hash, P2PNetworkLayer, UtxoBlock, UtxoTransaction}; -use crate::discovery::{PeerDiscovery, DHT}; use crate::auto_discovery::AutoDiscovery; +use crate::discovery::{PeerDiscovery, DHT}; +use traits::{Hash, P2PNetworkLayer, UtxoBlock, UtxoTransaction}; +pub mod adaptive_network; +pub mod auto_discovery; +pub mod discovery; pub mod peer; pub mod signaling; -pub mod discovery; -pub mod auto_discovery; -pub mod adaptive_network; /// P2P Network configuration for WebRTC connections #[derive(Debug, Clone, Serialize, Deserialize)] @@ -259,7 +259,7 @@ impl WebRTCP2PNetwork { // Initialize DHT let dht = Arc::new(DHT::new(config.node_id.clone())); - + // Initialize auto discovery let auto_discovery = Arc::new(RwLock::new(AutoDiscovery::new( config.node_id.clone(), @@ -292,10 +292,7 @@ impl WebRTCP2PNetwork { /// Start the P2P network and begin accepting connections pub async fn start(&self) -> Result<()> { - info!( - "Starting WebRTC P2P Network on {}", - self.config.listen_addr - ); + info!("Starting WebRTC P2P Network on {}", self.config.listen_addr); // Update stats { @@ -311,10 +308,7 @@ impl WebRTCP2PNetwork { .await { Ok(_) => info!("Connected to bootstrap peer: {}", peer_addr), - Err(e) => warn!( - "Failed to connect to bootstrap peer {}: {}", - peer_addr, e - ), + Err(e) => warn!("Failed to connect to bootstrap peer {}: {}", peer_addr, e), } } @@ -656,11 +650,7 @@ impl WebRTCP2PNetwork { let peer_id = peer_id_ice.clone(); Box::pin(async move { if let Some(candidate) = candidate { - debug!( - "ICE candidate for peer {}: {}", - peer_id, - candidate - ); + debug!("ICE candidate for peer {}: {}", peer_id, candidate); // TODO: Send ICE candidate through signaling server } else { debug!("ICE gathering complete for peer: 
{}", peer_id); @@ -944,8 +934,14 @@ impl WebRTCP2PNetwork { peer_list, timestamp, } => { - info!("Received peer announcement from {} (node: {}, addr: {}, peers: {}, time: {})", - peer_id, node_id, listen_addr, peer_list.len(), timestamp); + info!( + "Received peer announcement from {} (node: {}, addr: {}, peers: {}, time: {})", + peer_id, + node_id, + listen_addr, + peer_list.len(), + timestamp + ); // Peer announcement received - could connect to new peers } P2PMessage::Error { diff --git a/crates/p2p-network/tests/non_blocking_adaptive_test.rs b/crates/p2p-network/tests/non_blocking_adaptive_test.rs index 9be6d4a..830450c 100644 --- a/crates/p2p-network/tests/non_blocking_adaptive_test.rs +++ b/crates/p2p-network/tests/non_blocking_adaptive_test.rs @@ -70,16 +70,16 @@ async fn test_non_blocking_peer_discovery() -> Result<()> { let network1 = WebRTCP2PNetwork::new(config1)?; let network2 = WebRTCP2PNetwork::new(config2)?; - + info!("Created both networks"); // Test discovery functionality without full startup let discovered_peers1 = network1.get_discovered_peers().await; let discovered_peers2 = network2.get_discovered_peers().await; - + info!("Network 1 discovered {} peers", discovered_peers1.len()); info!("Network 2 discovered {} peers", discovered_peers2.len()); - + // Initially should be empty assert_eq!(discovered_peers1.len(), 0); assert_eq!(discovered_peers2.len(), 0); @@ -87,41 +87,55 @@ async fn test_non_blocking_peer_discovery() -> Result<()> { // Test broadcasting capabilities let tx1 = create_test_tx(1); let broadcast_result1 = network1.broadcast_transaction(&tx1).await; - + let tx2 = create_test_tx(2); let broadcast_result2 = network2.broadcast_transaction(&tx2).await; - - assert!(broadcast_result1.is_ok(), "Network 1 should broadcast successfully"); - assert!(broadcast_result2.is_ok(), "Network 2 should broadcast successfully"); - + + assert!( + broadcast_result1.is_ok(), + "Network 1 should broadcast successfully" + ); + assert!( + 
broadcast_result2.is_ok(), + "Network 2 should broadcast successfully" + ); + info!("Both networks can broadcast transactions"); // Get network statistics let stats1 = network1.get_network_stats(); let stats2 = network2.get_network_stats(); - - info!("Network 1 stats: connections={}, messages_sent={}", - stats1.active_connections, stats1.messages_sent); - info!("Network 2 stats: connections={}, messages_sent={}", - stats2.active_connections, stats2.messages_sent); + + info!( + "Network 1 stats: connections={}, messages_sent={}", + stats1.active_connections, stats1.messages_sent + ); + info!( + "Network 2 stats: connections={}, messages_sent={}", + stats2.active_connections, stats2.messages_sent + ); // Test adaptive network statistics if available let adaptive_stats1 = network1.get_adaptive_network_stats().await; let adaptive_stats2 = network2.get_adaptive_network_stats().await; - - info!("Network 1 adaptive stats: discovered={}, connected={}, efficiency={:.2}", - adaptive_stats1.discovered_peers_count, - adaptive_stats1.connected_peers_count, - adaptive_stats1.discovery_efficiency); - info!("Network 2 adaptive stats: discovered={}, connected={}, efficiency={:.2}", - adaptive_stats2.discovered_peers_count, - adaptive_stats2.connected_peers_count, - adaptive_stats2.discovery_efficiency); + + info!( + "Network 1 adaptive stats: discovered={}, connected={}, efficiency={:.2}", + adaptive_stats1.discovered_peers_count, + adaptive_stats1.connected_peers_count, + adaptive_stats1.discovery_efficiency + ); + info!( + "Network 2 adaptive stats: discovered={}, connected={}, efficiency={:.2}", + adaptive_stats2.discovered_peers_count, + adaptive_stats2.connected_peers_count, + adaptive_stats2.discovery_efficiency + ); // Cleanup network1.shutdown().await?; network2.shutdown().await?; - + info!("Non-blocking peer discovery test completed"); Ok(()) } @@ -133,14 +147,14 @@ async fn test_non_blocking_network_expansion() -> Result<()> { // Create multiple networks simulating 
gradual expansion let mut networks = Vec::new(); - + for i in 0..4 { let config = P2PConfig { node_id: format!("expansion_node_{}", i), listen_addr: format!("127.0.0.1:{}", 12010 + i).parse().unwrap(), - bootstrap_peers: if i == 0 { - vec![] - } else { + bootstrap_peers: if i == 0 { + vec![] + } else { vec![format!("127.0.0.1:{}", 12010)] // Bootstrap to first node }, stun_servers: vec![], @@ -149,7 +163,7 @@ async fn test_non_blocking_network_expansion() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config)?; networks.push(network); info!("Created network {}", i); @@ -163,39 +177,57 @@ async fn test_non_blocking_network_expansion() -> Result<()> { successful_broadcasts += 1; } } - - assert_eq!(successful_broadcasts, 4, "All networks should handle transactions"); - info!("All {} networks can handle transactions", successful_broadcasts); + + assert_eq!( + successful_broadcasts, 4, + "All networks should handle transactions" + ); + info!( + "All {} networks can handle transactions", + successful_broadcasts + ); // Check network capabilities for (i, network) in networks.iter().enumerate() { let discovered = network.get_discovered_peers().await; let connected = network.get_connected_peers().await; let stats = network.get_network_stats(); - - info!("Network {} - Discovered: {}, Connected: {}, Total connections: {}", - i, discovered.len(), connected.len(), stats.total_connections); + + info!( + "Network {} - Discovered: {}, Connected: {}, Total connections: {}", + i, + discovered.len(), + connected.len(), + stats.total_connections + ); } // Test adaptive broadcasting on all networks let tx_broadcast = create_test_tx(200); let mut adaptive_broadcast_results = 0; - + for (i, network) in networks.iter().enumerate() { - if network.adaptive_broadcast_transaction(&tx_broadcast).await.is_ok() { + if network + .adaptive_broadcast_transaction(&tx_broadcast) + .await + .is_ok() + { adaptive_broadcast_results += 1; } 
info!("Network {} adaptive broadcast result: OK", i); } - - assert_eq!(adaptive_broadcast_results, 4, "All networks should support adaptive broadcast"); + + assert_eq!( + adaptive_broadcast_results, 4, + "All networks should support adaptive broadcast" + ); // Cleanup all networks for (i, network) in networks.into_iter().enumerate() { network.shutdown().await?; info!("Network {} shutdown complete", i); } - + info!("Non-blocking network expansion test completed"); Ok(()) } @@ -207,7 +239,7 @@ async fn test_non_blocking_network_resilience() -> Result<()> { // Create a small network setup let mut networks = Vec::new(); - + for i in 0..3 { let config = P2PConfig { node_id: format!("resilient_node_{}", i), @@ -223,11 +255,11 @@ async fn test_non_blocking_network_resilience() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config)?; networks.push(network); } - + info!("Created {} networks for resilience testing", networks.len()); // Test all networks are functional @@ -235,10 +267,12 @@ async fn test_non_blocking_network_resilience() -> Result<()> { let tx = create_test_tx(300 + i as u64); let result = network.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Network {} should be functional", i); - + let stats = network.get_network_stats(); - info!("Network {} initial stats: connections={}, messages_sent={}", - i, stats.active_connections, stats.messages_sent); + info!( + "Network {} initial stats: connections={}, messages_sent={}", + i, stats.active_connections, stats.messages_sent + ); } // Simulate "node failure" by shutting down middle network @@ -252,9 +286,13 @@ async fn test_non_blocking_network_resilience() -> Result<()> { let tx = create_test_tx(400 + i as u64); let result = network.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Remaining network {} should still work", i); - + let discovered = network.get_discovered_peers().await; - info!("Network {} after failure - Discovered peers: 
{}", i, discovered.len()); + info!( + "Network {} after failure - Discovered peers: {}", + i, + discovered.len() + ); } // Add new "healing" network @@ -262,17 +300,14 @@ async fn test_non_blocking_network_resilience() -> Result<()> { let healing_config = P2PConfig { node_id: "healing_node".to_string(), listen_addr: "127.0.0.1:12030".parse().unwrap(), - bootstrap_peers: vec![ - "127.0.0.1:12020".to_string(), - "127.0.0.1:12022".to_string(), - ], + bootstrap_peers: vec!["127.0.0.1:12020".to_string(), "127.0.0.1:12022".to_string()], stun_servers: vec![], max_peers: 5, connection_timeout: 5, keep_alive_interval: 30, debug_mode: false, }; - + let healing_network = WebRTCP2PNetwork::new(healing_config)?; networks.push(healing_network); @@ -283,23 +318,33 @@ async fn test_non_blocking_network_resilience() -> Result<()> { if network.broadcast_transaction(&tx).await.is_ok() { working_nodes += 1; } - + let discovered = network.get_discovered_peers().await; let adaptive_stats = network.get_adaptive_network_stats().await; - info!("Network {} after healing - Discovered: {}, DHT nodes: {}", - i, discovered.len(), adaptive_stats.dht_nodes_count); + info!( + "Network {} after healing - Discovered: {}, DHT nodes: {}", + i, + discovered.len(), + adaptive_stats.dht_nodes_count + ); } - - assert!(working_nodes >= 2, "At least 2 nodes should work after healing"); - info!("Network resilience test: {}/{} nodes working after healing", - working_nodes, networks.len()); + + assert!( + working_nodes >= 2, + "At least 2 nodes should work after healing" + ); + info!( + "Network resilience test: {}/{} nodes working after healing", + working_nodes, + networks.len() + ); // Cleanup remaining networks for (i, network) in networks.into_iter().enumerate() { network.shutdown().await?; info!("Network {} cleanup complete", i); } - + info!("Non-blocking network resilience test completed"); Ok(()) } @@ -325,48 +370,56 @@ async fn test_discovery_mechanisms() -> Result<()> { // Test initial discovery 
state let initial_discovered = network.get_discovered_peers().await; let initial_connected = network.get_connected_peers().await; - - info!("Initial state - Discovered: {}, Connected: {}", - initial_discovered.len(), initial_connected.len()); - + + info!( + "Initial state - Discovered: {}, Connected: {}", + initial_discovered.len(), + initial_connected.len() + ); + assert_eq!(initial_discovered.len(), 0); assert_eq!(initial_connected.len(), 0); // Test adaptive network statistics let adaptive_stats = network.get_adaptive_network_stats().await; - - info!("Adaptive stats - Discovered peers: {}, DHT nodes: {}, Connected: {}, Efficiency: {:.2}", - adaptive_stats.discovered_peers_count, - adaptive_stats.dht_nodes_count, - adaptive_stats.connected_peers_count, - adaptive_stats.discovery_efficiency); + + info!( + "Adaptive stats - Discovered peers: {}, DHT nodes: {}, Connected: {}, Efficiency: {:.2}", + adaptive_stats.discovered_peers_count, + adaptive_stats.dht_nodes_count, + adaptive_stats.connected_peers_count, + adaptive_stats.discovery_efficiency + ); // Test adaptive broadcasting let tx = create_test_tx(600); let adaptive_result = network.adaptive_broadcast_transaction(&tx).await; assert!(adaptive_result.is_ok(), "Adaptive broadcast should work"); - + info!("Adaptive broadcast successful"); // Test multiple data requests to exercise discovery for i in 0..5 { - let result = network.request_blockchain_data( - "test_data".to_string(), - format!("discovery_test_{}", i) - ).await; + let result = network + .request_blockchain_data("test_data".to_string(), format!("discovery_test_{}", i)) + .await; info!("Data request {} result: {:?}", i, result.is_ok()); } // Check final stats let final_stats = network.get_network_stats(); let final_adaptive = network.get_adaptive_network_stats().await; - - info!("Final stats - Messages sent: {}, Total connections: {}", - final_stats.messages_sent, final_stats.total_connections); - info!("Final adaptive - Discovery efficiency: {:.2}", - 
final_adaptive.discovery_efficiency); + + info!( + "Final stats - Messages sent: {}, Total connections: {}", + final_stats.messages_sent, final_stats.total_connections + ); + info!( + "Final adaptive - Discovery efficiency: {:.2}", + final_adaptive.discovery_efficiency + ); network.shutdown().await?; info!("Discovery mechanisms test completed"); Ok(()) -} \ No newline at end of file +} diff --git a/crates/p2p-network/tests/non_blocking_integration_test.rs b/crates/p2p-network/tests/non_blocking_integration_test.rs index 928a72e..819a582 100644 --- a/crates/p2p-network/tests/non_blocking_integration_test.rs +++ b/crates/p2p-network/tests/non_blocking_integration_test.rs @@ -96,10 +96,10 @@ async fn test_non_blocking_p2p_setup() -> Result<()> { // Test network creation and configuration let stats1 = network1.get_network_stats(); let stats2 = network2.get_network_stats(); - + assert_eq!(stats1.active_connections, 0); assert_eq!(stats2.active_connections, 0); - + info!("Network1 initial stats: {:?}", stats1); info!("Network2 initial stats: {:?}", stats2); @@ -107,7 +107,7 @@ async fn test_non_blocking_p2p_setup() -> Result<()> { let tx = create_test_transaction("alice", "bob", 1000); let broadcast_result1 = network1.broadcast_transaction(&tx).await; let broadcast_result2 = network2.broadcast_transaction(&tx).await; - + assert!(broadcast_result1.is_ok()); assert!(broadcast_result2.is_ok()); info!("Transaction broadcast successful on both networks"); @@ -116,44 +116,51 @@ async fn test_non_blocking_p2p_setup() -> Result<()> { let block = create_test_block(1, vec![tx.clone()]); let block_result1 = network1.broadcast_block(&block).await; let block_result2 = network2.broadcast_block(&block).await; - + assert!(block_result1.is_ok()); assert!(block_result2.is_ok()); info!("Block broadcast successful on both networks"); // Test data requests - let data_request1 = network1.request_blockchain_data( - "transaction".to_string(), - tx.hash.clone() - ).await; - let data_request2 = 
network2.request_blockchain_data( - "block".to_string(), - block.hash.clone() - ).await; - - info!("Data request results - Network1: {:?}, Network2: {:?}", - data_request1.is_ok(), data_request2.is_ok()); + let data_request1 = network1 + .request_blockchain_data("transaction".to_string(), tx.hash.clone()) + .await; + let data_request2 = network2 + .request_blockchain_data("block".to_string(), block.hash.clone()) + .await; + + info!( + "Data request results - Network1: {:?}, Network2: {:?}", + data_request1.is_ok(), + data_request2.is_ok() + ); // Test peer queries let peers1 = network1.get_connected_peers().await; let peers2 = network2.get_connected_peers().await; - + assert_eq!(peers1.len(), 0); // No connections without start() assert_eq!(peers2.len(), 0); - info!("Peer queries successful - Network1: {} peers, Network2: {} peers", - peers1.len(), peers2.len()); + info!( + "Peer queries successful - Network1: {} peers, Network2: {} peers", + peers1.len(), + peers2.len() + ); // Test discovery functionality let discovered1 = network1.get_discovered_peers().await; let discovered2 = network2.get_discovered_peers().await; - - info!("Discovery results - Network1: {} discovered, Network2: {} discovered", - discovered1.len(), discovered2.len()); + + info!( + "Discovery results - Network1: {} discovered, Network2: {} discovered", + discovered1.len(), + discovered2.len() + ); // Graceful shutdown network1.shutdown().await?; network2.shutdown().await?; - + info!("Non-blocking P2P setup test completed successfully"); Ok(()) } @@ -188,21 +195,21 @@ async fn test_network_configuration_validation() -> Result<()> { ]; let mut networks = Vec::new(); - + for (i, config) in configs.into_iter().enumerate() { let network = WebRTCP2PNetwork::new(config)?; - + // Test basic functionality let stats = network.get_network_stats(); let peers = network.get_connected_peers().await; - + info!("Network {} - Stats: {:?}, Peers: {}", i, stats, peers.len()); - + // Test transaction handling let 
tx = create_test_transaction("user1", "user2", 100 * (i as u64 + 1)); let result = network.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Network {} should handle transactions", i); - + networks.push(network); } @@ -236,11 +243,13 @@ async fn test_multiple_transactions_and_blocks() -> Result<()> { // Create multiple transactions let transactions: Vec = (0..10) - .map(|i| create_test_transaction( - &format!("user_{}", i), - &format!("user_{}", i + 1), - 1000 + i * 100 - )) + .map(|i| { + create_test_transaction( + &format!("user_{}", i), + &format!("user_{}", i + 1), + 1000 + i * 100, + ) + }) .collect(); // Broadcast all transactions @@ -253,12 +262,15 @@ async fn test_multiple_transactions_and_blocks() -> Result<()> { info!("Transaction {} broadcast result: {:?}", i, result.is_ok()); } - assert_eq!(successful_broadcasts, 10, "All transactions should broadcast successfully"); + assert_eq!( + successful_broadcasts, 10, + "All transactions should broadcast successfully" + ); // Create multiple blocks with the transactions let blocks: Vec = (0..5) .map(|i| { - let block_txs = transactions[i*2..i*2+2].to_vec(); + let block_txs = transactions[i * 2..i * 2 + 2].to_vec(); create_test_block(i as u64, block_txs) }) .collect(); @@ -273,16 +285,27 @@ async fn test_multiple_transactions_and_blocks() -> Result<()> { info!("Block {} broadcast result: {:?}", i, result.is_ok()); } - assert_eq!(successful_block_broadcasts, 5, "All blocks should broadcast successfully"); + assert_eq!( + successful_block_broadcasts, 5, + "All blocks should broadcast successfully" + ); // Test data requests for all items for (i, tx) in transactions.iter().enumerate() { - let result = network.request_blockchain_data("transaction".to_string(), tx.hash.clone()).await; - info!("Transaction {} data request result: {:?}", i, result.is_ok()); + let result = network + .request_blockchain_data("transaction".to_string(), tx.hash.clone()) + .await; + info!( + "Transaction {} data request result: 
{:?}", + i, + result.is_ok() + ); } for (i, block) in blocks.iter().enumerate() { - let result = network.request_blockchain_data("block".to_string(), block.hash.clone()).await; + let result = network + .request_blockchain_data("block".to_string(), block.hash.clone()) + .await; info!("Block {} data request result: {:?}", i, result.is_ok()); } @@ -303,7 +326,7 @@ async fn test_concurrent_network_operations() -> Result<()> { // Create multiple networks let mut networks = Vec::new(); let mut configs = Vec::new(); - + for i in 0..3 { let config = P2PConfig { node_id: format!("concurrent_node_{}", i), @@ -315,7 +338,7 @@ async fn test_concurrent_network_operations() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config.clone())?; networks.push(network); configs.push(config); @@ -323,16 +346,16 @@ async fn test_concurrent_network_operations() -> Result<()> { // Concurrent transaction broadcasting let mut handles = Vec::new(); - + for (i, network) in networks.iter().enumerate() { let net = network.clone(); let handle = tokio::spawn(async move { let mut results = Vec::new(); for j in 0..5 { let tx = create_test_transaction( - &format!("user_{}_{}", i, j), - &format!("user_{}_{}", i, j + 1), - 1000 + j * 100 + &format!("user_{}_{}", i, j), + &format!("user_{}_{}", i, j + 1), + 1000 + j * 100, ); let result = net.broadcast_transaction(&tx).await; results.push(result.is_ok()); @@ -344,13 +367,17 @@ async fn test_concurrent_network_operations() -> Result<()> { // Wait for all concurrent operations let results = futures::future::join_all(handles).await; - + for (i, result) in results.iter().enumerate() { match result { Ok(broadcasts) => { let successful = broadcasts.iter().filter(|&&x| x).count(); info!("Network {} - Successful broadcasts: {}/5", i, successful); - assert_eq!(successful, 5, "All broadcasts should succeed for network {}", i); + assert_eq!( + successful, 5, + "All broadcasts should succeed for network {}", + i 
+ ); } Err(e) => { panic!("Network {} task failed: {:?}", i, e); @@ -360,16 +387,18 @@ async fn test_concurrent_network_operations() -> Result<()> { // Concurrent data requests let mut request_handles = Vec::new(); - + for (i, network) in networks.iter().enumerate() { let net = network.clone(); let handle = tokio::spawn(async move { let mut results = Vec::new(); for j in 0..3 { - let result = net.request_blockchain_data( - "test_data".to_string(), - format!("test_id_{}_{}", i, j) - ).await; + let result = net + .request_blockchain_data( + "test_data".to_string(), + format!("test_id_{}_{}", i, j), + ) + .await; results.push(result.is_ok()); } results @@ -378,11 +407,15 @@ async fn test_concurrent_network_operations() -> Result<()> { } let request_results = futures::future::join_all(request_handles).await; - + for (i, result) in request_results.iter().enumerate() { match result { Ok(requests) => { - info!("Network {} - Data requests completed: {}", i, requests.len()); + info!( + "Network {} - Data requests completed: {}", + i, + requests.len() + ); } Err(e) => { panic!("Network {} request task failed: {:?}", i, e); @@ -398,4 +431,4 @@ async fn test_concurrent_network_operations() -> Result<()> { info!("Concurrent network operations test completed"); Ok(()) -} \ No newline at end of file +} diff --git a/crates/p2p-network/tests/non_blocking_network_joining_test.rs b/crates/p2p-network/tests/non_blocking_network_joining_test.rs index 75cc643..20bd635 100644 --- a/crates/p2p-network/tests/non_blocking_network_joining_test.rs +++ b/crates/p2p-network/tests/non_blocking_network_joining_test.rs @@ -64,8 +64,10 @@ async fn test_non_blocking_network_joining_setup() -> Result<()> { // Test existing network functionality let existing_stats = existing_network.get_network_stats(); - info!("Existing network stats: connections={}, total={}", - existing_stats.active_connections, existing_stats.total_connections); + info!( + "Existing network stats: connections={}, total={}", + 
existing_stats.active_connections, existing_stats.total_connections + ); // Create new node that wants to join let joining_config = P2PConfig { @@ -84,8 +86,10 @@ async fn test_non_blocking_network_joining_setup() -> Result<()> { // Test joining network functionality let joining_stats = joining_network.get_network_stats(); - info!("Joining network stats: connections={}, total={}", - joining_stats.active_connections, joining_stats.total_connections); + info!( + "Joining network stats: connections={}, total={}", + joining_stats.active_connections, joining_stats.total_connections + ); // Test both networks can handle transactions let tx_existing = create_test_transaction(1, "existing_node"); @@ -94,43 +98,62 @@ async fn test_non_blocking_network_joining_setup() -> Result<()> { let result_existing = existing_network.broadcast_transaction(&tx_existing).await; let result_joining = joining_network.broadcast_transaction(&tx_joining).await; - assert!(result_existing.is_ok(), "Existing network should handle transactions"); - assert!(result_joining.is_ok(), "Joining network should handle transactions"); - + assert!( + result_existing.is_ok(), + "Existing network should handle transactions" + ); + assert!( + result_joining.is_ok(), + "Joining network should handle transactions" + ); + info!("Both networks can handle transactions"); // Test discovery capabilities let discovered_existing = existing_network.get_discovered_peers().await; let discovered_joining = joining_network.get_discovered_peers().await; - info!("Existing network discovered {} peers", discovered_existing.len()); - info!("Joining network discovered {} peers", discovered_joining.len()); + info!( + "Existing network discovered {} peers", + discovered_existing.len() + ); + info!( + "Joining network discovered {} peers", + discovered_joining.len() + ); // Test peer connection capabilities (without actual connections) let connected_existing = existing_network.get_connected_peers().await; let connected_joining = 
joining_network.get_connected_peers().await; - info!("Existing network connected to {} peers", connected_existing.len()); - info!("Joining network connected to {} peers", connected_joining.len()); + info!( + "Existing network connected to {} peers", + connected_existing.len() + ); + info!( + "Joining network connected to {} peers", + connected_joining.len() + ); // Test data request capabilities - let data_request_existing = existing_network.request_blockchain_data( - "transaction".to_string(), - tx_joining.hash.clone() - ).await; - - let data_request_joining = joining_network.request_blockchain_data( - "transaction".to_string(), - tx_existing.hash.clone() - ).await; - - info!("Data request results - Existing: {:?}, Joining: {:?}", - data_request_existing.is_ok(), data_request_joining.is_ok()); + let data_request_existing = existing_network + .request_blockchain_data("transaction".to_string(), tx_joining.hash.clone()) + .await; + + let data_request_joining = joining_network + .request_blockchain_data("transaction".to_string(), tx_existing.hash.clone()) + .await; + + info!( + "Data request results - Existing: {:?}, Joining: {:?}", + data_request_existing.is_ok(), + data_request_joining.is_ok() + ); // Cleanup existing_network.shutdown().await?; joining_network.shutdown().await?; - + info!("Non-blocking network joining setup test completed"); Ok(()) } @@ -154,7 +177,7 @@ async fn test_multiple_nodes_joining_sequence() -> Result<()> { let bootstrap_network = WebRTCP2PNetwork::new(bootstrap_config)?; let mut networks = vec![bootstrap_network]; - + info!("Created bootstrap network"); // Create multiple joining nodes @@ -169,7 +192,7 @@ async fn test_multiple_nodes_joining_sequence() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config)?; networks.push(network); info!("Created joining node {}", i); @@ -183,35 +206,57 @@ async fn test_multiple_nodes_joining_sequence() -> Result<()> { successful_broadcasts += 1; } } 
- - assert_eq!(successful_broadcasts, 6, "All networks should handle transactions"); - info!("All {} networks can handle transactions", successful_broadcasts); + + assert_eq!( + successful_broadcasts, 6, + "All networks should handle transactions" + ); + info!( + "All {} networks can handle transactions", + successful_broadcasts + ); // Test adaptive capabilities on all networks let tx_adaptive = create_test_transaction(200, "adaptive_test"); let mut adaptive_results = 0; - + for (i, network) in networks.iter().enumerate() { - if network.adaptive_broadcast_transaction(&tx_adaptive).await.is_ok() { + if network + .adaptive_broadcast_transaction(&tx_adaptive) + .await + .is_ok() + { adaptive_results += 1; } - + let adaptive_stats = network.get_adaptive_network_stats().await; - info!("Network {} adaptive stats - Discovered: {}, DHT: {}, Efficiency: {:.2}", - i, adaptive_stats.discovered_peers_count, - adaptive_stats.dht_nodes_count, adaptive_stats.discovery_efficiency); + info!( + "Network {} adaptive stats - Discovered: {}, DHT: {}, Efficiency: {:.2}", + i, + adaptive_stats.discovered_peers_count, + adaptive_stats.dht_nodes_count, + adaptive_stats.discovery_efficiency + ); } - - assert_eq!(adaptive_results, 6, "All networks should support adaptive broadcast"); + + assert_eq!( + adaptive_results, 6, + "All networks should support adaptive broadcast" + ); // Test network statistics across all nodes for (i, network) in networks.iter().enumerate() { let stats = network.get_network_stats(); let discovered = network.get_discovered_peers().await; let connected = network.get_connected_peers().await; - - info!("Network {} final stats - Messages: {}, Discovered: {}, Connected: {}", - i, stats.messages_sent, discovered.len(), connected.len()); + + info!( + "Network {} final stats - Messages: {}, Discovered: {}, Connected: {}", + i, + stats.messages_sent, + discovered.len(), + connected.len() + ); } // Cleanup all networks @@ -219,7 +264,7 @@ async fn 
test_multiple_nodes_joining_sequence() -> Result<()> { network.shutdown().await?; info!("Network {} cleanup complete", i); } - + info!("Multiple nodes joining sequence test completed"); Ok(()) } @@ -231,7 +276,7 @@ async fn test_network_joining_with_failures() -> Result<()> { // Create initial network let mut networks = Vec::new(); - + for i in 0..3 { let config = P2PConfig { node_id: format!("failure_test_node_{}", i), @@ -247,18 +292,22 @@ async fn test_network_joining_with_failures() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config)?; networks.push(network); } - + info!("Created {} networks for failure testing", networks.len()); // Test all networks initially work for (i, network) in networks.iter().enumerate() { let tx = create_test_transaction(300 + i as u64, &format!("failure_node_{}", i)); let result = network.broadcast_transaction(&tx).await; - assert!(result.is_ok(), "Network {} should be functional initially", i); + assert!( + result.is_ok(), + "Network {} should be functional initially", + i + ); } // Simulate failure by shutting down bootstrap node @@ -272,10 +321,12 @@ async fn test_network_joining_with_failures() -> Result<()> { let tx = create_test_transaction(400 + i as u64, &format!("surviving_node_{}", i)); let result = network.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Surviving network {} should still work", i); - + let stats = network.get_network_stats(); - info!("Surviving network {} stats: messages={}, connections={}", - i, stats.messages_sent, stats.total_connections); + info!( + "Surviving network {} stats: messages={}, connections={}", + i, stats.messages_sent, stats.total_connections + ); } // Add new node to replace failed bootstrap @@ -283,17 +334,14 @@ async fn test_network_joining_with_failures() -> Result<()> { let replacement_config = P2PConfig { node_id: "replacement_node".to_string(), listen_addr: "127.0.0.1:11030".parse().unwrap(), - bootstrap_peers: 
vec![ - "127.0.0.1:11021".to_string(), - "127.0.0.1:11022".to_string(), - ], + bootstrap_peers: vec!["127.0.0.1:11021".to_string(), "127.0.0.1:11022".to_string()], stun_servers: vec![], max_peers: 10, connection_timeout: 5, keep_alive_interval: 30, debug_mode: false, }; - + let replacement_network = WebRTCP2PNetwork::new(replacement_config)?; networks.push(replacement_network); @@ -304,24 +352,35 @@ async fn test_network_joining_with_failures() -> Result<()> { if network.broadcast_transaction(&tx).await.is_ok() { working_networks += 1; } - + let discovered = network.get_discovered_peers().await; let adaptive_stats = network.get_adaptive_network_stats().await; - info!("Final network {} - Discovered: {}, DHT: {}, Working: {}", - i, discovered.len(), adaptive_stats.dht_nodes_count, - working_networks <= i + 1); + info!( + "Final network {} - Discovered: {}, DHT: {}, Working: {}", + i, + discovered.len(), + adaptive_stats.dht_nodes_count, + working_networks <= i + 1 + ); } - - assert_eq!(working_networks, networks.len(), "All remaining networks should work"); - info!("Network recovery successful: {}/{} networks working", - working_networks, networks.len()); + + assert_eq!( + working_networks, + networks.len(), + "All remaining networks should work" + ); + info!( + "Network recovery successful: {}/{} networks working", + working_networks, + networks.len() + ); // Cleanup for (i, network) in networks.into_iter().enumerate() { network.shutdown().await?; info!("Final cleanup network {} complete", i); } - + info!("Network joining with failures test completed"); Ok(()) -} \ No newline at end of file +} diff --git a/crates/p2p-network/tests/non_blocking_peer_test.rs b/crates/p2p-network/tests/non_blocking_peer_test.rs index fd1b0bc..3b5feb6 100644 --- a/crates/p2p-network/tests/non_blocking_peer_test.rs +++ b/crates/p2p-network/tests/non_blocking_peer_test.rs @@ -71,17 +71,21 @@ async fn test_non_blocking_peer_communication_setup() -> Result<()> { let peer1 = 
WebRTCP2PNetwork::new(config_peer1)?; let peer2 = WebRTCP2PNetwork::new(config_peer2)?; - + info!("Created two peer networks"); // Test basic peer functionality let stats1 = peer1.get_network_stats(); let stats2 = peer2.get_network_stats(); - - info!("Peer 1 stats: connections={}, total={}", - stats1.active_connections, stats1.total_connections); - info!("Peer 2 stats: connections={}, total={}", - stats2.active_connections, stats2.total_connections); + + info!( + "Peer 1 stats: connections={}, total={}", + stats1.active_connections, stats1.total_connections + ); + info!( + "Peer 2 stats: connections={}, total={}", + stats2.active_connections, stats2.total_connections + ); // Test transaction exchange simulation let tx1_to_2 = create_test_transaction(1, "peer1", "peer2", 1000); @@ -92,22 +96,23 @@ async fn test_non_blocking_peer_communication_setup() -> Result<()> { assert!(result1.is_ok(), "Peer 1 should broadcast successfully"); assert!(result2.is_ok(), "Peer 2 should broadcast successfully"); - + info!("Both peers can handle transaction broadcasting"); // Test data requests between peers - let data_request_1 = peer1.request_blockchain_data( - "transaction".to_string(), - tx2_to_1.hash.clone() - ).await; - - let data_request_2 = peer2.request_blockchain_data( - "transaction".to_string(), - tx1_to_2.hash.clone() - ).await; - - info!("Data request results - Peer1: {:?}, Peer2: {:?}", - data_request_1.is_ok(), data_request_2.is_ok()); + let data_request_1 = peer1 + .request_blockchain_data("transaction".to_string(), tx2_to_1.hash.clone()) + .await; + + let data_request_2 = peer2 + .request_blockchain_data("transaction".to_string(), tx1_to_2.hash.clone()) + .await; + + info!( + "Data request results - Peer1: {:?}, Peer2: {:?}", + data_request_1.is_ok(), + data_request_2.is_ok() + ); // Test peer discovery capabilities let discovered1 = peer1.get_discovered_peers().await; @@ -115,26 +120,38 @@ async fn test_non_blocking_peer_communication_setup() -> Result<()> { let 
connected1 = peer1.get_connected_peers().await; let connected2 = peer2.get_connected_peers().await; - info!("Peer 1 - Discovered: {}, Connected: {}", discovered1.len(), connected1.len()); - info!("Peer 2 - Discovered: {}, Connected: {}", discovered2.len(), connected2.len()); + info!( + "Peer 1 - Discovered: {}, Connected: {}", + discovered1.len(), + connected1.len() + ); + info!( + "Peer 2 - Discovered: {}, Connected: {}", + discovered2.len(), + connected2.len() + ); // Test adaptive features let adaptive_stats1 = peer1.get_adaptive_network_stats().await; let adaptive_stats2 = peer2.get_adaptive_network_stats().await; - info!("Peer 1 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", - adaptive_stats1.discovered_peers_count, - adaptive_stats1.dht_nodes_count, - adaptive_stats1.discovery_efficiency); - info!("Peer 2 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", - adaptive_stats2.discovered_peers_count, - adaptive_stats2.dht_nodes_count, - adaptive_stats2.discovery_efficiency); + info!( + "Peer 1 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", + adaptive_stats1.discovered_peers_count, + adaptive_stats1.dht_nodes_count, + adaptive_stats1.discovery_efficiency + ); + info!( + "Peer 2 adaptive - Discovered: {}, DHT: {}, Efficiency: {:.2}", + adaptive_stats2.discovered_peers_count, + adaptive_stats2.dht_nodes_count, + adaptive_stats2.discovery_efficiency + ); // Cleanup peer1.shutdown().await?; peer2.shutdown().await?; - + info!("Non-blocking peer communication setup test completed"); Ok(()) } @@ -146,7 +163,7 @@ async fn test_multi_peer_network_simulation() -> Result<()> { // Create multiple peer networks let mut peers = Vec::new(); - + for i in 0..5 { let config = P2PConfig { node_id: format!("multi_peer_{}", i), @@ -162,7 +179,7 @@ async fn test_multi_peer_network_simulation() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let peer = WebRTCP2PNetwork::new(config)?; peers.push(peer); info!("Created multi-peer network {}", i); 
@@ -172,48 +189,70 @@ async fn test_multi_peer_network_simulation() -> Result<()> { let mut successful_broadcasts = 0; for (i, peer) in peers.iter().enumerate() { let tx = create_test_transaction( - 100 + i as u64, - &format!("peer_{}", i), - &format!("peer_{}", (i + 1) % peers.len()), - 1000 + i as u64 * 100 + 100 + i as u64, + &format!("peer_{}", i), + &format!("peer_{}", (i + 1) % peers.len()), + 1000 + i as u64 * 100, ); - + if peer.broadcast_transaction(&tx).await.is_ok() { successful_broadcasts += 1; } } - - assert_eq!(successful_broadcasts, 5, "All peers should broadcast successfully"); - info!("All {} peers can broadcast transactions", successful_broadcasts); + + assert_eq!( + successful_broadcasts, 5, + "All peers should broadcast successfully" + ); + info!( + "All {} peers can broadcast transactions", + successful_broadcasts + ); // Test adaptive broadcasting on all peers let global_tx = create_test_transaction(200, "global", "all", 5000); let mut adaptive_broadcasts = 0; - + for (i, peer) in peers.iter().enumerate() { - if peer.adaptive_broadcast_transaction(&global_tx).await.is_ok() { + if peer + .adaptive_broadcast_transaction(&global_tx) + .await + .is_ok() + { adaptive_broadcasts += 1; } - + let adaptive_stats = peer.get_adaptive_network_stats().await; - info!("Peer {} adaptive stats - Discovered: {}, Connected: {}, DHT: {}", - i, adaptive_stats.discovered_peers_count, - adaptive_stats.connected_peers_count, - adaptive_stats.dht_nodes_count); + info!( + "Peer {} adaptive stats - Discovered: {}, Connected: {}, DHT: {}", + i, + adaptive_stats.discovered_peers_count, + adaptive_stats.connected_peers_count, + adaptive_stats.dht_nodes_count + ); } - - assert_eq!(adaptive_broadcasts, 5, "All peers should support adaptive broadcast"); + + assert_eq!( + adaptive_broadcasts, 5, + "All peers should support adaptive broadcast" + ); // Test cross-peer data requests for (i, peer) in peers.iter().enumerate() { let target_peer = (i + 2) % peers.len(); - let 
request_result = peer.request_blockchain_data( - "peer_data".to_string(), - format!("data_from_peer_{}", target_peer) - ).await; - - info!("Peer {} data request to peer {}: {:?}", - i, target_peer, request_result.is_ok()); + let request_result = peer + .request_blockchain_data( + "peer_data".to_string(), + format!("data_from_peer_{}", target_peer), + ) + .await; + + info!( + "Peer {} data request to peer {}: {:?}", + i, + target_peer, + request_result.is_ok() + ); } // Test network statistics across all peers @@ -221,9 +260,14 @@ async fn test_multi_peer_network_simulation() -> Result<()> { let stats = peer.get_network_stats(); let discovered = peer.get_discovered_peers().await; let connected = peer.get_connected_peers().await; - - info!("Peer {} final stats - Messages: {}, Discovered: {}, Connected: {}", - i, stats.messages_sent, discovered.len(), connected.len()); + + info!( + "Peer {} final stats - Messages: {}, Discovered: {}, Connected: {}", + i, + stats.messages_sent, + discovered.len(), + connected.len() + ); } // Cleanup all peers @@ -231,7 +275,7 @@ async fn test_multi_peer_network_simulation() -> Result<()> { peer.shutdown().await?; info!("Peer {} cleanup complete", i); } - + info!("Multi-peer network simulation test completed"); Ok(()) } @@ -243,7 +287,7 @@ async fn test_peer_connection_resilience() -> Result<()> { // Create network of peers let mut peers = Vec::new(); - + for i in 0..4 { let config = P2PConfig { node_id: format!("resilient_peer_{}", i), @@ -260,22 +304,25 @@ async fn test_peer_connection_resilience() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let peer = WebRTCP2PNetwork::new(config)?; peers.push(peer); } - + info!("Created {} resilient peers", peers.len()); // Test all peers are functional for (i, peer) in peers.iter().enumerate() { - let tx = create_test_transaction(300 + i as u64, &format!("resilient_{}", i), "network", 1000); + let tx = + create_test_transaction(300 + i as u64, &format!("resilient_{}", i), 
"network", 1000); let result = peer.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Peer {} should be functional", i); - + let stats = peer.get_network_stats(); - info!("Peer {} initial stats: messages={}, connections={}", - i, stats.messages_sent, stats.total_connections); + info!( + "Peer {} initial stats: messages={}, connections={}", + i, stats.messages_sent, stats.total_connections + ); } // Simulate peer failure by removing middle peer @@ -286,15 +333,20 @@ async fn test_peer_connection_resilience() -> Result<()> { // Test remaining peers still function for (i, peer) in peers.iter().enumerate() { - let tx = create_test_transaction(400 + i as u64, &format!("surviving_{}", i), "network", 1500); + let tx = + create_test_transaction(400 + i as u64, &format!("surviving_{}", i), "network", 1500); let result = peer.broadcast_transaction(&tx).await; assert!(result.is_ok(), "Surviving peer {} should still work", i); - + let discovered = peer.get_discovered_peers().await; let adaptive_stats = peer.get_adaptive_network_stats().await; - info!("Surviving peer {} - Discovered: {}, DHT: {}, Efficiency: {:.2}", - i, discovered.len(), adaptive_stats.dht_nodes_count, - adaptive_stats.discovery_efficiency); + info!( + "Surviving peer {} - Discovered: {}, DHT: {}, Efficiency: {:.2}", + i, + discovered.len(), + adaptive_stats.dht_nodes_count, + adaptive_stats.discovery_efficiency + ); } // Add recovery peer @@ -313,35 +365,47 @@ async fn test_peer_connection_resilience() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let recovery_peer = WebRTCP2PNetwork::new(recovery_config)?; peers.push(recovery_peer); // Test network recovery let mut recovered_peers = 0; for (i, peer) in peers.iter().enumerate() { - let tx = create_test_transaction(500 + i as u64, &format!("recovered_{}", i), "network", 2000); + let tx = + create_test_transaction(500 + i as u64, &format!("recovered_{}", i), "network", 2000); if peer.broadcast_transaction(&tx).await.is_ok() { 
recovered_peers += 1; } - + let stats = peer.get_network_stats(); let adaptive_stats = peer.get_adaptive_network_stats().await; - info!("Recovery peer {} - Messages: {}, DHT nodes: {}, Discovered: {}", - i, stats.messages_sent, adaptive_stats.dht_nodes_count, - adaptive_stats.discovered_peers_count); + info!( + "Recovery peer {} - Messages: {}, DHT nodes: {}, Discovered: {}", + i, + stats.messages_sent, + adaptive_stats.dht_nodes_count, + adaptive_stats.discovered_peers_count + ); } - - assert_eq!(recovered_peers, peers.len(), "All remaining peers should work after recovery"); - info!("Network recovery successful: {}/{} peers functional", - recovered_peers, peers.len()); + + assert_eq!( + recovered_peers, + peers.len(), + "All remaining peers should work after recovery" + ); + info!( + "Network recovery successful: {}/{} peers functional", + recovered_peers, + peers.len() + ); // Final cleanup for (i, peer) in peers.into_iter().enumerate() { peer.shutdown().await?; info!("Recovery cleanup peer {} complete", i); } - + info!("Peer connection resilience test completed"); Ok(()) } @@ -353,7 +417,7 @@ async fn test_peer_broadcast_patterns() -> Result<()> { // Create small peer network let mut peers = Vec::new(); - + for i in 0..3 { let config = P2PConfig { node_id: format!("broadcast_peer_{}", i), @@ -369,7 +433,7 @@ async fn test_peer_broadcast_patterns() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let peer = WebRTCP2PNetwork::new(config)?; peers.push(peer); } @@ -379,7 +443,7 @@ async fn test_peer_broadcast_patterns() -> Result<()> { // Test one-to-many broadcast pattern info!("Testing one-to-many broadcast pattern"); let broadcast_tx = create_test_transaction(600, "broadcaster", "everyone", 10000); - + for (i, peer) in peers.iter().enumerate() { let result = peer.broadcast_transaction(&broadcast_tx).await; assert!(result.is_ok(), "Peer {} should broadcast successfully", i); @@ -388,27 +452,33 @@ async fn test_peer_broadcast_patterns() -> 
Result<()> { // Test many-to-one data request pattern info!("Testing many-to-one data request pattern"); for (i, peer) in peers.iter().enumerate() { - let request_result = peer.request_blockchain_data( - "broadcast_data".to_string(), - format!("shared_data_item_{}", i) - ).await; - info!("Peer {} data request result: {:?}", i, request_result.is_ok()); + let request_result = peer + .request_blockchain_data( + "broadcast_data".to_string(), + format!("shared_data_item_{}", i), + ) + .await; + info!( + "Peer {} data request result: {:?}", + i, + request_result.is_ok() + ); } // Test concurrent broadcast pattern info!("Testing concurrent broadcast pattern"); let mut handles = Vec::new(); - + for (i, peer) in peers.iter().enumerate() { let peer_clone = peer.clone(); let handle = tokio::spawn(async move { let mut results = Vec::new(); for j in 0..3 { let tx = create_test_transaction( - 700 + j, - &format!("concurrent_peer_{}", i), - &format!("target_{}", j), - 1000 + j * 100 + 700 + j, + &format!("concurrent_peer_{}", i), + &format!("target_{}", j), + 1000 + j * 100, ); let result = peer_clone.broadcast_transaction(&tx).await; results.push(result.is_ok()); @@ -419,13 +489,20 @@ async fn test_peer_broadcast_patterns() -> Result<()> { } let concurrent_results = futures::future::join_all(handles).await; - + for (i, result) in concurrent_results.iter().enumerate() { match result { Ok(broadcasts) => { let successful = broadcasts.iter().filter(|&&x| x).count(); - info!("Peer {} concurrent broadcasts: {}/3 successful", i, successful); - assert_eq!(successful, 3, "All concurrent broadcasts should succeed for peer {}", i); + info!( + "Peer {} concurrent broadcasts: {}/3 successful", + i, successful + ); + assert_eq!( + successful, 3, + "All concurrent broadcasts should succeed for peer {}", + i + ); } Err(e) => { panic!("Peer {} concurrent broadcast task failed: {:?}", i, e); @@ -436,21 +513,29 @@ async fn test_peer_broadcast_patterns() -> Result<()> { // Test adaptive broadcast 
pattern info!("Testing adaptive broadcast pattern"); let adaptive_tx = create_test_transaction(800, "adaptive", "smart_routing", 5000); - + for (i, peer) in peers.iter().enumerate() { let result = peer.adaptive_broadcast_transaction(&adaptive_tx).await; - assert!(result.is_ok(), "Peer {} adaptive broadcast should succeed", i); - + assert!( + result.is_ok(), + "Peer {} adaptive broadcast should succeed", + i + ); + let adaptive_stats = peer.get_adaptive_network_stats().await; - info!("Peer {} adaptive broadcast stats - Efficiency: {:.2}, DHT: {}", - i, adaptive_stats.discovery_efficiency, adaptive_stats.dht_nodes_count); + info!( + "Peer {} adaptive broadcast stats - Efficiency: {:.2}, DHT: {}", + i, adaptive_stats.discovery_efficiency, adaptive_stats.dht_nodes_count + ); } // Final statistics for (i, peer) in peers.iter().enumerate() { let stats = peer.get_network_stats(); - info!("Peer {} final broadcast stats - Total messages: {}, Connections: {}", - i, stats.messages_sent, stats.total_connections); + info!( + "Peer {} final broadcast stats - Total messages: {}, Connections: {}", + i, stats.messages_sent, stats.total_connections + ); } // Cleanup @@ -458,7 +543,7 @@ async fn test_peer_broadcast_patterns() -> Result<()> { peer.shutdown().await?; info!("Broadcast test peer {} cleanup complete", i); } - + info!("Peer broadcast patterns test completed"); Ok(()) -} \ No newline at end of file +} diff --git a/crates/p2p-network/tests/quick_discovery_test.rs b/crates/p2p-network/tests/quick_discovery_test.rs index 72337ed..3ace21b 100644 --- a/crates/p2p-network/tests/quick_discovery_test.rs +++ b/crates/p2p-network/tests/quick_discovery_test.rs @@ -57,29 +57,31 @@ async fn test_network_creation_and_stats() -> Result<()> { }; let network = WebRTCP2PNetwork::new(config)?; - + // Test basic functionality without starting full network let stats = network.get_network_stats(); - info!("Initial stats: connections={}, messages_sent={}", - stats.active_connections, 
stats.messages_sent); - + info!( + "Initial stats: connections={}, messages_sent={}", + stats.active_connections, stats.messages_sent + ); + // Test broadcast without actual connections let tx = create_test_tx(1); let broadcast_result = network.broadcast_transaction(&tx).await; info!("Broadcast result: {:?}", broadcast_result.is_ok()); - + // Test getting connected peers (should be empty) let peers = network.get_connected_peers().await; info!("Connected peers: {}", peers.len()); - + // Test shutdown network.shutdown().await?; info!("Network shutdown completed"); - + assert_eq!(stats.active_connections, 0); assert_eq!(peers.len(), 0); assert!(broadcast_result.is_ok()); // Should succeed even with no peers - + info!("Network creation and stats test completed"); Ok(()) } @@ -91,7 +93,7 @@ async fn test_network_initialization_only() -> Result<()> { // Create multiple networks to test initialization let mut networks = Vec::new(); - + for i in 0..3 { let config = P2PConfig { node_id: format!("init_test_node_{}", i), @@ -103,31 +105,40 @@ async fn test_network_initialization_only() -> Result<()> { keep_alive_interval: 30, debug_mode: false, }; - + let network = WebRTCP2PNetwork::new(config)?; networks.push(network); } - + info!("Created {} networks", networks.len()); - + // Test each network individually for (i, network) in networks.iter().enumerate() { let stats = network.get_network_stats(); let peers = network.get_connected_peers().await; - - info!("Network {} - Stats: {}, Peers: {}", i, stats.active_connections, peers.len()); - + + info!( + "Network {} - Stats: {}, Peers: {}", + i, + stats.active_connections, + peers.len() + ); + // Test transaction creation and serialization let tx = create_test_tx(i as u64); let serialized = bincode::serialize(&tx)?; - info!("Network {} - Transaction size: {} bytes", i, serialized.len()); + info!( + "Network {} - Transaction size: {} bytes", + i, + serialized.len() + ); } - + // Cleanup for network in networks { 
network.shutdown().await?; } - + info!("Network initialization test completed"); Ok(()) } @@ -149,32 +160,34 @@ async fn test_discovered_peers_functionality() -> Result<()> { }; let network = WebRTCP2PNetwork::new(config)?; - + // Test get_discovered_peers method (should be empty initially) let discovered = network.get_discovered_peers().await; info!("Initially discovered peers: {}", discovered.len()); - + // Test network statistics let stats = network.get_network_stats(); - info!("Network stats - Total connections: {}, Active: {}, Messages sent: {}", - stats.total_connections, stats.active_connections, stats.messages_sent); - + info!( + "Network stats - Total connections: {}, Active: {}, Messages sent: {}", + stats.total_connections, stats.active_connections, stats.messages_sent + ); + // Test broadcasting multiple transactions for i in 0..5 { let tx = create_test_tx(100 + i); let result = network.broadcast_transaction(&tx).await; info!("Broadcast {} result: {:?}", i, result.is_ok()); } - + // Check stats after broadcasts let final_stats = network.get_network_stats(); info!("Final stats - Messages sent: {}", final_stats.messages_sent); - + network.shutdown().await?; - + assert_eq!(discovered.len(), 0); // No real discovery without network activity - // Stats may not be updated immediately in this test setup - + // Stats may not be updated immediately in this test setup + info!("Discovered peers functionality test completed"); Ok(()) } @@ -196,10 +209,10 @@ async fn test_concurrent_network_operations() -> Result<()> { }; let network = WebRTCP2PNetwork::new(config)?; - + // Create multiple concurrent transactions let mut handles = Vec::new(); - + for i in 0..10 { let net = network.clone(); let handle = tokio::spawn(async move { @@ -208,39 +221,44 @@ async fn test_concurrent_network_operations() -> Result<()> { }); handles.push(handle); } - + // Wait for all broadcasts to complete let results = futures::future::join_all(handles).await; - - let successful_broadcasts = 
results.iter() + + let successful_broadcasts = results + .iter() .filter_map(|r| r.as_ref().ok()) .filter(|r| r.is_ok()) .count(); - - info!("Successful concurrent broadcasts: {}/{}", successful_broadcasts, results.len()); - + + info!( + "Successful concurrent broadcasts: {}/{}", + successful_broadcasts, + results.len() + ); + // Test concurrent peer queries let mut peer_handles = Vec::new(); for _ in 0..5 { let net = network.clone(); - let handle = tokio::spawn(async move { - net.get_connected_peers().await - }); + let handle = tokio::spawn(async move { net.get_connected_peers().await }); peer_handles.push(handle); } - + let peer_results = futures::future::join_all(peer_handles).await; - let successful_queries = peer_results.iter() - .filter_map(|r| r.as_ref().ok()) - .count(); - - info!("Successful concurrent peer queries: {}/{}", successful_queries, peer_results.len()); - + let successful_queries = peer_results.iter().filter_map(|r| r.as_ref().ok()).count(); + + info!( + "Successful concurrent peer queries: {}/{}", + successful_queries, + peer_results.len() + ); + network.shutdown().await?; - + assert!(successful_broadcasts >= 8); // Most should succeed assert_eq!(successful_queries, 5); // All peer queries should succeed - + info!("Concurrent network operations test completed"); Ok(()) -} \ No newline at end of file +} diff --git a/src/main.rs b/src/main.rs index 04ff209..00d5b7b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -8,12 +8,12 @@ use std::path::Path; use consensus::consensus_engine::{PolyTorusUtxoConsensusLayer, UtxoConsensusConfig}; use execution::execution_engine::{PolyTorusUtxoExecutionLayer, UtxoExecutionConfig}; use p2p_network::{P2PConfig, WebRTCP2PNetwork}; +use serde::{Deserialize, Serialize}; use traits::{ Hash, ScriptTransactionType, Transaction, TxInput, TxOutput, UtxoConsensusLayer, UtxoExecutionLayer, UtxoId, UtxoTransaction, }; use wallet::{HdWallet, KeyPair, KeyType, Wallet}; -use serde::{Deserialize, Serialize}; pub struct 
PolyTorusBlockchain { execution_layer: PolyTorusUtxoExecutionLayer, @@ -41,7 +41,7 @@ pub struct PersistentBlockchainState { pub current_slot: u64, pub total_supply: u64, pub utxo_set_hash: String, - pub canonical_chain: Vec, // Block hashes in order + pub canonical_chain: Vec, // Block hashes in order pub last_block_hash: Option, // Hash of the latest block } @@ -59,15 +59,19 @@ impl Storage { .map_err(|e| anyhow::anyhow!("Failed to serialize blockchain state: {}", e))?; self.db.insert(BLOCKCHAIN_STATE_KEY, serialized)?; self.db.flush()?; - info!("Blockchain state saved: height={}, slot={}", state.chain_height, state.current_slot); + info!( + "Blockchain state saved: height={}, slot={}", + state.chain_height, state.current_slot + ); Ok(()) } pub fn load_blockchain_state(&self) -> Result> { match self.db.get(BLOCKCHAIN_STATE_KEY)? { Some(data) => { - let state = bincode::deserialize(&data) - .map_err(|e| anyhow::anyhow!("Failed to deserialize blockchain state: {}", e))?; + let state = bincode::deserialize(&data).map_err(|e| { + anyhow::anyhow!("Failed to deserialize blockchain state: {}", e) + })?; info!("Blockchain state loaded from storage"); Ok(Some(state)) } @@ -109,7 +113,10 @@ impl Storage { } /// Load all blocks referenced in canonical chain - pub fn load_blocks_for_chain(&self, canonical_chain: &[String]) -> Result> { + pub fn load_blocks_for_chain( + &self, + canonical_chain: &[String], + ) -> Result> { let mut blocks = std::collections::HashMap::new(); for block_hash in canonical_chain { if let Some(block) = self.load_block(block_hash)? 
{ @@ -122,7 +129,8 @@ impl Storage { impl PolyTorusBlockchain { pub fn new() -> Result { - let data_dir = env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); + let data_dir = + env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); Self::new_with_storage_and_p2p_config(&data_dir, None) } @@ -132,11 +140,15 @@ impl PolyTorusBlockchain { pub fn new_with_p2p_config(p2p_config: Option) -> Result { // Use persistent storage in current directory - let data_dir = env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); + let data_dir = + env::var("POLYTORUS_DATA_DIR").unwrap_or_else(|_| "./polytorus_data".to_string()); Self::new_with_storage_and_p2p_config(&data_dir, p2p_config) } - pub fn new_with_storage_and_p2p_config(data_dir: &str, p2p_config: Option) -> Result { + pub fn new_with_storage_and_p2p_config( + data_dir: &str, + p2p_config: Option, + ) -> Result { // Initialize persistent storage first let storage = Storage::new(data_dir)?; info!("Initialized persistent storage at: {}", data_dir); @@ -156,16 +168,18 @@ impl PolyTorusBlockchain { ); let execution_layer = PolyTorusUtxoExecutionLayer::new(execution_config)?; - + // Try to load existing blockchain state let consensus_layer = if let Some(persistent_state) = storage.load_blockchain_state()? 
{ - info!("Restoring consensus layer from persistent state: height={}, slot={}", - persistent_state.chain_height, persistent_state.current_slot); - + info!( + "Restoring consensus layer from persistent state: height={}, slot={}", + persistent_state.chain_height, persistent_state.current_slot + ); + // Load all blocks for the canonical chain let blocks = storage.load_blocks_for_chain(&persistent_state.canonical_chain)?; info!("Loaded {} blocks from storage", blocks.len()); - + // Create consensus layer with restored state PolyTorusUtxoConsensusLayer::new_with_restored_state_and_blocks( consensus_config, @@ -254,7 +268,9 @@ impl PolyTorusBlockchain { } /// Get adaptive network statistics - pub async fn get_adaptive_network_stats(&self) -> p2p_network::adaptive_network::AdaptiveNetworkStats { + pub async fn get_adaptive_network_stats( + &self, + ) -> p2p_network::adaptive_network::AdaptiveNetworkStats { self.p2p_network.get_adaptive_network_stats().await } @@ -272,7 +288,10 @@ impl PolyTorusBlockchain { // Check if blockchain state already exists if let Some(persistent_state) = self.storage.load_blockchain_state()? 
{ info!("Found existing blockchain state - skipping genesis initialization"); - info!("Current height: {}, slot: {}", persistent_state.chain_height, persistent_state.current_slot); + info!( + "Current height: {}, slot: {}", + persistent_state.chain_height, persistent_state.current_slot + ); // Return a dummy ID since we're not creating new genesis return Ok(UtxoId { tx_hash: "genesis_tx".to_string(), @@ -299,7 +318,7 @@ impl PolyTorusBlockchain { self.execution_layer .initialize_genesis_utxo_set(vec![(genesis_utxo_id.clone(), genesis_utxo)])?; info!("Genesis UTXO created: {:?}", genesis_utxo_id); - + // Save initial genesis state self.save_blockchain_state().await?; info!("Genesis initialization completed successfully"); @@ -439,10 +458,10 @@ impl PolyTorusBlockchain { } else { info!("Block saved to persistent storage: {}", block.hash); } - + self.consensus_layer.add_utxo_block(block).await?; info!("Block added to chain"); - + // Save state after successful block addition if let Err(e) = self.save_blockchain_state().await { error!("Failed to save blockchain state: {}", e); @@ -497,7 +516,7 @@ impl PolyTorusBlockchain { let utxo_set_hash = self.execution_layer.get_utxo_set_hash().await?; let total_supply = self.execution_layer.get_total_supply().await?; let canonical_chain = self.consensus_layer.get_canonical_chain().await?; - + // Get the hash of the latest block (last in canonical chain) let last_block_hash = if canonical_chain.len() > 1 { // Skip genesis block and get the latest @@ -891,7 +910,7 @@ async fn async_main() -> Result<()> { // Check if adaptive mode is enabled let adaptive_mode = sub_matches.get_flag("adaptive"); - + // Start P2P network if adaptive_mode { info!("Starting adaptive P2P network..."); @@ -933,13 +952,13 @@ async fn async_main() -> Result<()> { Some(("network-status", _)) => { info!("Getting P2P network status..."); let blockchain = PolyTorusBlockchain::new()?; - + // Get basic network statistics let stats = 
blockchain.get_p2p_network_stats(); - + // Get adaptive network statistics let adaptive_stats = blockchain.get_adaptive_network_stats().await; - + println!("P2P Network Status:"); println!("=================="); println!("Total Connections: {}", stats.total_connections); @@ -951,37 +970,50 @@ async fn async_main() -> Result<()> { println!("Connection Errors: {}", stats.connection_errors); println!(); println!("Adaptive Network Statistics:"); - println!("Discovered Peers: {}", adaptive_stats.discovered_peers_count); + println!( + "Discovered Peers: {}", + adaptive_stats.discovered_peers_count + ); println!("DHT Nodes: {}", adaptive_stats.dht_nodes_count); println!("Connected Peers: {}", adaptive_stats.connected_peers_count); - println!("Discovery Efficiency: {:.2}%", adaptive_stats.discovery_efficiency * 100.0); + println!( + "Discovery Efficiency: {:.2}%", + adaptive_stats.discovery_efficiency * 100.0 + ); } Some(("peers", _)) => { info!("Getting peer information..."); let blockchain = PolyTorusBlockchain::new()?; - + // Get connected peers let connected_peers = blockchain.get_connected_peers().await; - + // Get discovered peers let discovered_peers = blockchain.get_discovered_peers().await; - + println!("Peer Information:"); println!("================"); println!("Connected Peers ({}):", connected_peers.len()); for (i, peer) in connected_peers.iter().enumerate() { println!(" {}. {}", i + 1, peer); } - + println!(); println!("Discovered Peers ({}):", discovered_peers.len()); for (i, peer) in discovered_peers.iter().enumerate() { let last_seen_mins = (std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() - .as_secs() - peer.last_seen) / 60; - println!(" {}. {} ({}) - Last seen: {} min ago", - i + 1, peer.node_id, peer.address, last_seen_mins); + .as_secs() + - peer.last_seen) + / 60; + println!( + " {}. 
{} ({}) - Last seen: {} min ago", + i + 1, + peer.node_id, + peer.address, + last_seen_mins + ); } } Some(("deploy-contract", sub_matches)) => { From e8cc7f73b0398623c8c0f99183cd17214c78239c Mon Sep 17 00:00:00 2001 From: quantumshiro Date: Sat, 2 Aug 2025 23:25:13 +0900 Subject: [PATCH 5/5] fix: test --- src/main.rs | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index 00d5b7b..321036c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,8 +30,6 @@ pub struct Storage { } const BLOCKCHAIN_STATE_KEY: &[u8] = b"blockchain_state"; -const CHAIN_HEIGHT_KEY: &[u8] = b"chain_height"; -const CURRENT_SLOT_KEY: &[u8] = b"current_slot"; const BLOCK_PREFIX: &[u8] = b"block_"; /// Serializable blockchain state for persistence @@ -1108,29 +1106,50 @@ mod integration_tests { #[tokio::test] async fn test_blockchain_initialization() -> Result<()> { - let mut blockchain = PolyTorusBlockchain::new()?; + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + let test_dir = format!("./test_data_init_{}", timestamp); + let mut blockchain = PolyTorusBlockchain::new_with_storage(&test_dir)?; let genesis_id = blockchain.initialize_genesis().await?; assert_eq!(genesis_id.tx_hash, "genesis_tx"); assert_eq!(genesis_id.output_index, 0); + + // Cleanup test directory + drop(blockchain); + let _ = std::fs::remove_dir_all(&test_dir); Ok(()) } #[tokio::test] async fn test_transaction_processing() -> Result<()> { - let mut blockchain = PolyTorusBlockchain::new()?; + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + let test_dir = format!("./test_data_tx_{}", timestamp); + let mut blockchain = PolyTorusBlockchain::new_with_storage(&test_dir)?; let _genesis_id = blockchain.initialize_genesis().await?; let tx_hash = blockchain.send_transaction("alice", "bob", 100_000).await?; 
assert!(!tx_hash.is_empty()); assert!(tx_hash.starts_with("tx_alice_bob_100000_")); + + // Cleanup test directory + drop(blockchain); + let _ = std::fs::remove_dir_all(&test_dir); Ok(()) } #[tokio::test] async fn test_blockchain_status() -> Result<()> { - let blockchain = PolyTorusBlockchain::new()?; + use std::time::{SystemTime, UNIX_EPOCH}; + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + let test_dir = format!("./test_data_status_{}", timestamp); + let mut blockchain = PolyTorusBlockchain::new_with_storage(&test_dir)?; // This should not panic blockchain.get_status().await?; + + // Cleanup test directory + drop(blockchain); + let _ = std::fs::remove_dir_all(&test_dir); Ok(()) } }