diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 545b0742..64d3e34c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -18,7 +18,7 @@ jobs:
         repo-token: ${{ secrets.GITHUB_TOKEN }}

     - name: Run clippy
-      run: cargo clippy --all -- -D warnings
+      run: cargo clippy --all --all-targets -- -D warnings

   fmt:
diff --git a/Cargo.lock b/Cargo.lock
index 5f290c0a..da1e4d38 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -538,8 +538,11 @@ dependencies = [
  "anyhow",
  "celestia-types",
  "dotenvy",
+ "futures",
  "http",
  "jsonrpsee",
+ "libp2p",
+ "log",
  "rand",
  "serde",
  "thiserror",
@@ -557,10 +560,13 @@ dependencies = [
  "cid",
  "const_format",
  "enum_dispatch",
+ "libp2p-identity",
+ "multiaddr",
  "nmt-rs",
  "ruint",
  "serde",
  "serde_json",
+ "serde_repr",
  "sha2 0.10.7",
  "tendermint",
  "tendermint-proto",
diff --git a/celestia/src/native.rs b/celestia/src/native.rs
index ae1d058e..4e5e7847 100644
--- a/celestia/src/native.rs
+++ b/celestia/src/native.rs
@@ -3,7 +3,7 @@ use std::env;
 use anyhow::{Context, Result};
 use celestia_node::node::{Node, NodeConfig};
 use celestia_rpc::prelude::*;
-use libp2p::{core::upgrade::Version, identity, noise, tcp, yamux, Multiaddr, Transport};
+use libp2p::{core::upgrade::Version, identity, noise, tcp, yamux, Transport};

 const WS_URL: &str = "ws://localhost:26658";
@@ -15,15 +15,11 @@ pub async fn run() -> Result<()> {
     let auth_token = env::var("CELESTIA_NODE_AUTH_TOKEN_ADMIN")?;
     let client = celestia_rpc::client::new_websocket(WS_URL, Some(&auth_token)).await?;
     let bridge_info = client.p2p_info().await?;
-    let bridge_maddrs: Vec<Multiaddr> = bridge_info
-        .addrs
-        .into_iter()
-        .map(|addr| addr.parse().context("Parsing addr failed"))
-        .collect::<Result<_, _>>()?;

     println!("bridge id: {:?}", bridge_info.id);
-    println!("bridge listens on: {bridge_maddrs:?}");
+    println!("bridge listens on: {:?}", bridge_info.addrs);

-    let bridge_ma = bridge_maddrs
+    let bridge_ma = bridge_info
+        .addrs
         .into_iter()
         .find(|ma| ma.protocol_stack().any(|protocol| protocol == "tcp"))
         .context("Bridge doesn't listen on tcp")?;
diff --git a/node/tests/node.rs b/node/tests/node.rs
index 2a374128..382355d1 100644
--- a/node/tests/node.rs
+++ b/node/tests/node.rs
@@ -28,14 +28,9 @@ async fn get_bridge_tcp_ma() -> Multiaddr {
         .unwrap();
     let bridge_info = client.p2p_info().await.unwrap();

-    let bridge_maddrs: Vec<Multiaddr> = bridge_info
-        .addrs
-        .into_iter()
-        .map(|addr| addr.parse())
-        .collect::<Result<_, _>>()
-        .unwrap();

-    bridge_maddrs
+    bridge_info
+        .addrs
         .into_iter()
         .find(|ma| ma.protocol_stack().any(|protocol| protocol == "tcp"))
         .expect("Bridge doesn't listen on tcp")
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 0c839fd2..93b5f0b9 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -17,5 +17,12 @@ jsonrpsee = { version = "0.20", features = ["http-client", "ws-client"] }
 [dev-dependencies]
 anyhow = "1.0.71"
 dotenvy = "0.15.7"
+futures = "0.3.28"
+libp2p = "0.52.3"
+log = "0.4"
 rand = "0.8.5"
 tokio = { version = "1.32.0", features = ["rt", "macros"] }
+
+[features]
+default = ["p2p"]
+p2p = ["celestia-types/p2p"]
diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs
index 830c6be3..f0024005 100644
--- a/rpc/src/lib.rs
+++ b/rpc/src/lib.rs
@@ -2,6 +2,7 @@ mod blob;
 pub mod client;
 mod error;
 mod header;
+#[cfg(feature = "p2p")]
 pub mod p2p;
 mod share;
 mod state;
@@ -9,6 +10,7 @@
 pub use crate::blob::BlobClient;
 pub use crate::error::{Error, Result};
 pub use crate::header::HeaderClient;
+#[cfg(feature = "p2p")]
 pub use crate::p2p::P2PClient;
 pub use crate::share::ShareClient;
 pub use crate::state::StateClient;
@@ -16,6 +18,7 @@ pub use crate::state::StateClient;
 pub mod prelude {
     pub use crate::BlobClient;
     pub use crate::HeaderClient;
+    #[cfg(feature = "p2p")]
     pub use crate::P2PClient;
     pub use crate::ShareClient;
     pub use crate::StateClient;
diff --git a/rpc/src/p2p.rs b/rpc/src/p2p.rs
index 723b23ba..bd19ddd5 100644
--- a/rpc/src/p2p.rs
+++ b/rpc/src/p2p.rs
@@ -1,17 +1,85 @@
+use celestia_types::p2p::{
+    AddrInfo, BandwidthStats, Connectedness, PeerId, Reachability, ResourceManagerStats,
+};
 use jsonrpsee::proc_macros::rpc;
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct AddrInfo {
-    #[serde(rename = "ID")]
-    pub id: String,
-    // TODO: multiaddr
-    #[serde(rename = "Addrs")]
-    pub addrs: Vec<String>,
-}

 #[rpc(client)]
 pub trait P2P {
+    /// BandwidthForPeer returns a Stats struct with bandwidth metrics associated with the given peer.ID. The metrics returned include all traffic sent / received for the peer, regardless of protocol.
+    #[method(name = "p2p.BandwidthForPeer")]
+    async fn p2p_bandwidth_for_peer(&self, peer_id: &PeerId) -> Result<BandwidthStats, Error>;
+
+    /// BandwidthForProtocol returns a Stats struct with bandwidth metrics associated with the given protocol.ID.
+    #[method(name = "p2p.BandwidthForProtocol")]
+    async fn p2p_bandwidth_for_protocol(&self, protocol_id: &str) -> Result<BandwidthStats, Error>;
+
+    /// BandwidthStats returns a Stats struct with bandwidth metrics for all data sent/received by the local peer, regardless of protocol or remote peer IDs.
+    #[method(name = "p2p.BandwidthStats")]
+    async fn p2p_bandwidth_stats(&self) -> Result<BandwidthStats, Error>;
+
+    // This method does not report errors due to a workaround to a go-jsonrpc bug, see https://github.com/eigerco/celestia-node-rs/issues/53
+    /// BlockPeer adds a peer to the set of blocked peers.
+    #[method(name = "p2p.BlockPeer")]
+    async fn p2p_block_peer(&self, peer_id: &PeerId);
+
+    // This method does not report errors due to a workaround to a go-jsonrpc bug, see https://github.com/eigerco/celestia-node-rs/issues/53
+    /// ClosePeer closes the connection to a given peer.
+    #[method(name = "p2p.ClosePeer")]
+    async fn p2p_close_peer(&self, peer_id: &PeerId);
+
+    // This method does not report errors due to a workaround to a go-jsonrpc bug, see https://github.com/eigerco/celestia-node-rs/issues/53
+    /// Connect ensures there is a connection between this host and the given peer.
+    #[method(name = "p2p.Connect")]
+    async fn p2p_connect(&self, address: &AddrInfo);
+
+    /// Connectedness returns a state signaling connection capabilities.
+    #[method(name = "p2p.Connectedness")]
+    async fn p2p_connectedness(&self, peer_id: &PeerId) -> Result<Connectedness, Error>;
+
+    /// Info returns address information about the host.
     #[method(name = "p2p.Info")]
     async fn p2p_info(&self) -> Result<AddrInfo, Error>;
+
+    /// IsProtected returns whether the given peer is protected.
+    #[method(name = "p2p.IsProtected")]
+    async fn p2p_is_protected(&self, peer_id: &PeerId, tag: &str) -> Result<bool, Error>;
+
+    /// ListBlockedPeers returns a list of blocked peers.
+    #[method(name = "p2p.ListBlockedPeers")]
+    async fn p2p_list_blocked_peers(&self) -> Result<Vec<PeerId>, Error>;
+
+    /// NATStatus returns the current NAT status.
+    #[method(name = "p2p.NATStatus")]
+    async fn p2p_nat_status(&self) -> Result<Reachability, Error>;
+
+    /// PeerInfo returns a small slice of information Peerstore has on the given peer.
+    #[method(name = "p2p.PeerInfo")]
+    async fn p2p_peer_info(&self, peer_id: &PeerId) -> Result<AddrInfo, Error>;
+
+    /// Peers returns connected peers.
+    #[method(name = "p2p.Peers")]
+    async fn p2p_peers(&self) -> Result<Vec<PeerId>, Error>;
+
+    // This method does not report errors due to a workaround to a go-jsonrpc bug, see https://github.com/eigerco/celestia-node-rs/issues/53
+    /// Protect adds a peer to the list of peers who have a bidirectional peering agreement that they are protected from being trimmed, dropped or negatively scored.
+    #[method(name = "p2p.Protect")]
+    async fn p2p_protect(&self, peer_id: &PeerId, tag: &str);
+
+    // We might get null in response here, so Option is needed
+    /// PubSubPeers returns the peer IDs of the peers joined on the given topic.
+    #[method(name = "p2p.PubSubPeers")]
+    async fn p2p_pub_sub_peers(&self, topic: &str) -> Result<Option<Vec<PeerId>>, Error>;
+
+    /// ResourceState returns the state of the resource manager.
+    #[method(name = "p2p.ResourceState")]
+    async fn p2p_resource_state(&self) -> Result<ResourceManagerStats, Error>;
+
+    // This method does not report errors due to a workaround to a go-jsonrpc bug, see https://github.com/eigerco/celestia-node-rs/issues/53
+    /// UnblockPeer removes a peer from the set of blocked peers.
+    #[method(name = "p2p.UnblockPeer")]
+    async fn p2p_unblock_peer(&self, peer_id: &PeerId);
+
+    /// Unprotect removes a peer from the list of peers who have a bidirectional peering agreement that they are protected from being trimmed, dropped or negatively scored, returning a bool representing whether the given peer is protected or not.
+    #[method(name = "p2p.Unprotect")]
+    async fn p2p_unprotect(&self, peer_id: &PeerId, tag: &str) -> Result<bool, Error>;
 }
diff --git a/rpc/tests/p2p.rs b/rpc/tests/p2p.rs
new file mode 100644
index 00000000..d7e78cf5
--- /dev/null
+++ b/rpc/tests/p2p.rs
@@ -0,0 +1,268 @@
+#![cfg(feature = "p2p")]
+
+use crate::utils::client::{new_test_client, AuthLevel};
+use celestia_rpc::prelude::*;
+use celestia_types::p2p;
+use libp2p::{identity, PeerId};
+use tokio::time::{sleep, Duration};
+
+pub mod utils;
+
+#[tokio::test]
+async fn info_test() {
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    client.p2p_info().await.expect("Failed to get node info");
+}
+
+#[tokio::test]
+async fn add_remove_peer_test() {
+    // add and then remove a peer, testing outputs from `p2p.Peers` and `p2p.Connectedness`
+    let addr_info = utils::tiny_node::start_tiny_node()
+        .await
+        .expect("failed to spin up second node");
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+
+    let initial_peers = client
+        .p2p_peers()
+        .await
+        .expect("failed to get initial peer list");
+    assert!(!initial_peers.contains(&addr_info.id));
+
+    let connected_to_peer = client
+        .p2p_connectedness(&addr_info.id)
+        .await
+        .expect("failed to check initial connection to peer");
+    assert_eq!(connected_to_peer, p2p::Connectedness::NotConnected);
+
+    client
+        .p2p_connect(&addr_info)
+        .await
+        .expect("request to connect to second node failed");
+    rpc_call_delay().await;
+
+    let peers = client
+        .p2p_peers()
+        .await
+        .expect("failed to get peer list after connect request");
+    assert!(peers.contains(&addr_info.id));
+
+    let connected_to_peer = client
+        .p2p_connectedness(&addr_info.id)
+        .await
+        .expect("failed to check connection to peer after connect request");
+    assert_eq!(connected_to_peer, p2p::Connectedness::Connected);
+
+    client
+        .p2p_close_peer(&addr_info.id)
+        .await
+        .expect("Failed to close peer");
+    rpc_call_delay().await;
+
+    let final_peers = client
+        .p2p_peers()
+        .await
+        .expect("failed to get peer list after close peer request");
+    assert!(!final_peers.contains(&addr_info.id));
+}
+
+#[tokio::test]
+async fn protect_unprotect_test() {
+    // check whether the reported protect status reacts correctly to protect/unprotect requests and
+    // whether the node takes the tag into account
+
+    const PROTECT_TAG: &str = "test-tag";
+    const ANOTHER_PROTECT_TAG: &str = "test-tag-2";
+
+    let addr_info = utils::tiny_node::start_tiny_node()
+        .await
+        .expect("failed to spin up second node");
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+
+    client
+        .p2p_connect(&addr_info)
+        .await
+        .expect("request to connect to second node failed");
+    rpc_call_delay().await;
+
+    let is_protected = client
+        .p2p_is_protected(&addr_info.id, PROTECT_TAG)
+        .await
+        .expect("failed to check initial protect status");
+    assert!(!is_protected);
+
+    client
+        .p2p_protect(&addr_info.id, PROTECT_TAG)
+        .await
+        .expect("protect request failed");
+    rpc_call_delay().await;
+
+    let is_protected = client
+        .p2p_is_protected(&addr_info.id, PROTECT_TAG)
+        .await
+        .expect("failed to check protect status after protect request");
+    assert!(is_protected);
+
+    let is_protected_another_tag = client
+        .p2p_is_protected(&addr_info.id, ANOTHER_PROTECT_TAG)
+        .await
+        .expect("failed to check protect status for another tag after protect request");
+    assert!(!is_protected_another_tag);
+
+    client
+        .p2p_unprotect(&addr_info.id, PROTECT_TAG)
+        .await
+        .expect("unprotect request failed");
+    rpc_call_delay().await;
+
+    let is_protected = client
+        .p2p_is_protected(&addr_info.id, PROTECT_TAG)
+        .await
+        .expect("failed to check protect status after unprotect request");
+    assert!(!is_protected);
+}
+
+#[tokio::test]
+async fn peer_block_unblock_test() {
+    let addr_info = utils::tiny_node::start_tiny_node()
+        .await
+        .expect("failed to spin up second node");
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+
+    let blocked_peers = client
+        .p2p_list_blocked_peers()
+        .await
+        .expect("failed to get blocked peer list");
+    assert!(!blocked_peers.contains(&addr_info.id));
+
+    client
+        .p2p_block_peer(&addr_info.id)
+        .await
+        .expect("failed to block peer");
+    rpc_call_delay().await;
+
+    let blocked_peers = client
+        .p2p_list_blocked_peers()
+        .await
+        .expect("failed to get blocked peer list");
+    assert!(blocked_peers.contains(&addr_info.id));
+
+    client
+        .p2p_unblock_peer(&addr_info.id)
+        .await
+        .expect("failed to unblock peer");
+    rpc_call_delay().await;
+
+    let blocked_peers = client
+        .p2p_list_blocked_peers()
+        .await
+        .expect("failed to get blocked peer list");
+    assert!(!blocked_peers.contains(&addr_info.id));
+}
+
+#[tokio::test]
+async fn bandwidth_stats_test() {
+    // just check whether we can get the data without error; the node could have been running for any
+    // amount of time, so any value should be valid.
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    client
+        .p2p_bandwidth_stats()
+        .await
+        .expect("failed to get bandwidth stats");
+}
+
+#[tokio::test]
+async fn bandwidth_for_peer_test() {
+    let local_key = identity::Keypair::generate_ed25519();
+    let local_peer_id = p2p::PeerId(PeerId::from(local_key.public()));
+
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    let stats = client
+        .p2p_bandwidth_for_peer(&local_peer_id)
+        .await
+        .expect("failed to get bandwidth stats for peer");
+
+    // there should be no data exchanged with a peer that we're not connected to
+    assert_eq!(stats.total_in, 0.0);
+    assert_eq!(stats.total_out, 0.0);
+    assert_eq!(stats.rate_in, 0.0);
+    assert_eq!(stats.rate_out, 0.0);
+}
+
+#[tokio::test]
+async fn bandwidth_for_protocol_test() {
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+
+    // query for a nonsense protocol name so that we get all zeros in response,
+    // until we have a better way of inducing traffic
+    let stats = client
+        .p2p_bandwidth_for_protocol("/foo/bar")
+        .await
+        .expect("failed to get bandwidth stats");
+    assert_eq!(stats.total_in, 0.0);
+    assert_eq!(stats.total_out, 0.0);
+    assert_eq!(stats.rate_in, 0.0);
+    assert_eq!(stats.rate_out, 0.0);
+}
+
+#[tokio::test]
+async fn nat_status_test() {
+    // just query for the status and make sure no errors happen, since any value is potentially correct
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    let _ = client
+        .p2p_nat_status()
+        .await
+        .expect("failed to query NAT status");
+}
+
+#[tokio::test]
+async fn peer_info_test() {
+    let addr_info = utils::tiny_node::start_tiny_node()
+        .await
+        .expect("failed to spin up second node");
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+
+    client
+        .p2p_connect(&addr_info)
+        .await
+        .expect("request to connect to second node failed");
+    rpc_call_delay().await;
+
+    let connectedness = client
+        .p2p_connectedness(&addr_info.id)
+        .await
+        .expect("failed to check connection to peer after connect request");
+    assert_eq!(connectedness, p2p::Connectedness::Connected);
+
+    let peer_info = client
+        .p2p_peer_info(&addr_info.id)
+        .await
+        .expect("failed to get peer info");
+
+    assert_eq!(addr_info.id, peer_info.id);
+}
+
+#[tokio::test]
+async fn pub_sub_peers_test() {
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    let peers = client
+        .p2p_pub_sub_peers("topic")
+        .await
+        .expect("failed to get topic peers");
+
+    assert!(peers.is_none())
+}
+
+#[tokio::test]
+async fn resource_state_test() {
+    // cannot really test values here, just make sure it deserializes correctly
+    let client = new_test_client(AuthLevel::Admin).await.unwrap();
+    client
+        .p2p_resource_state()
+        .await
+        .expect("failed to get resource state");
+}
+
+async fn rpc_call_delay() {
+    // delay for RPC calls like connect/close to let the node finish the operation before we query it
+    // again. Below 150 ms we start getting intermittent failures.
+    sleep(Duration::from_millis(150)).await;
+}
diff --git a/rpc/tests/utils/mod.rs b/rpc/tests/utils/mod.rs
index 0683d3d5..bfe148d2 100644
--- a/rpc/tests/utils/mod.rs
+++ b/rpc/tests/utils/mod.rs
@@ -2,6 +2,8 @@ use celestia_types::nmt::{Namespace, NS_ID_V0_SIZE};
 use rand::{Rng, RngCore};

 pub mod client;
+#[cfg(feature = "p2p")]
+pub mod tiny_node;

 fn ns_to_u128(ns: Namespace) -> u128 {
     let mut bytes = [0u8; 16];
diff --git a/rpc/tests/utils/tiny_node.rs b/rpc/tests/utils/tiny_node.rs
new file mode 100644
index 00000000..e78e8eee
--- /dev/null
+++ b/rpc/tests/utils/tiny_node.rs
@@ -0,0 +1,82 @@
+//! Tiny p2p node without any defined behaviour that celestia can connect to, so that we can test
+//! RPC p2p calls
+
+use celestia_types::p2p;
+use futures::StreamExt;
+use libp2p::{
+    core::upgrade::Version,
+    identity, noise,
+    swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent},
+    tcp, yamux, PeerId, Transport,
+};
+use tokio::{
+    sync::mpsc,
+    time::{sleep, Duration},
+};
+
+// how long to wait during startup for the node to start listening on interfaces, before we return
+// a list of addresses
+const NODE_ADDRESS_ACQUIRE_DELAY_TIME: Duration = Duration::from_millis(100);
+
+/// Our network behaviour.
+#[derive(NetworkBehaviour)]
+struct Behaviour {
+    keep_alive: keep_alive::Behaviour,
+}
+
+impl Behaviour {
+    fn new() -> Self {
+        Self {
+            keep_alive: keep_alive::Behaviour,
+        }
+    }
+}
+
+pub async fn start_tiny_node() -> anyhow::Result<p2p::AddrInfo> {
+    // Create identity
+    let local_key = identity::Keypair::generate_ed25519();
+    let local_peer_id = PeerId::from(local_key.public());
+    log::debug!("local peer id: {local_peer_id:?}");
+
+    // Setup swarm
+    let transport = tcp::tokio::Transport::default()
+        .upgrade(Version::V1Lazy)
+        .authenticate(noise::Config::new(&local_key)?)
+        .multiplex(yamux::Config::default())
+        .boxed();
+
+    let mut swarm =
+        SwarmBuilder::with_tokio_executor(transport, Behaviour::new(), local_peer_id).build();
+
+    swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
+
+    let (addr_tx, mut addr_rx) = mpsc::channel(32);
+
+    tokio::task::spawn(async move {
+        loop {
+            if let Some(SwarmEvent::NewListenAddr { address, .. }) = swarm.next().await {
+                dbg!(&address);
+                if addr_tx.send(address).await.is_err() {
+                    log::warn!("received new addr after the set startup time, unit tests might not have all the node addresses");
+                }
+            }
+        }
+    });
+
+    // give the node a second to acquire addresses and then gather all the ones we received
+    sleep(NODE_ADDRESS_ACQUIRE_DELAY_TIME).await;
+    addr_rx.close();
+
+    let mut addrs = vec![];
+    while let Some(addr) = addr_rx.recv().await {
+        addrs.push(addr);
+    }
+
+    let addr = p2p::AddrInfo {
+        id: p2p::PeerId(local_peer_id),
+        addrs,
+    };
+    log::debug!("Listening addresses: {addr:?}");
+
+    Ok(addr)
+}
diff --git a/types/Cargo.toml b/types/Cargo.toml
index 8df7e9dd..4b4f38fc 100644
--- a/types/Cargo.toml
+++ b/types/Cargo.toml
@@ -12,9 +12,12 @@ celestia-proto = { workspace = true }
 const_format = "0.2.31"
 enum_dispatch = "0.3.12"
 cid = { version = "0.10.1", default-features = false, features = ["std"] }
+libp2p-identity = { version = "0.2.3", optional = true }
+multiaddr = { version = "0.18.0", optional = true }
 nmt-rs = { workspace = true }
 ruint = { version = "1.8.0", features = ["serde"] }
 serde = { version = "1.0.164", features = ["derive"] }
+serde_repr = { version = "0.1", optional = true }
 sha2 = "0.10.7"
 tendermint = { workspace = true }
 tendermint-proto = { workspace = true }
@@ -22,3 +25,7 @@ thiserror = "1.0.40"

 [dev-dependencies]
 serde_json = "1.0.97"
+
+[features]
+default = ["p2p"]
+p2p = ["dep:libp2p-identity", "dep:multiaddr", "dep:serde_repr"]
diff --git a/types/src/block.rs b/types/src/block.rs
index de0688f6..0b1703e0 100644
--- a/types/src/block.rs
+++ b/types/src/block.rs
@@ -270,7 +270,8 @@ mod tests {
             validator_address,
             timestamp,
             ..
-        } = commit.signatures[0].clone() else {
+        } = commit.signatures[0].clone()
+        else {
             unreachable!()
         };
         commit.signatures[0] = CommitSig::BlockIdFlagCommit {
diff --git a/types/src/lib.rs b/types/src/lib.rs
index 82da6c84..c468f47c 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -7,6 +7,8 @@ mod error;
 mod extended_header;
 pub mod fraud_proof;
 pub mod nmt;
+#[cfg(feature = "p2p")]
+pub mod p2p;
 mod rsmt2d;
 mod share;
 pub mod state;
diff --git a/types/src/p2p.rs b/types/src/p2p.rs
new file mode 100644
index 00000000..0ce2f559
--- /dev/null
+++ b/types/src/p2p.rs
@@ -0,0 +1,65 @@
+use multiaddr::Multiaddr;
+use serde::{Deserialize, Serialize};
+use serde_repr::{Deserialize_repr, Serialize_repr};
+use std::collections::HashMap;
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct AddrInfo {
+    #[serde(rename = "ID")]
+    pub id: PeerId,
+    pub addrs: Vec<Multiaddr>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct Stat {
+    pub num_streams_inbound: u32,
+    pub num_streams_outbound: u32,
+    pub num_conns_inbound: u32,
+    pub num_conns_outbound: u32,
+    #[serde(rename = "NumFD")]
+    pub num_fd: u32,
+    pub memory: u32,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct ResourceManagerStats {
+    pub system: Stat,
+    pub transient: Stat,
+    pub services: HashMap<String, Stat>,
+    pub protocols: HashMap<String, Stat>,
+    pub peers: HashMap<String, Stat>,
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct PeerId(
+    #[serde(with = "tendermint_proto::serializers::from_str")] pub libp2p_identity::PeerId,
+);
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct BandwidthStats {
+    pub total_in: f32,
+    pub total_out: f32,
+    pub rate_in: f32,
+    pub rate_out: f32,
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
+#[repr(u8)]
+pub enum Connectedness {
+    NotConnected = 0,
+    Connected = 1,
+    CanConnect = 2,
+    CannotConnect = 3,
+}
+
+#[derive(Debug, PartialEq, Eq, Serialize_repr, Deserialize_repr)]
+#[repr(u8)]
+pub enum Reachability {
+    Unknown = 0,
+    Public = 1,
+    Private = 2,
+}
diff --git a/types/src/share.rs b/types/src/share.rs
index bcea953f..b752277f 100644
--- a/types/src/share.rs
+++ b/types/src/share.rs
@@ -273,7 +273,10 @@ mod tests {
             b64_decode("OBQQFb/BaYJ+fBd9qWCox8r2wzLXrzLddHN3BjWOllg=")
         );

-        let nmt_rs::NamespaceProof::AbsenceProof { leaf: Some(leaf), .. } = &*proof else {
+        let nmt_rs::NamespaceProof::AbsenceProof {
+            leaf: Some(leaf), ..
+        } = &*proof
+        else {
             unreachable!();
         };