use std::collections::BTreeMap;
use std::path::PathBuf;

use anyhow::Context;
use erased_serde::Serialize;
use fedimint_client::db::{ClientConfigKey, OperationLogKeyPrefix};
use fedimint_client::module::init::ClientModuleInitRegistry;
use fedimint_client::oplog::OperationLogEntry;
use fedimint_core::config::{ClientConfig, CommonModuleInitRegistry, ServerModuleInitRegistry};
use fedimint_core::core::ModuleKind;
use fedimint_core::db::{
Database, DatabaseTransaction, DatabaseVersionKey, IDatabaseTransactionOpsCore,
IDatabaseTransactionOpsCoreTyped,
};
use fedimint_core::encoding::Encodable;
use fedimint_core::module::registry::{ModuleDecoderRegistry, ModuleRegistry};
use fedimint_core::push_db_pair_items;
use fedimint_rocksdb::RocksDbReadOnly;
use fedimint_server::config::io::read_server_config;
use fedimint_server::config::ServerConfig;
use fedimint_server::consensus::db as ConsensusRange;
use fedimint_server::net::api::announcement::ApiAnnouncementPrefix;
use futures::StreamExt;
use ln_gateway::Gateway;
use strum::IntoEnumIterator;
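
/// Like `fedimint_core::push_db_pair_items!`, but for value types that do not implement
/// `serde::Serialize`: values are hex-encoded via their consensus encoding instead. The
/// `$key_type` and `$value_type` parameters are not used in the expansion.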
macro_rules! push_db_pair_items_no_serde {
($dbtx:ident, $prefix_type:expr, $key_type:ty, $value_type:ty, $map:ident, $key_literal:literal) => {
let db_items = IDatabaseTransactionOpsCoreTyped::find_by_prefix($dbtx, &$prefix_type)
.await
.map(|(key, val)| {
(
Encodable::consensus_encode_to_hex(&key),
SerdeWrapper::from_encodable(&val),
)
})
.collect::<BTreeMap<_, _>>()
.await;
$map.insert($key_literal.to_string(), Box::new(db_items));
};
}
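
/// Hex-serializing wrapper around the consensus encoding of a value, used for values that do
/// not implement `serde::Serialize`.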
#[derive(Debug, serde::Serialize)]
struct SerdeWrapper(#[serde(with = "hex::serde")] Vec<u8>);
impl SerdeWrapper {
fn from_encodable<T: Encodable>(e: &T) -> SerdeWrapper {
SerdeWrapper(e.consensus_encode_to_vec())
}
}
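
/// Dumps the contents of a fedimint database (server, client, or gateway) as pretty-printed
/// JSON, optionally filtered to specific modules and key prefixes.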
pub struct DatabaseDump {
serialized: BTreeMap<String, Box<dyn Serialize>>,
read_only_db: Database,
modules: Vec<String>,
prefixes: Vec<String>,
server_cfg: Option<ServerConfig>,
module_inits: ServerModuleInitRegistry,
client_cfg: Option<ClientConfig>,
client_module_inits: ClientModuleInitRegistry,
}
impl DatabaseDump {
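    /// Opens `data_dir` read-only and builds the decoder registry from the server config in
    /// `cfg_dir` if present, otherwise from the client config stored in the database itself,
    /// falling back to the default registry if neither is available.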
pub async fn new(
cfg_dir: PathBuf,
data_dir: String,
password: String,
module_inits: ServerModuleInitRegistry,
client_module_inits: ClientModuleInitRegistry,
modules: Vec<String>,
prefixes: Vec<String>,
) -> anyhow::Result<DatabaseDump> {
        let read_only_rocks_db = RocksDbReadOnly::open_read_only(data_dir)
            .context("Error reading RocksDB database")?;
        let read_only_db = Database::new(read_only_rocks_db, ModuleRegistry::default());
let (server_cfg, client_cfg, decoders) = if let Ok(cfg) =
read_server_config(&password, &cfg_dir).context("Failed to read server config")
{
            let decoders = module_inits
                .available_decoders(cfg.iter_module_instances())
                .context("Failed to build decoder registry from server config")?
                .with_fallback();
(Some(cfg), None, decoders)
} else {
let mut dbtx = read_only_db.begin_transaction_nc().await;
let client_cfg_or = dbtx.get_value(&ClientConfigKey).await;
if let Some(client_cfg) = client_cfg_or {
let kinds = client_cfg.modules.iter().map(|(k, v)| (*k, &v.kind));
                let decoders = client_module_inits
                    .available_decoders(kinds)
                    .context("Failed to build decoder registry from client config")?
                    .with_fallback();
let client_cfg = client_cfg.redecode_raw(&decoders)?;
(None, Some(client_cfg), decoders)
} else {
(None, None, ModuleDecoderRegistry::default())
}
};
Ok(DatabaseDump {
serialized: BTreeMap::new(),
read_only_db: read_only_db.with_decoders(decoders),
modules,
prefixes,
server_cfg,
module_inits,
client_module_inits,
client_cfg,
})
}
}
impl DatabaseDump {
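    /// Prints the accumulated dump as pretty-printed JSON to stdout.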
fn print_database(&self) {
        let json = serde_json::to_string_pretty(&self.serialized)
            .expect("serialization of the database dump to JSON failed");
println!("{json}");
}
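
    /// Serializes the isolated database namespace of a single module. Known modules are dumped
    /// through their `dump_database` implementation; unknown modules are dumped as raw
    /// hex-encoded key/value pairs for each key prefix that matches the filter.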
async fn serialize_module(
&mut self,
module_id: &u16,
kind: &ModuleKind,
inits: CommonModuleInitRegistry,
) -> anyhow::Result<()> {
if !self.modules.is_empty() && !self.modules.contains(&kind.to_string()) {
return Ok(());
}
let mut dbtx = self.read_only_db.begin_transaction_nc().await;
let db_version = dbtx.get_value(&DatabaseVersionKey(*module_id)).await;
let mut isolated_dbtx = dbtx.to_ref_with_prefix_module_id(*module_id).0;
match inits.get(kind) {
None => {
tracing::warn!(module_id, %kind, "Detected configuration for unsupported module");
let mut module_serialized = BTreeMap::new();
let filtered_prefixes = (0u8..=255).filter(|f| {
self.prefixes.is_empty()
|| self.prefixes.contains(&f.to_string().to_lowercase())
});
let isolated_dbtx = &mut isolated_dbtx;
for prefix in filtered_prefixes {
let db_items = isolated_dbtx
.raw_find_by_prefix(&[prefix])
.await?
.map(|(k, v)| {
(
k.consensus_encode_to_hex(),
Box::new(v.consensus_encode_to_hex()),
)
})
.collect::<BTreeMap<String, Box<_>>>()
.await;
module_serialized.extend(db_items);
}
self.serialized
.insert(format!("{kind}-{module_id}"), Box::new(module_serialized));
}
Some(init) => {
let mut module_serialized = init
.dump_database(&mut isolated_dbtx.to_ref_nc(), self.prefixes.clone())
.await
.collect::<BTreeMap<String, _>>();
if let Some(db_version) = db_version {
module_serialized.insert("Version".to_string(), Box::new(db_version));
} else {
module_serialized
.insert("Version".to_string(), Box::new("Not Specified".to_string()));
}
self.serialized
.insert(format!("{kind}-{module_id}"), Box::new(module_serialized));
}
}
Ok(())
}
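
    /// Serializes the gateway's database tables into the dump under the "gateway" key.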
async fn serialize_gateway(&mut self) -> anyhow::Result<()> {
let mut dbtx = self.read_only_db.begin_transaction_nc().await;
let gateway_serialized = Gateway::dump_database(&mut dbtx, self.prefixes.clone()).await;
self.serialized
.insert("gateway".to_string(), Box::new(gateway_serialized));
Ok(())
}
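
    /// Dumps the database and prints it: consensus and module data for a server database,
    /// client config, module data, and operation log for a client database, and gateway data
    /// otherwise.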
pub async fn dump_database(&mut self) -> anyhow::Result<()> {
if let Some(cfg) = self.server_cfg.clone() {
if self.modules.is_empty() || self.modules.contains(&"consensus".to_string()) {
self.retrieve_consensus_data().await;
}
for (module_id, module_cfg) in &cfg.consensus.modules {
let kind = &module_cfg.kind;
self.serialize_module(module_id, kind, self.module_inits.to_common())
.await?;
}
self.print_database();
return Ok(());
}
if let Some(cfg) = self.client_cfg.clone() {
self.serialized
.insert("Client Config".into(), Box::new(cfg.to_json()));
for (module_id, module_cfg) in &cfg.modules {
let kind = &module_cfg.kind;
let mut modules = Vec::new();
if let Some(module) = self.client_module_inits.get(kind) {
modules.push(module.to_dyn_common());
}
let registry = CommonModuleInitRegistry::from(modules);
self.serialize_module(module_id, kind, registry).await?;
}
{
let mut dbtx = self.read_only_db.begin_transaction_nc().await;
Self::write_serialized_client_operation_log(&mut self.serialized, &mut dbtx).await;
}
self.print_database();
return Ok(());
}
self.serialize_gateway().await?;
self.print_database();
Ok(())
}
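
    /// Serializes the non-module consensus tables (accepted items, transactions, sessions,
    /// API announcements) into the dump under the "Consensus" key.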
async fn retrieve_consensus_data(&mut self) {
let filtered_prefixes = ConsensusRange::DbKeyPrefix::iter().filter(|prefix| {
self.prefixes.is_empty() || self.prefixes.contains(&prefix.to_string().to_lowercase())
});
let mut dbtx = self.read_only_db.begin_transaction_nc().await;
let mut consensus: BTreeMap<String, Box<dyn Serialize>> = BTreeMap::new();
for table in filtered_prefixes {
Self::write_serialized_consensus_range(table, &mut dbtx, &mut consensus).await;
}
self.serialized
.insert("Consensus".to_string(), Box::new(consensus));
}
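
    /// Serializes all entries under one consensus key prefix into the `consensus` map.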
async fn write_serialized_consensus_range(
table: ConsensusRange::DbKeyPrefix,
dbtx: &mut DatabaseTransaction<'_>,
consensus: &mut BTreeMap<String, Box<dyn Serialize>>,
) {
match table {
ConsensusRange::DbKeyPrefix::AcceptedItem => {
push_db_pair_items_no_serde!(
dbtx,
ConsensusRange::AcceptedItemPrefix,
ConsensusRange::AcceptedItemKey,
fedimint_server::consensus::AcceptedItem,
consensus,
"Accepted Items"
);
}
ConsensusRange::DbKeyPrefix::AcceptedTransaction => {
push_db_pair_items_no_serde!(
dbtx,
ConsensusRange::AcceptedTransactionKeyPrefix,
ConsensusRange::AcceptedTransactionKey,
fedimint_server::consensus::AcceptedTransaction,
consensus,
"Accepted Transactions"
);
}
ConsensusRange::DbKeyPrefix::SignedSessionOutcome => {
push_db_pair_items_no_serde!(
dbtx,
ConsensusRange::SignedSessionOutcomePrefix,
ConsensusRange::SignedBlockKey,
fedimint_server::consensus::SignedBlock,
consensus,
"Signed Blocks"
);
}
ConsensusRange::DbKeyPrefix::AlephUnits => {
push_db_pair_items_no_serde!(
dbtx,
ConsensusRange::AlephUnitsPrefix,
ConsensusRange::AlephUnitsKey,
Vec<u8>,
consensus,
"Aleph Units"
);
}
ConsensusRange::DbKeyPrefix::Module => {}
ConsensusRange::DbKeyPrefix::ApiAnnouncements => {
push_db_pair_items_no_serde!(
dbtx,
ApiAnnouncementPrefix,
ApiAnnouncementKey,
fedimint_core::net::api_announcement::SignedApiAnnouncement,
consensus,
"API Announcements"
);
}
}
}
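
    /// Serializes the client operation log into the dump under the "Operations" key.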
async fn write_serialized_client_operation_log(
serialized: &mut BTreeMap<String, Box<dyn Serialize>>,
dbtx: &mut DatabaseTransaction<'_>,
) {
push_db_pair_items!(
dbtx,
OperationLogKeyPrefix,
OperationLogKey,
OperationLogEntry,
serialized,
"Operations"
);
}
}