//! Module `importer_with_vacuum`: a [`ChainDataImporter`] decorator that vacuums
//! the SQLite database after each chain data import to reclaim disk space.

use std::sync::Arc;
2
3use async_trait::async_trait;
4use slog::{Logger, debug};
5
6use mithril_cardano_node_chain::chain_importer::ChainDataImporter;
7use mithril_common::StdResult;
8use mithril_common::entities::BlockNumber;
9use mithril_common::logging::LoggerExtensions;
10use mithril_persistence::sqlite::{SqliteCleaner, SqliteCleaningTask, SqliteConnectionPool};
11
/// A decorator of [ChainDataImporter] that vacuums the SQLite database after
/// the wrapped importer finishes, reclaiming disk space freed by the import.
pub struct ChainDataImporterWithVacuum {
    // Pool of connections to the SQLite database that will be vacuumed.
    connection_pool: Arc<SqliteConnectionPool>,
    // The decorated importer that performs the actual chain data import.
    wrapped_importer: Arc<dyn ChainDataImporter>,
    // Component-scoped logger (see `new`).
    logger: Logger,
}
18
19impl ChainDataImporterWithVacuum {
20 pub fn new(
22 connection_pool: Arc<SqliteConnectionPool>,
23 wrapped_importer: Arc<dyn ChainDataImporter>,
24 logger: Logger,
25 ) -> Self {
26 Self {
27 connection_pool,
28 wrapped_importer,
29 logger: logger.new_with_component_name::<Self>(),
30 }
31 }
32}
33
34#[async_trait]
35impl ChainDataImporter for ChainDataImporterWithVacuum {
36 async fn import(&self, up_to_beacon: BlockNumber) -> StdResult<()> {
37 self.wrapped_importer.import(up_to_beacon).await?;
38
39 debug!(
40 self.logger,
41 "Chain data Import finished - Vacuuming database to reclaim disk space"
42 );
43 let connection = self.connection_pool.connection()?;
44
45 SqliteCleaner::new(&connection)
46 .with_tasks(&[SqliteCleaningTask::Vacuum])
47 .run()?;
48
49 Ok(())
50 }
51}
52
53#[cfg(test)]
54mod tests {
55 use mockall::mock;
56
57 use mithril_common::test::TempDir;
58 use mithril_persistence::sqlite::SqliteConnection;
59
60 use crate::database::test_helper::cardano_tx_db_connection_builder;
61 use crate::test::TestLogger;
62
63 use super::*;
64
    // Generates `MockChainDataImporter`, a mockall-based test double for the
    // `ChainDataImporter` trait (only the async `import` method is mocked).
    mock! {
        pub ChainDataImporter {}

        #[async_trait]
        impl ChainDataImporter for ChainDataImporter {
            async fn import(&self, up_to_beacon: BlockNumber) -> StdResult<()>;
        }
    }
73
74 impl ChainDataImporterWithVacuum {
75 pub(crate) fn new_with_mock<I>(
76 connection_pool: Arc<SqliteConnectionPool>,
77 importer_mock_config: I,
78 ) -> Self
79 where
80 I: FnOnce(&mut MockChainDataImporter),
81 {
82 let mut chain_data_importer = MockChainDataImporter::new();
83 importer_mock_config(&mut chain_data_importer);
84
85 Self::new(
86 connection_pool,
87 Arc::new(chain_data_importer),
88 TestLogger::stdout(),
89 )
90 }
91 }
92
93 fn mangle_db(connection: &SqliteConnection) {
95 connection
96 .execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text TEXT);")
97 .unwrap();
98 connection
99 .execute(format!(
100 "INSERT INTO test (id, text) VALUES {}",
101 (0..10_000)
102 .map(|i| format!("({i}, 'some text to fill the db')"))
103 .collect::<Vec<String>>()
104 .join(", ")
105 ))
106 .unwrap();
107 connection.execute("DROP TABLE test").unwrap();
108 }
109
110 #[tokio::test]
111 async fn test_database_size_shrink_after_import() {
112 let db_path = TempDir::create("mithril-persistence", "test_vacuum").join("test.db");
113 let pool = Arc::new(cardano_tx_db_connection_builder(&db_path).build_pool(1).unwrap());
114 let importer = ChainDataImporterWithVacuum::new_with_mock(pool.clone(), |mock| {
115 mock.expect_import().once().returning(|_| Ok(()));
116 });
117
118 pool.connection()
120 .unwrap()
121 .execute("pragma auto_vacuum = none; vacuum;")
122 .unwrap();
123
124 let initial_size = db_path.metadata().unwrap().len();
125
126 mangle_db(&pool.connection().unwrap());
128 let mangled_size = db_path.metadata().unwrap().len();
129 assert!(
130 mangled_size > initial_size,
131 "Database size did not grow after mangling"
132 );
133
134 importer
135 .import(BlockNumber(100))
136 .await
137 .expect("Import should not fail");
138
139 let after_import_size = db_path.metadata().unwrap().len();
140
141 assert!(
142 mangled_size > after_import_size,
143 "Database size did not shrink after import: \
144 initial_size: {mangled_size} -> after_import_size: {after_import_size}"
145 );
146 }
147}