mithril_aggregator/database/repository/immutable_file_digest_repository.rs

use std::collections::BTreeMap;
use std::sync::Arc;

use async_trait::async_trait;

use mithril_cardano_node_internal_database::digesters::cache::{
    CacheProviderResult, ImmutableDigesterCacheGetError, ImmutableDigesterCacheStoreError,
    ImmutableFileDigestCacheProvider,
};
use mithril_cardano_node_internal_database::entities::ImmutableFile;
use mithril_common::StdResult;
use mithril_common::entities::{HexEncodedDigest, ImmutableFileName};
use mithril_persistence::sqlite::{ConnectionExtensions, SqliteConnection};

use crate::ImmutableFileDigestMapper;
use crate::database::query::{
    DeleteImmutableFileDigestQuery, GetImmutableFileDigestQuery, UpsertImmutableFileDigestQuery,
};
use crate::database::record::ImmutableFileDigestRecord;

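/// Repository that stores the digest computed for each Cardano immutable file in the
/// aggregator SQLite database.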
pub struct ImmutableFileDigestRepository {
    connection: Arc<SqliteConnection>,
}

impl ImmutableFileDigestRepository {
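    /// Create a new [ImmutableFileDigestRepository] from a SQLite connection.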
    pub fn new(connection: Arc<SqliteConnection>) -> Self {
        Self { connection }
    }

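    /// Return the [ImmutableFileDigestRecord] for the given immutable file name, if one is stored.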
    pub async fn get_immutable_file_digest(
        &self,
        immutable_file_name: &ImmutableFileName,
    ) -> StdResult<Option<ImmutableFileDigestRecord>> {
        self.connection
            .fetch_first(GetImmutableFileDigestQuery::by_immutable_file_name(
                immutable_file_name,
            )?)
    }

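    /// Return all the stored [ImmutableFileDigestRecord]s.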
    pub async fn get_all_immutable_file_digest(&self) -> StdResult<Vec<ImmutableFileDigestRecord>> {
        self.connection.fetch_collect(GetImmutableFileDigestQuery::all())
    }

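    /// Create or update the digest stored for the given immutable file name.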
    pub async fn upsert_immutable_file_digest(
        &self,
        immutable_file_name: &ImmutableFileName,
        digest: &str,
    ) -> StdResult<ImmutableFileDigestRecord> {
        let message = self.connection.fetch_first(UpsertImmutableFileDigestQuery::one(
            immutable_file_name,
            digest,
        )?)?;

        message
            .ok_or_else(|| panic!("Upserting an immutable_file_digest should not return nothing."))
    }

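    /// Delete all the stored [ImmutableFileDigestRecord]s.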
    pub async fn delete_all(&self) -> StdResult<()> {
        self.connection.apply(DeleteImmutableFileDigestQuery::all())?;

        Ok(())
    }
}

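// Using the repository as the immutable file digester cache: `store` upserts one record per
// file, `get` looks each requested file up, and `reset` deletes every record.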
#[async_trait]
impl ImmutableFileDigestCacheProvider for ImmutableFileDigestRepository {
    async fn store(
        &self,
        digest_per_filenames: Vec<(ImmutableFileName, HexEncodedDigest)>,
    ) -> CacheProviderResult<()> {
        for (filename, digest) in digest_per_filenames {
            self.upsert_immutable_file_digest(&filename, &digest)
                .await
                .map_err(ImmutableDigesterCacheStoreError::StoreError)?;
        }

        Ok(())
    }

    async fn get(
        &self,
        immutables: Vec<ImmutableFile>,
    ) -> CacheProviderResult<BTreeMap<ImmutableFile, Option<HexEncodedDigest>>> {
        let mut result = BTreeMap::new();
        for immutable in immutables {
            let immutable_file_digest = self
                .get_immutable_file_digest(&immutable.filename)
                .await
                .map_err(ImmutableDigesterCacheGetError::StoreError)?;

            result.insert(immutable, immutable_file_digest.map(|f| f.digest));
        }

        Ok(result)
    }

    async fn reset(&self) -> CacheProviderResult<()> {
        self.delete_all()
            .await
            .map_err(ImmutableDigesterCacheGetError::StoreError)?;

        Ok(())
    }
}

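// Expose the stored records as a map from immutable file name to digest.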
#[async_trait]
impl ImmutableFileDigestMapper for ImmutableFileDigestRepository {
    async fn get_immutable_file_digest_map(
        &self,
    ) -> StdResult<BTreeMap<ImmutableFileName, HexEncodedDigest>> {
        let immutable_file_digest_map = BTreeMap::from_iter(
            self.get_all_immutable_file_digest()
                .await?
                .into_iter()
                .map(|record| (record.immutable_file_name, record.digest)),
        );

        Ok(immutable_file_digest_map)
    }
}

#[cfg(test)]
mod tests {

    use crate::database::test_helper::main_db_connection;

    use super::*;

    async fn get_connection() -> Arc<SqliteConnection> {
        let connection = main_db_connection().unwrap();

        Arc::new(connection)
    }

    mod repository {
        use mithril_common::test_utils::assert_equivalent;

        use super::*;

        #[tokio::test]
        async fn repository_get_immutable_file_digest() {
            let repository = ImmutableFileDigestRepository::new(get_connection().await);
            let immutable_file_name: ImmutableFileName = "123.chunk".to_string();
            let digest = "digest-123";

            let immutable_file_digest_result = repository
                .get_immutable_file_digest(&immutable_file_name)
                .await
                .unwrap();
            assert_eq!(None, immutable_file_digest_result);

            repository
                .upsert_immutable_file_digest(&immutable_file_name, digest)
                .await
                .unwrap();
            let immutable_file_digest_result = repository
                .get_immutable_file_digest(&immutable_file_name)
                .await
                .unwrap();
            assert_eq!(
                Some(ImmutableFileDigestRecord {
                    immutable_file_name,
                    digest: digest.to_string()
                }),
                immutable_file_digest_result
            );
        }

        #[tokio::test]
        async fn repository_get_all_immutable_file_digests() {
            let repository = ImmutableFileDigestRepository::new(get_connection().await);

            let all_immutable_file_digests =
                repository.get_all_immutable_file_digest().await.unwrap();
            assert!(all_immutable_file_digests.is_empty());

            repository
                .upsert_immutable_file_digest(&"123.chunk".to_string(), "digest-123")
                .await
                .unwrap();
            repository
                .upsert_immutable_file_digest(&"456.chunk".to_string(), "digest-456")
                .await
                .unwrap();
            let all_immutable_file_digests =
                repository.get_all_immutable_file_digest().await.unwrap();

            assert_equivalent(
                vec![
                    ImmutableFileDigestRecord {
                        immutable_file_name: "123.chunk".to_string(),
                        digest: "digest-123".to_string(),
                    },
                    ImmutableFileDigestRecord {
                        immutable_file_name: "456.chunk".to_string(),
                        digest: "digest-456".to_string(),
                    },
                ],
                all_immutable_file_digests,
            );
        }

        #[tokio::test]
        async fn repository_upsert_immutable_file_digest() {
            let repository = ImmutableFileDigestRepository::new(get_connection().await);
            let immutable_file_name: ImmutableFileName = "123.chunk".to_string();
            let digest = "digest-123";
            let digest_updated = "digest-456";

            repository
                .upsert_immutable_file_digest(&immutable_file_name, digest)
                .await
                .unwrap();
            let immutable_file_digest = repository
                .get_immutable_file_digest(&immutable_file_name)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(immutable_file_digest.digest, digest);

            repository
                .upsert_immutable_file_digest(&immutable_file_name, digest_updated)
                .await
                .unwrap();
            let immutable_file_digest = repository
                .get_immutable_file_digest(&immutable_file_name)
                .await
                .unwrap()
                .unwrap();
            assert_eq!(immutable_file_digest.digest, digest_updated);
        }

        #[tokio::test]
        async fn repository_delete_all_immutable_file_digests() {
            let repository = ImmutableFileDigestRepository::new(get_connection().await);

            repository
                .upsert_immutable_file_digest(&"123.chunk".to_string(), "digest-123")
                .await
                .unwrap();
            repository
                .upsert_immutable_file_digest(&"456.chunk".to_string(), "digest-456")
                .await
                .unwrap();
            let all_immutable_file_digests =
                repository.get_all_immutable_file_digest().await.unwrap();
            assert_eq!(2, all_immutable_file_digests.len());

            repository.delete_all().await.unwrap();

            let all_immutable_file_digests =
                repository.get_all_immutable_file_digest().await.unwrap();
            assert!(all_immutable_file_digests.is_empty());
        }
    }

    mod cache_provider {
        use std::path::PathBuf;

        use mithril_cardano_node_internal_database::test::fake_data;

        use super::*;

        #[tokio::test]
        async fn can_store_values() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            let values_to_store = vec![
                ("0.chunk".to_string(), "digest 0".to_string()),
                ("1.chunk".to_string(), "digest 1".to_string()),
            ];
            let expected: BTreeMap<_, _> = BTreeMap::from([
                (
                    fake_data::immutable_file(PathBuf::default(), 0, "0.chunk"),
                    Some("digest 0".to_string()),
                ),
                (
                    fake_data::immutable_file(PathBuf::default(), 1, "1.chunk"),
                    Some("digest 1".to_string()),
                ),
            ]);
            let immutables = expected.keys().cloned().collect();

            provider
                .store(values_to_store)
                .await
                .expect("Cache write should not fail");
            let result = provider.get(immutables).await.expect("Cache read should not fail");

            assert_eq!(expected, result);
        }

        #[tokio::test]
        async fn returns_only_asked_immutables_cache() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            provider
                .store(vec![
                    ("0.chunk".to_string(), "digest 0".to_string()),
                    ("1.chunk".to_string(), "digest 1".to_string()),
                ])
                .await
                .expect("Cache write should not fail");
            let expected: BTreeMap<_, _> = BTreeMap::from([(
                fake_data::immutable_file(PathBuf::default(), 0, "0.chunk"),
                Some("digest 0".to_string()),
            )]);
            let immutables = expected.keys().cloned().collect();

            let result = provider.get(immutables).await.expect("Cache read should not fail");

            assert_eq!(expected, result);
        }

        #[tokio::test]
        async fn returns_none_for_uncached_asked_immutables() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            let expected: BTreeMap<_, _> = BTreeMap::from([(
                fake_data::immutable_file(PathBuf::default(), 2, "2.chunk"),
                None,
            )]);
            let immutables = expected.keys().cloned().collect();

            let result = provider.get(immutables).await.expect("Cache read should not fail");

            assert_eq!(expected, result);
        }

        #[tokio::test]
        async fn store_erase_existing_values() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            provider
                .store(vec![
                    ("0.chunk".to_string(), "to erase".to_string()),
                    ("1.chunk".to_string(), "keep me".to_string()),
                    ("2.chunk".to_string(), "keep me too".to_string()),
                ])
                .await
                .expect("Cache write should not fail");
            let values_to_store = vec![
                ("0.chunk".to_string(), "updated".to_string()),
                ("1.chunk".to_string(), "keep me".to_string()),
            ];
            let expected: BTreeMap<_, _> = BTreeMap::from([
                (
                    fake_data::immutable_file(PathBuf::default(), 0, "0.chunk"),
                    Some("updated".to_string()),
                ),
                (
                    fake_data::immutable_file(PathBuf::default(), 1, "1.chunk"),
                    Some("keep me".to_string()),
                ),
                (
                    fake_data::immutable_file(PathBuf::default(), 2, "2.chunk"),
                    Some("keep me too".to_string()),
                ),
                (
                    fake_data::immutable_file(PathBuf::default(), 3, "3.chunk"),
                    None,
                ),
            ]);
            let immutables = expected.keys().cloned().collect();

            provider
                .store(values_to_store)
                .await
                .expect("Cache write should not fail");
            let result = provider.get(immutables).await.expect("Cache read should not fail");

            assert_eq!(expected, result);
        }

        #[tokio::test]
        async fn reset_clear_existing_values() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            let values_to_store = vec![
                ("0.chunk".to_string(), "digest 0".to_string()),
                ("1.chunk".to_string(), "digest 1".to_string()),
            ];
            let expected: BTreeMap<_, _> = BTreeMap::from([
                (
                    fake_data::immutable_file(PathBuf::default(), 0, "0.chunk"),
                    Some("digest 0".to_string()),
                ),
                (
                    fake_data::immutable_file(PathBuf::default(), 1, "1.chunk"),
                    Some("digest 1".to_string()),
                ),
            ]);
            let immutables = expected.keys().cloned().collect();

            provider
                .store(values_to_store)
                .await
                .expect("Cache write should not fail");
            provider.reset().await.expect("reset should not fail");

            let result: BTreeMap<_, _> =
                provider.get(immutables).await.expect("Cache read should not fail");

            assert!(result.into_iter().all(|(_, cache)| cache.is_none()));
        }
    }

    #[cfg(test)]
    mod digest_mapper {

        use mithril_cardano_node_internal_database::digesters::cache::ImmutableFileDigestCacheProvider;

        use super::*;

        #[tokio::test]
        async fn get_immutable_file_digest_map() {
            let provider = ImmutableFileDigestRepository::new(get_connection().await);
            let immutable_file_digest_records = vec![
                ("0.chunk".to_string(), "digest 0".to_string()),
                ("1.chunk".to_string(), "digest 1".to_string()),
                ("2.chunk".to_string(), "digest 2".to_string()),
            ];
            let expected_immutable_file_digest_map =
                BTreeMap::from_iter(immutable_file_digest_records.clone().into_iter());
            provider
                .store(immutable_file_digest_records)
                .await
                .expect("Cache write should not fail");

            let immutable_file_digest_map = provider.get_immutable_file_digest_map().await.unwrap();

            assert_eq!(
                expected_immutable_file_digest_map,
                immutable_file_digest_map
            );
        }
    }
}