mithril_aggregator/commands/serve_command.rs

use anyhow::{anyhow, Context};
use clap::Parser;
use config::{builder::DefaultState, ConfigBuilder, Map, Source, Value, ValueKind};
use mithril_common::StdResult;
use mithril_metric::MetricsServer;
use slog::{crit, debug, info, warn, Logger};
use std::time::Duration;
use std::{net::IpAddr, path::PathBuf};
use tokio::{sync::oneshot, task::JoinSet};

use crate::{dependency_injection::DependenciesBuilder, Configuration};

/// Serve command: runs the aggregator server and its associated services
#[derive(Parser, Debug, Clone)]
pub struct ServeCommand {
    /// Server listening IP
    #[clap(long)]
    pub server_ip: Option<String>,

    /// Server TCP port
    #[clap(long)]
    pub server_port: Option<u16>,

    /// Directory where snapshots are stored.
    /// Defaults to the working directory.
    #[clap(long)]
    pub snapshot_directory: Option<PathBuf>,

    /// Disable immutables digests cache.
    #[clap(long)]
    disable_digests_cache: bool,

    /// If set, the existing immutables digests cache will be reset.
    ///
    /// Will be ignored if set in conjunction with `--disable-digests-cache`.
    #[clap(long)]
    reset_digests_cache: bool,

    /// If set, unparsable blocks do not return an error; an error log is written instead.
    ///
    /// Will be ignored on (pre)production networks.
    #[clap(long)]
    allow_unparsable_block: bool,

    /// Enable metrics HTTP server (Prometheus endpoint on /metrics).
    #[clap(long)]
    enable_metrics_server: bool,

    /// Metrics HTTP server IP.
    #[clap(long)]
    metrics_server_ip: Option<String>,

    /// Metrics HTTP server listening port.
    #[clap(long)]
    metrics_server_port: Option<u16>,
}

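// Implementing the `config` crate's `Source` trait lets the parsed command-line
// arguments be merged into the layered configuration: only flags that were
// explicitly provided are collected, so other configuration sources still apply
// when a flag is omitted.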
impl Source for ServeCommand {
    fn clone_into_box(&self) -> Box<dyn Source + Send + Sync> {
        Box::new(self.clone())
    }

    fn collect(&self) -> Result<Map<String, Value>, config::ConfigError> {
        let mut result = Map::new();
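        // the origin ("clap arguments") tags each value so configuration errors can point back to the command line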
        let namespace = "clap arguments".to_string();

        if let Some(server_ip) = self.server_ip.clone() {
            result.insert(
                "server_ip".to_string(),
                Value::new(Some(&namespace), ValueKind::from(server_ip)),
            );
        }
        if let Some(server_port) = self.server_port {
            result.insert(
                "server_port".to_string(),
                Value::new(Some(&namespace), ValueKind::from(server_port)),
            );
        }
        if let Some(snapshot_directory) = self.snapshot_directory.clone() {
            result.insert(
                "snapshot_directory".to_string(),
                Value::new(
                    Some(&namespace),
                    ValueKind::from(snapshot_directory.to_string_lossy().to_string()),
                ),
            );
        }
        if self.disable_digests_cache {
            result.insert(
                "disable_digests_cache".to_string(),
                Value::new(Some(&namespace), ValueKind::from(true)),
            );
        };
        if self.reset_digests_cache {
            result.insert(
                "reset_digests_cache".to_string(),
                Value::new(Some(&namespace), ValueKind::from(true)),
            );
        }
        if self.allow_unparsable_block {
            result.insert(
                "allow_unparsable_block".to_string(),
                Value::new(Some(&namespace), ValueKind::from(true)),
            );
        };
        if self.enable_metrics_server {
            result.insert(
                "enable_metrics_server".to_string(),
                Value::new(Some(&namespace), ValueKind::from(true)),
            );
        };
        if let Some(metrics_server_ip) = self.metrics_server_ip.clone() {
            result.insert(
                "metrics_server_ip".to_string(),
                Value::new(Some(&namespace), ValueKind::from(metrics_server_ip)),
            );
        }
        if let Some(metrics_server_port) = self.metrics_server_port {
            result.insert(
                "metrics_server_port".to_string(),
                Value::new(Some(&namespace), ValueKind::from(metrics_server_port)),
            );
        }

        Ok(result)
    }
}

impl ServeCommand {
    pub async fn execute(
        &self,
        root_logger: Logger,
        mut config_builder: ConfigBuilder<DefaultState>,
    ) -> StdResult<()> {
        config_builder = config_builder.add_source(self.clone());
        let config: Configuration = config_builder
            .build()
            .with_context(|| "configuration build error")?
            .try_deserialize()
            .with_context(|| "configuration deserialize error")?;
        debug!(root_logger, "SERVE command"; "config" => format!("{config:?}"));
        let mut dependencies_builder =
            DependenciesBuilder::new(root_logger.clone(), config.clone());

        // start servers
        println!("Starting server...");
        println!("Press Ctrl+C to stop");

        // start the monitoring thread
        let mut event_store = dependencies_builder
            .create_event_store()
            .await
            .with_context(|| "Dependencies Builder can not create event store")?;
        let event_store_thread = tokio::spawn(async move { event_store.run().await.unwrap() });

        // start the aggregator runtime
        let mut runtime = dependencies_builder
            .create_aggregator_runner()
            .await
            .with_context(|| "Dependencies Builder can not create aggregator runner")?;
        let mut join_set = JoinSet::new();
        join_set.spawn(async move { runtime.run().await.map_err(|e| e.to_string()) });

        // start the cardano transactions preloader
        let cardano_transactions_preloader = dependencies_builder
            .create_cardano_transactions_preloader()
            .await
            .with_context(|| {
                "Dependencies Builder can not create cardano transactions preloader"
            })?;
        let preload_task =
            tokio::spawn(async move { cardano_transactions_preloader.preload().await });

        // start the HTTP server
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
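        // keep the sender half so the HTTP server can be asked to stop gracefully during shutdown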
        let routes = dependencies_builder
            .create_http_routes()
            .await
            .with_context(|| "Dependencies Builder can not create http routes")?;
        join_set.spawn(async move {
            let (_, server) = warp::serve(routes).bind_with_graceful_shutdown(
                (
                    config.server_ip.clone().parse::<IpAddr>().unwrap(),
                    config.server_port,
                ),
                async {
                    shutdown_rx.await.ok();
                },
            );
            server.await;

            Ok(())
        });

        // Create a SignersImporter only if the `cexplorer_pools_url` is provided in the config.
        if let Some(cexplorer_pools_url) = config.cexplorer_pools_url {
            match dependencies_builder
                .create_signer_importer(&cexplorer_pools_url)
                .await
            {
                Ok(service) => {
                    join_set.spawn(async move {
                        // Wait 5s to give the other services time to start before running
                        // the first import.
                        tokio::time::sleep(Duration::from_secs(5)).await;
                        service
                            .run_forever(Duration::from_secs(
                                // The import interval is expressed in minutes
                                config.signer_importer_run_interval * 60,
                            ))
                            .await;
                        Ok(())
                    });
                }
                Err(error) => {
                    warn!(
                        root_logger, "Failed to build the `SignersImporter`";
                        "url_to_import" => cexplorer_pools_url,
                        "error" => ?error
                    );
                }
            }
        }

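        // start the usage reporter, which persists usage metrics at the configured interval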
        let mut usage_reporter = dependencies_builder
            .create_usage_reporter()
            .await
            .with_context(|| "Dependencies Builder can not create usage reporter")?;
        join_set.spawn(async move {
            let interval_duration =
                Duration::from_secs(config.persist_usage_report_interval_in_seconds);
            usage_reporter.run_forever(interval_duration).await;
            Ok(())
        });

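        // start the metrics HTTP server when it is enabled in the configuration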
        let metrics_service = dependencies_builder
            .get_metrics_service()
            .await
            .with_context(|| "Metrics service initialization error")?;
        let (metrics_server_shutdown_tx, metrics_server_shutdown_rx) = oneshot::channel();
        if config.enable_metrics_server {
            let metrics_logger = root_logger.clone();
            join_set.spawn(async move {
                let _ = MetricsServer::new(
                    &config.metrics_server_ip,
                    config.metrics_server_port,
                    metrics_service,
                    metrics_logger.clone(),
                )
                .start(metrics_server_shutdown_rx)
                .await
                .map_err(|e| anyhow!(e));

                Ok(())
            });
        }

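        // a Ctrl+C signal completes like any other task and triggers the shutdown sequence below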
        join_set.spawn(async { tokio::signal::ctrl_c().await.map_err(|e| e.to_string()) });
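        // every dependency has been created, the builder is not needed anymore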
        dependencies_builder.vanish().await;

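        // wait for the first task to terminate: a service error, a clean exit or Ctrl+C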
        if let Err(e) = join_set.join_next().await.unwrap()? {
            crit!(root_logger, "A critical error occurred"; "error" => e);
        }

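        // ask the metrics server (when running) to stop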
        metrics_server_shutdown_tx
            .send(())
            .map_err(|e| anyhow!("Metrics server shutdown signal could not be sent: {e:?}"))?;

        // stop servers
        join_set.shutdown().await;
        let _ = shutdown_tx.send(());

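        // the preloader runs outside the join set, abort it if it has not completed yet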
        if !preload_task.is_finished() {
            preload_task.abort();
        }

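        // wait for the event store task to finish before exiting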
        info!(root_logger, "Event store is finishing...");
        event_store_thread.await.unwrap();
        println!("Services stopped, exiting.");

        Ok(())
    }
}