// ln_gateway/events.rs

use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use fedimint_client::ClientHandle;
use fedimint_core::core::ModuleKind;
use fedimint_core::util::{get_average, get_median};
use fedimint_core::Amount;
use fedimint_eventlog::{
    DBTransactionEventLogExt, Event, EventKind, EventLogId, PersistedLogEntry,
};
use fedimint_mint_client::event::{OOBNotesReissued, OOBNotesSpent};
use fedimint_wallet_client::events::{DepositConfirmed, WithdrawRequest};
use itertools::Itertools;

use crate::gateway_module_v2::events::{
    CompleteLightningPaymentSucceeded, IncomingPaymentFailed, IncomingPaymentStarted,
    IncomingPaymentSucceeded, OutgoingPaymentFailed, OutgoingPaymentStarted,
    OutgoingPaymentSucceeded,
};
use crate::rpc::PaymentStats;

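/// All event kinds that the gateway inspects when computing payment
/// statistics and summaries.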
pub const ALL_GATEWAY_EVENTS: [EventKind; 11] = [
    OutgoingPaymentStarted::KIND,
    OutgoingPaymentSucceeded::KIND,
    OutgoingPaymentFailed::KIND,
    IncomingPaymentStarted::KIND,
    IncomingPaymentSucceeded::KIND,
    IncomingPaymentFailed::KIND,
    CompleteLightningPaymentSucceeded::KIND,
    OOBNotesSpent::KIND,
    OOBNotesReissued::KIND,
    WithdrawRequest::KIND,
    DepositConfirmed::KIND,
];

/// Searches the event log for all events that occurred within the specified
/// time bounds.
///
/// Because it is inefficient to search the log backwards, this function first
/// steps backwards in batches to find a "rough start", then traverses the log
/// forwards from there. Events are appended to the result until an event's
/// timestamp reaches the end timestamp or the end of the log is hit.
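///
/// A usage sketch (not compiled; assumes an already-initialized client
/// handle):
///
/// ```ignore
/// // Hypothetical: collect all events from the last 24 hours.
/// let end = SystemTime::now();
/// let start = end - Duration::from_secs(24 * 60 * 60);
/// let events = get_events_for_duration(&client, start, end).await;
/// ```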
pub async fn get_events_for_duration(
    client: &Arc<ClientHandle>,
    start: SystemTime,
    end: SystemTime,
) -> Vec<PersistedLogEntry> {
    const BATCH_SIZE: u64 = 10_000;

    let start_micros = start
        .duration_since(UNIX_EPOCH)
        .expect("start must be after the unix epoch")
        .as_micros() as u64;

    let end_micros = end
        .duration_since(UNIX_EPOCH)
        .expect("end must be after the unix epoch")
        .as_micros() as u64;

    let batch_end = {
        let mut dbtx = client.db().begin_transaction_nc().await;
        dbtx.get_next_event_log_id().await
    };

    let mut batch_start = batch_end.saturating_sub(BATCH_SIZE);

    // Find the "rough start" in the log by reading the log backwards in batches.
    // Once an event with a timestamp before our start time is found, then we start
    // traversing forward to find events that fall within our time bound.
    while batch_start != EventLogId::LOG_START {
        let batch = client.get_event_log(Some(batch_start), BATCH_SIZE).await;

        match batch.first() {
            Some(first_event) => {
                if first_event.timestamp < start_micros {
                    // Found the "rough start" where we can read forward
                    break;
                }
            }
            None => {
                return vec![];
            }
        }

        batch_start = batch_start.saturating_sub(BATCH_SIZE);
    }

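    // Traverse the log forward from the "rough start", keeping only events
    // whose timestamps fall within `[start_micros, end_micros)`.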
    let mut all_events = Vec::new();
    loop {
        let batch = client.get_event_log(Some(batch_start), BATCH_SIZE).await;

        if batch.is_empty() {
            return all_events;
        }

        for event in batch {
            if event.timestamp < start_micros {
                continue;
            }

            if event.timestamp >= end_micros {
                return all_events;
            }

            all_events.push(event);
        }

        batch_start = batch_start.saturating_add(BATCH_SIZE);
    }
}

/// Filters the given `PersistedLogEntry` slice by the `EventKind` and
/// `ModuleKind`.
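///
/// A usage sketch (not compiled; the module kind string is illustrative):
///
/// ```ignore
/// let outgoing_starts: Vec<&PersistedLogEntry> = filter_events(
///     &all_events,
///     OutgoingPaymentStarted::KIND,
///     ModuleKind::from_static_str("lnv2"),
/// )
/// .collect();
/// ```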
pub(crate) fn filter_events<'a, I>(
    all_events: I,
    event_kind: EventKind,
    module_kind: ModuleKind,
) -> impl Iterator<Item = &'a PersistedLogEntry> + 'a
where
    I: IntoIterator<Item = &'a PersistedLogEntry> + 'a,
{
    all_events.into_iter().filter(move |e| {
        if let Some((m, _)) = &e.module {
            e.event_kind == event_kind && *m == module_kind
        } else {
            false
        }
    })
}

/// Joins two sets of events on a predicate.
///
/// This function computes a "nested loop join" by first computing the cross
/// product of the start event vector and the success/failure event vectors. The
/// resulting cartesian product is then filtered according to the join predicate
/// supplied in the parameters.
///
/// This function is intended for small data sets. If the underlying relations
/// grow, this function should implement a different join algorithm or be
/// moved out of the gateway.
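///
/// A usage sketch (not compiled; the `payment_id` fields are illustrative and
/// not the real event schema):
///
/// ```ignore
/// let pairs: Vec<(u64, Amount)> = join_events::<
///     OutgoingPaymentStarted,
///     OutgoingPaymentSucceeded,
///     (u64, Amount),
/// >(&starts, &successes, |start, success, latency| {
///     (start.payment_id == success.payment_id).then_some((latency, success.fee))
/// })
/// .collect();
/// ```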
pub(crate) fn join_events<'a, L, R, Res>(
    events_l: &'a [&PersistedLogEntry],
    events_r: &'a [&PersistedLogEntry],
    predicate: impl Fn(L, R, u64) -> Option<Res> + 'a,
) -> impl Iterator<Item = Res> + 'a
where
    L: Event,
    R: Event,
{
    events_l
        .iter()
        .cartesian_product(events_r)
        .filter_map(move |(l, r)| {
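            // Only pairs where the right event occurred at or after the left
            // event are joined; `checked_sub` yields the latency between the
            // two events in microseconds.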
            if let Some(latency) = r.timestamp.checked_sub(l.timestamp) {
                let event_l: L =
                    serde_json::from_value(l.value.clone()).expect("could not parse JSON");
                let event_r: R =
                    serde_json::from_value(r.value.clone()).expect("could not parse JSON");
                predicate(event_l, event_r, latency)
            } else {
                None
            }
        })
}

/// Helper struct for storing computed data about outgoing and incoming
/// payments.
#[derive(Debug, Default)]
pub struct StructuredPaymentEvents {
    /// Latencies of successful payments, in microseconds.
    latencies: Vec<u64>,
    /// Fees collected for successful payments.
    fees: Vec<Amount>,
    /// Latencies of failed payments, in microseconds.
    latencies_failure: Vec<u64>,
}

impl StructuredPaymentEvents {
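    /// Builds a `StructuredPaymentEvents` from the `(latency, fee)` pairs of
    /// successful payments and the latencies of failed payments, sorting all
    /// of the internal vectors.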
    pub fn new(
        success_stats: &[(u64, Amount)],
        failure_stats: Vec<u64>,
    ) -> StructuredPaymentEvents {
        let mut events = StructuredPaymentEvents {
            latencies: success_stats.iter().map(|(l, _)| *l).collect(),
            fees: success_stats.iter().map(|(_, f)| *f).collect(),
            latencies_failure: failure_stats,
        };
        events.sort();
        events
    }

    /// Combines this `StructuredPaymentEvents` with the `other`
    /// `StructuredPaymentEvents` by appending (and draining) each of
    /// `other`'s internal vectors, then re-sorting.
    pub fn combine(&mut self, other: &mut StructuredPaymentEvents) {
        self.latencies.append(&mut other.latencies);
        self.fees.append(&mut other.fees);
        self.latencies_failure.append(&mut other.latencies_failure);
        self.sort();
    }

    /// Sorts this `StructuredPaymentEvents` by sorting all of the internal
    /// vectors. The vectors are sorted independently, so a payment's latency
    /// and fee no longer share an index; this is fine because the statistics
    /// computed below only ever aggregate each vector on its own.
    fn sort(&mut self) {
        self.latencies.sort_unstable();
        self.fees.sort_unstable();
        self.latencies_failure.sort_unstable();
    }

    /// Computes the payment statistics for the given input data.
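    ///
    /// A usage sketch (not compiled):
    ///
    /// ```ignore
    /// // Two successes as (latency in micros, fee) pairs, plus one failure.
    /// let events = StructuredPaymentEvents::new(
    ///     &[(1_000, Amount::from_msats(10)), (3_000, Amount::from_msats(20))],
    ///     vec![5_000],
    /// );
    /// let stats = events.compute_payment_stats();
    /// assert_eq!(stats.total_success, 2);
    /// assert_eq!(stats.total_failure, 1);
    /// assert_eq!(stats.total_fees, Amount::from_msats(30));
    /// ```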
    pub fn compute_payment_stats(&self) -> PaymentStats {
        PaymentStats {
            average_latency: get_average(&self.latencies).map(Duration::from_micros),
            median_latency: get_median(&self.latencies).map(Duration::from_micros),
            total_fees: Amount::from_msats(self.fees.iter().map(|a| a.msats).sum()),
            total_success: self.latencies.len(),
            total_failure: self.latencies_failure.len(),
        }
    }
}