Skip to content

Commit 384b887

Browse files
committed
feat(metrics): [#1589] add Avg (average) aggregate function
Implements a new aggregate function for calculating averages of metric samples that match specific label criteria, complementing the existing `Sum` aggregation.

### Changes

- **metrics/src/metric/aggregate/avg.rs**: New metric-level average trait and implementations
  - `Avg` trait with `avg()` method for calculating averages
  - Implementation for `Metric<Counter>` returning `f64`
  - Implementation for `Metric<Gauge>` returning `f64`
  - Comprehensive unit tests with edge cases (empty samples, large values, etc.)
- **metrics/src/metric_collection/aggregate/avg.rs**: New collection-level average trait
  - `Avg` trait for `MetricCollection` and `MetricKindCollection<T>`
  - Delegates to metric-level implementations
  - Handles mixed counter/gauge collections by trying counters first, then gauges
  - Returns `None` for non-existent metrics
  - Comprehensive test suite covering various scenarios
- **metrics/src/metric/aggregate/mod.rs**: Export new `avg` module
- **metrics/src/metric_collection/aggregate/mod.rs**: Export new `avg` module
- **metrics/README.md**: Add example usage of the new `Avg` trait in the aggregation section

### Design notes

- **Type Safety**: Returns appropriate types (`f64` for both counters and gauges)
- **Label Filtering**: Supports filtering samples by label criteria like existing `Sum`
- **Edge Case Handling**: Returns `0.0` for empty sample sets
- **Performance**: Uses iterator chains for efficient sample processing
- **Comprehensive Testing**: 205 tests pass including new avg functionality

### Example

```rust
use torrust_tracker_metrics::metric_collection::aggregate::Avg;

// Calculate average of all matching samples
let avg_value = metrics.avg(&metric_name, &label_criteria);
```

The implementation follows the same patterns as the existing `Sum` aggregate function, ensuring consistency in the codebase and maintaining the same level of type safety and performance characteristics.
1 parent ed5f1e6 commit 384b887

File tree

5 files changed

+532
-1
lines changed

5 files changed

+532
-1
lines changed

packages/metrics/README.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ println!("{}", prometheus_output);
6767
### Metric Aggregation
6868

6969
```rust
70-
use torrust_tracker_metrics::metric_collection::aggregate::Sum;
70+
use torrust_tracker_metrics::metric_collection::aggregate::{Sum, Avg};
7171

7272
// Sum all counter values matching specific labels
7373
let total_requests = metrics.sum(
@@ -76,6 +76,14 @@ let total_requests = metrics.sum(
7676
);
7777

7878
println!("Total requests: {:?}", total_requests);
79+
80+
// Calculate average of gauge values matching specific labels
81+
let avg_response_time = metrics.avg(
82+
&metric_name!("response_time_seconds"),
83+
&[("endpoint", "/announce")].into(),
84+
);
85+
86+
println!("Average response time: {:?}", avg_response_time);
7987
```
8088

8189
## Architecture
Lines changed: 307 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,307 @@
1+
use crate::counter::Counter;
2+
use crate::gauge::Gauge;
3+
use crate::label::LabelSet;
4+
use crate::metric::Metric;
5+
6+
/// Aggregate that computes the arithmetic mean of a metric's samples.
///
/// Implementors average the values of all samples whose label set matches
/// the given criteria (see the `Metric<Counter>` and `Metric<Gauge>` impls).
pub trait Avg {
    /// Numeric type of the computed average (`f64` for both impls in this file).
    type Output;

    /// Returns the average of all samples matching `label_set_criteria`.
    ///
    /// Implementations in this file return `0.0` when no sample matches.
    fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output;
}
10+
11+
impl Avg for Metric<Counter> {
12+
type Output = f64;
13+
14+
fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output {
15+
let matching_samples: Vec<_> = self
16+
.sample_collection
17+
.iter()
18+
.filter(|(label_set, _measurement)| label_set.matches(label_set_criteria))
19+
.collect();
20+
21+
if matching_samples.is_empty() {
22+
return 0.0;
23+
}
24+
25+
let sum: u64 = matching_samples
26+
.iter()
27+
.map(|(_label_set, measurement)| measurement.value().primitive())
28+
.sum();
29+
30+
#[allow(clippy::cast_precision_loss)]
31+
(sum as f64 / matching_samples.len() as f64)
32+
}
33+
}
34+
35+
impl Avg for Metric<Gauge> {
36+
type Output = f64;
37+
38+
fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output {
39+
let matching_samples: Vec<_> = self
40+
.sample_collection
41+
.iter()
42+
.filter(|(label_set, _measurement)| label_set.matches(label_set_criteria))
43+
.collect();
44+
45+
if matching_samples.is_empty() {
46+
return 0.0;
47+
}
48+
49+
let sum: f64 = matching_samples
50+
.iter()
51+
.map(|(_label_set, measurement)| measurement.value().primitive())
52+
.sum();
53+
54+
#[allow(clippy::cast_precision_loss)]
55+
(sum / matching_samples.len() as f64)
56+
}
57+
}
58+
59+
#[cfg(test)]
mod tests {
    //! Table-driven tests for the `Avg` aggregate on `Metric<Counter>` and
    //! `Metric<Gauge>`. Each case is `(metric, label_set_criteria, expected)`.

    use torrust_tracker_primitives::DurationSinceUnixEpoch;

    use crate::counter::Counter;
    use crate::gauge::Gauge;
    use crate::label::LabelSet;
    use crate::metric::aggregate::avg::Avg;
    use crate::metric::{Metric, MetricName};
    use crate::metric_name;
    use crate::sample::Sample;
    use crate::sample_collection::SampleCollection;

    /// Fluent helper to assemble a `Metric<T>` from individual samples.
    struct MetricBuilder<T> {
        // Shared timestamp for every sample; the avg aggregation ignores time.
        sample_time: DurationSinceUnixEpoch,
        name: MetricName,
        samples: Vec<Sample<T>>,
    }

    impl<T> Default for MetricBuilder<T> {
        fn default() -> Self {
            Self {
                // Arbitrary fixed epoch (2025-04-02) so builds are deterministic.
                sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000),
                name: metric_name!("test_metric"),
                samples: vec![],
            }
        }
    }

    impl<T> MetricBuilder<T> {
        /// Appends one sample with the given value and label set.
        fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self {
            let sample = Sample::new(value, self.sample_time, label_set.clone());
            self.samples.push(sample);
            self
        }

        /// Finalizes the metric. Panics if the accumulated samples are
        /// rejected by `SampleCollection::new` (a test-setup bug).
        fn build(self) -> Metric<T> {
            Metric::new(
                self.name,
                None,
                None,
                SampleCollection::new(self.samples).expect("invalid samples"),
            )
        }
    }

    /// Counter test table: happy paths, label filtering, and edge cases
    /// (no samples, no matches, zero values, values near u64::MAX).
    fn counter_cases() -> Vec<(Metric<Counter>, LabelSet, f64)> {
        // (metric, label set criteria, expected_average_value)
        vec![
            // Metric with one sample without label set
            (
                MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(),
                LabelSet::empty(),
                1.0,
            ),
            // Metric with one sample with a label set
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with two samples, different label sets, average all
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .with_sample(3.into(), &[("l2", "l2_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1 + 3) / 2 = 2.0
            ),
            // Metric with two samples, different label sets, average one
            (
                MetricBuilder::default()
                    .with_sample(1.into(), &[("l1", "l1_value")].into())
                    .with_sample(2.into(), &[("l2", "l2_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with three samples, same label key, different label values, average by key
            (
                MetricBuilder::default()
                    .with_sample(2.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(4.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .with_sample(6.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                4.0, // (2 + 4 + 6) / 3 = 4.0
            ),
            // Metric with two samples, different label values, average by subkey
            (
                MetricBuilder::default()
                    .with_sample(5.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(7.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .build(),
                [("la", "la_value")].into(),
                5.0,
            ),
            // Edge: Metric with no samples at all
            (MetricBuilder::default().build(), LabelSet::empty(), 0.0),
            // Edge: Metric with samples but no matching labels
            (
                MetricBuilder::default()
                    .with_sample(5.into(), &[("foo", "bar")].into())
                    .build(),
                [("not", "present")].into(),
                0.0,
            ),
            // Edge: Metric with zero value
            (
                MetricBuilder::default()
                    .with_sample(0.into(), &[("l3", "l3_value")].into())
                    .build(),
                [("l3", "l3_value")].into(),
                0.0,
            ),
            // Edge: Metric with a very large value
            (
                MetricBuilder::default()
                    .with_sample((u64::MAX / 2).into(), &[("edge", "large1")].into())
                    .with_sample((u64::MAX / 2).into(), &[("edge", "large2")].into())
                    .build(),
                LabelSet::empty(),
                #[allow(clippy::cast_precision_loss)]
                (u64::MAX as f64 / 2.0), // Average of (max/2) and (max/2)
            ),
        ]
    }

    /// Gauge test table: mirrors the counter cases plus gauge-only edge
    /// cases (negative and fractional values).
    fn gauge_cases() -> Vec<(Metric<Gauge>, LabelSet, f64)> {
        // (metric, label set criteria, expected_average_value)
        vec![
            // Metric with one sample without label set
            (
                MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(),
                LabelSet::empty(),
                1.0,
            ),
            // Metric with one sample with a label set
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with two samples, different label sets, average all
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .with_sample(3.0.into(), &[("l2", "l2_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1.0 + 3.0) / 2 = 2.0
            ),
            // Metric with two samples, different label sets, average one
            (
                MetricBuilder::default()
                    .with_sample(1.0.into(), &[("l1", "l1_value")].into())
                    .with_sample(2.0.into(), &[("l2", "l2_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                1.0,
            ),
            // Metric with three samples, same label key, different label values, average by key
            (
                MetricBuilder::default()
                    .with_sample(2.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(4.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .with_sample(6.0.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into())
                    .build(),
                [("l1", "l1_value")].into(),
                4.0, // (2.0 + 4.0 + 6.0) / 3 = 4.0
            ),
            // Metric with two samples, different label values, average by subkey
            (
                MetricBuilder::default()
                    .with_sample(5.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into())
                    .with_sample(7.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into())
                    .build(),
                [("la", "la_value")].into(),
                5.0,
            ),
            // Edge: Metric with no samples at all
            (MetricBuilder::default().build(), LabelSet::empty(), 0.0),
            // Edge: Metric with samples but no matching labels
            (
                MetricBuilder::default()
                    .with_sample(5.0.into(), &[("foo", "bar")].into())
                    .build(),
                [("not", "present")].into(),
                0.0,
            ),
            // Edge: Metric with zero value
            (
                MetricBuilder::default()
                    .with_sample(0.0.into(), &[("l3", "l3_value")].into())
                    .build(),
                [("l3", "l3_value")].into(),
                0.0,
            ),
            // Edge: Metric with negative values
            (
                MetricBuilder::default()
                    .with_sample((-2.0).into(), &[("l4", "l4_value")].into())
                    .with_sample(4.0.into(), &[("l5", "l5_value")].into())
                    .build(),
                LabelSet::empty(),
                1.0, // (-2.0 + 4.0) / 2 = 1.0
            ),
            // Edge: Metric with decimal values
            (
                MetricBuilder::default()
                    .with_sample(1.5.into(), &[("l6", "l6_value")].into())
                    .with_sample(2.5.into(), &[("l7", "l7_value")].into())
                    .build(),
                LabelSet::empty(),
                2.0, // (1.5 + 2.5) / 2 = 2.0
            ),
        ]
    }

    #[test]
    fn test_counter_cases() {
        for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() {
            let avg = metric.avg(criteria);

            // Epsilon comparison: avoids exact float equality; every expected
            // value in the table is exactly representable, so EPSILON suffices.
            assert!(
                (avg - expected_value).abs() <= f64::EPSILON,
                "at case {idx}, expected avg to be {expected_value}, got {avg}"
            );
        }
    }

    #[test]
    fn test_gauge_cases() {
        for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() {
            let avg = metric.avg(criteria);

            // Epsilon comparison: avoids exact float equality; every expected
            // value in the table is exactly representable, so EPSILON suffices.
            assert!(
                (avg - expected_value).abs() <= f64::EPSILON,
                "at case {idx}, expected avg to be {expected_value}, got {avg}"
            );
        }
    }
}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1+
/// Average (arithmetic mean) aggregate function.
pub mod avg;
/// Sum aggregate function.
pub mod sum;

0 commit comments

Comments
 (0)