| field | value | date |
|---|---|---|
| author | Hazel Atkinson <yellowsink@riseup.net> | 2025-04-05 17:19:38 +0100 |
| committer | Hazel Atkinson <yellowsink@riseup.net> | 2025-04-05 17:19:38 +0100 |
| commit | 729817b8e8d778c7e25b8141d8812d3fa046e3fa (patch) | |
| tree | cab1236c3aca811457811bd8f467071086432f43 | |
| parent | 9423b0788e386bc663cda5ddc4ce27c1021e6fef (diff) | |
working cpu_usage_seconds_total implementation :tada:
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/main.rs | 37 |

1 file changed, 31 insertions, 6 deletions
```diff
diff --git a/src/main.rs b/src/main.rs
index 1af5cba..08f7b48 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,6 +2,8 @@ use std::{collections::BTreeMap, sync::Arc, time::Duration};
 
 use anyhow::Result;
 use bollard::{container::StatsOptions, Docker};
+use bollard::models::ContainerSummary;
+use opentelemetry::KeyValue;
 use config::CONFIG;
 use opentelemetry::metrics::MeterProvider;
 use opentelemetry_otlp::{MetricExporter, Protocol, WithExportConfig};
@@ -99,11 +101,11 @@ async fn main() -> Result<()> {
         }
 
         // now, add any new ones
-        for cont in &containers {
+        for cont in containers {
             let id_string = cont.id.as_ref().unwrap();
             if !tasks.contains_key(id_string) {
                 // all this string cloning hurts me
-                tasks.insert(id_string.clone(), launch_stats_task(id_string.clone(), docker.clone(), meter_provider.clone()));
+                tasks.insert(id_string.clone(), launch_stats_task(cont, docker.clone(), meter_provider.clone()));
             }
         }
     }
@@ -119,8 +121,12 @@ async fn main() -> Result<()> {
 }
 
 // I do not enjoy taking a bunch of Rcs but tokio needs ownership so fine.
-fn launch_stats_task(container_id: String, docker: Arc<Docker>, meter_provider: Arc<SdkMeterProvider>) -> JoinHandle<()> {
+fn launch_stats_task(container: ContainerSummary, docker: Arc<Docker>, meter_provider: Arc<SdkMeterProvider>) -> JoinHandle<()> {
     tokio::spawn(async move {
+        // extract some container info
+        let container_id = container.id.unwrap();
+        let container_name = container.names.iter().flatten().next().map(|n| n.trim_start_matches("/").to_owned());
+
         let mut stats_stream = docker.stats(container_id.as_str(), Some(StatsOptions {
             stream: true,
@@ -139,9 +145,28 @@ fn launch_stats_task(container_id: String, docker: Arc<Docker>, meter_provider:
             }
         }
 
-        // container labels
-        let labels = [];
+        // container labels shared for all metrics
+        let mut shared_labels = vec![
+            KeyValue::new("id", container_id.to_owned()),
+            KeyValue::new("image", container.image.unwrap_or(container.image_id.unwrap()))
+        ];
+
+        if let Some(name) = container_name {
+            shared_labels.push(KeyValue::new("name", name));
+        }
+
+        if let Some(docker_labels) = &container.labels {
+            for (key, value) in docker_labels {
+                shared_labels.push(KeyValue::new("container_label_".to_string() + key, value.clone()))
+            }
+        }
+
+        // free space and make mutable
+        shared_labels.shrink_to_fit();
+        let shared_labels = &shared_labels[..];
+
+        //println!("Starting reporting for container: {shared_labels:?}");
+
         // create meters
         let meter_container_cpu_usage_seconds_total = meter_provider.meter("test_meter").f64_counter("container_cpu_usage_seconds_total").with_unit("s").with_description("Cumulative cpu time consumed").build();
@@ -149,7 +174,7 @@ fn launch_stats_task(container_id: String, docker: Arc<Docker>, meter_provider:
             if let Ok(stats) = val {
                 meter_container_cpu_usage_seconds_total.add(
                     Duration::from_nanos(stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage).as_secs_f64(),
-                    &labels);
+                    shared_labels);
             } else {
                 // failed to get stats, log as such:
```
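For context on what the new metric records: Docker's stats API reports `total_usage` as cumulative CPU time in nanoseconds, and the commit adds the delta between the current sample (`cpu_stats`) and the previous one (`precpu_stats`) to the OpenTelemetry counter, which keeps `container_cpu_usage_seconds_total` cumulative in seconds. A minimal sketch of just that arithmetic (standard library only, with hypothetical sample values, separate from the actual bollard/OpenTelemetry plumbing above):

```rust
use std::time::Duration;

// Convert the difference between two cumulative CPU-usage readings
// (total_usage in nanoseconds, as reported by cpu_stats and precpu_stats)
// into the f64 seconds value that gets added to the counter.
fn cpu_seconds_delta(current_total_ns: u64, previous_total_ns: u64) -> f64 {
    Duration::from_nanos(current_total_ns - previous_total_ns).as_secs_f64()
}

fn main() {
    // hypothetical consecutive samples from the stats stream
    let previous_total_ns: u64 = 1_500_000_000; // 1.5 s of CPU time consumed so far
    let current_total_ns: u64 = 1_750_000_000; // 1.75 s at the next sample

    // 0.25 s would be added to container_cpu_usage_seconds_total here
    println!("delta: {} s", cpu_seconds_delta(current_total_ns, previous_total_ns));
}
```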
