diff options
 -rw-r--r--  CHANGELOG.md      |  3 +++
 -rw-r--r--  Cargo.lock        |  2 +-
 -rw-r--r--  Cargo.toml        |  2 +-
 -rw-r--r--  src/main.rs       |  6 ++++--
 -rw-r--r--  src/stats_task.rs | 11 +++--------
 5 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ce859e..36e705b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+# v0.1.3-beta
+- fix memory leak but actually
+
 # v0.1.2-beta
 - use structured (json or syslog) logging to integrate nicely with log aggregation systems like Loki
 - fix exit signal in docker container
diff --git a/Cargo.lock b/Cargo.lock
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -212,7 +212,7 @@ dependencies = [
 
 [[package]]
 name = "containerspy"
-version = "0.1.2-beta"
+version = "0.1.3-beta"
 dependencies = [
  "anyhow",
  "bollard",
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "containerspy"
 authors = ["Hazel Atkinson <yellowsink@riseup.net>"]
-version = "0.1.2-beta"
+version = "0.1.3-beta"
 edition = "2021"
 description = "A lightweight Docker OTLP stats exporter"
 license-file = "LICENSE.md"
diff --git a/src/main.rs b/src/main.rs
index 26b2a4b..8b01073 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -5,6 +5,7 @@ use opentelemetry_otlp::{MetricExporter, Protocol, WithExportConfig};
 use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
 use std::env::args;
 use std::{collections::BTreeMap, sync::Arc, time::Duration};
+use opentelemetry::metrics::MeterProvider;
 use tokio::task::JoinHandle;
 use tokio::time::MissedTickBehavior;
 use tokio_util::sync::CancellationToken;
@@ -97,6 +98,7 @@ async fn main() -> Result<()> {
 
     // connect the OTLP exporter
     let meter_provider = Arc::new(setup_otlp()?);
+    let meter = Arc::new(meter_provider.meter("cspy_worker"));
 
     // fetch-report loop with graceful shutdown
@@ -153,7 +155,7 @@ async fn main() -> Result<()> {
         // all this string cloning hurts me
         tasks.insert(
             id_string.clone(),
-            stats_task::launch_stats_task(cont, docker.clone(), meter_provider.clone()),
+            stats_task::launch_stats_task(cont, docker.clone(), meter.clone()),
         );
     }
 }
@@ -165,7 +167,7 @@ async fn main() -> Result<()> {
     }
 
     debug("Exiting cleanly", []);
-
+    let _ = meter_provider.force_flush();
 
     Ok(())
diff --git a/src/stats_task.rs b/src/stats_task.rs
index 8eb40e8..04e0ee9 100644
--- a/src/stats_task.rs
+++ b/src/stats_task.rs
@@ -1,9 +1,8 @@
 use bollard::container::{BlkioStatsEntry, MemoryStatsStats, Stats, StatsOptions};
 use bollard::models::ContainerSummary;
 use bollard::Docker;
-use opentelemetry::metrics::MeterProvider;
-use opentelemetry::{InstrumentationScope, KeyValue};
-use opentelemetry_sdk::metrics::SdkMeterProvider;
+use opentelemetry::metrics::Meter;
+use opentelemetry::KeyValue;
 use std::mem::MaybeUninit;
 use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
@@ -15,7 +14,7 @@ use crate::s_log::*;
 pub fn launch_stats_task(
     container: ContainerSummary,
     docker: Arc<Docker>,
-    meter_provider: Arc<SdkMeterProvider>,
+    meter: Arc<Meter>
 ) -> JoinHandle<()> {
     tokio::spawn(async move {
         // extract some container info
@@ -27,8 +26,6 @@ pub fn launch_stats_task(
             .next()
             .map(|n| n.trim_start_matches("/").to_owned());
 
-        let meter_name = "cspy_".to_string() + container_id.as_str();
-
         let mut stats_stream = docker.stats(
             container_id.as_str(),
             Some(StatsOptions {
@@ -118,8 +115,6 @@ pub fn launch_stats_task(
         //println!("Starting reporting for container: {shared_labels:?}");
 
         // create meters
-        let meter = meter_provider.meter_with_scope(InstrumentationScope::builder(meter_name).build());
-
         let meter_container_cpu_usage_seconds_total = meter
             .f64_counter("container_cpu_usage_seconds_total")
             .with_unit("s")
