-rw-r--r--   Cargo.lock          9
-rw-r--r--   Cargo.toml         10
-rw-r--r--   build.rs            3
-rw-r--r--   prometheus.yml      0
-rw-r--r--   src/config.rs      25
-rw-r--r--   src/help.txt        9
-rw-r--r--   src/main.rs        34
-rw-r--r--   src/stats_task.rs  84
8 files changed, 120 insertions, 54 deletions
diff --git a/Cargo.lock b/Cargo.lock
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -142,6 +142,12 @@ dependencies = [
 ]
 
 [[package]]
+name = "built"
+version = "0.7.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b"
+
+[[package]]
 name = "bumpalo"
 version = "3.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -206,10 +212,11 @@ dependencies = [
 
 [[package]]
 name = "containerspy"
-version = "0.1.0"
+version = "0.1.1-beta"
 dependencies = [
  "anyhow",
  "bollard",
+ "built",
  "confique",
  "opentelemetry",
  "opentelemetry-otlp",
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,7 +1,12 @@
 [package]
 name = "containerspy"
-version = "0.1.0"
+authors = ["Hazel Atkinson <yellowsink@riseup.net>"]
+version = "0.1.1-beta"
 edition = "2021"
+description = "A lightweight Docker OTLP stats exporter"
+license-file = "LICENSE.md"
+repository = "https://github.com/uwu/containerspy"
+publish = false
 
 [dependencies]
 anyhow = "1.0.97"
@@ -13,3 +18,6 @@ opentelemetry_sdk = { version = "0.29.0", features = ["metrics"] }
 tokio = { version = "1.44.1", features = ["macros", "signal"] }
 tokio-stream = "0.1.17"
 tokio-util = "0.7.14"
+
+[build-dependencies]
+built = "0.7.7"
\ No newline at end of file
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..2c9bc4f
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,3 @@
+fn main() {
+    built::write_built_file().expect("Failed to acquire build-time information")
+}
diff --git a/prometheus.yml b/prometheus.yml
deleted file mode 100644
index e69de29..0000000
--- a/prometheus.yml
+++ /dev/null
diff --git a/src/config.rs b/src/config.rs
index 212ca64..0cd9019 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -21,16 +21,14 @@ pub struct CspyConfig {
 
 pub static CONFIG: LazyLock<CspyConfig> = LazyLock::new(|| {
     let cfg_loc = std::env::var("CSPY_CONFIG");
-    let cfg_loc = cfg_loc.as_deref().ok().unwrap_or("/etc/containerspy/config.json");
+    let cfg_loc = cfg_loc
+        .as_deref()
+        .ok()
+        .unwrap_or("/etc/containerspy/config.json");
 
-    CspyConfig::builder()
-        .env()
-        .file(cfg_loc)
-        .load()
-        .unwrap()
+    CspyConfig::builder().env().file(cfg_loc).load().unwrap()
 });
 
-
 /// deserialization boilerplate
 struct ProtoDeserVisitor;
 
@@ -43,13 +41,18 @@ impl confique::serde::de::Visitor<'_> for ProtoDeserVisitor {
     }
 
     fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
-        where
-            E: confique::serde::de::Error, {
-        Ok(match v {
+    where
+        E: confique::serde::de::Error,
+    {
+        Ok(match v {
            "httpbinary" => Protocol::HttpBinary,
            "httpjson" => Protocol::HttpJson,
            "grpc" => Protocol::Grpc,
-            &_ => return Err(E::custom(format!("{v} is not a valid OTLP protocol, valid options are httpbinary, httpjson, or grpc.")))
+            &_ => {
+                return Err(E::custom(format!(
+                    "{v} is not a valid OTLP protocol, valid options are httpbinary, httpjson, or grpc."
+                )))
+            }
         })
     }
 }
diff --git a/src/help.txt b/src/help.txt
new file mode 100644
index 0000000..6250e34
--- /dev/null
+++ b/src/help.txt
@@ -0,0 +1,9 @@
+Available config options (please see {{REPO_URL}} for more detailed config information):
+
+config file            env var               default
+
+N/A                    CSPY_CONFIG           /etc/containerspy/config.json
+docker_socket          CSPY_DOCKER_SOCKET    /var/run/docker.sock or //./pipe/docker_engine
+otlp_protocol          CSPY_OTLP_PROTO       httpbinary
+otlp_endpoint          CSPY_OTLP_ENDPOINT    localhost:4318 for HTTP, localhost:4317 for gRPC
+otlp_export_interval   CSPY_OTLP_INTERVAL    60 seconds
\ No newline at end of file
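
Note: the config.rs hunk above only reflows the visit_str body; the code that actually drives ProtoDeserVisitor is outside this hunk. A minimal sketch of how such a visitor is typically handed to a Deserializer (helper name and wiring are assumptions, not taken from this commit):

    use confique::serde::de::Deserializer;
    use opentelemetry_otlp::Protocol;

    // Hypothetical helper; the real config.rs may wire the visitor up differently.
    fn deserialize_protocol<'de, D>(deserializer: D) -> Result<Protocol, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Hands the raw string to the visitor, which returns the matching Protocol
        // variant or the "not a valid OTLP protocol" error shown in the hunk above.
        deserializer.deserialize_str(ProtoDeserVisitor)
    }
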
diff --git a/src/main.rs b/src/main.rs
index c703fc2..07ebeb0 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,10 +1,10 @@
-use std::{collections::BTreeMap, sync::Arc, time::Duration};
-
 use anyhow::Result;
 use bollard::Docker;
 use config::CONFIG;
 use opentelemetry_otlp::{MetricExporter, Protocol, WithExportConfig};
 use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
+use std::env::args;
+use std::{collections::BTreeMap, sync::Arc, time::Duration};
 use tokio::task::JoinHandle;
 use tokio::time::MissedTickBehavior;
 use tokio_util::sync::CancellationToken;
@@ -12,6 +12,11 @@ use tokio_util::sync::CancellationToken;
 mod config;
 mod stats_task;
 
+// includes data from Cargo.toml and other sources using the `built` crate
+pub mod built_info {
+    include!(concat!(env!("OUT_DIR"), "/built.rs"));
+}
+
 fn setup_otlp() -> Result<SdkMeterProvider> {
     let metric_exporter = match CONFIG.otlp_protocol {
         Protocol::HttpBinary | Protocol::HttpJson => {
@@ -59,6 +64,28 @@ fn setup_otlp() -> Result<SdkMeterProvider> {
 
 #[tokio::main(flavor = "current_thread")]
 async fn main() -> Result<()> {
+    // handle CLI stuff
+    for arg in args() {
+        if ["--version", "--help"].contains(&arg.as_str()) {
+            println!(
+                "ContainerSpy v{}, made with love by {}",
+                built_info::PKG_VERSION,
+                built_info::PKG_AUTHORS.replace(":", ", ")
+            );
+
+            if arg == "--help" {
+                println!(
+                    "\n{}",
+                    include_str!("help.txt")
+                        .trim_end()
+                        .replace("{{REPO_URL}}", built_info::PKG_REPOSITORY)
+                );
+            }
+
+            return Ok(());
+        }
+    }
+
     // open a docker connection
     let docker = Arc::new(if let Some(path) = &CONFIG.docker_socket {
         Docker::connect_with_socket(path, 60, bollard::API_DEFAULT_VERSION)?
@@ -80,7 +107,8 @@ async fn main() -> Result<()> {
         st2.cancel();
     });
 
-    let mut container_search_interval = tokio::time::interval(Duration::from_millis(CONFIG.otlp_export_interval.unwrap_or(6000)) / 2);
+    let mut container_search_interval =
+        tokio::time::interval(Duration::from_millis(CONFIG.otlp_export_interval.unwrap_or(6000)) / 2);
     container_search_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
 
     let mut tasks: BTreeMap<String, JoinHandle<()>> = BTreeMap::new();
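
Note: the built_info module added above works because the new build.rs has the `built` crate write a Rust source file into $OUT_DIR at compile time, which include! then splices in. Roughly, it exposes string constants like the following (illustrative values taken from the Cargo.toml change in this commit, not the literal generated file):

    // Approximation of what $OUT_DIR/built.rs contains after `built` runs.
    pub const PKG_VERSION: &str = "0.1.1-beta";
    // Multiple authors are joined with ':', which is why main.rs calls
    // PKG_AUTHORS.replace(":", ", ") before printing.
    pub const PKG_AUTHORS: &str = "Hazel Atkinson <yellowsink@riseup.net>";
    pub const PKG_REPOSITORY: &str = "https://github.com/uwu/containerspy";
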
diff --git a/src/stats_task.rs b/src/stats_task.rs
index 6b8eb21..34fa6fb 100644
--- a/src/stats_task.rs
+++ b/src/stats_task.rs
@@ -1,13 +1,11 @@
-use std::borrow::Cow;
-use std::collections::HashMap;
-use bollard::container::{BlkioStatsEntry, MemoryStatsStats, MemoryStatsStatsV1, Stats, StatsOptions};
+use bollard::container::{BlkioStatsEntry, MemoryStatsStats, Stats, StatsOptions};
 use bollard::models::ContainerSummary;
 use bollard::Docker;
 use opentelemetry::metrics::MeterProvider;
 use opentelemetry::KeyValue;
 use opentelemetry_sdk::metrics::SdkMeterProvider;
 use std::mem::MaybeUninit;
-use std::sync::{Arc, LazyLock, RwLock};
+use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use tokio::task::JoinHandle;
 use tokio_stream::StreamExt;
@@ -51,17 +49,22 @@ pub fn launch_stats_task(
                 Some(Ok(st)) => {
                     first_read = MaybeUninit::new(st);
                     break;
-                },
+                }
                 Some(Err(err)) => {
                     // TODO: use json logging or syslog so loki can understand this lol
                     println!("Failed to get stats for container {container_id}!: {err:?}");
-                },
+                }
             }
         }
 
         // I'm going to rust jail!
         let first_read = unsafe { first_read.assume_init() };
-        let Stats { blkio_stats, networks: mut last_net_stats, memory_stats: mut last_mem_stats, .. } = first_read;
+        let Stats {
+            blkio_stats,
+            networks: mut last_net_stats,
+            memory_stats: mut last_mem_stats,
+            ..
+        } = first_read;
 
         let mut last_io_stats = blkio_stats.io_service_bytes_recursive;
 
@@ -189,7 +192,9 @@
         let meter_container_memory_usage_bytes = meter
             .u64_gauge("container_memory_usage_bytes")
             .with_unit("By")
-            .with_description("Current memory usage, including all memory regardless of when it was accessed")
+            .with_description(
+                "Current memory usage, including all memory regardless of when it was accessed",
+            )
             .build();
         let meter_container_memory_working_set_bytes = meter
             .u64_gauge("container_memory_working_set_bytes")
@@ -233,7 +238,7 @@
         let meter_container_network_transmit_packets_total = meter
             .u64_counter("container_network_transmit_packets_total")
             .with_description("Cumulative count of packets transmitted")
-            .build();;
+            .build();
 
         let meter_container_start_time_seconds = meter
             .u64_gauge("container_start_time_seconds")
@@ -352,18 +357,29 @@
                 // todo
                 // i have no way to test cgroups v2 so only work on v1 - see readme for more info
             } else if let Some(MemoryStatsStats::V2(v2stats)) = stats.memory_stats.stats {
-
                 // container_memory_cache
                 meter_container_memory_cache.record(v2stats.file, shared_labels);
 
                 // container_memory_failures_total
                 // need last
                 if let Some(MemoryStatsStats::V2(last_v2)) = last_mem_stats.stats {
-                    meter_container_memory_failures_total.add(v2stats.pgfault - last_v2.pgfault, labels_mem_container_min_c);
-                    meter_container_memory_failures_total.add(v2stats.pgfault - last_v2.pgfault, labels_mem_container_min_h);
-
-                    meter_container_memory_failures_total.add(v2stats.pgmajfault - last_v2.pgmajfault, labels_mem_container_maj_c);
-                    meter_container_memory_failures_total.add(v2stats.pgmajfault - last_v2.pgmajfault, labels_mem_container_maj_h);
+                    meter_container_memory_failures_total.add(
+                        v2stats.pgfault - last_v2.pgfault,
+                        labels_mem_container_min_c,
+                    );
+                    meter_container_memory_failures_total.add(
+                        v2stats.pgfault - last_v2.pgfault,
+                        labels_mem_container_min_h,
+                    );
+
+                    meter_container_memory_failures_total.add(
+                        v2stats.pgmajfault - last_v2.pgmajfault,
+                        labels_mem_container_maj_c,
+                    );
+                    meter_container_memory_failures_total.add(
+                        v2stats.pgmajfault - last_v2.pgmajfault,
+                        labels_mem_container_maj_h,
+                    );
                 }
 
                 // container_memory_kernel_usage
@@ -385,7 +401,8 @@
                     meter_container_memory_usage_bytes.record(all_usage, shared_labels);
 
                     // container_memory_working_set_bytes
-                    meter_container_memory_working_set_bytes.record(all_usage - v2stats.inactive_file, shared_labels);
+                    meter_container_memory_working_set_bytes
+                        .record(all_usage - v2stats.inactive_file, shared_labels);
                 }
             }
 
@@ -404,39 +421,31 @@
                     net_labels.push(KeyValue::new("interface", interface.clone()));
                     let net_labels = &net_labels.into_boxed_slice()[..];
 
-                    meter_container_network_receive_bytes_total.add(
-                        this_inter.rx_bytes - last_this_inter.rx_bytes,
-                        net_labels
-                    );
-                    meter_container_network_transmit_bytes_total.add(
-                        this_inter.tx_bytes - last_this_inter.tx_bytes,
-                        net_labels
-                    );
+                    meter_container_network_receive_bytes_total
+                        .add(this_inter.rx_bytes - last_this_inter.rx_bytes, net_labels);
+                    meter_container_network_transmit_bytes_total
+                        .add(this_inter.tx_bytes - last_this_inter.tx_bytes, net_labels);
                     #[cfg(not(windows))]
-                    meter_container_network_receive_errors_total.add(
-                        this_inter.rx_errors - last_this_inter.rx_errors,
-                        net_labels
-                    );
+                    meter_container_network_receive_errors_total
+                        .add(this_inter.rx_errors - last_this_inter.rx_errors, net_labels);
                     #[cfg(not(windows))]
-                    meter_container_network_transmit_errors_total.add(
-                        this_inter.tx_errors - last_this_inter.tx_errors,
-                        net_labels
-                    );
+                    meter_container_network_transmit_errors_total
+                        .add(this_inter.tx_errors - last_this_inter.tx_errors, net_labels);
                     meter_container_network_receive_packets_dropped_total.add(
                         this_inter.rx_dropped - last_this_inter.rx_dropped,
-                        net_labels
+                        net_labels,
                     );
                     meter_container_network_transmit_packets_dropped_total.add(
                         this_inter.tx_dropped - last_this_inter.tx_dropped,
-                        net_labels
+                        net_labels,
                     );
                     meter_container_network_receive_packets_total.add(
                         this_inter.rx_packets - last_this_inter.rx_packets,
-                        net_labels
+                        net_labels,
                     );
                     meter_container_network_transmit_packets_total.add(
                         this_inter.tx_packets - last_this_inter.tx_packets,
-                        net_labels
+                        net_labels,
                     );
                 }
             }
@@ -494,7 +503,6 @@ fn get_rw_totals<'a>(iter: impl IntoIterator<Item = &'a BlkioStatsEntry>) -> (u64, u64) {
     (read, write)
 }
 
-
 // LMAO i built this entire string pool around the idea of needing &'static str but turns out i can just use owned strings
 // guuuh okay whatever that's fine i guess, i'll keep this around just in case i need it -- sink
 
@@ -521,4 +529,4 @@ fn pool_kv(key: &str, val: &str) -> KeyValue {
         LABEL_POOL.write().unwrap().insert(cows, kv.clone());
         kv
     }
-}*/
\ No newline at end of file
+}*/
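
Note: the stats_task.rs hunks above all follow one metric pattern: counters get the delta since the previous Docker stats sample added, gauges get the current value recorded. A minimal standalone sketch of that pattern (hypothetical function and label values; only API calls that already appear in this file):

    use opentelemetry::metrics::MeterProvider;
    use opentelemetry::KeyValue;
    use opentelemetry_sdk::metrics::SdkMeterProvider;

    fn report_sample(provider: &SdkMeterProvider, rx_bytes: u64, last_rx_bytes: u64, mem_usage: u64) {
        let meter = provider.meter("containerspy-example");

        let rx_total = meter
            .u64_counter("container_network_receive_bytes_total")
            .build();
        let mem_gauge = meter
            .u64_gauge("container_memory_usage_bytes")
            .with_unit("By")
            .build();

        let labels = [KeyValue::new("interface", "eth0")];
        // counter: add only what changed since the previous read
        rx_total.add(rx_bytes - last_rx_bytes, &labels);
        // gauge: record the latest absolute value
        mem_gauge.record(mem_usage, &labels);
    }
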
