* [pbs-devel] [PATCH proxmox v5 1/2] proxmox-async: add udp::connect() helper
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
so that we do not always have to check the target IP address family manually
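
a minimal usage sketch (the hostname and payload are made up for
illustration):

use proxmox_async::io::udp;

let socket = udp::connect("metrics.example.com:8089").await?;
socket.send(b"example payload").await?;
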
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
proxmox-async/Cargo.toml | 2 +-
proxmox-async/src/io/mod.rs | 2 ++
proxmox-async/src/io/udp.rs | 36 ++++++++++++++++++++++++++++++++++++
3 files changed, 39 insertions(+), 1 deletion(-)
create mode 100644 proxmox-async/src/io/udp.rs
diff --git a/proxmox-async/Cargo.toml b/proxmox-async/Cargo.toml
index 9e38303..c1a41f1 100644
--- a/proxmox-async/Cargo.toml
+++ b/proxmox-async/Cargo.toml
@@ -17,7 +17,7 @@ flate2 = "1.0"
futures = "0.3"
lazy_static = "1.4"
pin-utils = "0.1.0"
-tokio = { version = "1.0", features = ["fs", "rt", "rt-multi-thread", "sync"] }
+tokio = { version = "1.0", features = ["fs", "net", "rt", "rt-multi-thread", "sync"] }
walkdir = "2"
proxmox-sys = { path = "../proxmox-sys", version = "0.2.0" }
diff --git a/proxmox-async/src/io/mod.rs b/proxmox-async/src/io/mod.rs
index 9a6d8a6..32081cf 100644
--- a/proxmox-async/src/io/mod.rs
+++ b/proxmox-async/src/io/mod.rs
@@ -2,3 +2,5 @@
mod async_channel_writer;
pub use async_channel_writer::AsyncChannelWriter;
+
+pub mod udp;
diff --git a/proxmox-async/src/io/udp.rs b/proxmox-async/src/io/udp.rs
new file mode 100644
index 0000000..a517869
--- /dev/null
+++ b/proxmox-async/src/io/udp.rs
@@ -0,0 +1,36 @@
+use std::io;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
+
+use tokio::net::{ToSocketAddrs, UdpSocket};
+
+/// Helper to connect to UDP addresses without having to manually bind to the correct ip address
+pub async fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> {
+ let mut last_err = None;
+ for address in tokio::net::lookup_host(&addr).await? {
+ let bind_address = match address {
+ SocketAddr::V4(_) => SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0),
+ SocketAddr::V6(_) => SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0),
+ };
+ let socket = match UdpSocket::bind(bind_address).await {
+ Ok(sock) => sock,
+ Err(err) => {
+ last_err = Some(err);
+ continue;
+ }
+ };
+ match socket.connect(address).await {
+ Ok(()) => return Ok(socket),
+ Err(err) => {
+ last_err = Some(err);
+ continue;
+ }
+ }
+ }
+
+ Err(last_err.unwrap_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "could not resolve to any addresses",
+ )
+ }))
+}
--
2.30.2
* [pbs-devel] applied: [PATCH proxmox v5 1/2] proxmox-async: add udp::connect() helper
From: Wolfgang Bumiller @ 2022-02-02 12:22 UTC (permalink / raw)
To: Dominik Csapak; +Cc: pbs-devel
applied but moved from `io::udp` to `net::udp`.
* [pbs-devel] [PATCH proxmox v5 2/2] proxmox-metrics: implement metrics server client code
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
influxdb (udp + http(s)) only for now

the general architecture looks as follows:

the helper functions influxdb_http/udp start a tokio task and return
a Metrics struct that can be used to send data and to wait for the
tokio task. if the struct is dropped, the task is canceled.

so usage would look like this:

let metrics = influxdb_http(..params..)?;
metrics.send_data(...).await?;
metrics.send_data(...).await?;
metrics.join().await?;

on join, the sending side of the channel is dropped, which flushes the
remaining data to the server
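
a slightly more concrete sketch with made-up parameters (url, token and
values are purely illustrative):

let metrics = influxdb_http(
    "https://influxdb.example.com:8086",
    "proxmox",            // organization
    "proxmox",            // bucket
    Some("my-api-token"), // token
    true,                 // verify_tls
    25_000_000,           // max_body_size
)?;
let data = MetricsData::new(
    "cpustat",
    &[("object", "host"), ("host", "pbs1")],
    proxmox_time::epoch_i64(),
    serde_json::json!({ "cpu": 0.5 }),
)?;
metrics.send_data(std::sync::Arc::new(data)).await?;
metrics.join().await?;
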
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
Cargo.toml | 1 +
proxmox-metrics/Cargo.toml | 21 +++
proxmox-metrics/debian/changelog | 5 +
proxmox-metrics/debian/copyright | 16 +++
proxmox-metrics/debian/debcargo.toml | 7 +
proxmox-metrics/src/influxdb/http.rs | 185 ++++++++++++++++++++++++++
proxmox-metrics/src/influxdb/mod.rs | 7 +
proxmox-metrics/src/influxdb/udp.rs | 86 ++++++++++++
proxmox-metrics/src/influxdb/utils.rs | 50 +++++++
proxmox-metrics/src/lib.rs | 117 ++++++++++++++++
10 files changed, 495 insertions(+)
create mode 100644 proxmox-metrics/Cargo.toml
create mode 100644 proxmox-metrics/debian/changelog
create mode 100644 proxmox-metrics/debian/copyright
create mode 100644 proxmox-metrics/debian/debcargo.toml
create mode 100644 proxmox-metrics/src/influxdb/http.rs
create mode 100644 proxmox-metrics/src/influxdb/mod.rs
create mode 100644 proxmox-metrics/src/influxdb/udp.rs
create mode 100644 proxmox-metrics/src/influxdb/utils.rs
create mode 100644 proxmox-metrics/src/lib.rs
diff --git a/Cargo.toml b/Cargo.toml
index 8f85e08..4a458d2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,7 @@ members = [
"proxmox-http",
"proxmox-io",
"proxmox-lang",
+ "proxmox-metrics",
"proxmox-router",
"proxmox-schema",
"proxmox-serde",
diff --git a/proxmox-metrics/Cargo.toml b/proxmox-metrics/Cargo.toml
new file mode 100644
index 0000000..4f0b8e3
--- /dev/null
+++ b/proxmox-metrics/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "proxmox-metrics"
+version = "0.1.0"
+authors = ["Proxmox Support Team <support@proxmox.com>"]
+edition = "2018"
+license = "AGPL-3"
+description = "Metrics Server export utilitites"
+
+exclude = [ "debian" ]
+
+[dependencies]
+anyhow = "1.0"
+tokio = { version = "1.0", features = [ "net", "sync" ] }
+futures = "0.3"
+serde = "1.0"
+serde_json = "1.0"
+http = "0.2"
+hyper = "0.14"
+openssl = "0.10"
+proxmox-http = { path = "../proxmox-http", features = [ "client" ], version = "0.6" }
+proxmox-async = { path = "../proxmox-async", features = [], version = "0.3" }
diff --git a/proxmox-metrics/debian/changelog b/proxmox-metrics/debian/changelog
new file mode 100644
index 0000000..c02803b
--- /dev/null
+++ b/proxmox-metrics/debian/changelog
@@ -0,0 +1,5 @@
+rust-proxmox-metrics (0.1.0-1) unstable; urgency=medium
+
+ * initial package
+
+ -- Proxmox Support Team <support@proxmox.com> Tue, 14 Dec 2021 08:56:54 +0100
diff --git a/proxmox-metrics/debian/copyright b/proxmox-metrics/debian/copyright
new file mode 100644
index 0000000..5661ef6
--- /dev/null
+++ b/proxmox-metrics/debian/copyright
@@ -0,0 +1,16 @@
+Copyright (C) 2021 Proxmox Server Solutions GmbH
+
+This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/proxmox-metrics/debian/debcargo.toml b/proxmox-metrics/debian/debcargo.toml
new file mode 100644
index 0000000..b7864cd
--- /dev/null
+++ b/proxmox-metrics/debian/debcargo.toml
@@ -0,0 +1,7 @@
+overlay = "."
+crate_src_path = ".."
+maintainer = "Proxmox Support Team <support@proxmox.com>"
+
+[source]
+vcs_git = "git://git.proxmox.com/git/proxmox.git"
+vcs_browser = "https://git.proxmox.com/?p=proxmox.git"
diff --git a/proxmox-metrics/src/influxdb/http.rs b/proxmox-metrics/src/influxdb/http.rs
new file mode 100644
index 0000000..51a4181
--- /dev/null
+++ b/proxmox-metrics/src/influxdb/http.rs
@@ -0,0 +1,185 @@
+use std::sync::Arc;
+
+use anyhow::{bail, Error};
+use hyper::Body;
+use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
+use tokio::sync::mpsc;
+
+use proxmox_http::client::{SimpleHttp, SimpleHttpOptions};
+
+use crate::influxdb::utils;
+use crate::{Metrics, MetricsData};
+
+struct InfluxDbHttp {
+ client: SimpleHttp,
+ healthuri: http::Uri,
+ writeuri: http::Uri,
+ token: Option<String>,
+ max_body_size: usize,
+ data: String,
+ channel: mpsc::Receiver<Arc<MetricsData>>,
+}
+
+/// Tests the connection to the given influxdb http server with the given
+/// parameters.
+pub async fn test_influxdb_http(
+ uri: &str,
+ organization: &str,
+ bucket: &str,
+ token: Option<&str>,
+ verify_tls: bool,
+) -> Result<(), Error> {
+ let (_tx, rx) = mpsc::channel(1);
+
+ let this = InfluxDbHttp::new(uri, organization, bucket, token, verify_tls, 1, rx)?;
+
+ this.test_connection().await
+}
+
+/// Returns a [Metrics] handle that connects and sends data to the
+/// given influxdb server at the given https url
+pub fn influxdb_http(
+ uri: &str,
+ organization: &str,
+ bucket: &str,
+ token: Option<&str>,
+ verify_tls: bool,
+ max_body_size: usize,
+) -> Result<Metrics, Error> {
+ let (tx, rx) = mpsc::channel(1024);
+
+ let this = InfluxDbHttp::new(
+ uri,
+ organization,
+ bucket,
+ token,
+ verify_tls,
+ max_body_size,
+ rx,
+ )?;
+
+ let join_handle = Some(tokio::spawn(async { this.finish().await }));
+
+ Ok(Metrics {
+ join_handle,
+ channel: Some(tx),
+ })
+}
+
+impl InfluxDbHttp {
+ fn new(
+ uri: &str,
+ organization: &str,
+ bucket: &str,
+ token: Option<&str>,
+ verify_tls: bool,
+ max_body_size: usize,
+ channel: mpsc::Receiver<Arc<MetricsData>>,
+ ) -> Result<Self, Error> {
+ let client = if verify_tls {
+ SimpleHttp::with_options(SimpleHttpOptions::default())
+ } else {
+ let mut ssl_connector = SslConnector::builder(SslMethod::tls()).unwrap();
+ ssl_connector.set_verify(SslVerifyMode::NONE);
+ SimpleHttp::with_ssl_connector(ssl_connector.build(), SimpleHttpOptions::default())
+ };
+
+ let uri: http::uri::Uri = uri.parse()?;
+ let uri_parts = uri.into_parts();
+
+ let base_path = if let Some(ref p) = uri_parts.path_and_query {
+ p.path().trim_end_matches('/')
+ } else {
+ ""
+ };
+
+ let writeuri = http::uri::Builder::new()
+ .scheme(uri_parts.scheme.clone().unwrap())
+ .authority(uri_parts.authority.clone().unwrap())
+ .path_and_query(format!(
+ "{}/api/v2/write?org={}&bucket={}",
+ base_path, organization, bucket
+ ))
+ .build()?;
+
+ let healthuri = http::uri::Builder::new()
+ .scheme(uri_parts.scheme.unwrap())
+ .authority(uri_parts.authority.unwrap())
+ .path_and_query(format!("{}/health", base_path))
+ .build()?;
+
+ Ok(InfluxDbHttp {
+ client,
+ writeuri,
+ healthuri,
+ token: token.map(String::from),
+ max_body_size,
+ data: String::new(),
+ channel,
+ })
+ }
+
+ async fn test_connection(&self) -> Result<(), Error> {
+ let mut request = http::Request::builder().method("GET").uri(&self.healthuri);
+
+ if let Some(token) = &self.token {
+ request = request.header("Authorization", format!("Token {}", token));
+ }
+
+ let res = self.client.request(request.body(Body::empty())?).await?;
+
+ let status = res.status();
+ if !status.is_success() {
+ bail!("got bad status: {}", status);
+ }
+
+ Ok(())
+ }
+
+ async fn add_data(&mut self, data: Arc<MetricsData>) -> Result<(), Error> {
+ let new_data = utils::format_influxdb_line(&data)?;
+
+ if self.data.len() + new_data.len() >= self.max_body_size {
+ self.flush().await?;
+ }
+
+ self.data.push_str(&new_data);
+
+ if self.data.len() >= self.max_body_size {
+ self.flush().await?;
+ }
+
+ Ok(())
+ }
+
+ async fn flush(&mut self) -> Result<(), Error> {
+ if self.data.is_empty() {
+ return Ok(());
+ }
+ let mut request = http::Request::builder().method("POST").uri(&self.writeuri);
+
+ if let Some(token) = &self.token {
+ request = request.header("Authorization", format!("Token {}", token));
+ }
+
+ let request = request.body(Body::from(self.data.split_off(0)))?;
+
+ let res = self.client.request(request).await?;
+
+ let status = res.status();
+ if !status.is_success() {
+ bail!("got bad status: {}", status);
+ }
+ Ok(())
+ }
+
+ async fn finish(mut self) -> Result<(), Error> {
+ while let Some(data) = self.channel.recv().await {
+ self.add_data(data).await?;
+ }
+
+ self.flush().await?;
+
+ Ok(())
+ }
+}
diff --git a/proxmox-metrics/src/influxdb/mod.rs b/proxmox-metrics/src/influxdb/mod.rs
new file mode 100644
index 0000000..26a715c
--- /dev/null
+++ b/proxmox-metrics/src/influxdb/mod.rs
@@ -0,0 +1,7 @@
+mod http;
+pub use self::http::*;
+
+mod udp;
+pub use udp::*;
+
+pub mod utils;
diff --git a/proxmox-metrics/src/influxdb/udp.rs b/proxmox-metrics/src/influxdb/udp.rs
new file mode 100644
index 0000000..4217c61
--- /dev/null
+++ b/proxmox-metrics/src/influxdb/udp.rs
@@ -0,0 +1,86 @@
+use std::sync::Arc;
+
+use anyhow::Error;
+use tokio::sync::mpsc;
+
+use proxmox_async::io::udp;
+
+use crate::influxdb::utils;
+use crate::{Metrics, MetricsData};
+
+struct InfluxDbUdp {
+ address: String,
+ conn: Option<tokio::net::UdpSocket>,
+ mtu: u16,
+ data: String,
+ channel: mpsc::Receiver<Arc<MetricsData>>,
+}
+
+/// Tests the connection to the given influxdb udp server.
+pub async fn test_influxdb_udp(address: &str) -> Result<(), Error> {
+ udp::connect(address).await?;
+ Ok(())
+}
+
+/// Returns a [Metrics] handle that connects and sends data to the
+/// given influxdb server at the given udp address/port
+///
+/// `address` must be in the format of 'ip_or_hostname:port'
+pub fn influxdb_udp(address: &str, mtu: Option<u16>) -> Metrics {
+ let (tx, rx) = mpsc::channel(1024);
+
+ let this = InfluxDbUdp {
+ address: address.to_string(),
+ conn: None,
+ // empty ipv6 udp packet needs 48 bytes, subtract 50 for safety
+ mtu: mtu.unwrap_or(1500) - 50,
+ data: String::new(),
+ channel: rx,
+ };
+
+ let join_handle = Some(tokio::spawn(async { this.finish().await }));
+
+ Metrics {
+ join_handle,
+ channel: Some(tx),
+ }
+}
+
+impl InfluxDbUdp {
+ async fn add_data(&mut self, data: Arc<MetricsData>) -> Result<(), Error> {
+ let new_data = utils::format_influxdb_line(&data)?;
+
+ if self.data.len() + new_data.len() >= (self.mtu as usize) {
+ self.flush().await?;
+ }
+
+ self.data.push_str(&new_data);
+
+ if self.data.len() >= (self.mtu as usize) {
+ self.flush().await?;
+ }
+
+ Ok(())
+ }
+
+ async fn flush(&mut self) -> Result<(), Error> {
+ let conn = match self.conn.take() {
+ Some(conn) => conn,
+ None => udp::connect(&self.address).await?,
+ };
+
+ conn.send(self.data.split_off(0).as_bytes()).await?;
+ self.conn = Some(conn);
+ Ok(())
+ }
+
+ async fn finish(mut self) -> Result<(), Error> {
+ while let Some(data) = self.channel.recv().await {
+ self.add_data(data).await?;
+ }
+
+ self.flush().await?;
+
+ Ok(())
+ }
+}
diff --git a/proxmox-metrics/src/influxdb/utils.rs b/proxmox-metrics/src/influxdb/utils.rs
new file mode 100644
index 0000000..3507e61
--- /dev/null
+++ b/proxmox-metrics/src/influxdb/utils.rs
@@ -0,0 +1,50 @@
+use std::fmt::Write;
+
+use anyhow::{bail, Error};
+use serde_json::Value;
+
+use crate::MetricsData;
+
+pub(crate) fn format_influxdb_line(data: &MetricsData) -> Result<String, Error> {
+ if !data.values.is_object() {
+ bail!("invalid data");
+ }
+
+ let mut line = escape_measurement(&data.measurement);
+
+ for (key, value) in &data.tags {
+ write!(line, ",{}={}", escape_key(key), escape_key(value))?;
+ }
+
+ line.push(' ');
+
+ let mut first = true;
+ for (key, value) in data.values.as_object().unwrap().iter() {
+ match value {
+ Value::Object(_) => bail!("objects not supported"),
+ Value::Array(_) => bail!("arrays not supported"),
+ _ => {}
+ }
+ if !first {
+ line.push(',');
+ }
+ first = false;
+ write!(line, "{}={}", escape_key(key), value.to_string())?;
+ }
+
+ // nanosecond precision
+ writeln!(line, " {}", data.ctime * 1_000_000_000)?;
+
+ Ok(line)
+}
+
+fn escape_key(key: &str) -> String {
+ let key = key.replace(',', "\\,");
+ let key = key.replace('=', "\\=");
+ key.replace(' ', "\\ ")
+}
+
+fn escape_measurement(measurement: &str) -> String {
+ let measurement = measurement.replace(',', "\\,");
+ measurement.replace(' ', "\\ ")
+}
diff --git a/proxmox-metrics/src/lib.rs b/proxmox-metrics/src/lib.rs
new file mode 100644
index 0000000..9fb098e
--- /dev/null
+++ b/proxmox-metrics/src/lib.rs
@@ -0,0 +1,117 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use anyhow::{bail, format_err, Error};
+use serde::Serialize;
+use serde_json::Value;
+use tokio::sync::mpsc;
+
+mod influxdb;
+#[doc(inline)]
+pub use influxdb::{influxdb_http, influxdb_udp, test_influxdb_http, test_influxdb_udp};
+
+#[derive(Clone)]
+/// Structured data for the metric server
+pub struct MetricsData {
+ /// The category of measurements
+ pub measurement: String,
+ /// A list of tags to attach to the measurements
+ pub tags: HashMap<String, String>,
+ /// The actual values to send. Only plain (not-nested) objects are supported at the moment.
+ pub values: Value,
+ /// The time of the measurement
+ pub ctime: i64,
+}
+
+impl MetricsData {
+ /// Convenient helper to create from references
+ pub fn new<V: Serialize>(
+ measurement: &str,
+ tags: &[(&str, &str)],
+ ctime: i64,
+ values: V,
+ ) -> Result<Self, Error> {
+ let mut new_tags = HashMap::new();
+ for (key, value) in tags {
+ new_tags.insert(key.to_string(), value.to_string());
+ }
+
+ Ok(Self {
+ measurement: measurement.to_string(),
+ tags: new_tags,
+ values: serde_json::to_value(values)?,
+ ctime,
+ })
+ }
+}
+
+/// Helper to send a list of [MetricsData] to a list of [Metrics]
+pub async fn send_data_to_channels(
+ values: &[Arc<MetricsData>],
+ connections: &[Metrics],
+) -> Vec<Result<(), Error>> {
+ let mut futures = Vec::with_capacity(connections.len());
+ for connection in connections {
+ futures.push(async move {
+ for data in values {
+ connection.send_data(Arc::clone(data)).await?
+ }
+ Ok::<(), Error>(())
+ });
+ }
+
+ futures::future::join_all(futures).await
+}
+
+/// Represents connection to the metric server which can be used to send data
+///
+/// You can send [MetricsData] by using [`Self::send_data()`], and to flush and
+/// finish the connection use [`Self::join`].
+///
+/// If dropped, it will abort the connection and not flush out buffered data.
+pub struct Metrics {
+ join_handle: Option<tokio::task::JoinHandle<Result<(), Error>>>,
+ channel: Option<mpsc::Sender<Arc<MetricsData>>>,
+}
+
+impl Drop for Metrics {
+ fn drop(&mut self) {
+ if let Some(join_handle) = self.join_handle.take() {
+ join_handle.abort();
+ }
+ }
+}
+
+impl Metrics {
+ /// Closes the queue and waits for the connection to send all remaining data
+ pub async fn join(mut self) -> Result<(), Error> {
+ if let Some(channel) = self.channel.take() {
+ drop(channel);
+ }
+ if let Some(join_handle) = self.join_handle.take() {
+ join_handle.await?
+ } else {
+ bail!("internal error: no join_handle")
+ }
+ }
+
+ /// Queues the given data to the metric server
+ pub async fn send_data(&self, data: Arc<MetricsData>) -> Result<(), Error> {
+ // return ok if we got no data to send
+ if let Value::Object(map) = &data.values {
+ if map.is_empty() {
+ return Ok(());
+ }
+ }
+
+ if let Some(channel) = &self.channel {
+ channel
+ .send(data)
+ .await
+ .map_err(|_| format_err!("receiver side closed"))?;
+ } else {
+ bail!("channel was already closed");
+ }
+ Ok(())
+ }
+}
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 1/8] use 'fs_info' from proxmox-sys
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
as a replacement for 'disk_usage' in tools; also remove that function
there since we do not need it anymore
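
for illustration, the new call on a made-up path; note that the
returned struct names the field 'available' instead of 'avail':

let status = proxmox_sys::fs::fs_info(std::path::Path::new("/datastore/store1"))?;
let (total, used, avail) = (status.total, status.used, status.available);
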
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
src/api2/admin/datastore.rs | 4 ++--
src/api2/node/status.rs | 11 ++++++++---
src/api2/status.rs | 4 ++--
src/bin/proxmox-backup-proxy.rs | 4 ++--
src/tools/disks/mod.rs | 21 +--------------------
5 files changed, 15 insertions(+), 29 deletions(-)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 263ea96f..ac60c1af 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -616,7 +616,7 @@ pub fn status(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
+ let storage = proxmox_sys::fs::fs_info(&datastore.base_path())?;
let (counts, gc_status) = if verbose {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
@@ -639,7 +639,7 @@ pub fn status(
Ok(DataStoreStatus {
total: storage.total,
used: storage.used,
- avail: storage.avail,
+ avail: storage.available,
gc_status,
counts,
})
diff --git a/src/api2/node/status.rs b/src/api2/node/status.rs
index 9559dda6..4d48c350 100644
--- a/src/api2/node/status.rs
+++ b/src/api2/node/status.rs
@@ -1,5 +1,4 @@
use std::process::Command;
-use std::path::Path;
use anyhow::{Error, format_err, bail};
use serde_json::Value;
@@ -9,7 +8,7 @@ use proxmox_sys::linux::procfs;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_schema::api;
-use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
+use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT, StorageStatus};
use crate::api2::types::{
NodeCpuInformation, NodeStatus, NodeMemoryCounters, NodeSwapCounters, NodeInformation,
@@ -77,10 +76,16 @@ fn get_status(
uname.version()
);
+ let disk = proxmox_sys::fs::fs_info(proxmox_lang::c_str!("/"))?;
+
Ok(NodeStatus {
memory,
swap,
- root: crate::tools::disks::disk_usage(Path::new("/"))?,
+ root: StorageStatus {
+ total: disk.total,
+ used: disk.used,
+ avail: disk.available,
+ },
uptime: procfs::read_proc_uptime()?.0 as u64,
loadavg,
kversion,
diff --git a/src/api2/status.rs b/src/api2/status.rs
index 029529ac..58bf7333 100644
--- a/src/api2/status.rs
+++ b/src/api2/status.rs
@@ -110,13 +110,13 @@ pub fn datastore_status(
continue;
}
};
- let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
+ let status = proxmox_sys::fs::fs_info(&datastore.base_path())?;
let mut entry = json!({
"store": store,
"total": status.total,
"used": status.used,
- "avail": status.avail,
+ "avail": status.available,
"gc-status": datastore.last_gc_status(),
});
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 8d0033de..8ebaba1d 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -1066,7 +1066,7 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {
- match proxmox_backup::tools::disks::disk_usage(path) {
+ match proxmox_sys::fs::fs_info(path) {
Ok(status) => {
let rrd_key = format!("{}/total", rrd_prefix);
rrd_update_gauge(&rrd_key, status.total as f64);
@@ -1074,7 +1074,7 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
rrd_update_gauge(&rrd_key, status.used as f64);
}
Err(err) => {
- eprintln!("read disk_usage on {:?} failed - {}", path, err);
+ eprintln!("read fs info on {:?} failed - {}", path, err);
}
}
diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs
index 080e4ba6..ebc490d1 100644
--- a/src/tools/disks/mod.rs
+++ b/src/tools/disks/mod.rs
@@ -19,7 +19,7 @@ use proxmox_sys::linux::procfs::{MountInfo, mountinfo::Device};
use proxmox_sys::{io_bail, io_format_err};
use proxmox_schema::api;
-use pbs_api_types::{BLOCKDEVICE_NAME_REGEX, StorageStatus};
+use pbs_api_types::BLOCKDEVICE_NAME_REGEX;
mod zfs;
pub use zfs::*;
@@ -521,25 +521,6 @@ impl Disk {
}
}
-/// Returns disk usage information (total, used, avail)
-pub fn disk_usage(path: &std::path::Path) -> Result<StorageStatus, Error> {
-
- let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
-
- use nix::NixPath;
-
- let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
- nix::errno::Errno::result(res)?;
-
- let bsize = stat.f_bsize as u64;
-
- Ok(StorageStatus{
- total: stat.f_blocks*bsize,
- used: (stat.f_blocks-stat.f_bfree)*bsize,
- avail: stat.f_bavail*bsize,
- })
-}
-
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="lowercase")]
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 2/8] pbs-api-types: add metrics api types
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
InfluxDbUdp and InfluxDbHttp for now
introduces schemas for host:port combinations and http(s) urls
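
for illustration, a few made-up values the new formats are intended to
accept (or reject):

assert!(HOST_PORT_REGEX.is_match("example.com:8089"));
assert!(HOST_PORT_REGEX.is_match("[2001:db8::1]:8089")); // ipv6 needs brackets
assert!(!HOST_PORT_REGEX.is_match("example.com"));       // port is required
assert!(HTTP_URL_REGEX.is_match("https://influxdb.example.com:8086/some/path"));
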
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
pbs-api-types/src/lib.rs | 15 ++++
pbs-api-types/src/metrics.rs | 138 +++++++++++++++++++++++++++++++++++
2 files changed, 153 insertions(+)
create mode 100644 pbs-api-types/src/metrics.rs
diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs
index 754e7b22..38edeb29 100644
--- a/pbs-api-types/src/lib.rs
+++ b/pbs-api-types/src/lib.rs
@@ -87,6 +87,8 @@ pub use traffic_control::*;
mod zfs;
pub use zfs::*;
+mod metrics;
+pub use metrics::*;
#[rustfmt::skip]
#[macro_use]
@@ -99,6 +101,7 @@ mod local_macros {
macro_rules! DNS_ALIAS_NAME {
() => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
}
+ macro_rules! PORT_REGEX_STR { () => (r"(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])") }
}
const_regex! {
@@ -112,6 +115,8 @@ const_regex! {
pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
+ pub HOST_PORT_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), "):", PORT_REGEX_STR!() ,"$");
+ pub HTTP_URL_REGEX = concat!(r"^https?://(?:(?:(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), ")(?::", PORT_REGEX_STR!() ,")?)|", IPV6RE!(),")(?:/[^\x00-\x1F\x7F]*)?$");
pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
@@ -163,6 +168,8 @@ pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&S
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
+pub const HOST_PORT_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOST_PORT_REGEX);
+pub const HTTP_URL_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HTTP_URL_REGEX);
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
@@ -210,7 +217,15 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
.format(&DNS_NAME_OR_IP_FORMAT)
.schema();
+pub const HOST_PORT_SCHEMA: Schema = StringSchema::new("host:port combination (Host can be DNS name or IP address).")
+ .format(&HOST_PORT_FORMAT)
+ .schema();
+
+pub const HTTP_URL_SCHEMA: Schema = StringSchema::new("HTTP(s) url with optional port.")
+ .format(&HTTP_URL_FORMAT)
+ .schema();
+#[cfg(not(target_arch="wasm32"))] // this only makes sense for the server side
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
.format(&HOSTNAME_FORMAT)
.schema();
diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs
new file mode 100644
index 00000000..239d6c80
--- /dev/null
+++ b/pbs-api-types/src/metrics.rs
@@ -0,0 +1,138 @@
+use serde::{Deserialize, Serialize};
+
+use crate::{
+ HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
+};
+use proxmox_schema::{api, Schema, StringSchema, Updater};
+
+pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
+ .format(&PROXMOX_SAFE_ID_FORMAT)
+ .min_length(3)
+ .max_length(32)
+ .schema();
+
+pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
+ .format(&PROXMOX_SAFE_ID_FORMAT)
+ .min_length(3)
+ .max_length(32)
+ .default("proxmox")
+ .schema();
+
+pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
+ .format(&PROXMOX_SAFE_ID_FORMAT)
+ .min_length(3)
+ .max_length(32)
+ .default("proxmox")
+ .schema();
+
+#[api(
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ enable: {
+ type: bool,
+ optional: true,
+ default: true,
+ },
+ host: {
+ schema: HOST_PORT_SCHEMA,
+ },
+ mtu: {
+ type: u16,
+ optional: true,
+ default: 1500,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ },
+)]
+#[derive(Serialize, Deserialize, Updater)]
+#[serde(rename_all = "kebab-case")]
+/// InfluxDB Server (UDP)
+pub struct InfluxDbUdp {
+ #[updater(skip)]
+ pub name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// Enables or disables the metrics server
+ pub enable: Option<bool>,
+ /// the host + port
+ pub host: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// The MTU
+ pub mtu: Option<u16>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub comment: Option<String>,
+}
+
+#[api(
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ enable: {
+ type: bool,
+ optional: true,
+ default: true,
+ },
+ url: {
+ schema: HTTP_URL_SCHEMA,
+ },
+ token: {
+ type: String,
+ optional: true,
+ },
+ bucket: {
+ schema: INFLUXDB_BUCKET_SCHEMA,
+ optional: true,
+ },
+ organization: {
+ schema: INFLUXDB_ORGANIZATION_SCHEMA,
+ optional: true,
+ },
+ "max-body-size": {
+ type: usize,
+ optional: true,
+ default: 25_000_000,
+ },
+ "verify-tls": {
+ type: bool,
+ optional: true,
+ default: true,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ },
+)]
+#[derive(Serialize, Deserialize, Updater)]
+#[serde(rename_all = "kebab-case")]
+/// InfluxDB Server (HTTP(s))
+pub struct InfluxDbHttp {
+ #[updater(skip)]
+ pub name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// Enables or disables the metrics server
+ pub enable: Option<bool>,
+ /// The base url of the influxdb server
+ pub url: String,
+ /// The Optional Token
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// The (optional) API token
+ pub token: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub bucket: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub organization: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// The (optional) maximum body size
+ pub max_body_size: Option<usize>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ /// If true, the certificate will be validated.
+ pub verify_tls: Option<bool>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub comment: Option<String>,
+}
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 3/8] pbs-config: add metrics config class
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
a section config like in pve
also adds a helper to get Metrics structs for all configured servers
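
a rough sketch of how a consumer can use the helper (the `values` to
send are assumed to exist already):

let (config, _digest) = pbs_config::metrics::config()?;
let (channels, names) = pbs_config::metrics::get_metric_server_connections(config)?;
// one proxmox_metrics::Metrics handle per enabled server, plus the
// matching section ids for error reporting
let results = proxmox_metrics::send_data_to_channels(&values, &channels).await;
for (res, name) in results.into_iter().zip(names.iter()) {
    if let Err(err) = res {
        eprintln!("error sending to {}: {}", name, err);
    }
}
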
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
Cargo.toml | 1 +
pbs-config/Cargo.toml | 1 +
pbs-config/src/lib.rs | 1 +
pbs-config/src/metrics.rs | 115 ++++++++++++++++++++++++++++++++++++++
4 files changed, 118 insertions(+)
create mode 100644 pbs-config/src/metrics.rs
diff --git a/Cargo.toml b/Cargo.toml
index eafa5d94..e690266a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -96,6 +96,7 @@ pxar = { version = "0.10.1", features = [ "tokio-io" ] }
proxmox-http = { version = "0.6", features = [ "client", "http-helpers", "websocket" ] }
proxmox-io = "1"
proxmox-lang = "1"
+proxmox-metrics = "0.1"
proxmox-router = { version = "1.1", features = [ "cli" ] }
proxmox-schema = { version = "1.1", features = [ "api-macro" ] }
proxmox-section-config = "1"
diff --git a/pbs-config/Cargo.toml b/pbs-config/Cargo.toml
index 7c3b31cb..35b70c1d 100644
--- a/pbs-config/Cargo.toml
+++ b/pbs-config/Cargo.toml
@@ -25,6 +25,7 @@ proxmox-time = "1"
proxmox-serde = "0.1"
proxmox-shared-memory = "0.2"
proxmox-sys = "0.2"
+proxmox-metrics = "0.1"
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
diff --git a/pbs-config/src/lib.rs b/pbs-config/src/lib.rs
index 118030bc..29880ab9 100644
--- a/pbs-config/src/lib.rs
+++ b/pbs-config/src/lib.rs
@@ -6,6 +6,7 @@ pub mod domains;
pub mod drive;
pub mod key_config;
pub mod media_pool;
+pub mod metrics;
pub mod network;
pub mod remote;
pub mod sync;
diff --git a/pbs-config/src/metrics.rs b/pbs-config/src/metrics.rs
new file mode 100644
index 00000000..3d4428f8
--- /dev/null
+++ b/pbs-config/src/metrics.rs
@@ -0,0 +1,115 @@
+use std::collections::HashMap;
+
+use anyhow::Error;
+use lazy_static::lazy_static;
+
+use proxmox_metrics::{influxdb_http, influxdb_udp, Metrics};
+use proxmox_schema::*;
+use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
+
+use pbs_api_types::{InfluxDbHttp, InfluxDbUdp, METRIC_SERVER_ID_SCHEMA};
+
+use crate::{open_backup_lockfile, BackupLockGuard};
+
+lazy_static! {
+ pub static ref CONFIG: SectionConfig = init();
+}
+
+fn init() -> SectionConfig {
+ let mut config = SectionConfig::new(&METRIC_SERVER_ID_SCHEMA);
+
+ let udp_schema = match InfluxDbUdp::API_SCHEMA {
+ Schema::Object(ref object_schema) => object_schema,
+ _ => unreachable!(),
+ };
+
+ let udp_plugin = SectionConfigPlugin::new(
+ "influxdb-udp".to_string(),
+ Some("name".to_string()),
+ udp_schema,
+ );
+ config.register_plugin(udp_plugin);
+
+ let http_schema = match InfluxDbHttp::API_SCHEMA {
+ Schema::Object(ref object_schema) => object_schema,
+ _ => unreachable!(),
+ };
+
+ let http_plugin = SectionConfigPlugin::new(
+ "influxdb-http".to_string(),
+ Some("name".to_string()),
+ http_schema,
+ );
+
+ config.register_plugin(http_plugin);
+
+ config
+}
+
+pub const METRIC_SERVER_CFG_FILENAME: &str = "/etc/proxmox-backup/metricserver.cfg";
+pub const METRIC_SERVER_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.metricserver.lck";
+
+/// Get exclusive lock
+pub fn lock_config() -> Result<BackupLockGuard, Error> {
+ open_backup_lockfile(METRIC_SERVER_CFG_LOCKFILE, None, true)
+}
+
+pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
+ let content = proxmox_sys::fs::file_read_optional_string(METRIC_SERVER_CFG_FILENAME)?
+ .unwrap_or_else(|| "".to_string());
+
+ let digest = openssl::sha::sha256(content.as_bytes());
+ let data = CONFIG.parse(METRIC_SERVER_CFG_FILENAME, &content)?;
+ Ok((data, digest))
+}
+
+pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
+ let raw = CONFIG.write(METRIC_SERVER_CFG_FILENAME, config)?;
+ crate::replace_backup_config(METRIC_SERVER_CFG_FILENAME, raw.as_bytes())
+}
+
+// shell completion helper
+pub fn complete_remote_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+ match config() {
+ Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+ Err(_) => return vec![],
+ }
+}
+
+/// Get the metric server connections from a config
+pub fn get_metric_server_connections(
+ metric_config: SectionConfigData,
+) -> Result<(Vec<Metrics>, Vec<String>), Error> {
+ let mut futures = Vec::new();
+ let mut names = Vec::new();
+
+ for config in
+ metric_config.convert_to_typed_array::<pbs_api_types::InfluxDbUdp>("influxdb-udp")?
+ {
+ if !config.enable.unwrap_or(true) {
+ continue;
+ }
+ let future = influxdb_udp(&config.host, config.mtu);
+ names.push(config.name);
+ futures.push(future);
+ }
+
+ for config in
+ metric_config.convert_to_typed_array::<pbs_api_types::InfluxDbHttp>("influxdb-http")?
+ {
+ if !config.enable.unwrap_or(true) {
+ continue;
+ }
+ let future = influxdb_http(
+ &config.url,
+ config.organization.as_deref().unwrap_or("proxmox"),
+ config.bucket.as_deref().unwrap_or("proxmox"),
+ config.token.as_deref(),
+ config.verify_tls.unwrap_or(true),
+ config.max_body_size.unwrap_or(25_000_000),
+ )?;
+ names.push(config.name);
+ futures.push(future);
+ }
+ Ok((futures, names))
+}
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 4/8] backup-proxy: decouple stats gathering from rrd update
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
that way we can reuse the stats gathered
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
src/bin/proxmox-backup-proxy.rs | 213 +++++++++++++++++++++-----------
1 file changed, 141 insertions(+), 72 deletions(-)
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 8ebaba1d..302fece9 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -17,8 +17,11 @@ use tokio_stream::wrappers::ReceiverStream;
use serde_json::{json, Value};
use http::{Method, HeaderMap};
-use proxmox_sys::linux::socket::set_tcp_keepalive;
-use proxmox_sys::fs::CreateOptions;
+use proxmox_sys::linux::{
+ procfs::{ProcFsStat, ProcFsMemInfo, ProcFsNetDev, Loadavg},
+ socket::set_tcp_keepalive
+};
+use proxmox_sys::fs::{CreateOptions, FileSystemInformation};
use proxmox_lang::try_block;
use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};
use proxmox_http::client::{RateLimitedStream, ShareableRateLimit};
@@ -44,6 +47,7 @@ use proxmox_backup::{
Job,
},
},
+ tools::disks::BlockDevStat,
};
use pbs_buildcfg::configdir;
@@ -942,9 +946,24 @@ async fn run_stat_generator() {
loop {
let delay_target = Instant::now() + Duration::from_secs(10);
- generate_host_stats().await;
+ let stats = match tokio::task::spawn_blocking(|| {
+ let hoststats = collect_host_stats_sync();
+ let (hostdisk, datastores) = collect_disk_stats_sync();
+ Arc::new((hoststats, hostdisk, datastores))
+ }).await {
+ Ok(res) => res,
+ Err(err) => {
+ log::error!("collecting host stats paniced: {}", err);
+ tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
+ continue;
+ }
+ };
+
+ let rrd_future = tokio::task::spawn_blocking(move || {
+ rrd_update_host_stats_sync(&stats.0, &stats.1, &stats.2);
+ rrd_sync_journal();
+ });
- rrd_sync_journal();
tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
@@ -952,86 +971,147 @@ async fn run_stat_generator() {
}
-async fn generate_host_stats() {
- match tokio::task::spawn_blocking(generate_host_stats_sync).await {
- Ok(()) => (),
- Err(err) => log::error!("generate_host_stats paniced: {}", err),
- }
+struct HostStats {
+ proc: Option<ProcFsStat>,
+ meminfo: Option<ProcFsMemInfo>,
+ net: Option<Vec<ProcFsNetDev>>,
+ load: Option<Loadavg>,
+}
+
+struct DiskStat {
+ name: String,
+ usage: Option<FileSystemInformation>,
+ dev: Option<BlockDevStat>,
}
-fn generate_host_stats_sync() {
+fn collect_host_stats_sync() -> HostStats {
use proxmox_sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
- match read_proc_stat() {
- Ok(stat) => {
- rrd_update_gauge("host/cpu", stat.cpu);
- rrd_update_gauge("host/iowait", stat.iowait_percent);
- }
+ let proc = match read_proc_stat() {
+ Ok(stat) => Some(stat),
Err(err) => {
eprintln!("read_proc_stat failed - {}", err);
+ None
}
- }
+ };
- match read_meminfo() {
- Ok(meminfo) => {
- rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
- rrd_update_gauge("host/memused", meminfo.memused as f64);
- rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
- rrd_update_gauge("host/swapused", meminfo.swapused as f64);
- }
+ let meminfo = match read_meminfo() {
+ Ok(stat) => Some(stat),
Err(err) => {
eprintln!("read_meminfo failed - {}", err);
+ None
}
- }
+ };
- match read_proc_net_dev() {
- Ok(netdev) => {
- use pbs_config::network::is_physical_nic;
- let mut netin = 0;
- let mut netout = 0;
- for item in netdev {
- if !is_physical_nic(&item.device) { continue; }
- netin += item.receive;
- netout += item.send;
- }
- rrd_update_derive("host/netin", netin as f64);
- rrd_update_derive("host/netout", netout as f64);
- }
+ let net = match read_proc_net_dev() {
+ Ok(netdev) => Some(netdev),
Err(err) => {
eprintln!("read_prox_net_dev failed - {}", err);
+ None
}
- }
+ };
- match read_loadavg() {
- Ok(loadavg) => {
- rrd_update_gauge("host/loadavg", loadavg.0 as f64);
- }
+ let load = match read_loadavg() {
+ Ok(loadavg) => Some(loadavg),
Err(err) => {
eprintln!("read_loadavg failed - {}", err);
+ None
}
+ };
+
+ HostStats {
+ proc,
+ meminfo,
+ net,
+ load,
}
+}
+fn collect_disk_stats_sync() -> (DiskStat, Vec<DiskStat>) {
let disk_manager = DiskManage::new();
- gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
+ let root = gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
+ let mut datastores = Vec::new();
match pbs_config::datastore::config() {
Ok((config, _)) => {
let datastore_list: Vec<DataStoreConfig> =
config.convert_to_typed_array("datastore").unwrap_or_default();
for config in datastore_list {
-
- let rrd_prefix = format!("datastore/{}", config.name);
let path = std::path::Path::new(&config.path);
- gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
+ datastores.push(gather_disk_stats(disk_manager.clone(), path, &config.name));
}
}
Err(err) => {
eprintln!("read datastore config failed - {}", err);
}
}
+
+ (root, datastores)
+}
+
+fn rrd_update_host_stats_sync(host: &HostStats, hostdisk: &DiskStat, datastores: &[DiskStat]) {
+ if let Some(stat) = &host.proc {
+ rrd_update_gauge("host/cpu", stat.cpu);
+ rrd_update_gauge("host/iowait", stat.iowait_percent);
+ }
+
+ if let Some(meminfo) = &host.meminfo {
+ rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
+ rrd_update_gauge("host/memused", meminfo.memused as f64);
+ rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
+ rrd_update_gauge("host/swapused", meminfo.swapused as f64);
+ }
+
+ if let Some(netdev) = &host.net {
+ use pbs_config::network::is_physical_nic;
+ let mut netin = 0;
+ let mut netout = 0;
+ for item in netdev {
+ if !is_physical_nic(&item.device) { continue; }
+ netin += item.receive;
+ netout += item.send;
+ }
+ rrd_update_derive("host/netin", netin as f64);
+ rrd_update_derive("host/netout", netout as f64);
+ }
+
+ if let Some(loadavg) = &host.load {
+ rrd_update_gauge("host/loadavg", loadavg.0 as f64);
+ }
+
+ rrd_update_disk_stat(&hostdisk, "host");
+
+ for stat in datastores {
+ let rrd_prefix = format!("datastore/{}", stat.name);
+ rrd_update_disk_stat(&stat, &rrd_prefix);
+ }
+}
+
+fn rrd_update_disk_stat(disk: &DiskStat, rrd_prefix: &str) {
+ if let Some(status) = &disk.usage {
+ let rrd_key = format!("{}/total", rrd_prefix);
+ rrd_update_gauge(&rrd_key, status.total as f64);
+ let rrd_key = format!("{}/used", rrd_prefix);
+ rrd_update_gauge(&rrd_key, status.used as f64);
+ }
+
+ if let Some(stat) = &disk.dev {
+ let rrd_key = format!("{}/read_ios", rrd_prefix);
+ rrd_update_derive(&rrd_key, stat.read_ios as f64);
+ let rrd_key = format!("{}/read_bytes", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
+
+ let rrd_key = format!("{}/write_ios", rrd_prefix);
+ rrd_update_derive(&rrd_key, stat.write_ios as f64);
+ let rrd_key = format!("{}/write_bytes", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
+
+ let rrd_key = format!("{}/io_ticks", rrd_prefix);
+ rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0);
+ }
}
fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
@@ -1064,22 +1144,17 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
next <= now
}
-fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {
-
- match proxmox_sys::fs::fs_info(path) {
- Ok(status) => {
- let rrd_key = format!("{}/total", rrd_prefix);
- rrd_update_gauge(&rrd_key, status.total as f64);
- let rrd_key = format!("{}/used", rrd_prefix);
- rrd_update_gauge(&rrd_key, status.used as f64);
- }
+fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, name: &str) -> DiskStat {
+ let usage = match proxmox_sys::fs::fs_info(path) {
+ Ok(status) => Some(status),
Err(err) => {
eprintln!("read fs info on {:?} failed - {}", path, err);
+ None
}
- }
+ };
- match disk_manager.find_mounted_device(path) {
- Ok(None) => {},
+ let dev = match disk_manager.find_mounted_device(path) {
+ Ok(None) => None,
Ok(Some((fs_type, device, source))) => {
let mut device_stat = None;
match (fs_type.as_str(), source) {
@@ -1101,24 +1176,18 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
}
}
}
- if let Some(stat) = device_stat {
- let rrd_key = format!("{}/read_ios", rrd_prefix);
- rrd_update_derive(&rrd_key, stat.read_ios as f64);
- let rrd_key = format!("{}/read_bytes", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
-
- let rrd_key = format!("{}/write_ios", rrd_prefix);
- rrd_update_derive(&rrd_key, stat.write_ios as f64);
- let rrd_key = format!("{}/write_bytes", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
-
- let rrd_key = format!("{}/io_ticks", rrd_prefix);
- rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0);
- }
+ device_stat
}
Err(err) => {
eprintln!("find_mounted_device failed - {}", err);
+ None
}
+ };
+
+ DiskStat {
+ name: name.to_string(),
+ usage,
+ dev,
}
}
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 5/8] proxmox-backup-proxy: send metrics to configured metrics server
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
and keep the data as similar as possible to pve (tags/fields)
datastores get their own 'object' type and reside in the "blockstat"
measurement
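
for illustration, a datastore sample ends up roughly as the following
influxdb line (hostname, datastore name and values are made up, the
field list is abbreviated):

blockstat,object=datastore,nodename=pbs1,datastore=store1 total=1099511627776,used=549755813888,avail=549755813888 1643795410000000000
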
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
src/bin/proxmox-backup-proxy.rs | 135 +++++++++++++++++++++++++++++++-
1 file changed, 132 insertions(+), 3 deletions(-)
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 302fece9..465374b7 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -23,11 +23,13 @@ use proxmox_sys::linux::{
};
use proxmox_sys::fs::{CreateOptions, FileSystemInformation};
use proxmox_lang::try_block;
+use proxmox_metrics::MetricsData;
use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};
use proxmox_http::client::{RateLimitedStream, ShareableRateLimit};
use proxmox_sys::{task_log, task_warn};
use proxmox_sys::logrotate::LogRotate;
+use pbs_config::metrics::get_metric_server_connections;
use pbs_datastore::DataStore;
use proxmox_rest_server::{
@@ -959,16 +961,123 @@ async fn run_stat_generator() {
}
};
- let rrd_future = tokio::task::spawn_blocking(move || {
- rrd_update_host_stats_sync(&stats.0, &stats.1, &stats.2);
- rrd_sync_journal();
+ let rrd_future = tokio::task::spawn_blocking({
+ let stats = Arc::clone(&stats);
+ move || {
+ rrd_update_host_stats_sync(&stats.0, &stats.1, &stats.2);
+ rrd_sync_journal();
+ }
});
+ let metrics_future = send_data_to_metric_servers(stats);
+
+ let (rrd_res, metrics_res) = join!(rrd_future, metrics_future);
+ if let Err(err) = rrd_res {
+ log::error!("rrd update panicked: {}", err);
+ }
+ if let Err(err) = metrics_res {
+ log::error!("error during metrics sending: {}", err);
+ }
tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
}
+}
+
+async fn send_data_to_metric_servers(
+ stats: Arc<(HostStats, DiskStat, Vec<DiskStat>)>,
+) -> Result<(), Error> {
+ let (config, _digest) = pbs_config::metrics::config()?;
+ let (channels, names) = get_metric_server_connections(config)?;
+
+ if channels.is_empty() {
+ return Ok(());
+ }
+
+ let ctime = proxmox_time::epoch_i64();
+ let nodename = proxmox_sys::nodename();
+
+ let mut values = Vec::new();
+ let mut cpuvalue = match &stats.0.proc {
+ Some(stat) => serde_json::to_value(stat)?,
+ None => json!({}),
+ };
+
+ if let Some(loadavg) = &stats.0.load {
+ cpuvalue["avg1"] = Value::from(loadavg.0);
+ cpuvalue["avg5"] = Value::from(loadavg.1);
+ cpuvalue["avg15"] = Value::from(loadavg.2);
+ }
+
+ values.push(Arc::new(MetricsData::new(
+ "cpustat",
+ &[("object", "host"), ("host", nodename)],
+ ctime,
+ cpuvalue,
+ )?));
+
+ if let Some(stat) = &stats.0.meminfo {
+ values.push(Arc::new(MetricsData::new(
+ "memory",
+ &[("object", "host"), ("host", nodename)],
+ ctime,
+ stat,
+ )?));
+ }
+
+ if let Some(netdev) = &stats.0.net {
+ for item in netdev {
+ values.push(Arc::new(MetricsData::new(
+ "nics",
+ &[
+ ("object", "host"),
+ ("host", nodename),
+ ("instance", &item.device),
+ ],
+ ctime,
+ item,
+ )?));
+ }
+ }
+
+ values.push(Arc::new(MetricsData::new(
+ "blockstat",
+ &[("object", "host"), ("host", nodename)],
+ ctime,
+ stats.1.to_value(),
+ )?));
+
+ for datastore in stats.2.iter() {
+ values.push(Arc::new(MetricsData::new(
+ "blockstat",
+ &[
+ ("object", "datastore"),
+ ("nodename", nodename),
+ ("datastore", &datastore.name),
+ ],
+ ctime,
+ datastore.to_value(),
+ )?));
+ }
+
+ let results = proxmox_metrics::send_data_to_channels(&values, &channels).await;
+ for (res, name) in results.into_iter().zip(names.iter()) {
+ if let Err(err) = res {
+ log::error!("error sending into channel of {}: {}", name, err);
+ }
+ }
+
+ futures::future::join_all(channels.into_iter().zip(names.into_iter()).map(
+ |(channel, name)| async move {
+ if let Err(err) = channel.join().await {
+ log::error!("error sending to metric server {}: {}", name, err);
+ }
+ },
+ ))
+ .await;
+
+ Ok(())
}
struct HostStats {
@@ -984,6 +1093,26 @@ struct DiskStat {
dev: Option<BlockDevStat>,
}
+impl DiskStat {
+ fn to_value(&self) -> Value {
+ let mut value = json!({});
+ if let Some(usage) = &self.usage {
+ value["total"] = Value::from(usage.total);
+ value["used"] = Value::from(usage.used);
+ value["avail"] = Value::from(usage.available);
+ }
+
+ if let Some(dev) = &self.dev {
+ value["read_ios"] = Value::from(dev.read_ios);
+ value["read_bytes"] = Value::from(dev.read_sectors * 512);
+ value["write_ios"] = Value::from(dev.write_ios);
+ value["write_bytes"] = Value::from(dev.write_sectors * 512);
+ value["io_ticks"] = Value::from(dev.io_ticks / 1000);
+ }
+ value
+ }
+}
+
fn collect_host_stats_sync() -> HostStats {
use proxmox_sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
--
2.30.2
* [pbs-devel] [PATCH proxmox-backup v5 6/8] api: add metricserver endpoints
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
but in contrast to pve, we split the api by type of the section config,
since we cannot handle multiple types in the updater
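
for illustration, entries returned by the new shared list endpoint
(GET /api2/json/admin/metricserver) would look roughly like this
(names and addresses made up):

[
  { "name": "influx1", "type": "influxdb-http", "server": "https://influxdb.example.com:8086", "enable": true },
  { "name": "influx2", "type": "influxdb-udp", "server": "192.0.2.10:8089" }
]
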
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
src/api2/admin/metricserver.rs | 91 ++++++
src/api2/admin/mod.rs | 2 +
src/api2/config/metricserver/influxdbhttp.rs | 314 +++++++++++++++++++
src/api2/config/metricserver/influxdbudp.rs | 269 ++++++++++++++++
src/api2/config/metricserver/mod.rs | 16 +
src/api2/config/mod.rs | 2 +
6 files changed, 694 insertions(+)
create mode 100644 src/api2/admin/metricserver.rs
create mode 100644 src/api2/config/metricserver/influxdbhttp.rs
create mode 100644 src/api2/config/metricserver/influxdbudp.rs
create mode 100644 src/api2/config/metricserver/mod.rs
diff --git a/src/api2/admin/metricserver.rs b/src/api2/admin/metricserver.rs
new file mode 100644
index 00000000..ff84989a
--- /dev/null
+++ b/src/api2/admin/metricserver.rs
@@ -0,0 +1,91 @@
+use anyhow::Error;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use proxmox_router::{Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{METRIC_SERVER_ID_SCHEMA, PRIV_SYS_AUDIT, SINGLE_LINE_COMMENT_SCHEMA};
+use pbs_config::metrics;
+
+#[api]
+#[derive(Deserialize, Serialize, PartialEq, Eq)]
+/// Type of the metric server
+pub enum MetricServerType {
+ /// InfluxDB HTTP
+ #[serde(rename = "influxdb-http")]
+ InfluxDbHttp,
+ /// InfluxDB UDP
+ #[serde(rename = "influxdb-udp")]
+ InfluxDbUdp,
+}
+
+#[api(
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ "type": {
+ type: MetricServerType,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Basic information about a metric server that is available for all types
+pub struct MetricServerInfo {
+ pub name: String,
+ #[serde(rename = "type")]
+ pub ty: MetricServerType,
+ /// Enables or disables the metrics server
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub enable: Option<bool>,
+ /// The target server
+ pub server: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub comment: Option<String>,
+}
+
+#[api(
+ input: {
+ properties: {},
+ },
+ returns: {
+ description: "List of configured metric servers.",
+ type: Array,
+ items: { type: MetricServerInfo },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+ },
+)]
+/// List configured metric servers.
+pub fn list_metric_servers(
+ _param: Value,
+ mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<MetricServerInfo>, Error> {
+ let (config, digest) = metrics::config()?;
+ let mut list = Vec::new();
+
+ for (_, (section_type, v)) in config.sections.iter() {
+ let mut entry = v.clone();
+ entry["type"] = Value::from(section_type.clone());
+ if entry.get("url").is_some() {
+ entry["server"] = entry["url"].clone();
+ }
+ if entry.get("host").is_some() {
+ entry["server"] = entry["host"].clone();
+ }
+ list.push(serde_json::from_value(entry)?);
+ }
+
+ rpcenv["digest"] = hex::encode(&digest).into();
+
+ Ok(list)
+}
+
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_METRIC_SERVERS);
diff --git a/src/api2/admin/mod.rs b/src/api2/admin/mod.rs
index 4667355a..e1e92134 100644
--- a/src/api2/admin/mod.rs
+++ b/src/api2/admin/mod.rs
@@ -4,12 +4,14 @@ use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
pub mod datastore;
+pub mod metricserver;
pub mod sync;
pub mod verify;
pub mod traffic_control;
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
+ ("metricserver", &metricserver::ROUTER),
("sync", &sync::ROUTER),
("traffic-control", &traffic_control::ROUTER),
("verify", &verify::ROUTER)
diff --git a/src/api2/config/metricserver/influxdbhttp.rs b/src/api2/config/metricserver/influxdbhttp.rs
new file mode 100644
index 00000000..01c1b8d1
--- /dev/null
+++ b/src/api2/config/metricserver/influxdbhttp.rs
@@ -0,0 +1,314 @@
+use anyhow::{bail, format_err, Error};
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use proxmox_metrics::test_influxdb_http;
+use proxmox_router::{Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{
+ InfluxDbHttp, InfluxDbHttpUpdater, METRIC_SERVER_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ PROXMOX_CONFIG_DIGEST_SCHEMA,
+};
+
+use pbs_config::metrics;
+
+async fn test_server(config: &InfluxDbHttp) -> Result<(), Error> {
+ if config.enable.unwrap_or(true) {
+ test_influxdb_http(
+ &config.url,
+ config.organization.as_deref().unwrap_or("proxmox"),
+ config.bucket.as_deref().unwrap_or("proxmox"),
+ config.token.as_deref(),
+ config.verify_tls.unwrap_or(true),
+ )
+ .await
+ .map_err(|err| format_err!("could not connect to {}: {}", config.url, err))
+ } else {
+ Ok(())
+ }
+}
+
+#[api(
+ input: {
+ properties: {},
+ },
+ returns: {
+ description: "List of configured InfluxDB http metric servers.",
+ type: Array,
+ items: { type: InfluxDbHttp },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+ },
+)]
+/// List configured InfluxDB http metric servers.
+pub fn list_influxdb_http_servers(
+ _param: Value,
+ mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<InfluxDbHttp>, Error> {
+ let (config, digest) = metrics::config()?;
+
+ let mut list: Vec<InfluxDbHttp> = config.convert_to_typed_array("influxdb-http")?;
+
+ // don't return token via api
+ for item in list.iter_mut() {
+ item.token = None;
+ }
+
+ rpcenv["digest"] = hex::encode(&digest).into();
+
+ Ok(list)
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ config: {
+ type: InfluxDbHttp,
+ flatten: true,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Create a new InfluxDB http server configuration
+pub async fn create_influxdb_http_server(config: InfluxDbHttp) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, _digest) = metrics::config()?;
+
+ if metrics.sections.get(&config.name).is_some() {
+ bail!("metric server '{}' already exists.", config.name);
+ }
+
+ test_server(&config).await?;
+
+ metrics.set_data(&config.name, "influxdb-http", &config)?;
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ digest: {
+ optional: true,
+ schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Remove an InfluxDB http server configuration
+pub fn delete_influxdb_http_server(
+ name: String,
+ digest: Option<String>,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, expected_digest) = metrics::config()?;
+
+ if let Some(ref digest) = digest {
+ let digest = <[u8; 32]>::from_hex(digest)?;
+ crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+ }
+
+ if metrics.sections.remove(&name).is_none() {
+ bail!("name '{}' does not exist.", name);
+ }
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+#[api(
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ },
+ },
+ returns: { type: InfluxDbHttp },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+ },
+)]
+/// Read the InfluxDB http server configuration
+pub fn read_influxdb_http_server(
+ name: String,
+ mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<InfluxDbHttp, Error> {
+ let (metrics, digest) = metrics::config()?;
+
+ let mut config: InfluxDbHttp = metrics.lookup("influxdb-http", &name)?;
+
+ config.token = None;
+
+ rpcenv["digest"] = hex::encode(&digest).into();
+
+ Ok(config)
+}
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+ /// Delete the enable property.
+ Enable,
+ /// Delete the token property.
+ Token,
+ /// Delete the bucket property.
+ Bucket,
+ /// Delete the organization property.
+ Organization,
+ /// Delete the max_body_size property.
+ MaxBodySize,
+ /// Delete the verify_tls property.
+ VerifyTls,
+ /// Delete the comment property.
+ Comment,
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ update: {
+ type: InfluxDbHttpUpdater,
+ flatten: true,
+ },
+ delete: {
+ description: "List of properties to delete.",
+ type: Array,
+ optional: true,
+ items: {
+ type: DeletableProperty,
+ }
+ },
+ digest: {
+ optional: true,
+ schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Update an InfluxDB http server configuration
+pub async fn update_influxdb_http_server(
+ name: String,
+ update: InfluxDbHttpUpdater,
+ delete: Option<Vec<DeletableProperty>>,
+ digest: Option<String>,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, expected_digest) = metrics::config()?;
+
+ if let Some(ref digest) = digest {
+ let digest = <[u8; 32]>::from_hex(digest)?;
+ crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+ }
+
+ let mut config: InfluxDbHttp = metrics.lookup("influxdb-http", &name)?;
+
+ if let Some(delete) = delete {
+ for delete_prop in delete {
+ match delete_prop {
+ DeletableProperty::Enable => {
+ config.enable = None;
+ }
+ DeletableProperty::Token => {
+ config.token = None;
+ }
+ DeletableProperty::Bucket => {
+ config.bucket = None;
+ }
+ DeletableProperty::Organization => {
+ config.organization = None;
+ }
+ DeletableProperty::MaxBodySize => {
+ config.max_body_size = None;
+ }
+ DeletableProperty::VerifyTls => {
+ config.verify_tls = None;
+ }
+ DeletableProperty::Comment => {
+ config.comment = None;
+ }
+ }
+ }
+ }
+
+ if let Some(comment) = update.comment {
+ let comment = comment.trim().to_string();
+ if comment.is_empty() {
+ config.comment = None;
+ } else {
+ config.comment = Some(comment);
+ }
+ }
+
+ if let Some(url) = update.url {
+ config.url = url;
+ }
+
+ if update.enable.is_some() {
+ config.enable = update.enable;
+ }
+ if update.token.is_some() {
+ config.token = update.token;
+ }
+ if update.bucket.is_some() {
+ config.bucket = update.bucket;
+ }
+ if update.organization.is_some() {
+ config.organization = update.organization;
+ }
+ if update.max_body_size.is_some() {
+ config.max_body_size = update.max_body_size;
+ }
+ if update.verify_tls.is_some() {
+ config.verify_tls = update.verify_tls;
+ }
+
+ test_server(&config).await?;
+
+ metrics.set_data(&name, "influxdb-http", &config)?;
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+const ITEM_ROUTER: Router = Router::new()
+ .get(&API_METHOD_READ_INFLUXDB_HTTP_SERVER)
+ .put(&API_METHOD_UPDATE_INFLUXDB_HTTP_SERVER)
+ .delete(&API_METHOD_DELETE_INFLUXDB_HTTP_SERVER);
+
+pub const ROUTER: Router = Router::new()
+ .get(&API_METHOD_LIST_INFLUXDB_HTTP_SERVERS)
+ .post(&API_METHOD_CREATE_INFLUXDB_HTTP_SERVER)
+ .match_all("name", &ITEM_ROUTER);
diff --git a/src/api2/config/metricserver/influxdbudp.rs b/src/api2/config/metricserver/influxdbudp.rs
new file mode 100644
index 00000000..7a701316
--- /dev/null
+++ b/src/api2/config/metricserver/influxdbudp.rs
@@ -0,0 +1,269 @@
+use anyhow::{bail, format_err, Error};
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use proxmox_metrics::test_influxdb_udp;
+use proxmox_router::{Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{
+ InfluxDbUdp, InfluxDbUdpUpdater, METRIC_SERVER_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ PROXMOX_CONFIG_DIGEST_SCHEMA,
+};
+
+use pbs_config::metrics;
+
+async fn test_server(address: &str) -> Result<(), Error> {
+ test_influxdb_udp(address)
+ .await
+ .map_err(|err| format_err!("cannot connect to {}: {}", address, err))
+}
+
+#[api(
+ input: {
+ properties: {},
+ },
+ returns: {
+ description: "List of configured InfluxDB udp metric servers.",
+ type: Array,
+ items: { type: InfluxDbUdp },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+ },
+)]
+/// List configured InfluxDB udp metric servers.
+pub fn list_influxdb_udp_servers(
+ _param: Value,
+ mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<InfluxDbUdp>, Error> {
+ let (config, digest) = metrics::config()?;
+
+ let list = config.convert_to_typed_array("influxdb-udp")?;
+
+ rpcenv["digest"] = hex::encode(&digest).into();
+
+ Ok(list)
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ config: {
+ type: InfluxDbUdp,
+ flatten: true,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Create a new InfluxDB udp server configuration
+pub async fn create_influxdb_udp_server(config: InfluxDbUdp) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, _digest) = metrics::config()?;
+
+ if metrics.sections.get(&config.name).is_some() {
+ bail!("metric server '{}' already exists.", config.name);
+ }
+
+ if config.enable.unwrap_or(true) {
+ test_server(&config.host).await?;
+ }
+
+ metrics.set_data(&config.name, "influxdb-udp", &config)?;
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ digest: {
+ optional: true,
+ schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Remove an InfluxDB udp server configuration
+pub fn delete_influxdb_udp_server(
+ name: String,
+ digest: Option<String>,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, expected_digest) = metrics::config()?;
+
+ if let Some(ref digest) = digest {
+ let digest = <[u8; 32]>::from_hex(digest)?;
+ crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+ }
+
+ if metrics.sections.remove(&name).is_none() {
+ bail!("name '{}' does not exist.", name);
+ }
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+#[api(
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ },
+ },
+ returns: { type: InfluxDbUdp },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+ },
+)]
+/// Read the InfluxDB udp server configuration
+pub fn read_influxdb_udp_server(
+ name: String,
+ mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<InfluxDbUdp, Error> {
+ let (metrics, digest) = metrics::config()?;
+
+ let config = metrics.lookup("influxdb-udp", &name)?;
+
+ rpcenv["digest"] = hex::encode(&digest).into();
+
+ Ok(config)
+}
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+ /// Delete the enable property.
+ Enable,
+ /// Delete the mtu property.
+ Mtu,
+ /// Delete the comment property.
+ Comment,
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ name: {
+ schema: METRIC_SERVER_ID_SCHEMA,
+ },
+ update: {
+ type: InfluxDbUdpUpdater,
+ flatten: true,
+ },
+ delete: {
+ description: "List of properties to delete.",
+ type: Array,
+ optional: true,
+ items: {
+ type: DeletableProperty,
+ }
+ },
+ digest: {
+ optional: true,
+ schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+ },
+)]
+/// Update an InfluxDB udp server configuration
+pub async fn update_influxdb_udp_server(
+ name: String,
+ update: InfluxDbUdpUpdater,
+ delete: Option<Vec<DeletableProperty>>,
+ digest: Option<String>,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let _lock = metrics::lock_config()?;
+
+ let (mut metrics, expected_digest) = metrics::config()?;
+
+ if let Some(ref digest) = digest {
+ let digest = <[u8; 32]>::from_hex(digest)?;
+ crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+ }
+
+ let mut config: InfluxDbUdp = metrics.lookup("influxdb-udp", &name)?;
+
+ if let Some(delete) = delete {
+ for delete_prop in delete {
+ match delete_prop {
+ DeletableProperty::Enable => {
+ config.enable = None;
+ }
+ DeletableProperty::Mtu => {
+ config.mtu = None;
+ }
+ DeletableProperty::Comment => {
+ config.comment = None;
+ }
+ }
+ }
+ }
+
+ if let Some(comment) = update.comment {
+ let comment = comment.trim().to_string();
+ if comment.is_empty() {
+ config.comment = None;
+ } else {
+ config.comment = Some(comment);
+ }
+ }
+
+ if let Some(host) = update.host {
+ config.host = host;
+ }
+
+ if update.enable.is_some() {
+ config.enable = update.enable;
+ }
+ if update.mtu.is_some() {
+ config.mtu = update.mtu;
+ }
+
+ metrics.set_data(&name, "influxdb-udp", &config)?;
+
+ if config.enable.unwrap_or(true) {
+ test_server(&config.host).await?;
+ }
+
+ metrics::save_config(&metrics)?;
+
+ Ok(())
+}
+
+const ITEM_ROUTER: Router = Router::new()
+ .get(&API_METHOD_READ_INFLUXDB_UDP_SERVER)
+ .put(&API_METHOD_UPDATE_INFLUXDB_UDP_SERVER)
+ .delete(&API_METHOD_DELETE_INFLUXDB_UDP_SERVER);
+
+pub const ROUTER: Router = Router::new()
+ .get(&API_METHOD_LIST_INFLUXDB_UDP_SERVERS)
+ .post(&API_METHOD_CREATE_INFLUXDB_UDP_SERVER)
+ .match_all("name", &ITEM_ROUTER);
diff --git a/src/api2/config/metricserver/mod.rs b/src/api2/config/metricserver/mod.rs
new file mode 100644
index 00000000..cbce34f7
--- /dev/null
+++ b/src/api2/config/metricserver/mod.rs
@@ -0,0 +1,16 @@
+use proxmox_router::{Router, SubdirMap};
+use proxmox_router::list_subdirs_api_method;
+use proxmox_sys::sortable;
+
+pub mod influxdbudp;
+pub mod influxdbhttp;
+
+#[sortable]
+const SUBDIRS: SubdirMap = &sorted!([
+ ("influxdb-http", &influxdbhttp::ROUTER),
+ ("influxdb-udp", &influxdbudp::ROUTER),
+]);
+
+pub const ROUTER: Router = Router::new()
+ .get(&list_subdirs_api_method!(SUBDIRS))
+ .subdirs(SUBDIRS);
diff --git a/src/api2/config/mod.rs b/src/api2/config/mod.rs
index c256ba64..5de1c28f 100644
--- a/src/api2/config/mod.rs
+++ b/src/api2/config/mod.rs
@@ -12,6 +12,7 @@ pub mod verify;
pub mod drive;
pub mod changer;
pub mod media_pool;
+pub mod metricserver;
pub mod tape_encryption_keys;
pub mod tape_backup_job;
pub mod traffic_control;
@@ -23,6 +24,7 @@ const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
("drive", &drive::ROUTER),
("media-pool", &media_pool::ROUTER),
+ ("metricserver", &metricserver::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
("tape-backup-job", &tape_backup_job::ROUTER),
--
2.30.2
^ permalink raw reply [flat|nested] 13+ messages in thread
* [pbs-devel] [PATCH proxmox-backup v5 7/8] ui: add window/InfluxDbEdit
2022-02-02 9:50 [pbs-devel] [PATCH proxmox/proxmox-backup v5] add metrics server capability Dominik Csapak
` (7 preceding siblings ...)
2022-02-02 9:50 ` [pbs-devel] [PATCH proxmox-backup v5 6/8] api: add metricserver endpoints Dominik Csapak
@ 2022-02-02 9:50 ` Dominik Csapak
2022-02-02 9:50 ` [pbs-devel] [PATCH proxmox-backup v5 8/8] ui: add MetricServerView and use it Dominik Csapak
9 siblings, 0 replies; 13+ messages in thread
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
contains both windows for HTTP and UDP
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
www/Makefile | 1 +
www/window/InfluxDbEdit.js | 218 +++++++++++++++++++++++++++++++++++++
2 files changed, 219 insertions(+)
create mode 100644 www/window/InfluxDbEdit.js
diff --git a/www/Makefile b/www/Makefile
index 455fbeec..df471e2c 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -72,6 +72,7 @@ JSSRC= \
window/TokenEdit.js \
window/VerifyJobEdit.js \
window/ZFSCreate.js \
+ window/InfluxDbEdit.js \
dashboard/DataStoreStatistics.js \
dashboard/LongestTasks.js \
dashboard/RunningTasks.js \
diff --git a/www/window/InfluxDbEdit.js b/www/window/InfluxDbEdit.js
new file mode 100644
index 00000000..4ac9fff3
--- /dev/null
+++ b/www/window/InfluxDbEdit.js
@@ -0,0 +1,218 @@
+Ext.define('PBS.window.InfluxDbHttpEdit', {
+ extend: 'Proxmox.window.Edit',
+ mixins: ['Proxmox.Mixin.CBind'],
+
+ subject: 'InfluxDB (HTTP)',
+
+ cbindData: function() {
+ let me = this;
+ me.isCreate = !me.serverid;
+ me.serverid = me.serverid || "";
+ me.url = `/api2/extjs/config/metricserver/influxdb-http/${me.serverid}`;
+ me.tokenEmptyText = me.isCreate ? '' : gettext('unchanged');
+ me.method = me.isCreate ? 'POST' : 'PUT';
+ if (!me.isCreate) {
+ me.subject = `${me.subject}: ${me.serverid}`;
+ }
+ return {};
+ },
+
+ items: [
+ {
+ xtype: 'inputpanel',
+
+ column1: [
+ {
+ xtype: 'pmxDisplayEditField',
+ name: 'name',
+ fieldLabel: gettext('Name'),
+ allowBlank: false,
+ cbind: {
+ editable: '{isCreate}',
+ value: '{serverid}',
+ },
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'url',
+ fieldLabel: gettext('URL'),
+ allowBlank: false,
+ },
+ ],
+
+ column2: [
+ {
+ xtype: 'checkbox',
+ name: 'enable',
+ fieldLabel: gettext('Enabled'),
+ inputValue: 1,
+ uncheckedValue: 0,
+ checked: true,
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'organization',
+ fieldLabel: gettext('Organization'),
+ emptyText: 'proxmox',
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'bucket',
+ fieldLabel: gettext('Bucket'),
+ emptyText: 'proxmox',
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'token',
+ fieldLabel: gettext('Token'),
+ allowBlank: true,
+ deleteEmpty: false,
+ submitEmpty: false,
+ cbind: {
+ emptyText: '{tokenEmptyText}',
+ },
+ },
+ ],
+
+ columnB: [
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'comment',
+ fieldLabel: gettext('Comment'),
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ ],
+
+ advancedColumn1: [
+ {
+ xtype: 'proxmoxintegerfield',
+ name: 'max-body-size',
+ fieldLabel: gettext('Batch Size (b)'),
+ minValue: 1,
+ emptyText: '25000000',
+ submitEmpty: false,
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ ],
+ },
+ ],
+});
+
+Ext.define('PBS.window.InfluxDbUdpEdit', {
+ extend: 'Proxmox.window.Edit',
+ mixins: ['Proxmox.Mixin.CBind'],
+
+ subject: 'InfluxDB (UDP)',
+
+ cbindData: function() {
+ let me = this;
+ me.isCreate = !me.serverid;
+ me.serverid = me.serverid || "";
+ me.url = `/api2/extjs/config/metricserver/influxdb-udp/${me.serverid}`;
+ me.method = me.isCreate ? 'POST' : 'PUT';
+ if (!me.isCreate) {
+ me.subject = `${me.subject}: ${me.serverid}`;
+ }
+ return {};
+ },
+
+ items: [
+ {
+ xtype: 'inputpanel',
+
+ onGetValues: function(values) {
+ values.host += `:${values.port}`;
+ delete values.port;
+ return values;
+ },
+
+ column1: [
+ {
+ xtype: 'pmxDisplayEditField',
+ name: 'name',
+ fieldLabel: gettext('Name'),
+ allowBlank: false,
+ cbind: {
+ editable: '{isCreate}',
+ value: '{serverid}',
+ },
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'host',
+ fieldLabel: gettext('Host'),
+ allowBlank: false,
+ },
+ ],
+
+ column2: [
+ {
+ xtype: 'checkbox',
+ name: 'enable',
+ fieldLabel: gettext('Enabled'),
+ inputValue: 1,
+ uncheckedValue: 0,
+ checked: true,
+ },
+ {
+ xtype: 'proxmoxintegerfield',
+ name: 'port',
+ minValue: 1,
+ maxValue: 65535,
+ fieldLabel: gettext('Port'),
+ allowBlank: false,
+ },
+ ],
+
+ columnB: [
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'comment',
+ fieldLabel: gettext('Comment'),
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ ],
+
+ advancedColumn1: [
+ {
+ xtype: 'proxmoxintegerfield',
+ name: 'mtu',
+ fieldLabel: 'MTU',
+ minValue: 1,
+ emptyText: '1500',
+ submitEmpty: false,
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
+ ],
+ },
+ ],
+
+ initComponent: function() {
+ let me = this;
+ me.callParent();
+ if (!me.serverid) { return; } // nothing to load when creating a new entry
+ me.load({
+ success: function(response, options) {
+ let values = response.result.data;
+ let [_match, host, port] = /^(.*):(\d+)$/.exec(values.host) || [];
+ values.host = host;
+ values.port = port;
+ me.setValues(values);
+ },
+ });
+ },
+});
--
2.30.2
^ permalink raw reply [flat|nested] 13+ messages in thread
* [pbs-devel] [PATCH proxmox-backup v5 8/8] ui: add MetricServerView and use it
2022-02-02 9:50 [pbs-devel] [PATCH proxmox/proxmox-backup v5] add metrics server capability Dominik Csapak
` (8 preceding siblings ...)
2022-02-02 9:50 ` [pbs-devel] [PATCH proxmox-backup v5 7/8] ui: add window/InfluxDbEdit Dominik Csapak
@ 2022-02-02 9:50 ` Dominik Csapak
9 siblings, 0 replies; 13+ messages in thread
From: Dominik Csapak @ 2022-02-02 9:50 UTC (permalink / raw)
To: pbs-devel
simple CRUD interface to show/add/edit/delete metric servers
it's a bit different from PVE's, so reusing that is harder than copying
it. If we need it again, we can still refactor and combine them.
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
www/Makefile | 1 +
www/NavigationTree.js | 6 ++
www/config/MetricServerView.js | 145 +++++++++++++++++++++++++++++++++
3 files changed, 152 insertions(+)
create mode 100644 www/config/MetricServerView.js
diff --git a/www/Makefile b/www/Makefile
index df471e2c..08062dfc 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -56,6 +56,7 @@ JSSRC= \
config/WebauthnView.js \
config/CertificateView.js \
config/NodeOptionView.js \
+ config/MetricServerView.js \
window/ACLEdit.js \
window/BackupFileDownloader.js \
window/BackupGroupChangeOwner.js \
diff --git a/www/NavigationTree.js b/www/NavigationTree.js
index 576d05ab..4d7d4280 100644
--- a/www/NavigationTree.js
+++ b/www/NavigationTree.js
@@ -62,6 +62,12 @@ Ext.define('PBS.store.NavigationStore', {
path: 'pbsCertificateConfiguration',
leaf: true,
},
+ {
+ text: gettext('Metric Server'),
+ iconCls: 'fa fa-bar-chart',
+ path: 'pbsMetricServerView',
+ leaf: true,
+ },
{
text: gettext('Subscription'),
iconCls: 'fa fa-support',
diff --git a/www/config/MetricServerView.js b/www/config/MetricServerView.js
new file mode 100644
index 00000000..b904a427
--- /dev/null
+++ b/www/config/MetricServerView.js
@@ -0,0 +1,145 @@
+Ext.define('PBS.config.MetricServerView', {
+ extend: 'Ext.grid.Panel',
+ alias: ['widget.pbsMetricServerView'],
+
+ stateful: true,
+ stateId: 'grid-metricserver',
+
+ controller: {
+ xclass: 'Ext.app.ViewController',
+
+ render_type: function(value) {
+ switch (value) {
+ case 'influxdb-http': return "InfluxDB (HTTP)";
+ case 'influxdb-udp': return "InfluxDB (UDP)";
+ default: return Proxmox.Utils.unknownText;
+ }
+ },
+
+ get_xtype: function(value) {
+ switch (value) {
+ case 'influxdb-http': return "InfluxDbHttp";
+ case 'influxdb-udp': return "InfluxDbUdp";
+ default: throw "invalid type";
+ }
+ },
+
+ editWindow: function(xtype, id) {
+ let me = this;
+ Ext.create(`PBS.window.${xtype}Edit`, {
+ serverid: id,
+ autoShow: true,
+ autoLoad: !!id,
+ listeners: {
+ destroy: () => me.reload(),
+ },
+ });
+ },
+
+ addServer: function(button) {
+ this.editWindow(this.get_xtype(button.type));
+ },
+
+ editServer: function() {
+ let me = this;
+ let view = me.getView();
+ let selection = view.getSelection();
+ if (!selection || selection.length < 1) {
+ return;
+ }
+
+ let cfg = selection[0].data;
+
+ let xtype = me.get_xtype(cfg.type);
+ me.editWindow(xtype, cfg.name);
+ },
+
+ reload: function() {
+ this.getView().getStore().load();
+ },
+ },
+
+ store: {
+ autoLoad: true,
+ id: 'metricservers',
+ proxy: {
+ type: 'proxmox',
+ url: '/api2/json/admin/metricserver',
+ },
+ },
+
+ columns: [
+ {
+ text: gettext('Name'),
+ flex: 2,
+ dataIndex: 'name',
+ },
+ {
+ text: gettext('Type'),
+ width: 150,
+ dataIndex: 'type',
+ renderer: 'render_type',
+ },
+ {
+ text: gettext('Enabled'),
+ dataIndex: 'enable',
+ width: 100,
+ renderer: (value) => Proxmox.Utils.format_boolean(value !== false && value !== 0),
+ },
+ {
+ text: gettext('Target Server'),
+ width: 200,
+ dataIndex: 'server',
+ },
+ {
+ text: gettext('Comment'),
+ flex: 3,
+ dataIndex: 'comment',
+ renderer: Ext.htmlEncode,
+ },
+ ],
+
+ tbar: [
+ {
+ text: gettext('Add'),
+ menu: [
+ {
+ text: 'InfluxDB (HTTP)',
+ type: 'influxdb-http',
+ iconCls: 'fa fa-fw fa-bar-chart',
+ handler: 'addServer',
+ },
+ {
+ text: 'InfluxDB (UDP)',
+ type: 'influxdb-udp',
+ iconCls: 'fa fa-fw fa-bar-chart',
+ handler: 'addServer',
+ },
+ ],
+ },
+ {
+ text: gettext('Edit'),
+ xtype: 'proxmoxButton',
+ handler: 'editServer',
+ disabled: true,
+ },
+ {
+ xtype: 'proxmoxStdRemoveButton',
+ getUrl: (rec) => `/api2/extjs/config/metricserver/${rec.data.type}/${rec.data.name}`,
+ getRecordName: (rec) => rec.data.name,
+ callback: 'reload',
+ },
+ ],
+
+ listeners: {
+ itemdblclick: 'editServer',
+ },
+
+ initComponent: function() {
+ var me = this;
+
+ me.callParent();
+
+ Proxmox.Utils.monStoreErrors(me, me.getStore());
+ },
+});
--
2.30.2
^ permalink raw reply [flat|nested] 13+ messages in thread