From aa1e60544f9843bb810323ec6c2146a43f15e6b6 Mon Sep 17 00:00:00 2001
From: Pavlos Rontidis
Date: Wed, 24 Sep 2025 16:00:10 -0400
Subject: [PATCH 1/5] wip

---
 lib/codecs/src/encoding/format/protobuf.rs  |  14 +-
 lib/codecs/src/encoding/mod.rs              |   6 +
 lib/opentelemetry-proto/src/proto.rs        |   7 +
 src/codecs/encoding/config.rs               |   4 +-
 src/sinks/http/config.rs                    | 149 ++++++++++--------
 src/sinks/opentelemetry/mod.rs              |  64 ++++++--
 src/sources/opentelemetry/config.rs         |  51 +++---
 .../e2e/opentelemetry/logs/vector_otlp.yaml |   1 +
 8 files changed, 183 insertions(+), 113 deletions(-)

diff --git a/lib/codecs/src/encoding/format/protobuf.rs b/lib/codecs/src/encoding/format/protobuf.rs
index 9100f3fe815f0..1057a880afb14 100644
--- a/lib/codecs/src/encoding/format/protobuf.rs
+++ b/lib/codecs/src/encoding/format/protobuf.rs
@@ -1,5 +1,6 @@
 use std::path::PathBuf;
 
+use crate::encoding::BuildError;
 use bytes::BytesMut;
 use prost_reflect::{MessageDescriptor, prost::Message as _};
 use tokio_util::codec::Encoder;
@@ -9,9 +10,10 @@ use vector_core::{
     event::{Event, Value},
     schema,
 };
-use vrl::protobuf::{descriptor::get_message_descriptor, encode::encode_message};
-
-use crate::encoding::BuildError;
+use vrl::protobuf::{
+    descriptor::{get_message_descriptor, get_message_descriptor_from_bytes},
+    encode::encode_message,
+};
 
 /// Config used to build a `ProtobufSerializer`.
 #[configurable_component]
@@ -72,6 +74,12 @@ impl ProtobufSerializer {
         Self { message_descriptor }
     }
 
+    /// Creates a new serializer instance using the descriptor bytes directly.
+    pub fn new_from_bytes(desc_bytes: &[u8], message_type: &str) -> vector_common::Result<Self> {
+        let message_descriptor = get_message_descriptor_from_bytes(desc_bytes, message_type)?;
+        Ok(Self { message_descriptor })
+    }
+
     /// Get a description of the message type used in serialization.
     pub fn descriptor_proto(&self) -> &prost_reflect::prost_types::DescriptorProto {
         self.message_descriptor.descriptor_proto()
diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs
index 91e45ffc6d1c7..8352d27559cd5 100644
--- a/lib/codecs/src/encoding/mod.rs
+++ b/lib/codecs/src/encoding/mod.rs
@@ -293,6 +293,12 @@ pub enum SerializerConfig {
     Text(TextSerializerConfig),
 }
 
+impl Default for SerializerConfig {
+    fn default() -> Self {
+        Self::Json(JsonSerializerConfig::default())
+    }
+}
+
 impl From<AvroSerializerConfig> for SerializerConfig {
     fn from(config: AvroSerializerConfig) -> Self {
         Self::Avro { avro: config.avro }
diff --git a/lib/opentelemetry-proto/src/proto.rs b/lib/opentelemetry-proto/src/proto.rs
index 5559113bd14db..a770d7d51506b 100644
--- a/lib/opentelemetry-proto/src/proto.rs
+++ b/lib/opentelemetry-proto/src/proto.rs
@@ -1,3 +1,10 @@
+pub const LOGS_REQUEST_MESSAGE_TYPE: &str =
+    "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest";
+pub const TRACES_REQUEST_MESSAGE_TYPE: &str =
+    "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest";
+pub const METRICS_REQUEST_MESSAGE_TYPE: &str =
+    "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest";
+
 /// Service stub and clients.
 pub mod collector {
     pub mod trace {
diff --git a/src/codecs/encoding/config.rs b/src/codecs/encoding/config.rs
index 756bc8e2406f2..dc76b7ce2f46e 100644
--- a/src/codecs/encoding/config.rs
+++ b/src/codecs/encoding/config.rs
@@ -10,7 +10,7 @@ use crate::codecs::Transformer;
 
 /// Encoding configuration.
 #[configurable_component]
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
 /// Configures how events are encoded into raw bytes.
 /// The selected encoding also determines which input types (logs, metrics, traces) are supported.
 pub struct EncodingConfig {
@@ -60,7 +60,7 @@ where
 
 /// Encoding configuration.
 #[configurable_component]
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
 #[serde(deny_unknown_fields)]
 pub struct EncodingConfigWithFraming {
     #[configurable(derived)]
diff --git a/src/sinks/http/config.rs b/src/sinks/http/config.rs
index ab72afb9c4cab..0e53cc21d64ec 100644
--- a/src/sinks/http/config.rs
+++ b/src/sinks/http/config.rs
@@ -68,6 +68,8 @@ pub struct HttpSinkConfig {
     #[serde(default)]
     pub compression: Compression,
 
+    /// If not specified, `encoding.codec` defaults to `json`.
+    /// If `encoding.framing` is not specified, it is deduced from `encoding.codec`.
     #[serde(flatten)]
     pub encoding: EncodingConfigWithFraming,
 
@@ -170,79 +172,15 @@ impl HttpSinkConfig {
         let (framer, serializer) = self.encoding.build(SinkType::MessageBased)?;
         Ok(Encoder::<Framer>::new(framer, serializer))
     }
-}
-
-impl GenerateConfig for HttpSinkConfig {
-    fn generate_config() -> toml::Value {
-        toml::from_str(
-            r#"uri = "https://10.22.212.22:9000/endpoint"
-            encoding.codec = "json""#,
-        )
-        .unwrap()
-    }
-}
-
-async fn healthcheck(uri: UriSerde, auth: Option<Auth>, client: HttpClient) -> crate::Result<()> {
-    let auth = auth.choose_one(&uri.auth)?;
-    let uri = uri.with_default_parts();
-    let mut request = Request::head(&uri.uri).body(Body::empty()).unwrap();
-
-    if let Some(auth) = auth {
-        auth.apply(&mut request);
-    }
-
-    let response = client.send(request).await?;
-
-    match response.status() {
-        StatusCode::OK => Ok(()),
-        status => Err(HealthcheckError::UnexpectedStatus { status }.into()),
-    }
-}
-
-pub(super) fn validate_headers(
-    headers: &BTreeMap<String, String>,
-    configures_auth: bool,
-) -> crate::Result<IndexMap<OrderedHeaderName, HeaderValue>> {
-    let headers = crate::sinks::util::http::validate_headers(headers)?;
-
-    for name in headers.keys() {
-        if configures_auth && name.inner() == AUTHORIZATION {
-            return Err("Authorization header can not be used with defined auth options".into());
-        }
-    }
-    Ok(headers)
-}
-
-pub(super) fn validate_payload_wrapper(
-    payload_prefix: &str,
-    payload_suffix: &str,
-    encoder: &Encoder<Framer>,
-) -> crate::Result<(String, String)> {
-    let payload = [payload_prefix, "{}", payload_suffix].join("");
-    match (
-        encoder.serializer(),
-        encoder.framer(),
-        serde_json::from_str::<serde_json::Value>(&payload),
-    ) {
-        (
-            Serializer::Json(_),
-            Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' }),
-            Err(_),
-        ) => Err("Payload prefix and suffix wrapper must produce a valid JSON object.".into()),
-        _ => Ok((payload_prefix.to_owned(), payload_suffix.to_owned())),
-    }
-}
-
-#[async_trait]
-#[typetag::serde(name = "http")]
-impl SinkConfig for HttpSinkConfig {
-    async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
+    pub(crate) async fn build_with_encoder(
+        &self,
+        cx: SinkContext,
+        encoder: Encoder<Framer>,
+        transformer: Transformer,
+    ) -> crate::Result<(VectorSink, Healthcheck)> {
         let batch_settings = self.batch.validate()?.into_batcher_settings()?;
 
-        let encoder = self.build_encoder()?;
-        let transformer = self.encoding.transformer();
-
         let mut request = self.request.clone();
         request.add_old_option(self.headers.clone());
 
@@ -350,6 +288,77 @@ impl SinkConfig for HttpSinkConfig {
 
         Ok((VectorSink::from_event_streamsink(sink), healthcheck))
     }
+}
+
+impl GenerateConfig for HttpSinkConfig {
+    fn generate_config() -> toml::Value {
+        toml::from_str(
+            r#"uri = "https://10.22.212.22:9000/endpoint"
+            encoding.codec = "json""#,
+        )
+        .unwrap()
+    }
+}
+
+async fn healthcheck(uri: UriSerde, auth: Option<Auth>, client: HttpClient) -> crate::Result<()> {
+    let auth = auth.choose_one(&uri.auth)?;
+    let uri = uri.with_default_parts();
+    let mut request = Request::head(&uri.uri).body(Body::empty()).unwrap();
+
+    if let Some(auth) = auth {
+        auth.apply(&mut request);
+    }
+
+    let response = client.send(request).await?;
+
+    match response.status() {
+        StatusCode::OK => Ok(()),
+        status => Err(HealthcheckError::UnexpectedStatus { status }.into()),
+    }
+}
+
+pub(super) fn validate_headers(
+    headers: &BTreeMap<String, String>,
+    configures_auth: bool,
+) -> crate::Result<IndexMap<OrderedHeaderName, HeaderValue>> {
+    let headers = crate::sinks::util::http::validate_headers(headers)?;
+
+    for name in headers.keys() {
+        if configures_auth && name.inner() == AUTHORIZATION {
+            return Err("Authorization header can not be used with defined auth options".into());
+        }
+    }
+
+    Ok(headers)
+}
+
+pub(super) fn validate_payload_wrapper(
+    payload_prefix: &str,
+    payload_suffix: &str,
+    encoder: &Encoder<Framer>,
+) -> crate::Result<(String, String)> {
+    let payload = [payload_prefix, "{}", payload_suffix].join("");
+    match (
+        encoder.serializer(),
+        encoder.framer(),
+        serde_json::from_str::<serde_json::Value>(&payload),
+    ) {
+        (
+            Serializer::Json(_),
+            Framer::CharacterDelimited(CharacterDelimitedEncoder { delimiter: b',' }),
+            Err(_),
+        ) => Err("Payload prefix and suffix wrapper must produce a valid JSON object.".into()),
+        _ => Ok((payload_prefix.to_owned(), payload_suffix.to_owned())),
+    }
+}
+
+#[async_trait]
+#[typetag::serde(name = "http")]
+impl SinkConfig for HttpSinkConfig {
+    async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
+        let encoder = self.build_encoder()?;
+        self.build_with_encoder(cx, encoder, self.encoding.transformer()).await
+    }
 
     fn input(&self) -> Input {
         Input::new(self.encoding.config().1.input_type())
diff --git a/src/sinks/opentelemetry/mod.rs b/src/sinks/opentelemetry/mod.rs
index 88963f8603cde..8007e3f2a0b54 100644
--- a/src/sinks/opentelemetry/mod.rs
+++ b/src/sinks/opentelemetry/mod.rs
@@ -1,5 +1,18 @@
+use crate::codecs::Encoder;
+use crate::{
+    codecs::{EncodingConfigWithFraming, Transformer},
+    config::{AcknowledgementsConfig, Input, SinkConfig, SinkContext},
+    sinks::{
+        Healthcheck, VectorSink,
+        http::config::{HttpMethod, HttpSinkConfig},
+    },
+};
 use indoc::indoc;
 use vector_config::component::GenerateConfig;
+use vector_lib::codecs::encoding::{Framer, ProtobufSerializer, Serializer};
+use vector_lib::opentelemetry::proto::{
+    LOGS_REQUEST_MESSAGE_TYPE, METRICS_REQUEST_MESSAGE_TYPE, TRACES_REQUEST_MESSAGE_TYPE,
+};
 use vector_lib::{
     codecs::{
         JsonSerializerConfig,
@@ -8,15 +21,6 @@ use vector_lib::{
     configurable::configurable_component,
 };
 
-use crate::{
-    codecs::{EncodingConfigWithFraming, Transformer},
-    config::{AcknowledgementsConfig, Input, SinkConfig, SinkContext},
-    sinks::{
-        Healthcheck, VectorSink,
-        http::config::{HttpMethod, HttpSinkConfig},
-    },
-};
-
 /// Configuration for the `OpenTelemetry` sink.
 #[configurable_component(sink("opentelemetry", "Deliver OTLP data over HTTP."))]
 #[derive(Clone, Debug, Default)]
@@ -24,6 +28,19 @@ pub struct OpenTelemetryConfig {
     /// Protocol configuration
     #[configurable(derived)]
     protocol: Protocol,
+
+    /// Setting this field to `true` overrides all encoding settings and encodes requests according to the
+    /// [OpenTelemetry protocol](https://opentelemetry.io/docs/specs/otel/protocol/).
+    ///
+    /// The endpoint is used to determine the data type:
+    /// * v1/logs → OTLP Logs
+    /// * v1/traces → OTLP Traces
+    /// * v1/metrics → OTLP Metrics
+    ///
+    /// More information is available [here](https://opentelemetry.io/docs/specs/otlp/#otlphttp-request).
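+    ///
+    /// As a minimal, illustrative sketch (the sink name, host, and port below are hypothetical),
+    /// a configuration such as:
+    ///
+    /// ```yaml
+    /// sinks:
+    ///   otlp:
+    ///     type: opentelemetry
+    ///     use_otlp_encoding: true
+    ///     protocol:
+    ///       type: http
+    ///       uri: http://localhost:4318/v1/logs
+    ///       method: post
+    /// ```
+    ///
+    /// should send each request as a protobuf-encoded `ExportLogsServiceRequest`, because the
+    /// URI ends in `v1/logs`.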
+    #[configurable(derived)]
+    #[serde(default)]
+    pub use_otlp_encoding: bool,
 }
 
 /// The protocol used to send data to OpenTelemetry.
@@ -78,7 +95,21 @@ impl GenerateConfig for OpenTelemetryConfig {
 impl SinkConfig for OpenTelemetryConfig {
     async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
         match &self.protocol {
-            Protocol::Http(config) => config.build(cx).await,
+            Protocol::Http(config) => {
+                if self.use_otlp_encoding {
+                    let serializer = ProtobufSerializer::new_from_bytes(
+                        vector_lib::opentelemetry::proto::DESCRIPTOR_BYTES,
+                        to_message_type(&config.uri.to_string())?,
+                    )?;
+                    let encoder = Encoder::<Framer>::new(
+                        FramingConfig::Bytes.build(),
+                        Serializer::Protobuf(serializer),
+                    );
+                    config.build_with_encoder(cx, encoder, config.encoding.transformer()).await
+                } else {
+                    config.build(cx).await
+                }
+            }
         }
     }
 
@@ -95,6 +126,19 @@ impl SinkConfig for OpenTelemetryConfig {
     }
 }
 
+/// Maps an endpoint that ends with a known OTLP path to the corresponding request message type.
+pub fn to_message_type(endpoint: &str) -> crate::Result<&'static str> {
+    if endpoint.ends_with("v1/logs") {
+        Ok(LOGS_REQUEST_MESSAGE_TYPE)
+    } else if endpoint.ends_with("v1/traces") {
+        Ok(TRACES_REQUEST_MESSAGE_TYPE)
+    } else if endpoint.ends_with("v1/metrics") {
+        Ok(METRICS_REQUEST_MESSAGE_TYPE)
+    } else {
+        Err(format!("Endpoint {endpoint} not supported, should end with 'v1/logs', 'v1/metrics' or 'v1/traces'.").into())
+    }
+}
+
 #[cfg(test)]
 mod test {
     #[test]
diff --git a/src/sources/opentelemetry/config.rs b/src/sources/opentelemetry/config.rs
index b2457f8f70113..14fd14fd26094 100644
--- a/src/sources/opentelemetry/config.rs
+++ b/src/sources/opentelemetry/config.rs
@@ -1,8 +1,28 @@
 use std::net::SocketAddr;
 
+use crate::{
+    config::{
+        DataType, GenerateConfig, Resource, SourceAcknowledgementsConfig, SourceConfig,
+        SourceContext, SourceOutput,
+    },
+    http::KeepaliveConfig,
+    serde::bool_or_struct,
+    sources::{
+        Source,
+        http_server::{build_param_matcher, remove_duplicates},
+        opentelemetry::{
+            grpc::Service,
+            http::{build_warp_filter, run_http_server},
+        },
+        util::grpc::run_grpc_server_with_routes,
+    },
+};
 use futures::FutureExt;
 use futures_util::{TryFutureExt, future::join};
 use tonic::{codec::CompressionEncoding, transport::server::RoutesBuilder};
+use vector_lib::opentelemetry::proto::{
+    LOGS_REQUEST_MESSAGE_TYPE, METRICS_REQUEST_MESSAGE_TYPE, TRACES_REQUEST_MESSAGE_TYPE,
+};
 use vector_lib::{
     codecs::decoding::ProtobufDeserializer,
     config::{LegacyKey, LogNamespace, log_schema},
@@ -28,35 +48,10 @@ use vrl::{
     value::{Kind, kind::Collection},
 };
 
-use crate::{
-    config::{
-        DataType, GenerateConfig, Resource, SourceAcknowledgementsConfig, SourceConfig,
-        SourceContext, SourceOutput,
-    },
-    http::KeepaliveConfig,
-    serde::bool_or_struct,
-    sources::{
-        Source,
-        http_server::{build_param_matcher, remove_duplicates},
-        opentelemetry::{
-            grpc::Service,
-            http::{build_warp_filter, run_http_server},
-        },
-        util::grpc::run_grpc_server_with_routes,
-    },
-};
-
 pub const LOGS: &str = "logs";
 pub const METRICS: &str = "metrics";
 pub const TRACES: &str = "traces";
 
-pub const OTEL_PROTO_LOGS_REQUEST: &str =
-    "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest";
-pub const OTEL_PROTO_TRACES_REQUEST: &str =
"opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest"; -pub const OTEL_PROTO_METRICS_REQUEST: &str = - "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest"; - /// Configuration for the `opentelemetry` source. #[configurable_component(source("opentelemetry", "Receive OTLP data through gRPC or HTTP."))] #[derive(Clone, Debug)] @@ -198,7 +193,7 @@ impl SourceConfig for OpentelemetryConfig { let grpc_tls_settings = MaybeTlsSettings::from_config(self.grpc.tls.as_ref(), true)?; - let log_deserializer = self.get_deserializer(OTEL_PROTO_LOGS_REQUEST)?; + let log_deserializer = self.get_deserializer(LOGS_REQUEST_MESSAGE_TYPE)?; let log_service = LogsServiceServer::new(Service { pipeline: cx.out.clone(), acknowledgements, @@ -209,7 +204,7 @@ impl SourceConfig for OpentelemetryConfig { .accept_compressed(CompressionEncoding::Gzip) .max_decoding_message_size(usize::MAX); - let metric_deserializer = self.get_deserializer(OTEL_PROTO_METRICS_REQUEST)?; + let metric_deserializer = self.get_deserializer(METRICS_REQUEST_MESSAGE_TYPE)?; let metrics_service = MetricsServiceServer::new(Service { pipeline: cx.out.clone(), acknowledgements, @@ -220,7 +215,7 @@ impl SourceConfig for OpentelemetryConfig { .accept_compressed(CompressionEncoding::Gzip) .max_decoding_message_size(usize::MAX); - let trace_deserializer = self.get_deserializer(OTEL_PROTO_TRACES_REQUEST)?; + let trace_deserializer = self.get_deserializer(TRACES_REQUEST_MESSAGE_TYPE)?; let trace_service = TraceServiceServer::new(Service { pipeline: cx.out.clone(), acknowledgements, diff --git a/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml b/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml index a3b98647059c5..e84196bcf7a07 100644 --- a/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml +++ b/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml @@ -19,6 +19,7 @@ sinks: inputs: - source0.logs type: opentelemetry + use_otlp_encoding: true protocol: type: http uri: http://otel-collector-sink:5318/v1/logs From fde54017a76b2ccc2ce12e3da6d6e9f10eece0a3 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Wed, 24 Sep 2025 16:49:58 -0400 Subject: [PATCH 2/5] ran cargo fmt --- src/sinks/http/config.rs | 3 ++- src/sinks/opentelemetry/mod.rs | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/sinks/http/config.rs b/src/sinks/http/config.rs index 0e53cc21d64ec..cdc27abeb2bab 100644 --- a/src/sinks/http/config.rs +++ b/src/sinks/http/config.rs @@ -357,7 +357,8 @@ pub(super) fn validate_payload_wrapper( impl SinkConfig for HttpSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let encoder = self.build_encoder()?; - self.build_with_encoder(cx, encoder, self.encoding.transformer()).await + self.build_with_encoder(cx, encoder, self.encoding.transformer()) + .await } fn input(&self) -> Input { diff --git a/src/sinks/opentelemetry/mod.rs b/src/sinks/opentelemetry/mod.rs index 8007e3f2a0b54..c8e820eb933d2 100644 --- a/src/sinks/opentelemetry/mod.rs +++ b/src/sinks/opentelemetry/mod.rs @@ -105,7 +105,9 @@ impl SinkConfig for OpenTelemetryConfig { FramingConfig::Bytes.build(), Serializer::Protobuf(serializer), ); - config.build_with_encoder(cx, encoder, config.encoding.transformer()).await + config + .build_with_encoder(cx, encoder, config.encoding.transformer()) + .await } else { config.build(cx).await } From 94078dc9690c34772be8e0b28f8f9319799d6d66 Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 25 Sep 2025 10:03:48 -0400 Subject: [PATCH 3/5] changelog --- 
 changelog.d/otlp_encoding.feature.md          |  4 ++++
 .../e2e/opentelemetry/logs/vector_otlp.yaml   |  4 ----
 .../components/sources/opentelemetry.cue      | 17 +----------------
 3 files changed, 5 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/otlp_encoding.feature.md

diff --git a/changelog.d/otlp_encoding.feature.md b/changelog.d/otlp_encoding.feature.md
new file mode 100644
index 0000000000000..55e806aac8f45
--- /dev/null
+++ b/changelog.d/otlp_encoding.feature.md
@@ -0,0 +1,4 @@
+Added `use_otlp_encoding` option to the `opentelemetry` sink.
+When set to `true`, the sink assumes that Vector events are structured according to OTLP.
+
+authors: pront
diff --git a/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml b/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml
index e84196bcf7a07..09e83a7812f23 100644
--- a/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml
+++ b/tests/data/e2e/opentelemetry/logs/vector_otlp.yaml
@@ -24,10 +24,6 @@ sinks:
       type: http
       uri: http://otel-collector-sink:5318/v1/logs
       method: post
-      encoding:
-        codec: json
-        framing:
-          method: newline_delimited
   batch:
     max_events: 1
   request:
diff --git a/website/cue/reference/components/sources/opentelemetry.cue b/website/cue/reference/components/sources/opentelemetry.cue
index 622e4e8c684f3..d9cc36d266b18 100644
--- a/website/cue/reference/components/sources/opentelemetry.cue
+++ b/website/cue/reference/components/sources/opentelemetry.cue
@@ -295,30 +295,15 @@ components: sources: opentelemetry: {
 		    inputs:
 		      - otel.logs
 		    type: opentelemetry
+		    use_otlp_encoding: true
 		    protocol:
 		      type: http
 		      uri: http://localhost:5318/v1/logs
 		      method: post
-		      encoding:
-		        codec: protobuf
-		        protobuf:
-		          desc_file: path/to/opentelemetry-proto.desc
-		          message_type: opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest
-		        framing:
-		          method: "bytes"
 		    request:
 		      headers:
 		        content-type: "application/x-protobuf"
 		    ```
-
-		    The `desc` file was generated with the following command:
-		    ```bash
-		    protoc -I=/path/to/vector/lib/opentelemetry-proto/src/proto/opentelemetry-proto \\
-		      --include_imports \\
-		      --include_source_info \\
-		      --descriptor_set_out=opentelemetry-proto.desc \\
-		      $(find /path/to/vector/lib/opentelemetry-proto/src/proto/opentelemetry-proto -name '*.proto')
-		    ```
 			"""
 		}
 		tls: {
From f161cbe23ec4a70844881ee52461a44b54e1fa5f Mon Sep 17 00:00:00 2001
From: Pavlos Rontidis
Date: Thu, 25 Sep 2025 15:04:20 -0400
Subject: [PATCH 4/5] linting

---
 changelog.d/otlp_encoding.feature.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/otlp_encoding.feature.md b/changelog.d/otlp_encoding.feature.md
index 55e806aac8f45..a29c59d128c41 100644
--- a/changelog.d/otlp_encoding.feature.md
+++ b/changelog.d/otlp_encoding.feature.md
@@ -1,4 +1,4 @@
-Added `use_otlp_encoding` option to the `opentelemetry` sink. 
+Added `use_otlp_encoding` option to the `opentelemetry` sink.
 When set to `true`, the sink assumes that Vector events are structured according to OTLP.
 
authors: pront From ff38f524c285886a50469107b8b7cf3f1969ed1a Mon Sep 17 00:00:00 2001 From: Pavlos Rontidis Date: Thu, 25 Sep 2025 15:30:12 -0400 Subject: [PATCH 5/5] generate component docs --- .../sinks/generated/opentelemetry.cue | 1473 +++++++++-------- 1 file changed, 745 insertions(+), 728 deletions(-) diff --git a/website/cue/reference/components/sinks/generated/opentelemetry.cue b/website/cue/reference/components/sinks/generated/opentelemetry.cue index 7b284dd2fd3df..6c78bef3f11bb 100644 --- a/website/cue/reference/components/sinks/generated/opentelemetry.cue +++ b/website/cue/reference/components/sinks/generated/opentelemetry.cue @@ -1,130 +1,131 @@ package metadata -generated: components: sinks: opentelemetry: configuration: protocol: { - description: "Protocol configuration" - required: true - type: object: options: { - acknowledgements: { - description: """ - Controls how acknowledgements are handled for this sink. - - See [End-to-end Acknowledgements][e2e_acks] for more information on how event acknowledgement is handled. - - [e2e_acks]: https://vector.dev/docs/architecture/end-to-end-acknowledgements/ - """ - required: false - type: object: options: enabled: { +generated: components: sinks: opentelemetry: configuration: { + protocol: { + description: "Protocol configuration" + required: true + type: object: options: { + acknowledgements: { description: """ - Whether or not end-to-end acknowledgements are enabled. + Controls how acknowledgements are handled for this sink. - When enabled for a sink, any source that supports end-to-end - acknowledgements that is connected to that sink waits for events - to be acknowledged by **all connected sinks** before acknowledging them at the source. + See [End-to-end Acknowledgements][e2e_acks] for more information on how event acknowledgement is handled. - Enabling or disabling acknowledgements at the sink level takes precedence over any global - [`acknowledgements`][global_acks] configuration. - - [global_acks]: https://vector.dev/docs/reference/configuration/global-options/#acknowledgements + [e2e_acks]: https://vector.dev/docs/architecture/end-to-end-acknowledgements/ """ required: false - type: bool: {} + type: object: options: enabled: { + description: """ + Whether or not end-to-end acknowledgements are enabled. + + When enabled for a sink, any source that supports end-to-end + acknowledgements that is connected to that sink waits for events + to be acknowledged by **all connected sinks** before acknowledging them at the source. + + Enabling or disabling acknowledgements at the sink level takes precedence over any global + [`acknowledgements`][global_acks] configuration. + + [global_acks]: https://vector.dev/docs/reference/configuration/global-options/#acknowledgements + """ + required: false + type: bool: {} + } } - } - auth: { - description: """ - Configuration of the authentication strategy for HTTP requests. - - HTTP authentication should be used with HTTPS only, as the authentication credentials are passed as an - HTTP header without any additional encryption beyond what is provided by the transport itself. - """ - required: false - type: object: options: { - auth: { - description: "The AWS authentication configuration." - relevant_when: "strategy = \"aws\"" - required: true - type: object: options: { - access_key_id: { - description: "The AWS access key ID." 
- required: true - type: string: examples: ["AKIAIOSFODNN7EXAMPLE"] - } - assume_role: { - description: """ + auth: { + description: """ + Configuration of the authentication strategy for HTTP requests. + + HTTP authentication should be used with HTTPS only, as the authentication credentials are passed as an + HTTP header without any additional encryption beyond what is provided by the transport itself. + """ + required: false + type: object: options: { + auth: { + description: "The AWS authentication configuration." + relevant_when: "strategy = \"aws\"" + required: true + type: object: options: { + access_key_id: { + description: "The AWS access key ID." + required: true + type: string: examples: ["AKIAIOSFODNN7EXAMPLE"] + } + assume_role: { + description: """ The ARN of an [IAM role][iam_role] to assume. [iam_role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html """ - required: true - type: string: examples: ["arn:aws:iam::123456789098:role/my_role"] - } - credentials_file: { - description: "Path to the credentials file." - required: true - type: string: examples: ["/my/aws/credentials"] - } - external_id: { - description: """ + required: true + type: string: examples: ["arn:aws:iam::123456789098:role/my_role"] + } + credentials_file: { + description: "Path to the credentials file." + required: true + type: string: examples: ["/my/aws/credentials"] + } + external_id: { + description: """ The optional unique external ID in conjunction with role to assume. [external_id]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html """ - required: false - type: string: examples: ["randomEXAMPLEidString"] - } - imds: { - description: "Configuration for authenticating with AWS through IMDS." - required: false - type: object: options: { - connect_timeout_seconds: { - description: "Connect timeout for IMDS." - required: false - type: uint: { - default: 1 - unit: "seconds" + required: false + type: string: examples: ["randomEXAMPLEidString"] + } + imds: { + description: "Configuration for authenticating with AWS through IMDS." + required: false + type: object: options: { + connect_timeout_seconds: { + description: "Connect timeout for IMDS." + required: false + type: uint: { + default: 1 + unit: "seconds" + } } - } - max_attempts: { - description: "Number of IMDS retries for fetching tokens and metadata." - required: false - type: uint: default: 4 - } - read_timeout_seconds: { - description: "Read timeout for IMDS." - required: false - type: uint: { - default: 1 - unit: "seconds" + max_attempts: { + description: "Number of IMDS retries for fetching tokens and metadata." + required: false + type: uint: default: 4 + } + read_timeout_seconds: { + description: "Read timeout for IMDS." + required: false + type: uint: { + default: 1 + unit: "seconds" + } } } } - } - load_timeout_secs: { - description: """ + load_timeout_secs: { + description: """ Timeout for successfully loading any credentials, in seconds. Relevant when the default credentials chain or `assume_role` is used. """ - required: false - type: uint: { - examples: [30] - unit: "seconds" + required: false + type: uint: { + examples: [30] + unit: "seconds" + } } - } - profile: { - description: """ + profile: { + description: """ The credentials profile to use. Used to select AWS credentials from a provided credentials file. 
""" - required: false - type: string: { - default: "default" - examples: ["develop"] + required: false + type: string: { + default: "default" + examples: ["develop"] + } } - } - region: { - description: """ + region: { + description: """ The [AWS region][aws_region] to send STS requests to. If not set, this defaults to the configured region @@ -132,16 +133,16 @@ generated: components: sinks: opentelemetry: configuration: protocol: { [aws_region]: https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints """ - required: false - type: string: examples: ["us-west-2"] - } - secret_access_key: { - description: "The AWS secret access key." - required: true - type: string: examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] - } - session_name: { - description: """ + required: false + type: string: examples: ["us-west-2"] + } + secret_access_key: { + description: "The AWS secret access key." + required: true + type: string: examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + } + session_name: { + description: """ The optional [RoleSessionName][role_session_name] is a unique session identifier for your assumed role. Should be unique per principal or reason. @@ -149,210 +150,210 @@ generated: components: sinks: opentelemetry: configuration: protocol: { [role_session_name]: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html """ - required: false - type: string: examples: ["vector-indexer-role"] - } - session_token: { - description: """ + required: false + type: string: examples: ["vector-indexer-role"] + } + session_token: { + description: """ The AWS session token. See [AWS temporary credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) """ - required: false - type: string: examples: ["AQoDYXdz...AQoDYXdz..."] + required: false + type: string: examples: ["AQoDYXdz...AQoDYXdz..."] + } } } - } - password: { - description: "The basic authentication password." - relevant_when: "strategy = \"basic\"" - required: true - type: string: examples: ["${PASSWORD}", "password"] - } - service: { - description: "The AWS service name to use for signing." - relevant_when: "strategy = \"aws\"" - required: true - type: string: {} - } - strategy: { - description: "The authentication strategy to use." - required: true - type: string: enum: { - aws: "AWS authentication." - basic: """ + password: { + description: "The basic authentication password." + relevant_when: "strategy = \"basic\"" + required: true + type: string: examples: ["${PASSWORD}", "password"] + } + service: { + description: "The AWS service name to use for signing." + relevant_when: "strategy = \"aws\"" + required: true + type: string: {} + } + strategy: { + description: "The authentication strategy to use." + required: true + type: string: enum: { + aws: "AWS authentication." + basic: """ Basic authentication. The username and password are concatenated and encoded using [base64][base64]. [base64]: https://en.wikipedia.org/wiki/Base64 """ - bearer: """ + bearer: """ Bearer authentication. The bearer token value (OAuth2, JWT, etc.) is passed as-is. """ + } + } + token: { + description: "The bearer authentication token." + relevant_when: "strategy = \"bearer\"" + required: true + type: string: {} + } + user: { + description: "The basic authentication username." + relevant_when: "strategy = \"basic\"" + required: true + type: string: examples: ["${USERNAME}", "username"] } - } - token: { - description: "The bearer authentication token." 
- relevant_when: "strategy = \"bearer\"" - required: true - type: string: {} - } - user: { - description: "The basic authentication username." - relevant_when: "strategy = \"basic\"" - required: true - type: string: examples: ["${USERNAME}", "username"] } } - } - batch: { - description: "Event batching behavior." - required: false - type: object: options: { - max_bytes: { - description: """ - The maximum size of a batch that is processed by a sink. + batch: { + description: "Event batching behavior." + required: false + type: object: options: { + max_bytes: { + description: """ + The maximum size of a batch that is processed by a sink. - This is based on the uncompressed size of the batched events, before they are - serialized or compressed. - """ - required: false - type: uint: { - default: 10000000 - unit: "bytes" + This is based on the uncompressed size of the batched events, before they are + serialized or compressed. + """ + required: false + type: uint: { + default: 10000000 + unit: "bytes" + } } - } - max_events: { - description: "The maximum size of a batch before it is flushed." - required: false - type: uint: unit: "events" - } - timeout_secs: { - description: "The maximum age of a batch before it is flushed." - required: false - type: float: { - default: 1.0 - unit: "seconds" + max_events: { + description: "The maximum size of a batch before it is flushed." + required: false + type: uint: unit: "events" + } + timeout_secs: { + description: "The maximum age of a batch before it is flushed." + required: false + type: float: { + default: 1.0 + unit: "seconds" + } } } } - } - compression: { - description: """ - Compression configuration. - - All compression algorithms use the default compression level unless otherwise specified. - """ - required: false - type: string: { - default: "none" - enum: { - gzip: """ - [Gzip][gzip] compression. - - [gzip]: https://www.gzip.org/ - """ - none: "No compression." - snappy: """ - [Snappy][snappy] compression. - - [snappy]: https://github.com/google/snappy/blob/main/docs/README.md - """ - zlib: """ - [Zlib][zlib] compression. - - [zlib]: https://zlib.net/ - """ - zstd: """ - [Zstandard][zstd] compression. + compression: { + description: """ + Compression configuration. - [zstd]: https://facebook.github.io/zstd/ - """ + All compression algorithms use the default compression level unless otherwise specified. + """ + required: false + type: string: { + default: "none" + enum: { + gzip: """ + [Gzip][gzip] compression. + + [gzip]: https://www.gzip.org/ + """ + none: "No compression." + snappy: """ + [Snappy][snappy] compression. + + [snappy]: https://github.com/google/snappy/blob/main/docs/README.md + """ + zlib: """ + [Zlib][zlib] compression. + + [zlib]: https://zlib.net/ + """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ + } } } - } - encoding: { - description: """ - Encoding configuration. - Configures how events are encoded into raw bytes. - The selected encoding also determines which input types (logs, metrics, traces) are supported. - """ - required: true - type: object: options: { - avro: { - description: "Apache Avro-specific encoder options." - relevant_when: "codec = \"avro\"" - required: true - type: object: options: schema: { - description: "The Avro schema." - required: true - type: string: examples: ["{ \"type\": \"record\", \"name\": \"log\", \"fields\": [{ \"name\": \"message\", \"type\": \"string\" }] }"] + encoding: { + description: """ + Encoding configuration. 
+ Configures how events are encoded into raw bytes. + The selected encoding also determines which input types (logs, metrics, traces) are supported. + """ + required: true + type: object: options: { + avro: { + description: "Apache Avro-specific encoder options." + relevant_when: "codec = \"avro\"" + required: true + type: object: options: schema: { + description: "The Avro schema." + required: true + type: string: examples: ["{ \"type\": \"record\", \"name\": \"log\", \"fields\": [{ \"name\": \"message\", \"type\": \"string\" }] }"] + } } - } - cef: { - description: "The CEF Serializer Options." - relevant_when: "codec = \"cef\"" - required: true - type: object: options: { - device_event_class_id: { - description: """ + cef: { + description: "The CEF Serializer Options." + relevant_when: "codec = \"cef\"" + required: true + type: object: options: { + device_event_class_id: { + description: """ Unique identifier for each event type. Identifies the type of event reported. The value length must be less than or equal to 1023. """ - required: true - type: string: {} - } - device_product: { - description: """ + required: true + type: string: {} + } + device_product: { + description: """ Identifies the product of a vendor. The part of a unique device identifier. No two products can use the same combination of device vendor and device product. The value length must be less than or equal to 63. """ - required: true - type: string: {} - } - device_vendor: { - description: """ + required: true + type: string: {} + } + device_vendor: { + description: """ Identifies the vendor of the product. The part of a unique device identifier. No two products can use the same combination of device vendor and device product. The value length must be less than or equal to 63. """ - required: true - type: string: {} - } - device_version: { - description: """ + required: true + type: string: {} + } + device_version: { + description: """ Identifies the version of the problem. The combination of the device product, vendor and this value make up the unique id of the device that sends messages. The value length must be less than or equal to 31. """ - required: true - type: string: {} - } - extensions: { - description: """ + required: true + type: string: {} + } + extensions: { + description: """ The collection of key-value pairs. Keys are the keys of the extensions, and values are paths that point to the extension values of a log event. The event can have any number of key-value pairs in any order. """ - required: false - type: object: options: "*": { - description: "This is a path that points to the extension value of a log event." - required: true - type: string: {} + required: false + type: object: options: "*": { + description: "This is a path that points to the extension value of a log event." + required: true + type: string: {} + } } - } - name: { - description: """ + name: { + description: """ This is a path that points to the human-readable description of a log event. The value length must be less than or equal to 512. Equals "cef.name" by default. """ - required: true - type: string: {} - } - severity: { - description: """ + required: true + type: string: {} + } + severity: { + description: """ This is a path that points to the field of a log event that reflects importance of the event. Reflects importance of the event. @@ -360,38 +361,38 @@ generated: components: sinks: opentelemetry: configuration: protocol: { 0 = lowest_importance, 10 = highest_importance. Set to "cef.severity" by default. 
""" - required: true - type: string: {} - } - version: { - description: """ + required: true + type: string: {} + } + version: { + description: """ CEF Version. Can be either 0 or 1. Set to "0" by default. """ - required: true - type: string: enum: { - V0: "CEF specification version 0.1." - V1: "CEF specification version 1.x." + required: true + type: string: enum: { + V0: "CEF specification version 0.1." + V1: "CEF specification version 1.x." + } } } } - } - codec: { - description: "The codec to use for encoding events." - required: true - type: string: enum: { - avro: """ + codec: { + description: "The codec to use for encoding events." + required: true + type: string: enum: { + avro: """ Encodes an event as an [Apache Avro][apache_avro] message. [apache_avro]: https://avro.apache.org/ """ - cef: "Encodes an event as a CEF (Common Event Format) formatted message." - csv: """ + cef: "Encodes an event as a CEF (Common Event Format) formatted message." + csv: """ Encodes an event as a CSV message. This codec must be configured with fields to encode. """ - gelf: """ + gelf: """ Encodes an event as a [GELF][gelf] message. This codec is experimental for the following reason: @@ -409,17 +410,17 @@ generated: components: sinks: opentelemetry: configuration: protocol: { [gelf]: https://docs.graylog.org/docs/gelf [implementation]: https://github.com/Graylog2/go-gelf/blob/v2/gelf/reader.go """ - json: """ + json: """ Encodes an event as [JSON][json]. [json]: https://www.json.org/ """ - logfmt: """ + logfmt: """ Encodes an event as a [logfmt][logfmt] message. [logfmt]: https://brandur.org/logfmt """ - native: """ + native: """ Encodes an event in the [native Protocol Buffers format][vector_native_protobuf]. This codec is **[experimental][experimental]**. @@ -427,7 +428,7 @@ generated: components: sinks: opentelemetry: configuration: protocol: { [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs """ - native_json: """ + native_json: """ Encodes an event in the [native JSON format][vector_native_json]. This codec is **[experimental][experimental]**. @@ -435,12 +436,12 @@ generated: components: sinks: opentelemetry: configuration: protocol: { [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs """ - protobuf: """ + protobuf: """ Encodes an event as a [Protobuf][protobuf] message. [protobuf]: https://protobuf.dev/ """ - raw_message: """ + raw_message: """ No encoding. This encoding uses the `message` field of a log event. @@ -449,7 +450,7 @@ generated: components: sinks: opentelemetry: configuration: protocol: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ - text: """ + text: """ Plain text encoding. This encoding uses the `message` field of a log event. For metrics, it uses an @@ -459,38 +460,38 @@ generated: components: sinks: opentelemetry: configuration: protocol: { transform) and removing the message field while doing additional parsing on it, as this could lead to the encoding emitting empty strings for the given event. """ + } } - } - csv: { - description: "The CSV Serializer Options." 
- relevant_when: "codec = \"csv\"" - required: true - type: object: options: { - capacity: { - description: """ + csv: { + description: "The CSV Serializer Options." + relevant_when: "codec = \"csv\"" + required: true + type: object: options: { + capacity: { + description: """ Sets the capacity (in bytes) of the internal buffer used in the CSV writer. This defaults to 8KB. """ - required: false - type: uint: default: 8192 - } - delimiter: { - description: "The field delimiter to use when writing CSV." - required: false - type: ascii_char: default: "," - } - double_quote: { - description: """ + required: false + type: uint: default: 8192 + } + delimiter: { + description: "The field delimiter to use when writing CSV." + required: false + type: ascii_char: default: "," + } + double_quote: { + description: """ Enables double quote escapes. This is enabled by default, but you can disable it. When disabled, quotes in field data are escaped instead of doubled. """ - required: false - type: bool: default: true - } - escape: { - description: """ + required: false + type: bool: default: true + } + escape: { + description: """ The escape character to use when writing CSV. In some variants of CSV, quotes are escaped using a special escape character @@ -498,11 +499,11 @@ generated: components: sinks: opentelemetry: configuration: protocol: { To use this, `double_quotes` needs to be disabled as well; otherwise, this setting is ignored. """ - required: false - type: ascii_char: default: "\"" - } - fields: { - description: """ + required: false + type: ascii_char: default: "\"" + } + fields: { + description: """ Configures the fields that are encoded, as well as the order in which they appear in the output. @@ -511,269 +512,269 @@ generated: components: sinks: opentelemetry: configuration: protocol: { Values of type `Array`, `Object`, and `Regex` are not supported, and the output for any of these types is an empty string. """ - required: true - type: array: items: type: string: {} - } - quote: { - description: "The quote character to use when writing CSV." - required: false - type: ascii_char: default: "\"" - } - quote_style: { - description: "The quoting style to use when writing CSV data." - required: false - type: string: { - default: "necessary" - enum: { - always: "Always puts quotes around every field." - necessary: """ + required: true + type: array: items: type: string: {} + } + quote: { + description: "The quote character to use when writing CSV." + required: false + type: ascii_char: default: "\"" + } + quote_style: { + description: "The quoting style to use when writing CSV data." + required: false + type: string: { + default: "necessary" + enum: { + always: "Always puts quotes around every field." + necessary: """ Puts quotes around fields only when necessary. They are necessary when fields contain a quote, delimiter, or record terminator. Quotes are also necessary when writing an empty record (which is indistinguishable from a record with one empty field). """ - never: "Never writes quotes, even if it produces invalid CSV data." - non_numeric: """ + never: "Never writes quotes, even if it produces invalid CSV data." + non_numeric: """ Puts quotes around all fields that are non-numeric. This means that when writing a field that does not parse as a valid float or integer, quotes are used even if they aren't strictly necessary. """ + } } } } } - } - except_fields: { - description: "List of fields that are excluded from the encoded event." 
- required: false - type: array: items: type: string: {} - } - gelf: { - description: "The GELF Serializer Options." - relevant_when: "codec = \"gelf\"" - required: false - type: object: options: max_chunk_size: { - description: """ + except_fields: { + description: "List of fields that are excluded from the encoded event." + required: false + type: array: items: type: string: {} + } + gelf: { + description: "The GELF Serializer Options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: max_chunk_size: { + description: """ Maximum size for each GELF chunked datagram (including 12-byte header). Chunking starts when datagrams exceed this size. For Graylog target, keep at or below 8192 bytes; for Vector target (`gelf` decoding with `chunked_gelf` framing), up to 65,500 bytes is recommended. """ - required: false - type: uint: default: 8192 + required: false + type: uint: default: 8192 + } } - } - json: { - description: "Options for the JsonSerializer." - relevant_when: "codec = \"json\"" - required: false - type: object: options: pretty: { - description: "Whether to use pretty JSON formatting." - required: false - type: bool: default: false + json: { + description: "Options for the JsonSerializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: pretty: { + description: "Whether to use pretty JSON formatting." + required: false + type: bool: default: false + } } - } - metric_tag_values: { - description: """ - Controls how metric tag values are encoded. - - When set to `single`, only the last non-bare value of tags are displayed with the - metric. When set to `full`, all metric tags are exposed as separate assignments. - """ - relevant_when: "codec = \"json\" or codec = \"text\"" - required: false - type: string: { - default: "single" - enum: { - full: "All tags are exposed as arrays of either string or null values." - single: """ + metric_tag_values: { + description: """ + Controls how metric tag values are encoded. + + When set to `single`, only the last non-bare value of tags are displayed with the + metric. When set to `full`, all metric tags are exposed as separate assignments. + """ + relevant_when: "codec = \"json\" or codec = \"text\"" + required: false + type: string: { + default: "single" + enum: { + full: "All tags are exposed as arrays of either string or null values." + single: """ Tag values are exposed as single strings, the same as they were before this config option. Tags with multiple values show the last assigned value, and null values are ignored. """ + } } } - } - only_fields: { - description: "List of fields that are included in the encoded event." - required: false - type: array: items: type: string: {} - } - protobuf: { - description: "Options for the Protobuf serializer." - relevant_when: "codec = \"protobuf\"" - required: true - type: object: options: { - desc_file: { - description: """ + only_fields: { + description: "List of fields that are included in the encoded event." + required: false + type: array: items: type: string: {} + } + protobuf: { + description: "Options for the Protobuf serializer." + relevant_when: "codec = \"protobuf\"" + required: true + type: object: options: { + desc_file: { + description: """ The path to the protobuf descriptor set file. This file is the output of `protoc -I -o ` You can read more [here](https://buf.build/docs/reference/images/#how-buf-images-work). 
""" - required: true - type: string: examples: ["/etc/vector/protobuf_descriptor_set.desc"] - } - message_type: { - description: "The name of the message type to use for serializing." - required: true - type: string: examples: ["package.Message"] + required: true + type: string: examples: ["/etc/vector/protobuf_descriptor_set.desc"] + } + message_type: { + description: "The name of the message type to use for serializing." + required: true + type: string: examples: ["package.Message"] + } } } - } - timestamp_format: { - description: "Format used for timestamp fields." - required: false - type: string: enum: { - rfc3339: "Represent the timestamp as a RFC 3339 timestamp." - unix: "Represent the timestamp as a Unix timestamp." - unix_float: "Represent the timestamp as a Unix timestamp in floating point." - unix_ms: "Represent the timestamp as a Unix timestamp in milliseconds." - unix_ns: "Represent the timestamp as a Unix timestamp in nanoseconds." - unix_us: "Represent the timestamp as a Unix timestamp in microseconds" + timestamp_format: { + description: "Format used for timestamp fields." + required: false + type: string: enum: { + rfc3339: "Represent the timestamp as a RFC 3339 timestamp." + unix: "Represent the timestamp as a Unix timestamp." + unix_float: "Represent the timestamp as a Unix timestamp in floating point." + unix_ms: "Represent the timestamp as a Unix timestamp in milliseconds." + unix_ns: "Represent the timestamp as a Unix timestamp in nanoseconds." + unix_us: "Represent the timestamp as a Unix timestamp in microseconds" + } } } } - } - framing: { - description: "Framing configuration." - required: false - type: object: options: { - character_delimited: { - description: "Options for the character delimited encoder." - relevant_when: "method = \"character_delimited\"" - required: true - type: object: options: delimiter: { - description: "The ASCII (7-bit) character that delimits byte sequences." - required: true - type: ascii_char: {} - } - } - length_delimited: { - description: "Options for the length delimited decoder." - relevant_when: "method = \"length_delimited\"" - required: true - type: object: options: { - length_field_is_big_endian: { - description: "Length field byte order (little or big endian)" - required: false - type: bool: default: true - } - length_field_length: { - description: "Number of bytes representing the field length" - required: false - type: uint: default: 4 - } - length_field_offset: { - description: "Number of bytes in the header before the length field" - required: false - type: uint: default: 0 + framing: { + description: "Framing configuration." + required: false + type: object: options: { + character_delimited: { + description: "Options for the character delimited encoder." + relevant_when: "method = \"character_delimited\"" + required: true + type: object: options: delimiter: { + description: "The ASCII (7-bit) character that delimits byte sequences." + required: true + type: ascii_char: {} } - max_frame_length: { - description: "Maximum frame length" - required: false - type: uint: default: 8388608 + } + length_delimited: { + description: "Options for the length delimited decoder." 
+ relevant_when: "method = \"length_delimited\"" + required: true + type: object: options: { + length_field_is_big_endian: { + description: "Length field byte order (little or big endian)" + required: false + type: bool: default: true + } + length_field_length: { + description: "Number of bytes representing the field length" + required: false + type: uint: default: 4 + } + length_field_offset: { + description: "Number of bytes in the header before the length field" + required: false + type: uint: default: 0 + } + max_frame_length: { + description: "Maximum frame length" + required: false + type: uint: default: 8388608 + } } } - } - max_frame_length: { - description: "Maximum frame length" - relevant_when: "method = \"varint_length_delimited\"" - required: false - type: uint: default: 8388608 - } - method: { - description: "The framing method." - required: true - type: string: enum: { - bytes: "Event data is not delimited at all." - character_delimited: "Event data is delimited by a single ASCII (7-bit) character." - length_delimited: """ + max_frame_length: { + description: "Maximum frame length" + relevant_when: "method = \"varint_length_delimited\"" + required: false + type: uint: default: 8388608 + } + method: { + description: "The framing method." + required: true + type: string: enum: { + bytes: "Event data is not delimited at all." + character_delimited: "Event data is delimited by a single ASCII (7-bit) character." + length_delimited: """ Event data is prefixed with its length in bytes. The prefix is a 32-bit unsigned integer, little endian. """ - newline_delimited: "Event data is delimited by a newline (LF) character." - varint_length_delimited: """ + newline_delimited: "Event data is delimited by a newline (LF) character." + varint_length_delimited: """ Event data is prefixed with its length in bytes as a varint. This is compatible with protobuf's length-delimited encoding. """ + } } } } - } - headers: { - deprecated: true - deprecated_message: "This option has been deprecated, use `request.headers` instead." - description: "A list of custom headers to add to each request." - required: false - type: object: options: "*": { - description: "An HTTP request header and it's value." - required: true - type: string: {} + headers: { + deprecated: true + deprecated_message: "This option has been deprecated, use `request.headers` instead." + description: "A list of custom headers to add to each request." + required: false + type: object: options: "*": { + description: "An HTTP request header and it's value." + required: true + type: string: {} + } } - } - method: { - description: "The HTTP method to use when making the request." - required: false - type: string: { - default: "post" - enum: { - delete: "DELETE." - get: "GET." - head: "HEAD." - options: "OPTIONS." - patch: "PATCH." - post: "POST." - put: "PUT." - trace: "TRACE." + method: { + description: "The HTTP method to use when making the request." + required: false + type: string: { + default: "post" + enum: { + delete: "DELETE." + get: "GET." + head: "HEAD." + options: "OPTIONS." + patch: "PATCH." + post: "POST." + put: "PUT." + trace: "TRACE." + } } } - } - payload_prefix: { - description: """ - A string to prefix the payload with. - - This option is ignored if the encoding is not character delimited JSON. - - If specified, the `payload_suffix` must also be specified and together they must produce a valid JSON object. 
- """ - required: false - type: string: { - default: "" - examples: ["{\"data\":"] + payload_prefix: { + description: """ + A string to prefix the payload with. + + This option is ignored if the encoding is not character delimited JSON. + + If specified, the `payload_suffix` must also be specified and together they must produce a valid JSON object. + """ + required: false + type: string: { + default: "" + examples: ["{\"data\":"] + } } - } - payload_suffix: { - description: """ - A string to suffix the payload with. - - This option is ignored if the encoding is not character delimited JSON. - - If specified, the `payload_prefix` must also be specified and together they must produce a valid JSON object. - """ - required: false - type: string: { - default: "" - examples: ["}"] + payload_suffix: { + description: """ + A string to suffix the payload with. + + This option is ignored if the encoding is not character delimited JSON. + + If specified, the `payload_prefix` must also be specified and together they must produce a valid JSON object. + """ + required: false + type: string: { + default: "" + examples: ["}"] + } } - } - request: { - description: "Outbound HTTP request settings." - required: false - type: object: options: { - adaptive_concurrency: { - description: """ - Configuration of adaptive concurrency parameters. + request: { + description: "Outbound HTTP request settings." + required: false + type: object: options: { + adaptive_concurrency: { + description: """ + Configuration of adaptive concurrency parameters. - These parameters typically do not require changes from the default, and incorrect values can lead to meta-stable or - unstable performance and sink behavior. Proceed with caution. - """ - required: false - type: object: options: { - decrease_ratio: { - description: """ + These parameters typically do not require changes from the default, and incorrect values can lead to meta-stable or + unstable performance and sink behavior. Proceed with caution. + """ + required: false + type: object: options: { + decrease_ratio: { + description: """ The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than `0` and less than `1`. Smaller values cause the algorithm to scale back rapidly @@ -781,11 +782,11 @@ generated: components: sinks: opentelemetry: configuration: protocol: { **Note**: The new limit is rounded down after applying this ratio. """ - required: false - type: float: default: 0.9 - } - ewma_alpha: { - description: """ + required: false + type: float: default: 0.9 + } + ewma_alpha: { + description: """ The weighting of new measurements compared to older measurements. Valid values are greater than `0` and less than `1`. @@ -794,31 +795,31 @@ generated: components: sinks: opentelemetry: configuration: protocol: { the current RTT. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability. """ - required: false - type: float: default: 0.4 - } - initial_concurrency: { - description: """ + required: false + type: float: default: 0.4 + } + initial_concurrency: { + description: """ The initial concurrency limit to use. If not specified, the initial limit is 1 (no concurrency). Datadog recommends setting this value to your service's average limit if you're seeing that it takes a long time to ramp up adaptive concurrency after a restart. You can find this value by looking at the `adaptive_concurrency_limit` metric. 
""" - required: false - type: uint: default: 1 - } - max_concurrency_limit: { - description: """ + required: false + type: uint: default: 1 + } + max_concurrency_limit: { + description: """ The maximum concurrency limit. The adaptive request concurrency limit does not go above this bound. This is put in place as a safeguard. """ - required: false - type: uint: default: 200 - } - rtt_deviation_scale: { - description: """ + required: false + type: uint: default: 200 + } + rtt_deviation_scale: { + description: """ Scale of RTT deviations which are not considered anomalous. Valid values are greater than or equal to `0`, and we expect reasonable values to range from `1.0` to `3.0`. @@ -828,98 +829,98 @@ generated: components: sinks: opentelemetry: configuration: protocol: { can ignore increases in RTT that are within an expected range. This factor is used to scale up the deviation to an appropriate range. Larger values cause the algorithm to ignore larger increases in the RTT. """ - required: false - type: float: default: 2.5 + required: false + type: float: default: 2.5 + } } } - } - concurrency: { - description: """ - Configuration for outbound request concurrency. + concurrency: { + description: """ + Configuration for outbound request concurrency. - This can be set either to one of the below enum values or to a positive integer, which denotes - a fixed concurrency limit. - """ - required: false - type: { - string: { - default: "adaptive" - enum: { - adaptive: """ + This can be set either to one of the below enum values or to a positive integer, which denotes + a fixed concurrency limit. + """ + required: false + type: { + string: { + default: "adaptive" + enum: { + adaptive: """ Concurrency is managed by Vector's [Adaptive Request Concurrency][arc] feature. [arc]: https://vector.dev/docs/architecture/arc/ """ - none: """ + none: """ A fixed concurrency of 1. Only one request can be outstanding at any given time. """ + } } + uint: {} } - uint: {} } - } - headers: { - description: "Additional HTTP headers to add to every HTTP request." - required: false - type: object: { - examples: [{ - Accept: "text/plain" - "X-Event-Level": "{{level}}" - "X-Event-Timestamp": "{{timestamp}}" - "X-My-Custom-Header": "A-Value" - }] - options: "*": { - description: "An HTTP request header and its value. Both header names and values support templating with event data." - required: true - type: string: {} + headers: { + description: "Additional HTTP headers to add to every HTTP request." + required: false + type: object: { + examples: [{ + Accept: "text/plain" + "X-Event-Level": "{{level}}" + "X-Event-Timestamp": "{{timestamp}}" + "X-My-Custom-Header": "A-Value" + }] + options: "*": { + description: "An HTTP request header and its value. Both header names and values support templating with event data." + required: true + type: string: {} + } } } - } - rate_limit_duration_secs: { - description: "The time window used for the `rate_limit_num` option." - required: false - type: uint: { - default: 1 - unit: "seconds" + rate_limit_duration_secs: { + description: "The time window used for the `rate_limit_num` option." + required: false + type: uint: { + default: 1 + unit: "seconds" + } } - } - rate_limit_num: { - description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." 
- required: false - type: uint: { - default: 9223372036854775807 - unit: "requests" + rate_limit_num: { + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + required: false + type: uint: { + default: 9223372036854775807 + unit: "requests" + } } - } - retry_attempts: { - description: "The maximum number of retries to make for failed requests." - required: false - type: uint: { - default: 9223372036854775807 - unit: "retries" + retry_attempts: { + description: "The maximum number of retries to make for failed requests." + required: false + type: uint: { + default: 9223372036854775807 + unit: "retries" + } } - } - retry_initial_backoff_secs: { - description: """ - The amount of time to wait before attempting the first retry for a failed request. + retry_initial_backoff_secs: { + description: """ + The amount of time to wait before attempting the first retry for a failed request. - After the first retry has failed, the fibonacci sequence is used to select future backoffs. - """ - required: false - type: uint: { - default: 1 - unit: "seconds" + After the first retry has failed, the fibonacci sequence is used to select future backoffs. + """ + required: false + type: uint: { + default: 1 + unit: "seconds" + } } - } - retry_jitter_mode: { - description: "The jitter mode to use for retry backoff behavior." - required: false - type: string: { - default: "Full" - enum: { - Full: """ + retry_jitter_mode: { + description: "The jitter mode to use for retry backoff behavior." + required: false + type: string: { + default: "Full" + enum: { + Full: """ Full jitter. The random delay is anywhere from 0 up to the maximum current delay calculated by the backoff @@ -929,143 +930,159 @@ generated: components: sinks: opentelemetry: configuration: protocol: { of creating accidental denial of service (DoS) conditions against your own systems when many clients are recovering from a failure state. """ - None: "No jitter." + None: "No jitter." + } } } - } - retry_max_duration_secs: { - description: "The maximum amount of time to wait between retries." - required: false - type: uint: { - default: 30 - unit: "seconds" + retry_max_duration_secs: { + description: "The maximum amount of time to wait between retries." + required: false + type: uint: { + default: 30 + unit: "seconds" + } } - } - timeout_secs: { - description: """ - The time a request can take before being aborted. + timeout_secs: { + description: """ + The time a request can take before being aborted. - Datadog highly recommends that you do not lower this value below the service's internal timeout, as this could - create orphaned requests, pile on retries, and result in duplicate data downstream. - """ - required: false - type: uint: { - default: 60 - unit: "seconds" + Datadog highly recommends that you do not lower this value below the service's internal timeout, as this could + create orphaned requests, pile on retries, and result in duplicate data downstream. + """ + required: false + type: uint: { + default: 60 + unit: "seconds" + } } } } - } - tls: { - description: "TLS configuration." - required: false - type: object: options: { - alpn_protocols: { - description: """ - Sets the list of supported ALPN protocols. + tls: { + description: "TLS configuration." + required: false + type: object: options: { + alpn_protocols: { + description: """ + Sets the list of supported ALPN protocols. - Declare the supported ALPN protocols, which are used during negotiation with a peer. 
They are prioritized in the order - that they are defined. - """ - required: false - type: array: items: type: string: examples: ["h2"] - } - ca_file: { - description: """ - Absolute path to an additional CA certificate file. + Declare the supported ALPN protocols, which are used during negotiation with a peer. They are prioritized in the order + that they are defined. + """ + required: false + type: array: items: type: string: examples: ["h2"] + } + ca_file: { + description: """ + Absolute path to an additional CA certificate file. - The certificate must be in the DER or PEM (X.509) format. Additionally, the certificate can be provided as an inline string in PEM format. - """ - required: false - type: string: examples: ["/path/to/certificate_authority.crt"] - } - crt_file: { - description: """ - Absolute path to a certificate file used to identify this server. + The certificate must be in the DER or PEM (X.509) format. Additionally, the certificate can be provided as an inline string in PEM format. + """ + required: false + type: string: examples: ["/path/to/certificate_authority.crt"] + } + crt_file: { + description: """ + Absolute path to a certificate file used to identify this server. - The certificate must be in DER, PEM (X.509), or PKCS#12 format. Additionally, the certificate can be provided as - an inline string in PEM format. + The certificate must be in DER, PEM (X.509), or PKCS#12 format. Additionally, the certificate can be provided as + an inline string in PEM format. - If this is set _and_ is not a PKCS#12 archive, `key_file` must also be set. - """ - required: false - type: string: examples: ["/path/to/host_certificate.crt"] - } - key_file: { - description: """ - Absolute path to a private key file used to identify this server. + If this is set _and_ is not a PKCS#12 archive, `key_file` must also be set. + """ + required: false + type: string: examples: ["/path/to/host_certificate.crt"] + } + key_file: { + description: """ + Absolute path to a private key file used to identify this server. - The key must be in DER or PEM (PKCS#8) format. Additionally, the key can be provided as an inline string in PEM format. - """ - required: false - type: string: examples: ["/path/to/host_certificate.key"] - } - key_pass: { - description: """ - Passphrase used to unlock the encrypted key file. + The key must be in DER or PEM (PKCS#8) format. Additionally, the key can be provided as an inline string in PEM format. + """ + required: false + type: string: examples: ["/path/to/host_certificate.key"] + } + key_pass: { + description: """ + Passphrase used to unlock the encrypted key file. - This has no effect unless `key_file` is set. - """ - required: false - type: string: examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] - } - server_name: { - description: """ - Server name to use when using Server Name Indication (SNI). + This has no effect unless `key_file` is set. + """ + required: false + type: string: examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + } + server_name: { + description: """ + Server name to use when using Server Name Indication (SNI). - Only relevant for outgoing connections. - """ - required: false - type: string: examples: ["www.example.com"] - } - verify_certificate: { - description: """ - Enables certificate verification. For components that create a server, this requires that the - client connections have a valid client certificate. For components that initiate requests, - this validates that the upstream has a valid certificate. + Only relevant for outgoing connections. 
+ """ + required: false + type: string: examples: ["www.example.com"] + } + verify_certificate: { + description: """ + Enables certificate verification. For components that create a server, this requires that the + client connections have a valid client certificate. For components that initiate requests, + this validates that the upstream has a valid certificate. - If enabled, certificates must not be expired and must be issued by a trusted - issuer. This verification operates in a hierarchical manner, checking that the leaf certificate (the - certificate presented by the client/server) is not only valid, but that the issuer of that certificate is also valid, and - so on, until the verification process reaches a root certificate. + If enabled, certificates must not be expired and must be issued by a trusted + issuer. This verification operates in a hierarchical manner, checking that the leaf certificate (the + certificate presented by the client/server) is not only valid, but that the issuer of that certificate is also valid, and + so on, until the verification process reaches a root certificate. - Do NOT set this to `false` unless you understand the risks of not verifying the validity of certificates. - """ - required: false - type: bool: {} - } - verify_hostname: { - description: """ - Enables hostname verification. + Do NOT set this to `false` unless you understand the risks of not verifying the validity of certificates. + """ + required: false + type: bool: {} + } + verify_hostname: { + description: """ + Enables hostname verification. - If enabled, the hostname used to connect to the remote host must be present in the TLS certificate presented by - the remote host, either as the Common Name or as an entry in the Subject Alternative Name extension. + If enabled, the hostname used to connect to the remote host must be present in the TLS certificate presented by + the remote host, either as the Common Name or as an entry in the Subject Alternative Name extension. - Only relevant for outgoing connections. + Only relevant for outgoing connections. - Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname. - """ - required: false - type: bool: {} + Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname. + """ + required: false + type: bool: {} + } } } - } - type: { - description: "The communication protocol." - required: true - type: string: enum: http: "Send data over HTTP." - } - uri: { - description: """ - The full URI to make HTTP requests to. - - This should include the protocol and host, but can also include the port, path, and any other valid part of a URI. - """ - required: true - type: string: { - examples: ["https://10.22.212.22:9000/endpoint"] - syntax: "template" + type: { + description: "The communication protocol." + required: true + type: string: enum: http: "Send data over HTTP." + } + uri: { + description: """ + The full URI to make HTTP requests to. + + This should include the protocol and host, but can also include the port, path, and any other valid part of a URI. + """ + required: true + type: string: { + examples: ["https://10.22.212.22:9000/endpoint"] + syntax: "template" + } } } } + use_otlp_encoding: { + description: """ + Setting this field to `true`, will override all encoding settings and it will encode requests based on the + [OpenTelemetry protocol](https://opentelemetry.io/docs/specs/otel/protocol/). 
+
+ The endpoint path determines the OTLP data type:
+ * `v1/logs` → OTLP Logs
+ * `v1/traces` → OTLP Traces
+ * `v1/metrics` → OTLP Metrics
+
+ More information is available in the [OTLP/HTTP specification](https://opentelemetry.io/docs/specs/otlp/#otlphttp-request).
+ """
+ required: false
+ type: bool: default: false
+ }
}
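Usage sketch (reviewer note, not part of the patch): a minimal `opentelemetry` sink
configuration exercising the new `use_otlp_encoding` option, assembled from the generated
docs above. The component IDs are placeholders, and the collector address assumes the
conventional OTLP/HTTP port 4318.

    sinks:
      otlp_logs:
        type: opentelemetry
        inputs: ["my_logs_source"]   # hypothetical upstream component ID
        protocol:
          type: http
          # The request path selects the OTLP data type (v1/logs → OTLP Logs).
          uri: "http://localhost:4318/v1/logs"
          # Overrides any `encoding` settings; requests are encoded per the
          # OpenTelemetry protocol.
          use_otlp_encoding: true

Leaving `use_otlp_encoding` at its default of `false` keeps the sink's existing
encoding behavior unchanged.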