@@ -3,11 +3,6 @@
## ActiveMQ WebConsole URL
url = "http://127.0.0.1:8161"

## Required ActiveMQ Endpoint
## deprecated in 1.11; use the url option
# server = "192.168.50.10"
# port = 8161

## Credentials for basic HTTP authentication
# username = "admin"
# password = "admin"
@@ -1,3 +1,4 @@
## DEPRECATED: The "aerospike" plugin is deprecated in version 1.30.0 and will be removed in 1.40.0, use 'inputs.prometheus' with the Aerospike Prometheus Exporter instead.
# Read stats from aerospike server(s)
[[inputs.aerospike]]
## Aerospike servers to connect to (with port)
@@ -9,13 +10,30 @@
# password = "pa$$word"

## Optional TLS Config
# enable_tls = false
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# tls_name = "tlsname"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Set to true/false to enforce TLS being enabled/disabled. If not set,
## enable TLS only if any of the other options are specified.
# tls_enable =
## Trusted root certificates for server
# tls_ca = "/path/to/cafile"
## Used for TLS client certificate authentication
# tls_cert = "/path/to/certfile"
## Used for TLS client certificate authentication
# tls_key = "/path/to/keyfile"
## Password for the key file if it is encrypted
# tls_key_pwd = ""
## Send the specified TLS server name via SNI
# tls_server_name = "kubernetes.example.com"
## Minimal TLS version accepted by the client
# tls_min_version = "TLS12"
## List of ciphers to accept, by default all secure ciphers will be accepted
## See https://pkg.go.dev/crypto/tls#pkg-constants for supported values.
## Use "all", "secure" and "insecure" to add all support ciphers, secure
## suites or insecure suites respectively.
# tls_cipher_suites = ["secure"]
## Renegotiation method, "never", "once" or "freely"
# tls_renegotiation_method = "never"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
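
## As an illustration only (hypothetical paths and server name), a minimal
## TLS client setup using the options above might look like:
# tls_enable = true
# tls_ca = "/etc/telegraf/aerospike/ca.pem"
# tls_cert = "/etc/telegraf/aerospike/client-cert.pem"
# tls_key = "/etc/telegraf/aerospike/client-key.pem"
# tls_server_name = "aerospike.example.com"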

# Feature Options
# Add namespace variable to limit the namespaces executed on
@@ -19,41 +19,45 @@
# public_key_id = ""
# role_name = ""

## Specify the ali cloud region list to be queried for metrics and objects discovery
## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here
## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Specify the Alibaba Cloud regions to be queried for metric and object discovery
## If not set, all supported regions (see below) will be covered, which can
## put a significant load on the API, so the recommendation is to
## limit the list as much as possible.
## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
## Default supported regions are:
## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,
## cn-shanghai,cn-shenzhen,cn-heyuan,cn-chengdu,cn-hongkong,
## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,
## eu-west-1,me-east-1
##
## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich
## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then
## it will be reported on the start - for example for 'acs_cdn' project:
## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
## From a discovery perspective this sets the scope for object discovery;
## the discovered info can be used to enrich the metrics with object
## attributes/tags. Discovery is not supported for all projects.
## Currently, discovery is supported for the following projects:
## - acs_ecs_dashboard
## - acs_rds_dashboard
## - acs_slb_dashboard
## - acs_vpc_eip
regions = ["cn-hongkong"]

# The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute, 5 minute, or larger intervals.
# See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Aliyun OpenAPI
# and will not be collected by Telegraf.
#
## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
## Requested AliyunCMS aggregation Period (required)
## The period must be a multiple of 60s, and the minimum for AliyunCMS
## metrics is 1 minute (60s). However, not all metrics are available at the
## one-minute period. Some are collected at 3 minute, 5 minute, or larger
## intervals.
## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
## Note that if a period is configured that is smaller than the minimum for
## a particular metric, that metric will not be returned by Aliyun's
## OpenAPI and will not be collected by Telegraf.
period = "5m"

## Collection Delay (required - must account for metrics availability via AliyunCMS API)
## Collection Delay (required)
## The delay must account for metrics availability via the AliyunCMS API.
delay = "1m"

## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
## Recommended: use a metric 'interval' that is a multiple of 'period'
## to avoid gaps or overlap in pulled data
interval = "5m"

## Metric Statistic Project (required)
@@ -65,36 +69,52 @@
## How often the discovery API call is executed (default 1m)
# discovery_interval = "1m"

## Metrics to Pull (Required)
## NOTE: Due to the way TOML is parsed, tables must be at the END of the
## plugin definition, otherwise additional config options are read as part of
## the table

## Metrics to Pull
## At least one metrics definition is required
[[inputs.aliyuncms.metrics]]
## Metrics names to be requested,
## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]
## Metric names to be requested.
## Descriptions can be found here (per project):
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
names = ["InstanceActiveConnection", "InstanceNewConnection"]

## Dimension filters for Metric (these are optional).
## This allows to get additional metric dimension. If dimension is not specified it can be returned or
## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled)
## Values specified here would be added into the list of discovered objects.
## You can specify either single dimension:
#dimensions = '{"instanceId": "p-example"}'
## Dimension filters for Metric (optional)
## This allows getting additional metric dimensions. If a dimension is not
## specified it can be returned, or the data can be aggregated; it depends
## on the particular metric. You can find details here:
## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
##
## Note that by default the dimension filter includes the list of discovered
## objects in scope (if discovery is enabled). Values specified here will
## be added to the list of discovered objects. You can specify either a
## single dimension:
# dimensions = '{"instanceId": "p-example"}'

## Or you can specify several dimensions at once:
#dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
## Or you can specify several dimensions at once:
# dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'

## Enrichment tags, can be added from discovery (if supported)
## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the Describe<ObjectType> API per project.
## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
#tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]
## The following tags added by default: regionId (if discovery enabled), userId, instanceId.
## Tag Query Path
## The following tags are added by default:
## * regionId (if discovery enabled)
## * userId
## * instanceId
## Enrichment tags can be added from discovery (if supported)
## Notation is
## <measurement_tag_name>:<JMES query path (https://jmespath.org/tutorial.html)>
## To figure out which fields are available, consult the
## Describe<ObjectType> API per project. For example, for SLB see:
## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
# tag_query_path = [
# "address:Address",
# "name:LoadBalancerName",
# "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
# ]

## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery
## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage
## of discovery scope vs monitoring scope
#allow_dps_without_discovery = false
## Allow metrics without discovery data, if discovery is enabled.
## If set to true, metrics without discovery data will be emitted; otherwise
## they are dropped. This can help when debugging dimension filters, or with
## partial coverage of discovery scope vs monitoring scope.
# allow_dps_without_discovery = false
@@ -34,23 +34,36 @@
## If true, queue will be passively declared.
# queue_passive = false

## Additional arguments when consuming from Queue
# queue_consume_arguments = { }
# queue_consume_arguments = {"x-stream-offset" = "first"}

## Additional queue arguments.
# queue_arguments = { }
# queue_arguments = {"x-max-length" = 100}

## A binding between the exchange and queue using this binding key is
## created. If unset, no binding is created.
binding_key = "#"

## Maximum number of messages server should give to the worker.
# prefetch_count = 50

## Maximum messages to read from the broker that have not been written by an
## output. For best throughput set based on the number of metrics within
## each message and the size of the output's metric_batch_size.
## Max undelivered messages
## This plugin uses tracking metrics, which ensure messages are delivered to
## outputs before being acknowledged to the original broker, so data is not
## lost. This option sets the maximum number of messages to read from the
## broker that have not yet been written by an output.
##
## For example, if each message from the queue contains 10 metrics and the
## output metric_batch_size is 1000, setting this to 100 will ensure that a
## full batch is collected and the write is triggered immediately without
## waiting until the next flush_interval.
## This value needs to be picked with awareness of the agent's
## metric_batch_size value as well. Setting max undelivered messages too
## high can result in a constant stream of data batches to the output, while
## setting it too low may mean the broker's messages are never flushed.
# max_undelivered_messages = 1000
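
## A worked sketch of the example above (hypothetical numbers): with 10
## metrics per message, 100 undelivered messages hold 1000 metrics, matching
## an agent metric_batch_size of 1000 so each read fills exactly one batch.
# max_undelivered_messages = 100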

## Timeout for establishing the connection to a broker
# timeout = "30s"

## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
@@ -63,10 +76,18 @@
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## Content encoding for message payloads, can be set to "gzip" to or
## "identity" to apply no encoding.
## Content encoding for message payloads, can be set to
## "gzip", "identity" or "auto"
## - Use "gzip" to decode gzip
## - Use "identity" to apply no encoding
## - Use "auto" determine the encoding using the ContentEncoding header
# content_encoding = "identity"

## Maximum size of a decoded message.
## Acceptable units are B, KiB, KB, MiB, MB...
## Without quotes and units, the value is interpreted as size in bytes.
# max_decompression_size = "500MB"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
@@ -0,0 +1,77 @@
# Gather Azure resources metrics from Azure Monitor API
[[inputs.azure_monitor]]
# can be found under Overview->Essentials in the Azure portal for your application/service
subscription_id = "<<SUBSCRIPTION_ID>>"
# can be obtained by registering an application under Azure Active Directory
client_id = "<<CLIENT_ID>>"
# can be obtained by registering an application under Azure Active Directory.
# If not specified, the Default Azure Credentials chain will be attempted:
# - Environment credentials (AZURE_*)
# - Workload Identity in Kubernetes cluster
# - Managed Identity
# - Azure CLI auth
# - Developer Azure CLI auth
client_secret = "<<CLIENT_SECRET>>"
# can be found under Azure Active Directory->Properties
tenant_id = "<<TENANT_ID>>"
# Define the optional Azure cloud option, e.g. AzureChina, AzureGovernment or AzurePublic. The default is AzurePublic.
# cloud_option = "AzurePublic"
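
# A minimal sketch relying on the Default Azure Credentials chain described
# above (hypothetical IDs; client_id and client_secret are omitted on the
# assumption that e.g. Managed Identity or Azure CLI auth is available):
# [[inputs.azure_monitor]]
#   subscription_id = "00000000-0000-0000-0000-000000000000"
#   tenant_id = "11111111-1111-1111-1111-111111111111"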

# resource target #1 to collect metrics from
[[inputs.azure_monitor.resource_target]]
# can be found under Overview->Essentials->JSON View in the Azure portal for your application/service
# must start with 'resourceGroups/...' (the '/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx'
# prefix must be removed from the beginning of the Resource ID property value)
resource_id = "<<RESOURCE_ID>>"
# the metric names to collect
# leave the array empty to use all metrics available to this resource
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
# metrics aggregation type value to collect
# can be 'Total', 'Count', 'Average', 'Minimum', 'Maximum'
# leave the array empty to collect all aggregation type values for each metric
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]
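
# As an illustration only (hypothetical resource and metric names), a
# filled-in resource target for a storage account might look like:
# [[inputs.azure_monitor.resource_target]]
#   resource_id = "resourceGroups/my-rg/providers/Microsoft.Storage/storageAccounts/mystorage"
#   metrics = ["Transactions", "Availability"]
#   aggregations = ["Total", "Average"]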

# resource target #2 to collect metrics from
[[inputs.azure_monitor.resource_target]]
resource_id = "<<RESOURCE_ID>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]

# resource group target #1 to collect metrics from resources under it with resource type
[[inputs.azure_monitor.resource_group_target]]
# the resource group name
resource_group = "<<RESOURCE_GROUP_NAME>>"

# defines the resources to collect metrics from
[[inputs.azure_monitor.resource_group_target.resource]]
# the resource type
resource_type = "<<RESOURCE_TYPE>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]

# defines the resources to collect metrics from
[[inputs.azure_monitor.resource_group_target.resource]]
resource_type = "<<RESOURCE_TYPE>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]

# resource group target #2 to collect metrics from resources under it with resource type
[[inputs.azure_monitor.resource_group_target]]
resource_group = "<<RESOURCE_GROUP_NAME>>"

[[inputs.azure_monitor.resource_group_target.resource]]
resource_type = "<<RESOURCE_TYPE>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]

# subscription target #1 to collect metrics from resources under it with resource type
[[inputs.azure_monitor.subscription_target]]
resource_type = "<<RESOURCE_TYPE>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]

# subscription target #2 to collect metrics from resources under it with resource type
[[inputs.azure_monitor.subscription_target]]
resource_type = "<<RESOURCE_TYPE>>"
metrics = [ "<<METRIC>>", "<<METRIC>>" ]
aggregations = [ "<<AGGREGATION>>", "<<AGGREGATION>>" ]
@@ -1,10 +1,8 @@
# Gather Azure Storage Queue metrics
[[inputs.azure_storage_queue]]
## Required Azure Storage Account name
## Azure Storage Account name and shared access key (required)
account_name = "mystorageaccount"

## Required Azure Storage Account access key
account_key = "storageaccountaccesskey"

## Set to false to disable peeking age of oldest message (executes faster)
## Disable peeking the age of the oldest message (faster)
# peek_oldest_message_age = true
@@ -1,4 +1,5 @@
# Read metrics of bcache from stats_total and dirty_data
# This plugin ONLY supports Linux
[[inputs.bcache]]
## Bcache sets path
## If not specified, then default is:
Expand Up @@ -6,5 +6,10 @@
# gather_memory_contexts = false
# gather_views = false

## Report XML v3 counters as integers instead of unsigned values for
## backward compatibility. Set this to false as soon as possible!
## Values are clipped if they exceed the integer range.
# report_counters_as_int = true

## Timeout for HTTP requests made to the bind nameserver
# timeout = "4s"