chore: mirror k8s-monitoring-2.0.12

upstream_repo:
This commit is contained in:
Nikolai Rodionov 2025-02-21 09:42:13 +01:00
commit 57712751f1
No known key found for this signature in database
GPG Key ID: 0639A45505F3BFA6
440 changed files with 101268 additions and 0 deletions
charts/k8s-monitoring
.helmignore.updatecli-alloy.yamlChart.lockChart.yaml
alloyModules
LICENSE
modules
databases/kv/etcd
kubernetes
system/node-exporter
charts
alloy
.helmignoreCHANGELOG.mdChart.lockChart.yamlREADME.md
charts/crds
ci
config
templates
values.yaml
feature-annotation-autodiscovery
feature-application-observability

@ -0,0 +1,8 @@
.ct.yaml
data-alloy
docs
schema-mods
tests
Makefile
README.md
README.md.gotmpl

@ -0,0 +1,67 @@
---
# Updatecli policy: keep the five "alloy" Helm chart dependencies of the
# "k8s-monitoring" chart in sync with the latest upstream chart release.
name: Update dependency "alloy" for Helm chart "k8s-monitoring"

sources:
  alloy:
    name: Get latest "alloy" Helm chart version
    kind: helmchart
    spec:
      name: alloy
      url: https://grafana.github.io/helm-charts
      versionfilter:
        kind: semver
        pattern: '*'

conditions:
  # Guard: only proceed when dependencies[10] in Chart.yaml is the "alloy"
  # chart — the target keys below address dependencies by fixed index, so
  # this protects against the list being reordered.
  alloy:
    name: Ensure Helm chart dependency "alloy" is specified
    kind: yaml
    spec:
      file: charts/k8s-monitoring/Chart.yaml
      key: $.dependencies[10].name
      value: alloy
    disablesourceinput: true

targets:
  # Each alias (alloy-metrics .. alloy-profiles) pins the same upstream
  # "alloy" chart at a different index of Chart.yaml's dependencies list
  # (indexes 10-14); all five versions are bumped from the single source.
  alloy-metrics:
    name: Bump Helm chart dependency "alloy-metrics" for Helm chart "k8s-monitoring"
    kind: helmchart
    spec:
      file: Chart.yaml
      key: $.dependencies[10].version
      name: charts/k8s-monitoring
      versionincrement: none
    sourceid: alloy
  alloy-singleton:
    name: Bump Helm chart dependency "alloy-singleton" for Helm chart "k8s-monitoring"
    kind: helmchart
    spec:
      file: Chart.yaml
      key: $.dependencies[11].version
      name: charts/k8s-monitoring
      versionincrement: none
    sourceid: alloy
  alloy-logs:
    name: Bump Helm chart dependency "alloy-logs" for Helm chart "k8s-monitoring"
    kind: helmchart
    spec:
      file: Chart.yaml
      key: $.dependencies[12].version
      name: charts/k8s-monitoring
      versionincrement: none
    sourceid: alloy
  alloy-receiver:
    name: Bump Helm chart dependency "alloy-receiver" for Helm chart "k8s-monitoring"
    kind: helmchart
    spec:
      file: Chart.yaml
      key: $.dependencies[13].version
      name: charts/k8s-monitoring
      versionincrement: none
    sourceid: alloy
  alloy-profiles:
    name: Bump Helm chart dependency "alloy-profiles" for Helm chart "k8s-monitoring"
    kind: helmchart
    spec:
      file: Chart.yaml
      key: $.dependencies[14].version
      name: charts/k8s-monitoring
      versionincrement: none
    sourceid: alloy

@ -0,0 +1,48 @@
# NOTE(review): generated by "helm dependency update" — do not edit by hand;
# the digest below is computed from Chart.yaml's dependencies.
dependencies:
- name: feature-annotation-autodiscovery
  repository: ""
  version: 1.0.0
- name: feature-application-observability
  repository: ""
  version: 1.0.0
- name: feature-auto-instrumentation
  repository: ""
  version: 1.0.0
- name: feature-cluster-events
  repository: ""
  version: 1.0.0
- name: feature-cluster-metrics
  repository: ""
  version: 1.0.0
- name: feature-integrations
  repository: ""
  version: 1.0.0
- name: feature-node-logs
  repository: ""
  version: 1.0.0
- name: feature-pod-logs
  repository: ""
  version: 1.0.0
- name: feature-profiling
  repository: ""
  version: 1.0.0
- name: feature-prometheus-operator-objects
  repository: ""
  version: 1.0.0
- name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
digest: sha256:84065dcd958d8eefc9b179f4f27a814f79a35ab687fd5f624e90f443d59bac64
generated: "2025-01-24T13:18:52.317472-06:00"

@ -0,0 +1,89 @@
apiVersion: v2
appVersion: 2.0.12
dependencies:
# Feature charts are bundled locally (repository: "") and toggled through
# their alias's ".enabled" value.
- alias: annotationAutodiscovery
  condition: annotationAutodiscovery.enabled
  name: feature-annotation-autodiscovery
  repository: ""
  version: 1.0.0
- alias: applicationObservability
  condition: applicationObservability.enabled
  name: feature-application-observability
  repository: ""
  version: 1.0.0
- alias: autoInstrumentation
  condition: autoInstrumentation.enabled
  name: feature-auto-instrumentation
  repository: ""
  version: 1.0.0
- alias: clusterEvents
  condition: clusterEvents.enabled
  name: feature-cluster-events
  repository: ""
  version: 1.0.0
- alias: clusterMetrics
  condition: clusterMetrics.enabled
  name: feature-cluster-metrics
  repository: ""
  version: 1.0.0
# NOTE(review): "integrations" is the only feature without a "condition" —
# confirm it is intentionally always enabled.
- alias: integrations
  name: feature-integrations
  repository: ""
  version: 1.0.0
- alias: nodeLogs
  condition: nodeLogs.enabled
  name: feature-node-logs
  repository: ""
  version: 1.0.0
- alias: podLogs
  condition: podLogs.enabled
  name: feature-pod-logs
  repository: ""
  version: 1.0.0
- alias: profiling
  condition: profiling.enabled
  name: feature-profiling
  repository: ""
  version: 1.0.0
- alias: prometheusOperatorObjects
  condition: prometheusOperatorObjects.enabled
  name: feature-prometheus-operator-objects
  repository: ""
  version: 1.0.0
# The same upstream "alloy" chart is declared five times under different
# aliases (dependencies[10..14]); the updatecli policy bumps all five
# versions together, keyed by these indexes.
- alias: alloy-metrics
  condition: alloy-metrics.enabled
  name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- alias: alloy-singleton
  condition: alloy-singleton.enabled
  name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- alias: alloy-logs
  condition: alloy-logs.enabled
  name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- alias: alloy-receiver
  condition: alloy-receiver.enabled
  name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
- alias: alloy-profiles
  condition: alloy-profiles.enabled
  name: alloy
  repository: https://grafana.github.io/helm-charts
  version: 0.11.0
description: Capture all telemetry data from your Kubernetes cluster.
icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg
maintainers:
- email: pete.wall@grafana.com
  name: petewall
- email: robert.lankford@grafana.com
  name: rlankfo
name: k8s-monitoring
sources:
- https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/k8s-monitoring
type: application
version: 2.0.12

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,244 @@
/*
Module: job-etcd
Description: Scrapes etcd
Note: Every argument except for "forward_to" is optional, and does have a defined default value. However, the values for these
arguments are not defined using the default = " ... " argument syntax, but rather using the coalesce(argument.value, " ... ").
This is because if the argument passed in from another consuming module is set to null, the default = " ... " syntax
does not override the value passed in, whereas coalesce() will return the first non-null value.
*/
declare "kubernetes" {
  // arguments for kubernetes discovery
  argument "namespaces" {
    comment = "The namespaces to look for targets in (default: [] is all namespaces)"
    optional = true
  }

  argument "field_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
    comment = "The field selectors to use to find matching targets (default: [])"
    optional = true
  }

  argument "label_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    comment = "The label selectors to use to find matching targets (default: [\"app.kubernetes.io/component=etcd\"])"
    optional = true
  }

  argument "port_name" {
    comment = "The name of the port to scrape metrics from (default: metrics)"
    optional = true
  }

  // etcd service discovery for all of the pods
  discovery.kubernetes "etcd" {
    role = "pod"

    selectors {
      role  = "pod"
      field = string.join(coalesce(argument.field_selectors.value, []), ",")
      label = string.join(coalesce(argument.label_selectors.value, ["app.kubernetes.io/component=etcd"]), ",")
    }

    namespaces {
      names = coalesce(argument.namespaces.value, [])
    }
  }

  // etcd relabelings (pre-scrape)
  discovery.relabel "kubernetes" {
    targets = discovery.kubernetes.etcd.targets

    // keep only the specified metrics port name, and pods that are Running and ready
    rule {
      source_labels = [
        "__meta_kubernetes_pod_container_port_name",
        "__meta_kubernetes_pod_phase",
        "__meta_kubernetes_pod_ready",
      ]
      separator = "@"
      regex     = coalesce(argument.port_name.value, "metrics") + "@Running@true"
      action    = "keep"
    }

    // drop any init containers
    rule {
      source_labels = ["__meta_kubernetes_pod_container_init"]
      regex         = "true"
      action        = "drop"
    }

    // set the namespace label
    rule {
      source_labels = ["__meta_kubernetes_namespace"]
      target_label  = "namespace"
    }

    // set the pod label
    rule {
      source_labels = ["__meta_kubernetes_pod_name"]
      target_label  = "pod"
    }

    // set the container label
    rule {
      source_labels = ["__meta_kubernetes_pod_container_name"]
      target_label  = "container"
    }

    // set a workload label as "<controller kind>/<controller name>"
    rule {
      source_labels = [
        "__meta_kubernetes_pod_controller_kind",
        "__meta_kubernetes_pod_controller_name",
      ]
      separator    = "/"
      target_label = "workload"
    }

    // remove the hash from the ReplicaSet
    rule {
      source_labels = ["workload"]
      regex         = "(ReplicaSet/.+)-.+"
      target_label  = "workload"
    }

    // set the app name if specified as metadata labels "app:" or "app.kubernetes.io/name:" or "k8s-app:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_name",
        "__meta_kubernetes_pod_label_k8s_app",
        "__meta_kubernetes_pod_label_app",
      ]
      separator    = ";"
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "app"
    }

    // set the component if specified as metadata labels "component:" or "app.kubernetes.io/component:" or "k8s-component:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_component",
        "__meta_kubernetes_pod_label_k8s_component",
        "__meta_kubernetes_pod_label_component",
      ]
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "component"
    }

    // set a source label
    rule {
      action       = "replace"
      replacement  = "kubernetes"
      target_label = "source"
    }
  }

  export "output" {
    value = discovery.relabel.kubernetes.output
  }
}
declare "local" {
  argument "port" {
    // NOTE(review): 9150 is the memcached-exporter default port — confirm
    // this is the intended default for an etcd metrics endpoint.
    comment = "The port to use (default: 9150)"
    optional = true
  }

  // arguments for local (static)
  discovery.relabel "local" {
    targets = [
      {
        // join host and port with an explicit ":" — the previous
        // concatenation produced "localhost9150", which is not a
        // valid scrape address
        "__address__" = string.format("localhost:%s", coalesce(argument.port.value, "9150")),
        "source"      = "local",
      },
    ]
  }

  export "output" {
    value = discovery.relabel.local.output
  }
}
declare "scrape" {
  // the targets to scrape, typically the output of the "kubernetes" or "local" declares above
  argument "targets" {
    comment = "Must be a list() of targets"
  }

  // where to send the scraped (and relabeled) metrics
  argument "forward_to" {
    comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
  }

  argument "job_label" {
    comment = "The job label to add for all etcd metric (default: integrations/etcd)"
    optional = true
  }

  argument "keep_metrics" {
    comment = "A regular expression of metrics to keep (default: see below)"
    optional = true
  }

  argument "drop_metrics" {
    comment = "A regular expression of metrics to drop (default: see below)"
    optional = true
  }

  argument "scrape_interval" {
    comment = "How often to scrape metrics from the targets (default: 60s)"
    optional = true
  }

  argument "scrape_timeout" {
    comment = "How long before a scrape times out (default: 10s)"
    optional = true
  }

  argument "max_cache_size" {
    comment = "The maximum number of elements to hold in the relabeling cache (default: 100000).  This should be at least 2x-5x your largest scrape target or samples appended rate."
    optional = true
  }

  argument "clustering" {
    // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
    comment = "Whether or not clustering should be enabled (default: false)"
    optional = true
  }

  // etcd scrape job
  prometheus.scrape "etcd" {
    job_name        = coalesce(argument.job_label.value, "integrations/etcd")
    forward_to      = [prometheus.relabel.etcd.receiver]
    targets         = argument.targets.value
    scrape_interval = coalesce(argument.scrape_interval.value, "60s")
    scrape_timeout  = coalesce(argument.scrape_timeout.value, "10s")

    clustering {
      enabled = coalesce(argument.clustering.value, false)
    }
  }

  // etcd metric relabelings (post-scrape)
  prometheus.relabel "etcd" {
    forward_to     = argument.forward_to.value
    max_cache_size = coalesce(argument.max_cache_size.value, 100000)

    // drop metrics that match the drop_metrics regex
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.drop_metrics.value, "(^(go|process)_.+$)")
      action        = "drop"
    }

    // keep only metrics that match the keep_metrics regex
    // NOTE(review): this default keep-list (commands_total, current_items,
    // items_evicted_total, ...) looks like memcached metric names prefixed
    // with "etcd_" — verify against the metrics the etcd endpoint actually
    // exposes.
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.keep_metrics.value, "(up|etcd_(commands_total|connections_total|current_(bytes|connections|items)|items_(evicted_total|total)|max_connections|read_bytes_total|up|uptime_seconds|version|written_bytes_total))")
      action        = "keep"
    }
  }
}

@ -0,0 +1,223 @@
/*
Module: job-cert-manager
Description: Scrapes cert-manager
Note: Every argument except for "forward_to" is optional, and does have a defined default value. However, the values for these
arguments are not defined using the default = " ... " argument syntax, but rather using the coalesce(argument.value, " ... ").
This is because if the argument passed in from another consuming module is set to null, the default = " ... " syntax
does not override the value passed in, whereas coalesce() will return the first non-null value.
*/
declare "kubernetes" {
  // arguments for kubernetes discovery
  argument "namespaces" {
    comment = "The namespaces to look for targets in (default: [] is all namespaces)"
    optional = true
  }

  argument "field_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
    comment = "The field selectors to use to find matching targets (default: [])"
    optional = true
  }

  argument "label_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    comment = "The label selectors to use to find matching targets (default: [\"app.kubernetes.io/name=cert-manager\"])"
    optional = true
  }

  argument "port_name" {
    comment = "The name of the port to scrape metrics from (default: http-metrics)"
    optional = true
  }

  // cert-manager service discovery for all of the pods
  discovery.kubernetes "cert_manager" {
    role = "pod"

    selectors {
      role  = "pod"
      field = string.join(coalesce(argument.field_selectors.value, []), ",")
      label = string.join(coalesce(argument.label_selectors.value, ["app.kubernetes.io/name=cert-manager"]), ",")
    }

    namespaces {
      names = coalesce(argument.namespaces.value, [])
    }
  }

  // cert-manager relabelings (pre-scrape)
  discovery.relabel "kubernetes" {
    targets = discovery.kubernetes.cert_manager.targets

    // keep only the specified metrics port name, and pods that are Running and ready
    rule {
      source_labels = [
        "__meta_kubernetes_pod_container_port_name",
        "__meta_kubernetes_pod_phase",
        "__meta_kubernetes_pod_ready",
      ]
      separator = "@"
      regex     = coalesce(argument.port_name.value, "http-metrics") + "@Running@true"
      action    = "keep"
    }

    // drop any init containers
    rule {
      source_labels = ["__meta_kubernetes_pod_container_init"]
      regex         = "true"
      action        = "drop"
    }

    // set the namespace label
    rule {
      source_labels = ["__meta_kubernetes_namespace"]
      target_label  = "namespace"
    }

    // set the pod label
    rule {
      source_labels = ["__meta_kubernetes_pod_name"]
      target_label  = "pod"
    }

    // set the container label
    rule {
      source_labels = ["__meta_kubernetes_pod_container_name"]
      target_label  = "container"
    }

    // set a workload label as "<controller kind>/<controller name>"
    rule {
      source_labels = [
        "__meta_kubernetes_pod_controller_kind",
        "__meta_kubernetes_pod_controller_name",
      ]
      separator    = "/"
      target_label = "workload"
    }

    // remove the hash from the ReplicaSet
    rule {
      source_labels = ["workload"]
      regex         = "(ReplicaSet/.+)-.+"
      target_label  = "workload"
    }

    // set the app name if specified as metadata labels "app:" or "app.kubernetes.io/name:" or "k8s-app:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_name",
        "__meta_kubernetes_pod_label_k8s_app",
        "__meta_kubernetes_pod_label_app",
      ]
      separator    = ";"
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "app"
    }

    // set the component if specified as metadata labels "component:" or "app.kubernetes.io/component:" or "k8s-component:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_component",
        "__meta_kubernetes_pod_label_k8s_component",
        "__meta_kubernetes_pod_label_component",
      ]
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "component"
    }

    // set a source label
    rule {
      action       = "replace"
      replacement  = "kubernetes"
      target_label = "source"
    }
  }

  export "output" {
    value = discovery.relabel.kubernetes.output
  }
}
declare "scrape" {
  // the targets to scrape, typically the output of the "kubernetes" declare above
  argument "targets" {
    comment = "Must be a list() of targets"
  }

  // where to send the scraped (and relabeled) metrics
  argument "forward_to" {
    comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
  }

  argument "job_label" {
    comment = "The job label to add for all cert-manager metric (default: integrations/cert-manager)"
    optional = true
  }

  argument "keep_metrics" {
    comment = "A regular expression of metrics to keep (default: see below)"
    optional = true
  }

  argument "drop_metrics" {
    comment = "A regular expression of metrics to drop (default: see below)"
    optional = true
  }

  argument "scrape_interval" {
    comment = "How often to scrape metrics from the targets (default: 60s)"
    optional = true
  }

  argument "scrape_timeout" {
    comment = "How long before a scrape times out (default: 10s)"
    optional = true
  }

  argument "max_cache_size" {
    comment = "The maximum number of elements to hold in the relabeling cache (default: 100000).  This should be at least 2x-5x your largest scrape target or samples appended rate."
    optional = true
  }

  argument "clustering" {
    // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/
    comment = "Whether or not clustering should be enabled (default: false)"
    optional = true
  }

  // cert-manager scrape job
  prometheus.scrape "cert_manager" {
    job_name        = coalesce(argument.job_label.value, "integrations/cert-manager")
    forward_to      = [prometheus.relabel.cert_manager.receiver]
    targets         = argument.targets.value
    scrape_interval = coalesce(argument.scrape_interval.value, "60s")
    scrape_timeout  = coalesce(argument.scrape_timeout.value, "10s")

    clustering {
      enabled = coalesce(argument.clustering.value, false)
    }
  }

  // cert-manager metric relabelings (post-scrape)
  prometheus.relabel "cert_manager" {
    forward_to     = argument.forward_to.value
    max_cache_size = coalesce(argument.max_cache_size.value, 100000)

    // drop metrics that match the drop_metrics regex
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.drop_metrics.value, "(^(go|process)_.+$)")
      action        = "drop"
    }

    // keep only metrics that match the keep_metrics regex
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.keep_metrics.value, "(up|(certmanager_(certificate_(expiration_timestamp_seconds|ready_status)|clock_time_seconds|controller_sync_call_count|http_acme_client_request_(count|duration_seconds_(count|sum)))|container_(cpu_(cfs_(periods|throttled_periods)_total|usage_seconds_total)|memory_usage_bytes|network_(receive|transmit)_bytes_total)|kube_pod_container_resource_(limits|requests)_(cpu_cores|memory_bytes)))")
      action        = "keep"
    }
  }
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,268 @@
/*
Module: job-node_exporter
Description: Scrapes node_exporter
Note: Every argument except for "forward_to" is optional, and does have a defined default value. However, the values for these
arguments are not defined using the default = " ... " argument syntax, but rather using the coalesce(argument.value, " ... ").
This is because if the argument passed in from another consuming module is set to null, the default = " ... " syntax will
does not override the value passed in, where coalesce() will return the first non-null value.
*/
declare "kubernetes" {
  // arguments for kubernetes discovery
  argument "namespaces" {
    comment = "The namespaces to look for targets in (default: [] is all namespaces)"
    optional = true
  }

  argument "field_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
    comment = "The field selectors to use to find matching targets (default: [])"
    optional = true
  }

  argument "label_selectors" {
    // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    comment = "The label selectors to use to find matching targets (default: [\"app.kubernetes.io/name=prometheus-node-exporter\"])"
    optional = true
  }

  argument "port_name" {
    comment = "The name of the port to scrape metrics from (default: metrics)"
    optional = true
  }

  // node_exporter service discovery for all of the pods
  discovery.kubernetes "node_exporter" {
    role = "pod"

    selectors {
      role  = "pod"
      field = string.join(coalesce(argument.field_selectors.value, []), ",")
      label = string.join(coalesce(argument.label_selectors.value, ["app.kubernetes.io/name=prometheus-node-exporter"]), ",")
    }

    namespaces {
      names = coalesce(argument.namespaces.value, [])
    }
  }

  // node_exporter relabelings (pre-scrape)
  discovery.relabel "kubernetes" {
    targets = discovery.kubernetes.node_exporter.targets

    // keep only the specified metrics port name, and pods that are Running and ready
    rule {
      source_labels = [
        "__meta_kubernetes_pod_container_port_name",
        "__meta_kubernetes_pod_phase",
        "__meta_kubernetes_pod_ready",
      ]
      separator = "@"
      regex     = coalesce(argument.port_name.value, "metrics") + "@Running@true"
      action    = "keep"
    }

    // drop any init containers
    rule {
      source_labels = ["__meta_kubernetes_pod_container_init"]
      regex         = "true"
      action        = "drop"
    }

    // set the namespace label
    rule {
      source_labels = ["__meta_kubernetes_namespace"]
      target_label  = "namespace"
    }

    // set the pod label
    rule {
      source_labels = ["__meta_kubernetes_pod_name"]
      target_label  = "pod"
    }

    // set the container label
    rule {
      source_labels = ["__meta_kubernetes_pod_container_name"]
      target_label  = "container"
    }

    // set a workload label as "<controller kind>/<controller name>"
    rule {
      source_labels = [
        "__meta_kubernetes_pod_controller_kind",
        "__meta_kubernetes_pod_controller_name",
      ]
      separator    = "/"
      target_label = "workload"
    }

    // remove the hash from the ReplicaSet
    rule {
      source_labels = ["workload"]
      regex         = "(ReplicaSet/.+)-.+"
      target_label  = "workload"
    }

    // set the app name if specified as metadata labels "app:" or "app.kubernetes.io/name:" or "k8s-app:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_name",
        "__meta_kubernetes_pod_label_k8s_app",
        "__meta_kubernetes_pod_label_app",
      ]
      separator    = ";"
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "app"
    }

    // set the component if specified as metadata labels "component:" or "app.kubernetes.io/component:" or "k8s-component:"
    rule {
      action = "replace"
      source_labels = [
        "__meta_kubernetes_pod_label_app_kubernetes_io_component",
        "__meta_kubernetes_pod_label_k8s_component",
        "__meta_kubernetes_pod_label_component",
      ]
      regex        = "^(?:;*)?([^;]+).*$"
      replacement  = "$1"
      target_label = "component"
    }

    // set a source label
    rule {
      action       = "replace"
      replacement  = "kubernetes"
      target_label = "source"
    }
  }

  export "output" {
    value = discovery.relabel.kubernetes.output
  }
}
declare "local" {
  argument "port" {
    comment = "The port to use (default: 9100)"
    optional = true
  }

  // static target for a node_exporter running on the local host
  discovery.relabel "local" {
    targets = [
      {
        // join host and port with ":" — the original concatenated them
        // directly, producing the invalid scrape address "localhost9100"
        "__address__" = "localhost:" + string.format("%s", coalesce(argument.port.value, "9100")),
        "source"      = "local",
      },
    ]
  }

  export "output" {
    value = discovery.relabel.local.output
  }
}
declare "scrape" {
  argument "targets" {
    comment = "Must be a list() of targets"
  }

  argument "forward_to" {
    comment = "Must be a list(MetricsReceiver) where collected metrics should be forwarded to"
  }

  argument "job_label" {
    comment = "The job label to add for all node_exporter metric (default: integrations/node_exporter)"
    optional = true
  }

  argument "keep_metrics" {
    comment = "A regular expression of metrics to keep (default: see below)"
    optional = true
  }

  argument "drop_metrics" {
    comment = "A regular expression of metrics to drop (default: see below)"
    optional = true
  }

  argument "scheme" {
    comment = "The scheme to use when scraping metrics (default: http)"
    optional = true
  }

  argument "bearer_token_file" {
    comment = "The bearer token file (default: none)"
    optional = true
  }

  argument "scrape_interval" {
    comment = "How often to scrape metrics from the targets (default: 60s)"
    optional = true
  }

  argument "scrape_timeout" {
    comment = "How long before a scrape times out (default: 10s)"
    optional = true
  }

  argument "max_cache_size" {
    comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
    optional = true
  }

  argument "clustering" {
    // Docs: https://grafana.com/docs/alloy/latest/get-started/clustering/
    comment = "Whether or not clustering should be enabled (default: false)"
    optional = true
  }

  // node_exporter scrape job
  prometheus.scrape "node_exporter" {
    job_name          = coalesce(argument.job_label.value, "integrations/node_exporter")
    forward_to        = [prometheus.relabel.node_exporter.receiver]
    targets           = argument.targets.value
    scrape_interval   = coalesce(argument.scrape_interval.value, "60s")
    scrape_timeout    = coalesce(argument.scrape_timeout.value, "10s")
    scheme            = coalesce(argument.scheme.value, "http")
    bearer_token_file = coalesce(argument.bearer_token_file.value, "")

    tls_config {
      // NOTE(review): certificate verification is disabled for all scrapes — confirm this is intended
      insecure_skip_verify = true
    }

    clustering {
      enabled = coalesce(argument.clustering.value, false)
    }
  }

  // node_exporter metric relabelings (post-scrape)
  prometheus.relabel "node_exporter" {
    forward_to     = argument.forward_to.value
    max_cache_size = coalesce(argument.max_cache_size.value, 100000)

    // drop metrics that match the drop_metrics regex
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.drop_metrics.value, "(^(go)_.+$)")
      action        = "drop"
    }

    // keep only metrics that match the keep_metrics regex
    rule {
      source_labels = ["__name__"]
      regex         = coalesce(argument.keep_metrics.value, "(up|scrape_(duration_seconds|series_added|samples_(post_metric_relabeling|scraped))|node_(arp_entries|boot_time_seconds|context_switches_total|cpu_seconds_total|disk_(io_time_seconds_total|io_time_weighted_seconds_total|read_(bytes_total|time_seconds_total)|reads_completed_total|write_time_seconds_total|writes_completed_total|written_bytes_total)|file(fd_(allocated|maximum)|system_(avail_bytes|device_error|files(_free)?|readonly|size_bytes))|intr_total|load(1|15|5)|md_disks(_required)?|memory_(Active_(anon_bytes|bytes|file_bytes)|Anon(HugePages_bytes|Pages_bytes)|Bounce_bytes|Buffers_bytes|Cached_bytes|CommitLimit_bytes|Committed_AS_bytes|DirectMap(1G|2M|4k)_bytes|Dirty_bytes|HugePages_(Free|Rsvd|Surp|Total)|Hugepagesize_bytes|Inactive_(anon_bytes|bytes|file_bytes)|Mapped_bytes|Mem(Available|Free|Total)_bytes|S(Reclaimable|Unreclaim)_bytes|Shmem(HugePages_bytes|PmdMapped_bytes|_bytes)|Slab_bytes|SwapTotal_bytes|Vmalloc(Chunk|Total|Used)_bytes|Writeback(Tmp|)_bytes)|netstat_(Icmp6_(InErrors|InMsgs|OutMsgs)|Icmp_(InErrors|InMsgs|OutMsgs)|IpExt_(InOctets|OutOctets)|TcpExt_(Listen(Drops|Overflows)|TCPSynRetrans)|Tcp_(InErrs|InSegs|OutRsts|OutSegs|RetransSegs)|Udp6_(InDatagrams|InErrors|NoPorts|OutDatagrams|RcvbufErrors|SndbufErrors)|Udp(Lite|)_(InDatagrams|InErrors|NoPorts|OutDatagrams|RcvbufErrors|SndbufErrors))|network_(carrier|info|mtu_bytes|receive_(bytes_total|compressed_total|drop_total|errs_total|fifo_total|multicast_total|packets_total)|speed_bytes|transmit_(bytes_total|compressed_total|drop_total|errs_total|fifo_total|multicast_total|packets_total|queue_length)|up)|nf_conntrack_(entries(_limit)?|limit)|os_info|sockstat_(FRAG6|FRAG|RAW6|RAW|TCP6|TCP_(alloc|inuse|mem(_bytes)?|orphan|tw)|UDP6|UDPLITE6|UDPLITE|UDP_(inuse|mem(_bytes)?)|sockets_used)|softnet_(dropped_total|processed_total|times_squeezed_total)|systemd_unit_state|textfile_scrape_error|time_zone_offset_seconds|timex_(estimated_error_seconds|maxerror_seconds|offset_seconds|sync_status)|uname_info|vmstat_(oom_kill|pgfault|pgmajfault|pgpgin|pgpgout|pswpin|pswpout)|process_(max_fds|open_fds)))")
      action        = "keep"
    }

    // Drop metrics for certain file systems
    rule {
      source_labels = ["__name__", "fstype"]
      separator     = "@"
      // "tmpfs" is the filesystem type node_exporter reports; the original
      // "tempfs" matched nothing, so the drop rule was a no-op
      regex         = "node_filesystem.*@(tmpfs)"
      action        = "drop"
    }
  }
}

@ -0,0 +1,29 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# Don't package templates.
README.md.gotmpl
# Don't package the tests used for CI.
/tests/

@ -0,0 +1,163 @@
# Changelog
> _Contributors should read our [contributors guide][] for instructions on how
> to update the changelog._
This document contains a historical list of changes between releases. Only
changes that impact end-user behavior are listed; changes to documentation or
internal API changes are not present.
0.11.0 (2025-01-23)
----------
### Enhancements
- Update jimmidyson/configmap-reload to 0.14.0. (@petewall)
- Add the ability to deploy extra manifest files. (@dbluxo)
0.10.1 (2024-12-03)
----------
### Enhancements
- Update to Grafana Alloy v1.5.1. (@ptodev)
0.10.0 (2024-11-13)
----------
### Enhancements
- Add support for adding hostAliases to the Helm chart. (@duncan485)
- Update to Grafana Alloy v1.5.0. (@thampiotr)
0.9.2 (2024-10-18)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.3. (@ptodev)
0.9.1 (2024-10-04)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.2. (@ptodev)
0.9.0 (2024-10-02)
------------------
### Enhancements
- Add lifecycle hook to the Helm chart. (@etiennep)
- Add terminationGracePeriodSeconds setting to the Helm chart. (@etiennep)
0.8.1 (2024-09-26)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.1. (@ptodev)
0.8.0 (2024-09-25)
------------------
### Enhancements
- Update to Grafana Alloy v1.4.0. (@ptodev)
0.7.0 (2024-08-26)
------------------
### Enhancements
- Add PodDisruptionBudget to the Helm chart. (@itspouya)
0.6.1 (2024-08-23)
----------
### Enhancements
- Add the ability to set --cluster.name in the Helm chart with alloy.clustering.name. (@petewall)
- Add the ability to set appProtocol in extraPorts to help OpenShift users to expose gRPC. (@clementduveau)
### Other changes
- Update helm chart to use v1.3.1.
0.6.0 (2024-08-05)
------------------
### Other changes
- Update helm chart to use v1.3.0.
- Set `publishNotReadyAddresses` to `true` in the service spec for clustering to fix a bug where peers could not join on startup. (@wildum)
0.5.1 (2024-07-11)
------------------
### Other changes
- Update helm chart to use v1.2.1.
0.5.0 (2024-07-08)
------------------
### Enhancements
- Only utilize spec.internalTrafficPolicy in the Service if deploying to Kubernetes 1.26 or later. (@petewall)
0.4.0 (2024-06-26)
------------------
### Enhancements
- Update to Grafana Alloy v1.2.0. (@ptodev)
0.3.2 (2024-05-30)
------------------
### Bugfixes
- Update to Grafana Alloy v1.1.1. (@rfratto)
0.3.1 (2024-05-22)
------------------
### Bugfixes
- Fix clustering on instances running within Istio mesh by allowing to change the name of the clustering port
0.3.0 (2024-05-14)
------------------
### Enhancements
- Update to Grafana Alloy v1.1.0. (@rfratto)
0.2.0 (2024-05-08)
------------------
### Other changes
- Support all [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) (@nlamirault)
0.1.1 (2024-04-11)
------------------
### Other changes
- Add missing Alloy icon to Chart.yaml. (@rfratto)
0.1.0 (2024-04-09)
------------------
### Features
- Introduce a Grafana Alloy Helm chart. The Grafana Alloy Helm chart is
backwards compatible with the values.yaml from the `grafana-agent` Helm
chart. Review the Helm chart README for a description on how to migrate.
(@rfratto)

@ -0,0 +1,6 @@
dependencies:
- name: crds
repository: ""
version: 0.0.0
digest: sha256:1980431a3d80822fca2e67e9cf16ff7a7f8d1dc87deb9e44d50e85e3e8e33a81
generated: "2025-01-23T18:06:39.985104093Z"

@ -0,0 +1,12 @@
apiVersion: v2
appVersion: v1.6.1
dependencies:
- condition: crds.create
name: crds
repository: ""
version: 0.0.0
description: Grafana Alloy
icon: https://raw.githubusercontent.com/grafana/alloy/main/docs/sources/assets/alloy_icon_orange.svg
name: alloy
type: application
version: 0.11.0

@ -0,0 +1,317 @@
# Grafana Alloy Helm chart
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.11.0](https://img.shields.io/badge/Version-0.11.0-informational?style=flat-square) ![AppVersion: v1.6.1](https://img.shields.io/badge/AppVersion-v1.6.1-informational?style=flat-square)
Helm chart for deploying [Grafana Alloy][] to Kubernetes.
[Grafana Alloy]: https://grafana.com/docs/alloy/latest/
## Usage
### Setup Grafana chart repository
```
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```
### Install chart
To install the chart with the release name my-release:
`helm install my-release grafana/alloy`
This chart installs one instance of Grafana Alloy into your Kubernetes cluster
using a specific Kubernetes controller. By default, DaemonSet is used. The
`controller.type` value can be used to change the controller to either a
StatefulSet or Deployment.
Creating multiple installations of the Helm chart with different controllers is
useful if just using the default DaemonSet isn't sufficient.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| alloy.clustering.enabled | bool | `false` | Deploy Alloy in a cluster to allow for load distribution. |
| alloy.clustering.name | string | `""` | Name for the Alloy cluster. Used for differentiating between clusters. |
| alloy.clustering.portName | string | `"http"` | Name for the port used for clustering, useful if running inside an Istio Mesh |
| alloy.configMap.content | string | `""` | Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values. |
| alloy.configMap.create | bool | `true` | Create a new ConfigMap for the config file. |
| alloy.configMap.key | string | `nil` | Key in ConfigMap to get config from. |
| alloy.configMap.name | string | `nil` | Name of existing ConfigMap to use. Used when create is false. |
| alloy.enableReporting | bool | `true` | Enables sending Grafana Labs anonymous usage stats to help improve Grafana Alloy. |
| alloy.envFrom | list | `[]` | Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core |
| alloy.extraArgs | list | `[]` | Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/ |
| alloy.extraEnv | list | `[]` | Extra environment variables to pass to the Alloy container. |
| alloy.extraPorts | list | `[]` | Extra ports to expose on the Alloy container. |
| alloy.hostAliases | list | `[]` | Host aliases to add to the Alloy container. |
| alloy.lifecycle | object | `{}` | Set lifecycle hooks for the Grafana Alloy container. |
| alloy.listenAddr | string | `"0.0.0.0"` | Address to listen for traffic on. 0.0.0.0 exposes the UI to other containers. |
| alloy.listenPort | int | `12345` | Port to listen for traffic on. |
| alloy.listenScheme | string | `"HTTP"` | Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS" |
| alloy.mounts.dockercontainers | bool | `false` | Mount /var/lib/docker/containers from the host into the container for log collection. |
| alloy.mounts.extra | list | `[]` | Extra volume mounts to add into the Grafana Alloy container. Does not affect the watch container. |
| alloy.mounts.varlog | bool | `false` | Mount /var/log from the host into the container for log collection. |
| alloy.resources | object | `{}` | Resource requests and limits to apply to the Grafana Alloy container. |
| alloy.securityContext | object | `{}` | Security context to apply to the Grafana Alloy container. |
| alloy.stabilityLevel | string | `"generally-available"` | Minimum stability level of components and behavior to enable. Must be one of "experimental", "public-preview", or "generally-available". |
| alloy.storagePath | string | `"/tmp/alloy"` | Path to where Grafana Alloy stores data (for example, the Write-Ahead Log). By default, data is lost between reboots. |
| alloy.uiPathPrefix | string | `"/"` | Base path where the UI is exposed. |
| configReloader.customArgs | list | `[]` | Override the args passed to the container. |
| configReloader.enabled | bool | `true` | Enables automatically reloading when the Alloy config changes. |
| configReloader.image.digest | string | `""` | SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag` |
| configReloader.image.registry | string | `"ghcr.io"` | Config reloader image registry (defaults to docker.io) |
| configReloader.image.repository | string | `"jimmidyson/configmap-reload"` | Repository to get config reloader image from. |
| configReloader.image.tag | string | `"v0.14.0"` | Tag of image to use for config reloading. |
| configReloader.resources | object | `{"requests":{"cpu":"1m","memory":"5Mi"}}` | Resource requests and limits to apply to the config reloader container. |
| configReloader.securityContext | object | `{}` | Security context to apply to the Grafana configReloader container. |
| controller.affinity | object | `{}` | Affinity configuration for pods. |
| controller.autoscaling.enabled | bool | `false` | Creates a HorizontalPodAutoscaler for controller type deployment. |
| controller.autoscaling.maxReplicas | int | `5` | The upper limit for the number of replicas to which the autoscaler can scale up. |
| controller.autoscaling.minReplicas | int | `1` | The lower limit for the number of replicas to which the autoscaler can scale down. |
| controller.autoscaling.scaleDown.policies | list | `[]` | List of policies to determine the scale-down behavior. |
| controller.autoscaling.scaleDown.selectPolicy | string | `"Max"` | Determines which of the provided scaling-down policies to apply if multiple are specified. |
| controller.autoscaling.scaleDown.stabilizationWindowSeconds | int | `300` | The duration that the autoscaling mechanism should look back on to make decisions about scaling down. |
| controller.autoscaling.scaleUp.policies | list | `[]` | List of policies to determine the scale-up behavior. |
| controller.autoscaling.scaleUp.selectPolicy | string | `"Max"` | Determines which of the provided scaling-up policies to apply if multiple are specified. |
| controller.autoscaling.scaleUp.stabilizationWindowSeconds | int | `0` | The duration that the autoscaling mechanism should look back on to make decisions about scaling up. |
| controller.autoscaling.targetCPUUtilizationPercentage | int | `0` | Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling. |
| controller.autoscaling.targetMemoryUtilizationPercentage | int | `80` | Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling. |
| controller.dnsPolicy | string | `"ClusterFirst"` | Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy |
| controller.enableStatefulSetAutoDeletePVC | bool | `false` | Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'. |
| controller.extraAnnotations | object | `{}` | Annotations to add to controller. |
| controller.extraContainers | list | `[]` | Additional containers to run alongside the Alloy container and initContainers. |
| controller.hostNetwork | bool | `false` | Configures Pods to use the host network. When set to true, the ports that will be used must be specified. |
| controller.hostPID | bool | `false` | Configures Pods to use the host PID namespace. |
| controller.initContainers | list | `[]` | |
| controller.nodeSelector | object | `{}` | nodeSelector to apply to Grafana Alloy pods. |
| controller.parallelRollout | bool | `true` | Whether to deploy pods in parallel. Only used when controller.type is 'statefulset'. |
| controller.podAnnotations | object | `{}` | Extra pod annotations to add. |
| controller.podDisruptionBudget | object | `{"enabled":false,"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget configuration. |
| controller.podDisruptionBudget.enabled | bool | `false` | Whether to create a PodDisruptionBudget for the controller. |
| controller.podDisruptionBudget.maxUnavailable | string | `nil` | Maximum number of pods that can be unavailable during a disruption. Note: Only one of minAvailable or maxUnavailable should be set. |
| controller.podDisruptionBudget.minAvailable | string | `nil` | Minimum number of pods that must be available during a disruption. Note: Only one of minAvailable or maxUnavailable should be set. |
| controller.podLabels | object | `{}` | Extra pod labels to add. |
| controller.priorityClassName | string | `""` | priorityClassName to apply to Grafana Alloy pods. |
| controller.replicas | int | `1` | Number of pods to deploy. Ignored when controller.type is 'daemonset'. |
| controller.terminationGracePeriodSeconds | string | `nil` | Termination grace period in seconds for the Grafana Alloy pods. The default value used by Kubernetes if unspecified is 30 seconds. |
| controller.tolerations | list | `[]` | Tolerations to apply to Grafana Alloy pods. |
| controller.topologySpreadConstraints | list | `[]` | Topology Spread Constraints to apply to Grafana Alloy pods. |
| controller.type | string | `"daemonset"` | Type of controller to use for deploying Grafana Alloy in the cluster. Must be one of 'daemonset', 'deployment', or 'statefulset'. |
| controller.updateStrategy | object | `{}` | Update strategy for updating deployed Pods. |
| controller.volumeClaimTemplates | list | `[]` | volumeClaimTemplates to add when controller.type is 'statefulset'. |
| controller.volumes.extra | list | `[]` | Extra volumes to add to the Grafana Alloy pod. |
| crds.create | bool | `true` | Whether to install CRDs for monitoring. |
| extraObjects | list | `[]` | Extra k8s manifests to deploy |
| fullnameOverride | string | `nil` | Overrides the chart's computed fullname. Used to change the full prefix of resource names. |
| global.image.pullSecrets | list | `[]` | Optional set of global image pull secrets. |
| global.image.registry | string | `""` | Global image registry to use if it needs to be overridden for some specific use cases (e.g local registries, custom images, ...) |
| global.podSecurityContext | object | `{}` | Security context to apply to the Grafana Alloy pod. |
| image.digest | string | `nil` | Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`. |
| image.pullPolicy | string | `"IfNotPresent"` | Grafana Alloy image pull policy. |
| image.pullSecrets | list | `[]` | Optional set of image pull secrets. |
| image.registry | string | `"docker.io"` | Grafana Alloy image registry (defaults to docker.io) |
| image.repository | string | `"grafana/alloy"` | Grafana Alloy image repository. |
| image.tag | string | `nil` | Grafana Alloy image tag. When empty, the Chart's appVersion is used. |
| ingress.annotations | object | `{}` | |
| ingress.enabled | bool | `false` | Enables ingress for Alloy (Faro port) |
| ingress.extraPaths | list | `[]` | |
| ingress.faroPort | int | `12347` | |
| ingress.hosts[0] | string | `"chart-example.local"` | |
| ingress.labels | object | `{}` | |
| ingress.path | string | `"/"` | |
| ingress.pathType | string | `"Prefix"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `nil` | Overrides the chart's name. Used to change the infix in the resource names. |
| rbac.create | bool | `true` | Whether to create RBAC resources for Alloy. |
| service.annotations | object | `{}` | |
| service.clusterIP | string | `""` | Cluster IP, can be set to None, empty "" or an IP address |
| service.enabled | bool | `true` | Creates a Service for the controller's pods. |
| service.internalTrafficPolicy | string | `"Cluster"` | Value for internal traffic policy. 'Cluster' or 'Local' |
| service.nodePort | int | `31128` | NodePort port. Only takes effect when `service.type: NodePort` |
| service.type | string | `"ClusterIP"` | Service type |
| serviceAccount.additionalLabels | object | `{}` | Additional labels to add to the created service account. |
| serviceAccount.annotations | object | `{}` | Annotations to add to the created service account. |
| serviceAccount.create | bool | `true` | Whether to create a service account for the Grafana Alloy deployment. |
| serviceAccount.name | string | `nil` | The name of the existing service account to use when serviceAccount.create is false. |
| serviceMonitor.additionalLabels | object | `{}` | Additional labels for the service monitor. |
| serviceMonitor.enabled | bool | `false` | |
| serviceMonitor.interval | string | `""` | Scrape interval. If not set, the Prometheus default scrape interval is used. |
| serviceMonitor.metricRelabelings | list | `[]` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion. ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig |
| serviceMonitor.relabelings | list | `[]` | RelabelConfigs to apply to samples before scraping ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig |
| serviceMonitor.tlsConfig | object | `{}` | Customize tls parameters for the service monitor |
#### Migrate from `grafana/grafana-agent` chart to `grafana/alloy`
The `values.yaml` file for the `grafana/grafana-agent` chart is compatible with
the chart for `grafana/alloy`, with two exceptions:
* The `agent` field in `values.yaml` is deprecated in favor of `alloy`. Support
for the `agent` field will be removed in a future release.
* The default value for `alloy.listenPort` is `12345` to align with the default
listen port in other installations. To retain the previous default, set
`alloy.listenPort` to `80` when installing.
### alloy.stabilityLevel
`alloy.stabilityLevel` controls the minimum level of stability for what
components can be created (directly or through imported modules). Note that
setting this field to a lower stability may also enable internal behaviour of a
lower stability, such as experimental memory optimizations.
Valid settings are `experimental`, `public-preview`, and `generally-available`.
### alloy.extraArgs
`alloy.extraArgs` allows for passing extra arguments to the Grafana Alloy
container. The list of available arguments is documented on [alloy run][].
> **WARNING**: Using `alloy.extraArgs` does not have a stable API. Things may
> break between Chart upgrade if an argument gets added to the template.
[alloy run]: https://grafana.com/docs/alloy/latest/reference/cli/run/
### alloy.extraPorts
`alloy.extraPorts` allows for configuring specific open ports.
The detailed specification of ports can be found at the [Kubernetes Pod documents](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#ports).
Port numbers specified must be 0 < x < 65535.
| ChartPort | KubePort | Description |
|-----------|----------|-------------|
| targetPort | containerPort | Number of port to expose on the pod's IP address. |
| hostPort | hostPort | (Optional) Number of port to expose on the host. Daemonsets taking traffic might find this useful. |
| name | name | If specified, this must be an `IANA_SVC_NAME` and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
| protocol | protocol | Must be UDP, TCP, or SCTP. Defaults to "TCP". |
| appProtocol | appProtocol | Hint on application protocol. This is used to expose Alloy externally on OpenShift clusters using "h2c". Optional. No default value. |
### alloy.listenAddr
`alloy.listenAddr` allows for restricting which address Alloy listens on
for network traffic on its HTTP server. By default, this is `0.0.0.0` to allow
its UI to be exposed when port-forwarding and to expose its metrics to other
Alloy instances in the cluster.
### alloy.configMap.content
`alloy.configMap.content` holds the Grafana Alloy configuration to use.
If `alloy.configMap.content` is not provided, a [default configuration file][default-config] is
used. When provided, `alloy.configMap.content` must hold a valid Alloy configuration file.
[default-config]: ./config/example.alloy
### alloy.securityContext
`alloy.securityContext` sets the securityContext passed to the Grafana
Alloy container.
By default, Grafana Alloy containers are not able to collect telemetry from the
host node or other specific types of privileged telemetry data. See [Collecting
logs from other containers][#collecting-logs-from-other-containers] and
[Collecting host node telemetry][#collecting-host-node-telemetry] below for
more information on how to enable these capabilities.
### rbac.create
`rbac.create` enables the creation of ClusterRole and ClusterRoleBindings for
the Grafana Alloy containers to use. The default permission set allows
components like [discovery.kubernetes][] to work properly.
[discovery.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/
### controller.autoscaling
`controller.autoscaling.enabled` enables the creation of a HorizontalPodAutoscaler. It is only used when `controller.type` is set to `deployment` or `statefulset`.
`controller.autoscaling` is intended to be used with [clustered][] mode.
> **WARNING**: Using `controller.autoscaling` for any other Grafana Alloy
> configuration could lead to redundant or double telemetry collection.
[clustered]: https://grafana.com/docs/alloy/latest/reference/cli/run/#clustered-mode
When using autoscaling with a StatefulSet controller and have enabled
volumeClaimTemplates to be created alongside the StatefulSet, it is possible to
leak up to `maxReplicas` PVCs when the HPA is scaling down. If you're on
Kubernetes version `>=1.23-0` and your cluster has the
`StatefulSetAutoDeletePVC` feature gate enabled, you can set
`enableStatefulSetAutoDeletePVC` to true to automatically delete stale PVCs.
Using `controller.autoscaling` requires the target metric (cpu/memory) to have
its resource requests set up for both the Alloy and config-reloader containers
so that the HPA can use them to calculate the replica count from the actual
resource utilization.
## Collecting logs from other containers
There are two ways to collect logs from other containers within the cluster
Alloy is deployed in.
### loki.source.kubernetes
The [loki.source.kubernetes][] component may be used to collect logs from
containers using the Kubernetes API. This component does not require mounting
the hosts filesystem into Alloy, nor requires additional security contexts to
work correctly.
[loki.source.kubernetes]: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes/
### File-based collection
Logs may also be collected by mounting the host's filesystem into the Alloy
container, bypassing the need to communicate with the Kubernetes API.
To mount logs from other containers to Grafana Alloy directly:
* Set `alloy.mounts.dockercontainers` to `true`.
* Set `alloy.securityContext` to:
```yaml
privileged: true
runAsUser: 0
```
## Collecting host node telemetry
Telemetry from the host, such as host-specific log files (from `/var/logs`) or
metrics from `/proc` and `/sys` are not accessible to Grafana Alloy containers.
To expose this information to Grafana Alloy for telemetry collection:
* Set `alloy.mounts.dockercontainers` to `true`.
* Mount `/proc` and `/sys` from the host into the container.
* Set `alloy.securityContext` to:
```yaml
privileged: true
runAsUser: 0
```
## Expose Alloy externally on OpenShift clusters
If you want to send telemetry from an Alloy instance outside of the OpenShift clusters over gRPC towards the Alloy instance on the OpenShift clusters, you need to:
* Set the optional `appProtocol` on `alloy.extraPorts` to `h2c`
* Expose the service via Ingress or Route within the OpenShift cluster. Example of a Route in OpenShift:
```yaml
kind: Route
apiVersion: route.openshift.io/v1
metadata:
name: route-otlp-alloy-h2c
spec:
to:
kind: Service
name: test-grpc-h2c
weight: 100
port:
targetPort: otlp-grpc
tls:
termination: edge
insecureEdgeTerminationPolicy: Redirect
wildcardPolicy: None
```
Once this Ingress/Route is exposed it would then allow gRPC communication for (for example) traces. This allows an Alloy instance on a VM or another Kubernetes/OpenShift cluster to be able to communicate over gRPC via the exposed Ingress or Route.

@ -0,0 +1,3 @@
apiVersion: v2
name: crds
version: 0.0.0

@ -0,0 +1,205 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
name: podlogs.monitoring.grafana.com
spec:
group: monitoring.grafana.com
names:
categories:
- grafana-alloy
- alloy
kind: PodLogs
listKind: PodLogsList
plural: podlogs
singular: podlogs
scope: Namespaced
versions:
- name: v1alpha2
schema:
openAPIV3Schema:
description: PodLogs defines how to collect logs for a Pod.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: PodLogsSpec defines how to collect logs for a Pod.
properties:
namespaceSelector:
description: Selector to select which namespaces the Pod objects are
discovered from.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
relabelings:
description: RelabelConfigs to apply to logs before delivering.
items:
description: 'RelabelConfig allows dynamic rewriting of the label
set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section
of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
default: replace
description: Action to perform based on regex matching. Default
is 'replace'. uppercase and lowercase actions require Prometheus
>= 2.36.
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex capture
groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source label
values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing labels.
Their content is concatenated using the configured separator
and matched against the configured regular expression for
the replace, keep, and drop actions.
items:
description: LabelName is a valid Prometheus label name which
may only contain ASCII letters, numbers, as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: Label to which the resulting value is written in
a replace action. It is mandatory for replace actions. Regex
capture groups are available.
type: string
type: object
type: array
selector:
description: Selector to select Pod objects. Required.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
required:
- selector
type: object
type: object
served: true
storage: true

@ -0,0 +1,3 @@
serviceAccount:
additionalLabels:
test: "true"

@ -0,0 +1,7 @@
alloy:
clustering:
enabled: true
controller:
type: 'statefulset'
replicas: 3

@ -0,0 +1,5 @@
controller:
type: deployment
podDisruptionBudget:
enabled: true
maxUnavailable: 1

@ -0,0 +1,5 @@
controller:
type: deployment
podDisruptionBudget:
enabled: true
minAvailable: 1

@ -0,0 +1,5 @@
controller:
type: statefulset
podDisruptionBudget:
enabled: true
maxUnavailable: 1

@ -0,0 +1,5 @@
controller:
type: statefulset
podDisruptionBudget:
enabled: true
minAvailable: 1

@ -0,0 +1,12 @@
controller:
volumes:
extra:
- name: cache-volume
emptyDir:
sizeLimit: 500Mi
alloy:
mounts:
extra:
- mountPath: /cache
name: cache-volume

@ -0,0 +1,5 @@
# Test rendering of the chart with the controller explicitly set to DaemonSet.
controller:
type: daemonset
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet

@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to DaemonSet.
controller:
type: daemonset

@ -0,0 +1,25 @@
# Test rendering of the chart with the controller explicitly set to Deployment and autoscaling enabled.
controller:
type: deployment
autoscaling:
enabled: true
scaleDown:
policies:
- type: Pods
value: 4
periodSeconds: 60
selectPolicy: Min
stabilizationWindowSeconds: 100
scaleUp:
policies:
- type: Pods
value: 4
periodSeconds: 60
- type: Percent
value: 100
periodSeconds: 15
stabilizationWindowSeconds: 80
alloy:
resources:
requests:
memory: 100Mi

@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to Deployment.
controller:
type: deployment

@ -0,0 +1,10 @@
# Test rendering of the chart with the controller explicitly set to StatefulSet and autoscaling enabled.
controller:
type: statefulset
autoscaling:
enabled: true
enableStatefulSetAutoDeletePVC: true
alloy:
resources:
requests:
memory: 100Mi

@ -0,0 +1,3 @@
# Test rendering of the chart with the controller explicitly set to StatefulSet.
controller:
type: statefulset

@ -0,0 +1,10 @@
alloy:
configMap:
content: |-
logging {
level = "warn"
format = "logfmt"
}
discovery.kubernetes "custom_pods" {
role = "pod"
}

@ -0,0 +1 @@
# Test rendering of the chart with everything set to the default values.

@ -0,0 +1,9 @@
# Test rendering of the chart with the service monitor enabled
alloy:
listenScheme: HTTPS
service:
enabled: true
serviceMonitor:
enabled: true
tlsConfig:
insecureSkipVerify: true

@ -0,0 +1,5 @@
# Test rendering of the chart with the service monitor enabled
service:
enabled: true
serviceMonitor:
enabled: true

@ -0,0 +1,5 @@
# Specify extra ports for verifying rendering the template works
alloy:
envFrom:
- configMapRef:
name: special-config

@ -0,0 +1,5 @@
alloy:
configMap:
create: false
name: existing-config
key: my-config.alloy

@ -0,0 +1,9 @@
# Specify extra ports for verifying rendering the template works
alloy:
extraEnv:
- name: GREETING
value: "Warm greetings to"
- name: HONORIFIC
value: "The Most Honorable"
- name: NAME
value: "Kubernetes"

@ -0,0 +1,8 @@
extraObjects:
- apiVersion: v1
kind: Secret
metadata:
name: grafana-cloud
stringData:
PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
PROMETHEUS_USERNAME: '123456'

@ -0,0 +1,7 @@
# Specify extra ports for verifying rendering the template works
alloy:
extraPorts:
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP

@ -0,0 +1,9 @@
alloy:
extraPorts:
- name: "faro"
port: 12347
targetPort: 12347
protocol: "TCP"
ingress:
enabled: true

@ -0,0 +1,13 @@
# Test rendering of the chart with the global image pull secret explicitly set.
global:
image:
pullSecrets:
- name: global-cred
podSecurityContext:
runAsUser: 1000
runAsGroup: 1000
image:
pullSecrets:
- name: local-cred

@ -0,0 +1,11 @@
# Test rendering of the chart with the global image registry explicitly set to another value.
global:
image:
registry: quay.io
image:
registry: docker.com # Invalid value by default
configReloader:
image:
registry: docker.com

@ -0,0 +1,5 @@
alloy:
hostAliases:
- ip: "20.21.22.23"
hostnames:
- "grafana.company.net"

@ -0,0 +1,29 @@
controller:
initContainers:
- name: geo-ip
image: ghcr.io/maxmind/geoipupdate:v6.0
volumeMounts:
- name: geoip
mountPath: /etc/geoip
volumes:
- name: geoip
emptyDir: {}
env:
- name: GEOIPUPDATE_ACCOUNT_ID
value: "geoipupdate_account_id"
- name: GEOIPUPDATE_LICENSE_KEY
value: "geoipupdate_license_key"
- name: GEOIPUPDATE_EDITION_IDS
value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country"
- name: GEOIPUPDATE_DB_DIR
value: "/etc/geoip"
volumes:
extra:
- name: geoip
mountPath: /etc/geoip
alloy:
mounts:
extra:
- name: geoip
mountPath: /etc/geoip

@ -0,0 +1,8 @@
controller:
type: deployment
alloy:
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 1"]

@ -0,0 +1,4 @@
# Test rendering of the chart with the image pull secret explicitly set.
image:
pullSecrets:
- name: local-cred

@ -0,0 +1,7 @@
# Test rendering of the chart with the individual image registries explicitly set to another value.
image:
registry: quay.io
configReloader:
image:
registry: quay.io

@ -0,0 +1,11 @@
controller:
nodeSelector:
key1: "value1"
tolerations:
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoSchedule"
- key: "key2"
operator: "Exists"
effect: "NoSchedule"

@ -0,0 +1,7 @@
global:
podSecurityContext:
fsGroup: 473
alloy:
securityContext:
runAsUser: 473
runAsGroup: 473

@ -0,0 +1,4 @@
# Test correct rendering of the pod annotations
controller:
podAnnotations:
testAnnotationKey: testAnnotationValue

@ -0,0 +1,29 @@
controller:
extraContainers:
- name: geo-ip
image: ghcr.io/maxmind/geoipupdate:v6.0
volumeMounts:
- name: geoip
mountPath: /etc/geoip
volumes:
- name: geoip
emptyDir: {}
env:
- name: GEOIPUPDATE_ACCOUNT_ID
value: "geoipupdate_account_id"
- name: GEOIPUPDATE_LICENSE_KEY
value: "geoipupdate_license_key"
- name: GEOIPUPDATE_EDITION_IDS
value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country"
- name: GEOIPUPDATE_DB_DIR
value: "/etc/geoip"
volumes:
extra:
- name: geoip
mountPath: /etc/geoip
alloy:
mounts:
extra:
- name: geoip
mountPath: /etc/geoip

@ -0,0 +1,3 @@
controller:
type: deployment
terminationGracePeriodSeconds: 20

@ -0,0 +1,10 @@
controller:
type: deployment
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: alloy
app.kubernetes.io/instance: alloy

@ -0,0 +1,10 @@
image:
registry: "docker.io"
repository: "grafana/agent"
digest: "sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf"
configReloader:
image:
registry: "docker.io"
repository: "jimmidyson/configmap-reload"
digest: "sha256:5af9d3041d12a3e63f115125f89b66d2ba981fe82e64302ac370c5496055059c"

@ -0,0 +1,28 @@
logging {
level = "info"
format = "logfmt"
}
discovery.kubernetes "pods" {
role = "pod"
}
discovery.kubernetes "nodes" {
role = "node"
}
discovery.kubernetes "services" {
role = "service"
}
discovery.kubernetes "endpoints" {
role = "endpoints"
}
discovery.kubernetes "endpointslices" {
role = "endpointslice"
}
discovery.kubernetes "ingresses" {
role = "ingress"
}

@ -0,0 +1 @@
Welcome to Grafana Alloy!

@ -0,0 +1,25 @@
{{/*
Retrieve configMap name from the name of the chart or the ConfigMap the user
specified.
*/}}
{{- define "alloy.config-map.name" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.name -}}
{{- $values.configMap.name }}
{{- else -}}
{{- include "alloy.fullname" . }}
{{- end }}
{{- end }}
{{/*
The name of the config file is the default or the key the user specified in the
ConfigMap.
*/}}
{{- define "alloy.config-map.key" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.key -}}
{{- $values.configMap.key }}
{{- else -}}
config.alloy
{{- end }}
{{- end }}

@ -0,0 +1,162 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "alloy.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "alloy.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "alloy.chart" -}}
{{- if index .Values "$chart_tests" }}
{{- printf "%s" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "alloy.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "alloy.labels" -}}
helm.sh/chart: {{ include "alloy.chart" . }}
{{ include "alloy.selectorLabels" . }}
{{- if index .Values "$chart_tests" }}
app.kubernetes.io/version: "vX.Y.Z"
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- else }}
{{/* trimPrefix strips the delimiter prefix from alloy.imageId output,
e.g. ':' for tags and '@sha256:' for digests.
For digests, trunc 15 crops the string to a 7-char (short) sha. */}}
app.kubernetes.io/version: {{ (include "alloy.imageId" .) | trunc 15 | trimPrefix "@sha256" | trimPrefix ":" | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: alloy
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "alloy.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alloy.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "alloy.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "alloy.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Calculate name of image ID to use for "alloy.
*/}}
{{- define "alloy.imageId" -}}
{{- if .Values.image.digest }}
{{- $digest := .Values.image.digest }}
{{- if not (hasPrefix "sha256:" $digest) }}
{{- $digest = printf "sha256:%s" $digest }}
{{- end }}
{{- printf "@%s" $digest }}
{{- else if .Values.image.tag }}
{{- printf ":%s" .Values.image.tag }}
{{- else }}
{{- printf ":%s" .Chart.AppVersion }}
{{- end }}
{{- end }}
{{/*
Calculate name of image ID to use for "config-reloader".
*/}}
{{- define "config-reloader.imageId" -}}
{{- if .Values.configReloader.image.digest }}
{{- $digest := .Values.configReloader.image.digest }}
{{- if not (hasPrefix "sha256:" $digest) }}
{{- $digest = printf "sha256:%s" $digest }}
{{- end }}
{{- printf "@%s" $digest }}
{{- else if .Values.configReloader.image.tag }}
{{- printf ":%s" .Values.configReloader.image.tag }}
{{- else }}
{{- printf ":%s" "v0.8.0" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "alloy.ingress.apiVersion" -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }}
{{- print "networking.k8s.io/v1" }}
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
{{- print "networking.k8s.io/v1beta1" }}
{{- else }}
{{- print "extensions/v1beta1" }}
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "alloy.ingress.isStable" -}}
{{- eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1" }}
{{- end }}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "alloy.ingress.supportsIngressClassName" -}}
{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "alloy.ingress.supportsPathType" -}}
{{- or (eq (include "alloy.ingress.isStable" .) "true") (and (eq (include "alloy.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
{{- end }}
{{/*
Return the appropriate apiVersion for PodDisruptionBudget.
*/}}
{{- define "alloy.controller.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}

@ -0,0 +1,37 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.clustering.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "alloy.fullname" . }}-cluster
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: networking
spec:
type: ClusterIP
clusterIP: 'None'
publishNotReadyAddresses: true
selector:
{{- include "alloy.selectorLabels" . | nindent 4 }}
ports:
# Do not include the -metrics suffix in the port name, otherwise metrics
# can be double-collected with the non-headless Service if it's also
# enabled.
#
# This service should only be used for clustering, and not metric
# collection.
- name: {{ $values.clustering.portName }}
port: {{ $values.listenPort }}
targetPort: {{ $values.listenPort }}
protocol: "TCP"
{{- range $portMap := $values.extraPorts }}
- name: {{ $portMap.name }}
port: {{ $portMap.port }}
targetPort: {{ $portMap.targetPort }}
protocol: {{ coalesce $portMap.protocol "TCP" }}
{{- if not (empty $portMap.appProtocol) }}
# Useful for OpenShift clusters that want to expose Alloy ports externally
appProtocol: {{ $portMap.appProtocol }}
{{- end }}
{{- end }}
{{- end }}

@ -0,0 +1,16 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if $values.configMap.create }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "alloy.fullname" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: config
data:
{{- if $values.configMap.content }}
config.alloy: |- {{- (tpl $values.configMap.content .) | nindent 4 }}
{{- else }}
config.alloy: |- {{- .Files.Get "config/example.alloy" | trim | nindent 4 }}
{{- end }}
{{- end }}

@ -0,0 +1,88 @@
{{- define "alloy.container" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
- name: alloy
image: {{ .Values.global.image.registry | default .Values.image.registry }}/{{ .Values.image.repository }}{{ include "alloy.imageId" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- run
- /etc/alloy/{{ include "alloy.config-map.key" . }}
- --storage.path={{ $values.storagePath }}
- --server.http.listen-addr={{ $values.listenAddr }}:{{ $values.listenPort }}
- --server.http.ui-path-prefix={{ $values.uiPathPrefix }}
{{- if not $values.enableReporting }}
- --disable-reporting
{{- end}}
{{- if $values.clustering.enabled }}
- --cluster.enabled=true
- --cluster.join-addresses={{ include "alloy.fullname" . }}-cluster
{{- if $values.clustering.name }}
- --cluster.name={{ $values.clustering.name }}
{{- end}}
{{- end}}
{{- if $values.stabilityLevel }}
- --stability.level={{ $values.stabilityLevel }}
{{- end }}
{{- range $values.extraArgs }}
- {{ . }}
{{- end}}
env:
- name: ALLOY_DEPLOY_MODE
value: "helm"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- range $values.extraEnv }}
- {{- toYaml . | nindent 6 }}
{{- end }}
{{- if $values.envFrom }}
envFrom:
{{- toYaml $values.envFrom | nindent 4 }}
{{- end }}
ports:
- containerPort: {{ $values.listenPort }}
name: http-metrics
{{- range $portMap := $values.extraPorts }}
- containerPort: {{ $portMap.targetPort }}
{{- if $portMap.hostPort }}
hostPort: {{ $portMap.hostPort }}
{{- end}}
name: {{ $portMap.name }}
protocol: {{ coalesce $portMap.protocol "TCP" }}
{{- end }}
readinessProbe:
httpGet:
path: /-/ready
port: {{ $values.listenPort }}
scheme: {{ $values.listenScheme }}
initialDelaySeconds: 10
timeoutSeconds: 1
{{- with $values.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $values.securityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /etc/alloy
{{- if $values.mounts.varlog }}
- name: varlog
mountPath: /var/log
readOnly: true
{{- end }}
{{- if $values.mounts.dockercontainers }}
- name: dockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
{{- end }}
{{- range $values.mounts.extra }}
- {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@ -0,0 +1,26 @@
{{- define "alloy.watch-container" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if .Values.configReloader.enabled -}}
- name: config-reloader
image: {{ .Values.global.image.registry | default .Values.configReloader.image.registry }}/{{ .Values.configReloader.image.repository }}{{ include "config-reloader.imageId" . }}
{{- if .Values.configReloader.customArgs }}
args:
{{- toYaml .Values.configReloader.customArgs | nindent 4 }}
{{- else }}
args:
- --volume-dir=/etc/alloy
- --webhook-url=http://localhost:{{ $values.listenPort }}/-/reload
{{- end }}
volumeMounts:
- name: config
mountPath: /etc/alloy
{{- with .Values.configReloader.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.configReloader.securityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end -}}

@ -0,0 +1,90 @@
{{- define "alloy.pod-template" -}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
metadata:
annotations:
kubectl.kubernetes.io/default-container: alloy
{{- with .Values.controller.podAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "alloy.selectorLabels" . | nindent 4 }}
{{- with .Values.controller.podLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.global.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 4 }}
{{- end }}
serviceAccountName: {{ include "alloy.serviceAccountName" . }}
{{- if or .Values.global.image.pullSecrets .Values.image.pullSecrets }}
imagePullSecrets:
{{- if .Values.global.image.pullSecrets }}
{{- toYaml .Values.global.image.pullSecrets | nindent 4 }}
{{- else }}
{{- toYaml .Values.image.pullSecrets | nindent 4 }}
{{- end }}
{{- end }}
{{- if .Values.controller.initContainers }}
initContainers:
{{- with .Values.controller.initContainers }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
containers:
{{- include "alloy.container" . | nindent 4 }}
{{- include "alloy.watch-container" . | nindent 4 }}
{{- with .Values.controller.extraContainers }}
{{- toYaml . | nindent 4 }}
{{- end}}
{{- if .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName }}
{{- end }}
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
{{- end }}
{{- if .Values.controller.hostPID }}
hostPID: {{ .Values.controller.hostPID }}
{{- end }}
dnsPolicy: {{ .Values.controller.dnsPolicy }}
{{- with .Values.controller.affinity }}
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.controller.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds | int }}
{{- end }}
{{- with .Values.controller.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.controller.tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.controller.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 4 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ include "alloy.config-map.name" . }}
{{- if $values.mounts.varlog }}
- name: varlog
hostPath:
path: /var/log
{{- end }}
{{- if $values.mounts.dockercontainers }}
- name: dockercontainers
hostPath:
path: /var/lib/docker/containers
{{- end }}
{{- if .Values.controller.volumes.extra }}
{{- toYaml .Values.controller.volumes.extra | nindent 4 }}
{{- end }}
{{- if $values.hostAliases }}
hostAliases:
{{- toYaml $values.hostAliases | nindent 4 }}
{{- end }}
{{- end }}

@ -0,0 +1,25 @@
{{- if eq .Values.controller.type "daemonset" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
  {{- with .Values.controller.extraAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- /* .Capabilities.KubeVersion.Minor can be non-numeric on managed clouds
  (e.g. "22+" on GKE/EKS), which makes `int` coerce it to 0 and silently skip
  this field. Compare the full version with semverCompare instead, matching
  the other version checks in this chart. */}}
  {{- if semverCompare ">=1.22-0" .Capabilities.KubeVersion.Version }}
  minReadySeconds: 10
  {{- end }}
  selector:
    matchLabels:
      {{- include "alloy.selectorLabels" . | nindent 6 }}
  template:
    {{- include "alloy.pod-template" . | nindent 4 }}
  {{- with .Values.controller.updateStrategy }}
  updateStrategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

@ -0,0 +1,28 @@
{{- if eq .Values.controller.type "deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
  {{- with .Values.controller.extraAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if not .Values.controller.autoscaling.enabled }}
  replicas: {{ .Values.controller.replicas }}
  {{- end }}
  {{- /* .Capabilities.KubeVersion.Minor can be non-numeric on managed clouds
  (e.g. "22+" on GKE/EKS), which makes `int` coerce it to 0 and silently skip
  this field. Compare the full version with semverCompare instead, matching
  the other version checks in this chart. */}}
  {{- if semverCompare ">=1.22-0" .Capabilities.KubeVersion.Version }}
  minReadySeconds: 10
  {{- end }}
  selector:
    matchLabels:
      {{- include "alloy.selectorLabels" . | nindent 6 }}
  template:
    {{- include "alloy.pod-template" . | nindent 4 }}
  {{- with .Values.controller.updateStrategy }}
  strategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

@ -0,0 +1,50 @@
{{- if eq .Values.controller.type "statefulset" }}
{{- if .Values.enableStatefulSetAutoDeletePVC }}
{{- fail "Value 'enableStatefulSetAutoDeletePVC' should be nested inside 'controller' options." }}
{{- end }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
  {{- with .Values.controller.extraAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if not .Values.controller.autoscaling.enabled }}
  replicas: {{ .Values.controller.replicas }}
  {{- end }}
  {{- if .Values.controller.parallelRollout }}
  podManagementPolicy: Parallel
  {{- end }}
  {{- /* .Capabilities.KubeVersion.Minor can be non-numeric on managed clouds
  (e.g. "22+" on GKE/EKS), which makes `int` coerce it to 0 and silently skip
  this field. Compare the full version with semverCompare instead, as done
  below for the PVC retention policy. */}}
  {{- if semverCompare ">=1.22-0" .Capabilities.KubeVersion.Version }}
  minReadySeconds: 10
  {{- end }}
  serviceName: {{ include "alloy.fullname" . }}
  selector:
    matchLabels:
      {{- include "alloy.selectorLabels" . | nindent 6 }}
  template:
    {{- include "alloy.pod-template" . | nindent 4 }}
  {{- with .Values.controller.updateStrategy }}
  updateStrategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with .Values.controller.volumeClaimTemplates }}
  volumeClaimTemplates:
    {{- range . }}
    - {{ toYaml . | nindent 6 }}
    {{- end }}
  {{- end }}
  {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.controller.enableStatefulSetAutoDeletePVC) }}
  {{- /*
  PVC contents for Alloy are inexpensive to rebuild on restart, so delete
  PVCs on scale-down and on StatefulSet removal to simplify operations.
  */}}
  persistentVolumeClaimRetentionPolicy:
    whenDeleted: Delete
    whenScaled: Delete
  {{- end }}
{{- end }}

@ -0,0 +1,4 @@
{{ range .Values.extraObjects }}
---
{{ tpl (toYaml .) $ }}
{{ end }}

@ -0,0 +1,78 @@
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if and (or (eq .Values.controller.type "deployment") (eq .Values.controller.type "statefulset" )) .Values.controller.autoscaling.enabled }}
{{- if not (empty .Values.controller.autoscaling.targetMemoryUtilizationPercentage)}}
{{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
{{- $_ := $values.resources.requests.memory | required ".Values.alloy.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
{{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
{{- $_ := .Values.configReloader.resources.requests.memory | required ".Values.configReloader.resources.requests.memory is required when using autoscaling based on memory utilization." -}}
{{- end}}
{{- if not (empty .Values.controller.autoscaling.targetCPUUtilizationPercentage)}}
{{- $_ := $values.resources.requests | required ".Values.alloy.resources.requests is required when using autoscaling." -}}
{{- $_ := $values.resources.requests.cpu | required ".Values.alloy.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
{{- $_ := .Values.configReloader.resources.requests | required ".Values.configReloader.resources.requests is required when using autoscaling." -}}
{{- $_ := .Values.configReloader.resources.requests.cpu | required ".Values.configReloader.resources.requests.cpu is required when using autoscaling based on cpu utilization." -}}
{{- end}}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "alloy.fullname" . }}
labels:
{{- include "alloy.labels" . | nindent 4 }}
app.kubernetes.io/component: availability
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: {{ .Values.controller.type }}
name: {{ include "alloy.fullname" . }}
{{- with .Values.controller.autoscaling }}
minReplicas: {{ .minReplicas }}
maxReplicas: {{ .maxReplicas }}
behavior:
{{- with .scaleDown }}
scaleDown:
{{- if .policies }}
policies:
{{- range .policies }}
- type: {{ .type }}
value: {{ .value }}
periodSeconds: {{ .periodSeconds }}
{{- end }}
selectPolicy: {{ .selectPolicy }}
{{- end }}
stabilizationWindowSeconds: {{ .stabilizationWindowSeconds }}
{{- end }}
{{- with .scaleUp }}
scaleUp:
{{- if .policies }}
policies:
{{- range .policies }}
- type: {{ .type }}
value: {{ .value }}
periodSeconds: {{ .periodSeconds }}
{{- end }}
selectPolicy: {{ .selectPolicy }}
{{- end }}
stabilizationWindowSeconds: {{ .stabilizationWindowSeconds }}
{{- end }}
metrics:
# Changing the order of the metrics will cause ArgoCD to go into a sync loop
# memory needs to be first.
# More info in: https://github.com/argoproj/argo-cd/issues/1079
{{- with .targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- with .targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- end }}
{{- end }}

@ -0,0 +1,79 @@
{{- /*
Ingress exposing the Alloy Faro receiver port (.Values.ingress.faroPort).
The alloy.ingress.* helper templates (defined elsewhere in the chart) select
the apiVersion and gate the ingressClassName / pathType fields so this
manifest renders correctly on both the stable networking.k8s.io/v1 API and
the legacy pre-v1 Ingress APIs. Host names, annotations, and TLS entries are
passed through `tpl`, so they may themselves contain template expressions.
*/}}
{{- if .Values.ingress.enabled -}}
{{- $ingressApiIsStable := eq (include "alloy.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "alloy.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "alloy.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "alloy.fullname" . -}}
{{- $servicePort := .Values.ingress.faroPort -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $ingressPathType := .Values.ingress.pathType -}}
{{- $extraPaths := .Values.ingress.extraPaths -}}
apiVersion: {{ include "alloy.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  namespace: {{ include "alloy.namespace" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: networking
    {{- with .Values.ingress.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- range $key, $value := . }}
    {{ $key }}: {{ tpl $value $ | quote }}
    {{- end }}
  {{- end }}
spec:
  {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
  ingressClassName: {{ .Values.ingress.ingressClassName }}
  {{- end -}}
  {{- with .Values.ingress.tls }}
  tls:
    {{- tpl (toYaml .) $ | nindent 4 }}
  {{- end }}
  rules:
  {{- /* With hosts: one rule per host, extra paths first, then the main path. */}}
  {{- if .Values.ingress.hosts }}
  {{- range .Values.ingress.hosts }}
    - host: {{ tpl . $ }}
      http:
        paths:
          {{- with $extraPaths }}
          {{- toYaml . | nindent 10 }}
          {{- end }}
          - path: {{ $ingressPath }}
            {{- if $ingressSupportsPathType }}
            pathType: {{ $ingressPathType }}
            {{- end }}
            backend:
              {{- if $ingressApiIsStable }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $servicePort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $servicePort }}
              {{- end }}
  {{- end }}
  {{- /* Without hosts: a single catch-all rule matching any host. */}}
  {{- else }}
    - http:
        paths:
          - backend:
              {{- if $ingressApiIsStable }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $servicePort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $servicePort }}
              {{- end }}
            {{- with $ingressPath }}
            path: {{ . }}
            {{- end }}
            {{- if $ingressSupportsPathType }}
            pathType: {{ $ingressPathType }}
            {{- end }}
  {{- end -}}
{{- end }}

@ -0,0 +1,31 @@
{{- /*
PodDisruptionBudget for the Alloy controller pods.
Render-time guards fail fast with a clear message instead of letting the
API server reject the manifest: PDBs are rejected for DaemonSets here (one
pod per node, so availability budgets don't apply), and exactly one of
minAvailable / maxUnavailable must be set — the Kubernetes API does not
accept both, and setting neither would make the PDB meaningless.
*/}}
{{- if .Values.controller.podDisruptionBudget.enabled }}
{{- if eq .Values.controller.type "daemonset" }}
{{- fail "PDBs (Pod Disruption Budgets) are not intended for DaemonSets. Please use a different controller type." }}
{{- end }}
{{- if and .Values.controller.podDisruptionBudget.minAvailable .Values.controller.podDisruptionBudget.maxUnavailable }}
{{- fail "Only one of minAvailable or maxUnavailable should be defined for PodDisruptionBudget" }}
{{- end }}
{{- if not (or .Values.controller.podDisruptionBudget.minAvailable .Values.controller.podDisruptionBudget.maxUnavailable) }}
{{- fail "Either minAvailable or maxUnavailable must be defined for PodDisruptionBudget" }}
{{- end }}
apiVersion: {{ include "alloy.controller.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
  name: {{ include "alloy.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "alloy.selectorLabels" . | nindent 6 }}
  {{- if .Values.controller.podDisruptionBudget.minAvailable }}
  minAvailable: {{ .Values.controller.podDisruptionBudget.minAvailable }}
  {{- end }}
  {{- if .Values.controller.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
  {{- end }}
{{- end }}

@ -0,0 +1,111 @@
{{- /*
Cluster-wide RBAC for Alloy, rendered only when .Values.rbac.create is true.
The ClusterRole grants read-only (get/list/watch) access grouped by the Alloy
component that needs it — see the per-rule comments below. The accompanying
ClusterRoleBinding binds the role to the chart's ServiceAccount in the
release namespace.
*/}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: rbac
rules:
  # Rules which allow discovery.kubernetes to function.
  - apiGroups:
      - ""
      - "discovery.k8s.io"
      - "networking.k8s.io"
    resources:
      - endpoints
      - endpointslices
      - ingresses
      - nodes
      - nodes/proxy
      - nodes/metrics
      - pods
      - services
    verbs:
      - get
      - list
      - watch
  # Rules which allow loki.source.kubernetes and loki.source.podlogs to work.
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/log
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "monitoring.grafana.com"
    resources:
      - podlogs
    verbs:
      - get
      - list
      - watch
  # Rules which allow mimir.rules.kubernetes to work.
  - apiGroups: ["monitoring.coreos.com"]
    resources:
      - prometheusrules
    verbs:
      - get
      - list
      - watch
  - nonResourceURLs:
      - /metrics
    verbs:
      - get
  # Rules for prometheus.kubernetes.*
  - apiGroups: ["monitoring.coreos.com"]
    resources:
      - podmonitors
      - servicemonitors
      - probes
    verbs:
      - get
      - list
      - watch
  # Rules which allow eventhandler to work.
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - get
      - list
      - watch
  # needed for remote.kubernetes.*
  - apiGroups: [""]
    resources:
      - "configmaps"
      - "secrets"
    verbs:
      - get
      - list
      - watch
  # needed for otelcol.processor.k8sattributes
  - apiGroups: ["apps"]
    resources: ["replicasets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["extensions"]
    resources: ["replicasets"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: rbac
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "alloy.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "alloy.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}

@ -0,0 +1,42 @@
{{- /*
Service for the Alloy pods: exposes the Alloy HTTP server as "http-metrics"
(port $values.listenPort) plus any entries from alloy.extraPorts.
$values merges .Values.alloy with the legacy .Values.agent key, keeping
values files written for the deprecated grafana-agent naming working.
internalTrafficPolicy is only rendered on Kubernetes >= 1.26.
*/}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if .Values.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: networking
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{- end }}
  selector:
    {{- include "alloy.selectorLabels" . | nindent 4 }}
  {{- if semverCompare ">=1.26-0" .Capabilities.KubeVersion.Version }}
  internalTrafficPolicy: {{.Values.service.internalTrafficPolicy}}
  {{- end }}
  ports:
    - name: http-metrics
      {{- if eq .Values.service.type "NodePort" }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
      port: {{ $values.listenPort }}
      targetPort: {{ $values.listenPort }}
      protocol: "TCP"
    {{- range $portMap := $values.extraPorts }}
    - name: {{ $portMap.name }}
      port: {{ $portMap.port }}
      targetPort: {{ $portMap.targetPort }}
      protocol: {{ coalesce $portMap.protocol "TCP" }}
      {{- if not (empty $portMap.appProtocol) }}
      # Useful for OpenShift clusters that want to expose Alloy ports externally
      appProtocol: {{ $portMap.appProtocol }}
      {{- end }}
    {{- end }}
{{- end }}

@ -0,0 +1,17 @@
{{- /*
ServiceAccount used by the Alloy pods. Rendered only when
.Values.serviceAccount.create is true; otherwise an existing account named
by .Values.serviceAccount.name is expected (resolved by the
alloy.serviceAccountName helper). Extra labels and annotations come from
the corresponding serviceAccount values.
*/}}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "alloy.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: rbac
    {{- with .Values.serviceAccount.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

@ -0,0 +1,36 @@
{{- /*
Prometheus Operator ServiceMonitor (monitoring.coreos.com/v1) scraping the
chart's Service on its "http-metrics" port; requires both the Service and
the ServiceMonitor to be enabled. The scrape scheme follows the Alloy
server's listenScheme, lower-cased to match the ServiceMonitor API.
$values merges .Values.alloy with the deprecated .Values.agent key for
backwards compatibility with the grafana-agent chart.
*/}}
{{- $values := (mustMergeOverwrite .Values.alloy (or .Values.agent dict)) -}}
{{- if and .Values.service.enabled .Values.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "alloy.fullname" . }}
  labels:
    {{- include "alloy.labels" . | nindent 4 }}
    app.kubernetes.io/component: metrics
    {{- with .Values.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  endpoints:
    - port: http-metrics
      scheme: {{ $values.listenScheme | lower }}
      honorLabels: true
      {{- if .Values.serviceMonitor.interval }}
      interval: {{ .Values.serviceMonitor.interval }}
      {{- end }}
      {{- if .Values.serviceMonitor.metricRelabelings }}
      metricRelabelings:
      {{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | nindent 6) . }}
      {{- end }}
      {{- if .Values.serviceMonitor.relabelings }}
      relabelings:
      {{ tpl (toYaml .Values.serviceMonitor.relabelings | nindent 6) . }}
      {{- end }}
      {{- with .Values.serviceMonitor.tlsConfig }}
      tlsConfig:
        {{- toYaml . | nindent 6 }}
      {{- end }}
  selector:
    matchLabels:
      {{- include "alloy.selectorLabels" . | nindent 6 }}
{{- end }}

@ -0,0 +1,378 @@
# -- Overrides the chart's name. Used to change the infix in the resource names.
nameOverride: null
# -- Overrides the chart's computed fullname. Used to change the full prefix of
# resource names.
fullnameOverride: null
## Global properties for image pulling override the values defined under `image.registry` and `configReloader.image.registry`.
## If you want to override only one image registry, use the specific fields but if you want to override them all, use `global.image.registry`
global:
image:
    # -- Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
registry: ""
# -- Optional set of global image pull secrets.
pullSecrets: []
# -- Security context to apply to the Grafana Alloy pod.
podSecurityContext: {}
crds:
# -- Whether to install CRDs for monitoring.
create: true
## Various Alloy settings. For backwards compatibility with the grafana-agent
## chart, this field may also be called "agent". Naming this field "agent" is
## deprecated and will be removed in a future release.
alloy:
configMap:
# -- Create a new ConfigMap for the config file.
create: true
# -- Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values.
content: ''
# -- Name of existing ConfigMap to use. Used when create is false.
name: null
# -- Key in ConfigMap to get config from.
key: null
clustering:
# -- Deploy Alloy in a cluster to allow for load distribution.
enabled: false
# -- Name for the Alloy cluster. Used for differentiating between clusters.
name: ""
# -- Name for the port used for clustering, useful if running inside an Istio Mesh
portName: http
# -- Minimum stability level of components and behavior to enable. Must be
# one of "experimental", "public-preview", or "generally-available".
stabilityLevel: "generally-available"
# -- Path to where Grafana Alloy stores data (for example, the Write-Ahead Log).
# By default, data is lost between reboots.
storagePath: /tmp/alloy
# -- Address to listen for traffic on. 0.0.0.0 exposes the UI to other
# containers.
listenAddr: 0.0.0.0
# -- Port to listen for traffic on.
listenPort: 12345
# -- Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS"
listenScheme: HTTP
# -- Base path where the UI is exposed.
uiPathPrefix: /
# -- Enables sending Grafana Labs anonymous usage stats to help improve Grafana
# Alloy.
enableReporting: true
# -- Extra environment variables to pass to the Alloy container.
extraEnv: []
# -- Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core
envFrom: []
# -- Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/
extraArgs: []
# -- Extra ports to expose on the Alloy container.
extraPorts: []
# - name: "faro"
# port: 12347
# targetPort: 12347
# protocol: "TCP"
# appProtocol: "h2c"
# -- Host aliases to add to the Alloy container.
hostAliases: []
# - ip: "20.21.22.23"
# hostnames:
# - "company.grafana.net"
mounts:
# -- Mount /var/log from the host into the container for log collection.
varlog: false
# -- Mount /var/lib/docker/containers from the host into the container for log
# collection.
dockercontainers: false
# -- Extra volume mounts to add into the Grafana Alloy container. Does not
# affect the watch container.
extra: []
# -- Security context to apply to the Grafana Alloy container.
securityContext: {}
# -- Resource requests and limits to apply to the Grafana Alloy container.
resources: {}
# -- Set lifecycle hooks for the Grafana Alloy container.
lifecycle: {}
# preStop:
# exec:
# command:
# - /bin/sleep
# - "10"
image:
# -- Grafana Alloy image registry (defaults to docker.io)
registry: "docker.io"
# -- Grafana Alloy image repository.
repository: grafana/alloy
# -- (string) Grafana Alloy image tag. When empty, the Chart's appVersion is
# used.
tag: null
# -- Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
digest: null
# -- Grafana Alloy image pull policy.
pullPolicy: IfNotPresent
# -- Optional set of image pull secrets.
pullSecrets: []
rbac:
# -- Whether to create RBAC resources for Alloy.
create: true
serviceAccount:
# -- Whether to create a service account for the Grafana Alloy deployment.
create: true
# -- Additional labels to add to the created service account.
additionalLabels: {}
# -- Annotations to add to the created service account.
annotations: {}
# -- The name of the existing service account to use when
# serviceAccount.create is false.
name: null
# Options for the extra controller used for config reloading.
configReloader:
# -- Enables automatically reloading when the Alloy config changes.
enabled: true
image:
# -- Config reloader image registry (defaults to docker.io)
registry: "ghcr.io"
# -- Repository to get config reloader image from.
repository: jimmidyson/configmap-reload
# -- Tag of image to use for config reloading.
tag: v0.14.0
# -- SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag`
digest: ""
# -- Override the args passed to the container.
customArgs: []
# -- Resource requests and limits to apply to the config reloader container.
resources:
requests:
cpu: "1m"
memory: "5Mi"
# -- Security context to apply to the Grafana configReloader container.
securityContext: {}
controller:
# -- Type of controller to use for deploying Grafana Alloy in the cluster.
# Must be one of 'daemonset', 'deployment', or 'statefulset'.
type: 'daemonset'
# -- Number of pods to deploy. Ignored when controller.type is 'daemonset'.
replicas: 1
# -- Annotations to add to controller.
extraAnnotations: {}
# -- Whether to deploy pods in parallel. Only used when controller.type is
# 'statefulset'.
parallelRollout: true
# -- Configures Pods to use the host network. When set to true, the ports that will be used must be specified.
hostNetwork: false
# -- Configures Pods to use the host PID namespace.
hostPID: false
# -- Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ClusterFirst
# -- Termination grace period in seconds for the Grafana Alloy pods.
  # The default value used by Kubernetes if unspecified is 30 seconds.
terminationGracePeriodSeconds: null
# -- Update strategy for updating deployed Pods.
updateStrategy: {}
# -- nodeSelector to apply to Grafana Alloy pods.
nodeSelector: {}
# -- Tolerations to apply to Grafana Alloy pods.
tolerations: []
# -- Topology Spread Constraints to apply to Grafana Alloy pods.
topologySpreadConstraints: []
# -- priorityClassName to apply to Grafana Alloy pods.
priorityClassName: ''
# -- Extra pod annotations to add.
podAnnotations: {}
# -- Extra pod labels to add.
podLabels: {}
# -- PodDisruptionBudget configuration.
podDisruptionBudget:
# -- Whether to create a PodDisruptionBudget for the controller.
enabled: false
# -- Minimum number of pods that must be available during a disruption.
# Note: Only one of minAvailable or maxUnavailable should be set.
minAvailable: null
# -- Maximum number of pods that can be unavailable during a disruption.
# Note: Only one of minAvailable or maxUnavailable should be set.
maxUnavailable: null
# -- Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'.
enableStatefulSetAutoDeletePVC: false
autoscaling:
# -- Creates a HorizontalPodAutoscaler for controller type deployment.
enabled: false
# -- The lower limit for the number of replicas to which the autoscaler can scale down.
minReplicas: 1
# -- The upper limit for the number of replicas to which the autoscaler can scale up.
maxReplicas: 5
# -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
targetCPUUtilizationPercentage: 0
# -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
targetMemoryUtilizationPercentage: 80
scaleDown:
# -- List of policies to determine the scale-down behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-down policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
stabilizationWindowSeconds: 300
scaleUp:
# -- List of policies to determine the scale-up behavior.
policies: []
# - type: Pods
# value: 4
# periodSeconds: 60
# -- Determines which of the provided scaling-up policies to apply if multiple are specified.
selectPolicy: Max
# -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
stabilizationWindowSeconds: 0
# -- Affinity configuration for pods.
affinity: {}
volumes:
# -- Extra volumes to add to the Grafana Alloy pod.
extra: []
# -- volumeClaimTemplates to add when controller.type is 'statefulset'.
volumeClaimTemplates: []
## -- Additional init containers to run.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
initContainers: []
# -- Additional containers to run alongside the Alloy container and initContainers.
extraContainers: []
service:
# -- Creates a Service for the controller's pods.
enabled: true
# -- Service type
type: ClusterIP
# -- NodePort port. Only takes effect when `service.type: NodePort`
nodePort: 31128
# -- Cluster IP, can be set to None, empty "" or an IP address
clusterIP: ''
# -- Value for internal traffic policy. 'Cluster' or 'Local'
internalTrafficPolicy: Cluster
annotations: {}
# cloud.google.com/load-balancer-type: Internal
serviceMonitor:
enabled: false
# -- Additional labels for the service monitor.
additionalLabels: {}
# -- Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# -- MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# -- Customize tls parameters for the service monitor
tlsConfig: {}
# -- RelabelConfigs to apply to samples before scraping
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
ingress:
# -- Enables ingress for Alloy (Faro port)
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
faroPort: 12347
  # pathType is only for k8s >= 1.18
pathType: Prefix
hosts:
- chart-example.local
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- Extra k8s manifests to deploy
extraObjects: []
# - apiVersion: v1
# kind: Secret
# metadata:
# name: grafana-cloud
# stringData:
# PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
# PROMETHEUS_USERNAME: '123456'

@ -0,0 +1,6 @@
docs
schema-mods
tests
Makefile
README.md
README.md.gotmpl

@ -0,0 +1,3 @@
dependencies: []
digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726
generated: "2024-09-25T13:45:54.706765-05:00"

@ -0,0 +1,11 @@
apiVersion: v2
appVersion: 1.0.0
description: Gathers metrics automatically based on Kubernetes Pod and Service annotations
icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg
maintainers:
- email: pete.wall@grafana.com
name: petewall
name: feature-annotation-autodiscovery
sources:
- https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/k8s-monitoring/charts/feature-annotation-autodiscovery
version: 1.0.0

@ -0,0 +1,29 @@
{{/*
Create a default fully qualified name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "feature.annotationAutodiscovery.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride | lower }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }}
{{- end }}
{{- end }}
{{- end }}
{{- /*
Sanitize an annotation name for use in a Prometheus discovery meta-label:
"-", "." and "/" all become "_" (e.g. "k8s.grafana.com/scrape" ->
"k8s_grafana_com_scrape").
*/}}
{{- define "escape_annotation" -}}
{{ . | replace "-" "_" | replace "." "_" | replace "/" "_" }}
{{- end }}
{{- /*
Meta-label name exposed by discovery.kubernetes for a pod annotation.
*/}}
{{- define "pod_annotation" -}}
{{ printf "__meta_kubernetes_pod_annotation_%s" (include "escape_annotation" .) }}
{{- end }}
{{- /*
Meta-label name exposed by discovery.kubernetes for a service annotation.
*/}}
{{- define "service_annotation" -}}
{{ printf "__meta_kubernetes_service_annotation_%s" (include "escape_annotation" .) }}
{{- end }}

@ -0,0 +1,266 @@
{{- define "feature.annotationAutodiscovery.module" }}
declare "annotation_autodiscovery" {
argument "metrics_destinations" {
comment = "Must be a list of metric destinations where collected metrics should be forwarded to"
}
discovery.kubernetes "pods" {
role = "pod"
{{- if .Values.namespaces }}
namespaces {
names = {{ .Values.namespaces | toJson }}
}
{{- end }}
}
discovery.relabel "annotation_autodiscovery_pods" {
targets = discovery.kubernetes.pods.targets
{{- if .Values.excludeNamespaces }}
rule {
source_labels = ["__meta_kubernetes_namespace"]
regex = "{{ join "|" .Values.excludeNamespaces }}"
action = "drop"
}
{{- end }}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.scrape }}"]
regex = "true"
action = "keep"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.job }}"]
action = "replace"
target_label = "job"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.instance }}"]
action = "replace"
target_label = "instance"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPath }}"]
action = "replace"
target_label = "__metrics_path__"
}
// Choose the pod port
// The discovery generates a target for each declared container port of the pod.
// If the metricsPortName annotation has value, keep only the target where the port name matches the one of the annotation.
rule {
source_labels = ["__meta_kubernetes_pod_container_port_name"]
target_label = "__tmp_port"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortName }}"]
regex = "(.+)"
target_label = "__tmp_port"
}
rule {
source_labels = ["__meta_kubernetes_pod_container_port_name"]
action = "keepequal"
target_label = "__tmp_port"
}
rule {
action = "labeldrop"
regex = "__tmp_port"
}
    // If the metrics port number annotation has a value, override the target address to use it, regardless of
    // whether it is one of the declared ports on that Pod.
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortNumber }}", "__meta_kubernetes_pod_ip"]
regex = "(\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})"
replacement = "[$2]:$1" // IPv6
target_label = "__address__"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortNumber }}", "__meta_kubernetes_pod_ip"]
regex = "(\\d+);((([0-9]+?)(\\.|$)){4})" // IPv4, takes priority over IPv6 when both exists
replacement = "$2:$1"
target_label = "__address__"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsScheme }}"]
action = "replace"
target_label = "__scheme__"
}
rule {
source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsScrapeInterval }}"]
action = "replace"
target_label = "__scrape_interval__"
}
{{- if .Values.extraDiscoveryRules }}
{{ .Values.extraDiscoveryRules | indent 4 }}
{{- end }}
}
discovery.kubernetes "services" {
role = "service"
{{- if .Values.namespaces }}
namespaces {
names = {{ .Values.namespaces | toJson }}
}
{{- end }}
}
discovery.relabel "annotation_autodiscovery_services" {
targets = discovery.kubernetes.services.targets
{{- if .Values.excludeNamespaces }}
rule {
source_labels = ["__meta_kubernetes_namespace"]
regex = "{{ join "|" .Values.excludeNamespaces }}"
action = "drop"
}
{{- end }}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.scrape }}"]
regex = "true"
action = "keep"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.job }}"]
action = "replace"
target_label = "job"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.instance }}"]
action = "replace"
target_label = "instance"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPath }}"]
action = "replace"
target_label = "__metrics_path__"
}
// Choose the service port
rule {
source_labels = ["__meta_kubernetes_service_port_name"]
target_label = "__tmp_port"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPortName }}"]
regex = "(.+)"
target_label = "__tmp_port"
}
rule {
source_labels = ["__meta_kubernetes_service_port_name"]
action = "keepequal"
target_label = "__tmp_port"
}
rule {
source_labels = ["__meta_kubernetes_service_port_number"]
target_label = "__tmp_port"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPortNumber }}"]
regex = "(.+)"
target_label = "__tmp_port"
}
rule {
source_labels = ["__meta_kubernetes_service_port_number"]
action = "keepequal"
target_label = "__tmp_port"
}
rule {
action = "labeldrop"
regex = "__tmp_port"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.metricsScheme }}"]
action = "replace"
target_label = "__scheme__"
}
rule {
source_labels = ["{{ include "service_annotation" .Values.annotations.metricsScrapeInterval }}"]
action = "replace"
target_label = "__scrape_interval__"
}
{{- if .Values.extraDiscoveryRules }}
{{ .Values.extraDiscoveryRules | indent 4 }}
{{- end }}
}
discovery.relabel "annotation_autodiscovery_http" {
targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output)
rule {
source_labels = ["__scheme__"]
regex = "https"
action = "drop"
}
}
discovery.relabel "annotation_autodiscovery_https" {
targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output)
rule {
source_labels = ["__scheme__"]
regex = "https"
action = "keep"
}
}
prometheus.scrape "annotation_autodiscovery_http" {
targets = discovery.relabel.annotation_autodiscovery_http.output
scrape_interval = {{ .Values.scrapeInterval | default .Values.global.scrapeInterval | quote }}
honor_labels = true
{{- if .Values.bearerToken.enabled }}
bearer_token_file = {{ .Values.bearerToken.token | quote }}
{{- end }}
clustering {
enabled = true
}
{{ if or .Values.metricsTuning.includeMetrics .Values.metricsTuning.excludeMetrics .Values.extraMetricProcessingRules }}
forward_to = [prometheus.relabel.annotation_autodiscovery.receiver]
{{- else }}
forward_to = argument.metrics_destinations.value
{{- end }}
}
prometheus.scrape "annotation_autodiscovery_https" {
targets = discovery.relabel.annotation_autodiscovery_https.output
scrape_interval = {{ .Values.scrapeInterval | default .Values.global.scrapeInterval | quote }}
honor_labels = true
{{- if .Values.bearerToken.enabled }}
bearer_token_file = {{ .Values.bearerToken.token | quote }}
{{- end }}
tls_config {
insecure_skip_verify = true
}
clustering {
enabled = true
}
{{ if or .Values.metricsTuning.includeMetrics .Values.metricsTuning.excludeMetrics .Values.extraMetricProcessingRules }}
forward_to = [prometheus.relabel.annotation_autodiscovery.receiver]
}
prometheus.relabel "annotation_autodiscovery" {
max_cache_size = {{ .Values.maxCacheSize | default .Values.global.maxCacheSize | int }}
{{- if .Values.metricsTuning.includeMetrics }}
rule {
source_labels = ["__name__"]
regex = "up|scrape_samples_scraped|{{ join "|" .Values.metricsTuning.includeMetrics }}"
action = "keep"
}
{{- end }}
{{- if .Values.metricsTuning.excludeMetrics }}
rule {
source_labels = ["__name__"]
regex = {{ join "|" .Values.metricsTuning.excludeMetrics | quote }}
action = "drop"
}
{{- end }}
{{- if .Values.extraMetricProcessingRules }}
{{ .Values.extraMetricProcessingRules | indent 4 }}
{{- end }}
{{- end }}
forward_to = argument.metrics_destinations.value
}
}
{{- end -}}
{{- define "feature.annotationAutodiscovery.alloyModules" }}{{- end }}

@ -0,0 +1,11 @@
{{- /*
Notes helpers for the annotation-autodiscovery feature. "deployments" and
"actions" are deliberately empty for this feature; "task" is its one-line
description (interpolating the configured scrape annotation name); "summary"
reports the feature chart's version.
*/}}
{{- define "feature.annotationAutodiscovery.notes.deployments" }}{{- end }}
{{- define "feature.annotationAutodiscovery.notes.task" }}
Scrape metrics from pods and services with the "{{.Values.annotations.scrape}}: true" annotation
{{- end }}
{{- define "feature.annotationAutodiscovery.notes.actions" }}{{- end }}
{{- define "feature.annotationAutodiscovery.summary" -}}
version: {{ .Chart.Version }}
{{- end }}

@ -0,0 +1,11 @@
{{- /*
Optionally ships the rendered annotation-autodiscovery Alloy module as a
ConfigMap (key "module.alloy") instead of having the parent chart inline it.
Gated by .Values.deployAsConfigMap.
*/}}
{{- if .Values.deployAsConfigMap }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "feature.annotationAutodiscovery.fullname" . }}
  namespace: {{ .Release.Namespace }}
data:
  module.alloy: |-
    {{- include "feature.annotationAutodiscovery.module" . | indent 4 }}
{{- end }}

@ -0,0 +1,95 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"annotations": {
"type": "object",
"properties": {
"instance": {
"type": "string"
},
"job": {
"type": "string"
},
"metricsPath": {
"type": "string"
},
"metricsPortName": {
"type": "string"
},
"metricsPortNumber": {
"type": "string"
},
"metricsScheme": {
"type": "string"
},
"metricsScrapeInterval": {
"type": "string"
},
"scrape": {
"type": "string"
}
}
},
"bearerToken": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"token": {
"type": "string"
}
}
},
"deployAsConfigMap": {
"type": "boolean"
},
"excludeNamespaces": {
"type": "array"
},
"extraDiscoveryRules": {
"type": "string"
},
"extraMetricProcessingRules": {
"type": "string"
},
"fullnameOverride": {
"type": "string"
},
"global": {
"type": "object",
"properties": {
"maxCacheSize": {
"type": "integer"
},
"scrapeInterval": {
"type": "string"
}
}
},
"maxCacheSize": {
"type": "null"
},
"metricsTuning": {
"type": "object",
"properties": {
"excludeMetrics": {
"type": "array"
},
"includeMetrics": {
"type": "array"
}
}
},
"nameOverride": {
"type": "string"
},
"namespaces": {
"type": "array"
},
"scrapeInterval": {
"type": "string"
}
}
}

@ -0,0 +1,102 @@
---
# -- Name override
# @section -- General settings
nameOverride: ""
# -- Full name override
# @section -- General settings
fullnameOverride: ""
global:
  # -- How frequently to scrape metrics.
  # @section -- Global Settings
  scrapeInterval: 60s
  # -- Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments))
  # This should be at least 2x-5x your largest scrape target or samples appended rate.
  # @section -- Global Settings
  maxCacheSize: 100000
# Annotations that are used to discover and configure metric scraping targets. Add these annotations
# to your services or pods to control how autodiscovery will find and scrape metrics from your service or pod.
annotations:
  # -- Annotation for enabling scraping for this service or pod. Value should be either "true" or "false"
  # @section -- Annotations
  scrape: "k8s.grafana.com/scrape"
  # -- Annotation for overriding the job label
  # @section -- Annotations
  job: "k8s.grafana.com/job"
  # -- Annotation for overriding the instance label
  # @section -- Annotations
  instance: "k8s.grafana.com/instance"
  # -- Annotation for setting or overriding the metrics path. If not set, it defaults to /metrics
  # @section -- Annotations
  metricsPath: "k8s.grafana.com/metrics.path"
  # -- Annotation for setting the metrics port by name.
  # @section -- Annotations
  metricsPortName: "k8s.grafana.com/metrics.portName"
  # -- Annotation for setting the metrics port by number.
  # @section -- Annotations
  metricsPortNumber: "k8s.grafana.com/metrics.portNumber"
  # -- Annotation for setting the metrics scheme, default: http.
  # @section -- Annotations
  metricsScheme: "k8s.grafana.com/metrics.scheme"
  # -- Annotation for overriding the scrape interval for this service or pod. Value should be a duration like "15s, 1m".
  # Overrides metrics.autoDiscover.scrapeInterval
  # @section -- Annotations
  metricsScrapeInterval: "k8s.grafana.com/metrics.scrapeInterval"
# -- The list of namespaces to include in autodiscovery. If empty, all namespaces are included.
# @section -- Discovery Settings
namespaces: []
# -- The list of namespaces to exclude from autodiscovery.
# @section -- Discovery Settings
excludeNamespaces: []
# -- Rule blocks to be added to the prometheus.operator.podmonitors component for PodMonitors.
# These relabeling rules are applied pre-scrape against the targets from service discovery.
# The relabelings defined in the PodMonitor object are applied first, then these relabelings are applied.
# Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped.
# ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block))
# @section -- Discovery Settings
extraDiscoveryRules: ""
# -- How frequently to scrape metrics from PodMonitor objects. Only used if the PodMonitor does not specify the scrape interval.
# Overrides global.scrapeInterval
# @default -- 60s
# @section -- Scrape Settings
scrapeInterval: ""
# Adjustments to the scraped metrics to filter the amount of metrics sent to storage.
# @section -- Metric Processing Settings
metricsTuning:
  # -- Metrics to keep. Can use regular expressions.
  # @section -- Metric Processing Settings
  includeMetrics: []
  # -- Metrics to drop. Can use regular expressions.
  # @section -- Metric Processing Settings
  excludeMetrics: []
# -- Rule blocks to be added to the prometheus.relabel component for PodMonitor objects.
# These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present.
# ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#rule-block))
# @section -- Metric Processing Settings
extraMetricProcessingRules: ""
# -- Sets the max_cache_size for this feature's prometheus.relabel component.
# This should be at least 2x-5x your largest scrape target or samples appended rate.
# ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.relabel/#arguments))
# Overrides global.maxCacheSize
# @raw
# @section -- Metric Processing Settings
maxCacheSize:
# -- Sets bearer_token_file line in the prometheus.scrape annotation_autodiscovery.
# @section -- Scrape Settings
bearerToken:
  enabled: true
  token: /var/run/secrets/kubernetes.io/serviceaccount/token
# @ignore
deployAsConfigMap: false

@ -0,0 +1,6 @@
docs
schema-mods
tests
Makefile
README.md
README.md.gotmpl

@ -0,0 +1,3 @@
dependencies: []
digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726
generated: "2024-09-25T13:46:10.334192-05:00"

@ -0,0 +1,11 @@
apiVersion: v2
appVersion: 1.0.0
description: Gathers application data
icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg
maintainers:
  - email: pete.wall@grafana.com
    name: petewall
name: feature-application-observability
sources:
- https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/k8s-monitoring/charts/feature-application-observability
version: 1.0.0

@ -0,0 +1,14 @@
{{/* Inputs: Values (values), metrics (destination expression), name */}}
{{/* Derives host metrics from received traces, identified by the k8s.node.name resource attribute. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.connector.host_info/ */}}
{{- define "feature.applicationObservability.connector.host_info.alloy.target" }}otelcol.connector.host_info.{{ .name | default "default" }}.input{{- end }}
{{- define "feature.applicationObservability.connector.host_info.alloy" }}
otelcol.connector.host_info "{{ .name | default "default" }}" {
  host_identifiers = [ "k8s.node.name" ]
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,31 @@
{{/* Inputs: Values (values), logs (destination expression), name */}}
{{/* Generates log lines from spans, per .Values.connectors.spanLogs settings. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.connector.spanlogs/ */}}
{{- define "feature.applicationObservability.connector.spanlogs.alloy.target" }}otelcol.connector.spanlogs.{{ .name | default "default" }}.input{{- end }}
{{- define "feature.applicationObservability.connector.spanlogs.alloy" }}
otelcol.connector.spanlogs "{{ .name | default "default" }}" {
  {{- if .Values.connectors.spanLogs.spans }}
  spans = true
  {{- end }}
  {{- if .Values.connectors.spanLogs.spansAttributes }}
  spans_attributes = {{ .Values.connectors.spanLogs.spansAttributes | toJson }}
  {{- end }}
  {{- if .Values.connectors.spanLogs.roots }}
  roots = true
  {{- end }}
  {{- if .Values.connectors.spanLogs.process }}
  process = true
  {{- end }}
  {{- if .Values.connectors.spanLogs.processAttributes }}
  process_attributes = {{ .Values.connectors.spanLogs.processAttributes | toJson }}
  {{- end }}
  {{- if .Values.connectors.spanLogs.labels }}
  labels = {{ .Values.connectors.spanLogs.labels | toJson }}
  {{- end }}
  output {
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,51 @@
{{/* Inputs: Values (values), metrics (destination expression), name */}}
{{/* Derives request/duration metrics from spans; options come from .Values.connectors.spanMetrics. */}}
{{/* The exemplars block uses "{{- if" (chomped) like its siblings so no stray blank line is rendered. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.connector.spanmetrics/ */}}
{{- define "feature.applicationObservability.connector.spanmetrics.alloy.target" }}otelcol.connector.spanmetrics.{{ .name | default "default" }}.input{{- end }}
{{- define "feature.applicationObservability.connector.spanmetrics.alloy" }}
otelcol.connector.spanmetrics "{{ .name | default "default" }}" {
{{- /* Each configured dimension becomes an extra metric label, with an optional default value. */}}
{{- range $dimension := .Values.connectors.spanMetrics.dimensions }}
  dimension {
    name = {{ $dimension.name | quote }}
    {{- if $dimension.default }}
    default = {{ $dimension.default | quote }}
    {{- end }}
  }
{{- end }}
  dimensions_cache_size = {{ .Values.connectors.spanMetrics.dimensionsCacheSize }}
  namespace = {{ .Values.connectors.spanMetrics.namespace | quote }}
{{- if .Values.connectors.spanMetrics.events.enabled }}
  events {
    enabled = true
  }
{{- end }}
{{- if .Values.connectors.spanMetrics.exemplars.enabled }}
  exemplars {
    enabled = true
    {{- if .Values.connectors.spanMetrics.exemplars.maxPerDataPoint }}
    max_per_data_point = {{ .Values.connectors.spanMetrics.exemplars.maxPerDataPoint }}
    {{- end }}
  }
{{- end }}
{{- if .Values.connectors.spanMetrics.histogram.enabled }}
  histogram {
    disable = false
    unit = {{ .Values.connectors.spanMetrics.histogram.unit | quote }}
    {{- if eq .Values.connectors.spanMetrics.histogram.type "explicit" }}
    explicit {
      buckets = {{ .Values.connectors.spanMetrics.histogram.explicit.buckets | toJson }}
    }
    {{- else if eq .Values.connectors.spanMetrics.histogram.type "exponential" }}
    exponential {
      max_size = {{ .Values.connectors.spanMetrics.histogram.exponential.maxSize }}
    }
    {{- end }}
  }
{{- end }}
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,30 @@
{{/*
Create a default fully qualified name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Every branch also lower-cases the result and strips a trailing "-" so the
output is a valid DNS label.
*/}}
{{- define "feature.applicationObservability.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride | lower }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Joins a list into a human-readable English list:
[] -> "", [a] -> "a", [a b] -> "a and b", [a b c] -> "a, b, and c".
*/}}
{{- define "english_list" }}
{{- if eq (len .) 0 }}
{{- else if eq (len .) 1 }}
{{- index . 0 }}
{{- else if eq (len .) 2 }}
{{- index . 0 }} and {{ index . 1 }}
{{- else }}
{{- $last := index . (sub (len .) 1) }}
{{- $rest := slice . 0 (sub (len .) 1) }}
{{- join ", " $rest }}, and {{ $last }}
{{- end }}
{{- end }}

@ -0,0 +1,36 @@
{{/*
Assembles the application_observability Alloy module: walks the pipeline
description (feature.applicationObservability.pipeline), renders each
component via its "...<component>.alloy" template, and wires its
metrics/logs/traces outputs to the next stage(s).
*/}}
{{- define "feature.applicationObservability.module" }}
declare "application_observability" {
  argument "metrics_destinations" {
    comment = "Must be a list of metrics destinations where collected metrics should be forwarded to"
  }
  argument "logs_destinations" {
    comment = "Must be a list of log destinations where collected logs should be forwarded to"
  }
  argument "traces_destinations" {
    comment = "Must be a list of trace destinations where collected trace should be forwarded to"
  }
  {{- $pipeline := include "feature.applicationObservability.pipeline" . | fromYamlArray }}
  {{- range $component := $pipeline }}
    {{- $args := (dict "Values" $.Values "name" $component.name) }}
    {{- range $dataType := (list "metrics" "logs" "traces")}}
      {{- /* A string target is passed through verbatim; a list of targets is resolved through each entry's "...alloy.target" template. */}}
      {{- if kindIs "string" (index $component.targets $dataType) }}
        {{- $args = merge $args (dict $dataType (index $component.targets $dataType)) }}
      {{- else if kindIs "slice" (index $component.targets $dataType) }}
        {{- $targets := list }}
        {{- range $target := (index $component.targets $dataType) }}
          {{- $targets = append $targets (include (printf "feature.applicationObservability.%s.alloy.target" $target.component) $target) }}
        {{- end }}
        {{- $args = merge $args (dict $dataType (printf "[%s]" (join ", " $targets))) }}
      {{- end }}
    {{- end }}
  // {{ $component.description | trim }}
  {{- include (printf "feature.applicationObservability.%s.alloy" $component.component) $args | indent 2 }}
  {{- end }}
}
{{- end }}
{{/* Left empty on purpose: this feature imports no external Alloy modules. */}}
{{- define "feature.applicationObservability.alloyModules" }}{{- end }}

@ -0,0 +1,52 @@
{{/* NOTES helpers for the application-observability feature: no extra deployments are reported. */}}
{{- define "feature.applicationObservability.notes.deployments" }}{{- end }}
{{/* One-line description listing the enabled receivers, e.g. "OTLP gRPC and Zipkin". */}}
{{- define "feature.applicationObservability.notes.task" }}
{{- $receivers := list }}
{{- if .Values.receivers.otlp.grpc.enabled }}{{- $receivers = append $receivers "OTLP gRPC" }}{{ end }}
{{- if .Values.receivers.otlp.http.enabled }}{{- $receivers = append $receivers "OTLP HTTP" }}{{ end }}
{{- if .Values.receivers.jaeger.grpc.enabled }}{{- $receivers = append $receivers "Jaeger gRPC" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftBinary.enabled }}{{- $receivers = append $receivers "Jaeger Thrift Binary" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftCompact.enabled }}{{- $receivers = append $receivers "Jaeger Thrift Compact" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftHttp.enabled }}{{- $receivers = append $receivers "Jaeger Thrift HTTP" }}{{ end }}
{{- if .Values.receivers.zipkin.enabled }}{{- $receivers = append $receivers "Zipkin" }}{{ end }}
{{- $receiverWord := len $receivers | plural "receiver" "receivers" }}
Gather application data via {{ include "english_list" $receivers }} {{ $receiverWord }}
{{- end }}
{{/* User-facing list of in-cluster endpoints for each enabled receiver. Requires .Collector context. */}}
{{- define "feature.applicationObservability.notes.actions" }}
Configure your applications to send telemetry data to:
{{- if .Values.receivers.otlp.grpc.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.otlp.grpc.port }} (OTLP gRPC)
{{- end }}
{{- if .Values.receivers.otlp.http.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.otlp.http.port }} (OTLP HTTP)
{{- end }}
{{- if .Values.receivers.jaeger.grpc.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.jaeger.grpc.port }} (Jaeger gRPC)
{{- end }}
{{- if .Values.receivers.jaeger.thriftBinary.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.jaeger.thriftBinary.port }} (Jaeger Thrift Binary)
{{- end }}
{{- if .Values.receivers.jaeger.thriftCompact.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.jaeger.thriftCompact.port }} (Jaeger Thrift Compact)
{{- end }}
{{- if .Values.receivers.jaeger.thriftHttp.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.jaeger.thriftHttp.port }} (Jaeger Thrift HTTP)
{{- end }}
{{- if .Values.receivers.zipkin.enabled }}
* http://{{ .Collector.ServiceName }}.{{ .Collector.Namespace }}.svc.cluster.local:{{ .Values.receivers.zipkin.port }} (Zipkin)
{{- end }}
{{- end }}
{{/* Machine-readable summary (chart version plus enabled protocol slugs) for the parent chart. */}}
{{- define "feature.applicationObservability.summary" -}}
{{- $receivers := list }}
{{- if .Values.receivers.otlp.grpc.enabled }}{{- $receivers = append $receivers "otlpgrpc" }}{{ end }}
{{- if .Values.receivers.otlp.http.enabled }}{{- $receivers = append $receivers "otlphttp" }}{{ end }}
{{- if .Values.receivers.jaeger.grpc.enabled }}{{- $receivers = append $receivers "jaegergrpc" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftBinary.enabled }}{{- $receivers = append $receivers "jaegerthriftbinary" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftCompact.enabled }}{{- $receivers = append $receivers "jaegerthriftcompact" }}{{ end }}
{{- if .Values.receivers.jaeger.thriftHttp.enabled }}{{- $receivers = append $receivers "jaegerthrifthttp" }}{{ end }}
{{- if .Values.receivers.zipkin.enabled }}{{- $receivers = append $receivers "zipkin" }}{{ end }}
version: {{ .Chart.Version }}
protocols: {{ $receivers | join "," }}
{{- end }}

@ -0,0 +1,161 @@
{{- /*
Declares the telemetry pipeline as a YAML list, consumed via fromYamlArray by
"feature.applicationObservability.module". Fixes applied here: the YAML
nesting under each entry is restored, and the duplicate "traces:" key under
the Transform Processor targets is removed (the first value was dead code,
since the YAML parser keeps only the last occurrence of a duplicate key).
*/}}
{{- define "feature.applicationObservability.pipeline" }}
# Format:
# - name: Alloy component name
#   description: Human friendly description of the component
#   component: Component slug (used for including "feature.applicationObservability.%s.alloy")
#   targets:
#     <type>: <list>
#       - name: Name of the target
#         component: Component slug (used for including "feature.applicationObservability.%s.alloy.target")
#     <type>: <string> Raw target string (useful for terminating with argument.<type>_destinations.value)
#     <type>: <null> No target defined for this type
{{- if or .Values.receivers.otlp.grpc.enabled .Values.receivers.otlp.http.enabled }}
- name: default
  description: OTLP Receiver
  component: receiver.otlp
  targets:
{{- if .Values.processors.memoryLimiter.enabled }}
    metrics: [{name: default, component: processor.memory_limiter}]
    logs: [{name: default, component: processor.memory_limiter}]
    traces: [{name: default, component: processor.memory_limiter}]
{{- else }}
    metrics: [{name: default, component: processor.resourcedetection}]
    logs: [{name: default, component: processor.resourcedetection}]
    traces: [{name: default, component: processor.resourcedetection}]
{{- end }}
{{- end }}
{{- if or .Values.receivers.jaeger.grpc.enabled .Values.receivers.jaeger.thriftBinary.enabled .Values.receivers.jaeger.thriftCompact.enabled .Values.receivers.jaeger.thriftHttp.enabled }}
- name: default
  description: Jaeger Receiver
  component: receiver.jaeger
  targets:
{{- if .Values.processors.memoryLimiter.enabled }}
    traces: [{name: default, component: processor.memory_limiter}]
{{- else }}
    traces: [{name: default, component: processor.resourcedetection}]
{{- end }}
{{- end }}
{{- if .Values.receivers.zipkin.enabled }}
- name: default
  description: Zipkin Receiver
  component: receiver.zipkin
  targets:
{{- if .Values.processors.memoryLimiter.enabled }}
    traces: [{name: default, component: processor.memory_limiter}]
{{- else }}
    traces: [{name: default, component: processor.resourcedetection}]
{{- end }}
{{- end }}
{{- if .Values.processors.memoryLimiter.enabled }}
- name: default
  description: Memory Limiter
  component: processor.memory_limiter
  targets:
    metrics: [{name: default, component: processor.resourcedetection}]
    logs: [{name: default, component: processor.resourcedetection}]
    traces: [{name: default, component: processor.resourcedetection}]
{{- end }}
- name: default
  description: Resource Detection Processor
  component: processor.resourcedetection
  targets:
    metrics: [{name: default, component: processor.k8sattributes}]
    logs: [{name: default, component: processor.k8sattributes}]
    traces: [{name: default, component: processor.k8sattributes}]
- name: default
  description: K8s Attributes Processor
  component: processor.k8sattributes
  targets:
    metrics: [{name: default, component: processor.transform}]
    logs: [{name: default, component: processor.transform}]
{{- /* NOTE(review): this assumes .Values.processors.grafanaCloudMetrics is defined (falls back to the connectors setting) — confirm against the chart's values.yaml. */}}
{{- if (index .Values.processors "grafanaCloudMetrics").enabled | default .Values.connectors.grafanaCloudMetrics.enabled }}
    traces: [{name: default, component: processor.transform}, {name: default, component: connector.host_info}]
- name: default
  description: Host Info Connector
  component: connector.host_info
  targets:
    metrics: [{name: default, component: processor.batch}]
{{- else }}
    traces: [{name: default, component: processor.transform}]
{{- end }}
{{- $filterEnabled := eq (include "feature.applicationObservability.processor.filter.enabled" .) "true" }}
- name: default
  description: Transform Processor
  component: processor.transform
  targets:
    traces:
{{- if .Values.connectors.spanLogs.enabled}}
    - {name: default, component: connector.spanlogs}
{{- end }}
{{- if .Values.connectors.spanMetrics.enabled}}
    - {name: default, component: connector.spanmetrics}
{{- end }}
{{- if $filterEnabled }}
    - {name: default, component: processor.filter}
    metrics: [{name: default, component: processor.filter}]
    logs: [{name: default, component: processor.filter}]
{{- else }}
    - {name: default, component: processor.batch}
    metrics: [{name: default, component: processor.batch}]
    logs: [{name: default, component: processor.batch}]
{{- end }}
{{- if .Values.connectors.spanLogs.enabled}}
- name: default
  description: Span Logs Connector
  component: connector.spanlogs
  targets:
{{- if $filterEnabled }}
    logs: [{name: default, component: processor.filter}]
{{- else }}
    logs: [{name: default, component: processor.batch}]
{{- end }}
{{- end }}
{{- if .Values.connectors.spanMetrics.enabled}}
- name: default
  description: Span Metrics Connector
  component: connector.spanmetrics
  targets:
{{- if $filterEnabled }}
    metrics: [{name: default, component: processor.filter}]
{{- else }}
    metrics: [{name: default, component: processor.batch}]
{{- end }}
{{- end }}
{{- if $filterEnabled }}
- name: default
  description: Filter Processor
  component: processor.filter
  targets:
    metrics: [{name: default, component: processor.batch}]
    logs: [{name: default, component: processor.batch}]
    traces: [{name: default, component: processor.batch}]
{{- end }}
- name: default
  description: Batch Processor
  component: processor.batch
  targets:
{{- if .Values.processors.interval.enabled }}
    metrics: [{name: default, component: processor.interval}]
    logs: [{name: default, component: processor.interval}]
    traces: [{name: default, component: processor.interval}]
- name: default
  description: Interval Processor
  component: processor.interval
  targets:
{{- end }}
    metrics: argument.metrics_destinations.value
    logs: argument.logs_destinations.value
    traces: argument.traces_destinations.value
{{- end }}

@ -0,0 +1,21 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* Batches telemetry before export; sizes/timeout come from .Values.processors.batch. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.batch/ */}}
{{- define "feature.applicationObservability.processor.batch.alloy.target" }}otelcol.processor.batch.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.batch.alloy" }}
otelcol.processor.batch {{ .name | default "default" | quote }} {
  send_batch_size = {{ .Values.processors.batch.size }}
  send_batch_max_size = {{ .Values.processors.batch.maxSize }}
  timeout = {{ .Values.processors.batch.timeout | quote}}
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,74 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.filter/ */}}
{{/* "...filter.enabled" renders "true" when any OTTL filter expression is configured for an enabled data type; the pipeline only inserts this stage in that case. */}}
{{- define "feature.applicationObservability.processor.filter.enabled" }}
{{- if and .Values.metrics.enabled (or .Values.metrics.filters.metric .Values.metrics.filters.datapoint) -}}
true
{{- else if and .Values.logs.enabled .Values.logs.filters.log_record -}}
true
{{- else if and .Values.traces.enabled (or .Values.traces.filters.span .Values.traces.filters.spanevent) -}}
true
{{- else -}}
false
{{- end }}
{{- end }}
{{/* Drops telemetry matching the configured OTTL expressions and forwards the rest. */}}
{{- define "feature.applicationObservability.processor.filter.alloy.target" }}otelcol.processor.filter.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.filter.alloy" }}
otelcol.processor.filter "{{ .name | default "default" }}" {
  {{- if and .Values.metrics.enabled (or .Values.metrics.filters.metric .Values.metrics.filters.datapoint) }}
  metrics {
    {{- if .Values.metrics.filters.metric }}
    metric = [
      {{- range $filter := .Values.metrics.filters.metric }}
      {{ $filter | quote | indent 6 }},
      {{- end }}
    ]
    {{- end }}
    {{- if .Values.metrics.filters.datapoint }}
    datapoint = [
      {{- range $filter := .Values.metrics.filters.datapoint }}
      {{ $filter | quote | indent 6 }},
      {{- end }}
    ]
    {{- end }}
  }
  {{- end }}
  {{- if and .Values.logs.enabled .Values.logs.filters.log_record }}
  logs {
    log_record = [
      {{- range $filter := .Values.logs.filters.log_record }}
      {{ $filter | quote | indent 6 }},
      {{- end }}
    ]
  }
  {{- end }}
  {{- if and .Values.traces.enabled (or .Values.traces.filters.span .Values.traces.filters.spanevent) }}
  traces {
    {{- if .Values.traces.filters.span }}
    span = [
      {{- range $filter := .Values.traces.filters.span }}
      {{ $filter | quote | indent 6 }},
      {{- end }}
    ]
    {{- end }}
    {{- if .Values.traces.filters.spanevent }}
    spanevent = [
      {{- range $filter := .Values.traces.filters.spanevent }}
      {{ $filter | quote | indent 6 }},
      {{- end }}
    ]
    {{- end }}
  }
  {{- end }}
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,23 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* Aggregates data over .Values.processors.interval.interval; gauges/summaries can pass through unchanged. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.interval/ */}}
{{- define "feature.applicationObservability.processor.interval.alloy.target" }}otelcol.processor.interval.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.interval.alloy" }}
otelcol.processor.interval {{ .name | default "default" | quote }} {
  interval = {{ .Values.processors.interval.interval | quote }}
  passthrough {
    gauge = {{ .Values.processors.interval.passthrough.gauge }}
    summary = {{ .Values.processors.interval.passthrough.summary }}
  }
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,55 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* Enriches telemetry with Kubernetes metadata/labels/annotations from .Values.processors.k8sattributes. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.k8sattributes/ */}}
{{- define "feature.applicationObservability.processor.k8sattributes.alloy.target" }}otelcol.processor.k8sattributes.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.k8sattributes.alloy" }}
otelcol.processor.k8sattributes "{{ .name | default "default" }}" {
  extract {
    {{- if .Values.processors.k8sattributes.metadata }}
    metadata = {{ .Values.processors.k8sattributes.metadata | toJson }}
    {{- end }}
    {{- range .Values.processors.k8sattributes.labels }}
    label {
      {{- range $k, $v := . }}
      {{ $k }} = {{ $v | quote }}
      {{- end }}
    }
    {{- end }}
    {{- range .Values.processors.k8sattributes.annotations }}
    annotation {
      {{- range $k, $v := . }}
      {{ $k }} = {{ $v | quote }}
      {{- end }}
    }
    {{- end }}
  }
  {{- /* Pods are matched first by the k8s.pod.ip attribute, then k8s.pod.uid, then the peer connection address. */}}
  pod_association {
    source {
      from = "resource_attribute"
      name = "k8s.pod.ip"
    }
  }
  pod_association {
    source {
      from = "resource_attribute"
      name = "k8s.pod.uid"
    }
  }
  pod_association {
    source {
      from = "connection"
    }
  }
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,20 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* Guards against collector OOM by limiting memory use per .Values.processors.memoryLimiter. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.memory_limiter/ */}}
{{- define "feature.applicationObservability.processor.memory_limiter.alloy.target" }}otelcol.processor.memory_limiter.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.memory_limiter.alloy" }}
otelcol.processor.memory_limiter "{{ .name | default "default" }}" {
  check_interval = {{ .Values.processors.memoryLimiter.checkInterval | quote }}
  limit = {{ .Values.processors.memoryLimiter.limit | quote }}
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}

@ -0,0 +1,86 @@
{{/* Inputs: Values (values), metrics, logs, traces (destination expressions), name */}}
{{/* Renders one configuration sub-block per enabled detector from .Values.processors.resourceDetection. */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.resourcedetection/ */}}
{{- define "feature.applicationObservability.processor.resourcedetection.alloy.target" }}otelcol.processor.resourcedetection.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.resourcedetection.alloy" }}
{{- $detectors := include "feature.applicationObservability.processor.resourcedetection.detectors" . | fromYamlArray }}
otelcol.processor.resourcedetection "{{ .name | default "default" }}" {
  detectors = {{ $detectors | sortAlpha | toJson }}
{{- range $detector := $detectors }}
  {{- /* Skip env, it has no settings */}}
  {{- if ne $detector "env" }}
  {{ $detectorValues := index $.Values.processors.resourceDetection $detector }}
  {{- /* Fix the case style for kubernetesNode --> kubernetes_node */}}
  {{- if eq $detector "kubernetesNode" }}
  kubernetes_node {
  {{- else }}
  {{ $detector }} {
  {{- end }}
  {{- /* Handle detectors with special arguments */}}
  {{- if eq $detector "ec2" }}
  {{- if $detectorValues.tags }}
    tags = {{ $detectorValues.tags | toJson }}
  {{- end }}
  {{- end }}
  {{- if eq $detector "consul" }}
    {{ if $detectorValues.address }}address = {{ $detectorValues.address | quote }}{{ end }}
    {{ if $detectorValues.datacenter }}datacenter = {{ $detectorValues.datacenter | quote }}{{ end }}
    {{ if $detectorValues.token }}token = {{ $detectorValues.token | quote }}{{ end }}
    {{ if $detectorValues.namespace }}namespace = {{ $detectorValues.namespace | quote }}{{ end }}
    {{ if $detectorValues.meta }}meta = {{ $detectorValues.meta | toJson }}{{ end }}
  {{- end }}
  {{- if eq $detector "system" }}
  {{- if $detectorValues.hostnameSources }}
    hostname_sources = {{ $detectorValues.hostnameSources | toJson }}
  {{- end }}
  {{- end }}
  {{- if eq $detector "openshift" }}
    {{ if $detectorValues.address }}address = {{ $detectorValues.address | quote }}{{ end }}
    {{ if $detectorValues.token }}token = {{ $detectorValues.token | quote }}{{ end }}
  {{- end }}
  {{- if eq $detector "kubernetesNode" }}
    {{ if $detectorValues.authType }}auth_type = {{ $detectorValues.authType | quote }}{{ end }}
    {{ if $detectorValues.nodeFromEnvVar }}node_from_env_var = {{ $detectorValues.nodeFromEnvVar | quote }}{{ end }}
  {{- end }}
  {{- if $detectorValues.resourceAttributes }}
    resource_attributes {
      {{- range $key, $value := $detectorValues.resourceAttributes }}
      {{ if $value.enabled }}{{ $key }} { enabled = true }{{ end }}
      {{- end }}
    }
  {{- end }}
  }
  {{- end }}
{{- end }}
  output {
    {{- if and .metrics .Values.metrics.enabled }}
    metrics = {{ .metrics }}
    {{- end }}
    {{- if and .logs .Values.logs.enabled }}
    logs = {{ .logs }}
    {{- end }}
    {{- if and .traces .Values.traces.enabled }}
    traces = {{ .traces }}
    {{- end }}
  }
}
{{- end }}
{{/* Emits a JSON array (parsed back with fromYamlArray) of all detector names whose values have enabled=true. */}}
{{- define "feature.applicationObservability.processor.resourcedetection.detectors" }}
{{- $enabledDetectors := list }}
{{- range $detector, $options := .Values.processors.resourceDetection }}
{{- if $options.enabled }}
{{- $enabledDetectors = append $enabledDetectors $detector }}
{{- end }}
{{- end }}
{{ $enabledDetectors | toJson }}
{{- end }}
{{/* Renders "true" when at least one detector is enabled. */}}
{{- define "feature.applicationObservability.processor.resourcedetection.enabled" }}
{{- $detectors := include "feature.applicationObservability.processor.resourcedetection.detectors" . | fromYamlArray }}
{{- gt (len $detectors) 0 }}
{{- end }}

{{/* Inputs: Values (values) metricsOutput, logsOutput, tracesOutput, name */}}
{{/* https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.processor.transform/ */}}
{{- /* Renders the Alloy target (input receiver) of the transform processor
       component produced by "feature.applicationObservability.processor.transform.alloy",
       e.g. `otelcol.processor.transform.default.input` when .name is unset. */}}
{{- define "feature.applicationObservability.processor.transform.alloy.target" }}otelcol.processor.transform.{{ .name | default "default" }}.input{{ end }}
{{- define "feature.applicationObservability.processor.transform.alloy" }}
otelcol.processor.transform "{{ .name | default "default" }}" {
error_mode = "ignore"
{{- if .Values.metrics.enabled }}
{{- if .Values.metrics.transforms.resource }}
metric_statements {
context = "resource"
statements = [
{{- range $transform := .Values.metrics.transforms.resource }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- if .Values.metrics.transforms.metric }}
metric_statements {
context = "metric"
statements = [
{{- range $transform := .Values.metrics.transforms.metric }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- if .Values.metrics.transforms.datapoint }}
metric_statements {
context = "datapoint"
statements = [
{{- range $transform := .Values.metrics.transforms.datapoint }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- end }}
{{- if .Values.logs.enabled }}
log_statements {
context = "resource"
statements = [
{{- if .Values.logs.transforms.resource }}
{{- range $transform := .Values.logs.transforms.resource }}
{{ $transform | quote | indent 6 }},
{{- end }}
{{- end }}
"set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])",
"set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])",
"set(attributes[\"loki.resource.labels\"], \"{{ .Values.logs.transforms.labels | join ", " }}\")",
]
}
{{- if .Values.logs.transforms.log }}
log_statements {
context = "log"
statements = [
{{- range $transform := .Values.logs.transforms.log }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- end }}
{{- if .Values.traces.enabled }}
{{- if .Values.traces.transforms.resource }}
trace_statements {
context = "resource"
statements = [
{{- range $transform := .Values.traces.transforms.resource }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- if .Values.traces.transforms.span }}
trace_statements {
context = "span"
statements = [
{{- range $transform := .Values.traces.transforms.span }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- if .Values.traces.transforms.spanevent }}
trace_statements {
context = "spanevent"
statements = [
{{- range $transform := .Values.traces.transforms.spanevent }}
{{ $transform | quote | indent 6 }},
{{- end }}
]
}
{{- end }}
{{- end }}
output {
{{- if and .metrics .Values.metrics.enabled }}
metrics = {{ .metrics }}
{{- end }}
{{- if and .logs .Values.logs.enabled }}
logs = {{ .logs }}
{{- end }}
{{- if and .traces .Values.traces.enabled }}
traces = {{ .traces }}
{{- end }}
}
}
{{- end }}
