diff --git a/api/observability/v1/conditions.go b/api/observability/v1/conditions.go index c94ab1e8c1..796c914e78 100644 --- a/api/observability/v1/conditions.go +++ b/api/observability/v1/conditions.go @@ -17,7 +17,6 @@ package v1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" const ( - // ConditionTrue means the condition is met ConditionTrue = metav1.ConditionTrue @@ -30,8 +29,12 @@ const ( // ConditionTypeAuthorized identifies the state of authorization for the service ConditionTypeAuthorized = GroupName + "/Authorized" + // ConditionTypeLogLevel validates the value of the log-level annotation ConditionTypeLogLevel = GroupName + "/LogLevel" + // ConditionTypeMaxUnavailable validates the value of the max-unavailable-rollout annotation + ConditionTypeMaxUnavailable = GroupName + "/MaxUnavailableAnnotation" + // ConditionTypeReady indicates the service is ready. // // Ready=True means the operands are running and providing some service. @@ -77,9 +80,12 @@ const ( // ReasonMissingSpec applies when a type is specified without a defined spec (e.g. type application without obs.Application) ReasonMissingSpec = "MissingSpec" - // ReasonLogLevelSupported indicates the support for the log level annotation value + // ReasonLogLevelSupported indicates the support for the log-level annotation value ReasonLogLevelSupported = "LogLevelSupported" + // ReasonMaxUnavailableSupported indicates the support for the max-unavailable-rollout annotation value + ReasonMaxUnavailableSupported = "MaxUnavailableAnnotationSupported" + // ReasonReconciliationComplete when the operator has initialized, validated, and deployed the resources for the workload ReasonReconciliationComplete = "ReconciliationComplete" diff --git a/api/observability/v1/output_types.go b/api/observability/v1/output_types.go index 967b1c9abc..1a5ced65b0 100644 --- a/api/observability/v1/output_types.go +++ b/api/observability/v1/output_types.go @@ -744,7 +744,7 @@ type Kafka struct { Brokers []BrokerURL `json:"brokers,omitempty"` } -// +kubebuilder:validation:XValidation:rule="self == '' || (isURL(self) && (self.startsWith('tcp://') || self.startsWith('tls://')))",message="each broker must be a valid URL with a tcp or tls scheme" +// +kubebuilder:validation:XValidation:rule="isURL(self) && (self.startsWith('tcp://') || self.startsWith('tls://'))",message="each broker must be a valid URL with a tcp or tls scheme" type BrokerURL string type LokiTuningSpec struct { diff --git a/bundle/manifests/collector_monitoring.coreos.com_v1_prometheusrule.yaml b/bundle/manifests/collector_monitoring.coreos.com_v1_prometheusrule.yaml index fa1ec303b2..7a7e8f0004 100644 --- a/bundle/manifests/collector_monitoring.coreos.com_v1_prometheusrule.yaml +++ b/bundle/manifests/collector_monitoring.coreos.com_v1_prometheusrule.yaml @@ -6,6 +6,21 @@ spec: groups: - name: logging_collector.alerts rules: + - alert: ClusterLogForwarderDeprecations + annotations: + message: The Cluster Logging Operator version {{$labels.version}} includes + deprecations to some feature of ClusterLogForwarder. + summary: |- + The Cluster Logging Operator version {{$labels.version}} includes deprecations to some features of ClusterLogForwarder which + will be removed in a future release. 
Please see the release notes for details: + https://docs.redhat.com/en/documentation/red_hat_openshift_logging/6.2/html/release_notes + expr: | + max by (version) (csv_succeeded{exported_namespace="openshift-logging", name=~"cluster-logging.*", version=~"6.2.*"}) > 0 + for: 1m + labels: + namespace: openshift-logging + service: collector + severity: info - alert: CollectorNodeDown annotations: description: Prometheus could not scrape {{ $labels.namespace }}/{{ $labels.pod diff --git a/bundle/manifests/observability.openshift.io_clusterlogforwarders.yaml b/bundle/manifests/observability.openshift.io_clusterlogforwarders.yaml index 098415d286..0486b41d0a 100644 --- a/bundle/manifests/observability.openshift.io_clusterlogforwarders.yaml +++ b/bundle/manifests/observability.openshift.io_clusterlogforwarders.yaml @@ -1627,8 +1627,7 @@ spec: x-kubernetes-validations: - message: each broker must be a valid URL with a tcp or tls scheme - rule: self == '' || (isURL(self) && (self.startsWith('tcp://') - || self.startsWith('tls://'))) + rule: isURL(self) && (self.startsWith('tcp://') || self.startsWith('tls://')) type: array topic: description: |- diff --git a/config/crd/bases/observability.openshift.io_clusterlogforwarders.yaml b/config/crd/bases/observability.openshift.io_clusterlogforwarders.yaml index 48e76eaf21..1e73d95258 100644 --- a/config/crd/bases/observability.openshift.io_clusterlogforwarders.yaml +++ b/config/crd/bases/observability.openshift.io_clusterlogforwarders.yaml @@ -1627,8 +1627,7 @@ spec: x-kubernetes-validations: - message: each broker must be a valid URL with a tcp or tls scheme - rule: self == '' || (isURL(self) && (self.startsWith('tcp://') - || self.startsWith('tls://'))) + rule: isURL(self) && (self.startsWith('tcp://') || self.startsWith('tls://')) type: array topic: description: |- diff --git a/config/prometheus/collector_alerts.yaml b/config/prometheus/collector_alerts.yaml index 2d5cdf8786..005304b684 100644 --- a/config/prometheus/collector_alerts.yaml +++ b/config/prometheus/collector_alerts.yaml @@ -7,6 +7,20 @@ spec: groups: - name: logging_collector.alerts rules: + - alert: ClusterLogForwarderDeprecations + annotations: + message: "The Cluster Logging Operator version {{$labels.version}} includes deprecations to some feature of ClusterLogForwarder." + summary: |- + The Cluster Logging Operator version {{$labels.version}} includes deprecations to some features of ClusterLogForwarder which + will be removed in a future release. Please see the release notes for details: + https://docs.redhat.com/en/documentation/red_hat_openshift_logging/6.2/html/release_notes + expr: | + max by (version) (csv_succeeded{exported_namespace="openshift-logging", name=~"cluster-logging.*", version=~"6.2.*"}) > 0 + for: 1m + labels: + namespace: openshift-logging + service: collector + severity: info - alert: CollectorNodeDown annotations: description: "Prometheus could not scrape {{ $labels.namespace }}/{{ $labels.pod }} collector component for more than 10m." 
diff --git a/docs/features/kube-api-annotations.adoc b/docs/features/kube-api-annotations.adoc
new file mode 100644
index 0000000000..abdd20756e
--- /dev/null
+++ b/docs/features/kube-api-annotations.adoc
@@ -0,0 +1,103 @@
+== Reducing memory pressure on the Kubernetes API server
+Steps to introduce rolling update configuration for the logging collector pods in large-scale clusters
+
+IMPORTANT: Enabling this feature through an annotation is deprecated and will be https://issues.redhat.com/browse/LOG-7587[replaced] in a future release by
+directly editing the ClusterLogForwarder spec.
+
+=== Description
+This feature enables the `use_apiserver_cache` setting in the generated vector.toml and adds a configurable rolling
+update `maxUnavailable` to the forwarder's DaemonSet. The `maxUnavailable` value is set through an annotation.
+
+==== Configuration
+* Update your ClusterLogForwarder instance and include the following `metadata.annotations`:
++
+[source,yaml]
+----
+  observability.openshift.io/max-unavailable-rollout:
+----
++
+.example forwarder
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  annotations:
+    observability.openshift.io/max-unavailable-rollout: "20%"
+  name: my-forwarder
+  namespace: my-logging-namespace
+spec:
+  ...
+----
+NOTE: `max-unavailable-rollout` can be an absolute number (e.g., 1) or a percentage (e.g., 10%). The default is 100%.
++
+If you need guidance on updating your forwarder instance, please see the sections below.
+
+==== Verifying
+* The following command can be used to verify the rollout option is set on the forwarder DaemonSet:
++
+.forwarder daemonset
+[source,bash]
+----
+  oc get ds -ojson | jq '.items[0].spec.updateStrategy'
+----
+
+
+===== Conditions
+* You can verify there are no `False` conditions in the forwarder validation
++
+.forwarder status
+[source,bash]
+----
+  oc get obsclf -ojson | jq '.items[0].status.conditions'
+----
++
+.invalid examples
+[source,json]
+----
+  {
+    "message": "max-unavailable-rollout value \"200%\" must be an absolute number or a valid percentage",
+    "reason": "MaxUnavailableAnnotationSupported",
+    "status": "False",
+    "type": "observability.openshift.io/MaxUnavailableAnnotation"
+  }
+
+----
++
+NOTE: Conditions for annotations are only shown when a value is invalid and the status is set to `False`. If there are no entries that mention
+annotations, either the annotations were not found or their values are valid.
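For reference, the accepted annotation values can be checked against the same pattern the operator uses for validation. The following standalone Go sketch mirrors `validMaxUnavailableRegex` from `internal/validations/observability/validate_annotations.go` (added later in this patch); the sample values are illustrative only.

[source,go]
----
package main

import (
	"fmt"
	"regexp"
)

// Mirrors validMaxUnavailableRegex: a whole number >= 1, or a percentage from 1% to 100%.
var maxUnavailable = regexp.MustCompile(`^(100%|[1-9][0-9]?%|[1-9][0-9]*)$`)

func main() {
	for _, v := range []string{"1", "50", "99%", "100%", "0", "01", "200%", "2.5", "fluffy"} {
		fmt.Printf("%q accepted: %v\n", v, maxUnavailable.MatchString(v))
	}
}
----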
+ +==== Other Commands +==== +* You can add an annotation using `oc patch` on the clusterlogforwarder instance ++ +.example command +[source,bash] +---- + oc patch obsclf --type='merge' -p '{"metadata":{"annotations":{"observability.openshift.io/max-unavailable-rollout":"20%"}}}' +---- +* Alternatively, you can pull down the forwarder instance and make your changes locally ++ +[source,bash] +---- + oc get obsclf -o yaml > my-forwarder.yaml +---- ++ +Then apply the local file ++ +[source,bash] +---- + oc apply -f my-forwarder.yaml +---- +* You could also use `oc edit` directly on the instance ++ +[source,bash] +---- + oc edit obsclf +---- +==== + +==== References +* Annotation Implemented: https://issues.redhat.com/browse/LOG-7196 +* Knowledgebase Article: https://access.redhat.com/solutions/7121949 +* Upstream Fix: https://github.com/vectordotdev/vector/pull/17095/files diff --git a/internal/collector/collector.go b/internal/collector/collector.go index cc14d40f92..7bb1208214 100644 --- a/internal/collector/collector.go +++ b/internal/collector/collector.go @@ -67,6 +67,8 @@ type Factory struct { ResourceNames *factory.ForwarderResourceNames isDaemonset bool LogLevel string + UseKubeCache bool + MaxUnavailable string } // CollectorResourceRequirements returns the resource requirements for a given collector implementation @@ -85,7 +87,7 @@ func (f *Factory) Tolerations() []v1.Toleration { return f.CollectorSpec.Tolerations } -func New(confHash, clusterID string, collectorSpec *obs.CollectorSpec, secrets internalobs.Secrets, configMaps map[string]*v1.ConfigMap, forwarderSpec obs.ClusterLogForwarderSpec, resNames *factory.ForwarderResourceNames, isDaemonset bool, logLevel string) *Factory { +func New(confHash, clusterID string, collectorSpec *obs.CollectorSpec, secrets internalobs.Secrets, configMaps map[string]*v1.ConfigMap, forwarderSpec obs.ClusterLogForwarderSpec, resNames *factory.ForwarderResourceNames, isDaemonset bool, logLevel string, maxUnavailable string) *Factory { if collectorSpec == nil { collectorSpec = &obs.CollectorSpec{} } @@ -105,13 +107,14 @@ func New(confHash, clusterID string, collectorSpec *obs.CollectorSpec, secrets i PodLabelVisitor: vector.PodLogExcludeLabel, isDaemonset: isDaemonset, LogLevel: logLevel, + MaxUnavailable: maxUnavailable, } return factory } func (f *Factory) NewDaemonSet(namespace, name string, trustedCABundle *v1.ConfigMap, tlsProfileSpec configv1.TLSProfileSpec) *apps.DaemonSet { podSpec := f.NewPodSpec(trustedCABundle, f.ForwarderSpec, f.ClusterID, tlsProfileSpec, namespace) - ds := factory.NewDaemonSet(namespace, name, name, constants.CollectorName, constants.VectorName, *podSpec, f.CommonLabelInitializer, f.PodLabelVisitor) + ds := factory.NewDaemonSet(namespace, name, name, constants.CollectorName, constants.VectorName, f.MaxUnavailable, *podSpec, f.CommonLabelInitializer, f.PodLabelVisitor) ds.Spec.Template.Annotations[constants.AnnotationSecretHash] = f.Secrets.Hash64a() return ds } diff --git a/internal/constants/annotations.go b/internal/constants/annotations.go index 3dff57059e..24f556a5ac 100644 --- a/internal/constants/annotations.go +++ b/internal/constants/annotations.go @@ -17,4 +17,8 @@ const ( AnnotationOtlpOutputTechPreview = "observability.openshift.io/tech-preview-otlp-output" AnnotationSecretHash = "observability.openshift.io/secret-hash" + + // AnnotationMaxUnavailable (Deprecated) configures the maximum number of DaemonSet pods that can be unavailable during a rolling update. 
+ // This can be an absolute number (e.g., 1) or a percentage (e.g., 10%). Default is 100%. + AnnotationMaxUnavailable = "observability.openshift.io/max-unavailable-rollout" ) diff --git a/internal/controller/observability/collector.go b/internal/controller/observability/collector.go index 858df133c9..08ce724082 100644 --- a/internal/controller/observability/collector.go +++ b/internal/controller/observability/collector.go @@ -21,6 +21,7 @@ import ( "github.com/openshift/cluster-logging-operator/internal/runtime/serviceaccount" "github.com/openshift/cluster-logging-operator/internal/tls" "github.com/openshift/cluster-logging-operator/internal/utils" + "github.com/openshift/cluster-logging-operator/internal/validations/observability" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -42,6 +43,9 @@ func ReconcileCollector(context internalcontext.ForwarderContext, pollInterval, options = context.AdditionalContext } + // Set rollout options based on annotation (LOG-7196) + SetMaxUnavailableRolloutOption(context.Forwarder.Annotations, options) + if internalobs.Outputs(context.Forwarder.Spec.Outputs).NeedServiceAccountToken() { // temporarily create SA token until collector is capable of dynamically reloading a projected serviceaccount token var sa *corev1.ServiceAccount @@ -88,28 +92,40 @@ func ReconcileCollector(context internalcontext.ForwarderContext, pollInterval, isDaemonSet := !internalobs.DeployAsDeployment(*context.Forwarder) log.V(3).Info("Deploying as DaemonSet", "isDaemonSet", isDaemonSet) - factory := collector.New(collectorConfHash, context.ClusterID, context.Forwarder.Spec.Collector, context.Secrets, context.ConfigMaps, context.Forwarder.Spec, resourceNames, isDaemonSet, LogLevel(context.Forwarder.Annotations)) - if err = factory.ReconcileCollectorConfig(context.Client, context.Reader, context.Forwarder.Namespace, collectorConfig, ownerRef); err != nil { + collectorFactory := collector.New( + collectorConfHash, + context.ClusterID, + context.Forwarder.Spec.Collector, + context.Secrets, context.ConfigMaps, + context.Forwarder.Spec, + resourceNames, + isDaemonSet, + LogLevel(context.Forwarder.Annotations), + factory.GetMaxUnavailableValue(options), + ) + + if err = collectorFactory.ReconcileCollectorConfig(context.Client, context.Reader, context.Forwarder.Namespace, collectorConfig, ownerRef); err != nil { log.Error(err, "collector.ReconcileCollectorConfig") return } - reconcileWorkload := factory.ReconcileDaemonset + reconcileWorkload := collectorFactory.ReconcileDaemonset if !isDaemonSet { - reconcileWorkload = factory.ReconcileDeployment + reconcileWorkload = collectorFactory.ReconcileDeployment } + if err := reconcileWorkload(context.Client, context.Forwarder.Namespace, trustedCABundle, ownerRef); err != nil { log.Error(err, "Error reconciling the deployment of the collector") return err } - if err := factory.ReconcileInputServices(context.Client, context.Reader, context.Forwarder.Namespace, ownerRef, factory.CommonLabelInitializer); err != nil { + if err := collectorFactory.ReconcileInputServices(context.Client, context.Reader, context.Forwarder.Namespace, ownerRef, collectorFactory.CommonLabelInitializer); err != nil { log.Error(err, "collector.ReconcileInputServices") return err } // Reconcile resources to support metrics gathering - if err := network.ReconcileService(context.Client, context.Forwarder.Namespace, resourceNames.CommonName, context.Forwarder.Name, constants.CollectorName, collector.MetricsPortName, 
resourceNames.SecretMetrics, collector.MetricsPort, ownerRef, factory.CommonLabelInitializer); err != nil { + if err := network.ReconcileService(context.Client, context.Forwarder.Namespace, resourceNames.CommonName, context.Forwarder.Name, constants.CollectorName, collector.MetricsPortName, resourceNames.SecretMetrics, collector.MetricsPort, ownerRef, collectorFactory.CommonLabelInitializer); err != nil { log.Error(err, "collector.ReconcileService") return err } @@ -122,12 +138,12 @@ func ReconcileCollector(context internalcontext.ForwarderContext, pollInterval, return nil } -func GenerateConfig(k8Client client.Client, spec obs.ClusterLogForwarder, resourceNames factory.ForwarderResourceNames, secrets internalobs.Secrets, op framework.Options) (config string, err error) { +func GenerateConfig(k8Client client.Client, clf obs.ClusterLogForwarder, resourceNames factory.ForwarderResourceNames, secrets internalobs.Secrets, op framework.Options) (config string, err error) { tlsProfile, _ := tls.FetchAPIServerTlsProfile(k8Client) op[framework.ClusterTLSProfileSpec] = tls.GetClusterTLSProfileSpec(tlsProfile) //EvaluateAnnotationsForEnabledCapabilities(clusterRequest.Forwarder, op) g := forwardergenerator.New() - generatedConfig, err := g.GenerateConf(secrets, spec.Spec, spec.Namespace, spec.Name, resourceNames, op) + generatedConfig, err := g.GenerateConf(secrets, clf.Spec, clf.Namespace, clf.Name, resourceNames, op) if err != nil { log.Error(err, "Unable to generate log configuration") @@ -149,6 +165,11 @@ func EvaluateAnnotationsForEnabledCapabilities(annotations map[string]string, op if strings.ToLower(value) == "true" { options[generatorhelpers.EnableDebugOutput] = "true" } + case constants.AnnotationMaxUnavailable: + // Matching the validate_annotations logic + if observability.IsPercentOrWholeNumber(value) { + options[framework.MaxUnavailableOption] = value + } } } } @@ -159,3 +180,12 @@ func LogLevel(annotations map[string]string) string { } return "warn" } + +func SetMaxUnavailableRolloutOption(annotations map[string]string, options framework.Options) { + if value, found := annotations[constants.AnnotationMaxUnavailable]; found { + if observability.IsPercentOrWholeNumber(value) { + log.V(3).Info("Max Unavailable annotation found") + options[framework.MaxUnavailableOption] = value + } + } +} diff --git a/internal/controller/observability/collector_features_test.go b/internal/controller/observability/collector_features_test.go index 215bd62c9e..39889b0b82 100644 --- a/internal/controller/observability/collector_features_test.go +++ b/internal/controller/observability/collector_features_test.go @@ -39,6 +39,10 @@ var _ = Describe("#EvaluateAnnotationsForEnabledCapabilities", func() { Entry("enables debug for true", helpers.EnableDebugOutput, "true", AnnotationDebugOutput, "true"), Entry("enables debug for True", helpers.EnableDebugOutput, "true", AnnotationDebugOutput, "True"), Entry("disables debug for anything else", "", "", AnnotationDebugOutput, "abcdef"), + + Entry("enables max-unavailable for value '10'", framework.MaxUnavailableOption, "10", AnnotationMaxUnavailable, "10"), + Entry("enables max-unavailable for value '99%'", framework.MaxUnavailableOption, "99%", AnnotationMaxUnavailable, "99%"), + Entry("disables max-unavailable option for anything not a number or percentage", "", "", AnnotationMaxUnavailable, "fluffy"), ) }) diff --git a/internal/factory/daemonset.go b/internal/factory/daemonset.go index b3ebd678ca..43a05dbf58 100644 --- a/internal/factory/daemonset.go +++ 
b/internal/factory/daemonset.go @@ -1,26 +1,26 @@ package factory import ( + "github.com/openshift/cluster-logging-operator/internal/generator/framework" "github.com/openshift/cluster-logging-operator/internal/runtime" + "github.com/openshift/cluster-logging-operator/internal/utils" + "github.com/openshift/cluster-logging-operator/internal/validations/observability" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" ) // NewDaemonSet stubs an instance of a daemonset -func NewDaemonSet(namespace, daemonsetName, instanceName, component, impl string, podSpec core.PodSpec, visitors ...func(o runtime.Object)) *apps.DaemonSet { +func NewDaemonSet(namespace, daemonsetName, instanceName, component, impl, maxUnavailable string, podSpec core.PodSpec, visitors ...func(o runtime.Object)) *apps.DaemonSet { selectors := runtime.Selectors(instanceName, component, impl) annotations := map[string]string{ "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, } - + intOrStringValue := intstr.Parse(maxUnavailable) strategy := apps.DaemonSetUpdateStrategy{ Type: apps.RollingUpdateDaemonSetStrategyType, RollingUpdate: &apps.RollingUpdateDaemonSet{ - MaxUnavailable: &intstr.IntOrString{ - Type: intstr.String, - StrVal: "100%", - }, + MaxUnavailable: &intOrStringValue, }, } ds := runtime.NewDaemonSet(namespace, daemonsetName, visitors...) @@ -31,3 +31,13 @@ func NewDaemonSet(namespace, daemonsetName, instanceName, component, impl string WithPodSpec(podSpec) return ds } + +// GetMaxUnavailableValue checks the framework options for the flag maxUnavailableRollout +// Default is 100% +func GetMaxUnavailableValue(op framework.Options) string { + value, _ := utils.GetOption(op, framework.MaxUnavailableOption, "100%") + if !observability.IsPercentOrWholeNumber(value) { + value = "100%" + } + return value +} diff --git a/internal/factory/daemonset_test.go b/internal/factory/daemonset_test.go index c3137361ac..9e77806259 100644 --- a/internal/factory/daemonset_test.go +++ b/internal/factory/daemonset_test.go @@ -2,7 +2,9 @@ package factory import ( . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" + "github.com/openshift/cluster-logging-operator/internal/generator/framework" "github.com/openshift/cluster-logging-operator/internal/runtime" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" @@ -11,20 +13,57 @@ import ( var _ = Describe("#NewDaemonSet", func() { var ( - daemonSet *apps.DaemonSet - expSelectors = runtime.Selectors("theinstancename", "thecomponent", "thecomponent") + daemonSet *apps.DaemonSet + expSelectors = runtime.Selectors("theinstancename", "thecomponent", "thecomponent") + op = framework.Options{} + maxUnavailable string ) - BeforeEach(func() { - daemonSet = NewDaemonSet("thenamespace", "thenname", "theinstancename", "thecomponent", "thecomponent", core.PodSpec{}) - }) + Context("with common properties and maxUnavailable empty", func() { + BeforeEach(func() { + daemonSet = NewDaemonSet( + "thenamespace", + "thenname", + "theinstancename", + "thecomponent", + "thecomponent", + maxUnavailable, + core.PodSpec{}, + ) + }) - It("should leave the MinReadySeconds as the default", func() { - Expect(daemonSet.Spec.MinReadySeconds).ToNot(Equal(0), "Exp. 
the MinReadySeconds to be the default") - }) + It("should leave the MinReadySeconds as the default", func() { + Expect(daemonSet.Spec.MinReadySeconds).ToNot(Equal(0), "Exp. the MinReadySeconds to be the default") + }) - It("should only include the kubernetes common labels in the selector", func() { - Expect(daemonSet.Spec.Selector.MatchLabels).To(Equal(expSelectors), "Exp. the selector to contain kubernetes common labels") + It("should only include the kubernetes common labels in the selector", func() { + Expect(daemonSet.Spec.Selector.MatchLabels).To(Equal(expSelectors), "Exp. the selector to contain kubernetes common labels") + }) }) + // This option should never be set as invalid since there is validation on the setter. This unit test + // ensures the ds method handles it properly regardless + DescribeTable("with maxUnavailable option", func(op framework.Options, exp string) { + maxUnavailable = GetMaxUnavailableValue(op) + daemonSet = NewDaemonSet( + "thenamespace", + "thenname", + "theinstancename", + "thecomponent", + "thecomponent", + maxUnavailable, + core.PodSpec{}, + ) + Expect(daemonSet.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable.String()).To(Equal(exp), "Exp. the maxUnavailable value to match") + }, + Entry("missing", op, "100%"), + Entry("set to empty string", framework.Options{framework.MaxUnavailableOption: ""}, "100%"), + Entry("set to invalid value 'blue'", framework.Options{framework.MaxUnavailableOption: "blue"}, "100%"), + Entry("set to invalid zero", framework.Options{framework.MaxUnavailableOption: "0"}, "100%"), + Entry("set to invalid decimal", framework.Options{framework.MaxUnavailableOption: "2.5"}, "100%"), + Entry("set to invalid percentage", framework.Options{framework.MaxUnavailableOption: "200%"}, "100%"), + Entry("set to whole number", framework.Options{framework.MaxUnavailableOption: "50"}, "50"), + Entry("set to percentage", framework.Options{framework.MaxUnavailableOption: "99%"}, "99%"), + Entry("set to '100%'", framework.Options{framework.MaxUnavailableOption: "100%"}, "100%"), + ) }) diff --git a/internal/generator/framework/options.go b/internal/generator/framework/options.go index 0beef3b3b9..7410646df1 100644 --- a/internal/generator/framework/options.go +++ b/internal/generator/framework/options.go @@ -5,13 +5,13 @@ import ( ) const ( - ClusterTLSProfileSpec = "tlsProfileSpec" - - MinTLSVersion = "minTLSVersion" - Ciphers = "ciphers" - + ClusterTLSProfileSpec = "tlsProfileSpec" + MinTLSVersion = "minTLSVersion" + Ciphers = "ciphers" URL = "url" OptionServiceAccountTokenSecretName = "serviceAccountTokenSecretName" + OptionForwarderName = "forwarderName" + MaxUnavailableOption = "maxUnavailableRollout" ) // Options is a map of Options used to customize the config generation. E.g. 
Debugging, legacy config generation diff --git a/internal/generator/vector/conf/complex.toml b/internal/generator/vector/conf/complex.toml index 2258c94fea..258e49b3ef 100644 --- a/internal/generator/vector/conf/complex.toml +++ b/internal/generator/vector/conf/complex.toml @@ -103,6 +103,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_infrastructure_container_meta] type = "remap" @@ -144,6 +145,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_mytestapp_container_meta] type = "remap" diff --git a/internal/generator/vector/conf/complex_http_receiver.toml b/internal/generator/vector/conf/complex_http_receiver.toml index 6fa3f26836..e0182af71b 100644 --- a/internal/generator/vector/conf/complex_http_receiver.toml +++ b/internal/generator/vector/conf/complex_http_receiver.toml @@ -103,6 +103,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_infrastructure_container_meta] type = "remap" @@ -178,6 +179,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_mytestapp_container_meta] type = "remap" diff --git a/internal/generator/vector/conf/conf_test.go b/internal/generator/vector/conf/conf_test.go index e122fc8dd3..7460643d8a 100644 --- a/internal/generator/vector/conf/conf_test.go +++ b/internal/generator/vector/conf/conf_test.go @@ -71,49 +71,9 @@ var _ = Describe("Testing Complete Config Generation", func() { conf := Conf(secrets, spec, constants.OpenshiftNS, "my-forwarder", factory.ForwarderResourceNames{CommonName: constants.CollectorName}, op) Expect(string(exp)).To(EqualConfigFrom(conf)) }, - Entry("with spec for containers", "container.toml", nil, - obs.ClusterLogForwarderSpec{ - Inputs: []obs.InputSpec{ - { - Name: "mytestapp", - Type: obs.InputTypeApplication, - Application: &obs.Application{ - Includes: []obs.NamespaceContainerSpec{ - {Namespace: "test-ns"}, - }, - }, - }, - { - Name: "myinfra", - Type: obs.InputTypeInfrastructure, - Infrastructure: &obs.Infrastructure{ - Sources: []obs.InfrastructureSource{obs.InfrastructureSourceContainer}, - }, - }, - }, - Pipelines: []obs.PipelineSpec{ - { - InputRefs: []string{ - "myinfra", - "mytestapp", - }, - OutputRefs: []string{outputName}, - Name: "mypipeline", - FilterRefs: []string{"my-labels"}, - }, - }, - Outputs: []obs.OutputSpec{ - kafkaOutput, - }, - Filters: []obs.FilterSpec{ - { - Name: "my-labels", - Type: obs.FilterTypeOpenshiftLabels, - OpenshiftLabels: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }), - Entry("with complex spec", "complex.toml", nil, + Entry("with complex spec", + "complex.toml", + nil, obs.ClusterLogForwarderSpec{ Inputs: []obs.InputSpec{ { @@ -160,7 +120,9 @@ var _ = Describe("Testing Complete Config Generation", func() { }, }, ), - Entry("with complex spec && http audit receiver as input source", "complex_http_receiver.toml", nil, + 
Entry("with complex spec && http audit receiver as input source", + "complex_http_receiver.toml", + nil, obs.ClusterLogForwarderSpec{ Inputs: []obs.InputSpec{ { diff --git a/internal/generator/vector/conf/container.toml b/internal/generator/vector/conf/container.toml index b8019307d2..42c15ba1a8 100644 --- a/internal/generator/vector/conf/container.toml +++ b/internal/generator/vector/conf/container.toml @@ -27,6 +27,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_myinfra_container_meta] type = "remap" @@ -56,6 +57,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_mytestapp_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application.toml b/internal/generator/vector/input/application.toml index a667faa668..33a8688a9a 100644 --- a/internal/generator/vector/input/application.toml +++ b/internal/generator/vector/input/application.toml @@ -12,6 +12,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_application_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_exclude_container_from_infra.toml b/internal/generator/vector/input/application_exclude_container_from_infra.toml index c652ca8f49..ade423afda 100644 --- a/internal/generator/vector/input/application_exclude_container_from_infra.toml +++ b/internal/generator/vector/input/application_exclude_container_from_infra.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_excludes_container.toml b/internal/generator/vector/input/application_excludes_container.toml index c20697fe20..e9ccfc0c2e 100644 --- a/internal/generator/vector/input/application_excludes_container.toml +++ b/internal/generator/vector/input/application_excludes_container.toml @@ -12,6 +12,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_includes_container.toml b/internal/generator/vector/input/application_includes_container.toml index fe7af53f1d..2c6a493f67 100644 --- a/internal/generator/vector/input/application_includes_container.toml +++ b/internal/generator/vector/input/application_includes_container.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git 
a/internal/generator/vector/input/application_with_includes_excludes.toml b/internal/generator/vector/input/application_with_includes_excludes.toml index fc6dfdd208..2be9cb491d 100644 --- a/internal/generator/vector/input/application_with_includes_excludes.toml +++ b/internal/generator/vector/input/application_with_includes_excludes.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_with_infra_includes_excludes.toml b/internal/generator/vector/input/application_with_infra_includes_excludes.toml index 8f17f8a050..4f4a2e3029 100644 --- a/internal/generator/vector/input/application_with_infra_includes_excludes.toml +++ b/internal/generator/vector/input/application_with_infra_includes_excludes.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_with_infra_includes_infra_excludes.toml b/internal/generator/vector/input/application_with_infra_includes_infra_excludes.toml index d267d815e4..60d1db50a6 100644 --- a/internal/generator/vector/input/application_with_infra_includes_infra_excludes.toml +++ b/internal/generator/vector/input/application_with_infra_includes_infra_excludes.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_with_matchLabels.toml b/internal/generator/vector/input/application_with_matchLabels.toml index 7e0a2e227d..e298cc2e86 100644 --- a/internal/generator/vector/input/application_with_matchLabels.toml +++ b/internal/generator/vector/input/application_with_matchLabels.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_with_specific_infra_includes_infra_excludes.toml b/internal/generator/vector/input/application_with_specific_infra_includes_infra_excludes.toml index 006c29363f..1ea9fedddb 100644 --- a/internal/generator/vector/input/application_with_specific_infra_includes_infra_excludes.toml +++ b/internal/generator/vector/input/application_with_specific_infra_includes_infra_excludes.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_my_app_container_meta] type = "remap" diff --git a/internal/generator/vector/input/application_with_throttle.toml b/internal/generator/vector/input/application_with_throttle.toml index 33eab3e269..56eb6a7877 100644 --- 
a/internal/generator/vector/input/application_with_throttle.toml +++ b/internal/generator/vector/input/application_with_throttle.toml @@ -12,6 +12,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_application_container_meta] type = "remap" diff --git a/internal/generator/vector/input/infrastructure.toml b/internal/generator/vector/input/infrastructure.toml index 19ae940821..91e5646f2f 100644 --- a/internal/generator/vector/input/infrastructure.toml +++ b/internal/generator/vector/input/infrastructure.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_infrastructure_container_meta] type = "remap" diff --git a/internal/generator/vector/input/infrastructure_container.toml b/internal/generator/vector/input/infrastructure_container.toml index 4edc6ea404..c7c14a9baa 100644 --- a/internal/generator/vector/input/infrastructure_container.toml +++ b/internal/generator/vector/input/infrastructure_container.toml @@ -13,6 +13,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true [transforms.input_myinfra_container_meta] type = "remap" diff --git a/internal/generator/vector/input/source.go b/internal/generator/vector/input/source.go index 3cca2a046e..ad706eda23 100644 --- a/internal/generator/vector/input/source.go +++ b/internal/generator/vector/input/source.go @@ -46,6 +46,8 @@ var ( func NewSource(input obs.InputSpec, collectorNS string, resNames factory.ForwarderResourceNames, secrets internalobs.Secrets, op framework.Options) ([]framework.Element, []string) { els := []framework.Element{} ids := []string{} + // LOG-7196 temporary fix to set vector caching config + // TODO: remove annotation logic and add to spec switch input.Type { case obs.InputTypeApplication: ib := source.NewContainerPathGlobBuilder() @@ -167,6 +169,7 @@ func NewContainerSource(spec obs.InputSpec, namespace, includes, excludes string IncludePaths: includes, ExcludePaths: excludes, ExtraLabelSelector: source.LabelSelectorFrom(selector), + UseKubeCache: true, }, NewLogSourceAndType(metaID, logSource, logType, base, func(remap *elements.Remap) { remap.VRL = fmt.Sprintf( diff --git a/internal/generator/vector/source/kubernetes_logs.go b/internal/generator/vector/source/kubernetes_logs.go index 4a74af9a78..aa54f2e886 100644 --- a/internal/generator/vector/source/kubernetes_logs.go +++ b/internal/generator/vector/source/kubernetes_logs.go @@ -16,6 +16,7 @@ type KubernetesLogs struct { IncludePaths string ExcludePaths string ExtraLabelSelector string + UseKubeCache bool } func (kl KubernetesLogs) Name() string { @@ -46,6 +47,7 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = {{.UseKubeCache}} {{end}}` } @@ -56,6 +58,7 @@ func NewKubernetesLogs(id, includes, excludes string) KubernetesLogs { Desc: "Logs from containers (including openshift containers)", IncludePaths: includes, ExcludePaths: excludes, + 
UseKubeCache: true, } } diff --git a/internal/generator/vector/source/kubernetes_logs_no_includes_excludes.toml b/internal/generator/vector/source/kubernetes_logs_no_includes_excludes.toml index 0f6e38fe2f..3e8a45e425 100644 --- a/internal/generator/vector/source/kubernetes_logs_no_includes_excludes.toml +++ b/internal/generator/vector/source/kubernetes_logs_no_includes_excludes.toml @@ -11,3 +11,4 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true diff --git a/internal/generator/vector/source/kubernetes_logs_with_includes.toml b/internal/generator/vector/source/kubernetes_logs_with_includes.toml index 9199a5d47f..b5e60cc9d2 100644 --- a/internal/generator/vector/source/kubernetes_logs_with_includes.toml +++ b/internal/generator/vector/source/kubernetes_logs_with_includes.toml @@ -13,3 +13,4 @@ pod_annotation_fields.pod_uid = "kubernetes.pod_id" pod_annotation_fields.pod_node_name = "hostname" namespace_annotation_fields.namespace_uid = "kubernetes.namespace_id" rotate_wait_secs = 5 +use_apiserver_cache = true diff --git a/internal/metrics/logfilemetricexporter/factory.go b/internal/metrics/logfilemetricexporter/factory.go index e82ded3054..e3e539c8a1 100644 --- a/internal/metrics/logfilemetricexporter/factory.go +++ b/internal/metrics/logfilemetricexporter/factory.go @@ -28,6 +28,7 @@ const ( logPods = "varlogpods" logPodsValue = "/var/log/pods" metricsVolumePath = "/etc/logfilemetricexporter/metrics" + lfmeMaxUnavailable = "100%" ) // resourceRequirements returns the resource requirements for a given metric-exporter implementation @@ -69,7 +70,7 @@ func tolerations(exporter loggingv1a1.LogFileMetricExporter) []v1.Toleration { func NewDaemonSet(exporter loggingv1a1.LogFileMetricExporter, namespace, name string, tlsProfileSpec configv1.TLSProfileSpec, visitors ...func(o runtime.Object)) *apps.DaemonSet { podSpec := NewPodSpec(exporter, tlsProfileSpec) - ds := coreFactory.NewDaemonSet(namespace, name, exporter.Name, constants.LogfilesmetricexporterName, constants.LogfilesmetricexporterName, *podSpec, visitors...) + ds := coreFactory.NewDaemonSet(namespace, name, exporter.Name, constants.LogfilesmetricexporterName, constants.LogfilesmetricexporterName, lfmeMaxUnavailable, *podSpec, visitors...) 
return ds } diff --git a/internal/pkg/generator/forwarder/generator.go b/internal/pkg/generator/forwarder/generator.go index 31b81c44a4..45e03d6560 100644 --- a/internal/pkg/generator/forwarder/generator.go +++ b/internal/pkg/generator/forwarder/generator.go @@ -3,6 +3,7 @@ package forwarder import ( "errors" "fmt" + obs "github.com/openshift/cluster-logging-operator/api/observability/v1" "github.com/openshift/cluster-logging-operator/internal/api/initialize" "github.com/openshift/cluster-logging-operator/internal/factory" diff --git a/internal/validations/observability/validate.go b/internal/validations/observability/validate.go index 3e0e89a8d2..506604d5ed 100644 --- a/internal/validations/observability/validate.go +++ b/internal/validations/observability/validate.go @@ -12,7 +12,8 @@ import ( var ( clfValidators = []func(internalcontext.ForwarderContext){ - validateAnnotations, + validateLogLevelAnnotation, + validateMaxUnavailableAnnotation, ValidatePermissions, inputs.Validate, outputs.Validate, diff --git a/internal/validations/observability/validate_annotations.go b/internal/validations/observability/validate_annotations.go index c8c2871377..5b91d90128 100644 --- a/internal/validations/observability/validate_annotations.go +++ b/internal/validations/observability/validate_annotations.go @@ -2,21 +2,52 @@ package observability import ( "fmt" + "regexp" + "strings" + obs "github.com/openshift/cluster-logging-operator/api/observability/v1" internalcontext "github.com/openshift/cluster-logging-operator/internal/api/context" internalobs "github.com/openshift/cluster-logging-operator/internal/api/observability" "github.com/openshift/cluster-logging-operator/internal/constants" "github.com/openshift/cluster-logging-operator/internal/utils/sets" - "strings" ) -func validateAnnotations(context internalcontext.ForwarderContext) { - allowedLogLevel := sets.NewString("trace", "debug", "info", "warn", "error", "off") +const ( + validMaxUnavailableRegex = `^(100%|[1-9][0-9]?%|[1-9][0-9]*)$` +) + +var ( + compiledMaxUnavailableRegex = regexp.MustCompile(validMaxUnavailableRegex) + allowedLogLevels = sets.NewString("trace", "debug", "info", "warn", "error", "off") + enabledValues = sets.NewString("true", "enabled") +) + +func IsPercentOrWholeNumber(val string) bool { + return compiledMaxUnavailableRegex.MatchString(val) +} + +func validateMaxUnavailableAnnotation(context internalcontext.ForwarderContext) { + if value, ok := context.Forwarder.Annotations[constants.AnnotationMaxUnavailable]; ok { + if !IsPercentOrWholeNumber(value) { + condition := internalobs.NewCondition(obs.ConditionTypeMaxUnavailable, obs.ConditionFalse, obs.ReasonMaxUnavailableSupported, "") + condition.Message = fmt.Sprintf("max-unavailable-rollout value %q must be an absolute number or a valid percentage", value) + internalobs.SetCondition(&context.Forwarder.Status.Conditions, condition) + return + } + } + // Condition is only necessary when it is invalid, otherwise we can remove + internalobs.RemoveConditionByType(&context.Forwarder.Status.Conditions, obs.ConditionTypeMaxUnavailable) +} + +func IsEnabledValue(val string) bool { + return enabledValues.Has(strings.ToLower(val)) +} +func validateLogLevelAnnotation(context internalcontext.ForwarderContext) { if level, ok := context.Forwarder.Annotations[constants.AnnotationVectorLogLevel]; ok { - if !allowedLogLevel.Has(level) { + if !allowedLogLevels.Has(level) { condition := 
internalobs.NewCondition(obs.ConditionTypeLogLevel, obs.ConditionFalse, obs.ReasonLogLevelSupported, "") - list := strings.Join(allowedLogLevel.List(), ", ") + list := strings.Join(allowedLogLevels.List(), ", ") condition.Message = fmt.Sprintf("log level %q must be one of [%s]", level, list) internalobs.SetCondition(&context.Forwarder.Status.Conditions, condition) return diff --git a/internal/validations/observability/validate_annotations_test.go b/internal/validations/observability/validate_annotations_test.go index 38d425b284..cce5abd772 100644 --- a/internal/validations/observability/validate_annotations_test.go +++ b/internal/validations/observability/validate_annotations_test.go @@ -27,19 +27,19 @@ var _ = Describe("[internal][validations] validate clusterlogforwarder annotatio Context("#validateLogLevel", func() { It("should pass validation if no annotations are set", func() { - validateAnnotations(context) + validateLogLevelAnnotation(context) Expect(clf.Status.Conditions).To(BeEmpty()) }) It("should fail validation if log level is not supported", func() { clf.Annotations = map[string]string{constants.AnnotationVectorLogLevel: "foo"} - validateAnnotations(context) + validateLogLevelAnnotation(context) Expect(clf.Status.Conditions).To(HaveCondition(obs.ConditionTypeLogLevel, false, obs.ReasonLogLevelSupported, ".*must be one of.*")) }) DescribeTable("valid log levels", func(level string) { clf.Annotations = map[string]string{constants.AnnotationVectorLogLevel: level} - validateAnnotations(context) + validateLogLevelAnnotation(context) Expect(clf.Status.Conditions).To(BeEmpty()) }, Entry("should pass with level trace", "trace"), @@ -49,4 +49,35 @@ var _ = Describe("[internal][validations] validate clusterlogforwarder annotatio Entry("should pass with level error", "error"), Entry("should pass with level off", "off")) }) + + Context("#validateMaxUnavailable", func() { + It("should pass validation if no annotations are set", func() { + validateMaxUnavailableAnnotation(context) + Expect(clf.Status.Conditions).To(BeEmpty()) + }) + + DescribeTable("invalid maxUnavailable values", func(value string) { + clf.Annotations = map[string]string{constants.AnnotationMaxUnavailable: value} + validateMaxUnavailableAnnotation(context) + Expect(clf.Status.Conditions).To(HaveCondition(obs.ConditionTypeMaxUnavailable, false, obs.ReasonMaxUnavailableSupported, ".*must be an absolute number or a valid percentage.*")) + }, + Entry("should fail with empty value", ""), + Entry("should fail with value 0", "0"), + Entry("should fail with value 01", "01"), + Entry("should fail with value 0%", "0%"), + Entry("should fail with value 101%", "101%"), + Entry("should fail with value '-1'", "-1"), + Entry("should fail with value '5.5'", "5.5"), + Entry("should fail with value 'foo'", "foo")) + + DescribeTable("valid maxUnavailable values", func(value string) { + clf.Annotations = map[string]string{constants.AnnotationMaxUnavailable: value} + validateMaxUnavailableAnnotation(context) + Expect(clf.Status.Conditions).To(BeEmpty()) + }, + Entry("should pass with value 1", "1"), + Entry("should pass with value 1%", "1%"), + Entry("should pass with value 100", "100"), + Entry("should pass with value 100%", "100%")) + }) })
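For context, here is a rough, hand-written sketch of how the annotation value flows from the forwarder's metadata into the DaemonSet update strategy, using the functions added in this patch. It assumes the snippet is compiled inside the operator repository (the `internal` packages are not importable from outside), and the namespace, DaemonSet, and forwarder names are placeholders; it is not part of the patch itself.

[source,go]
----
package main

import (
	"fmt"

	"github.com/openshift/cluster-logging-operator/internal/constants"
	observability "github.com/openshift/cluster-logging-operator/internal/controller/observability"
	"github.com/openshift/cluster-logging-operator/internal/factory"
	"github.com/openshift/cluster-logging-operator/internal/generator/framework"
	core "k8s.io/api/core/v1"
)

func main() {
	// The annotation value is copied into the generator options only when it is a
	// whole number or a valid percentage (SetMaxUnavailableRolloutOption).
	options := framework.Options{}
	annotations := map[string]string{constants.AnnotationMaxUnavailable: "20%"}
	observability.SetMaxUnavailableRolloutOption(annotations, options)

	// GetMaxUnavailableValue falls back to "100%" when the option is missing or invalid.
	maxUnavailable := factory.GetMaxUnavailableValue(options)

	// The value ends up on the collector DaemonSet's rolling update strategy.
	ds := factory.NewDaemonSet("openshift-logging", "collector", "my-forwarder",
		constants.CollectorName, constants.VectorName, maxUnavailable, core.PodSpec{})
	fmt.Println(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable.String()) // 20%
}
----

Any value that fails validation is dropped from the options, so the DaemonSet keeps the previous default of `100%`.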