Skip to content

Commit 74d049c

Browse files
MLE-17368: K8s Operator: E2E test to verify Logging with a backend (Loki) on MarkLogic Cluster (#44)
* e2e tests for logging * e2e assessment for fluent bit logs * added check to verify MarkLogic logs using fluent-bit filter, loki and grafana
1 parent cad97fd commit 74d049c

File tree

4 files changed

+291
-16
lines changed

4 files changed

+291
-16
lines changed

Makefile

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,9 @@
55
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
66
VERSION ?= 0.0.1
77

8+
# VERIFY_HUGE_PAGES defines if hugepages test is enabled or not for e2e test
9+
VERIFY_HUGE_PAGES ?= true
10+
811
export E2E_DOCKER_IMAGE ?= $(IMG)
912
export E2E_KUSTOMIZE_VERSION ?= $(KUSTOMIZE_VERSION)
1013
export E2E_CONTROLLER_TOOLS_VERSION ?= $(CONTROLLER_TOOLS_VERSION)
@@ -126,6 +129,8 @@ test: manifests generate fmt vet envtest ## Run tests.
126129
# To run specific e2e test with label, try go test -v ./test/e2e -count=1 -args --labels="type=tls-multi-node"
127130
.PHONY: e2e-test # Run the e2e tests against a minikube k8s instance that is spun up.
128131
e2e-test:
132+
@echo "=====Check Huges pages test is enabled or not for e2e test"
133+
ifeq ($(VERIFY_HUGE_PAGES), true)
129134
@echo "=====Setting hugepages value to 1280 for hugepages-e2e test"
130135
sudo sysctl -w vm.nr_hugepages=1280
131136

@@ -142,6 +147,10 @@ e2e-test:
142147
@echo "=====Restart minikube cluster"
143148
minikube stop
144149
minikube start
150+
else
151+
@echo "=====Running e2e test without hugepages test"
152+
go test -v -count=1 ./test/e2e
153+
endif
145154

146155
.PHONY: e2e-setup-minikube
147156
e2e-setup-minikube: kustomize controller-gen build docker-build

test/e2e/2_marklogic_cluster_test.go

Lines changed: 218 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package e2e
22

33
import (
44
"context"
5+
"encoding/json"
56
"flag"
67
"fmt"
78
"strings"
@@ -10,6 +11,7 @@ import (
1011

1112
databasev1alpha1 "github.com/marklogic/marklogic-kubernetes-operator/api/v1alpha1"
1213
coreV1 "k8s.io/api/core/v1"
14+
corev1 "k8s.io/api/core/v1"
1315
"k8s.io/apimachinery/pkg/api/resource"
1416
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1517

@@ -31,6 +33,7 @@ const (
3133

3234
var (
3335
replicas = int32(1)
36+
logOutput = "[OUTPUT]\n\tname loki\n\tmatch *\n\thost loki.loki.svc.cluster.local\n\tport 3100\n\tlabels job=fluent-bit\n\thttp_user admin\n\thttp_passwd admin"
3437
adminUsername = "admin"
3538
adminPassword = "Admin@8001"
3639
marklogiccluster = &databasev1alpha1.MarklogicCluster{
@@ -55,14 +58,141 @@ var (
5558
IsBootstrap: true,
5659
},
5760
},
61+
LogCollection: &databasev1alpha1.LogCollection{
62+
Enabled: true,
63+
Image: "fluent/fluent-bit:3.1.1",
64+
Files: databasev1alpha1.LogFilesConfig{
65+
ErrorLogs: true,
66+
AccessLogs: true,
67+
RequestLogs: true,
68+
CrashLogs: true,
69+
AuditLogs: true,
70+
},
71+
Outputs: logOutput,
72+
},
5873
},
5974
}
75+
dashboardPayload = `{
76+
"dashboard": {
77+
"panels": [
78+
{
79+
"type": "graph",
80+
"title": "Fluent Bit Logs",
81+
"targets": [
82+
{
83+
"expr": "rate({job=\"fluent-bit\"}[5m])",
84+
"legendFormat": "{{job}}"
85+
}
86+
]
87+
}
88+
],
89+
"title": "Fluent Bit Dashboard"
90+
},
91+
"overwrite": true
92+
}`
93+
dashboardUID = ""
94+
dataSourcePayload = `{
95+
"name": "Loki",
96+
"type": "loki",
97+
"url": "http://loki-gateway.loki.svc.cluster.local",
98+
"access": "proxy",
99+
"basicAuth": false
100+
}`
101+
dataSourceUID = ""
60102
)
61103

104+
type DashboardResponse struct {
105+
UID string `json:"uid"`
106+
Status string `json:"status"`
107+
}
108+
type DataSourceResponse struct {
109+
DataSource DataSource `json:"datasource"`
110+
}
111+
type DataSource struct {
112+
UID string `json:"uid"`
113+
Message string `json:"message"`
114+
}
115+
62116
func TestMarklogicCluster(t *testing.T) {
63117
feature := features.New("Marklogic Cluster Test")
64118

65-
// Assessment for MarklogicCluster creation
119+
// Setup Loki and Grafana to verify Logging for Operator
120+
feature.Setup(func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
121+
t.Log("Setting up Loki and Grafana")
122+
client := c.Client()
123+
err := utils.AddHelmRepo("grafana", "https://grafana.github.io/helm-charts")
124+
if err != nil {
125+
t.Fatalf("Failed to add helm repo: %v", err)
126+
}
127+
128+
err = utils.InstallHelmChart("loki", "grafana/loki", "loki", "6.6.5", "loki.yaml")
129+
if err != nil {
130+
t.Fatalf("Failed to install loki helm chart: %v", err)
131+
}
132+
133+
err = utils.InstallHelmChart("grafana", "grafana/grafana", "grafana", "8.3.2")
134+
if err != nil {
135+
t.Fatalf("Failed to install grafana helm chart: %v", err)
136+
}
137+
138+
podList := &corev1.PodList{}
139+
if err := client.Resources().List(ctx, podList, func(lo *metav1.ListOptions) {
140+
lo.FieldSelector = "metadata.namespace=" + "grafana"
141+
}); err != nil {
142+
t.Fatal(err)
143+
}
144+
145+
grafanaPodName := podList.Items[0].Name
146+
err = utils.WaitForPod(ctx, t, client, "grafana", grafanaPodName, 120*time.Second)
147+
if err != nil {
148+
t.Fatalf("Failed to wait for grafana pod creation: %v", err)
149+
}
150+
151+
// Get Grafana admin password
152+
grafanaAdminUser, grafanaAdminPassword, err := utils.GetSecretData(ctx, client, "grafana", "grafana", "admin-user", "admin-password")
153+
if err != nil {
154+
t.Fatalf("Failed to get Grafana admin user and password: %v", err)
155+
}
156+
157+
//Check Grafana Health before creating datasource
158+
start := time.Now()
159+
timeout := 2 * time.Minute
160+
grafanaURL := "http://localhost:3000"
161+
for {
162+
if time.Since(start) > timeout {
163+
t.Fatalf("Grafana is not ready after %v", timeout)
164+
}
165+
curlCommand := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" %s/api/health`, grafanaURL)
166+
output, err := utils.ExecCmdInPod(grafanaPodName, "grafana", "grafana", curlCommand)
167+
if err != nil {
168+
t.Logf("Grafana is not ready yet...an error occurred: %v", err)
169+
}
170+
if output == "200" {
171+
t.Log("Grafana is ready")
172+
break
173+
}
174+
time.Sleep(5 * time.Second)
175+
}
176+
177+
// Create datasource for Grafana
178+
url := fmt.Sprintf("%s/api/datasources", grafanaURL)
179+
curlCommand := fmt.Sprintf(`curl -X POST %s -u %s:%s -H "Content-Type: application/json" -d '%s'`, url, grafanaAdminUser, grafanaAdminPassword, dataSourcePayload)
180+
output, err := utils.ExecCmdInPod(grafanaPodName, "grafana", "grafana", curlCommand)
181+
if err != nil {
182+
t.Fatalf("Failed to execute kubectl command grafana in pod: %v", err)
183+
}
184+
if !(strings.Contains(string(output), "Datasource added") && strings.Contains(string(output), "Loki")) {
185+
t.Fatal("Failed to create datasource for Grafana")
186+
}
187+
var dataSourceResponse DataSourceResponse
188+
if err := json.Unmarshal([]byte(output), &dataSourceResponse); err != nil {
189+
t.Fatalf("Failed to unmarshal JSON response: %v", err)
190+
}
191+
dataSourceUID = dataSourceResponse.DataSource.UID
192+
return ctx
193+
})
194+
195+
// Setup for MarklogicCluster creation
66196
feature.Setup(func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
67197
client := c.Client()
68198
databasev1alpha1.AddToScheme(client.Resources(mlNamespace).GetScheme())
@@ -106,6 +236,86 @@ func TestMarklogicCluster(t *testing.T) {
106236

107237
})
108238

239+
// Assessment to check for logging in MarkLogic Operator
240+
feature.Assess("Grafana Dashboard created", func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
241+
client := c.Client()
242+
podList := &corev1.PodList{}
243+
if err := client.Resources().List(ctx, podList, func(lo *metav1.ListOptions) {
244+
lo.FieldSelector = "metadata.namespace=" + "grafana"
245+
}); err != nil {
246+
t.Fatal(err)
247+
}
248+
grafanaPodName := podList.Items[0].Name
249+
grafanaAdminUser, grafanaAdminPassword, err := utils.GetSecretData(ctx, client, "grafana", "grafana", "admin-user", "admin-password")
250+
if err != nil {
251+
t.Fatalf("Failed to get Grafana admin user and password: %v", err)
252+
}
253+
time.Sleep(90 * time.Second)
254+
grafanaURL := "http://localhost:3000"
255+
url := fmt.Sprintf("%s/api/dashboards/db", grafanaURL)
256+
curlCommand := fmt.Sprintf(`curl -X POST %s -u %s:%s -H "Content-Type: application/json" -d '%s'`, url, grafanaAdminUser, grafanaAdminPassword, dashboardPayload)
257+
output, err := utils.ExecCmdInPod(grafanaPodName, "grafana", "grafana", curlCommand)
258+
if err != nil {
259+
t.Fatalf("Failed to execute kubectl command in grafana pod: %v", err)
260+
}
261+
var dashboardResponse DashboardResponse
262+
if err := json.Unmarshal([]byte(output), &dashboardResponse); err != nil {
263+
t.Fatalf("Failed to unmarshal JSON response: %v", err)
264+
}
265+
dashboardUID = dashboardResponse.UID
266+
if dashboardResponse.Status != "success" {
267+
t.Fatal("Failed to create dashboard with loki and fluent-bit")
268+
}
269+
270+
// Create query to verify MarkLogic logs in Grafana
271+
payload := map[string]interface{}{
272+
"queries": []map[string]interface{}{
273+
{
274+
"refId": "A",
275+
"expr": "{job=\"fluent-bit\"} |= ``",
276+
"queryType": "range",
277+
"datasource": map[string]string{
278+
"type": "loki",
279+
"uid": dataSourceUID,
280+
},
281+
"editorMode": "builder",
282+
"maxLines": 1000,
283+
"legendFormat": "",
284+
"datasourceId": 1,
285+
"intervalMs": 20000,
286+
"maxDataPoints": 1073,
287+
},
288+
},
289+
"from": "now-5m",
290+
"to": "now",
291+
}
292+
293+
payloadBytes, err := json.Marshal(payload)
294+
if err != nil {
295+
t.Fatalf("Failed to marshal payload: %v", err)
296+
}
297+
queryUrl := fmt.Sprintf("%s/api/ds/query?ds_type=loki", grafanaURL)
298+
curlCommand = fmt.Sprintf(`curl -X POST %s -u %s:%s -H "Content-Type: application/json" -d '%s'`, queryUrl, grafanaAdminUser, grafanaAdminPassword, payloadBytes)
299+
output, err = utils.ExecCmdInPod(grafanaPodName, "grafana", "grafana", curlCommand)
300+
if err != nil {
301+
t.Fatalf("Failed to execute kubectl command in grafana pod: %v", err)
302+
}
303+
// t.Logf("Query datasource response: %s", output)
304+
// Verify MarkLogic logs in Grafana using Loki and Fluent Bit
305+
if !(strings.Contains(string(output), "Starting MarkLogic Server")) {
306+
t.Fatal("Failed to Query datasource")
307+
}
308+
309+
curlCommand = fmt.Sprintf(`curl -u %s:%s %s/api/dashboards/uid/%s`, grafanaAdminUser, grafanaAdminPassword, grafanaURL, dashboardUID)
310+
output, err = utils.ExecCmdInPod(grafanaPodName, "grafana", "grafana", curlCommand)
311+
if err != nil {
312+
t.Fatalf("Failed to execute kubectl command in grafana pod: %v", err)
313+
}
314+
if !strings.Contains(string(output), "Fluent Bit Dashboard") {
315+
t.Fatal("Failed to associate Fluent Bit as filter in Grafana dashboard")
316+
}
317+
return ctx
318+
})
109319
// Run hugepages verification tests if verifyHugePages flag is set
110320
if *verifyHugePages {
111321
t.Log("Running HugePages verification tests")
@@ -157,6 +367,13 @@ func TestMarklogicCluster(t *testing.T) {
157367

158368
// Using feature.Teardown to clean up
159369
feature.Teardown(func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context {
370+
client := c.Client()
371+
if err := client.Resources().Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "grafana"}}); err != nil {
372+
t.Fatalf("Failed to delete namespace: %s", err)
373+
}
374+
if err := client.Resources().Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "loki"}}); err != nil {
375+
t.Fatalf("Failed to delete namespace: %s", err)
376+
}
160377
return ctx
161378
})
162379

test/e2e/data/loki.yaml

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
deploymentMode: SingleBinary
2+
loki:
3+
auth_enabled: false
4+
commonConfig:
5+
replication_factor: 1
6+
storage:
7+
type: 'filesystem'
8+
schemaConfig:
9+
configs:
10+
- from: "2024-01-01"
11+
store: tsdb
12+
index:
13+
prefix: loki_index_
14+
period: 24h
15+
object_store: filesystem # we're storing on filesystem so there's no real persistence here.
16+
schema: v13
17+
singleBinary:
18+
replicas: 1
19+
read:
20+
replicas: 0
21+
backend:
22+
replicas: 0
23+
write:
24+
replicas: 0

0 commit comments

Comments
 (0)