
Commit 677ba71 (1 parent: dbb065c)

Added RHOAI E2E tests in prow

vLLM served in OpenShift; llama-stack (RHOAI 2.25) and lightspeed-stack (dev-latest) served in OpenShift as independent containers. Tests run on a fourth pod.

20 files changed: +780 −0 lines

README.md
Lines changed: 1 addition & 0 deletions

@@ -124,6 +124,7 @@ Lightspeed Core Stack (LCS) supports the large language models from the provider
 | -------- | ---------------------------------------------- | ------------ | -------------- | -------------------------------------------------------------------------- |
 | OpenAI | gpt-5, gpt-4o, gpt4-turbo, gpt-4.1, o1, o3, o4 | Yes | remote::openai | [1](examples/openai-faiss-run.yaml) [2](examples/openai-pgvector-run.yaml) |
 | OpenAI | gpt-3.5-turbo, gpt-4 | No | remote::openai | |
+| RHOAI (vLLM)| meta-llama/Llama-3.2-1B-Instruct | Yes | remote::vllm | [1](tests/e2e-prow/rhoai/configs/run.yaml) |
 | RHAIIS (vLLM)| meta-llama/Llama-3.1-8B-Instruct | Yes | remote::vllm | [1](tests/e2e/configs/run-rhaiis.yaml) |
 | Azure | gpt-5, gpt-5-mini, gpt-5-nano, gpt-5-chat, gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, o3-mini, o4-mini | Yes | remote::azure | [1](examples/azure-run.yaml) |
 | Azure | o1, o1-mini | No | remote::azure | |

docs/providers.md
Lines changed: 1 addition & 0 deletions

@@ -61,6 +61,7 @@ Red Hat providers:
 
 | Name | Version Tested | Type | Pip Dependencies | Supported in LCS |
 |---|---|---|---|:---:|
+| RHOAI (vllm) | latest operator | remote | `openai` | ✅ |
 | RHAIIS (vllm) | 3.2.3 (on RHEL 9.20250429.0.4) | remote | `openai` | ✅ |
 

Lines changed: 25 additions & 0 deletions (new file: the lightspeed-stack service configuration)

name: Lightspeed Core Service (LCS)
service:
  host: 0.0.0.0
  port: 8080
  auth_enabled: false
  workers: 1
  color_log: true
  access_log: true
llama_stack:
  # Uses a remote llama-stack service
  # The instance would have already been started with a llama-stack-run.yaml file
  use_as_library_client: false
  # Alternative for "as library use"
  # use_as_library_client: true
  # library_client_config_path: <path-to-llama-stack-run.yaml-file>
  url: http://${env.E2E_LLAMA_HOSTNAME}:8321
  api_key: xyzzy
user_data_collection:
  feedback_enabled: true
  feedback_storage: "/tmp/data/feedback"
  transcripts_enabled: true
  transcripts_storage: "/tmp/data/transcripts"

authentication:
  module: "noop"
Lines changed: 120 additions & 0 deletions (new file: the llama-stack run.yaml referenced from the README table)

version: '2'
image_name: minimal-viable-llama-stack-configuration

apis:
  - agents
  - datasetio
  - eval
  - inference
  - post_training
  - safety
  - scoring
  - telemetry
  - tool_runtime
  - vector_io
benchmarks: []
container_image: null
datasets: []
external_providers_dir: null
inference_store:
  db_path: .llama/distributions/ollama/inference_store.db
  type: sqlite
logging: null
metadata_store:
  db_path: .llama/distributions/ollama/registry.db
  namespace: null
  type: sqlite
providers:
  agents:
    - config:
        persistence_store:
          db_path: .llama/distributions/ollama/agents_store.db
          namespace: null
          type: sqlite
        responses_store:
          db_path: .llama/distributions/ollama/responses_store.db
          type: sqlite
      provider_id: meta-reference
      provider_type: inline::meta-reference
  datasetio:
    - config:
        kvstore:
          db_path: .llama/distributions/ollama/huggingface_datasetio.db
          namespace: null
          type: sqlite
      provider_id: huggingface
      provider_type: remote::huggingface
    - config:
        kvstore:
          db_path: .llama/distributions/ollama/localfs_datasetio.db
          namespace: null
          type: sqlite
      provider_id: localfs
      provider_type: inline::localfs
  eval:
    - config:
        kvstore:
          db_path: .llama/distributions/ollama/meta_reference_eval.db
          namespace: null
          type: sqlite
      provider_id: meta-reference
      provider_type: inline::meta-reference
  inference:
    - provider_id: vllm
      provider_type: remote::vllm
      config:
        url: ${env.KSVC_URL}/v1/
        api_token: ${env.VLLM_API_KEY}
        tls_verify: false
        max_tokens: 1024
  post_training:
    - config:
        checkpoint_format: huggingface
        device: cpu
        distributed_backend: null
        dpo_output_dir: "."
      provider_id: huggingface
      provider_type: inline::huggingface-gpu
  safety:
    - config:
        excluded_categories: []
      provider_id: llama-guard
      provider_type: inline::llama-guard
  scoring:
    - config: {}
      provider_id: basic
      provider_type: inline::basic
    - config: {}
      provider_id: llm-as-judge
      provider_type: inline::llm-as-judge
    - config:
        openai_api_key: '********'
      provider_id: braintrust
      provider_type: inline::braintrust
  telemetry:
    - config:
        service_name: 'lightspeed-stack-telemetry'
        sinks: sqlite
        sqlite_db_path: .llama/distributions/ollama/trace_store.db
      provider_id: meta-reference
      provider_type: inline::meta-reference
  tool_runtime: []
  vector_io: []
scoring_fns: []
server:
  auth: null
  host: null
  port: 8321
  quota: null
  tls_cafile: null
  tls_certfile: null
  tls_keyfile: null
shields: []
vector_dbs: []

models:
  - model_id: meta-llama/Llama-3.2-1B-Instruct
    provider_id: vllm
    model_type: llm
    provider_model_id: null
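
llama-stack expands the ${env.KSVC_URL} and ${env.VLLM_API_KEY} references from the container environment at startup, so the vLLM endpoint never has to be hard-coded in the config. A hedged check that the vLLM-backed model registered correctly, assuming llama-stack's standard /v1/models endpoint; LLAMA_HOST is a placeholder for the llama-stack pod IP:

    # The model from the `models:` section above should appear in the listing.
    curl -s "http://${LLAMA_HOST}:8321/v1/models" | grep -o 'meta-llama/Llama-3.2-1B-Instruct'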
Lines changed: 25 additions & 0 deletions (new file: Pod spec for the lightspeed-stack service)

apiVersion: v1
kind: Pod
metadata:
  name: lightspeed-stack-service
  namespace: e2e-rhoai-dsc
spec:
  containers:
    - name: lightspeed-stack-container
      env:
        - name: E2E_LLAMA_HOSTNAME
          valueFrom:
            secretKeyRef:
              name: llama-stack-ip-secret
              key: key
      image: quay.io/lightspeed-core/lightspeed-stack:dev-latest
      ports:
        - containerPort: 8080
      volumeMounts:
        - name: config
          mountPath: /app-root/lightspeed-stack.yaml
          subPath: lightspeed-stack.yaml
  volumes:
    - name: config
      configMap:
        name: lightspeed-stack-config
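
The pod references two objects that must exist before it starts: the lightspeed-stack-config ConfigMap holding the YAML above, and the llama-stack-ip-secret carrying the llama-stack pod's IP. A sketch of how they might be created (the file path and the IP lookup are assumptions, not part of this commit):

    oc create configmap lightspeed-stack-config \
      --from-file=lightspeed-stack.yaml -n e2e-rhoai-dsc
    # Store the llama-stack pod IP so the LCS container can reach it.
    oc create secret generic llama-stack-ip-secret \
      --from-literal=key="$(oc get pod llama-stack-service -n e2e-rhoai-dsc -o jsonpath='{.status.podIP}')" \
      -n e2e-rhoai-dsc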
Lines changed: 34 additions & 0 deletions (new file: Pod spec for the llama-stack service)

apiVersion: v1
kind: Pod
metadata:
  name: llama-stack-service
  namespace: e2e-rhoai-dsc
spec:
  containers:
    - name: llama-stack-container
      env:
        - name: KSVC_URL
          valueFrom:
            secretKeyRef:
              name: api-url-secret
              key: key
        - name: VLLM_API_KEY
          valueFrom:
            secretKeyRef:
              name: vllm-api-key-secret
              key: key
      image: quay.io/opendatahub/llama-stack:rhoai-v2.25-latest
      ports:
        - containerPort: 8321
      volumeMounts:
        - name: app-root
          mountPath: /opt/app-root/src/.llama
        - name: config
          mountPath: /opt/app-root/run.yaml
          subPath: run.yaml
  volumes:
    - name: app-root
      emptyDir: {}
    - name: config
      configMap:
        name: llama-stack-config
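
Similarly, this pod expects the llama-stack-config ConfigMap (the run.yaml above) plus two secrets: api-url-secret with the KServe inference URL of the vLLM service and vllm-api-key-secret with its token. A sketch with placeholder values:

    oc create configmap llama-stack-config --from-file=run.yaml -n e2e-rhoai-dsc
    # Both values below are placeholders for the vLLM serving endpoint and token.
    oc create secret generic api-url-secret \
      --from-literal=key="https://<vllm-ksvc-url>" -n e2e-rhoai-dsc
    oc create secret generic vllm-api-key-secret \
      --from-literal=key="<vllm-token>" -n e2e-rhoai-dsc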
Lines changed: 17 additions & 0 deletions (new file: DataScienceCluster manifest)

apiVersion: datasciencecluster.opendatahub.io/v1
kind: DataScienceCluster
metadata:
  name: default-dsc
  namespace: e2e-rhoai-dsc
spec:
  serviceMesh:
    managementState: Managed
  components:
    kserve:
      managementState: Managed
    workbenches:
      managementState: Removed
    dashboard:
      managementState: Removed
    dataSciencePipelines:
      managementState: Removed
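
Only KServe (with the service mesh) is enabled for model serving; workbenches, dashboard, and pipelines are switched off to keep the e2e footprint small. Provisioning takes a while, so a job would typically block until the DSC reports Ready; a sketch, with the manifest file name assumed:

    oc apply -f datasciencecluster.yaml
    # Wait for the RHOAI operator to finish reconciling the cluster.
    oc wait dsc/default-dsc --for=jsonpath='{.status.phase}'=Ready --timeout=20m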
Lines changed: 6 additions & 0 deletions (new file: OperatorGroup manifest)

apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: global-operators
  namespace: openshift-operators
spec:
Lines changed: 35 additions & 0 deletions (new file: operator Subscriptions)

# Service Mesh Operator Subscription
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: servicemeshoperator
  namespace: openshift-operators
spec:
  channel: "stable"
  name: "servicemeshoperator"
  source: "redhat-operators"
  sourceNamespace: "openshift-marketplace"
---
# Serverless Operator Subscription
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: serverless-operator
  namespace: openshift-operators
spec:
  channel: "stable"
  name: "serverless-operator"
  source: "redhat-operators"
  sourceNamespace: "openshift-marketplace"
---
# RHODS Operator Subscription
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: rhods-operator
  namespace: openshift-operators
spec:
  channel: stable
  name: rhods-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
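
Subscriptions are installed asynchronously by OLM, so before creating the DataScienceCluster a setup script would normally confirm that all three ClusterServiceVersions reached the Succeeded phase. One way to inspect that:

    # Each operator's CSV should report PHASE=Succeeded before proceeding.
    oc get csv -n openshift-operators \
      -o custom-columns=NAME:.metadata.name,PHASE:.status.phase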
Lines changed: 30 additions & 0 deletions (new file: test Pod manifest)

apiVersion: v1
kind: Pod
metadata:
  name: test-pod
  namespace: e2e-rhoai-dsc
spec:
  containers:
    - name: test-container
      env:
        - name: E2E_LSC_HOSTNAME
          valueFrom:
            secretKeyRef:
              name: lcs-ip-secret
              key: key
        - name: E2E_LLAMA_HOSTNAME
          valueFrom:
            secretKeyRef:
              name: llama-stack-ip-secret
              key: key
      image: registry.access.redhat.com/ubi9/python-312
      command: ["/bin/sh", "/scripts/run-tests.sh"]
      volumeMounts:
        - name: script-volume
          mountPath: /scripts
  volumes:
    - name: script-volume
      configMap:
        name: test-script-cm
        defaultMode: 0755  # Make the script executable
  restartPolicy: Never
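
The test script itself comes from the test-script-cm ConfigMap, so the pod image stays a stock Python base. A prow step would then wait for the pod to finish and surface its log as the job output; a sketch, with the manifest file name assumed:

    oc apply -f test-pod.yaml
    # restartPolicy: Never means the pod phase settles at Succeeded or Failed.
    oc wait pod/test-pod -n e2e-rhoai-dsc \
      --for=jsonpath='{.status.phase}'=Succeeded --timeout=30m
    oc logs test-pod -n e2e-rhoai-dsc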
