
Commit 9c5a643

Fix Node custom labels for v26 nightly; add test for nightly
This refactors node_test.go to add a new "nightly" test that fetches the latest nightly build of Node, runs it in a Docker container, and then runs the test as normal. While creating this, I also discovered a real issue in our support for the v26 prerelease (slated for release in April): nodejs/node#56290 causes one of the symbols we rely on to no longer be exported, so we need to grab it a different way. This commit also fixes that issue.
1 parent bd15b7e commit 9c5a643
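For context, the nightly-resolution step the new test performs can be sketched as a standalone program; the helper name latestNightlyURL and the main wrapper below are illustrative only, but the index.json fields, the GOARCH-to-filename mapping, and the tarball URL layout mirror what the node_test.go diff below decodes and constructs.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "runtime"
)

// nodejsNightly mirrors the two index.json fields the test decodes.
type nodejsNightly struct {
    Version string   `json:"version"`
    Files   []string `json:"files"`
}

// latestNightlyURL resolves the tarball URL for the newest nightly that has a
// build for the current architecture, the same way the new test does.
func latestNightlyURL() (string, error) {
    resp, err := http.Get("https://nodejs.org/download/nightly/index.json")
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    var nightlies []nodejsNightly
    if err := json.NewDecoder(resp.Body).Decode(&nightlies); err != nil {
        return "", err
    }
    if len(nightlies) == 0 {
        return "", fmt.Errorf("empty nightly index")
    }

    // The index lists the newest build first (the test relies on this via nightlies[0]);
    // map GOARCH to nodejs.org's platform naming.
    latest := nightlies[0]
    arch := map[string]string{"arm64": "linux-arm64", "amd64": "linux-x64"}[runtime.GOARCH]
    for _, f := range latest.Files {
        if f == arch {
            return fmt.Sprintf("https://nodejs.org/download/nightly/%s/node-%s-%s.tar.gz",
                latest.Version, latest.Version, arch), nil
        }
    }
    return "", fmt.Errorf("no %s tarball for %s", arch, latest.Version)
}

func main() {
    url, err := latestNightlyURL()
    if err != nil {
        panic(err)
    }
    // In the test, this URL is passed to the container build as the NODE_URL build arg.
    fmt.Println(url)
}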

File tree

4 files changed: +238 -112 lines changed

interpreter/customlabels/integrationtests/node_test.go

Lines changed: 185 additions & 104 deletions
@@ -13,6 +13,7 @@ package customlabels_test
 
 import (
     "context"
+    "encoding/json"
    "errors"
    "fmt"
    "io"
@@ -29,6 +30,7 @@ import (
 
     "time"
 
+    "github.com/docker/go-connections/nat"
     "github.com/stretchr/testify/require"
     testcontainers "github.com/testcontainers/testcontainers-go"
     "github.com/testcontainers/testcontainers-go/wait"
@@ -50,6 +52,113 @@ var files = []string{
     "broken.md",
 }
 
+func runTest(t *testing.T, ctx context.Context, host string, port nat.Port) {
+    enabledTracers, err := tracertypes.Parse("labels,v8")
+    require.NoError(t, err)
+
+    r := &testutils.MockReporter{}
+    traceCh, trc := testutils.StartTracer(ctx, t, enabledTracers, r, false)
+
+    testHTTPEndpoint(ctx, t, host, port)
+    framesPerWorkerId := make(map[int]int)
+    framesPerFileName := make(map[string]int)
+
+    totalWorkloadFrames := 0
+    unlabeledWorkloadFrames := 0
+
+    timer := time.NewTimer(3 * time.Second)
+    defer timer.Stop()
+
+    for {
+        select {
+        case <-timer.C:
+            goto done
+        case trace := <-traceCh:
+            if trace == nil {
+                continue
+            }
+            ct, err := trc.TraceProcessor().ConvertTrace(trace)
+            require.NotNil(t, ct)
+            require.NoError(t, err)
+            workerId, okWid := trace.CustomLabels["workerId"]
+            filePath, okFname := trace.CustomLabels["filePath"]
+            var fileName string
+            if okFname {
+                fileName = path.Base(filePath)
+            }
+            knownWorkloadFrames := []string{
+                "lex",
+                "parse",
+                "blockTokens",
+                "readFile",
+                "readFileHandle",
+            }
+            hasWorkloadFrame := false
+
+            for i := range ct.Frames {
+                if ct.Frames[i].Value().Type == libpf.V8Frame {
+                    name := ct.Frames[i].Value().FunctionName.String()
+                    if slices.Contains(knownWorkloadFrames, name) {
+                        hasWorkloadFrame = true
+                    }
+                }
+            }
+
+            if hasWorkloadFrame {
+                totalWorkloadFrames++
+                if !(okWid && okFname) {
+                    unlabeledWorkloadFrames++
+                }
+            }
+
+            if okWid {
+                val, err := strconv.Atoi(workerId)
+                require.NoError(t, err)
+
+                require.GreaterOrEqual(t, val, 0)
+                require.Less(t, val, N_WORKERS)
+
+                framesPerWorkerId[val]++
+            }
+
+            if okFname {
+                require.Contains(t, files, fileName)
+                framesPerFileName[fileName]++
+            }
+        }
+    }
+done:
+    totalWidFrames := 0
+    // for 8 workers, each should have roughly 1/8
+    // of the labeled frames. There will be a bit of skew,
+    // so accept anything above 60% of that.
+    for i := 0; i < N_WORKERS; i++ {
+        totalWidFrames += framesPerWorkerId[i]
+    }
+    expectedWorkerAvg := float64(totalWidFrames) / float64(N_WORKERS)
+    for i := 0; i < N_WORKERS; i++ {
+        require.Less(t, expectedWorkerAvg*0.60, float64(framesPerWorkerId[i]))
+    }
+    // Each of the documents should account for some nontrivial amount of time,
+    // but since they aren't all the same length, we are less strict.
+    totalFnameFrames := 0
+    for _, v := range framesPerFileName {
+        totalFnameFrames += v
+    }
+    expectedFnameAvg := float64(totalFnameFrames) / float64(len(framesPerFileName))
+    for _, v := range framesPerFileName {
+        require.Less(t, expectedFnameAvg*0.2, float64(v))
+    }
+
+    // Really, there should be zero frames in the
+    // `marked` workload that aren't under labels,
+    // but accept a 5% slop because the unwinder
+    // isn't perfect (e.g. it might interrupt the
+    // process when the Node environment is in an
+    // undefined state)
+    require.Less(t, 20*unlabeledWorkloadFrames, totalWorkloadFrames)
+}
+
 func TestIntegration(t *testing.T) {
     if !testutils.IsRoot() {
         t.Skip("root privileges required")
@@ -74,112 +183,67 @@ func TestIntegration(t *testing.T) {
 
             cont := startContainer(ctx, t, nodeVersion)
 
-            enabledTracers, err := tracertypes.Parse("labels,v8")
+            host, err := cont.Host(ctx)
+            require.NoError(t, err)
+            port, err := cont.MappedPort(ctx, "80")
             require.NoError(t, err)
 
-            r := &testutils.MockReporter{}
-            traceCh, trc := testutils.StartTracer(ctx, t, enabledTracers, r, false)
-
-            testHTTPEndpoint(ctx, t, cont)
-            framesPerWorkerId := make(map[int]int)
-            framesPerFileName := make(map[string]int)
-
-            totalWorkloadFrames := 0
-            unlabeledWorkloadFrames := 0
-
-            timer := time.NewTimer(3 * time.Second)
-            defer timer.Stop()
-
-            for {
-                select {
-                case <-timer.C:
-                    goto done
-                case trace := <-traceCh:
-                    if trace == nil {
-                        continue
-                    }
-                    ct, err := trc.TraceProcessor().ConvertTrace(trace)
-                    require.NotNil(t, ct)
-                    require.NoError(t, err)
-                    workerId, okWid := trace.CustomLabels["workerId"]
-                    filePath, okFname := trace.CustomLabels["filePath"]
-                    var fileName string
-                    if okFname {
-                        fileName = path.Base(filePath)
-                    }
-                    knownWorkloadFrames := []string{
-                        "lex",
-                        "parse",
-                        "blockTokens",
-                        "readFile",
-                        "readFileHandle",
-                    }
-                    hasWorkloadFrame := false
-
-                    for i := range ct.Frames {
-                        if ct.Frames[i].Value().Type == libpf.V8Frame {
-                            name := ct.Frames[i].Value().FunctionName.String()
-                            if slices.Contains(knownWorkloadFrames, name) {
-                                hasWorkloadFrame = true
-                            }
-                        }
-                    }
+            runTest(t, ctx, host, port)
+        })
+    }
+    t.Run("node-local-nightly", func(t *testing.T) {
+        type NodejsNightly struct {
+            Version string   `json:"version"`
+            Files   []string `json:"files"`
+        }
+
+        resp, err := http.Get("https://nodejs.org/download/nightly/index.json")
+        require.NoError(t, err)
+        defer resp.Body.Close()
+
+        var nightlies []NodejsNightly
+        err = json.NewDecoder(resp.Body).Decode(&nightlies)
+        require.NoError(t, err)
+        require.NotEmpty(t, nightlies)
+
+        latest := nightlies[0]
+
+        var nodeArch string
+        switch runtime.GOARCH {
+        case "arm64":
+            nodeArch = "linux-arm64"
+        case "amd64":
+            nodeArch = "linux-x64"
+        default:
+            t.Fatalf("Unsupported GOARCH: %s", runtime.GOARCH)
+        }
+
+        tarballName := fmt.Sprintf("node-%s-%s", latest.Version, nodeArch)
+
+        var tarballURL string
+        for _, file := range latest.Files {
+            if file == nodeArch {
+                tarballURL = fmt.Sprintf("https://nodejs.org/download/nightly/%s/%s.tar.gz", latest.Version, tarballName)
+                break
+            }
+        }
+        require.NotEmpty(t, tarballURL, "No tarball found for latest nightly")
 
-                    if hasWorkloadFrame {
-                        totalWorkloadFrames++
-                        if !(okWid && okFname) {
-                            unlabeledWorkloadFrames++
-                        }
-                    }
+        ctx, cancel := context.WithCancel(t.Context())
+        t.Cleanup(cancel)
 
-                    if okWid {
-                        val, err := strconv.Atoi(workerId)
-                        require.NoError(t, err)
+        defer cancel()
 
-                        require.GreaterOrEqual(t, val, 0)
-                        require.Less(t, val, N_WORKERS)
+        cont := startNightlyContainer(ctx, t, tarballURL)
 
-                        framesPerWorkerId[val]++
-                    }
+        host, err := cont.Host(ctx)
+        require.NoError(t, err)
+        port, err := cont.MappedPort(ctx, "80")
+        require.NoError(t, err)
 
-                    if okFname {
-                        require.Contains(t, files, fileName)
-                        framesPerFileName[fileName]++
-                    }
-                }
-            }
-        done:
-            totalWidFrames := 0
-            // for 8 workers, each should have roughly 1/8
-            // of the labeled frames. There will be a bit of skew,
-            // so accept anything above 60% of that.
-            for i := 0; i < N_WORKERS; i++ {
-                totalWidFrames += framesPerWorkerId[i]
-            }
-            expectedWorkerAvg := float64(totalWidFrames) / float64(N_WORKERS)
-            for i := 0; i < N_WORKERS; i++ {
-                require.Less(t, expectedWorkerAvg*0.60, float64(framesPerWorkerId[i]))
-            }
-            // Each of the documents should account for some nontrivial amount of time,
-            // but since they aren't all the same length, we are less strict.
-            totalFnameFrames := 0
-            for _, v := range framesPerFileName {
-                totalFnameFrames += v
-            }
-            expectedFnameAvg := float64(totalFnameFrames) / float64(len(framesPerFileName))
-            for _, v := range framesPerFileName {
-                require.Less(t, expectedFnameAvg*0.2, float64(v))
-            }
+        runTest(t, ctx, host, port)
 
-            // Really, there should be zero frames in the
-            // `marked` workload that aren't under labels,
-            // but accept a 5% slop because the unwinder
-            // isn't perfect (e.g. it might interrupt the
-            // process when the Node environment is in an
-            // undefined state)
-            require.Less(t, 20*unlabeledWorkloadFrames, totalWorkloadFrames)
-        })
-    }
+    })
 }
 
 func startContainer(ctx context.Context, t *testing.T,
@@ -204,15 +268,32 @@ func startContainer(ctx context.Context, t *testing.T,
     return cont
 }
 
-func testHTTPEndpoint(ctx context.Context, t *testing.T, cont testcontainers.Container) {
-    const numGoroutines = 10
-    const requestsPerGoroutine = 10000
+func startNightlyContainer(ctx context.Context, t *testing.T, nodeURL string) testcontainers.Container {
+    t.Log("starting container for node nightly at URL", nodeURL)
+    //nolint:dogsled
 
-    host, err := cont.Host(ctx)
+    _, path, _, _ := runtime.Caller(0)
+    cont, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+        ContainerRequest: testcontainers.ContainerRequest{
+            FromDockerfile: testcontainers.FromDockerfile{
+                Dockerfile: "Dockerfile.nightly",
+                Context:    filepath.Dir(path) + "/testdata/node-md-render/",
+                BuildArgs: map[string]*string{
+                    "NODE_URL": &nodeURL,
+                },
+            },
+            ExposedPorts: []string{"80/tcp"},
+            WaitingFor:   wait.ForHTTP("/docs/AUTHORS.md"),
+        },
+        Started: true,
+    })
     require.NoError(t, err)
+    return cont
+}
 
-    port, err := cont.MappedPort(ctx, "80")
-    require.NoError(t, err)
+func testHTTPEndpoint(ctx context.Context, t *testing.T, host string, port nat.Port) {
+    const numGoroutines = 10
+    const requestsPerGoroutine = 10000
 
     baseURL := "http://" + net.JoinHostPort(host, port.Port())
 
interpreter/customlabels/integrationtests/testdata/node-md-render/Dockerfile.nightly

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+FROM ubuntu:24.04
+
+RUN apt-get update && apt-get install -y git python3 make g++ libatomic1 curl
+
+
+WORKDIR /app
+ENV PATH /app/node/bin:$PATH
+
+COPY package.json ./
+
+RUN git clone https://github.com/markedjs/marked.git /tmp/marked && \
+    cd /tmp/marked && \
+    git checkout 0a0da515346d2b3dd1662531043fa6925cb73fe3 && \
+    cp -r docs /app/docs && \
+    rm -rf /tmp/marked
+
+COPY *.js ./
+
+ARG NODE_URL
+
+RUN curl $NODE_URL | tar zxf - && mv node* node && npm install
+
+EXPOSE 80
+
+CMD node index.js
