tests/e2e/e2e.go (2 additions & 1 deletion)
@@ -9,6 +9,7 @@ import (
"time"

ginkgo "github.com/onsi/ginkgo/v2"

"github.com/stretchr/testify/require"

"github.com/ava-labs/avalanchego/tests/fixture"
@@ -51,7 +52,7 @@ type TestEnvironment struct {
// Retrieve a random URI to naively attempt to spread API load across
// nodes.
func (te *TestEnvironment) GetRandomNodeURI() string {
r := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec
r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404

Contributor Author: Rather than skipping the whole linter, I think it's better to be explicit about which rule we are silencing.
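
As an illustration (not part of the diff), a minimal compilable sketch of a rule-scoped waiver; the randIndex helper is made up, and G404 is gosec's weak-random-number check:

```go
package example

import (
	"math/rand"
	"time"
)

// randIndex picks a pseudo-random index for spreading test load; the value is
// not security-sensitive, so math/rand is acceptable here.
func randIndex(n int) int {
	// //nolint:gosec would silence every gosec rule on this line, while
	// #nosec G404 waives only the weak-RNG finding, documenting exactly
	// which check is intentionally being skipped.
	r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404
	return r.Intn(n)
}
```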

return te.URIs[r.Intn(len(te.URIs))]
}

tests/e2e/e2e_test.go (2 additions & 0 deletions)
@@ -12,7 +12,9 @@ import (
"testing"

ginkgo "github.com/onsi/ginkgo/v2"

Contributor: So I'm clear, the preference is for separating every third-party import package?

Contributor Author: Yeah - that's how it's done throughout the rest of the repo... Really all I want is some super strict linter that we can get behind, but for now consistency is all I can get.
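
For illustration only (not from the repo), a sketch of the grouping style being described: standard library first, then each third-party module in its own blank-line-separated block. The test body exists solely so the imports are used and the file compiles:

```go
package e2e_test

import (
	// Standard library imports first.
	"testing"

	// Each third-party module gets its own block, separated by a blank line.
	ginkgo "github.com/onsi/ginkgo/v2"

	"github.com/onsi/gomega"

	"github.com/stretchr/testify/require"
)

// TestImportGrouping exists only so the imports above are referenced; the
// point of the sketch is the blank-line grouping, which goimports preserves.
func TestImportGrouping(t *testing.T) {
	gomega.RegisterTestingT(t)
	require.NotNil(t, ginkgo.GinkgoWriter)
}
```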

"github.com/onsi/gomega"

"github.com/stretchr/testify/require"

"github.com/ava-labs/avalanchego/tests"
tests/e2e/p/permissionless_subnets.go (1 addition & 1 deletion)
@@ -63,7 +63,7 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() {
ginkgo.By("retrieving the node ID of a primary network validator", func() {
pChainClient := platformvm.NewClient(nodeURI)
ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultTimeout)
validatorIDs, err := pChainClient.SampleValidators(ctx, ids.ID{}, 1)
validatorIDs, err := pChainClient.SampleValidators(ctx, constants.PrimaryNetworkID, 1)

Contributor Author: Named constants ❤️

cancel()
gomega.Expect(err).Should(gomega.BeNil())
gomega.Expect(validatorIDs).Should(gomega.HaveLen(1))
tests/e2e/p/workflow.go (2 additions & 2 deletions)
@@ -94,8 +94,8 @@ var _ = e2e.DescribePChain("[Workflow]", func() {

// Use a random node ID to ensure that repeated test runs
// will succeed against a persistent network.
randomShortID, _ := ids.ToShortID(utils.RandomBytes(20))
validatorID := ids.NodeID(randomShortID)
validatorID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen))

Contributor Author: I think ideally we should be able to use ids.GenerateTestNodeID(). Generating IDs should produce different results on different runs, imo.

Contributor: I tried using that, but since the ids from ids.GenerateTestNodeID() are generated sequentially, the same ids will be generated on successive test runs. That was not compatible with a persistent testnet fixture, though if there's a better way to avoid collisions I'm open to suggestions.
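
To make the collision argument concrete, a purely hypothetical sketch (none of these helpers exist in the codebase): a counter-based generator restarts from the same state in every process, so repeated runs against a persistent network reuse IDs, while IDs built from random bytes effectively never repeat:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"sync/atomic"
)

const nodeIDLen = 20 // assumed NodeID length in bytes

var counter uint64

// sequentialNodeID mimics a counter-based test-ID generator: every process
// starts the counter at zero, so successive runs produce identical IDs.
func sequentialNodeID() [nodeIDLen]byte {
	var id [nodeIDLen]byte
	id[nodeIDLen-1] = byte(atomic.AddUint64(&counter, 1))
	return id
}

// randomNodeID derives the ID from cryptographically random bytes, so
// repeated runs are collision-free with overwhelming probability.
func randomNodeID() [nodeIDLen]byte {
	var id [nodeIDLen]byte
	_, _ = rand.Read(id[:])
	return id
}

func main() {
	fmt.Printf("sequential: %x\nrandom:     %x\n", sequentialNodeID(), randomNodeID())
}
```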

gomega.Expect(err).Should(gomega.BeNil())

vdr := &txs.Validator{
NodeID: validatorID,
tests/fixture/test_data_server.go (16 additions & 13 deletions)
@@ -17,6 +17,7 @@ import (
"sync"
"time"

"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1"
)

@@ -37,15 +38,14 @@ type TestData struct {

// http server allocating resources to tests potentially executing in parallel
type testDataServer struct {
TestData

// Synchronizes access to test data
lock sync.Mutex
TestData

Contributor Author: I think it's a general best practice to put the lock above the fields that it is guarding. I don't recall where I read this... but I think it makes sense. It isn't as important here, but in cases where the struct gets larger it can improve readability.
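
A small illustrative sketch of that convention (the struct is made up, not from this fixture): the mutex is declared directly above the fields it guards, so the grouping stays obvious as the struct grows:

```go
package example

import "sync"

// counterStore is an illustrative struct, not from the codebase.
type counterStore struct {
	// lock guards the fields declared immediately below it.
	lock   sync.Mutex
	counts map[string]int

	// Fields that don't need the lock (e.g. set once at construction)
	// can live outside the guarded group.
	name string
}

func (c *counterStore) Increment(key string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.counts == nil {
		c.counts = make(map[string]int)
	}
	c.counts[key]++
}
```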

}

// Type used to marshal/unmarshal a set of test keys for transmission over http.
type keysDocument struct {
Keys []*secp256k1.PrivateKey
Keys []*secp256k1.PrivateKey `json:"keys"`

Contributor Author: I know the marshaller defaults to the name of the field... but I think explicitly tagging makes it very clear that we are JSON-marshalling this. Also, we typically use camelCase for JSON rather than CamelCase.
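
A standalone sketch of the difference the tag makes (the types here are illustrative): without a tag the encoder uses the exported field name, so the key comes out capitalized; the explicit tag yields the conventional camelCase key:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type untagged struct {
	Keys []string
}

type tagged struct {
	Keys []string `json:"keys"`
}

func main() {
	u, _ := json.Marshal(untagged{Keys: []string{"a"}})
	t, _ := json.Marshal(tagged{Keys: []string{"a"}})
	fmt.Println(string(u)) // {"Keys":["a"]}
	fmt.Println(string(t)) // {"keys":["a"]}
}
```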

}

func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) {
@@ -74,20 +74,21 @@ func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) {
}

// Allocate the requested number of keys
remainingKeys := len(s.FundedKeys) - keyCount

Contributor Author: I just found the code a lot easier to understand once remainingKeys was a var.

allocatedKeys := s.FundedKeys[remainingKeys:]

keysDoc := &keysDocument{
Keys: s.FundedKeys[len(s.FundedKeys)-keyCount:],
Keys: allocatedKeys,
}
if err := json.NewEncoder(w).Encode(keysDoc); err != nil {
http.Error(w, fmt.Sprintf("failed to encode test keys: %v", err), http.StatusInternalServerError)
msg := fmt.Sprintf("failed to encode test keys: %v", err)
http.Error(w, msg, http.StatusInternalServerError)
return
}

// Forget the allocated keys
for i := len(s.FundedKeys) - keyCount; i < len(s.FundedKeys); i++ {
// Ensure the key can be garbage collected
s.FundedKeys[i] = nil
}
s.FundedKeys = s.FundedKeys[:len(s.FundedKeys)-keyCount]
utils.ZeroSlice(allocatedKeys)
s.FundedKeys = s.FundedKeys[:remainingKeys]
}

// Serve test data via http to ensure allocation is synchronized even when
@@ -127,6 +128,7 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
if count <= 0 {
return nil, errInvalidKeyCount
}

uri, err := url.Parse(baseURI)
if err != nil {
return nil, fmt.Errorf("failed to parse uri: %w", err)
@@ -139,13 +141,13 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
if err != nil {
return nil, fmt.Errorf("failed to construct request: %w", err)
}

resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to request funded keys: %w", err)
}
if resp.Body != nil {
defer resp.Body.Close()
}
defer resp.Body.Close()

Contributor Author: Body is guaranteed to never be nil (and if it wasn't, then the below io.ReadAll(resp.Body) would panic anyway).


body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response for funded keys: %w", err)
@@ -156,6 +158,7 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
}
return nil, fmt.Errorf("test data server returned unexpected status code %d: %v", resp.StatusCode, body)
}

keysDoc := &keysDocument{}
if err := json.Unmarshal(body, keysDoc); err != nil {
return nil, fmt.Errorf("failed to unmarshal funded keys: %w", err)
tests/fixture/test_data_server_test.go (35 additions & 29 deletions)
@@ -18,11 +18,11 @@ func TestAllocateFundedKeys(t *testing.T) {
require := require.New(t)

factory := secp256k1.Factory{}
keys := []*secp256k1.PrivateKey{}
for i := 0; i < 5; i++ {
keys := make([]*secp256k1.PrivateKey, 5)
for i := range keys {
key, err := factory.NewPrivateKey()
require.NoError(err)
keys = append(keys, key)
keys[i] = key
}

uri, err := ServeTestData(TestData{
@@ -34,38 +34,44 @@
name string
count int
expectedAddresses []ids.ShortID
}{{
name: "single key",
count: 1,
expectedAddresses: []ids.ShortID{
keys[4].Address(),
expectedError error
}{
{
name: "single key",
count: 1,
expectedAddresses: []ids.ShortID{
keys[4].Address(),
},
expectedError: nil,
},
}, {
name: "multiple keys",
count: 4,
expectedAddresses: []ids.ShortID{
keys[0].Address(),
keys[1].Address(),
keys[2].Address(),
keys[3].Address(),
{
name: "multiple keys",
count: 4,
expectedAddresses: []ids.ShortID{
keys[0].Address(),
keys[1].Address(),
keys[2].Address(),
keys[3].Address(),
},
expectedError: nil,
},
}, {
name: "insufficient keys available",
count: 1,
}}
{
name: "insufficient keys available",
count: 1,
expectedAddresses: []ids.ShortID{},
expectedError: errRequestedKeyCountExceedsAvailable,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
keys, err := AllocateFundedKeys(uri, tc.count)
if tc.expectedAddresses == nil {
require.ErrorIs(err, errRequestedKeyCountExceedsAvailable)
} else {
require.NoError(err)
addresses := []ids.ShortID{}
for _, key := range keys {
addresses = append(addresses, key.Address())
}
require.Equal(tc.expectedAddresses, addresses)
require.ErrorIs(err, tc.expectedError)

addresses := make([]ids.ShortID, len(keys))
for i, key := range keys {
addresses[i] = key.Address()
}
require.Equal(tc.expectedAddresses, addresses)

Comment on lines +68 to +74:

Contributor Author: This is a bit odd, as it enforces that keys is empty if an error is returned... If we want to remove this requirement, then I'd prefer adding a check after the require.ErrorIs(err, tc.expectedError) with if err != nil { return }.

Contributor: I don't think it matters.
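
For clarity, a sketch of that alternative, written as the body of the subtest under the same tc fields (illustrative, not the committed code): the address assertion is skipped once an error is expected, so the helper no longer has to return an empty slice alongside the error:

```go
// Inside t.Run(tc.name, func(t *testing.T) { ... }):
keys, err := AllocateFundedKeys(uri, tc.count)
require.ErrorIs(err, tc.expectedError)
if err != nil {
	// Don't assert on addresses when an error was expected.
	return
}

addresses := make([]ids.ShortID, len(keys))
for i, key := range keys {
	addresses[i] = key.Address()
}
require.Equal(tc.expectedAddresses, addresses)
```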

})
}
}
tests/http.go (1 addition & 1 deletion)
@@ -27,7 +27,7 @@ func GetNodeMetrics(nodeURI string, metricNames ...string) (NodeMetrics, error)

// GetNodesMetrics retrieves the specified metrics for the provided node URIs.
func GetNodesMetrics(nodeURIs []string, metricNames ...string) (NodesMetrics, error) {
metrics := make(NodesMetrics)
metrics := make(NodesMetrics, len(nodeURIs))
for _, u := range nodeURIs {
var err error
metrics[u], err = GetNodeMetrics(u, metricNames...)
utils/zero.go (10 additions & 0 deletions)
@@ -7,3 +7,13 @@ package utils
func Zero[T any]() T {
return *new(T)
}

// ZeroSlice sets all values of the provided slice to the type's zero value.
//
// This can be useful to ensure that the garbage collector doesn't hold
// references to values that are no longer desired.
func ZeroSlice[T any](s []T) {
for i := range s {
s[i] = *new(T)
}
}

Comment on lines +11 to +19:

Contributor Author: I'm not sure if we could've used this in the past (typically we zero out the slice one element at a time)... but it seems generally useful.
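
A short usage sketch (assuming the utils.ZeroSlice added in this PR), mirroring how allocateKeys forgets handed-out keys: zero the tail of the slice so the backing array drops its references, then truncate. The element type here is a stand-in for the key type:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils"
)

func ptr(s string) *string { return &s }

func main() {
	funded := []*string{ptr("a"), ptr("b"), ptr("c"), ptr("d")}

	// Hand out the last two entries, then forget them: zero the tail so the
	// backing array no longer references the allocated values, and truncate.
	remaining := len(funded) - 2
	allocated := funded[remaining:]
	utils.ZeroSlice(allocated)
	funded = funded[:remaining]

	fmt.Println(len(funded), allocated) // prints: 2 [<nil> <nil>]
}
```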