Minor readability improvements for e2e test refactor #1855
```diff
@@ -12,7 +12,9 @@ import (
 	"testing"
 
 	ginkgo "github.com/onsi/ginkgo/v2"
+
 	"github.com/onsi/gomega"
+
 	"github.com/stretchr/testify/require"
 
 	"github.com/ava-labs/avalanchego/tests"
```

Contributor: So I'm clear, the preference is for separating every 3rd party import package?

Author: Yeah - that's how it's done through the rest of the repo... Really all I want is some super strict linter that we can get behind... but for now consistency is all I can get.
```diff
@@ -63,7 +63,7 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() {
 	ginkgo.By("retrieving the node ID of a primary network validator", func() {
 		pChainClient := platformvm.NewClient(nodeURI)
 		ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultTimeout)
-		validatorIDs, err := pChainClient.SampleValidators(ctx, ids.ID{}, 1)
+		validatorIDs, err := pChainClient.SampleValidators(ctx, constants.PrimaryNetworkID, 1)
 		cancel()
 		gomega.Expect(err).Should(gomega.BeNil())
 		gomega.Expect(validatorIDs).Should(gomega.HaveLen(1))
```

Author: Named constants ❤️
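For context, a minimal sketch of why this reads as a pure readability change, assuming (as its name suggests, and worth verifying in `utils/constants`) that `constants.PrimaryNetworkID` is defined as the zero ID:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/constants"
)

func main() {
	// Assumption being illustrated: the named constant is the zero ID, so the
	// SampleValidators call above is behaviorally unchanged; the new code only
	// spells out the intent (sample validators of the primary network).
	fmt.Println(constants.PrimaryNetworkID == ids.Empty) // expected: true
	fmt.Println(ids.Empty == ids.ID{})                   // expected: true
}
```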
```diff
@@ -94,8 +94,8 @@ var _ = e2e.DescribePChain("[Workflow]", func() {
 
 		// Use a random node ID to ensure that repeated test runs
 		// will succeed against a persistent network.
-		randomShortID, _ := ids.ToShortID(utils.RandomBytes(20))
-		validatorID := ids.NodeID(randomShortID)
+		validatorID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen))
+		gomega.Expect(err).Should(gomega.BeNil())
 
 		vdr := &txs.Validator{
 			NodeID: validatorID,
```

Author: I think ideally we should be able to use ids.GenerateTestNodeID() here.

Contributor: I tried using that, but since the ids from ids.GenerateTestNodeID() are generated sequentially, the same ids will be generated on successive test runs. This was not compatible with a persistent testnet fixture, though if there's a better way to avoid collision I'm open to suggestions.
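For readers following the thread, a minimal sketch of the trade-off being discussed, assuming the `ids` and `utils` helpers behave as they are used in this diff:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils"
)

func main() {
	// A fresh random node ID on every process start avoids trying to
	// re-register the same validator when the suite is re-run against a
	// network that persists between runs.
	randomID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen))
	if err != nil {
		panic(err)
	}
	fmt.Println("random:", randomID)

	// By contrast (per the discussion above), test node IDs are handed out
	// sequentially, so successive runs would collide on a persistent network.
	fmt.Println("sequential:", ids.GenerateTestNodeID())
}
```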
```diff
@@ -17,6 +17,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ava-labs/avalanchego/utils"
 	"github.com/ava-labs/avalanchego/utils/crypto/secp256k1"
 )
```
```diff
@@ -37,15 +38,14 @@ type TestData struct {
 
 // http server allocating resources to tests potentially executing in parallel
 type testDataServer struct {
-	TestData
-
 	// Synchronizes access to test data
 	lock sync.Mutex
+	TestData
 }
```

Author: I think it's a general best practice to put the lock above the fields it synchronizes access to.
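A small illustration (not from this PR; the type and field names are made up) of that convention, with the mutex declared directly above the state it guards:

```go
package example

import "sync"

// counterStore is illustrative only: the lock sits immediately above the
// fields whose access it synchronizes.
type counterStore struct {
	// lock synchronizes access to counts.
	lock   sync.Mutex
	counts map[string]int
}

func (s *counterStore) increment(key string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.counts[key]++
}
```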
```diff
 // Type used to marshal/unmarshal a set of test keys for transmission over http.
 type keysDocument struct {
-	Keys []*secp256k1.PrivateKey
+	Keys []*secp256k1.PrivateKey `json:"keys"`
 }
```

Author: I know the marshaller defaults to the name of the variable... But I think explicitly tagging makes it very clear that we are json marshalling this. Also we typically use lowercase json keys.
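A quick sketch (struct names are illustrative) of what the explicit tag changes: without it, `encoding/json` falls back to the exported field name, so consumers would see `Keys` rather than `keys`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tagged carries an explicit json tag; untagged relies on the default.
type tagged struct {
	Keys []string `json:"keys"`
}

type untagged struct {
	Keys []string
}

func main() {
	withTag, _ := json.Marshal(tagged{Keys: []string{"a"}})
	withoutTag, _ := json.Marshal(untagged{Keys: []string{"a"}})
	fmt.Println(string(withTag))    // {"keys":["a"]}
	fmt.Println(string(withoutTag)) // {"Keys":["a"]}
}
```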
```diff
@@ -74,20 +74,21 @@ func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Allocate the requested number of keys
+	remainingKeys := len(s.FundedKeys) - keyCount
+	allocatedKeys := s.FundedKeys[remainingKeys:]
+
 	keysDoc := &keysDocument{
-		Keys: s.FundedKeys[len(s.FundedKeys)-keyCount:],
+		Keys: allocatedKeys,
 	}
 	if err := json.NewEncoder(w).Encode(keysDoc); err != nil {
-		http.Error(w, fmt.Sprintf("failed to encode test keys: %v", err), http.StatusInternalServerError)
+		msg := fmt.Sprintf("failed to encode test keys: %v", err)
+		http.Error(w, msg, http.StatusInternalServerError)
 		return
 	}
 
 	// Forget the allocated keys
-	for i := len(s.FundedKeys) - keyCount; i < len(s.FundedKeys); i++ {
-		// Ensure the key can be garbage collected
-		s.FundedKeys[i] = nil
-	}
-	s.FundedKeys = s.FundedKeys[:len(s.FundedKeys)-keyCount]
+	utils.ZeroSlice(allocatedKeys)
+	s.FundedKeys = s.FundedKeys[:remainingKeys]
 }
```

Author: I just found the code a lot easier to understand once the slice boundaries had names (remainingKeys, allocatedKeys) instead of repeated index arithmetic.
```diff
@@ -127,6 +128,7 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
 	if count <= 0 {
 		return nil, errInvalidKeyCount
 	}
+
 	uri, err := url.Parse(baseURI)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse uri: %w", err)
```

```diff
@@ -139,13 +141,13 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
 	if err != nil {
 		return nil, fmt.Errorf("failed to construct request: %w", err)
 	}
 
 	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return nil, fmt.Errorf("failed to request funded keys: %w", err)
 	}
-	if resp.Body != nil {
-		defer resp.Body.Close()
-	}
+	defer resp.Body.Close()
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read response for funded keys: %w", err)
```

```diff
@@ -156,6 +158,7 @@ func AllocateFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, err
 		}
 		return nil, fmt.Errorf("test data server returned unexpected status code %d: %v", resp.StatusCode, body)
 	}
+
 	keysDoc := &keysDocument{}
 	if err := json.Unmarshal(body, keysDoc); err != nil {
 		return nil, fmt.Errorf("failed to unmarshal funded keys: %w", err)
```
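The removed nil check is redundant: the net/http documentation guarantees that `Response.Body` is non-nil whenever the client returns a nil error. A minimal sketch of the resulting pattern (the function name is illustrative, not from the PR):

```go
package example

import (
	"io"
	"net/http"
)

// fetch shows the shape of the change: check the error from the client, then
// defer Close unconditionally, since Body is guaranteed non-nil on success.
func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return io.ReadAll(resp.Body)
}
```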
```diff
@@ -18,11 +18,11 @@ func TestAllocateFundedKeys(t *testing.T) {
 	require := require.New(t)
 
 	factory := secp256k1.Factory{}
-	keys := []*secp256k1.PrivateKey{}
-	for i := 0; i < 5; i++ {
+	keys := make([]*secp256k1.PrivateKey, 5)
+	for i := range keys {
 		key, err := factory.NewPrivateKey()
 		require.NoError(err)
-		keys = append(keys, key)
+		keys[i] = key
 	}
 
 	uri, err := ServeTestData(TestData{
```
```diff
@@ -34,38 +34,44 @@ func TestAllocateFundedKeys(t *testing.T) {
 		name              string
 		count             int
 		expectedAddresses []ids.ShortID
-	}{{
-		name:  "single key",
-		count: 1,
-		expectedAddresses: []ids.ShortID{
-			keys[4].Address(),
+		expectedError     error
+	}{
+		{
+			name:  "single key",
+			count: 1,
+			expectedAddresses: []ids.ShortID{
+				keys[4].Address(),
+			},
+			expectedError: nil,
 		},
-	}, {
-		name:  "multiple keys",
-		count: 4,
-		expectedAddresses: []ids.ShortID{
-			keys[0].Address(),
-			keys[1].Address(),
-			keys[2].Address(),
-			keys[3].Address(),
+		{
+			name:  "multiple keys",
+			count: 4,
+			expectedAddresses: []ids.ShortID{
+				keys[0].Address(),
+				keys[1].Address(),
+				keys[2].Address(),
+				keys[3].Address(),
+			},
+			expectedError: nil,
 		},
-	}, {
-		name:  "insufficient keys available",
-		count: 1,
-	}}
+		{
+			name:              "insufficient keys available",
+			count:             1,
+			expectedAddresses: []ids.ShortID{},
+			expectedError:     errRequestedKeyCountExceedsAvailable,
+		},
+	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			keys, err := AllocateFundedKeys(uri, tc.count)
-			if tc.expectedAddresses == nil {
-				require.ErrorIs(err, errRequestedKeyCountExceedsAvailable)
-			} else {
-				require.NoError(err)
-				addresses := []ids.ShortID{}
-				for _, key := range keys {
-					addresses = append(addresses, key.Address())
-				}
-				require.Equal(tc.expectedAddresses, addresses)
-			}
+			require.ErrorIs(err, tc.expectedError)
+
+			addresses := make([]ids.ShortID, len(keys))
+			for i, key := range keys {
+				addresses[i] = key.Address()
+			}
+			require.Equal(tc.expectedAddresses, addresses)
 		})
 	}
 }
```

Comment on lines +68 to +74:

Author: This is a bit odd as it enforces that the error case still runs the address comparison (against an empty expected slice) rather than only checking the error.

Contributor: I don't think it matters.
```diff
@@ -7,3 +7,13 @@ package utils
 func Zero[T any]() T {
 	return *new(T)
 }
+
+// ZeroSlice sets all values of the provided slice to the type's zero value.
+//
+// This can be useful to ensure that the garbage collector doesn't hold
+// references to values that are no longer desired.
+func ZeroSlice[T any](s []T) {
+	for i := range s {
+		s[i] = *new(T)
+	}
+}
```

Comment on lines +11 to +19:

Author: I'm not sure if we could've used this in the past (typically we zero out the slice 1 element at a time)... But it seems generally useful.
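A sketch of the kind of call site this enables; the helper is reproduced from the diff so the example stands alone, while `truncate` is an illustrative name rather than anything in the PR:

```go
package example

// ZeroSlice is copied from the diff above so the example compiles on its own.
func ZeroSlice[T any](s []T) {
	for i := range s {
		s[i] = *new(T)
	}
}

// truncate drops the tail of a slice, zeroing the dropped elements first so
// the garbage collector can reclaim whatever they referenced. This mirrors
// how allocateKeys forgets the keys it hands out.
func truncate[T any](s []T, remaining int) []T {
	ZeroSlice(s[remaining:])
	return s[:remaining]
}
```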
Contributor: Rather than skipping the whole linter - I think it's better to be explicit on which rule we are silencing.
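Assuming the repo lints with golangci-lint (an assumption on my part; the rule name and reason below are illustrative), the difference looks like this:

```go
package example

import "math/rand"

// jitterMillis illustrates the suggestion: a bare "//nolint" would silence
// every linter for the line, while naming the rule (and the reason) keeps
// all other checks active.
func jitterMillis() int {
	//nolint:gosec // weak randomness is acceptable for test jitter
	return rand.Intn(100)
}
```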