Merged
39 changes: 20 additions & 19 deletions test/sequential/test-cluster-net-listen-ipv6only-none.js
@@ -7,7 +7,6 @@ if (!common.hasIPv6)
const assert = require('assert');
const cluster = require('cluster');
const net = require('net');
const Countdown = require('../common/countdown');

// This test ensures that the `ipv6Only` option in `net.Server.listen()`
// works as expected when we use cluster with `SCHED_NONE` schedulingPolicy.
@@ -16,9 +15,24 @@ const host = '::';
const WORKER_ACCOUNT = 3;

if (cluster.isMaster) {
const workers = new Map();
const workers = [];

const countdown = new Countdown(WORKER_ACCOUNT, () => {
for (let i = 0; i < WORKER_ACCOUNT; i += 1) {
const myWorker = new Promise((resolve) => {
const worker = cluster.fork().on('exit', common.mustCall((statusCode) => {
assert.strictEqual(statusCode, 0);
})).on('listening', common.mustCall((workerAddress) => {
assert.strictEqual(workerAddress.addressType, 6);
assert.strictEqual(workerAddress.address, host);
assert.strictEqual(workerAddress.port, common.PORT);
resolve(worker);
}));
});

workers.push(myWorker);
}

Promise.all(workers).then(common.mustCall((resolvedWorkers) => {
// Make sure the `ipv6Only` option works. This is the part of the test that
// requires the whole test to use `common.PORT` rather than port `0`. If it
// used port `0` instead, then the operating system can supply a port that
@@ -30,24 +44,11 @@ if (cluster.isMaster) {
}, common.mustCall(() => {
// Exit.
server.close();
workers.forEach((worker) => {
worker.disconnect();
resolvedWorkers.forEach((resolvedWorker) => {
resolvedWorker.disconnect();
});
}));
});

for (let i = 0; i < WORKER_ACCOUNT; i += 1) {
const worker = cluster.fork().on('exit', common.mustCall((statusCode) => {
assert.strictEqual(statusCode, 0);
})).on('listening', common.mustCall((workerAddress) => {
assert.strictEqual(workerAddress.addressType, 6);
assert.strictEqual(workerAddress.address, host);
assert.strictEqual(workerAddress.port, common.PORT);
countdown.dec();
}));

workers.set(i, worker);
}
}));
} else {
net.createServer().listen({
host,
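The behaviour this first test exercises can also be seen outside of cluster. Below is a minimal standalone sketch (not part of this PR) of what `ipv6Only` does in `net.Server.listen()`: a listener bound to `::` with `ipv6Only: true` does not claim the IPv4 wildcard, so the same port can still be bound on `0.0.0.0`. The sketch uses an ephemeral port (`port: 0`) only to stay self-contained; as the comment in the test explains, the test itself has to pin `common.PORT` so the IPv4 bind targets a port known to be held only by the workers' IPv6-only listeners.

```js
'use strict';
const net = require('net');

// IPv6-only listener: with `ipv6Only: true` the socket does not accept
// IPv4-mapped connections, so the IPv4 wildcard on the same port stays free.
const server6 = net.createServer();
server6.listen({ host: '::', port: 0, ipv6Only: true }, () => {
  const { port } = server6.address();

  // This IPv4 bind on the same port should succeed; without `ipv6Only`
  // it would typically fail with EADDRINUSE.
  const server4 = net.createServer();
  server4.listen({ host: '0.0.0.0', port }, () => {
    console.log(`port ${port} is bound on both :: (v6-only) and 0.0.0.0`);
    server4.close();
    server6.close();
  });
});
```

Without `ipv6Only`, the dual-stack listener on `::` would normally occupy the IPv4 side of the port as well, and the second bind would fail.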
46 changes: 24 additions & 22 deletions test/sequential/test-cluster-net-listen-ipv6only-rr.js
@@ -7,7 +7,6 @@ if (!common.hasIPv6)
const assert = require('assert');
const cluster = require('cluster');
const net = require('net');
const Countdown = require('../common/countdown');

// This test ensures that the `ipv6Only` option in `net.Server.listen()`
// works as expected when we use cluster with `SCHED_RR` schedulingPolicy.
@@ -16,37 +15,40 @@ const host = '::';
const WORKER_ACCOUNT = 3;

if (cluster.isMaster) {
const workers = new Map();
const workers = [];
let address;

const countdown = new Countdown(WORKER_ACCOUNT, () => {
// Make sure the `ipv6Only` option works.
for (let i = 0; i < WORKER_ACCOUNT; i += 1) {
const myWorker = new Promise((resolve) => {
const worker = cluster.fork().on('exit', common.mustCall((statusCode) => {
assert.strictEqual(statusCode, 0);
})).on('listening', common.mustCall((workerAddress) => {
if (!address) {
address = workerAddress;
} else {
assert.deepStrictEqual(workerAddress, address);
}
resolve(worker);
}));
});

workers.push(myWorker);
}

Promise.all(workers).then(common.mustCall((resolvedWorkers) => {
// Make sure the `ipv6Only` option works. Should be able to use the port on
// IPv4.
const server = net.createServer().listen({
host: '0.0.0.0',
port: address.port,
}, common.mustCall(() => {
// Exit.
server.close();
workers.forEach((worker) => {
worker.disconnect();
resolvedWorkers.forEach((resolvedWorker) => {
resolvedWorker.disconnect();
});
}));
});

for (let i = 0; i < WORKER_ACCOUNT; i += 1) {
const worker = cluster.fork().on('exit', common.mustCall((statusCode) => {
assert.strictEqual(statusCode, 0);
})).on('listening', common.mustCall((workerAddress) => {
if (!address) {
address = workerAddress;
} else {
assert.deepStrictEqual(workerAddress, address);
}
countdown.dec();
}));

workers.set(i, worker);
}
}));
} else {
// As the cluster member has the potential to grab any port
// from the environment, this can cause collision when master
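The change common to both files is the coordination mechanism: the Countdown helper is dropped, each `cluster.fork()` is wrapped in a Promise that resolves on the worker's 'listening' event, and the follow-up check runs in `Promise.all(...).then(...)` once every worker is up. Here is a hedged sketch of that pattern with the test-specific assertions stripped out; `WORKER_COUNT`, `forkWhenListening()` and the worker's listen options are illustrative names, not code from this PR.

```js
'use strict';
const cluster = require('cluster');
const net = require('net');

const WORKER_COUNT = 3;

// Fork a worker and resolve once it reports that its server is listening.
function forkWhenListening() {
  return new Promise((resolve) => {
    const worker = cluster.fork();
    worker.on('listening', (address) => resolve({ worker, address }));
  });
}

if (cluster.isMaster) {
  const pending = [];
  for (let i = 0; i < WORKER_COUNT; i += 1) {
    pending.push(forkWhenListening());
  }

  // Only after every worker is listening does the cross-check (here just a
  // log) run; the workers are then disconnected so the process can exit.
  Promise.all(pending).then((workers) => {
    console.log('all workers listening; first port:', workers[0].address.port);
    workers.forEach(({ worker }) => worker.disconnect());
  });
} else {
  net.createServer().listen({ host: '::', port: 0, ipv6Only: true });
}
```

Compared with the Countdown counter, the Promise-based version also hands the worker objects to the completion callback, which is why the tests can call `disconnect()` on `resolvedWorkers` directly instead of keeping a separate Map of workers.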