For comparison, here are results from an old desktop computer (AMD FX-8150, 2.1 GHz), using the same code as Frederic's answer:
// Baseline HTTP server: replies "Hello World" to every request
// directly from the connection handler (no queueing).
const http = require('http');

http.createServer((req, res) => {
  res.writeHead(200, { 'Content-Type': 'text/plain' });
  res.end('Hello World\n');
}).listen(1337, '127.0.0.1');

console.log('Server running at http://127.0.0.1:1337/');
A single core of the FX-8150 at 2.1 GHz can do this:
latency: {
average: 21.03,
mean: 21.03,
stddev: 5.72,
min: 1,
max: 215,
p0_001: 0,
p0_01: 0,
p0_1: 1,
p1: 17,
p2_5: 18,
p10: 18,
p25: 19,
p50: 20,
p75: 21,
p90: 23,
p97_5: 31,
p99: 44,
p99_9: 85,
p99_99: 196,
p99_999: 203,
totalCount: 224747
},
requests: {
average: 11237.5,
mean: 11237.5,
stddev: 1436.76,
min: 5529,
max: 12300,
total: 224747,
p0_001: 5531,
p0_01: 5531,
p0_1: 5531,
p1: 5531,
p2_5: 5531,
p10: 9871,
p25: 11119,
p50: 11735,
p75: 11895,
p90: 12015,
p97_5: 12303,
p99: 12303,
p99_9: 12303,
p99_99: 12303,
p99_999: 12303,
sent: 224989
},
That is about 12k requests per second. Now with queueing:
// Queued variant: the request handler only enqueues work; a polling
// drain loop answers all queued requests in batches.
const http = require('http');

// Pending { req, res } pairs waiting to be answered.
const cmdQueue = [];

// Drain the whole queue, then re-schedule itself ~1 ms later.
function proc() {
  while (cmdQueue.length > 0) {
    // FIX: shift() serves requests FIFO. The original pop() served the
    // NEWEST request first (LIFO), starving older connections under
    // load and inflating worst-case latency.
    const cmd = cmdQueue.shift();
    cmd.res.writeHead(200, { 'Content-Type': 'text/plain' });
    cmd.res.end('Hello World\n');
  }
  setTimeout(proc, 1);
}
proc();

http.createServer((req, res) => {
  // Defer the enqueue to the next timer tick (the point of this demo:
  // the handler itself does no response work).
  setTimeout(() => {
    cmdQueue.push({ res, req });
  }, 0);
}).listen(1337, '127.0.0.1');

console.log('Server running at http://127.0.0.1:1337/');
benchmark:
latency: {
average: 19.19,
mean: 19.19,
stddev: 6.51,
min: 2,
max: 234,
p0_001: 3,
p0_01: 4,
p0_1: 7,
p1: 9,
p2_5: 10,
p10: 13,
p25: 15,
p50: 19,
p75: 22,
p90: 25,
p97_5: 30,
p99: 37,
p99_9: 95,
p99_99: 180,
p99_999: 208,
totalCount: 245669
},
requests: {
average: 12283.5,
mean: 12283.5,
stddev: 1268.9,
min: 7178,
max: 13067,
total: 245669,
p0_001: 7179,
p0_01: 7179,
p0_1: 7179,
p1: 7179,
p2_5: 7179,
p10: 11479,
p25: 12199,
p50: 12527,
p75: 12935,
p90: 13055,
p97_5: 13071,
p99: 13071,
p99_9: 13071,
p99_99: 13071,
p99_999: 13071,
sent: 245911
},
13k requests per second.
I used autocannon module for nodejs load testing:
// Load-test configuration: 242 connections spread over 22 worker
// threads, hitting the local server for 20 seconds.
const autocannon = require('autocannon');

const options = {
  url: ["http://127.0.0.1:1337/"],
  connections: 242,
  pipelining: 1,
  duration: 20,
  workers: 22,
};

autocannon(options, console.log);
All 22 worker threads were running on the same host machine. So it is true that even a modest CPU (the FX-8150) can handle tens of thousands to hundreds of thousands of requests per second.
To compare with Apache Benchmark (same settings as Frederic's answer):
ab -r -n 10000 -c 100 http://127.0.0.1:1337/
Benchmarking 127.0.0.1 (be patient)
Completed 1000 requests
Completed 2000 requests
Completed 3000 requests
Completed 4000 requests
Completed 5000 requests
Completed 6000 requests
Completed 7000 requests
Completed 8000 requests
Completed 9000 requests
Completed 10000 requests
Finished 10000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 1337
Document Path: /
Document Length: 12 bytes
Concurrency Level: 100
Time taken for tests: 2.134 seconds
Complete requests: 10000
Failed requests: 0
Total transferred: 1130000 bytes
HTML transferred: 120000 bytes
Requests per second: 4686.83 [#/sec] (mean)
Time per request: 21.336 [ms] (mean)
Time per request: 0.213 [ms] (mean, across all concurrent requests)
Transfer rate: 517.20 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 2 1.3 2 7
Processing: 7 19 6.2 17 53
Waiting: 5 14 4.1 13 38
Total: 9 21 6.5 19 53
Percentage of the requests served within a certain time (ms)
50% 19
66% 21
75% 24
80% 26
90% 31
95% 35
98% 40
99% 43
100% 53 (longest request)
Let's increase asynchronicity:
// "More asynchronous" variant: the drain itself is deferred through an
// extra setTimeout hop before the queued responses are written.
const http = require('http');

// Pending { req, res } pairs waiting to be answered.
const cmdQueue = [];

function proc() {
  if (cmdQueue.length > 0) {
    // Extra async hop: postpone the actual drain by ~1 ms.
    setTimeout(() => {
      while (cmdQueue.length > 0) {
        // FIX: shift() answers requests FIFO. The original pop()
        // answered newest-first (LIFO), which starves old requests and
        // hurts tail latency under sustained load.
        const cmd = cmdQueue.shift();
        cmd.res.writeHead(200, { 'Content-Type': 'text/plain' });
        cmd.res.end('Hello World\n');
      }
      setTimeout(proc, 1);
    }, 1);
  } else {
    setTimeout(proc, 1);
  }
}
proc();

http.createServer((req, res) => {
  // Enqueue on the next timer tick instead of responding inline.
  setTimeout(() => {
    cmdQueue.push({ res, req });
  }, 0);
}).listen(1337, '127.0.0.1');

console.log('Server running at http://127.0.0.1:1337/');
result:
ab -r -n 10000 -c 100 http://127.0.0.1:1337/
This is ApacheBench, Version 2.3 <$Revision: 1807734 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 1000 requests
Completed 2000 requests
Completed 3000 requests
Completed 4000 requests
Completed 5000 requests
Completed 6000 requests
Completed 7000 requests
Completed 8000 requests
Completed 9000 requests
Completed 10000 requests
Finished 10000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 1337
Document Path: /
Document Length: 12 bytes
Concurrency Level: 100
Time taken for tests: 1.806 seconds
Complete requests: 10000
Failed requests: 0
Total transferred: 1130000 bytes
HTML transferred: 120000 bytes
Requests per second: 5537.69 [#/sec] (mean)
Time per request: 18.058 [ms] (mean)
Time per request: 0.181 [ms] (mean, across all concurrent requests)
Transfer rate: 611.09 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 2 1.4 2 9
Processing: 4 15 4.2 15 34
Waiting: 2 10 3.6 9 23
Total: 6 18 4.2 18 34
Percentage of the requests served within a certain time (ms)
50% 18
66% 20
75% 21
80% 22
90% 24
95% 25
98% 27
99% 29
100% 34 (longest request)
and with autocannon load tester:
latency: {
average: 20.78,
mean: 20.78,
stddev: 7.38,
min: 1,
max: 138,
p0_001: 1,
p0_01: 1,
p0_1: 2,
p1: 5,
p2_5: 9,
p10: 11,
p25: 15,
p50: 21,
p75: 27,
p90: 30,
p97_5: 33,
p99: 34,
p99_9: 54,
p99_99: 97,
p99_999: 124,
totalCount: 227311
},
requests: {
average: 11366.4,
mean: 11366.4,
stddev: 538.05,
min: 10818,
max: 13429,
total: 227311,
p0_001: 10823,
p0_01: 10823,
p0_1: 10823,
p1: 10823,
p2_5: 10823,
p10: 10863,
p25: 10959,
p50: 11375,
p75: 11415,
p90: 11495,
p97_5: 13431,
p99: 13431,
p99_9: 13431,
p99_99: 13431,
p99_999: 13431,
sent: 227553
},
A slightly higher requests-per-second rate, but with only half the worst-case latency, which makes for a better client experience.