Using multi-processor for node

来源:互联网 发布:优化营商环境宣传口号 编辑:程序博客网 时间:2024/05/18 13:11

Node is single threaded. This means Node is only using one processor to do its work. However, most servers have several "multi-core" processors, and a single multi-core processor contains many processing cores. A server with two physical CPU sockets might have "24 logical cores," that is, 24 processors exposed to the operating system. In order to make the best use of Node we should use those too. So if we don't have threads, how do we do that?
Node provides a module called cluster that allows you to delegate work to child processes. This means that Node creates a copy of its current program in another process (on Windows, it is actually another thread). Each child process has some special abilities, such as the ability to share a socket with other children. This allows us to write Node programs that start many other Node programs and then delegate work to them.

It is important to understand that when you use cluster to share work between a number of copies of a Node program, the master process isn't involved in every transaction. The master process manages the child processes, but when the children interact with I/O they do it directly, not through the master. This means if you set up a web server using cluster, requests don't go through your master process, but directly to the children. Hence, dispatching requests does not create a bottleneck in the system.


Here is an example. 
In this example, the master process forks as many workers as there are processors on your system to serve HTTP requests. Each response simply tells the user which worker (process ID) is handling the socket. When a connection is established, the worker sends a confirmation message, including its own process ID, back to the master. The master then prints the number of requests each worker has received.
There is another cool feature. If one of the workers dies (the 'death' event on cluster), the master will fork another worker to replace it and listen for the 'message' event on the new one.



/* running node on multi-processers */

/* NOTE(review): this listing targets the pre-0.8 cluster API
 * ('death' event, worker.pid); modern Node uses 'exit' and
 * worker.process.pid — confirm the target Node version. */

var cluster = require('cluster'),
    http = require('http'),
    NR_CPUS = require('os').cpus().length,
    numReqs = {},   /* per-worker request counters, keyed by PID string */
    total = 0;      /* total amount of requests across all workers
                     * (was misspelled 'totle' in one handler and 'total'
                     * in the other, so one of them hit a ReferenceError) */

/* Shared 'message' handler for every worker: count the request and
 * print a per-worker summary.  (The original duplicated this code for
 * freshly-forked and reborn workers.) */
function onWorkerMessage(msg) {
     /* the syntax error 'ifmsg.cmd&&' is fixed here */
     if ( msg.cmd && 'notifyRequest' === msg.cmd ) {
          console.log('\n');
          numReqs[msg.pid] += 1;
          total += 1;
          for ( var pid in numReqs ) {
               /* dead workers leave an undefined slot behind — skip them */
               if ( undefined !== numReqs[pid] ) {
                    console.log('worker PID[' + pid + '] gets ' + numReqs[pid] + ' requests now');
               }
          }
          console.log('total requests are ' + total);
     }
}

if (cluster.isMaster) {
     console.log('INFO: there are ' + NR_CPUS + ' cpus here');
     console.log('INFO: the PID of main thread is ' + process.pid);

     /* Master process: fork one worker per logical CPU. */
     for (var i = 0; i < NR_CPUS; i += 1) {
          var worker = cluster.fork();
          numReqs[Number(worker.pid).toString()] = 0;
          console.log('INFO: worker PID[' + worker.pid + '] was spawned');
          worker.on('message', onWorkerMessage);
     }

     cluster.on('death', function(worker) {
          console.log('[worker ' + worker.pid + ' died]');
          /* remove the request counter of dead process */
          numReqs[worker.pid] = undefined;
          /* fork another worker and add its request counter */
          var rebirth = cluster.fork();
          numReqs[Number(rebirth.pid).toString()] = 0;
          console.log('[worker ' + rebirth.pid + ' join us now]');
          /* add the message event listener on that new worker */
          rebirth.on('message', onWorkerMessage);
     });
     /* (the closing brace of this if-branch was missing in the
      * original, so the script could not even parse) */
} else {
     /* Child processes serving on http protocal */
     http.createServer(function(req, res) {
          /* send msg to master process */
          process.send({
               cmd: 'notifyRequest',
               pid: process.pid.toString()
          });
          res.writeHead(200, {
               'Content-Type': 'text/plain'
          });
          res.end('worker PID[' + process.pid + '] start serving you now:)\n');
     }).listen(8888);
}

When you use curl -i http://my.domain.com:8888 to test, you will see these things happen.

Furthermore, I added some cool features to this program:
1). Worker processes check their memory usage and send this information to the master every second. The running time of each worker is updated at the same time. If a worker uses too much memory, the master prints a warning. If a worker stays in a callback for more than 6 seconds, it is killed and the master spawns a new one to carry on its work.
2). abstract the program into different modules:
     - workerSet: deposit workers and gives public apis to manipulate the infos of workers.
     - msgSys: the message system for communications between workers and master.
3). master can still be notified how many requests got by each worker from clients.

Here is the updated cluster.js:
/* running node on multi-processers */

var cluster = require('cluster'),
     http = require('http'),
     NR_CPUS = require('os').cpus().length,
     /* warn once a worker's resident set exceeds 50 MB */
     rssWarn = (50 * 1024 * 1024),

     /**
      * Bookkeeping for live workers, keyed by PID string.
      * Each entry is { process, req, dur }: the worker handle, its
      * request counter, and the timestamp of its last heartbeat.
      */
     workerSet = {

          /* the actual worker set */
          __workerSet: {
          },

          /* total requests getting from clients */
          total: 0,

          /* if we have this worker already */
          isWorkerExist: function(worker) {
               return ( worker && worker.pid
                         && undefined !== this.__workerSet[Number(worker.pid).toString()] ) ? true : false;
          },

          pushWorker: function(worker) {
               if ( ! this.isWorkerExist(worker) ) {
                    this.__workerSet[Number(worker.pid).toString()] = {};
                    this.__workerSet[Number(worker.pid).toString()].process = worker;
               }
               /* for cascading */
               return this;
          },

          popWorker: function(worker) {
               if ( this.isWorkerExist(worker) ) {
                    /* slot is set to undefined (not deleted) so iteration
                     * code must skip undefined entries */
                    this.__workerSet[Number(worker.pid).toString()] = undefined;
               }
               /* for cascading */
               return this;
          },

          getPid: function(worker) {
               return ( true === this.isWorkerExist(worker) ) ?
                    this.__workerSet[Number(worker.pid).toString()].process.pid : undefined;
          },

          /* NOTE(review): assumes the stored handle exposes memoryUsage();
           * that is true of `process` but not of cluster worker handles —
           * confirm before calling this on a forked worker. */
          getMemUsage: function(worker) {
               return ( true === this.isWorkerExist(worker) ) ?
                    this.__workerSet[Number(worker.pid).toString()].process.memoryUsage() : undefined;
          },

          /* getter (1 arg) / setter (2 args) for the request counter */
          numReqs: function(worker, num) {
               if ( 1 == arguments.length )  {
                    /* getter */
                    return ( true === this.isWorkerExist(worker)
                              && undefined !== this.__workerSet[Number(worker.pid).toString()].req ) ?
                                   this.__workerSet[Number(worker.pid).toString()].req : undefined;
               } else if ( 2 == arguments.length ) {
                    /* setter: add num to the counter.  (The original
                     * expression `req + num` bound tighter than the
                     * ternary, so the very first call discarded num.) */
                    if ( true === this.isWorkerExist(worker) ) {
                         var req = this.__workerSet[Number(worker.pid).toString()].req;
                         this.__workerSet[Number(worker.pid).toString()].req =
                              ( ( undefined === req ) ? 0 : req ) + num;
                    }
                    return this;
               }

          },

          /* getter (1 arg) / setter (2 args) for the heartbeat timestamp */
          duration: function(worker, dur) {
               if ( 1 == arguments.length )  {
                    /* getter: check existence BEFORE dereferencing — the
                     * original read .dur first and threw a TypeError for
                     * unknown workers instead of returning undefined */
                    if ( true !== this.isWorkerExist(worker) ) {
                         return undefined;
                    }
                    return this.__workerSet[Number(worker.pid).toString()].dur;
               } else if ( 2 == arguments.length ) {
                    /* setter */
                    if ( true === this.isWorkerExist(worker) ) {
                         this.__workerSet[Number(worker.pid).toString()].dur = dur;
                    }
                    return this;
               }
          },

          /* expose the raw set for iteration (callers must skip
           * undefined slots left behind by popWorker) */
          foreach: function() {
               return this.__workerSet;
          }
     },

   /**
     * The message system
     * Send Methods
     *            @memNotify:      notify master how many memory has been used by worker.
     *            @numReqsNotify: notify master how many requests got by worker from clients.
    *
     * Notified Method
     *            @notified:          get notified of the duration, number of requests and memory usage of
     *                                workers from its last running to check whether it has spent too
     *                                long time on dealing with callback.
    */
     msgSys = {
          /* NOTE(review): callers pass `process` here (child side), which
           * has send()/memoryUsage(); a master-side worker handle would
           * not — confirm usage stays child-side. */
          memNotify: function(worker) {
               worker.send({
                    pid: worker.pid.toString(),
                    cmd: 'memNotify',
                    memory: worker.memoryUsage()
               });
          },

          numReqsNotify: function(worker) {
               worker.send({
                    pid: worker.pid.toString(),
                    cmd: 'numReqsNotify'
               });
          },

          /* master-side: attach the 'message' listener for one worker */
          notified: function(worker) {

               worker.on('message', function(msg) {
                    /* duration of workers would be updated every time master thread get notified. */
                    workerSet.duration(worker, new Date().getTime());

                    if ( msg.cmd && 'numReqsNotify' === msg.cmd ) {
                         console.log('\n');

                         if ( workerSet.isWorkerExist(worker) ) {
                              workerSet.numReqs(worker, 1);
                              workerSet.total += 1;
                         }

                         for ( var each in workerSet.foreach() ) {
                              /* get the real object */
                              var obj = workerSet.foreach()[each];

                              if ( undefined !== obj ) {
                                   console.log('worker PID[' +
                                                  obj.process.pid +
                                                  '] gets ' + obj.req +
                                                  ' requests now');
                              }
                         }
                         console.log('total requests are ' + workerSet.total);

                    } else if ( msg.cmd && 'memNotify' === msg.cmd ) {
                         /* (a second, redundant duration update was
                          * removed here — it is already done above) */
                         if ( msg.memory.rss > rssWarn ) {
                              console.log('worker ' + msg.pid + ' using too much memory.');
                         }
                    }
               });
          }
     };

/* Fork a fresh worker, register it in the bookkeeping set, and wire up
 * its master-side message listener. */
function createWorker() {
     var spawned = cluster.fork();
     /* register the worker, then start its request counter at zero */
     workerSet.pushWorker(spawned);
     workerSet.numReqs(spawned, 0);
     /* back-date the heartbeat stamp by one second to allow boot time */
     workerSet.duration(spawned, new Date().getTime() - 1000);
     console.log('INFO: worker PID[' + spawned.pid + '] was spawned');

     /* notifications */
     msgSys.notified(spawned);
}

if (cluster.isMaster) {
     /* Master process: report topology, then spawn one worker per CPU. */
     console.log('INFO: there are ' + NR_CPUS + ' cpus here');
     console.log('INFO: the PID of main thread is ' + process.pid);
     for ( var n = 0; n < NR_CPUS; n += 1 ) {
          createWorker();
     }

     /* Every second, kill any worker whose heartbeat is older than 6s
      * (i.e. it is stuck inside a callback). */
     setInterval(function() {
          var now = new Date().getTime();
          var set = workerSet.foreach();
          for ( var key in set ) {
               var entry = set[key];
               /* popWorker leaves undefined slots behind — skip them */
               if ( undefined === entry ) {
                    continue;
               }
               if ( entry.dur && entry.dur + 6000 < now ) {
                    entry.process.kill();
               }
          }
     }, 1000);

     /* Replace any worker that dies. */
     cluster.on('death', function(worker) {
          console.log('INFO: worker PID[' + worker.pid + '] died.');
          /* drop the dead worker's bookkeeping entry */
          workerSet.popWorker(worker);
          /* then fork a replacement */
          createWorker();
     });

} else {
     /* Child processes serving on http protocal */
     http.createServer(function(req, res) {
          res.writeHead(200, {
               'Content-Type': 'text/plain'
          });

          /* notify master how many requests got by worker from clients each
           * time 'request' event on happened. */
          msgSys.numReqsNotify(process);

          /* mess up 1 in 10 reqs */
          if ( Math.floor(Math.random() * 10) === 4 ) {
               res.write('Stopped ' + process.pid + ' from ever finishing\n');
               while (true) {
                    continue;
               }
          }

          res.end('worker PID[' + process.pid + '] starts serving you now:)\n');

     }).listen(8888);

     /* report worker memory usage once a second. */
     setInterval(function() {
          msgSys.memNotify(process);
     }, 1000);
}
原创粉丝点击