zeroMQ初体验-34.发布/订阅模式进阶-克隆模式-下,结言
来源:互联网 发布:东风风神a30abs数据 编辑:程序博客网 时间:2024/05/18 01:42
服务器:
代码不短,不过作者的牢骚更长。(貌似花了一周的时间)
当然作为一个靠谱的模型,总会制定一些规范给某些不太靠谱的人:http://rfc.zeromq.org/spec:12
至此,整个教程算是告一段落了。(之所以这最后一个模型分了三段,着实是代码多了些)
教程结束了,学习才刚开始。至于会不会再有后续,诚如guide结尾:
More coming soon…
结言:
虽然知道翻译技术文章有难度,但着实还是吓着了,在写第一章的时候就打了退堂鼓。终究在自我安慰、勉励下完成了这个系列的笔记(退一步)。好吧,我承认代码、图示占了大比例,不过,好歹算是有始有终的完成了。
原计划一周时间结束的,由于诸多原因(磨蹭,消极,退堂鼓)前后竟然跨了两个多月,总算咬牙坚持了下来。其实不敢说学到了很多,自从中部python的示例代码不再提供之后,几乎就没有再自己验证代码的可行性和逻辑了。写本系列,更多的是自个儿跟自个儿过不去(俺就不信写不完了!)折腾到最后,多少也是有些收获的(谁折腾谁知道~)
回首看看,也就这样了,倒是有些"天凉好个秋"的意味。
也罢,哦了
- //
- // Clone server Model Six
- //
- // Lets us build this source without creating a library
- #include "bstar.c"
- #include "kvmsg.c"
- // Bstar reactor handlers
- static int s_snapshots (zloop_t *loop, void *socket, void *args);
- static int s_collector (zloop_t *loop, void *socket, void *args);
- static int s_flush_ttl (zloop_t *loop, void *socket, void *args);
- static int s_send_hugz (zloop_t *loop, void *socket, void *args);
- static int s_new_master (zloop_t *loop, void *unused, void *args);
- static int s_new_slave (zloop_t *loop, void *unused, void *args);
- static int s_subscriber (zloop_t *loop, void *socket, void *args);
//  Our server is defined by these properties; a single instance is
//  allocated in main() and passed as the args pointer to every handler
typedef struct {
    zctx_t *ctx;                //  Context wrapper
    zhash_t *kvmap;             //  Key-value store (NULL while slave awaits snapshot)
    bstar_t *bstar;             //  Bstar reactor core
    int64_t sequence;           //  How many updates we're at
    int port;                   //  Main port we're working on
    int peer;                   //  Main port of our peer
    void *publisher;            //  Publish updates and hugz
    void *collector;            //  Collect updates from clients
    void *subscriber;           //  Get updates from peer
    zlist_t *pending;           //  Pending updates from clients
    Bool primary;               //  TRUE if we're primary
    Bool master;                //  TRUE if we're master
    Bool slave;                 //  TRUE if we're slave
} clonesrv_t;
- int main (int argc, char *argv [])
- {
- clonesrv_t *self = (clonesrv_t *) zmalloc (sizeof (clonesrv_t));
- if (argc == 2 && streq (argv [1], "-p")) {
- zclock_log ("I: primary master, waiting for backup (slave)");
- self->bstar = bstar_new (BSTAR_PRIMARY, "tcp://*:5003",
- "tcp://localhost:5004");
- bstar_voter (self->bstar, "tcp://*:5556", ZMQ_ROUTER,
- s_snapshots, self);
- self->port = 5556;
- self->peer = 5566;
- self->primary = TRUE;
- }
- else
- if (argc == 2 && streq (argv [1], "-b")) {
- zclock_log ("I: backup slave, waiting for primary (master)");
- self->bstar = bstar_new (BSTAR_BACKUP, "tcp://*:5004",
- "tcp://localhost:5003");
- bstar_voter (self->bstar, "tcp://*:5566", ZMQ_ROUTER,
- s_snapshots, self);
- self->port = 5566;
- self->peer = 5556;
- self->primary = FALSE;
- }
- else {
- printf ("Usage: clonesrv4 { -p | -b }\n");
- free (self);
- exit (0);
- }
- // Primary server will become first master
- if (self->primary)
- self->kvmap = zhash_new ();
- self->ctx = zctx_new ();
- self->pending = zlist_new ();
- bstar_set_verbose (self->bstar, TRUE);
- // Set up our clone server sockets
- self->publisher = zsocket_new (self->ctx, ZMQ_PUB);
- self->collector = zsocket_new (self->ctx, ZMQ_SUB);
- zsocket_bind (self->publisher, "tcp://*:%d", self->port + 1);
- zsocket_bind (self->collector, "tcp://*:%d", self->port + 2);
- // Set up our own clone client interface to peer
- self->subscriber = zsocket_new (self->ctx, ZMQ_SUB);
- zsocket_connect (self->subscriber, "tcp://localhost:%d", self->peer + 1);
- // Register state change handlers
- bstar_new_master (self->bstar, s_new_master, self);
- bstar_new_slave (self->bstar, s_new_slave, self);
- // Register our other handlers with the bstar reactor
- zloop_reader (bstar_zloop (self->bstar), self->collector, s_collector, self);
- zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_flush_ttl, self);
- zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_send_hugz, self);
- // Start the Bstar reactor
- bstar_start (self->bstar);
- // Interrupted, so shut down
- while (zlist_size (self->pending)) {
- kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending);
- kvmsg_destroy (&kvmsg);
- }
- zlist_destroy (&self->pending);
- bstar_destroy (&self->bstar);
- zhash_destroy (&self->kvmap);
- zctx_destroy (&self->ctx);
- free (self);
- return 0;
- }
- // ---------------------------------------------------------------------
- // Send snapshots to clients who ask for them
- static int s_send_single (char *key, void *data, void *args);
- // Routing information for a key-value snapshot
- typedef struct {
- void *socket; // ROUTER socket to send to
- zframe_t *identity; // Identity of peer who requested state
- char *subtree; // Client subtree specification
- } kvroute_t;
- static int
- s_snapshots (zloop_t *loop, void *snapshot, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- zframe_t *identity = zframe_recv (snapshot);
- if (identity) {
- // Request is in second frame of message
- char *request = zstr_recv (snapshot);
- char *subtree = NULL;
- if (streq (request, "ICANHAZ?")) {
- free (request);
- subtree = zstr_recv (snapshot);
- }
- else
- printf ("E: bad request, aborting\n");
- if (subtree) {
- // Send state socket to client
- kvroute_t routing = { snapshot, identity, subtree };
- zhash_foreach (self->kvmap, s_send_single, &routing);
- // Now send END message with sequence number
- zclock_log ("I: sending shapshot=%d", (int) self->sequence);
- zframe_send (&identity, snapshot, ZFRAME_MORE);
- kvmsg_t *kvmsg = kvmsg_new (self->sequence);
- kvmsg_set_key (kvmsg, "KTHXBAI");
- kvmsg_set_body (kvmsg, (byte *) subtree, 0);
- kvmsg_send (kvmsg, snapshot);
- kvmsg_destroy (&kvmsg);
- free (subtree);
- }
- }
- return 0;
- }
- // Send one state snapshot key-value pair to a socket
- // Hash item data is our kvmsg object, ready to send
- static int
- s_send_single (char *key, void *data, void *args)
- {
- kvroute_t *kvroute = (kvroute_t *) args;
- kvmsg_t *kvmsg = (kvmsg_t *) data;
- if (strlen (kvroute->subtree) <= strlen (kvmsg_key (kvmsg))
- && memcmp (kvroute->subtree,
- kvmsg_key (kvmsg), strlen (kvroute->subtree)) == 0) {
- // Send identity of recipient first
- zframe_send (&kvroute->identity,
- kvroute->socket, ZFRAME_MORE + ZFRAME_REUSE);
- kvmsg_send (kvmsg, kvroute->socket);
- }
- return 0;
- }
- // ---------------------------------------------------------------------
- // Collect updates from clients
- // If we're master, we apply these to the kvmap
- // If we're slave, or unsure, we queue them on our pending list
- static int s_was_pending (clonesrv_t *self, kvmsg_t *kvmsg);
- static int
- s_collector (zloop_t *loop, void *collector, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- kvmsg_t *kvmsg = kvmsg_recv (collector);
- kvmsg_dump (kvmsg);
- if (kvmsg) {
- if (self->master) {
- kvmsg_set_sequence (kvmsg, ++self->sequence);
- kvmsg_send (kvmsg, self->publisher);
- int ttl = atoi (kvmsg_get_prop (kvmsg, "ttl"));
- if (ttl)
- kvmsg_set_prop (kvmsg, "ttl",
- "%" PRId64, zclock_time () + ttl * 1000);
- kvmsg_store (&kvmsg, self->kvmap);
- zclock_log ("I: publishing update=%d", (int) self->sequence);
- }
- else {
- // If we already got message from master, drop it, else
- // hold on pending list
- if (s_was_pending (self, kvmsg))
- kvmsg_destroy (&kvmsg);
- else
- zlist_append (self->pending, kvmsg);
- }
- }
- return 0;
- }
- // If message was already on pending list, remove it and
- // return TRUE, else return FALSE.
- static int
- s_was_pending (clonesrv_t *self, kvmsg_t *kvmsg)
- {
- kvmsg_t *held = (kvmsg_t *) zlist_first (self->pending);
- while (held) {
- if (memcmp (kvmsg_uuid (kvmsg),
- kvmsg_uuid (held), sizeof (uuid_t)) == 0) {
- zlist_remove (self->pending, held);
- return TRUE;
- }
- held = (kvmsg_t *) zlist_next (self->pending);
- }
- return FALSE;
- }
- // ---------------------------------------------------------------------
- // Purge ephemeral values that have expired
- static int s_flush_single (char *key, void *data, void *args);
- static int
- s_flush_ttl (zloop_t *loop, void *unused, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- zhash_foreach (self->kvmap, s_flush_single, args);
- return 0;
- }
- // If key-value pair has expired, delete it and publish the
- // fact to listening clients.
- static int
- s_flush_single (char *key, void *data, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- kvmsg_t *kvmsg = (kvmsg_t *) data;
- int64_t ttl;
- sscanf (kvmsg_get_prop (kvmsg, "ttl"), "%" PRId64, &ttl);
- if (ttl && zclock_time () >= ttl) {
- kvmsg_set_sequence (kvmsg, ++self->sequence);
- kvmsg_set_body (kvmsg, (byte *) "", 0);
- kvmsg_send (kvmsg, self->publisher);
- kvmsg_store (&kvmsg, self->kvmap);
- zclock_log ("I: publishing delete=%d", (int) self->sequence);
- }
- return 0;
- }
- // ---------------------------------------------------------------------
- // Send hugz to anyone listening on the publisher socket
- static int
- s_send_hugz (zloop_t *loop, void *unused, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- kvmsg_t *kvmsg = kvmsg_new (self->sequence);
- kvmsg_set_key (kvmsg, "HUGZ");
- kvmsg_set_body (kvmsg, (byte *) "", 0);
- kvmsg_send (kvmsg, self->publisher);
- kvmsg_destroy (&kvmsg);
- return 0;
- }
- // ---------------------------------------------------------------------
- // State change handlers
- // We're becoming master
- //
- // The backup server applies its pending list to its own hash table,
- // and then starts to process state snapshot requests.
- static int
- s_new_master (zloop_t *loop, void *unused, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- self->master = TRUE;
- self->slave = FALSE;
- zloop_cancel (bstar_zloop (self->bstar), self->subscriber);
- // Apply pending list to own hash table
- while (zlist_size (self->pending)) {
- kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending);
- kvmsg_set_sequence (kvmsg, ++self->sequence);
- kvmsg_send (kvmsg, self->publisher);
- kvmsg_store (&kvmsg, self->kvmap);
- zclock_log ("I: publishing pending=%d", (int) self->sequence);
- }
- return 0;
- }
- // ---------------------------------------------------------------------
- // We're becoming slave
- static int
- s_new_slave (zloop_t *loop, void *unused, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- zhash_destroy (&self->kvmap);
- self->master = FALSE;
- self->slave = TRUE;
- zloop_reader (bstar_zloop (self->bstar), self->subscriber,
- s_subscriber, self);
- return 0;
- }
- // ---------------------------------------------------------------------
- // Collect updates from peer (master)
- // We're always slave when we get these updates
- static int
- s_subscriber (zloop_t *loop, void *subscriber, void *args)
- {
- clonesrv_t *self = (clonesrv_t *) args;
- // Get state snapshot if necessary
- if (self->kvmap == NULL) {
- self->kvmap = zhash_new ();
- void *snapshot = zsocket_new (self->ctx, ZMQ_DEALER);
- zsocket_connect (snapshot, "tcp://localhost:%d", self->peer);
- zclock_log ("I: asking for snapshot from: tcp://localhost:%d",
- self->peer);
- zstr_send (snapshot, "ICANHAZ?");
- while (TRUE) {
- kvmsg_t *kvmsg = kvmsg_recv (snapshot);
- if (!kvmsg)
- break; // Interrupted
- if (streq (kvmsg_key (kvmsg), "KTHXBAI")) {
- self->sequence = kvmsg_sequence (kvmsg);
- kvmsg_destroy (&kvmsg);
- break; // Done
- }
- kvmsg_store (&kvmsg, self->kvmap);
- }
- zclock_log ("I: received snapshot=%d", (int) self->sequence);
- zsocket_destroy (self->ctx, snapshot);
- }
- // Find and remove update off pending list
- kvmsg_t *kvmsg = kvmsg_recv (subscriber);
- if (!kvmsg)
- return 0;
- if (strneq (kvmsg_key (kvmsg), "HUGZ")) {
- if (!s_was_pending (self, kvmsg)) {
- // If master update came before client update, flip it
- // around, store master update (with sequence) on pending
- // list and use to clear client update when it comes later
- zlist_append (self->pending, kvmsg_dup (kvmsg));
- }
- // If update is more recent than our kvmap, apply it
- if (kvmsg_sequence (kvmsg) > self->sequence) {
- self->sequence = kvmsg_sequence (kvmsg);
- kvmsg_store (&kvmsg, self->kvmap);
- zclock_log ("I: received update=%d", (int) self->sequence);
- }
- else
- kvmsg_destroy (&kvmsg);
- }
- else
- kvmsg_destroy (&kvmsg);
- return 0;
- }
//// Clone server Model Six//// Lets us build this source without creating a library#include "bstar.c"#include "kvmsg.c"// Bstar reactor handlersstatic int s_snapshots (zloop_t *loop, void *socket, void *args);static int s_collector (zloop_t *loop, void *socket, void *args);static int s_flush_ttl (zloop_t *loop, void *socket, void *args);static int s_send_hugz (zloop_t *loop, void *socket, void *args);static int s_new_master (zloop_t *loop, void *unused, void *args);static int s_new_slave (zloop_t *loop, void *unused, void *args);static int s_subscriber (zloop_t *loop, void *socket, void *args);// Our server is defined by these propertiestypedef struct { zctx_t *ctx; // Context wrapper zhash_t *kvmap; // Key-value store bstar_t *bstar; // Bstar reactor core int64_t sequence; // How many updates we're at int port; // Main port we're working on int peer; // Main port of our peer void *publisher; // Publish updates and hugz void *collector; // Collect updates from clients void *subscriber; // Get updates from peer zlist_t *pending; // Pending updates from clients Bool primary; // TRUE if we're primary Bool master; // TRUE if we're master Bool slave; // TRUE if we're slave} clonesrv_t;int main (int argc, char *argv []){ clonesrv_t *self = (clonesrv_t *) zmalloc (sizeof (clonesrv_t)); if (argc == 2 && streq (argv [1], "-p")) { zclock_log ("I: primary master, waiting for backup (slave)"); self->bstar = bstar_new (BSTAR_PRIMARY, "tcp://*:5003", "tcp://localhost:5004"); bstar_voter (self->bstar, "tcp://*:5556", ZMQ_ROUTER, s_snapshots, self); self->port = 5556; self->peer = 5566; self->primary = TRUE; } else if (argc == 2 && streq (argv [1], "-b")) { zclock_log ("I: backup slave, waiting for primary (master)"); self->bstar = bstar_new (BSTAR_BACKUP, "tcp://*:5004", "tcp://localhost:5003"); bstar_voter (self->bstar, "tcp://*:5566", ZMQ_ROUTER, s_snapshots, self); self->port = 5566; self->peer = 5556; self->primary = FALSE; } else { printf ("Usage: clonesrv4 { -p | -b }\n"); 
free (self); exit (0); } // Primary server will become first master if (self->primary) self->kvmap = zhash_new (); self->ctx = zctx_new (); self->pending = zlist_new (); bstar_set_verbose (self->bstar, TRUE); // Set up our clone server sockets self->publisher = zsocket_new (self->ctx, ZMQ_PUB); self->collector = zsocket_new (self->ctx, ZMQ_SUB); zsocket_bind (self->publisher, "tcp://*:%d", self->port + 1); zsocket_bind (self->collector, "tcp://*:%d", self->port + 2); // Set up our own clone client interface to peer self->subscriber = zsocket_new (self->ctx, ZMQ_SUB); zsocket_connect (self->subscriber, "tcp://localhost:%d", self->peer + 1); // Register state change handlers bstar_new_master (self->bstar, s_new_master, self); bstar_new_slave (self->bstar, s_new_slave, self); // Register our other handlers with the bstar reactor zloop_reader (bstar_zloop (self->bstar), self->collector, s_collector, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_flush_ttl, self); zloop_timer (bstar_zloop (self->bstar), 1000, 0, s_send_hugz, self); // Start the Bstar reactor bstar_start (self->bstar); // Interrupted, so shut down while (zlist_size (self->pending)) { kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending); kvmsg_destroy (&kvmsg); } zlist_destroy (&self->pending); bstar_destroy (&self->bstar); zhash_destroy (&self->kvmap); zctx_destroy (&self->ctx); free (self); return 0;}// ---------------------------------------------------------------------// Send snapshots to clients who ask for themstatic int s_send_single (char *key, void *data, void *args);// Routing information for a key-value snapshottypedef struct { void *socket; // ROUTER socket to send to zframe_t *identity; // Identity of peer who requested state char *subtree; // Client subtree specification} kvroute_t;static ints_snapshots (zloop_t *loop, void *snapshot, void *args){ clonesrv_t *self = (clonesrv_t *) args; zframe_t *identity = zframe_recv (snapshot); if (identity) { // Request is in second frame 
of message char *request = zstr_recv (snapshot); char *subtree = NULL; if (streq (request, "ICANHAZ?")) { free (request); subtree = zstr_recv (snapshot); } else printf ("E: bad request, aborting\n"); if (subtree) { // Send state socket to client kvroute_t routing = { snapshot, identity, subtree }; zhash_foreach (self->kvmap, s_send_single, &routing); // Now send END message with sequence number zclock_log ("I: sending shapshot=%d", (int) self->sequence); zframe_send (&identity, snapshot, ZFRAME_MORE); kvmsg_t *kvmsg = kvmsg_new (self->sequence); kvmsg_set_key (kvmsg, "KTHXBAI"); kvmsg_set_body (kvmsg, (byte *) subtree, 0); kvmsg_send (kvmsg, snapshot); kvmsg_destroy (&kvmsg); free (subtree); } } return 0;}// Send one state snapshot key-value pair to a socket// Hash item data is our kvmsg object, ready to sendstatic ints_send_single (char *key, void *data, void *args){ kvroute_t *kvroute = (kvroute_t *) args; kvmsg_t *kvmsg = (kvmsg_t *) data; if (strlen (kvroute->subtree) <= strlen (kvmsg_key (kvmsg)) && memcmp (kvroute->subtree, kvmsg_key (kvmsg), strlen (kvroute->subtree)) == 0) { // Send identity of recipient first zframe_send (&kvroute->identity, kvroute->socket, ZFRAME_MORE + ZFRAME_REUSE); kvmsg_send (kvmsg, kvroute->socket); } return 0;}// ---------------------------------------------------------------------// Collect updates from clients// If we're master, we apply these to the kvmap// If we're slave, or unsure, we queue them on our pending liststatic int s_was_pending (clonesrv_t *self, kvmsg_t *kvmsg);static ints_collector (zloop_t *loop, void *collector, void *args){ clonesrv_t *self = (clonesrv_t *) args; kvmsg_t *kvmsg = kvmsg_recv (collector); kvmsg_dump (kvmsg); if (kvmsg) { if (self->master) { kvmsg_set_sequence (kvmsg, ++self->sequence); kvmsg_send (kvmsg, self->publisher); int ttl = atoi (kvmsg_get_prop (kvmsg, "ttl")); if (ttl) kvmsg_set_prop (kvmsg, "ttl", "%" PRId64, zclock_time () + ttl * 1000); kvmsg_store (&kvmsg, self->kvmap); zclock_log 
("I: publishing update=%d", (int) self->sequence); } else { // If we already got message from master, drop it, else // hold on pending list if (s_was_pending (self, kvmsg)) kvmsg_destroy (&kvmsg); else zlist_append (self->pending, kvmsg); } } return 0;}// If message was already on pending list, remove it and// return TRUE, else return FALSE.static ints_was_pending (clonesrv_t *self, kvmsg_t *kvmsg){ kvmsg_t *held = (kvmsg_t *) zlist_first (self->pending); while (held) { if (memcmp (kvmsg_uuid (kvmsg), kvmsg_uuid (held), sizeof (uuid_t)) == 0) { zlist_remove (self->pending, held); return TRUE; } held = (kvmsg_t *) zlist_next (self->pending); } return FALSE;}// ---------------------------------------------------------------------// Purge ephemeral values that have expiredstatic int s_flush_single (char *key, void *data, void *args);static ints_flush_ttl (zloop_t *loop, void *unused, void *args){ clonesrv_t *self = (clonesrv_t *) args; zhash_foreach (self->kvmap, s_flush_single, args); return 0;}// If key-value pair has expired, delete it and publish the// fact to listening clients.static ints_flush_single (char *key, void *data, void *args){ clonesrv_t *self = (clonesrv_t *) args; kvmsg_t *kvmsg = (kvmsg_t *) data; int64_t ttl; sscanf (kvmsg_get_prop (kvmsg, "ttl"), "%" PRId64, &ttl); if (ttl && zclock_time () >= ttl) { kvmsg_set_sequence (kvmsg, ++self->sequence); kvmsg_set_body (kvmsg, (byte *) "", 0); kvmsg_send (kvmsg, self->publisher); kvmsg_store (&kvmsg, self->kvmap); zclock_log ("I: publishing delete=%d", (int) self->sequence); } return 0;}// ---------------------------------------------------------------------// Send hugz to anyone listening on the publisher socketstatic ints_send_hugz (zloop_t *loop, void *unused, void *args){ clonesrv_t *self = (clonesrv_t *) args; kvmsg_t *kvmsg = kvmsg_new (self->sequence); kvmsg_set_key (kvmsg, "HUGZ"); kvmsg_set_body (kvmsg, (byte *) "", 0); kvmsg_send (kvmsg, self->publisher); kvmsg_destroy (&kvmsg); return 0;}// 
---------------------------------------------------------------------// State change handlers// We're becoming master//// The backup server applies its pending list to its own hash table,// and then starts to process state snapshot requests.static ints_new_master (zloop_t *loop, void *unused, void *args){ clonesrv_t *self = (clonesrv_t *) args; self->master = TRUE; self->slave = FALSE; zloop_cancel (bstar_zloop (self->bstar), self->subscriber); // Apply pending list to own hash table while (zlist_size (self->pending)) { kvmsg_t *kvmsg = (kvmsg_t *) zlist_pop (self->pending); kvmsg_set_sequence (kvmsg, ++self->sequence); kvmsg_send (kvmsg, self->publisher); kvmsg_store (&kvmsg, self->kvmap); zclock_log ("I: publishing pending=%d", (int) self->sequence); } return 0;}// ---------------------------------------------------------------------// We're becoming slavestatic ints_new_slave (zloop_t *loop, void *unused, void *args){ clonesrv_t *self = (clonesrv_t *) args; zhash_destroy (&self->kvmap); self->master = FALSE; self->slave = TRUE; zloop_reader (bstar_zloop (self->bstar), self->subscriber, s_subscriber, self); return 0;}// ---------------------------------------------------------------------// Collect updates from peer (master)// We're always slave when we get these updatesstatic ints_subscriber (zloop_t *loop, void *subscriber, void *args){ clonesrv_t *self = (clonesrv_t *) args; // Get state snapshot if necessary if (self->kvmap == NULL) { self->kvmap = zhash_new (); void *snapshot = zsocket_new (self->ctx, ZMQ_DEALER); zsocket_connect (snapshot, "tcp://localhost:%d", self->peer); zclock_log ("I: asking for snapshot from: tcp://localhost:%d", self->peer); zstr_send (snapshot, "ICANHAZ?"); while (TRUE) { kvmsg_t *kvmsg = kvmsg_recv (snapshot); if (!kvmsg) break; // Interrupted if (streq (kvmsg_key (kvmsg), "KTHXBAI")) { self->sequence = kvmsg_sequence (kvmsg); kvmsg_destroy (&kvmsg); break; // Done } kvmsg_store (&kvmsg, self->kvmap); } zclock_log ("I: received 
snapshot=%d", (int) self->sequence); zsocket_destroy (self->ctx, snapshot); } // Find and remove update off pending list kvmsg_t *kvmsg = kvmsg_recv (subscriber); if (!kvmsg) return 0; if (strneq (kvmsg_key (kvmsg), "HUGZ")) { if (!s_was_pending (self, kvmsg)) { // If master update came before client update, flip it // around, store master update (with sequence) on pending // list and use to clear client update when it comes later zlist_append (self->pending, kvmsg_dup (kvmsg)); } // If update is more recent than our kvmap, apply it if (kvmsg_sequence (kvmsg) > self->sequence) { self->sequence = kvmsg_sequence (kvmsg); kvmsg_store (&kvmsg, self->kvmap); zclock_log ("I: received update=%d", (int) self->sequence); } else kvmsg_destroy (&kvmsg); } else kvmsg_destroy (&kvmsg); return 0;}
代码不短,不过作者的牢骚更长。(貌似花了一周的时间)
当然作为一个靠谱的模型,总会制定一些规范给某些不太靠谱的人:http://rfc.zeromq.org/spec:12
至此,整个教程算是告一段落了。(之所以这最后一个模型分了三段,着实是代码多了些)
教程结束了,学习才刚开始。至于会不会再有后续,诚如guide结尾:
More coming soon…
结言:
虽然知道翻译技术文章有难度,但着实还是吓着了,在写第一章的时候就打了退堂鼓。终究在自我安慰、勉励下完成了这个系列的笔记(退一步)。好吧,我承认代码、图示占了大比例,不过,好歹算是有始有终的完成了。
原计划一周时间结束的,由于诸多原因(磨蹭,消极,退堂鼓)前后竟然跨了两个多月,总算咬牙坚持了下来,其实不敢说学到了很多,自从中部python的代码不再时,几乎就没有再自己验证代码的可行和逻辑了。写本系列,更多的是自个儿跟自个儿过不去(俺就不信写不完了!)折腾到最后,多少也是有些收获的(谁折腾谁知道~)
回首看看,也就这样了,倒是有些"天凉好个秋"的意味。
也罢,哦了
- zeroMQ初体验-34.发布/订阅模式进阶-克隆模式-下,结言
- zeroMQ初体验-32.发布/订阅模式进阶-克隆模式-上
- zeroMQ初体验-33.发布/订阅模式进阶-克隆模式-中
- zeroMQ初体验-13.发布/订阅模式 进阶
- zeroMQ初体验-30.发布/订阅模式进阶-自裁的蜗牛订阅者
- zeroMQ初体验-31.发布/订阅模式进阶-黑盒的高速订阅者
- zeroMQ初体验-2.发布订阅模式(pub/sub)
- zeromq 的发布 订阅模式
- zeroMQ初体验-21.应答模式进阶(七)-云计算
- 初探ZeroMQ(三) 发布-订阅模式中套结字总结
- ZeroMQ:订阅-发布模式的java程序示例
- zeroMQ初体验-20.应答模式进阶(六)-多对多路由模式
- zeroMQ初体验-26.可靠性-管家模式
- zeroMQ初体验-27.可靠性-硬盘模式
- zeroMQ初体验-28.可靠性-主从模式
- zeroMQ初体验-29.可靠性-自由模式
- zeroMQ初体验-15.应答模式进阶(一)-数据的封装
- zeroMQ初体验-16.应答模式进阶(二)-定制路由1
- SQL计算中分母为0
- Symbian屏幕双缓冲和DSA
- zeroMQ初体验-33.发布/订阅模式进阶-克隆模式-中
- 什么是Windows Azure
- oracle10g 学习笔记一
- zeroMQ初体验-34.发布/订阅模式进阶-克隆模式-下,结言
- 理解Python的双下划线命名
- SCA 之Tuscany 5 ——helloworld Spring Bean
- java 处理 barCode(条形码)
- Struts2 错误:There is no result type defined for type 'json' mapped with name 'success'.
- android.git.kernel.org 无法访问后 repo失败的完美解决方法
- postgresql与mysql使用dblink联通
- INI 写入 【含IP控件】
- sql工具:一条sql语句,查询sql server某个指定表的所有列及其属性