memcached Master-Worker Model Analysis


scgi is implemented with multiple processes: the master process listens for socket connection requests and dispatches them to child processes for handling.
nginx is also implemented with multiple processes, but once the child processes are created, each of them accepts and handles socket connection requests on its own.
memcached, by contrast, is implemented with multiple threads: the main thread listens for requests and dispatches them to worker threads (a minimal stand-alone sketch of this hand-off pattern follows the question list below).

1. How does the listening socket choose a thread and hand the new socket over to a worker?

2. How does the worker start listening on the new socket?

3. How does the worker epoll-wait?
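
Here is that sketch: a minimal, self-contained program (NOT memcached code; every name in it is illustrative) showing the pattern the rest of this article walks through. The master pushes a work item onto a shared queue and writes one byte to the worker's notify pipe; the worker, blocked in its own event wait, wakes on the pipe, reads the byte, and pops the item.

/* Minimal sketch of the master->worker pipe-notification pattern.
 * NOT memcached code; names and structure are illustrative only.
 * Compile with: gcc -pthread sketch.c */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int notify_pipe[2];                 /* [0]: worker read end, [1]: master write end */
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued_fd = -1;                 /* stand-in for memcached's CQ_ITEM queue */

static void *worker(void *arg) {
    struct pollfd pfd = { .fd = notify_pipe[0], .events = POLLIN };
    for (;;) {
        poll(&pfd, 1, -1);                 /* the worker waits on its notify pipe */
        char buf;
        if (read(notify_pipe[0], &buf, 1) != 1)
            continue;
        pthread_mutex_lock(&q_lock);
        int fd = queued_fd;                /* pop the dispatched "connection" */
        queued_fd = -1;
        pthread_mutex_unlock(&q_lock);
        printf("worker received fd %d\n", fd);
        /* memcached would now wrap the fd in a conn and register it in
         * this worker's own event_base (see conn_new at the end) */
    }
    return NULL;
}

int main(void) {
    pthread_t tid;
    pipe(notify_pipe);
    pthread_create(&tid, NULL, worker, NULL);

    /* master side: enqueue the item, then wake the worker with a one-byte
     * write, which is exactly what dispatch_conn_new() does */
    pthread_mutex_lock(&q_lock);
    queued_fd = 42;                        /* pretend 42 is a freshly accepted socket */
    pthread_mutex_unlock(&q_lock);
    write(notify_pipe[1], "", 1);

    sleep(1);                              /* give the worker time to print */
    return 0;
}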

The state machine is the engine that drives memcached: it takes different actions depending on a connection's state. The states are enumerated as follows:

enum conn_states {
    conn_listening,  /**< the socket which listens for connections */
    conn_new_cmd,    /**< Prepare connection for next command */
    conn_waiting,    /**< waiting for a readable socket */
    conn_read,       /**< reading in a command line */
    conn_parse_cmd,  /**< try to parse a command from the input buffer */
    conn_write,      /**< writing out a simple response */
    conn_nread,      /**< reading in a fixed number of bytes */
    conn_swallow,    /**< swallowing unnecessary bytes w/o storing */
    conn_closing,    /**< closing this connection */
    conn_mwrite,     /**< writing out many items sequentially */
    conn_max_state   /**< Max state value (used for assertion) */
};

The state machine is implemented by drive_machine. Since the function is quite long, only its handling of the conn_listening state is analyzed here.

static void drive_machine(conn *c) {
    bool stop = false;
    int sfd, flags = 1;
    socklen_t addrlen;
    struct sockaddr_storage addr;
    int nreqs = settings.reqs_per_event;
    int res;

    assert(c != NULL);

    while (!stop) {

        switch(c->state) {
         
        /* an event occurred on the listening socket */
        case conn_listening:

            addrlen = sizeof(addr);
            if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
                if (errno == EAGAIN || errno == EWOULDBLOCK) {
                    /* these are transient, so don't log anything */
                    stop = true;
                } else if (errno == EMFILE) {
                    if (settings.verbose > 0)
                        fprintf(stderr, "Too many open connections\n");
                    accept_new_conns(false);
                    stop = true;
                } else {
                    perror("accept()");
                    stop = true;
                }
                break;
            }
       
            /* set the new socket to non-blocking */
            if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
                fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
                perror("setting O_NONBLOCK");
                close(sfd);
                break;
            }
            /* dispatch a worker thread to handle the connection */
            dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST,
                                     DATA_BUFFER_SIZE, tcp_transport);
            stop = true;
            break;
        /* ... handling of the other states omitted ... */
        }
    }
}

From this source we can see that when the listening socket gets a new connection, the event callback event_handler drives the state machine, which then calls dispatch_conn_new to hand the connection's read/write events over to a worker thread.

/*
 * Dispatches a new connection to another thread. This is only ever called
 * from the main thread, either during initialization (for UDP) or because
 * of an incoming connection.
 */
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags,
                       int read_buffer_size, enum network_transport transport) {
    CQ_ITEM *item = cqi_new();
    /* pick the next worker round-robin: this is how one worker is chosen
       among many */
    int tid = (last_thread + 1) % settings.num_threads;
    LIBEVENT_THREAD *thread = threads + tid;

    last_thread = tid;

    item->sfd = sfd;
    item->init_state = init_state;
    item->event_flags = event_flags;
    item->read_buffer_size = read_buffer_size;
    item->transport = transport;

    /* push the new item onto the chosen thread's new_conn_queue */
    cq_push(thread->new_conn_queue, item);

    MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id);
    /* write one byte into the pipe to wake the chosen worker */
    if (write(thread->notify_send_fd, "", 1) != 1) {
        perror("Writing to thread notify pipe");
    }
}
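
dispatch_conn_new relies on the connection-queue helpers cqi_new and cq_push (cq_pop appears later in the worker), which are not listed in this article. The following is a sketch of a mutex-protected FIFO with that interface; the struct layout is an assumption inferred from how the helpers are called here, not necessarily memcached's exact code.

/* Sketch only: field names and layout are inferred from the calls above,
 * not copied from memcached. enum conn_states and enum network_transport
 * come from memcached's headers. */
#include <pthread.h>
#include <stddef.h>

typedef struct conn_queue_item CQ_ITEM;
struct conn_queue_item {
    int                     sfd;
    enum conn_states        init_state;
    int                     event_flags;
    int                     read_buffer_size;
    enum network_transport  transport;
    CQ_ITEM                *next;
};

typedef struct conn_queue {
    CQ_ITEM        *head;
    CQ_ITEM        *tail;
    pthread_mutex_t lock;
} CQ;

static void cq_init(CQ *cq) {
    pthread_mutex_init(&cq->lock, NULL);
    cq->head = cq->tail = NULL;
}

/* master thread: append an item at the tail */
static void cq_push(CQ *cq, CQ_ITEM *item) {
    item->next = NULL;
    pthread_mutex_lock(&cq->lock);
    if (cq->tail == NULL)
        cq->head = item;
    else
        cq->tail->next = item;
    cq->tail = item;
    pthread_mutex_unlock(&cq->lock);
}

/* worker thread: take the oldest item, or NULL if the queue is empty */
static CQ_ITEM *cq_pop(CQ *cq) {
    pthread_mutex_lock(&cq->lock);
    CQ_ITEM *item = cq->head;
    if (item != NULL) {
        cq->head = item->next;
        if (cq->head == NULL)
            cq->tail = NULL;
    }
    pthread_mutex_unlock(&cq->lock);
    return item;
}

In the real source, cqi_new and cqi_free also recycle items through the cqi_freelist seen in thread_init below, instead of calling malloc/free for every dispatch.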


How does the chosen worker receive the write(thread->notify_send_fd, "", 1)?

    /* register a read event on the pipe; thread_libevent_process is the callback */
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);

memcached uses libevent for its event loop; readers who are not familiar with libevent can consult other material, as it is not introduced here. This line of the source:

 event_set(&me->notify_event, me->notify_receive_fd, EV_READ | EV_PERSIST, thread_libevent_process, me);

registers a read event on me->notify_receive_fd (the read end of the anonymous pipe), with thread_libevent_process as the callback. The function is defined as follows:

static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    char buf[1];

    /* the pipe became readable: consume the 1 byte the main thread wrote (see dispatch_conn_new()) */
    if (read(fd, buf, 1) != 1)
        if (settings.verbose > 0)
            fprintf(stderr, "Can't read from libevent pipe\n");

    /* pop one item from the connection queue */
    item = cq_pop(me->new_conn_queue);

    if (NULL != item) {
        /* create a new conn for this worker from the queued item */
        conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                           item->read_buffer_size, item->transport, me->base);
        if (c == NULL) {
            if (IS_UDP(item->transport)) {
                fprintf(stderr, "Can't listen for events on UDP socket\n");
                exit(1);
            } else {
                if (settings.verbose > 0) {
                    fprintf(stderr, "Can't listen for events on fd %d\n",
                        item->sfd);
                }
                close(item->sfd);
            }
        } else {
            c->thread = me;
        }
        cqi_free(item);
    }
}

 


Thread pool initialization:

The thread pool is initialized from main() via:

/* start up worker threads if MT mode */

thread_init(settings.num_threads, main_base);

The function is defined in thread.c; its source is shown below:

 

/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 * main_base Event base for main thread
 */
void thread_init(int nthreads, struct event_base *main_base) {
    int         i;

    pthread_mutex_init(&cache_lock, NULL);
    pthread_mutex_init(&stats_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;

    /* allocate the array of per-thread structures */
    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();

    /* create a notify pipe for each worker thread */
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        /* fill in the per-thread structure */
        setup_thread(&threads[i]);

    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        /* spawn nthreads workers; worker_libevent is each thread's entry function */
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    while (init_count < nthreads) {
        pthread_cond_wait(&init_cond, &init_lock);
    }
    pthread_mutex_unlock(&init_lock);
}

The thread pool initialization function is called by the main thread. It first initializes the various mutexes, then uses calloc to allocate nthreads * sizeof(LIBEVENT_THREAD) bytes for managing the pool, kept in the global static variable threads (of type LIBEVENT_THREAD *). It then creates an anonymous pipe for each thread (this pipe is what the dispatching described above runs over), and finally setup_thread registers the event listener, binds the CQ list, and fills in the rest of each thread's state. Its source is shown below:

/*
 * Set up a thread's information.
 */
static void setup_thread(LIBEVENT_THREAD *me) {
    me->base = event_init();
    if (! me->base) {
        fprintf(stderr, "Can't allocate event base\n");
        exit(1);
    }

    /* Listen for notifications from other threads */
    /* a read event on the notify pipe; thread_libevent_process is the callback */
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
    event_base_set(me->base, &me->notify_event);

    if (event_add(&me->notify_event, 0) == -1) {
        fprintf(stderr, "Can't monitor libevent notify pipe\n");
        exit(1);
    }

    /* allocate the new thread's connection queue (CQ) */
    me->new_conn_queue = malloc(sizeof(struct conn_queue));
    if (me->new_conn_queue == NULL) {
        perror("Failed to allocate memory for connection queue");
        exit(EXIT_FAILURE);
    }
    /* initialize the CQ inside the thread structure */
    cq_init(me->new_conn_queue);

    if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) {
        perror("Failed to initialize mutex");
        exit(EXIT_FAILURE);
    }
    /* create the per-thread suffix cache */
    me->suffix_cache = cache_create("suffix", SUFFIX_SIZE, sizeof(char*),
                                    NULL, NULL);
    if (me->suffix_cache == NULL) {
        fprintf(stderr, "Failed to create suffix cache\n");
        exit(EXIT_FAILURE);
    }
}
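
thread_init also calls create_worker(worker_libevent, &threads[i]) for each slot; those two helpers are not listed in this article. Roughly, create_worker is a thin wrapper around pthread_create, and worker_libevent bumps init_count under init_lock (which is what the pthread_cond_wait loop at the end of thread_init is waiting for) and then runs the thread's libevent loop. A sketch, which may differ in detail from any particular memcached version:

/* Sketch of the two helpers used by thread_init above; details may differ
 * between memcached versions. */
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *me = arg;

    /* announce that this worker has finished setting up, so that
     * thread_init's pthread_cond_wait loop can make progress */
    pthread_mutex_lock(&init_lock);
    init_count++;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);

    /* run this worker's own event loop forever: this is where the worker
     * "epoll-waits" on its notify pipe and, later, on its connections */
    event_base_loop(me->base, 0);
    return NULL;
}

static void create_worker(void *(*func)(void *), void *arg) {
    pthread_t       thread;
    pthread_attr_t  attr;
    int             ret;

    pthread_attr_init(&attr);
    if ((ret = pthread_create(&thread, &attr, func, arg)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n", strerror(ret));
        exit(1);
    }
}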

 

How does the worker receive network packets? This is set up in conn_new:

conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                   item->read_buffer_size, item->transport, me->base);
/* ... omitted ... */

conn_new is responsible for initializing a connection's conn *c structure. memcached looks connections up by indexing an array directly with the fd, which is much faster, and the number of fds never gets very large anyway. Most of conn_new is plain field initialization; the key part is registering the fd from its arguments with the event_base that was passed in (the worker thread's base when it is called from thread_libevent_process):

conn *conn_new(const int sfd, enum conn_states init_state,
               const int event_flags, const int read_buffer_size,
               enum network_transport transport,
               struct event_base *base) {
    /* Called from thread_libevent_process among others: sets up the
     * per-connection structure and registers it with libevent. Every
     * socket, the listening socket as well as the client sockets, uses
     * event_handler as its callback; only the workers' notify pipes use a
     * different one (thread_libevent_process, one of this function's callers). */
    c = conns[sfd];                        /* connections are indexed directly by fd */

    /* ... field initialization omitted ... */

    /* add the fd to the event loop */
    event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
    event_base_set(base, &c->event);       /* ev->ev_base = base: the event remembers its base */
    c->ev_flags = event_flags;             /* c->event.ev_events also records this event mask */
    if (event_add(&c->event, 0) == -1) {   /* hand the event to the underlying epoll */
        perror("event_add");
        return NULL;
    }
}
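
With the connection registered in its event_base, the worker's event loop (the event_base_loop started in worker_libevent) now epoll-waits on the client fd as well as on its notify pipe; when data arrives, libevent invokes event_handler, which records which events fired and re-enters the state machine. Its body is essentially the following (abridged; the exact checks vary between versions):

void event_handler(const int fd, const short which, void *arg) {
    conn *c = (conn *)arg;
    assert(c != NULL);

    c->which = which;                      /* remember which events fired */

    /* sanity check: the fd libevent reported must match the conn's fd */
    if (fd != c->sfd) {
        if (settings.verbose > 0)
            fprintf(stderr, "Catastrophic: event fd doesn't match conn fd!\n");
        conn_close(c);
        return;
    }

    /* run the state machine until it decides to stop and wait for the next event */
    drive_machine(c);
}

This closes the loop on the three questions at the top: the master picks a worker round-robin and notifies it through a pipe; the worker registers the new fd in its own event_base inside conn_new; and each worker epoll-waits inside event_base_loop, re-entering drive_machine whenever one of its connections becomes readable or writable.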

 

 

 

 

 

 

 

