Some collected material on boost asio performance


I have a very simple server/client performance test using boost::asio on Windows and it seems to be performing really poorly. I'm hoping that I'm just using the library incorrectly and would appreciate any advice.

I have a session class that writes a message length and then a message, then waits to read a message length and then a message, and repeats this over and over, nonstop. When I run it locally on my own computer I get blazingly fast performance; however, when I run the server on one computer and the client on another, even on the same network, performance slows down badly, with a single read/write exchange taking as much as one second.

The server source code file is as follows:

#include <cstdlib>
#include <cstring>   // for memcpy (missing in the original)
#include <iostream>
#include <boost/asio.hpp>
#include <boost/bind.hpp>

using namespace boost;
using namespace boost::asio;
using namespace boost::asio::ip;
using namespace std;

// A session reads a 4-byte length prefix, then the payload, then echoes
// both back, looping forever. It deletes itself on error.
class Session {
  public:
    Session(io_service& ioService)
      : m_socket(ioService) {}

    tcp::socket& GetSocket() {
      return m_socket;
    }

    void StartRead() {
      m_messageSizeIterator = reinterpret_cast<char*>(&m_messageSize);
      async_read(m_socket, buffer(m_messageSizeIterator, sizeof(m_messageSize)),
        bind(&Session::HandleSizeRead, this, placeholders::error,
        placeholders::bytes_transferred));
    }

    void StartWrite(const char* message, int messageSize) {
      m_messageSize = messageSize;
      m_message = new char[m_messageSize];
      memcpy(m_message, message, m_messageSize);
      async_write(m_socket, buffer(&m_messageSize, sizeof(int)),
        bind(&Session::HandleSizeWritten, this, placeholders::error));
    }

    void HandleSizeRead(const system::error_code& error,
        size_t bytes_transferred) {
      if(!error) {
        m_message = new char[m_messageSize];
        async_read(m_socket, buffer(m_message, m_messageSize),
          bind(&Session::HandleMessageRead, this, placeholders::error,
          placeholders::bytes_transferred));
      } else {
        delete this;
      }
    }

    void HandleMessageRead(const system::error_code& error,
        size_t bytes_transferred) {
      if(!error) {
        cout << string(m_message, m_messageSize) << endl;
        // Echo the message back: size first, then the payload.
        async_write(m_socket, buffer(&m_messageSize, sizeof(int)),
          bind(&Session::HandleSizeWritten, this, placeholders::error));
      } else {
        delete this;
      }
    }

    void HandleSizeWritten(const system::error_code& error) {
      if(!error) {
        async_write(m_socket, buffer(m_message, m_messageSize),
          bind(&Session::HandleMessageWritten, this, placeholders::error));
      } else {
        delete this;
      }
    }

    void HandleMessageWritten(const system::error_code& error) {
      if(!error) {
        delete[] m_message;  // was "delete m_message": new[] requires delete[]
        m_messageSizeIterator = reinterpret_cast<char*>(&m_messageSize);
        async_read(m_socket, buffer(m_messageSizeIterator,
          sizeof(m_messageSize)), bind(&Session::HandleSizeRead, this,
          placeholders::error, placeholders::bytes_transferred));
      } else {
        delete this;
      }
    }

  private:
    tcp::socket m_socket;
    int m_messageSize;
    char* m_messageSizeIterator;
    char* m_message;
};

class Server {
  public:
    Server(io_service& ioService, short port)
        : m_ioService(ioService),
          m_acceptor(ioService, tcp::endpoint(tcp::v4(), port)) {
      Session* new_session = new Session(m_ioService);
      m_acceptor.async_accept(new_session->GetSocket(),
        bind(&Server::HandleAccept, this, new_session,
        asio::placeholders::error));
    }

    void HandleAccept(Session* new_session, const system::error_code& error) {
      if(!error) {
        new_session->StartRead();
        // Immediately post another accept for the next connection.
        new_session = new Session(m_ioService);
        m_acceptor.async_accept(new_session->GetSocket(), bind(
          &Server::HandleAccept, this, new_session, placeholders::error));
      } else {
        delete new_session;
      }
    }

  private:
    io_service& m_ioService;
    tcp::acceptor m_acceptor;
};

int main(int argc, char* argv[]) {
  try {
    if(argc != 2) {
      cerr << "Usage: server <port>\n";
      return 1;
    }
    io_service io_service;
    Server s(io_service, atoi(argv[1]));
    io_service.run();
  } catch(std::exception& e) {
    cerr << "Exception: " << e.what() << "\n";
  }
  return 0;
}

And the client code is as follows:

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

using namespace boost;
using namespace boost::asio;
using namespace boost::asio::ip;
using namespace std;

// Same Session class as the server: length prefix, payload, echo, repeat.
class Session {
  public:
    Session(io_service& ioService)
      : m_socket(ioService) {}

    tcp::socket& GetSocket() {
      return m_socket;
    }

    void StartRead() {
      m_messageSizeIterator = reinterpret_cast<char*>(&m_messageSize);
      async_read(m_socket, buffer(m_messageSizeIterator, sizeof(m_messageSize)),
        bind(&Session::HandleSizeRead, this, placeholders::error,
        placeholders::bytes_transferred));
    }

    void StartWrite(const char* message, int messageSize) {
      m_messageSize = messageSize;
      m_message = new char[m_messageSize];
      memcpy(m_message, message, m_messageSize);
      async_write(m_socket, buffer(&m_messageSize, sizeof(int)),
        bind(&Session::HandleSizeWritten, this, placeholders::error));
    }

    void HandleSizeRead(const system::error_code& error,
        size_t bytes_transferred) {
      if(!error) {
        m_message = new char[m_messageSize];
        async_read(m_socket, buffer(m_message, m_messageSize),
          bind(&Session::HandleMessageRead, this, placeholders::error,
          placeholders::bytes_transferred));
      } else {
        delete this;
      }
    }

    void HandleMessageRead(const system::error_code& error,
        size_t bytes_transferred) {
      if(!error) {
        cout << string(m_message, m_messageSize) << endl;
        async_write(m_socket, buffer(&m_messageSize, sizeof(int)),
          bind(&Session::HandleSizeWritten, this, placeholders::error));
      } else {
        delete this;
      }
    }

    void HandleSizeWritten(const system::error_code& error) {
      if(!error) {
        async_write(m_socket, buffer(m_message, m_messageSize),
          bind(&Session::HandleMessageWritten, this, placeholders::error));
      } else {
        delete this;
      }
    }

    void HandleMessageWritten(const system::error_code& error) {
      if(!error) {
        delete[] m_message;  // was "delete m_message": new[] requires delete[]
        m_messageSizeIterator = reinterpret_cast<char*>(&m_messageSize);
        async_read(m_socket, buffer(m_messageSizeIterator,
          sizeof(m_messageSize)), bind(&Session::HandleSizeRead, this,
          placeholders::error, placeholders::bytes_transferred));
      } else {
        delete this;
      }
    }

  private:
    tcp::socket m_socket;
    int m_messageSize;
    char* m_messageSizeIterator;
    char* m_message;
};

int main(int argc, char* argv[]) {
  try {
    if(argc != 3) {
      cerr << "Usage: client <host> <port>\n";
      return 1;
    }
    io_service io_service;
    tcp::resolver resolver(io_service);
    tcp::resolver::query query(tcp::v4(), argv[1], argv[2]);
    tcp::resolver::iterator iterator = resolver.resolve(query);
    // Heap-allocated (was a stack object in the original): Session's error
    // paths call "delete this", which is only valid for heap allocations.
    Session* session = new Session(io_service);
    tcp::socket& s = session->GetSocket();
    s.connect(*iterator);
    cout << "Enter message: ";
    const int MAX_LENGTH = 1024;
    char request[MAX_LENGTH];
    cin.getline(request, MAX_LENGTH);
    int requestLength = strlen(request);
    session->StartWrite(request, requestLength);
    io_service.run();
  } catch (std::exception& e) {
    cerr << "Exception: " << e.what() << "\n";
  }
  return 0;
}

Any help would be appreciated, thanks.


For my purposes, sending really small messages and wanting virtually real-time replies, Nagle's algorithm turned out to be the cause of the poor performance; disabling it fixed the problem.

The solution is to disable the Nagle algorithm.

Accepted answer (21 votes):

You must turn off the Nagle algorithm. Call:

m_socket.set_option(tcp::no_delay(true));

Where appropriate for your code.
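In the context of the code above, a minimal sketch of where the option could go: on the server in HandleAccept before starting I/O, and on the client right after the blocking connect (error handling omitted):

// Server: disable Nagle on each accepted socket before starting I/O.
void HandleAccept(Session* new_session, const system::error_code& error) {
  if(!error) {
    new_session->GetSocket().set_option(tcp::no_delay(true));
    new_session->StartRead();
    // ... post the next async_accept as before ...
  } else {
    delete new_session;
  }
}

// Client: disable Nagle right after connecting.
s.connect(*iterator);
s.set_option(tcp::no_delay(true));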


A boost fan once insisted that boost's performance was excellent, so I fell for it and spent several months designing a network service framework whose core was boost asio's non-blocking epoll mode. I later found the performance was poor: compared with a version I wrote directly against raw sockets, at the same CPU and memory usage asio's throughput was only about a third and its average latency more than three times higher. Apart from a custom hash table of mine that has some performance edge over boost's hash table, I also found that asio's I/O side was not done well either.

After chasing the problem for a long time, I found it was in async_send and read. asio, thinking itself clever, registers a pipe with epoll and uses it to wake epoll up; this rookie approach is the core factor behind the poor performance.
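For context, here is that wakeup pattern in isolation: the classic "self-pipe" trick, sketched with illustrative names (this is the general idea, not asio's actual code). The cost the poster objects to is the extra write syscall plus an extra epoll event on every cross-thread send.

#include <sys/epoll.h>
#include <unistd.h>
#include <cstdio>
#include <thread>

int main() {
    int pipefd[2];
    if (pipe(pipefd) != 0) return 1;   // pipefd[0]: read end, pipefd[1]: write end
    int epfd = epoll_create1(0);
    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = pipefd[0];
    epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

    // Another thread wakes the I/O thread by writing one byte into the pipe.
    std::thread waker([&] {
        char byte = 0;
        ssize_t w = write(pipefd[1], &byte, 1);
        (void)w;
    });

    epoll_event events[8];
    int n = epoll_wait(epfd, events, 8, -1);   // unblocks because of the pipe
    printf("woken by %d event(s)\n", n);
    char drain;
    ssize_t r = read(pipefd[0], &drain, 1);    // must drain, or it stays readable
    (void)r;
    waker.join();
    close(epfd);
    close(pipefd[0]);
    close(pipefd[1]);
    return 0;
}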

Later I patched asio enough to make it usable. One approach is to use epoll_ctl to wake up epoll; another is to process the send queue in the gaps between epoll_wait calls. Either performs much better than asio's approach.
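A rough, self-contained sketch of the second approach, under made-up names (SendItem, QueueSend, IoLoop); this is my reading of the description, not the poster's actual patch. epoll_wait runs with a bounded timeout, and a cross-thread send queue is drained in the gaps, so no wakeup pipe is needed at all:

#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <deque>
#include <mutex>
#include <string>
#include <thread>

struct SendItem { int fd; std::string data; };
static std::mutex g_mutex;
static std::deque<SendItem> g_sendQueue;

// Called from any thread: just enqueue, no pipe write, no epoll wakeup.
void QueueSend(int fd, std::string data) {
    std::lock_guard<std::mutex> lock(g_mutex);
    g_sendQueue.push_back({fd, std::move(data)});
}

void IoLoop(int epfd) {
    const int kTimeoutMs = 10;  // bounds the worst-case added send latency
    epoll_event events[64];
    for (;;) {
        int n = epoll_wait(epfd, events, 64, kTimeoutMs);
        for (int i = 0; i < n; ++i) {
            char buf[4096];
            ssize_t r = read(events[i].data.fd, buf, sizeof buf);
            if (r > 0) printf("read %zd bytes\n", r);
        }
        // The "gap": drain queued sends with no wakeup machinery at all.
        std::deque<SendItem> pending;
        {
            std::lock_guard<std::mutex> lock(g_mutex);
            pending.swap(g_sendQueue);
        }
        for (auto& item : pending) {
            ssize_t w = write(item.fd, item.data.data(), item.data.size());
            (void)w;  // a real loop would track partial writes per socket
        }
    }
}

int main() {
    int sv[2];
    socketpair(AF_UNIX, SOCK_STREAM, 0, sv);   // stand-in for a real connection
    int epfd = epoll_create1(0);
    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = sv[0];
    epoll_ctl(epfd, EPOLL_CTL_ADD, sv[0], &ev);
    std::thread([&] { QueueSend(sv[1], "hello"); }).detach();  // a sender thread
    IoLoop(epfd);                              // never returns in this sketch
}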

What I described above applies to the boost version of 16 months ago. I haven't done Linux server development for more than a year now, so it may be out of date; take it as reference only.


【 Quoting RoachCock (我的饭盒我的饭): 】
: Didn't the benchmarks show the difference was within 5%? In that case, interface quality and completeness should come first.




☆─────────────────────────────────────☆
   bbinn (...) wrote on (Sat Jul 23 19:02:55 2011):

To put it simply, each epoll is like a WaitForMultipleObjects in an apartment thread. If I now want to send on a socket managed by that epoll, I first have to switch into its thread apartment, so asio's current approach amounts to a PostThreadMessage, which is obviously inefficient. The most efficient approach is to set the WaitForMultipleObjects interval to something appropriate and then check the send queue in the gaps between waits. As long as you maintain each socket's state accurately back to back, this is efficient: under light load the average latency is your WaitForMultipleObjects interval divided by two, and under heavy load the latency becomes very small.
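A self-contained illustration of that analogy, with hypothetical names and a bare wake event standing in for real sockets: wait with a bounded interval and service the send queue in the gap. The kIntervalMs / 2 average matches the light-load latency claim above.

#include <windows.h>
#include <cstdio>
#include <deque>
#include <mutex>
#include <string>
#include <thread>

static std::mutex g_mutex;
static std::deque<std::string> g_sendQueue;

int main() {
    HANDLE wake = CreateEvent(NULL, FALSE, FALSE, NULL);  // auto-reset event
    std::thread producer([&] {
        Sleep(25);                                        // simulate work elsewhere
        {
            std::lock_guard<std::mutex> lock(g_mutex);
            g_sendQueue.push_back("hello");
        }
        SetEvent(wake);  // optional: even without it, the next timeout picks this up
    });

    const DWORD kIntervalMs = 10;  // light-load average added latency ~ kIntervalMs / 2
    for (int i = 0; i < 10; ++i) {
        HANDLE handles[] = { wake };
        WaitForMultipleObjects(1, handles, FALSE, kIntervalMs);
        // The "gap": drain whatever was queued since the last wait.
        std::deque<std::string> pending;
        {
            std::lock_guard<std::mutex> lock(g_mutex);
            pending.swap(g_sendQueue);
        }
        for (auto& msg : pending)
            printf("sending: %s\n", msg.c_str());
    }
    producer.join();
    CloseHandle(wake);
    return 0;
}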

Similarly, any design that reduces reliance on operating-system features as far as possible, keeping more state tracking on your own side and predicting which operations are certain to succeed before attempting them, can effectively improve performance.
