I am writing a TCP client with Boost.Asio, adapted from an example in 《Boost程序库完全开发指南》.
The structure is: the main thread starts a worker thread that runs the io_service, while the main thread itself produces the data to be sent.
What I do now is start a timer in the worker thread; the timer callback periodically checks a "pending send buffer", and if it contains data it calls start_write() to send it.
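To make that concrete, here is a condensed, self-contained sketch of the polling idea; poll_sender, pending_buf_ and buf_mu_ are names I made up for this sketch and do not appear in the real code further down:

#include <vector>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace boost::asio;

// polling: a one-second timer keeps checking a shared "pending send buffer"
struct poll_sender
{
    deadline_timer&            timer_;
    boost::mutex               buf_mu_;
    std::vector<unsigned char> pending_buf_;   // filled by the main thread

    explicit poll_sender(deadline_timer& t) : timer_(t) {}

    void arm()                                  // (re)start the timer
    {
        timer_.expires_from_now(boost::posix_time::seconds(1));
        timer_.async_wait(boost::bind(&poll_sender::on_timer, this,
                                      placeholders::error));
    }

    void on_timer(const boost::system::error_code& ec)
    {
        if (ec) return;                         // timer was cancelled
        std::vector<unsigned char> data;
        {
            boost::mutex::scoped_lock lock(buf_mu_);
            data.swap(pending_buf_);            // take whatever was queued
        }
        if (!data.empty())
            send(data);                         // the real code calls start_write()
        arm();                                  // re-arm: this is the polling
    }

    void send(const std::vector<unsigned char>&) { /* async_write_some(...) */ }
};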
I don't think this is a good approach, though: it is essentially polling, and I feel an event-triggered design would be more reasonable.
If I call start_write() directly from the main thread, the server does receive the data, but the write completion handler write_handler is never invoked, and I don't understand why.
Could someone take a look and suggest how to turn this into an event-triggered implementation? Thanks.
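For comparison, this is the kind of event-triggered scheme I have in mind; it is only a minimal sketch, and the class sender and the method queue_send() are made-up names. The main thread would call queue_send() whenever it has data, and io_service::post() hands the work to the thread running io_service::run(), so nothing polls:

#include <vector>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
using namespace boost::asio;

class sender
{
    io_service&                ios_;
    ip::tcp::socket&           sock_;
    std::vector<unsigned char> buf_;   // must stay alive until on_write() runs
public:
    sender(io_service& ios, ip::tcp::socket& sock) : ios_(ios), sock_(sock) {}

    // called from the main (producer) thread
    void queue_send(const std::vector<unsigned char>& data)
    {
        // post() may be called from any thread; do_send() will be executed
        // inside io_service::run(), i.e. in the worker thread
        ios_.post(boost::bind(&sender::do_send, this, data));
    }

private:
    // runs in the io_service thread (the sketch assumes one send at a time)
    void do_send(std::vector<unsigned char> data)
    {
        buf_.swap(data);
        sock_.async_write_some(buffer(buf_),
            boost::bind(&sender::on_write, this,
                        placeholders::error, placeholders::bytes_transferred));
    }

    void on_write(const boost::system::error_code& ec, std::size_t len)
    {
        // write completion also runs in the io_service thread
    }
};

With something like this, the worker thread still just calls ios.run(), and the main loop would call queue_send() instead of relying on the timer.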
My actual code is below:
#include <string>
#include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <unistd.h>        // sleep()
using namespace std;
#define BOOST_DATE_TIME_SOURCE
#define BOOST_ALL_NO_LIB
#include <boost/system/error_code.hpp>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace boost::asio;
using namespace boost::posix_time;
using namespace boost::system;
#include <boost/smart_ptr.hpp>
using namespace boost;
#include <boost/bind.hpp>
#include <boost/thread.hpp>
// compile the Boost.Thread sources into this translation unit so no libboost_thread is needed
#include <libs/thread/src/pthread/thread.cpp>
#include <libs/thread/src/pthread/once.cpp>
//g++ -o client client.cpp error_code.cpp -lpthread
class client
{
private:
io_service &ios;
ip::tcp::endpoint ep;
typedef shared_ptr<ip::tcp::socket> sock_pt;
sock_pt connected_sock_pt;
vector<unsigned char> send_buf;   // outgoing data, kept alive for async_write_some
mutex io_mu;
deadline_timer& t_timer;
protected:
// (re)arm the one-second timer; time_expired runs in the io_service thread
void set_deadline_timer()
{
t_timer.expires_from_now(seconds(1));
t_timer.async_wait(bind(&client::time_expired,this,placeholders::error));
}
public:
// remember the io_service and timer, set the server endpoint,
// then start the first connect attempt and arm the timer
client(io_service& io,deadline_timer& t):ios(io),ep(ip::address::from_string("127.0.0.1"),668),t_timer(t)
{
start_connect();
set_deadline_timer();
}
// drop any previous socket and start an asynchronous connect
void start_connect()
{
cout <<"begin connect server ......"<<endl;
{
mutex::scoped_lock lock(io_mu);
connected_sock_pt.reset();
}
sock_pt sock(new ip::tcp::socket(ios));
sock->async_connect(ep,bind(&client::conn_handler,this,placeholders::error,sock));
}
// connect completion: on error wait 1s and retry, otherwise keep the socket
void conn_handler(const error_code& ec,sock_pt sock)
{
if(ec)
{
cout<<"error msg:"<<ec.message()<<endl;
sleep(1);
start_connect();
return;
}
cout<<"connected to:"<<sock->remote_endpoint().address()<<endl;
{
mutex::scoped_lock lock(io_mu);
connected_sock_pt = sock;
}
//start_write(write_buf);
}
void start_write(const vector<unsigned char>& write_buf)
{
cout <<"begin write ......"<<endl;
mutex::scoped_lock lock(io_mu);
if(connected_sock_pt)
{
// asio::buffer() does not copy, so keep the data in a member that outlives
// the asynchronous write; a caller's local vector would be gone before
// write_handler runs
send_buf = write_buf;
connected_sock_pt->async_write_some(buffer(send_buf),bind(&client::write_handler,this,placeholders::error,placeholders::bytes_transferred));
}
}
// write completion: this is the handler that never runs when start_write()
// is called from the main thread
void write_handler(const error_code& ec,std::size_t len)
{
if(ec)
{
cout<<"error msg:"<<ec.message()<<endl;
sleep(1);
start_connect();
return;
}
cout << "send msg complete,len = "<<len<<endl;
}
// timer callback: build some test data, send it, then re-arm the timer
void time_expired(const error_code& ec)
{
if(ec)   // timer was cancelled (e.g. during shutdown)
return;
vector<unsigned char> write_buf;
for(unsigned char i=0;i<10;i++)
write_buf.push_back(i);
start_write(write_buf);
set_deadline_timer();
}
// thread entry point: run the io_service event loop until ios.stop()
static void start_client_thread(io_service& ios)
{
//client cl(ios);
ios.run();
}
};
int main()
try
{
cout << "client start ......"<<endl;
io_service ios;
deadline_timer t(ios);
client cl(ios,t);
//ios.run();
// worker thread: runs the io_service event loop (connect/timer/write handlers)
thread t1(&(client::start_client_thread),ref(ios));
// main thread: would produce the data to send; here it just waits ~15 seconds
int index = 0;
while(index < 15)
{
//cl.start_write(write_buf);
sleep(1);
index++;
}
ios.stop();   // stop the event loop so the worker thread can finish
t1.join();
}
catch(std::exception& e)
{
cout<<e.what()<<endl;
}