Android Binder机制浅析(二)
来源:互联网 发布:网络协议分析实验 编辑:程序博客网 时间:2024/06/05 08:14
接上文...
本文根据网上现有资源进行整合,以及自己的理解,有误之处欢迎指正~~
2.7 addService
addService调用了BpServiceManager的函数
目录在 frameworks/native/libs/binder/IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated)
{
Parcel data, reply;
// 'data' is the command parcel that will be sent to BnServiceManager
// First write the interface token (android.os.IServiceManager, see 2.4)
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
// Then write the new service's name, e.g. "media.player"
data.writeString16(name);
// Write the new service object (e.g. MediaPlayerService) into the command parcel
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
// Hand the packaged command to the remote proxy's transact()
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
// remote 返回 mRemote,就是在2.5中创建的BpBinder,于是接着到BpBinder中追transact的实现
目录在frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
// mHandle is 0 (ServiceManager), code is ADD_SERVICE_TRANSACTION,
// data is the command parcel, reply is the reply parcel, flags == 0
>> status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
...
}
其中又调用到IPCThreadState,返回函数追
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
...
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
// writeTransactionData queues the outgoing command into mOut
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
...
// Synchronous call: block in waitForResponse until the reply arrives
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
>> err = waitForResponse(reply);
} else {
Parcel fakeReply;
>> err = waitForResponse(&fakeReply);
}
...
} else {
err = waitForResponse(NULL, NULL);
// one-way call: no reply parcel is expected
}
...
return err;
}
再追一下writeTransactionData查看如何发送
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
// Point the transaction at the Parcel's raw data and flat-object offsets
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
// Ship the error status instead of payload data
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
// The command is wrapped in a binder_transaction_data and appended to mOut.
// mOut is the outgoing command buffer — itself a Parcel.
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
至此,数据已写入Parcel,但还没有与/dev/binder发生关联,
真正与之关联的是waitForResponse
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
// talkWithDriver is where the actual /dev/binder interaction happens
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// talkWithDriver sent mOut to the driver and filled mIn with the data
// read back from it; now consume the returned command from mIn
cmd = (uint32_t)mIn.readInt32();
IF_LOG_COMMANDS() {
alog << "Processing waitForResponse Command: "
<< getReturnString(cmd) << endl;
}
...
于是乎追 talkWithDriver
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
...
// A single ioctl both writes the pending commands and reads replies
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
...
// The reply data comes back in bwr; the buffer receiving it is the one
// provided by mIn
if (err >= NO_ERROR) {
// Drop whatever part of mOut the driver consumed
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
// Reset mIn to expose exactly the bytes the driver wrote back
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
...
至此,发送addService的流程走完了
BpServiceManager发送了一个addService命令给BnServiceManager,然后接受回复。
回到第二章开始的地方,MediaPlayerService是一个BnMediaPlayerService,等待BpMediaPlayerService来和它交互,但同时addService的另一端也在操作BnServiceManager,接下来先看BnServiceManager吧
2.8 BnServiceManager
正如2.5的最后所说,defaultServiceManager的返回是BpServiceManager,通过它可以把命令请求发送到Binder设备,而且handle的值为0.那么,在系统的另外一端肯定有个接受命令的操作,实际上,并不是BnServiceManager,它是不存在的,但是却有个和它一样功能的程序,目录在
frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
// Allow an alternate driver path on the command line; default to /dev/binder
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
bs = binder_open(driver, 128*1024);
// Open the binder device (with a 128 KB mmap for incoming transactions)
if (!bs) {
#ifdef VENDORSERVICEMANAGER
ALOGW("failed to open binder driver %s\n", driver);
...
if (binder_become_context_manager(bs)) {
// Become the context manager (the well-known handle-0 endpoint)
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
...
if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}
if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}
// Loop forever handling the commands sent by BpServiceManager clients
binder_loop(bs, svcmgr_handler);
在此,先看看binder_open
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs));
...
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
// Actually open the binder device node
...
// mmap a read-only region through which the driver delivers transaction data
bs->mapsize = mapsize;
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
接下来追binder_become_context_manager是如何的
int binder_become_context_manager(struct binder_state *bs)
{
// Short and sweet: ask the driver to make this process the context MANAGER
// (the endpoint all handle-0 transactions are routed to)
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
再看看最后的也最重要的binder_loop,binder_loop是从binder设备中读请求,之后写回复的一个循环
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
// Tell the driver this thread is entering the binder loop
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
// Main loop: blocking-read requests from the driver, then dispatch them
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
...
>> res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
// Parse the received commands and hand transactions to 'func' (svcmgr_handler)
...
}
}
在这整个流程中,还有一个类似handleMessager的地方,处理各种各样的命令,就是
在frameworks/native/cmds/servicemanager/service_manager.c中的
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
...
// Read back what the client wrote: strict-mode policy header, then the
// interface token written by writeInterfaceToken()
>> strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
...
switch(txn->code) {
...
case SVC_MGR_ADD_SERVICE:
// The service name written by data.writeString16(name)
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// The driver-assigned handle for the flattened binder object
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
>> if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
...
do_add_service是真正的添加BnServiceManager的信息,追它看看
/*
 * Register a service with the service manager.
 *
 * bs             - binder state (device fd + mmap)
 * s, len         - UTF-16 service name and its length
 * handle         - driver-assigned handle of the service's binder object
 * uid, spid      - credentials of the caller (for logging / permission checks)
 * allow_isolated - whether isolated processes may look this service up
 *
 * Returns 0 on success, -1 on failure.
 */
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
// Look the name up in the list of already-registered services
si = find_svc(s, len);
if (si) {
if (si->handle) {
...
}
// Re-registration: just replace the stale handle
si->handle = handle;
} else {
// New service: allocate svcinfo plus room for the name (incl. terminator)
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
str8(s, len), handle, uid);
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
// Push onto svclist — the list of all services registered with ServiceManager
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
// Register a death notification so that when this service exits, the
// system notifies us and the resources malloc'ed above can be freed in time.
binder_link_to_death(bs, handle, &si->death);
return 0;
}
就是这样
2.9 ServiceManager的意义
原来,Android系统中Service信息都是先add到ServiceManager中,由ServiceManager来集中管理,这样就可以查询当前系统有哪些服务。而且,Android系统中某个服务例如MediaPlayerService的客户端想要和MediaPlayerService通讯的话,必须先向ServiceManager查询MediaPlayerService的信息,然后通过ServiceManager返回的东西再来和MediaPlayerService交互。
毕竟,要是MediaPlayerService身体不好,老是挂掉的话,客户的代码就麻烦了,就不知道后续新生的MediaPlayerService的信息了,所以只能这样:
1. MediaPlayerService向SM注册
2. MediaPlayerClient查询当前注册在SM中的MediaPlayerService的信息
3. 根据这个信息,MediaPlayerClient和MediaPlayerService交互
另外,ServiceManager的handle标识是0,所以只要往handle是0的服务发送消息,最终都会被传递到ServiceManager中去。
(未完待续...)
- Android Binder机制浅析(二)
- 浅析Android binder机制
- Android Binder机制浅析
- 浅析Android binder机制
- Android Binder机制浅析
- 浅析Android binder机制
- Android Binder机制浅析
- Android Binder机制浅析
- Android Binder机制浅析
- Android Binder机制浅析
- Android的Binder机制浅析
- Android的Binder机制浅析
- Android的Binder机制浅析
- Android的Binder机制浅析
- Android Binder通信机制浅析
- Android Binder机制浅析(一)
- Android Binder机制浅析(三)
- Android Binder机制(二) Binder中的数据结构
- 读书笔记_算法第四版(二)
- 6.监控面板
- hadoop自定义排序,分组排序
- JSP中下载文件
- 推荐算法
- Android Binder机制浅析(二)
- linux 下matplotlib 运用二
- Linux下查看文件内容的命令
- adb 抓取ANR日志
- MVC中使用aspx当作视图需要注意的问题
- java eclipse sonar 圈复杂度
- RequireJS 入门知识讲解
- Spring Boot 整合 Spring AMQP
- 为何引入输入子系统及其架构