A Summary of Binder


1. Data structures:

// Created at open() time; it holds the context of the process that opened /dev/binder
// and is stored in the private_data field of the open file's struct file.
struct binder_proc {
    struct hlist_node proc_node;
    struct rb_root threads;          // threads of this process that handle user requests; their number is bounded by max_threads
    struct rb_root nodes;            // Binder entities (nodes) owned by this process
    struct rb_root refs_by_desc;     // Binder references held by this process (references to other processes' entities), keyed by handle (desc)
    struct rb_root refs_by_node;     // the same references, keyed by the address of the referenced binder node
    int pid;
    struct vm_area_struct *vma;
    struct task_struct *tsk;
    struct files_struct *files;
    struct hlist_node deferred_work_node;
    int deferred_work;
    void *buffer;                    // start of the mapped physical memory in kernel space
    ptrdiff_t user_buffer_offset;    // difference between the kernel virtual address and the process's user virtual address
    struct list_head buffers;
    struct rb_root free_buffers;
    struct rb_root allocated_buffers;
    size_t free_async_space;
    struct page **pages;             // the underlying physical pages
    size_t buffer_size;              // size of the mapped memory
    uint32_t buffer_free;
    struct list_head todo;
    wait_queue_head_t wait;          // wait queue
    struct binder_stats stats;
    struct list_head delivered_death;
    int max_threads;                 // maximum number of threads
    int requested_threads;
    int requested_threads_started;
    int ready_threads;
    long default_priority;           // default priority
    struct dentry *debugfs_entry;
};

// Manages the address range buffer ~ (buffer + buffer_size) of a binder_proc.
// The range is split into segments, each described by a struct binder_buffer.
struct binder_buffer {
    struct list_head entry;          // linked, from low to high address, into the buffers list of struct binder_proc
    struct rb_node rb_node;          // free buffers are linked into free_buffers of struct binder_proc;
                                     // buffers in use are linked into allocated_buffers
    unsigned free:1;                 // free flag
    unsigned allow_user_free:1;
    unsigned async_transaction:1;
    unsigned debug_id:29;
    struct binder_transaction *transaction;
    struct binder_node *target_node; // target node
    size_t data_size;                // size of the data
    size_t offsets_size;             // size of the offsets area
    uint8_t data[0];                 // the actual data is stored here
};

// Describes a thread.
struct binder_thread {
    struct binder_proc *proc;        // owning process
    struct rb_node rb_node;          // linked into the threads tree of binder_proc
    int pid;
    int looper;                      // state of the thread
    struct binder_transaction *transaction_stack; // transactions this thread is sending/receiving (records the peer process and thread)
    struct list_head todo;           // list of work items destined for this thread
    uint32_t return_error;           // result/error code of an operation
    uint32_t return_error2;          // result/error code of an operation
    wait_queue_head_t wait;
    struct binder_stats stats;
};

// Describes a Binder entity (node).
struct binder_node {
    int debug_id;
    struct binder_work work;
    union {
        struct rb_node rb_node;      // while the entity is in normal use, rb_node links it into proc->nodes
        struct hlist_node dead_node; // if the owning process has died but the entity is still referenced
                                     // by other processes, dead_node links it into a hash list instead
    };
    struct binder_proc *proc;        // owning process
    struct hlist_head refs;          // list linking together all Binder references that refer to this entity
    int internal_strong_refs;        // reference count
    int local_weak_refs;             // reference count
    int local_strong_refs;           // reference count
    void __user *ptr;                // address of the object in user space
    void __user *cookie;             // extra data
    unsigned has_strong_ref:1;
    unsigned pending_strong_ref:1;
    unsigned has_weak_ref:1;
    unsigned pending_weak_ref:1;
    unsigned has_async_transaction:1;
    unsigned accept_fds:1;
    unsigned min_priority:8;
    struct list_head async_todo;
};

// Structure that user space reads/writes through ioctl.
struct binder_write_read {
    signed long   write_size;        // size of the write data
    signed long   write_consumed;    // how much of the write data has been consumed
    unsigned long write_buffer;      // address of a struct binder_transaction_data
    signed long   read_size;         // size of the read data
    signed long   read_consumed;     // how much of the read data has been consumed
    unsigned long read_buffer;       // address of a struct binder_transaction_data
};

struct binder_transaction_data {
    /* The first two are only used for bcTRANSACTION and brTRANSACTION,
     * identifying the target and contents of the transaction. */
    union {
        size_t handle;               // handle of the target object that handles this transaction
        void   *ptr;                 // when the target is a local Binder entity, ptr is its address in this process
    } target;
    void *cookie;                    // extra data
    unsigned int code;               // command code of the request made on the target object

    /* General information about the transaction. */
    unsigned int flags;
    pid_t  sender_pid;               // pid of the sending process
    uid_t  sender_euid;              // effective uid of the sending process
    size_t data_size;                // size of the data.buffer area
    size_t offsets_size;             // size of the data.offsets area: the offsets, inside data.buffer, of all Binder entities or references

    /* If this transaction is inline, the data immediately
     * follows here; otherwise, it ends with a pointer to
     * the data buffer. */
    union {
        struct {
            /* transaction data */
            const void *buffer;      // the data actually being transferred, of two kinds:
                                     // 1. ordinary data; 2. Binder entities or references
            /* offsets from buffer to flat_binder_object structs */
            const void *offsets;
        } ptr;
        uint8_t buf[8];
    } data;
};

// Describes a Binder entity or reference in transfer.
struct flat_binder_object {
    /* 8 bytes for large_flat_header. */
    unsigned long type;              // type of the Binder object
    unsigned long flags;             // flags of the Binder object

    /* 8 bytes of data. */
    union {
        void        *binder;         // set when this is a Binder entity (local object)
        signed long handle;          // set when this is a Binder reference (remote object)
    };
    void *cookie;                    // extra data
};

// Used to relay a request and carry back its result; records the sending and receiving process/thread.
struct binder_transaction {
    int debug_id;                    // for debugging
    struct binder_work work;
    struct binder_thread *from;      // sending thread
    struct binder_transaction *from_parent;
    struct binder_proc *to_proc;     // receiving process
    struct binder_thread *to_thread; // receiving thread
    struct binder_transaction *to_parent;
    unsigned need_reply:1;
    /* unsigned is_dead:1; */        /* not used at the moment */
    struct binder_buffer *buffer;    // data buffer, allocated in the receiving process
    unsigned int code;               // command code of the request made on the target object
    unsigned int flags;
    long priority;
    long saved_priority;
    uid_t sender_euid;               // euid of the sender
};

struct binder_work {
    struct list_head entry;          // doubly linked list queueing all binder_work items
    enum {
        BINDER_WORK_TRANSACTION = 1,
        BINDER_WORK_TRANSACTION_COMPLETE,
        BINDER_WORK_NODE,
        BINDER_WORK_DEAD_BINDER,
        BINDER_WORK_DEAD_BINDER_AND_CLEAR,
        BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
    } type;
};

enum transaction_flags {
    TF_ONE_WAY     = 0x01,           // one-way (asynchronous) transfer; no reply is expected
    TF_ROOT_OBJECT = 0x04,           // the content is a component's root object, a local Binder object
    TF_STATUS_CODE = 0x08,           // the content is a 32-bit status code
    TF_ACCEPT_FDS  = 0x10,           // a file descriptor may be received; the corresponding type is BINDER_TYPE_FD,
                                     // i.e. the handle field holds a file descriptor
};

// A Binder reference.
struct binder_ref {
    /* Lookups needed: */
    /*   node + proc => ref (transaction) */
    /*   desc + proc => ref (transaction, inc/dec ref) */
    /*   node => refs + procs (proc exit) */
    int debug_id;
    struct rb_node rb_node_desc;     // links this binder_ref into a red-black tree keyed by the reference number (desc)
    struct rb_node rb_node_node;     // links this binder_ref into a red-black tree keyed by the address of the binder node
    struct hlist_node node_entry;
    struct binder_proc *proc;
    struct binder_node *node;        // the kernel binder node being referenced
    uint32_t desc;
    int strong;
    int weak;
    struct binder_ref_death *death;
};
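Note that each binder_ref is linked into two trees at once: refs_by_desc, keyed by the handle (desc), and refs_by_node, keyed by the address of the referenced binder_node. The driver needs both lookups: handle to ref when a transaction names a target, and node to ref when deciding whether the process already references a node. The following is only a minimal sketch of that double index, using std::map in place of the kernel red-black trees; all struct names and fields are simplified stand-ins, not the real driver code.

#include <cstdint>
#include <cstdio>
#include <map>

// Simplified stand-ins for the kernel structures (illustration only).
struct binder_node_t { int owner_pid; };

struct binder_ref_t {
    uint32_t desc;          // handle value seen by user space
    binder_node_t *node;    // the binder entity this ref points to
};

struct binder_proc_t {
    // refs_by_desc: handle -> ref, used when a transaction targets a handle.
    std::map<uint32_t, binder_ref_t*> refs_by_desc;
    // refs_by_node: node address -> ref, used to find an existing ref for a node.
    std::map<binder_node_t*, binder_ref_t*> refs_by_node;
    uint32_t next_desc = 1; // desc 0 is reserved for the service manager

    // Roughly what binder_get_ref_for_node() does: reuse an existing ref
    // to this node, or allocate a new one with a fresh handle.
    binder_ref_t *get_ref_for_node(binder_node_t *node) {
        auto it = refs_by_node.find(node);
        if (it != refs_by_node.end())
            return it->second;
        binder_ref_t *ref = new binder_ref_t{next_desc++, node};
        refs_by_desc[ref->desc] = ref;
        refs_by_node[node] = ref;
        return ref;
    }
};

int main() {
    binder_node_t service_node{42};
    binder_proc_t client_proc;
    binder_ref_t *r1 = client_proc.get_ref_for_node(&service_node);
    binder_ref_t *r2 = client_proc.get_ref_for_node(&service_node); // same ref reused
    std::printf("desc=%u, reused=%d\n", r1->desc, r1 == r2);
    return 0;
}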

2.


As the figure above shows, the Service Manager (SMgr) holds a Binder reference to each Service as well as its own Binder entity; each Service holds a reference to the SMgr plus its own Binder entity; and a client holds references to both the SMgr and the Services it uses.

The process in detail:

1. The SMgr first creates its own Binder entity and becomes the daemon process, then waits for work items to arrive on its thread structure's thread->todo list.

2. A Service first obtains a reference to the SMgr and calls its addService method through that reference; at this point the Service's Binder entity is created in the kernel on behalf of that process, and a reference to it is registered with the SMgr.

3. A client likewise first obtains a reference to the SMgr, then calls getService to get a reference to the Service from the SMgr; a reference to that Service is created in the client's kernel context, so later method calls can talk to the Service directly. Note that the client's reference to the Service and the SMgr's reference to the Service are different references, but they point to the same Binder entity.

 

The data itself is passed through shared memory. When process A needs to talk to process B, A enters the kernel and a transaction t is created; the driver then determines which target should handle t, allocates space in that target and copies the data into it. Once B has the data, the buffer is mapped to the corresponding user-space address, so B's user space can access the data that A sent.

(Note that each process's kernel-side buffer is set up at mmap time.)
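The key fact established at mmap time is that the same physical pages are mapped twice: once into the kernel (proc->buffer) and once into the receiving process's address space, so a single copy into the kernel mapping makes the data visible to the receiver. user_buffer_offset simply records the constant gap between the two mappings. Below is a minimal sketch of the address arithmetic only, with hypothetical addresses; it is plain pointer math, not real driver code.

#include <cstdio>

int main() {
    // Hypothetical addresses, chosen only to illustrate the arithmetic.
    unsigned long long kernel_base = 0xffffffc012340000ULL; // proc->buffer (kernel mapping)
    unsigned long long user_base   = 0x0000007f80000000ULL; // start of the user mapping

    // proc->user_buffer_offset records the constant gap between the mappings.
    long long user_buffer_offset = (long long)(user_base - kernel_base);

    // The driver copies the payload to a kernel address inside the buffer...
    unsigned long long kernel_data = kernel_base + 0x80;    // like t->buffer->data
    // ...and reports the matching address in the receiver's own mapping:
    unsigned long long user_data = kernel_data + user_buffer_offset;

    std::printf("user_data = %#llx (user_buffer_offset = %lld)\n",
                user_data, user_buffer_offset);
    return 0;
}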

Once the client has obtained a reference to the Service it can call the Service's methods. At that point a transaction t is created in the kernel with that Service as its target, and the target fields are set up as follows:

tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;

The ptr and cookie here were initialized when the Service was registered: recall that during registration the Service called flatten_binder and passed in an IBinder, and that object is the Service itself.

status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                LOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();
            obj.cookie = local;
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }

    return finish_flatten_binder(binder, obj, out);
}

So when the Service picks up the work item in the kernel and returns to user space, the following code converts it back into the user-space Binder object:

if (tr.target.ptr) {
    sp<BBinder> b((BBinder*)tr.cookie);
    const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
    if (error < NO_ERROR) reply.setError(error);
}

It then calls the overridden transact implementation to carry out the requested operation.
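In other words, the cookie stored by flatten_binder is cast back to the local object and its transact() is invoked with the code and data the client wrote. A toy sketch of what the service-side dispatch on tr.code typically looks like, with simplified stand-in types rather than the real libbinder classes:

#include <cstdint>
#include <cstdio>
#include <string>

// Toy stand-ins for Parcel/BBinder, just enough to show the dispatch pattern.
struct ToyParcel { std::string payload; };

struct ToyBBinder {
    // Corresponds to the transact()/onTransact() a concrete service overrides.
    virtual int transact(uint32_t code, const ToyParcel &data, ToyParcel *reply) = 0;
    virtual ~ToyBBinder() {}
};

enum { ECHO_TRANSACTION = 1 };   // hypothetical transaction code

struct EchoService : ToyBBinder {
    int transact(uint32_t code, const ToyParcel &data, ToyParcel *reply) override {
        switch (code) {
        case ECHO_TRANSACTION:
            reply->payload = data.payload;  // do the actual work for this code
            return 0;                       // NO_ERROR
        default:
            return -1;                      // unknown transaction
        }
    }
};

int main() {
    EchoService service;
    // tr.cookie would be cast back to the local object; here we just use a pointer.
    ToyBBinder *b = &service;
    ToyParcel data{"hello"}, reply;
    int err = b->transact(ECHO_TRANSACTION, data, &reply);
    std::printf("err=%d reply=%s\n", err, reply.payload.c_str());
    return 0;
}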

 

3. The data transfer path:

First, IPCThreadState::writeTransactionData saves the data into mOut with the following code; note that the structure written is a binder_transaction_data:

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = statusBuffer;
        tr.offsets_size = 0;
        tr.data.ptr.offsets = NULL;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

Then, in IPCThreadState::talkWithDriver:

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    LOG_ASSERT(mProcess->mDriverFD >= 0, "Binder driver is not opened");

    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (long unsigned int)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size = 0;
    }

    IF_LOG_COMMANDS() {
        TextOutput::Bundle _b(alog);
        if (outAvail != 0) {
            alog << "Sending commands to driver: " << indent;
            const void* cmds = (const void*)bwr.write_buffer;
            const void* end = ((const uint8_t*)cmds)+bwr.write_size;
            alog << HexDump(cmds, bwr.write_size) << endl;
            while (cmds < end) cmds = printCommand(alog, cmds);
            alog << dedent;
        }
        alog << "Size of receive buffer: " << bwr.read_size
            << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(HAVE_ANDROID_OS)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);

    ......
}

It first takes the data previously saved in mOut and stores its address in the write_buffer field of a binder_write_read structure, then calls ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) to pass the data into the kernel.
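The write buffer handed to the driver is effectively a stream of commands: each entry is a 32-bit command code (here BC_TRANSACTION) immediately followed by its payload (a binder_transaction_data), and the driver walks the stream until everything has been consumed. A sketch of how such a stream is packed and consumed, with simplified stand-in types and an illustrative command value (not the real kernel constant):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

enum { BC_TRANSACTION = 0x40046300 };  // value is illustrative only

struct toy_transaction_data {          // stand-in for binder_transaction_data
    uint32_t handle;
    uint32_t code;
    uint32_t data_size;
};

int main() {
    // Pack the stream the way writeTransactionData does: [cmd][payload]...
    std::vector<uint8_t> out;
    uint32_t cmd = BC_TRANSACTION;
    toy_transaction_data tr{0 /* handle 0 = service manager */, 3, 128};
    out.insert(out.end(), (uint8_t*)&cmd, (uint8_t*)&cmd + sizeof(cmd));
    out.insert(out.end(), (uint8_t*)&tr, (uint8_t*)&tr + sizeof(tr));

    // Consume it the way binder_thread_write does: read a command, then its
    // payload, advancing until the whole write buffer has been consumed.
    size_t consumed = 0;
    while (consumed < out.size()) {
        uint32_t c;
        std::memcpy(&c, &out[consumed], sizeof(c));
        consumed += sizeof(c);
        if (c == BC_TRANSACTION) {
            toy_transaction_data t;
            std::memcpy(&t, &out[consumed], sizeof(t));
            consumed += sizeof(t);
            std::printf("BC_TRANSACTION handle=%u code=%u size=%u\n",
                        t.handle, t.code, t.data_size);
        }
    }
    return 0;
}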

In the kernel, for BINDER_WRITE_READ, since write_size is greater than 0 the driver calls ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed). Inside that function the handling of BC_TRANSACTION is:

case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;

    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}

binder_transaction then copies the data into the target process's kernel-mapped buffer; the relevant code is:

t->buffer = binder_alloc_buf(target_proc, tr->data_size,
    tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
    return_error = BR_FAILED_REPLY;
    goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
    binder_inc_node(target_node, 1, 0, NULL);

offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
    binder_user_error("binder: %d:%d got transaction with invalid "
        "data ptr\n", proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    goto err_copy_data_failed;
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
    binder_user_error("binder: %d:%d got transaction with invalid "
        "offsets ptr\n", proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    goto err_copy_data_failed;
}
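Note the layout this code produces inside the receiver's buffer: the flat data comes first, and the offsets array is placed right after it at the next pointer-aligned address (offp = data + ALIGN(data_size, sizeof(void *))). A small sketch of that alignment arithmetic, assuming the usual round-up-to-multiple definition of ALIGN:

#include <cstddef>
#include <cstdio>

// The usual kernel-style round-up macro, written as a function here.
static size_t align_up(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

int main() {
    size_t data_size = 53;                       // tr->data_size (example value)
    size_t offsets_size = 2 * sizeof(size_t);    // two binder objects in the parcel

    // Layout inside t->buffer->data:
    //   [ data (data_size bytes) | padding | offsets array (offsets_size bytes) ]
    size_t offsets_start = align_up(data_size, sizeof(void*));
    size_t total = offsets_start + offsets_size;

    std::printf("data: [0, %zu)  offsets: [%zu, %zu)\n",
                data_size, offsets_start, total);
    return 0;
}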

With this, the data has reached the target process.
The target process normally sleeps in binder_thread_read on wait_event_interruptible_exclusive. After being woken up it copies the data describing the requester's transaction t into a local struct binder_transaction_data tr, translating the buffer addresses into the corresponding user-space addresses:

tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;

if (t->from) {
    struct task_struct *sender = t->from->proc->tsk;
    tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
} else {
    tr.sender_pid = 0;
}

tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer +
    ALIGN(t->buffer->data_size, sizeof(void *));

if (put_user(cmd, (uint32_t __user *)ptr))
    return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
    return -EFAULT;
ptr += sizeof(tr);

put_user and copy_to_user copy the command and the contents of tr into the buffer that the target's user space passed in (ptr points into that user buffer). The target process then returns to its handler and processes the data, which completes the transfer from one process to the other. The payload itself is copied only once, from the sender's user space into the kernel buffer that is also mapped into the receiver's user space.


4. Passing a Binder object

First, in addService:

virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
Here a Binder object is written:
data.writeStrongBinder(service);
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
A Binder object in transfer is represented by struct flat_binder_object, defined as follows:

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
    /* 8 bytes for large_flat_header. */
    unsigned long   type;
    unsigned long   flags;

    /* 8 bytes of data. */
    union {
        void        *binder;    /* local object */
        signed long handle;     /* remote object */
    };

    /* extra data associated with local object */
    void            *cookie;
};

Now look at flatten_binder:

status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                LOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();
            obj.cookie = local;
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }

    return finish_flatten_binder(binder, obj, out);
}

The binder passed in here is normally a local Binder entity derived from BBinder, so binder->localBinder returns a non-NULL BBinder pointer and the following branch is taken:

obj.type = BINDER_TYPE_BINDER;
obj.binder = local->getWeakRefs();
obj.cookie = local;

Finally the function calls finish_flatten_binder to write this flat_binder_object into the Parcel:
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // Need to write meta-data?
        if (nullMetaData || val.binder != NULL) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this);
            mObjectsSize++;
        }

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            mHasFds = mFdsKnown = true;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        size_t* objects = (size_t*)realloc(mObjects, newSize*sizeof(size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
The line

*reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

writes the flattened object into the data buffer, and its offset is recorded with

mObjects[mObjectsSize] = mDataPos;

The offset has to be recorded because, when the data transferred between processes contains Binder objects, the Binder driver must process those objects further in order to keep the Binder entities and references consistent across processes.
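In other words, the Parcel keeps two things side by side: the flat byte stream (mData) and an array of positions (mObjects) saying where in that stream each flat_binder_object lives; those positions are what later become tr.data.ptr.offsets, so the driver knows exactly which words it must rewrite. A toy model of that bookkeeping (not the real Parcel class):

#include <cstdint>
#include <cstdio>
#include <vector>

struct toy_flat_object { uint32_t type; uint32_t handle; }; // simplified flat_binder_object

struct ToyParcel {
    std::vector<uint8_t> data;     // plays the role of mData
    std::vector<size_t>  objects;  // plays the role of mObjects (offsets into data)

    void writeInt32(int32_t v) {
        data.insert(data.end(), (uint8_t*)&v, (uint8_t*)&v + sizeof(v));
    }
    void writeObject(const toy_flat_object &obj) {
        objects.push_back(data.size());        // remember where the object starts
        data.insert(data.end(), (uint8_t*)&obj, (uint8_t*)&obj + sizeof(obj));
    }
};

int main() {
    ToyParcel p;
    p.writeInt32(0x1234);                      // ordinary data
    p.writeObject({2 /* "binder" type */, 0}); // a binder object: offset recorded
    p.writeInt32(0x5678);                      // more ordinary data

    // These are the values that end up in binder_transaction_data:
    std::printf("data_size=%zu offsets_size=%zu first_offset=%zu\n",
                p.data.size(), p.objects.size() * sizeof(size_t), p.objects[0]);
    return 0;
}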

Next, writeTransactionData is called to write the data into mOut:

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = statusBuffer;
        tr.offsets_size = 0;
        tr.data.ptr.offsets = NULL;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

The branch executed here is:

tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();

Here:

tr.data_size is the size of the data being written;

tr.data.ptr.buffer is the starting pointer of that data;

tr.offsets_size is the total size of the binder-object offsets, i.e. the number of binder objects times sizeof(size_t);

tr.data.ptr.offsets is the starting address of the offsets array, which records where each binder object sits inside the data.


After the data reaches the kernel, binder_transaction processes this Binder entity:

switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
    struct binder_ref *ref;
    struct binder_node *node = binder_get_node(proc, fp->binder);

    if (node == NULL) {
        node = binder_new_node(proc, fp->binder, fp->cookie);
        if (node == NULL) {
            return_error = BR_FAILED_REPLY;
            goto err_binder_new_node_failed;
        }
        node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
    }
    if (fp->cookie != node->cookie) {
        ......
        goto err_binder_get_ref_for_node_failed;
    }
    ref = binder_get_ref_for_node(target_proc, node);
    if (ref == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_get_ref_for_node_failed;
    }

    if (fp->type == BINDER_TYPE_BINDER)
        fp->type = BINDER_TYPE_HANDLE;
    else
        fp->type = BINDER_TYPE_WEAK_HANDLE;
    fp->handle = ref->desc;
    binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
    ......
} break;

The type here is BINDER_TYPE_BINDER. During addService this is usually the first time the service travels through the Binder driver, so the lookup with binder_get_node returns NULL and binder_new_node creates a new binder_node in the sending proc. Because this Binder entity must be handed over to another target (the Service Manager), a reference to it is created for that target. Note the following lines:
if (fp->type == BINDER_TYPE_BINDER)
    fp->type = BINDER_TYPE_HANDLE;
else
    fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;

The type is changed to HANDLE because this Binder will be handled in the target, and the target can only refer to the Binder entity through a handle value, so fp->handle is set to the reference's descriptor.
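So while the flat_binder_object travels through the driver its meaning is rewritten: what left the sender as a local object (BINDER_TYPE_BINDER, pointers valid only in the sender) arrives at the target as a handle (BINDER_TYPE_HANDLE, valid only in the target). A toy version of that rewrite follows; all names are simplified stand-ins, not the real driver code.

#include <cstdint>
#include <cstdio>
#include <map>

enum { TYPE_BINDER = 1, TYPE_HANDLE = 2 };   // stand-ins for BINDER_TYPE_*

struct flat_obj { int type; uintptr_t binder_or_handle; };
struct node_t   { int owner_pid; };

struct proc_t {
    std::map<node_t*, uint32_t> handle_for_node;  // like refs_by_node
    uint32_t next_handle = 1;
    uint32_t ref_for_node(node_t *n) {            // like binder_get_ref_for_node()
        auto it = handle_for_node.find(n);
        if (it != handle_for_node.end()) return it->second;
        return handle_for_node[n] = next_handle++;
    }
};

// Roughly what binder_transaction() does for a BINDER_TYPE_BINDER object:
// find/create the node, find/create a ref in the target process, then
// rewrite the flat object so the target sees a handle instead of a pointer.
void translate(flat_obj *fp, node_t *node, proc_t *target_proc) {
    if (fp->type == TYPE_BINDER) {
        fp->type = TYPE_HANDLE;
        fp->binder_or_handle = target_proc->ref_for_node(node);
    }
}

int main() {
    node_t service_node{100};
    proc_t service_manager;
    flat_obj fp{TYPE_BINDER, (uintptr_t)&service_node};
    translate(&fp, &service_node, &service_manager);
    std::printf("type=%d handle=%lu\n", fp.type, (unsigned long)fp.binder_or_handle);
    return 0;
}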
When the target process is woken up it starts processing; for addService the target is normally the service manager, which enters the following flow:
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    if (txn->target != svcmgr_handle)
        return -1;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr, "invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    ......
    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;
    ......
    }

    bio_put_uint32(reply, 0);
    return 0;
}

Here the calls

strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);

read back, one by one, the values that were written earlier. _bio_get_obj (used by bio_get_ref) fetches from the binder_io the first binder_object that has not yet been consumed; do_add_service is then called to store the service's name together with its handle value.

At this point the service manager can wait for clients to come and look up services.

A client first obtains the service by calling getService, which in turn calls checkService: the service name is written into a Parcel and, via the Binder driver, the request reaches the service manager process and wakes it up. Once awake, the service manager calls do_find_service in user space to locate the service we added earlier, and writes that service's handle (as known inside the service manager) back to the driver. In binder_transaction the driver then creates a new reference to this service, this time in the requesting client's process; it is a different reference from the one held by the service manager. That reference is returned to the client.
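On the user-space side of the service manager, the registered services boil down to a list of (name, handle) pairs: do_add_service stores the name together with the handle the driver assigned to the service manager, and do_find_service later looks the handle up by name for checkService/getService. A minimal sketch of that registry, assuming nothing beyond a name-to-handle map; the handle value below is hypothetical:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// Toy equivalent of the service manager's svcinfo list.
static std::map<std::string, uint32_t> g_services;

// Roughly what do_add_service() records for SVC_MGR_ADD_SERVICE.
int toy_add_service(const std::string &name, uint32_t handle) {
    if (name.empty() || handle == 0) return -1;
    g_services[name] = handle;
    return 0;
}

// Roughly what do_find_service() returns for SVC_MGR_CHECK_SERVICE.
uint32_t toy_find_service(const std::string &name) {
    auto it = g_services.find(name);
    return it == g_services.end() ? 0 : it->second;
}

int main() {
    toy_add_service("media.player", 3);          // handle 3 is hypothetical
    std::printf("lookup -> handle %u\n", toy_find_service("media.player"));
    return 0;
}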

In the client's user space, a Binder object is obtained through reply.readStrongBinder:

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}
This calls unflatten_binder to construct a Binder object:

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = static_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
Here flat->type is BINDER_TYPE_HANDLE, so ProcessState::getStrongProxyForHandle is called:

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one.  See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}
As can be seen here, ProcessState caches the remote Binder interfaces (BpBinder) it has already handed out, so the next time the same handle is obtained from the Service Manager the cached proxy is returned directly instead of creating a new one. Since this is the first use, e->binder is NULL, so a new BpBinder object is created:
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
With that, the client has obtained a Binder proxy and can use it to communicate with the service from now on.
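The proxy cache that getStrongProxyForHandle relies on can be pictured as a per-process table indexed by handle: the first lookup creates a proxy and stores it, and later lookups for the same handle return the same object. A toy model of that cache follows (simplified, no weak-reference handling, not the real ProcessState):

#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyBpBinder {                     // stand-in for BpBinder
    explicit ToyBpBinder(int32_t h) : handle(h) {}
    int32_t handle;
};

struct ToyProcessState {
    // Like mHandleToObject: one slot per handle value seen so far.
    std::vector<ToyBpBinder*> handle_to_object;

    ToyBpBinder *getProxyForHandle(int32_t handle) {
        if ((size_t)handle >= handle_to_object.size())
            handle_to_object.resize(handle + 1, nullptr);
        if (handle_to_object[handle] == nullptr)      // first use: create the proxy
            handle_to_object[handle] = new ToyBpBinder(handle);
        return handle_to_object[handle];              // later uses: same object back
    }
};

int main() {
    ToyProcessState ps;
    ToyBpBinder *a = ps.getProxyForHandle(5);
    ToyBpBinder *b = ps.getProxyForHandle(5);
    std::printf("same proxy returned: %d\n", a == b);
    return 0;
}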

5. Related structure diagrams:




Remaining topics (not covered here):

1. How binder_buffer is allocated

2. When a transaction is placed on the process-wide proc->todo queue versus a specific thread->todo queue

3. Death notification


Reference:

http://blog.csdn.net/lizhiguo0532?viewmode=contents
