Binder Driver: A Summary

  • In Chapter 1, we took a broad look at the structures Binder needs to transfer data across processes.
  • In Chapters 2 and 3, we surveyed the functions the Binder driver layer requires.

Let's now summarize the previous chapters and then dig deeper into the important driver-layer functions, laying the groundwork for a complete understanding of the driver layer.

We won't repeat the structures covered earlier; instead, let's walk through the order in which the core functions run, and the most important among them.

Startup sequence

  • device_initcall(binder_init); // initialize the device at boot
  • misc_register(&binder_miscdev);

    where:

        static struct miscdevice binder_miscdev = {
            .minor = MISC_DYNAMIC_MINOR,
            .name = "binder",
            .fops = &binder_fops
        };

    and binder_fops is:

        static const struct file_operations binder_fops = {
            .owner = THIS_MODULE,
            .poll = binder_poll,
            .unlocked_ioctl = binder_ioctl,
            .mmap = binder_mmap,
            .open = binder_open,
            .flush = binder_flush,
            .release = binder_release,
        };

  • binder_open()  // open the device
  • binder_mmap()  // allocate the buffer space
  • binder_ioctl() // perform the data transfers
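From user space, that same sequence shows up as three system calls against /dev/binder. A minimal sketch (error handling trimmed; the 128 KB mapping size and read-only flags follow servicemanager's binder_open(); the binder header path varies between kernel trees):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/android/binder.h> /* path differs on older kernels */

    int main(void)
    {
        /* binder_open(): open the misc device registered by binder_init() */
        int fd = open("/dev/binder", O_RDWR);
        if (fd < 0)
            return 1;

        /* binder_mmap(): map the kernel-managed transfer buffer, read-only */
        void *mapped = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
        if (mapped == MAP_FAILED)
            return 1;

        /* binder_ioctl(): everything from here on is ioctl traffic,
         * e.g. checking the protocol version */
        struct binder_version vers;
        if (ioctl(fd, BINDER_VERSION, &vers) < 0)
            return 1;

        return 0;
    }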


Let's now trace the code from user space down into the kernel, keeping the data in focus:

  • Bctest.c
    In main(), svcmgr_lookup(bs, svcmgr, argv[1]) is called to send the data.

    uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
    {
        uint32_t handle;
        unsigned iodata[512/4];
        struct binder_io msg, reply;

        bio_init(&msg, iodata, sizeof(iodata), 4);  // [1.1]
        bio_put_uint32(&msg, 0);                    // store 0
        bio_put_string16_x(&msg, SVC_MGR_NAME);     // store "android.os.IServiceManager"
        bio_put_string16_x(&msg, name);             // store the name of the service being looked up
        // when registering a service there is usually one more call:
        // bio_put_obj(struct binder_io *bio, void *ptr); // [1.2]

        if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE)) // [1.3]
            return 0;

        handle = bio_get_ref(&reply);

        if (handle)
            binder_acquire(bs, handle);

        binder_done(bs, &msg, &reply);

        return handle;
    }

As the code shows, in user space the data is held in a binder_io.

    struct binder_io
    {
        char *data;            /* pointer to read/write from */
        binder_size_t *offs;   /* array of offsets */
        size_t data_avail;     /* bytes available in data buffer */
        size_t offs_avail;     /* entries available in offsets array */

        char *data0;           /* start of data buffer */
        binder_size_t *offs0;  /* start of offsets buffer */
        uint32_t flags;
        uint32_t unused;
    };
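To see how the data cursor moves, here is a sketch of bio_put_uint32() and its helper bio_alloc(), in the spirit of the servicemanager sources (comments added):

    static void *bio_alloc(struct binder_io *bio, size_t size)
    {
        size = (size + 3) & (~3); // round up to a 4-byte boundary
        if (size > bio->data_avail) {
            bio->flags |= BIO_F_OVERFLOW; // no room left: flag it, caller gets NULL
            return NULL;
        } else {
            void *ptr = bio->data;
            bio->data += size;        // advance the write cursor
            bio->data_avail -= size;  // shrink the remaining space
            return ptr;
        }
    }

    void bio_put_uint32(struct binder_io *bio, uint32_t n)
    {
        uint32_t *ptr = bio_alloc(bio, sizeof(n));
        if (ptr)
            *ptr = n; // the value lands at the old bio->data position
    }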

1.1 bio_init()

    void bio_init(struct binder_io *bio, void *data,
                  size_t maxdata, size_t maxoffs)
    {
        size_t n = maxoffs * sizeof(size_t); // 4 * 4 = 16 bytes here

        if (n > maxdata) {
            bio->flags = BIO_F_OVERFLOW;
            bio->data_avail = 0;
            bio->offs_avail = 0;
            return;
        }

        bio->data = bio->data0 = (char *) data + n; // data starts just past the n bytes reserved for offsets
        bio->offs = bio->offs0 = data;              // offs points at the start of the buffer
        bio->data_avail = maxdata - n;
        bio->offs_avail = maxoffs;
        bio->flags = 0;
    }
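Plugging in the values from svcmgr_lookup() (and assuming a 32-bit build where sizeof(size_t) == 4, matching the comments above), the resulting layout is:

    unsigned iodata[512/4];
    struct binder_io msg;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    /* n = 4 * sizeof(size_t) = 16, so:
     *   msg.offs == msg.offs0 --> iodata                 (16-byte offsets area, 4 entries)
     *   msg.data == msg.data0 --> (char *) iodata + 16   (496-byte data area)
     *   msg.offs_avail == 4, msg.data_avail == 496
     */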

1.2 bio_put_obj()

The offsets stored in the offs array (starting at offs0) locate each flat_binder_object within the binder_io data buffer.

This function allocates a flat_binder_object from the buffer and fills it in.

    void bio_put_obj(struct binder_io *bio, void *ptr)
    {
        struct flat_binder_object *obj;

        obj = bio_alloc_obj(bio);
        if (!obj)
            return;

        obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
        obj->type = BINDER_TYPE_BINDER;
        obj->binder = (uintptr_t)ptr; // user-space pointer to the handler function (set in main)
        obj->cookie = 0;
    }

    struct flat_binder_object {
        unsigned long       type;
        unsigned long       flags;

        union {
            void        *binder;
            signed long handle;
        };

        void            *cookie;
    };
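bio_alloc_obj(), which bio_put_obj() relies on, is what records the object's position in the offsets array; a sketch matching the servicemanager sources:

    static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
    {
        struct flat_binder_object *obj;

        obj = bio_alloc(bio, sizeof(*obj)); // carve the object out of the data area

        if (obj && bio->offs_avail) {
            bio->offs_avail--;
            // remember where the object sits, as an offset from the start of the data area
            *bio->offs++ = ((char*) obj) - ((char*) bio->data0);
            return obj;
        }

        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    }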

1.3 binder_call()

This function repackages the binder_io data handed down from above into a binder_write_read.

    int binder_call(struct binder_state *bs,
                    struct binder_io *msg, struct binder_io *reply,
                    uint32_t target, uint32_t code)
    {
        int res;
        struct binder_write_read bwr;
        struct {
            uint32_t cmd;
            struct binder_transaction_data txn;
        } __attribute__((packed)) writebuf;
        unsigned readbuf[32];
        ...
        writebuf.cmd = BC_TRANSACTION;       // mark as BC_TRANSACTION
        writebuf.txn.target.handle = target; // target process
        writebuf.txn.code = code;            // SVC_MGR_CHECK_SERVICE
        writebuf.txn.flags = 0;
        writebuf.txn.data_size = msg->data - msg->data0;                       // size of the data area
        writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); // size of the offsets area
        writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;                  // start of the data area
        writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;                 // start of the offsets area

        bwr.write_size = sizeof(writebuf);
        bwr.write_consumed = 0;
        bwr.write_buffer = (uintptr_t) &writebuf; // wrap the binder_transaction_data into bwr

        hexdump(msg->data0, msg->data - msg->data0);

        for (;;) {
            bwr.read_size = sizeof(readbuf);
            bwr.read_consumed = 0;
            bwr.read_buffer = (uintptr_t) readbuf;

            res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // the driver transfers the data

            if (res < 0) {
                fprintf(stderr, "binder: ioctl failed (%s)\n", strerror(errno));
                goto fail;
            }

            res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
            if (res == 0) return 0;
            if (res < 0) goto fail;
        }
        ...
    }

To recap: as we said earlier, binder_write_read carries both the write data and the read data, and the driver decides whether a call is writing or reading by checking whether write_size or read_size is zero. So here the data is packed up and handed to the driver as a binder_write_read, whose write_buffer field points at writebuf, i.e. a command word followed by a binder_transaction_data, and that binder_transaction_data in turn records the binder_io's data and offsets.
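The carrier structure itself is small. Its definition, essentially as it appears in the binder UAPI header (exact field types vary slightly between kernel versions):

    struct binder_write_read {
        binder_size_t    write_size;     /* bytes to write */
        binder_size_t    write_consumed; /* bytes consumed by driver */
        binder_uintptr_t write_buffer;   /* here: points at writebuf (cmd + binder_transaction_data) */
        binder_size_t    read_size;      /* bytes to read */
        binder_size_t    read_consumed;  /* bytes consumed by driver */
        binder_uintptr_t read_buffer;    /* here: points at readbuf */
    };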


Kernel: binder_ioctl()

    static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        ...
        switch (cmd) {
        case BINDER_WRITE_READ: {
            struct binder_write_read bwr;
            // copy the binder_write_read header from user space into the local bwr
            if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            }
            if (bwr.write_size > 0) { // there is data to write
                ret = binder_thread_write(proc, thread,
                        (void __user *)bwr.write_buffer,
                        bwr.write_size, &bwr.write_consumed);
            }
            if (bwr.read_size > 0) { // there is data to read
                ret = binder_thread_read(proc, thread,
                        (void __user *)bwr.read_buffer,
                        bwr.read_size, &bwr.read_consumed,
                        filp->f_flags & O_NONBLOCK);
            }
            break;
        }
        case BINDER_SET_MAX_THREADS: // set the maximum number of threads supported
            break;
        case BINDER_SET_CONTEXT_MGR: // ServiceManager only: register itself as the Binder "context manager"; only one may exist in the system
            break;
        case BINDER_THREAD_EXIT: // notify the driver that a Binder thread is exiting
            break;
        case BINDER_VERSION: // query the Binder protocol version
            break;
        }
    }
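As an illustration of the simpler commands, the BINDER_SET_CONTEXT_MGR branch is reached by a one-line ioctl; this mirrors binder_become_context_manager() in the servicemanager sources:

    /* Called once by servicemanager after binder_open(); from then on,
     * handle 0 refers to this process. */
    int binder_become_context_manager(struct binder_state *bs)
    {
        return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    }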

Kernel: binder_thread_write()

The purpose of this function is to write the data into the target process; the buffer passed in is bwr.write_buffer.

What it copies from user space is the writebuf built earlier:

    writebuf.cmd = BC_TRANSACTION;        // note this command
    writebuf.txn.target.handle = target;  // when target == 0 this goes to servicemanager
    writebuf.txn.code = code;
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;                       // size of the data area
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); // size of the offsets area; 4 in this scenario, i.e. a single flat_binder_object
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;                  // start of the data area
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;                 // start of the offsets area

    bwr.write_size = sizeof(writebuf);  // the writebuf built above is wired into bwr; if this is 0 the call is read-only
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;
    int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                void __user *buffer, int size, signed long *consumed)
    {
        uint32_t cmd;
        void __user *ptr = buffer + *consumed; // bwr.write_buffer + 0
        void __user *end = buffer + size;      // bwr.write_buffer + sizeof(writebuf)

        while (ptr < end && thread->return_error == BR_OK) {
            if (get_user(cmd, (uint32_t __user *)ptr)) // fetch cmd: BC_TRANSACTION
                return -EFAULT;
            ptr += sizeof(uint32_t); // now points at the binder_transaction_data (advance after every copy)
            switch (cmd) {
            ...
            case BC_TRANSACTION:
            case BC_REPLY: {
                struct binder_transaction_data tr;
                // copy the binder_transaction_data from user space into a kernel-space binder_transaction_data
                if (copy_from_user(&tr, ptr, sizeof(tr)))
                    return -EFAULT;
                ptr += sizeof(tr);
                binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // [2.1]
                break;
            }
            ...
            }
        }
        return 0;
    }
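The write_size > 0 / read_size > 0 split in binder_ioctl() also permits pure writes. servicemanager's binder_write() builds exactly such a bwr, with the read half zeroed, and its payload ends up in this same loop; roughly:

    int binder_write(struct binder_state *bs, void *data, size_t len)
    {
        struct binder_write_read bwr;
        int res;

        bwr.write_size = len;                 // there is data to write...
        bwr.write_consumed = 0;
        bwr.write_buffer = (uintptr_t) data;
        bwr.read_size = 0;                    // ...and nothing to read
        bwr.read_consumed = 0;
        bwr.read_buffer = 0;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
        return res;
    }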

2.1 binder_transaction()

This is the function that actually moves data between processes and returns data to user space. It is fairly large, so we will take it block by block.

In summary, this function:
- finds the node representing the target process
- searches for a target thread
- if a target thread exists, uses its todo list; otherwise uses the target process's
- allocates a binder_transaction structure for the current call
- allocates a buffer in the target process and copies the user process's data into the kernel
- copies both data and offsets; at this point the data has been copied into the target process
- creates a binder_node for the current process
- from the current process's node, creates a ref for the target process
- changes the type to a reference: BINDER_TYPE_BINDER marks the entity, BINDER_TYPE_HANDLE the reference
- increments the reference count; some information is returned to the current process (test_server)
- sets t->work.type = BINDER_WORK_TRANSACTION;
- sets the binder_work tcomplete's type to BINDER_WORK_TRANSACTION_COMPLETE and inserts it into the current thread's todo list

    static void binder_transaction(struct binder_proc *proc,
                       struct binder_thread *thread,
                       struct binder_transaction_data *tr, int reply)
    {
        ...
        // find the node representing the target process
        if (tr->target.handle) { // non-zero handle: look up the binder_ref by handle in the current process
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle); // find the binder_ref structure
            target_node = ref->node; // the ref leads to the corresponding node
        } else {
            // handle 0 means service_manager
            target_node = binder_context_mgr_node; // the special node, created in binder_ioctl
        }

        // search for a target thread
        target_proc = target_node->proc;
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;
            tmp = thread->transaction_stack;
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;
                tmp = tmp->from_parent;
            }
        }

        // if a target thread exists, use its todo list; otherwise use the target process's
        if (target_thread) {
            e->to_thread = target_thread->pid;
            target_list = &target_thread->todo;
            target_wait = &target_thread->wait;
        } else {
            target_list = &target_proc->todo;
            target_wait = &target_proc->wait;
        }

        // allocate a binder_transaction structure for the current call
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        binder_stats_created(BINDER_STAT_TRANSACTION);
        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
        ...
        // allocate a buffer in the target process and copy the user process's data into the kernel;
        // t->buffer is allocated from target_proc, i.e. the memory comes from the *target* process
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
            tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        ...
        // copy both data and offsets; at this point the data has landed in the target process
        if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
        }
        // tr->data.ptr.offsets is the array of flat_binder_object offsets, i.e. the words at the front of the binder_io buffer
        if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
        }

        for (; offp < off_end; offp++) {
            switch (fp->type) {
            // a binder entity (flat_binder_object holds either a binder or a handle)
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct binder_ref *ref;
                struct binder_node *node = binder_get_node(proc, fp->binder);
                if (node == NULL) {
                    node = binder_new_node(proc, fp->binder, fp->cookie); // create a binder_node for the current process
                }
                ref = binder_get_ref_for_node(target_proc, node); // from the current process's node, create a ref for the target process
                // switch the type to a reference: BINDER_TYPE_BINDER marks the entity, BINDER_TYPE_HANDLE the reference
                if (fp->type == BINDER_TYPE_BINDER)
                    fp->type = BINDER_TYPE_HANDLE;
                else
                    fp->type = BINDER_TYPE_WEAK_HANDLE;
                fp->handle = ref->desc;
                binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                           &thread->todo); // bump the refcount; some info is returned to the current process (test_server)
            } break;
            }
        }

        t->work.type = BINDER_WORK_TRANSACTION;
        list_add_tail(&t->work.entry, target_list); // queue the binder_transaction on the target's list
        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        list_add_tail(&tcomplete->entry, &thread->todo);
        if (target_wait)
            wake_up_interruptible(target_wait);
        return;
    }
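The handle-to-ref lookup at the top of binder_transaction() is a red-black tree search over the calling process's references, keyed by descriptor. In older binder.c it looks roughly like this:

    static struct binder_ref *binder_get_ref(struct binder_proc *proc,
                                             uint32_t desc)
    {
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
            ref = rb_entry(n, struct binder_ref, rb_node_desc);

            if (desc < ref->desc)
                n = n->rb_left;   // handle is smaller: go left
            else if (desc > ref->desc)
                n = n->rb_right;  // handle is larger: go right
            else
                return ref;       // found the ref for this handle
        }
        return NULL;
    }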

Kernel: binder_thread_read()

What this function does:

  • If the buffer holding the results is still empty, write a BR_NOOP message first
  • Then wait to be woken up
  • Enter a loop that handles all the work on the todo lists
  • Take the next binder_work w off a todo list
  • Dispatch on the work type with a switch
  • Adjust the thread priority
  • Prepare the data to return
    static int binder_thread_read(struct binder_proc *proc,
                      struct binder_thread *thread,
                      void  __user *buffer, int size,
                      signed long *consumed, int non_block)
    {
        void __user *ptr = buffer + *consumed; // bwr.read_buffer
        void __user *end = buffer + size;

        // At the start of every read, immediately write a BR_NOOP: every read
        // buffer begins with one. The format written into bwr.read_buffer is:
        // BR_NOOP + cmd + data + cmd + data ...
        // i.e. if the result buffer holds no data yet, write the BR_NOOP message first.
        if (*consumed == 0) {
            if (put_user(BR_NOOP, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
        }

        // if there is no work, sleep here
        ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));

        while (1) {
            if (!list_empty(&thread->todo)) // if the thread's own todo list has work, take it
                w = list_first_entry(&thread->todo, struct binder_work, entry);
            else if (!list_empty(&proc->todo) && wait_for_proc_work)
                // otherwise take work from the owning process's todo list
                w = list_first_entry(&proc->todo, struct binder_work, entry);

            switch (w->type) {
            case BINDER_WORK_TRANSACTION: { // the type set when binder_thread_write queued the work
                t = container_of(w, struct binder_transaction, work); // recover the binder_transaction from the work
            } break;
            }

            // adjust priority
            if (t->buffer->target_node) {
                struct binder_node *target_node = t->buffer->target_node;
                tr.target.ptr = target_node->ptr;
                tr.cookie =  target_node->cookie;
                t->saved_priority = task_nice(current);
                if (t->priority < target_node->min_priority &&
                    !(t->flags & TF_ONE_WAY))
                    binder_set_nice(t->priority);
                else if (!(t->flags & TF_ONE_WAY) ||
                     t->saved_priority > target_node->min_priority)
                    binder_set_nice(target_node->min_priority);
                cmd = BR_TRANSACTION; // heading back from the driver to user space, so the command becomes BR_TRANSACTION
            }

            // build the binder_transaction_data returned to service_manager.c; after this the ioctl is done
            tr.data_size = t->buffer->data_size;
            tr.offsets_size = t->buffer->offsets_size;
            tr.data.ptr.buffer = (void *)t->buffer->data +
                        proc->user_buffer_offset;
            tr.data.ptr.offsets = tr.data.ptr.buffer +
                        ALIGN(t->buffer->data_size,
                            sizeof(void *));

            // copy tr out to user space
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (copy_to_user(ptr, &tr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            break;
        }
        return 0;
    }
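Back in user space, binder_parse() walks this BR_NOOP + cmd + data stream out of readbuf. A heavily trimmed sketch of the servicemanager version (only the two commands relevant here; the real function also handles BR_REPLY, BR_DEAD_BINDER, and more):

    int binder_parse(struct binder_state *bs, struct binder_io *bio,
                     uintptr_t ptr, size_t size, binder_handler func)
    {
        uintptr_t end = ptr + (uintptr_t) size;

        while (ptr < end) {
            uint32_t cmd = *(uint32_t *) ptr; // read the next command word
            ptr += sizeof(uint32_t);
            switch (cmd) {
            case BR_NOOP: // the padding header written by binder_thread_read()
                break;
            case BR_TRANSACTION: {
                // the binder_transaction_data built in binder_thread_read()
                struct binder_transaction_data *txn =
                    (struct binder_transaction_data *) ptr;
                if (func) {
                    struct binder_io msg, reply;
                    bio_init_from_txn(&msg, txn); // wrap the payload back into a binder_io
                    func(bs, txn, &msg, &reply);  // hand it to the service's handler
                }
                ptr += sizeof(*txn);
                break;
            }
            default:
                return -1; // unhandled command in this sketch
            }
        }
        return 0;
    }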