Binder Driver (Part 2)


In the previous section we introduced several core functions in the driver:
- binder_open()
- binder_mmap()
- binder_ioctl()

Only BC_TRANSACTION, BR_TRANSACTION, BC_REPLY and BR_REPLY involve two processes; every other cmd
(BC_XXX, BR_XXX) is just an interaction between the app and the driver that changes its state.

In this section we explore the core structure binder_write_read and see how it packages the data transferred during inter-process communication.

    struct binder_write_read {
        signed long write_size;     /* bytes to write */
        signed long write_consumed; /* bytes consumed by driver */
        unsigned long write_buffer;
        signed long read_size;      /* bytes to read */
        signed long read_consumed;  /* bytes consumed by driver */
        unsigned long read_buffer;
    };

Within it:

    signed long write_size;     /* bytes to write */
    signed long write_consumed; /* bytes consumed by driver */
    unsigned long write_buffer;

These three fields carry the outgoing IPC data / IPC reply data, while

    signed long read_size;      /* bytes to read */
    signed long read_consumed;  /* bytes consumed by driver */
    unsigned long read_buffer;

these three receive the incoming IPC data / IPC reply data.

The core members here are:
- unsigned long write_buffer;
- unsigned long read_buffer;

Given that the IPC data and IPC reply data live behind these two pointers, in what form are they laid out?
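As a quick preview, here is a simplified sketch of what user space typically puts behind write_buffer: a packed stream of command words, each followed by its payload (a binder_transaction_data, introduced just below). It mirrors the writebuf used in the user-space binder.c excerpt later in this article and is illustrative, not verbatim source.

    /*
     * Simplified sketch: the layout behind bwr.write_buffer for a single
     * BC_TRANSACTION, mirroring the writebuf pattern shown later in this
     * article (inside the sending function).
     */
    struct {
        uint32_t cmd;                        /* a BC_ command, e.g. BC_TRANSACTION */
        struct binder_transaction_data txn;  /* its payload: the IPC data descriptor */
    } __attribute__((packed)) writebuf;

    struct binder_write_read bwr;
    bwr.write_size     = sizeof(writebuf);   /* how many bytes the driver may consume */
    bwr.write_consumed = 0;
    bwr.write_buffer   = (uintptr_t)&writebuf;
    bwr.read_size      = 0;                  /* 0 here: this call only writes */
    bwr.read_consumed  = 0;
    bwr.read_buffer    = 0;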

Here is the short answer first; the code analysis comes later.
The binder_transaction_data structure encapsulates the elements of an IPC transaction (sketched after the list below). Concretely, those elements are:

  • Handle
  • RPC code
  • RPC data
  • Binder protocol (BC_XX: a Binder request code, passed from the IPC layer down to the Binder driver; BR_XX: a Binder response code, passed from the Binder driver back up to the IPC layer)
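For reference, here is a trimmed sketch of binder_transaction_data roughly as it appears in the (older) kernel uapi header; exact field types differ between kernel versions, so treat it as an illustration rather than the authoritative definition:

    /*
     * Trimmed sketch of struct binder_transaction_data from the binder uapi
     * header; exact field types vary across kernel versions.
     */
    struct binder_transaction_data {
        union {
            __u32            handle;   /* target: handle of a remote binder reference */
            binder_uintptr_t ptr;      /* target: pointer to a local binder object */
        } target;
        binder_uintptr_t cookie;       /* target object cookie */
        __u32 code;                    /* the RPC code */
        __u32 flags;                   /* e.g. TF_ONE_WAY */
        pid_t sender_pid;              /* filled in by the driver */
        uid_t sender_euid;             /* filled in by the driver */
        binder_size_t data_size;       /* size of the RPC data area */
        binder_size_t offsets_size;    /* size of the offsets array */
        union {
            struct {
                binder_uintptr_t buffer;   /* RPC data (serialized arguments) */
                binder_uintptr_t offsets;  /* offsets of the flat_binder_objects inside buffer */
            } ptr;
            __u8 buf[8];
        } data;
    };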

So where are these protocol commands handled? In the switch statements inside binder_thread_write() and binder_thread_read().


Having come this far, let's walk through the flow once more, using only the write path; the read path works the same way.

  • User space calls binder_write(bs, readbuf, sizeof(uint32_t)); to push the data into a binder_write_read

  • The data is packed into a bwr structure:

    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
  • Then ioctl() is called to hand the data to the driver:
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
  • The driver-side binder_thread_write() then parses the received bwr; this is where the various protocol commands (taken out of bwr) come into play (a condensed binder_ioctl() sketch follows the command table below)

The concrete commands include:

| Request code | Parameter type | Purpose |
| --- | --- | --- |
| BC_TRANSACTION | binder_transaction_data | Client sends request data to the Binder driver |
| BC_REPLY | binder_transaction_data | Server sends reply data to the Binder driver |
| BC_FREE_BUFFER | binder_uintptr_t (pointer) | Free memory (release a buffer) |
| BC_INCREFS | __u32 (descriptor) | Increment the weak reference of a binder_ref |
| BC_DECREFS | __u32 (descriptor) | Decrement the weak reference of a binder_ref |
| BC_ACQUIRE | __u32 (descriptor) | Increment the strong reference of a binder_ref |
| BC_RELEASE | __u32 (descriptor) | Decrement the strong reference of a binder_ref |
| BC_ACQUIRE_DONE | binder_ptr_cookie | Decrement the strong reference of a binder_node |
| BC_INCREFS_DONE | binder_ptr_cookie | Decrement the weak reference of a binder_node |
| BC_REGISTER_LOOPER | no parameter | Create a new looper thread |
| BC_ENTER_LOOPER | no parameter | The application thread enters the looper |
| BC_EXIT_LOOPER | no parameter | The application thread exits the looper |
| ... | | |
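Before diving into the code, here is a condensed sketch of how the BINDER_WRITE_READ case in binder_ioctl() dispatches the bwr to the two functions analyzed below. It is not the complete kernel source; error handling, size checks and locking are omitted, and the surrounding variables come from binder_ioctl()'s scope.

    /*
     * Condensed sketch of the BINDER_WRITE_READ branch inside binder_ioctl()
     * (older driver versions); proc, thread, ubuf, filp and ret belong to
     * binder_ioctl()'s scope. Error handling and locking are omitted.
     */
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;

        if (copy_from_user(&bwr, ubuf, sizeof(bwr)))   /* fetch the caller's bwr */
            return -EFAULT;
        if (bwr.write_size > 0)                        /* caller has BC_ commands to send */
            ret = binder_thread_write(proc, thread,
                        (void __user *)bwr.write_buffer,
                        bwr.write_size, &bwr.write_consumed);
        if (bwr.read_size > 0)                         /* caller wants BR_ work back */
            ret = binder_thread_read(proc, thread,
                        (void __user *)bwr.read_buffer,
                        bwr.read_size, &bwr.read_consumed,
                        filp->f_flags & O_NONBLOCK);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))     /* report consumed byte counts back */
            return -EFAULT;
        break;
    }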

Next, let's analyze the code:

1. binder_thread_write()

The data copied from user space is built by the user-space binder.c:

    writebuf.cmd = BC_TRANSACTION;                    // note this command
    writebuf.txn.target.handle = target;              // target == 0 means the request goes to servicemanager
    writebuf.txn.code = code;
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;  // size of the data area
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); // size of the offsets area; here it is 4, i.e. a single flat_binder_object
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;   // start of the data area
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;  // start of the offsets area
    bwr.write_size = sizeof(writebuf);                // 2. pack a writebuf into bwr; 0 would mean the call is read-only
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;
    int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                void __user *buffer, int size, signed long *consumed)
    {
        uint32_t cmd;
        void __user *ptr = buffer + *consumed;  /* bwr.write_buffer + 0 */
        void __user *end = buffer + size;       /* bwr.write_buffer + sizeof(writebuf) */

        while (ptr < end && thread->return_error == BR_OK) {
            if (get_user(cmd, (uint32_t __user *)ptr))  /* fetch cmd: BC_TRANSACTION */
                return -EFAULT;
            ptr += sizeof(uint32_t);  /* now points at target (advance after every copy) */
            if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                binder_stats.bc[_IOC_NR(cmd)]++;
                proc->stats.bc[_IOC_NR(cmd)]++;
                thread->stats.bc[_IOC_NR(cmd)]++;
            }
            switch (cmd) {
            ...
            case BC_DECREFS: {
                uint32_t target;
                struct binder_ref *ref;
                const char *debug_string;

                if (get_user(target, (uint32_t __user *)ptr))  /* fetch target */
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (target == 0 && binder_context_mgr_node &&
                    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
                    ref = binder_get_ref_for_node(proc,
                               binder_context_mgr_node);
                    ...
                } else {
                    ref = binder_get_ref(proc, target);
                    ...
                }
                ...
                break;
            }
            ...
            case BC_TRANSACTION:
            case BC_REPLY: {
                /*
                 * From the user-space binder.c:
                 * struct {
                 *     uint32_t cmd;
                 *     struct binder_transaction_data txn;
                 * } __attribute__((packed)) writebuf;
                 */
                struct binder_transaction_data tr;

                if (copy_from_user(&tr, ptr, sizeof(tr)))
                    return -EFAULT;
                ptr += sizeof(tr);
                binder_transaction(proc, thread, &tr, cmd == BC_REPLY);  /* [1.1] */
                break;
            }
            ...
            case BC_ENTER_LOOPER:  /* change the looper flag (an int) in the current binder_thread */
                binder_debug(BINDER_DEBUG_THREADS,
                         "binder: %d:%d BC_ENTER_LOOPER\n",
                         proc->pid, thread->pid);
                if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                    thread->looper |= BINDER_LOOPER_STATE_INVALID;
                }
                thread->looper |= BINDER_LOOPER_STATE_ENTERED;
                break;
            ...
            }
            *consumed = ptr - buffer;
        }
        return 0;
    }

1.1 binder_transaction()

This function is fairly large; concretely it does the following:
- For BC_REPLY, the code below is used to find the target process
- Some preliminary checks are performed

    For a BC_TRANSACTION command the target is found as follows:
    if (the incoming handle is not 0) {
        look up the binder_ref in the current process by handle
        follow ref to the corresponding node
    } else {
        handle 0 means service_manager,
        the special process whose node was created in binder_ioctl
    }
  • t->buffer is allocated here; the target process is passed in as a parameter, meaning the memory for t->buffer is allocated from the target process
  • Both the data and the offsets are copied in; at this point the data has been copied into the target process
  • tr->data.ptr.offsets is the array of offsets to the flat_binder_objects, i.e. the four bytes of data at the front of the binder_io
  • A binder entity (flat_binder_object carries a field telling whether it holds a binder or a handle; see the sketch of flat_binder_object after this list)
  • Based on the node of the current process, a ref is created for the target process
  • The type is changed to a reference: BINDER_TYPE_BINDER is the type of an entity, BINDER_TYPE_HANDLE that of a reference
  • The reference count is incremented, and some information is returned to the current process (test_server)
  • The binder_transaction structure is put onto the todo list
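For reference, here is a trimmed sketch of flat_binder_object, roughly as defined in the binder uapi header (exact field types vary across kernel versions); this is the object whose type field gets rewritten from entity to handle in the step above:

    /*
     * Trimmed sketch of struct flat_binder_object from the binder uapi header;
     * exact field types vary across kernel versions.
     */
    struct flat_binder_object {
        __u32 type;     /* BINDER_TYPE_BINDER (local entity) or BINDER_TYPE_HANDLE (reference), plus weak variants */
        __u32 flags;    /* FLAT_BINDER_FLAG_PRIORITY_MASK, FLAT_BINDER_FLAG_ACCEPTS_FDS, ... */
        union {
            binder_uintptr_t binder;  /* valid for BINDER_TYPE_(WEAK_)BINDER: pointer to the local object */
            __u32 handle;             /* valid for BINDER_TYPE_(WEAK_)HANDLE: reference number in the receiving process */
        };
        binder_uintptr_t cookie;      /* extra data attached to the local object */
    };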

The call: binder_transaction(proc, thread, &tr, cmd == BC_REPLY)

    static void binder_transaction(struct binder_proc *proc,
                       struct binder_thread *thread,
                       struct binder_transaction_data *tr, int reply)
    {
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        size_t *offp, *off_end;
        size_t off_min;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct list_head *target_list;
        wait_queue_head_t *target_wait;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error = BR_OK;

        e = binder_transaction_log_add(&binder_transaction_log);
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
        e->from_proc = proc->pid;
        e->from_thread = thread->pid;
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
        /* for BC_REPLY the target process is found with the code below */
        if (reply) {
            in_reply_to = thread->transaction_stack;
            binder_set_nice(in_reply_to->saved_priority);
            if (in_reply_to->to_thread != thread) {}
            thread->transaction_stack = in_reply_to->to_parent;
            target_thread = in_reply_to->from;
            if (target_thread->transaction_stack != in_reply_to) {}
            target_proc = target_thread->proc;
        } else {
            /* for a BC_TRANSACTION command the target is looked up as follows */
            if (tr->target.handle) { /* if the incoming handle is not 0, find the binder_ref by handle in the current process */
                struct binder_ref *ref;
                ref = binder_get_ref(proc, tr->target.handle); /* look up the binder_ref structure */
                target_node = ref->node;                       /* follow ref to the corresponding node */
            } else {
                /* handle 0 means service_manager */
                target_node = binder_context_mgr_node; /* the special node created in binder_ioctl */
            }
            e->to_node = target_node->debug_id;
            target_proc = target_node->proc;
            if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {}
            if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {}
        }
        if (target_thread) {
            e->to_thread = target_thread->pid;
            target_list = &target_thread->todo;
            target_wait = &target_thread->wait;
        } else {
            target_list = &target_proc->todo;
            target_wait = &target_proc->wait;
        }
        e->to_proc = target_proc->pid;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        binder_stats_created(BINDER_STAT_TRANSACTION);
        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        if (tcomplete == NULL) {
            return_error = BR_FAILED_REPLY;
            goto err_alloc_tcomplete_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
        t->debug_id = ++binder_last_id;
        e->debug_id = t->debug_id;
        ...
        t->sender_euid = proc->tsk->cred->euid;
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        t->priority = task_nice(current);
        /* t->buffer is allocated here; the target process is passed in,
         * i.e. the memory for t->buffer comes from the target process */
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
            tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {
            return_error = BR_FAILED_REPLY;
            goto err_binder_alloc_buf_failed;
        }
        t->buffer->allow_user_free = 0;
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        if (target_node)
            binder_inc_node(target_node, 1, 0, NULL);
        offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
        /* copy the data and the offsets in; at this point the data has been copied into the target process */
        if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
            binder_user_error("binder: %d:%d got transaction with invalid "
                "data ptr\n", proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_copy_data_failed;
        }
        /* tr->data.ptr.offsets is the array of offsets to the flat_binder_objects,
         * i.e. the four bytes at the front of the binder_io */
        if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
            binder_user_error("binder: %d:%d got transaction with invalid "
                "offsets ptr\n", proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_copy_data_failed;
        }
        ...
            switch (fp->type) {
            /* a binder entity (flat_binder_object carries a field telling whether it holds a binder or a handle) */
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct binder_ref *ref;
                struct binder_node *node = binder_get_node(proc, fp->binder);
                if (node == NULL) {
                    node = binder_new_node(proc, fp->binder, fp->cookie); /* create a binder_node for the current process */
                    if (node == NULL) {
                        return_error = BR_FAILED_REPLY;
                        goto err_binder_new_node_failed;
                    }
                    node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                    node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
                }
                if (fp->cookie != node->cookie) {
                    binder_user_error("binder: %d:%d sending u%p "
                        "node %d, cookie mismatch %p != %p\n",
                        proc->pid, thread->pid,
                        fp->binder, node->debug_id,
                        fp->cookie, node->cookie);
                    goto err_binder_get_ref_for_node_failed;
                }
                if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                ref = binder_get_ref_for_node(target_proc, node); /* from the current process's node, create a ref for the target process */
                if (ref == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                /* change the type to a reference: BINDER_TYPE_BINDER is the type of
                 * an entity, BINDER_TYPE_HANDLE that of a reference */
                if (fp->type == BINDER_TYPE_BINDER)
                    fp->type = BINDER_TYPE_HANDLE;
                else
                    fp->type = BINDER_TYPE_WEAK_HANDLE;
                fp->handle = ref->desc;
                binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                           &thread->todo); /* bump the ref count; some information is returned to the current process (test_server) */
            } break;
            ...
            }

        if (reply) {
            BUG_ON(t->buffer->async_transaction != 0);
            binder_pop_transaction(target_thread, in_reply_to);
        } else if (!(t->flags & TF_ONE_WAY)) {
            BUG_ON(t->buffer->async_transaction != 0);
            t->need_reply = 1;
            t->from_parent = thread->transaction_stack;
            thread->transaction_stack = t;
        } else {
            BUG_ON(target_node == NULL);
            BUG_ON(t->buffer->async_transaction != 1);
            if (target_node->has_async_transaction) {
                target_list = &target_node->async_todo; /* the todo list */
                target_wait = NULL;
            } else
                target_node->has_async_transaction = 1;
        }
        t->work.type = BINDER_WORK_TRANSACTION;
        list_add_tail(&t->work.entry, target_list); /* put the binder_transaction structure onto the list */
        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        list_add_tail(&tcomplete->entry, &thread->todo);
        if (target_wait)
            wake_up_interruptible(target_wait);
        return;
    }

2. binder_thread_read()

Let's first look at how the data is laid out when it is read back.
Again there is a binder_write_read structure containing a readbuf, and the readbuf has the form:

|BR_NOOP|cmd|data|cmd|data|

When the todo list is empty, the thread goes to sleep waiting for client requests; when there is work, it enters the while loop and produces the corresponding response codes.
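On the user-space side this |BR_NOOP|cmd|data|...| stream is consumed command by command. The following is a minimal sketch modeled loosely on servicemanager's binder_parse(); the function name parse_read_buffer and the simplified handling are illustrative assumptions, not the actual AOSP code.

    #include <stddef.h>
    #include <stdint.h>
    /* Assumes the binder uapi definitions (BR_* codes, struct
     * binder_transaction_data) are available, as in servicemanager's binder.h. */

    /* Walk the |BR_NOOP|cmd|data|cmd|data| stream returned in bwr.read_buffer. */
    static int parse_read_buffer(uintptr_t ptr, size_t size)
    {
        uintptr_t end = ptr + size;

        while (ptr < end) {
            uint32_t cmd = *(uint32_t *)ptr;   /* every record starts with a BR_ code */
            ptr += sizeof(uint32_t);

            switch (cmd) {
            case BR_NOOP:                      /* padding word written at the head of each read */
            case BR_TRANSACTION_COMPLETE:      /* no payload */
                break;
            case BR_TRANSACTION:               /* followed by a binder_transaction_data */
            case BR_REPLY: {
                struct binder_transaction_data *txn =
                        (struct binder_transaction_data *)ptr;
                ptr += sizeof(*txn);
                /* dispatch txn->code / txn->data here; for BR_TRANSACTION the
                 * server would then send a BC_REPLY back to the driver */
                (void)txn;
                break;
            }
            default:
                return -1;                     /* unknown command: stop parsing */
            }
        }
        return 0;
    }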

On the kernel side, binder_thread_read() fills that read buffer:

    static int binder_thread_read(struct binder_proc *proc,
                      struct binder_thread *thread,
                      void  __user *buffer, int size,
                      signed long *consumed, int non_block)
    {
        void __user *ptr = buffer + *consumed; /* bwr.read_buffer */
        void __user *end = buffer + size;
        int ret = 0;
        int wait_for_proc_work;

        /* As soon as a read begins, a BR_NOOP is written; every read starts with BR_NOOP.
         * The data written into bwr.read_buffer has the form: BR_NOOP + cmd + data + cmd + data ... */
        if (*consumed == 0) {
            if (put_user(BR_NOOP, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
        }

    retry:
        wait_for_proc_work = thread->transaction_stack == NULL &&
                    list_empty(&thread->todo);
        if (thread->return_error != BR_OK && ptr < end) {
            if (thread->return_error2 != BR_OK) {
                if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (ptr == end)
                    goto done;
                thread->return_error2 = BR_OK;
            }
            if (put_user(thread->return_error, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            thread->return_error = BR_OK;
            goto done;
        }

        thread->looper |= BINDER_LOOPER_STATE_WAITING;
        if (wait_for_proc_work)
            proc->ready_threads++;
        binder_unlock(__func__);
        if (wait_for_proc_work) {
            if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                        BINDER_LOOPER_STATE_ENTERED))) {
                binder_user_error("binder: %d:%d ERROR: Thread waiting "
                    "for process work before calling BC_REGISTER_"
                    "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                    proc->pid, thread->pid, thread->looper);
                wait_event_interruptible(binder_user_error_wait,
                             binder_stop_on_user_error < 2);
            }
            binder_set_nice(proc->default_priority);
            if (non_block) {
                if (!binder_has_proc_work(proc, thread))
                    ret = -EAGAIN;
            } else
                ret = wait_event_interruptible_exclusive(proc->wait,
                        binder_has_proc_work(proc, thread)); /* if there is no work, sleep here */
        } else {
            ...
        }
        binder_lock(__func__);
        ...
        while (1) {
            uint32_t cmd;
            struct binder_transaction_data tr;
            struct binder_work *w;
            struct binder_transaction *t = NULL;

            if (!list_empty(&thread->todo)) /* if the thread's todo list has work, take it */
                w = list_first_entry(&thread->todo, struct binder_work, entry);
            else if (!list_empty(&proc->todo) && wait_for_proc_work)
                /* otherwise take work from the todo list of the thread's process */
                w = list_first_entry(&proc->todo, struct binder_work, entry);
            else {
                if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                    goto retry;
                break;
            }

            if (end - ptr < sizeof(tr) + 4)
                break;

            switch (w->type) {
            case BINDER_WORK_TRANSACTION: { /* this type was set when binder_thread_write added the work to the list */
                t = container_of(w, struct binder_transaction, work); /* recover the binder_transaction from the work */
            } break;
            case BINDER_WORK_TRANSACTION_COMPLETE: {
                ...
            } break;
            case BINDER_WORK_NODE: {
                ...
            } break;
            case BINDER_WORK_DEAD_BINDER:
            case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
            case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
                ...
            } break;
            }

            if (!t)
                continue;
            BUG_ON(t->buffer == NULL);
            if (t->buffer->target_node) {
                struct binder_node *target_node = t->buffer->target_node;
                tr.target.ptr = target_node->ptr;
                tr.cookie =  target_node->cookie;
                t->saved_priority = task_nice(current);
                if (t->priority < target_node->min_priority &&
                    !(t->flags & TF_ONE_WAY))
                    binder_set_nice(t->priority);
                else if (!(t->flags & TF_ONE_WAY) ||
                     t->saved_priority > target_node->min_priority)
                    binder_set_nice(target_node->min_priority);
                cmd = BR_TRANSACTION; /* returning from the driver to user space, so the command becomes BR_TRANSACTION */
            } else {
                tr.target.ptr = NULL;
                tr.cookie = NULL;
                cmd = BR_REPLY;
            }
            tr.code = t->code;
            tr.flags = t->flags;
            tr.sender_euid = t->sender_euid;
            if (t->from) {
                struct task_struct *sender = t->from->proc->tsk;
                tr.sender_pid = task_tgid_nr_ns(sender,
                                current->nsproxy->pid_ns);
            } else {
                tr.sender_pid = 0;
            }
            /* build the binder_transaction_data here and return it to service_manager.c;
             * at this point the ioctl is done */
            tr.data_size = t->buffer->data_size;
            tr.offsets_size = t->buffer->offsets_size;
            tr.data.ptr.buffer = (void *)t->buffer->data +
                        proc->user_buffer_offset;
            tr.data.ptr.offsets = tr.data.ptr.buffer +
                        ALIGN(t->buffer->data_size,
                            sizeof(void *));
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (copy_to_user(ptr, &tr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);

            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "binder: %d:%d %s %d %d:%d, cmd %d"
                     "size %zd-%zd ptr %p-%p\n",
                     proc->pid, thread->pid,
                     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                     "BR_REPLY",
                     t->debug_id, t->from ? t->from->proc->pid : 0,
                     t->from ? t->from->pid : 0, cmd,
                     t->buffer->data_size, t->buffer->offsets_size,
                     tr.data.ptr.buffer, tr.data.ptr.offsets);

            list_del(&t->work.entry);
            t->buffer->allow_user_free = 1;
            if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
                t->to_parent = thread->transaction_stack;
                t->to_thread = thread;
                thread->transaction_stack = t;
            } else {
                t->buffer->transaction = NULL;
                kfree(t);
                binder_stats_deleted(BINDER_STAT_TRANSACTION);
            }
            break;
        }
        ...
        return 0;
    }

To summarize the write path above:
- User space calls binder_write(), which builds a binder_write_read structure; the corresponding ioctl command is BINDER_WRITE_READ
- The data then enters the BINDER_WRITE_READ case in binder_ioctl(), which uses bwr.write_size to decide whether this is a write
- If it is a write, the cmd is read from the buffer; BC_TRANSACTION and BC_REPLY are the write commands
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY) is then called; tr was copied out of the buffer that bwr.write_buffer points to
- The target process is located
- The data is placed into the target process

To summarize the read path above:
- The call again goes through binder_ioctl()
- Because bwr.read_size > 0, binder_thread_read() is called
- The thread sleeps until it is woken up; after waking it enters the loop
- Work is taken off the thread's todo list
- cmd = BR_TRANSACTION; // since the data is going from the driver back to user space, the command is changed to BR_TRANSACTION
- A binder_transaction_data is built and returned to service_manager.c; at this point the ioctl is complete
- The data has thus been written into the buffer argument passed to binder_thread_read() and can now be read in user space
