Understanding Android Binder from the Kernel [Part 3]

Preface

Understanding Android Binder from the Kernel [Part 1] implemented a C-language demo of Binder IPC with a client and a server, and walked in detail through the user-space source for the server registering its service with service_manager and the client looking that service up; that analysis, however, stopped at user space. Understanding Android Binder from the Kernel [Part 2] then analyzed the Binder driver kernel source for the service registration path in detail. This article continues with the kernel source for the service lookup and service invocation paths. With the previous article as a foundation, this one should read much more easily, so let's get started.

I. Kernel Source Analysis of the Service Lookup Path

1. Review of the Client's User-Space Lookup Code

Understanding Android Binder from the Kernel [Part 1] analyzed the client's user-space lookup code in detail; here is a brief review of how the client obtains a service through the svcmgr_lookup function.

int main(int argc, char **argv)
{
    int fd;
    struct binder_state *bs;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER;
    uint32_t handle;
	int ret;

	if (argc < 2){
        fprintf(stderr, "Usage:\n");
        fprintf(stderr, "%s <hello|goodbye>\n", argv[0]);
        fprintf(stderr, "%s <hello|goodbye> <name>\n", argv[0]);
        return -1;
	}
    
    //open the binder driver
    bs = binder_open(128*1024);
    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }
	g_bs = bs;
    
    //send a request to service_manager to obtain the handle of the hello service
	handle = svcmgr_lookup(bs, svcmgr, "hello");
	if (!handle) {
        fprintf(stderr, "failed to get hello service\n");
        return -1;
	}
	g_hello_handle = handle;
	fprintf(stderr, "Handle for hello service = %d\n", g_hello_handle);

	/* send data to the server */
	if (!strcmp(argv[1], "hello"))
	{
		if (argc == 2) {
			sayhello();
		} else if (argc == 3) {
			ret = sayhello_to(argv[2]);
	        fprintf(stderr, "get ret of sayhello_to = %d\n", ret);		
		}
	}

	binder_release(bs, handle);

    return 0;
}


uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4); // carve out space for msg inside iodata
    bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME); // write android.os.IServiceManager
    bio_put_string16_x(&msg, name); // write the service name "hello"

    // target = 0 means service_manager; SVC_MGR_CHECK_SERVICE selects service_manager's lookup function
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    // get the handle of the hello service
    handle = bio_get_ref(&reply);

    if (handle)
        binder_acquire(bs, handle);

    binder_done(bs, &msg, &reply);

    return handle;
}

int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }
    // build the binder_transaction_data
    writebuf.cmd = BC_TRANSACTION;//ioctl command type
    writebuf.txn.target.handle = target;//which process the data goes to
    writebuf.txn.code = code;//which function of the target process to invoke
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;//size of the payload itself
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);//size of the offsets header that locates binder objects (e.g. the binder_node carrying the server's function address), cf. bio_put_obj(&msg, ptr);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;//start of the payload memory
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;//start of the offsets memory
   
    // build the binder_write_read
    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    hexdump(msg->data0, msg->data - msg->data0);
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//send the data to the driver via ioctl

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }
        // parse the data: unpack readbuf into reply
        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}

As you can see, svcmgr_lookup also assembles a binder_io message and then calls binder_call, which wraps the binder_io data in a binder_write_read structure and finally hands it to service_manager via ioctl.

This mirrors the service registration flow analyzed in Understanding Android Binder from the Kernel [Part 2]: organize the data, then push it to service_manager through ioctl.

Now let's step into the Linux kernel source and see what actually happens after the data is sent to service_manager.
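
Before stepping into the kernel, here is a minimal, compilable sketch of the byte layout that binder_call writes. The names btd_sketch and writebuf_sketch, and the reduced field set, are simplified stand-ins of my own (the authoritative binder_transaction_data lives in the UAPI header linux/android/binder.h), so treat the sizes and field order as illustrative assumptions:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the UAPI struct binder_transaction_data:
 * only the fields binder_call touches are kept. */
struct btd_sketch {
    union { uint32_t handle; uintptr_t ptr; } target; /* 0 == service_manager */
    uintptr_t cookie;
    uint32_t  code;          /* e.g. SVC_MGR_CHECK_SERVICE */
    uint32_t  flags;
    size_t    data_size;     /* bytes of payload */
    size_t    offsets_size;  /* bytes of the offsets array */
    struct { uintptr_t buffer; uintptr_t offsets; } data;
};

/* The write half of BINDER_WRITE_READ is just [cmd][binder_transaction_data]. */
struct writebuf_sketch {
    uint32_t cmd;            /* BC_TRANSACTION */
    struct btd_sketch txn;
} __attribute__((packed));

int main(void)
{
    printf("cmd at offset %zu, txn at offset %zu, total %zu bytes\n",
           offsetof(struct writebuf_sketch, cmd),
           offsetof(struct writebuf_sketch, txn),
           sizeof(struct writebuf_sketch));
    return 0;
}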

2. Kernel Source Analysis of the Client's Service Lookup

When the client calls ioctl, the corresponding handler in the kernel Binder driver is the binder_ioctl function.

2.1 The client sends data to service_manager

1. binder_ioctl
// the client calls ioctl to send data to the driver
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

// the Binder kernel driver handles this in its binder_ioctl function
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	// get the binder_proc of the calling process; it was created when the process opened the binder driver (we'll analyze that later)
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	//get (or create) the binder_thread for this process
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}
    
    // from the analysis above, cmd == BINDER_WRITE_READ here
	switch (cmd) {
	case BINDER_WRITE_READ:
	    // handle the client's data
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
		
	......
}
2. binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
                                struct binder_thread *thread)
{
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
        void __user *ubuf = (void __user *)arg; // the data in user space
        // binder_write_read to hold the client's data fetched from user space
        struct binder_write_read bwr;

        //copy the binder_write_read header from user space into the kernel
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
                ret = -EFAULT;
                goto out;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                     proc->pid, thread->pid,
                     (u64)bwr.write_size, (u64)bwr.write_buffer,
                     (u64)bwr.read_size, (u64)bwr.read_buffer);
        
        // as analyzed above, the client's data is carried in binder_write_read, and its write_size is greater than 0 here
        if (bwr.write_size > 0) { // write data to the driver
                ret = binder_thread_write(proc, thread,
                                          bwr.write_buffer,
                                          bwr.write_size,
                                          &bwr.write_consumed);
                trace_binder_write_done(ret);
                if (ret < 0) {
                        bwr.read_consumed = 0;
                        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                                ret = -EFAULT;
                        goto out;
                }
        }
        if (bwr.read_size > 0) { // read data from the driver
                ret = binder_thread_read(proc, thread, bwr.read_buffer,
                                         bwr.read_size,
                                         &bwr.read_consumed,
                                         filp->f_flags & O_NONBLOCK);
                trace_binder_read_done(ret);
                binder_inner_proc_lock(proc);
                if (!binder_worklist_empty_ilocked(&proc->todo))
                        binder_wakeup_proc_ilocked(proc);
                binder_inner_proc_unlock(proc);
                if (ret < 0) {
                        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                                ret = -EFAULT;
                        goto out;
                }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
                     proc->pid, thread->pid,
                     (u64)bwr.write_consumed, (u64)bwr.write_size,
                     (u64)bwr.read_consumed, (u64)bwr.read_size);
        // copy the results back to user space
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
                ret = -EFAULT;
                goto out;
        }
out:
        return ret;
}

// Simplified sketches: the real copy_from_user/copy_to_user perform access
// checks, can fault, and return the number of bytes left uncopied.
static inline int copy_from_user(void *to, const void __user volatile *from,
				 unsigned long n)
{
	volatile_memcpy(to, from, n);
	return 0;
}

static inline int copy_to_user(void __user volatile *to, const void *from,
			       unsigned long n)
{
	volatile_memcpy(to, from, n);
	return 0;
}

3. binder_thread_write

At this point cmd is BC_TRANSACTION.

static int binder_thread_write(struct binder_proc *proc,
                        struct binder_thread *thread,
                        binder_uintptr_t binder_buffer, size_t size,
                        binder_size_t *consumed)
{
        uint32_t cmd;
        struct binder_context *context = proc->context;
        // the data buffer; as summarized earlier, it consists of a cmd followed by a binder_transaction_data
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        // consumed == 0 for freshly sent data, so ptr points at the start of the user-space buffer
        void __user *ptr = buffer + *consumed;
        // points at the end of the buffer
        void __user *end = buffer + size;
        // read the incoming records (cmd + binder_transaction_data) one by one
        while (ptr < end && thread->return_error.cmd == BR_OK) {
                int ret;
                
                // fetch the cmd value from the user-space buffer
                if (get_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                // advance past cmd so ptr points at the start of the binder_transaction_data
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                        atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
                }
                // as summarized earlier, cmd is BC_TRANSACTION here
                switch (cmd) {
                   ......
                   /*
                BC_TRANSACTION: cmd a process uses to send data
                BR_TRANSACTION: cmd the peer receives for a BC_TRANSACTION
                
                BC_REPLY: cmd a process uses to send a reply
                BR_REPLY: cmd the peer receives for a BC_REPLY
                */
                    case BC_TRANSACTION:
                    case BC_REPLY: {
                            struct binder_transaction_data tr;
                            
                            // copy the binder_transaction_data from user space into the kernel
                            if (copy_from_user(&tr, ptr, sizeof(tr)))
                                    return -EFAULT;
                            // advance past it so ptr points at the next cmd
                            ptr += sizeof(tr);
                            // process the binder_transaction_data
                            binder_transaction(proc, thread, &tr,
                                               cmd == BC_REPLY, 0);
                            break;
                    }
                }
        }
        ......
}

/* Simplified sketch: the real get_user is an arch-specific macro that reads a single value from user space. */
int get_user(int *val, const int __user *ptr) {
    if (copy_from_user(val, ptr, sizeof(int))) {
        return -EFAULT; // error code
    }
    return 0; // success
}

4. binder_transaction
4.1 Finding the target process, service_manager
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        ......
        // here the client is sending data into the kernel, so reply is false
        if (reply) { // path where the Binder driver returns data to user space
                ......
        } else { // path where user-space data enters the kernel
                //1. find the target process; this lookup goes to service_manager, i.e. tr->target.handle == 0
                if (tr->target.handle) { // tr->target.handle == 0 means service_manager; nonzero means some other process
                        .......
                } else { //handle the service_manager case
                        mutex_lock(&context->context_mgr_node_lock);
                        //this node was created via the BINDER_SET_CONTEXT_MGR cmd when service_manager started
                        target_node = context->binder_context_mgr_node; 
                        if (target_node)
                                target_node = binder_get_node_refs_for_txn(
                                                target_node, &target_proc,
                                                &return_error);
                        else
                                return_error = BR_DEAD_REPLY;
                        mutex_unlock(&context->context_mgr_node_lock);
                        if (target_node && target_proc->pid == proc->pid) {
                                binder_user_error("%d:%d got transaction to context manager from process owning it\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EINVAL;
                                return_error_line = __LINE__;
                                goto err_invalid_target_handle;
                        }
                }
                ......
        }
 }
4.2 Copying data.ptr.offsets of the client's binder_transaction_data into service_manager's mmapped kernel space
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        int ret;
        struct binder_transaction *t;
        struct binder_work *w;
        struct binder_work *tcomplete;
        binder_size_t buffer_offset = 0;
        binder_size_t off_start_offset, off_end_offset;
        binder_size_t off_min;
        binder_size_t sg_buf_offset, sg_buf_end_offset;
        binder_size_t user_offset = 0;
        struct binder_proc *target_proc = NULL;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error = 0;
        uint32_t return_error_param = 0;
        uint32_t return_error_line = 0;
        binder_size_t last_fixup_obj_off = 0;
        binder_size_t last_fixup_min_off = 0;
        struct binder_context *context = proc->context;
        int t_debug_id = atomic_inc_return(&binder_last_id);
        ktime_t t_start_time = ktime_get();
        char *secctx = NULL;
        u32 secctx_sz = 0;
        struct list_head sgc_head;
        struct list_head pf_head;
        const void __user *user_buffer = (const void __user *)
                                (uintptr_t)tr->data.ptr.buffer;
        INIT_LIST_HEAD(&sgc_head);
        INIT_LIST_HEAD(&pf_head);

        e = binder_transaction_log_add(&binder_transaction_log);
        e->debug_id = t_debug_id;
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
        e->from_proc = proc->pid;
        e->from_thread = thread->pid;
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
        strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

        binder_inner_proc_lock(proc);
        binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
        binder_inner_proc_unlock(proc);

        if (reply) {// find the process to reply to
                ......
        } else {// 1. find the target process to send to
                if (tr->target.handle) { // the target is not service_manager
                       .....
                } else { //the target is service_manager
                        // find service_manager's binder_node
                        .....
                }
                ......
        }
        if (target_thread)
                e->to_thread = target_thread->pid;
        e->to_proc = target_proc->pid;

        /* TODO: reuse incoming transaction for reply */
        // allocate memory for the binder_transaction
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        
        .....

        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
        else
                t->from = NULL;
        // record basic info about sender and receiver
        t->from_pid = proc->pid;
        t->from_tid = thread->pid;
        t->sender_euid = task_euid(proc->tsk);
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        t->priority = task_nice(current);

        ......

        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                tr->offsets_size, extra_buffers_size,
                !reply && (t->flags & TF_ONE_WAY));
        ......
        
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
        trace_binder_transaction_alloc_buf(t->buffer);
        
        // copy the client's data into the mmapped memory of the target process service_manager, i.e. the memory t->buffer points to
        if (binder_alloc_copy_user_to_buffer(
                                &target_proc->alloc,
                                t->buffer,
                                ALIGN(tr->data_size, sizeof(void *)),
                                (const void __user *)
                                        (uintptr_t)tr->data.ptr.offsets,
                                tr->offsets_size)) {
                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EFAULT;
                return_error_line = __LINE__;
                goto err_copy_data_failed;
        }
        ......
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		// copy the sender's data into service_manager's mmapped kernel memory
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
4.3 Copying data.ptr.buffer of the client's binder_transaction_data into service_manager's mmapped kernel space
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        binder_size_t user_offset = 0;
        const void __user *user_buffer = (const void __user *)
                                (uintptr_t)tr->data.ptr.buffer;

        ...... // same as 4.2: find the target process, allocate t and t->buffer, copy the offsets array

		/* Done processing objects, copy the rest of the buffer */
		if (binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					tr->data_size - user_offset)) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		
		......
}
4.4 Queuing the pending work on the todo list of service_manager's binder_proc or binder_thread
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        ......
        // same as 4.2/4.3: find the target process, allocate t and t->buffer,
        // and copy the offsets array and payload into the target's mmapped buffer
        ......

		t->work.type = BINDER_WORK_TRANSACTION;

		if (reply) {
			......
		} else if (!(t->flags & TF_ONE_WAY)) {
			BUG_ON(t->buffer->async_transaction != 0);
			binder_inner_proc_lock(proc);
			/*
			 * Defer the TRANSACTION_COMPLETE, so we don't return to
			 * userspace immediately; this allows the target process to
			 * immediately start processing this transaction, reducing
			 * latency. We will then return the TRANSACTION_COMPLETE when
			 * the target replies (or there is an error).
			 */
			binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
			t->need_reply = 1;
			t->from_parent = thread->transaction_stack;
			//push onto the sending thread's transaction stack
			thread->transaction_stack = t;
			binder_inner_proc_unlock(proc);
			//queue the work on the todo list of the target's binder_proc or binder_thread
			return_error = binder_proc_transaction(t,
					target_proc, target_thread);
			if (return_error) {
				binder_inner_proc_lock(proc);
				binder_pop_transaction_ilocked(thread, t);
				binder_inner_proc_unlock(proc);
				goto err_dead_proc_or_thread;
			}
		} else {
			......
		}
}
4.5 binder_proc_transaction queues the pending work on service_manager's todo list and wakes service_manager up
static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool frozen = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		frozen = true;
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((frozen && !oneway) || proc->is_dead ||
			(thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);//queue the work on the target thread's todo list
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);//queue the work on the target proc's todo list
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	if (oneway && frozen)
		return BR_TRANSACTION_PENDING_FROZEN;

	return 0;
}

static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo); // queue the pending work on the thread's todo list

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without I/O block. Without it, threads risk waiting
	 * indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
	    // wake up service_manager
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
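
The enqueue-then-wake pattern above is a classic producer/consumer handoff. As a rough user-space analogy only (pthreads instead of binder_wait_for_work/wake_up_interruptible_sync, and a trivial singly linked list instead of binder_work), the sketch below shows the same shape: the producer appends to a todo list under a lock and signals; the consumer sleeps until the list is non-empty:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work { struct work *next; int payload; };

static struct work *todo;                 /* head of the todo list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;

/* analogous to binder_enqueue_thread_work_ilocked + wake_up_interruptible_sync */
static void enqueue_work(int payload)
{
    struct work *w = malloc(sizeof(*w));
    w->payload = payload;
    pthread_mutex_lock(&lock);
    w->next = todo;
    todo = w;
    pthread_cond_signal(&wait_q);         /* wake the sleeping consumer */
    pthread_mutex_unlock(&lock);
}

/* analogous to binder_thread_read: sleep until the todo list has work */
static void *consumer(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!todo)
        pthread_cond_wait(&wait_q, &lock);
    struct work *w = todo;
    todo = w->next;
    pthread_mutex_unlock(&lock);
    printf("consumed work %d\n", w->payload);
    free(w);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, consumer, NULL);
    enqueue_work(42);
    pthread_join(t, NULL);
    return 0;
}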

2.2 service_manager is woken up

1. service_manager issues an ioctl to read the data from the kernel
int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }


    svcmgr_handle = BINDER_SERVICE_MANAGER;
    binder_loop(bs, svcmgr_handler);

    return 0;
}

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        
        // issue the read
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
2. binder_ioctl
// res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); enters the binder driver
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	//get (or create) the binder_thread for this process
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
		......
	}
	......
}
3. binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

    //copy the binder_write_read header from user space into the kernel
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		......
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
4. binder_thread_read

Reads the data queued in service_manager's kernel space and writes it out to service_manager's user space.

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))//every read starts with a BR_NOOP header
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

    //sleep if there is no data
	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
	
	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		//if the thread's todo list has work, take that list
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		//otherwise, if proc->todo has work (and we are waiting for proc work), take that list
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		//handle each work type; when the sender woke service_manager, the queued binder_work.type was BINDER_WORK_TRANSACTION
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);//recover the sender's binder_transaction from the embedded work field
		} break;
		
		......
		
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
            //the sender's data arrived at service_manager with cmd BC_TRANSACTION;
            //when handing it up to service_manager, the driver sets cmd to BR_TRANSACTION
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		......
		
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		// write cmd into service_manager's user space
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		// write tr into service_manager's user space; tr.transaction_data carries the data the client sent
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		......

done:

	......
	
	return 0;
}
5. Layout of the data read from service_manager's kernel space

(Figure: the read buffer returned to user space is laid out as [BR_NOOP][BR_TRANSACTION][binder_transaction_data], with data.ptr.buffer and data.ptr.offsets pointing into the mmapped buffer.)

6. binder_parse parses the data the client sent to service_manager

At this point cmd is BR_TRANSACTION.

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//the data is read here

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        
        //parse the data just read
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        ......
        //handling of received data (the request carries the service name; the reply will carry the service handle)
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;
                //build the binder_io structs
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                //process the binder_io
                res = func(bs, txn, &msg, &reply); // func = svcmgr_handler, which adds/looks up services
                //send the result back to the requester
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        ......
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

7. svcmgr_handler processes the client's request and looks up the requested service handle
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%x code=%d pid=%d uid=%d\n",
    //  txn->target.handle, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.handle != svcmgr_handle)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len); //this is android.os.IServiceManager
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {//the caller must pass android.os.IServiceManager
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }


    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len); // the name of the service the client wants: "hello"
        if (s == NULL) {
            return -1;
        }
        // search service_manager's service list for the handle of the service named hello
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
            
        // write the service handle into reply
        bio_put_ref(reply, handle);
        return 0;

    ......

    bio_put_uint32(reply, 0);//finally, construct the reply and append a 0 status
    return 0;
}

uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *si;

    if (!svc_can_find(s, len, spid)) {
        ALOGE("find_service('%s') uid=%d - PERMISSION DENIED\n",
             str8(s, len), uid);
        return 0;
    }
    si = find_svc(s, len);
    //ALOGI("check_service('%s') handle = %x\n", str8(s, len), si ? si->handle : 0);
    if (si && si->handle) {
        if (!si->allow_isolated) {
            // If this service doesn't allow access from isolated processes,
            // then check the uid to see if it is isolated.
            uid_t appid = uid % AID_USER;
            if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
                return 0;
            }
        }
        return si->handle;
    } else {
        return 0;
    }
}

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}
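
For reference, the svclist nodes that find_svc walks are declared roughly as below in AOSP's frameworks/native/cmds/servicemanager/service_manager.c; this is reproduced from memory, so field order and details may vary across Android versions:

#include <stdint.h>
#include <stddef.h>

struct binder_state;
struct binder_death {                 /* from the demo's binder.h */
    void (*func)(struct binder_state *bs, void *ptr);
    void *ptr;
};

struct svcinfo {
    struct svcinfo *next;             /* singly linked list of registered services */
    uint32_t handle;                  /* the binder handle registered for this service */
    struct binder_death death;        /* death-notification bookkeeping */
    int allow_isolated;               /* may isolated processes access it? */
    size_t len;                       /* name length, in uint16_t units */
    uint16_t name[0];                 /* UTF-16 service name, e.g. "hello" */
};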

void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->handle = handle;
    obj->cookie = 0;
}
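
bio_put_ref wraps the handle in a flat_binder_object of type BINDER_TYPE_HANDLE; that type tag is what later lets the driver translate the reference for the receiving process. For orientation, the classic (pre-binder_object_header) UAPI layout that this demo code matches looks approximately like the sketch below; consult linux/android/binder.h for the authoritative definition:

#include <stdint.h>

/* Approximate layout of the classic UAPI flat_binder_object. */
struct flat_binder_object_sketch {
    uint32_t type;         /* BINDER_TYPE_BINDER for a local object,
                            * BINDER_TYPE_HANDLE for a reference, as here */
    uint32_t flags;        /* e.g. FLAT_BINDER_FLAG_ACCEPTS_FDS */
    union {
        uintptr_t binder;  /* local object: pointer to the object */
        uint32_t  handle;  /* remote reference: the handle value */
    };
    uintptr_t cookie;      /* extra data for local objects */
};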
8. binder_send_reply sends the looked-up service handle back to the driver
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    ......
        case BR_TRANSACTION: {
            ......
            if (func) {
                ......
                res = func(bs, txn, &msg, &reply); // func = svcmgr_handler
                //send the result (the reply binder_io holding the handle) back via binder_send_reply
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
    ......
}

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;//the sender's data copied into service_manager's mmapped kernel buffer has been consumed and can be freed
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY; // service_manager has handled the request; send the result back with cmd = BC_REPLY
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
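
Note that binder_send_reply goes out through binder_write, i.e. a write-only BINDER_WRITE_READ: read_size is 0, so in the driver only the binder_thread_write branch runs, carrying the BC_FREE_BUFFER and BC_REPLY commands. That is exactly where the next section picks up.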

2.3 The Binder driver receives service_manager's reply to the client's request

1. binder_ioctl
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	//get (or create) the binder_thread for this process
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
		......
	}
	......
}
2. binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

    //copy the binder_write_read header from user space into the kernel
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		......
	}
	......
out:
	return ret;
}
3. binder_thread_write

At this point cmd is BC_REPLY.

static int binder_thread_write(struct binder_proc *proc,
                        struct binder_thread *thread,
                        binder_uintptr_t binder_buffer, size_t size,
                        binder_size_t *consumed)
{
        uint32_t cmd;
        struct binder_context *context = proc->context;
        // the data buffer; as summarized earlier, it consists of a cmd followed by a binder_transaction_data
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        // consumed == 0 for freshly sent data, so ptr points at the start of the user-space buffer
        void __user *ptr = buffer + *consumed;
        // points at the end of the buffer
        void __user *end = buffer + size;
        // read the incoming records (cmd + binder_transaction_data) one by one
        while (ptr < end && thread->return_error.cmd == BR_OK) {
                int ret;
                
                // fetch the cmd value from the user-space buffer
                if (get_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                // advance past cmd so ptr points at the start of the binder_transaction_data
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                        atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
                }
                // this time, as summarized above, cmd is BC_REPLY
                switch (cmd) {
                   ......
                   /*
                BC_TRANSACTION: cmd a process uses to send data
                BR_TRANSACTION: cmd the peer receives for a BC_TRANSACTION
                
                BC_REPLY: cmd a process uses to send a reply
                BR_REPLY: cmd the peer receives for a BC_REPLY
                */
                    case BC_TRANSACTION:
                    case BC_REPLY: {
                            struct binder_transaction_data tr;
                            
                            // copy the binder_transaction_data from user space into the kernel
                            if (copy_from_user(&tr, ptr, sizeof(tr)))
                                    return -EFAULT;
                            // advance past it so ptr points at the next cmd
                            ptr += sizeof(tr);
                            // process the binder_transaction_data
                            binder_transaction(proc, thread, &tr,
                                               cmd == BC_REPLY, 0);
                            break;
                    }
                }
        }
        ......
}

/* Simplified sketch of get_user, as above. */
int get_user(int *val, const int __user *ptr) {
    if (copy_from_user(val, ptr, sizeof(int))) {
        return -EFAULT; // error code
    }
    return 0; // success
}
4. binder_transaction
4.1 Finding the process to reply to
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        ......

        if (reply) {// find the process to reply to
                binder_inner_proc_lock(proc);
				in_reply_to = thread->transaction_stack;//take the binder_transaction off the stack to learn whom to reply to
				if (in_reply_to == NULL) {
					binder_inner_proc_unlock(proc);
					binder_user_error("%d:%d got reply transaction with no transaction stack\n",
							  proc->pid, thread->pid);
					return_error = BR_FAILED_REPLY;
					return_error_param = -EPROTO;
					return_error_line = __LINE__;
					goto err_empty_call_stack;
				}
				if (in_reply_to->to_thread != thread) {
					spin_lock(&in_reply_to->lock);
					binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
						proc->pid, thread->pid, in_reply_to->debug_id,
						in_reply_to->to_proc ?
						in_reply_to->to_proc->pid : 0,
						in_reply_to->to_thread ?
						in_reply_to->to_thread->pid : 0);
					spin_unlock(&in_reply_to->lock);
					binder_inner_proc_unlock(proc);
					return_error = BR_FAILED_REPLY;
					return_error_param = -EPROTO;
					return_error_line = __LINE__;
					in_reply_to = NULL;
					goto err_bad_call_stack;
				}
				thread->transaction_stack = in_reply_to->to_parent;//pop
				binder_inner_proc_unlock(proc);
				binder_set_nice(in_reply_to->saved_priority);
				target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
				if (target_thread == NULL) {
					/* annotation for sparse */
					__release(&target_thread->proc->inner_lock);
					binder_txn_error("%d:%d reply target not found\n",
						thread->pid, proc->pid);
					return_error = BR_DEAD_REPLY;
					return_error_line = __LINE__;
					goto err_dead_binder;
				}
				if (target_thread->transaction_stack != in_reply_to) {
					binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
						proc->pid, thread->pid,
						target_thread->transaction_stack ?
						target_thread->transaction_stack->debug_id : 0,
						in_reply_to->debug_id);
					binder_inner_proc_unlock(target_thread->proc);
					return_error = BR_FAILED_REPLY;
					return_error_param = -EPROTO;
					return_error_line = __LINE__;
					in_reply_to = NULL;
					target_thread = NULL;
					goto err_dead_binder;
				}
				// found the process to reply to
				target_proc = target_thread->proc;
				target_proc->tmp_ref++;
				binder_inner_proc_unlock(target_thread->proc);
        } else {// 1. find the target process to send to
                ......
        }
......
}
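Before moving on, a minimal sketch of how the transaction stack pairs a request with its reply may help. This is our own summary, not kernel code; locking and error paths are omitted:

/*
 * Sketch: how a reply finds its way back (simplified).
 *
 * Earlier, for the client's BC_TRANSACTION:
 *   t->from = client_thread;                  // who will wait for the reply
 *   // when the server thread reads the transaction (BR_TRANSACTION):
 *   t->to_parent = server_thread->transaction_stack;
 *   server_thread->transaction_stack = t;     // push on the server thread
 *
 * Now, for the server's BC_REPLY:
 *   in_reply_to = server_thread->transaction_stack;            // pending txn
 *   server_thread->transaction_stack = in_reply_to->to_parent; // pop
 *   target_thread = in_reply_to->from;        // the waiting client thread
 */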
4.2 Handling flat_binder_object
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        ......

        if (reply) {// find the process to reply to (same code as in 4.1)
                ......
        } else {// 1. find the target process to send to
                ......
        }
        if (target_thread)
                e->to_thread = target_thread->pid;
        e->to_proc = target_proc->pid;

        /* TODO: reuse incoming transaction for reply */
        // allocate the binder_transaction
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        
        .....

        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
        else
                t->from = NULL;
        // record basic information about both ends of the transaction
        t->from_pid = proc->pid;
        t->from_tid = thread->pid;
        t->sender_euid = task_euid(proc->tsk);
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        t->priority = task_nice(current);

        ......

        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                tr->offsets_size, extra_buffers_size,
                !reply && (t->flags & TF_ONE_WAY));
        ......
        
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
        trace_binder_transaction_alloc_buf(t->buffer);
        
        // copy the sender's offsets array into the memory mmap'ed by the target
        // process (test_client); the data itself is copied fragment by fragment
        // in the loop below
        if (binder_alloc_copy_user_to_buffer(
                                &target_proc->alloc,
                                t->buffer,
                                ALIGN(tr->data_size, sizeof(void *)),
                                (const void __user *)
                                        (uintptr_t)tr->data.ptr.offsets,
                                tr->offsets_size)) {
                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EFAULT;
                return_error_line = __LINE__;
                goto err_copy_data_failed;
        }
        ......
		
		//walk the binder_io.offs data passed in by the server; each offset
		//points at a flat_binder_object embedded in the data
		for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
		     buffer_offset += sizeof(binder_size_t)) {
			struct binder_object_header *hdr;
			size_t object_size;
			struct binder_object object;
			binder_size_t object_offset;
			binder_size_t copy_size;
	
			if (binder_alloc_copy_from_buffer(&target_proc->alloc,
							  &object_offset,
							  t->buffer,
							  buffer_offset,
							  sizeof(object_offset))) {
				binder_txn_error("%d:%d copy offset from buffer failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
	
			/*
			 * Copy the source user buffer up to the next object
			 * that will be processed.
			 */
			copy_size = object_offset - user_offset;
			if (copy_size && (user_offset > object_offset ||
					binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer, user_offset,
						user_buffer + user_offset,
						copy_size))) {
				binder_user_error("%d:%d got transaction with invalid data ptr\n",
						proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EFAULT;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			// read the flat_binder_object at object_offset into object
			object_size = binder_get_object(target_proc, user_buffer,
					t->buffer, object_offset, &object);
			if (object_size == 0 || object_offset < off_min) {
				binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
						  proc->pid, thread->pid,
						  (u64)object_offset,
						  (u64)off_min,
						  (u64)t->buffer->data_size);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			/*
			 * Set offset to the next buffer fragment to be
			 * copied
			 */
			user_offset = object_offset + object_size;
	
			hdr = &object.hdr;
			off_min = object_offset + object_size;
			// here the type is BINDER_TYPE_HANDLE: the handle locates the hello
			// service's binder_node via service_manager's binder_ref
			switch (hdr->type) {
			//handle a binder entity
			case BINDER_TYPE_BINDER:
			case BINDER_TYPE_WEAK_BINDER: {
				......
				
			} break;
			//handle a binder reference
			case BINDER_TYPE_HANDLE:
			case BINDER_TYPE_WEAK_HANDLE: {
				struct flat_binder_object *fp;
	
				fp = to_flat_binder_object(hdr);
				ret = binder_translate_handle(fp, t, thread);
				if (ret < 0 ||
				    binder_alloc_copy_to_buffer(&target_proc->alloc,
								t->buffer,
								object_offset,
								fp, sizeof(*fp))) {
					binder_txn_error("%d:%d translate handle failed\n",
						thread->pid, proc->pid);
					return_error = BR_FAILED_REPLY;
					return_error_param = ret;
					return_error_line = __LINE__;
					goto err_translate_failed;
				}
			} break;
			......
	 }
......
}
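The copy logic in the loop above deserves a pause: the offsets array is copied up front, while the data buffer is copied fragment by fragment so that every flat_binder_object can be patched as it crosses the process boundary. A hypothetical layout (offset values invented for illustration):

/* Hypothetical sender buffer with one flat_binder_object at data offset 24:
 *
 *   data:    [ 0 .. 23  plain data ][ 24 ..  flat_binder_object ][ tail ... ]
 *   offsets: [ 24 ]
 *
 * Loop iteration for offset 24:
 *   1. copy the plain fragment [user_offset, 24) into t->buffer
 *   2. read the object at 24 and translate it (binder_translate_handle)
 *   3. write the patched object back into t->buffer at offset 24
 *   4. user_offset = 24 + object_size; the tail is copied after the loop
 */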
4.3 Finding the service's binder_node from the handle
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;
    
    // use the handle to find the service's binder_node in service_manager
	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		// create a binder_ref in the client that points at the service's binder_node
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
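One point worth stressing here: handles are per-process, which is exactly why binder_translate_handle() must rewrite fp->handle. A hypothetical illustration (the descriptor values are invented):

/* Illustration only; descriptor values are hypothetical.
 *
 *   service_manager: binder_ref{desc=3} --\
 *                                          +--> binder_node (hello service)
 *   test_client:     binder_ref{desc=1} --/
 *
 * service_manager replies with handle 3; binder_translate_handle() looks up
 * (or creates) the client's own ref for the same node and rewrites
 * fp->handle to 1 before the reply reaches the client's buffer.
 */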

static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	// find the binder_ref from the handle
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
		
	// get the binder_node from the binder_ref
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
4.4 Creating a binder_ref for the client that points at the service's binder_node
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	// first check whether the client already has a matching binder_ref; if not, allocate a new one
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	//increment the reference count
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
    
    // assign the binder_ref's handle (desc); the client will later use this
    // handle to find the binder_ref and, through it, the binder_node
	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		// slide past each taken descriptor to find the next free handle
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
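The descriptor-assignment loop above is easy to misread, so here is a minimal user-space re-implementation of the same idea (pick_desc is our own name; nothing like it exists in the kernel): start from 0 for service_manager or 1 for everyone else, then walk the existing descriptors in ascending order, sliding past each taken value.

#include <stdio.h>

/* pick_desc: the same idea as the descriptor loop in
 * binder_get_ref_for_node_olocked(). descs[] must be sorted ascending,
 * like an in-order walk of refs_by_desc. */
static unsigned int pick_desc(const unsigned int *descs, int n, int is_context_mgr)
{
    unsigned int desc = is_context_mgr ? 0 : 1;
    int i;

    for (i = 0; i < n; i++) {
        if (descs[i] > desc)
            break;              /* found a hole: desc is free */
        desc = descs[i] + 1;    /* taken: try the next value */
    }
    return desc;
}

int main(void)
{
    unsigned int taken[] = {0, 1, 2, 5};

    /* prints "next desc = 3": 0..2 are taken, 3 is the first free slot */
    printf("next desc = %u\n", pick_desc(taken, 4, 0));
    return 0;
}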
4.5 Putting the data on the client's todo list and waking the client
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        ......

        if (reply) {// find the process to reply to (same code as in 4.1)
                ......
        } else {// 1. find the target process to send to
                ......
        }
        ......
        // buffer allocation, copying of the data into the target's mmap'ed
        // space, and flat_binder_object handling, exactly as shown in 4.2
        ......
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);//pop again, this time on the target thread's side
		// put the work on the client target_thread's todo list
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		// wake up the client
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		......
	} else {
		......
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;
	......
}

static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without I/O block. Without it, threads risk waiting
	 * indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
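After this wakeup, the client returns from its blocking read inside binder_thread_read(). As a rough sketch of what it does there (our own condensation; the real function handles many more work types):

/* Sketch of the woken client in binder_thread_read() (simplified):
 *
 *   w = binder_dequeue_work_head_ilocked(&thread->todo);  // our BINDER_WORK_TRANSACTION
 *   t = container_of(w, struct binder_transaction, work);
 *   cmd = t->buffer->target_node ? BR_TRANSACTION : BR_REPLY; // NULL for replies -> BR_REPLY
 *   put_user(cmd, ptr);             // write BR_REPLY into the user read buffer
 *   // then a binder_transaction_data is written after it, whose data pointers
 *   // point straight into the client's mmap'ed buffer (no extra copy);
 *   // user space parses this BR_REPLY in binder_parse().
 */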

2.4 The client is woken up and reads the handle of its binder_ref

handle = svcmgr_lookup(bs, svcmgr, "hello");

uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);
    
    // ioctl into the kernel for processing
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;
    
    // get the handle of the client's binder_ref that references the service's binder_node
    handle = bio_get_ref(&reply);

    if (handle)
        binder_acquire(bs, handle);

    binder_done(bs, &msg, &reply);

    return handle;
}

uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}
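bio_get_ref() depends on _bio_get_obj() to verify that the current read position is recorded in the offsets array before interpreting it as a flat_binder_object. For reference, in the servicemanager-style binder.c this demo is built on, the helper looks roughly like this:

static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
    size_t n;
    size_t off = bio->data - bio->data0;

    /* is the current read position one of the recorded object offsets? */
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct flat_binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}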

3. A brief summary diagram of the service registration and lookup process

(figure: summary of the service registration and lookup flow)

II. Kernel Source Analysis of Using the Service

Through the source analysis above we obtained the handle of the service the client wants to use; next we analyze how that service is actually used.

1. Overview of the service usage flow

With the experience gained from our earlier Binder source reading, the idea behind using a service should be easy to grasp; following this outline also makes the source analysis below easier to digest.

(figure: outline of the service usage flow)

2. Kernel source analysis of the client using the service

2.1 Sending data to the server

int main(int argc, char **argv)
{
    int fd;
    struct binder_state *bs;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER;
    uint32_t handle;
	int ret;

	if (argc < 2){
        fprintf(stderr, "Usage:\n");
        fprintf(stderr, "%s <hello|goodbye>\n", argv[0]);
        fprintf(stderr, "%s <hello|goodbye> <name>\n", argv[0]);
        return -1;
	}
    
    //open the binder driver
    bs = binder_open(128*1024);
    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }
	g_bs = bs;
    
    //send data to service_manager to obtain a handle to the hello service
	handle = svcmgr_lookup(bs, svcmgr, "hello");
	if (!handle) {
        fprintf(stderr, "failed to get hello service\n");
        return -1;
	}
	g_hello_handle = handle;
	fprintf(stderr, "Handle for hello service = %d\n", g_hello_handle);

	/* send data to the server */
	if (!strcmp(argv[1], "hello"))
	{
		if (argc == 2) {
			sayhello();
		} else if (argc == 3) {
			ret = sayhello_to(argv[2]);
	        fprintf(stderr, "get ret of sayhello_to = %d\n", ret);		
		}
	}

	binder_release(bs, handle);

    return 0;
}
1. sayhello_to

Taking a call to the server's sayhello_to function as the example, we analyze how the client uses the service.

int sayhello_to(char *name)
{
	unsigned iodata[512/4];
	struct binder_io msg, reply;
	int ret;
	int exception;

	/* build the binder_io */
	bio_init(&msg, iodata, sizeof(iodata), 4);
	bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, "IHelloService");

	/* put in the argument */
    bio_put_string16_x(&msg, name);

	/* call binder_call
	msg: the client's data
	reply: carries the data returned by the server
	g_hello_handle: handle referring to the server process
	HELLO_SVR_CMD_SAYHELLO_TO: which server-provided function to invoke
	 */
	if (binder_call(g_bs, &msg, &reply, g_hello_handle, HELLO_SVR_CMD_SAYHELLO_TO))
		return 0;
	
	/* parse the return value out of reply */
	exception = bio_get_uint32(&reply);
	if (exception)
		ret = -1;
	else
		ret = bio_get_uint32(&reply);

	binder_done(g_bs, &msg, &reply);

	return ret;
}
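For reference, the reply parsed above has a very simple layout; it is written by the server in hello_service_handler (shown in 2.2 below):

/* Layout of the reply buffer as the server builds it:
 *   uint32_t exception;   // 0 = no exception
 *   uint32_t result;      // return value computed by the server
 */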
2. binder_call

The binder_call function has been analyzed many times already, so we won't walk through it again; the code is included below for reference.

int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }

    writebuf.cmd = BC_TRANSACTION;//ioctl command type
    writebuf.txn.target.handle = target;//which process the data goes to
    writebuf.txn.code = code;//which function of that process to invoke
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;//size of the data itself
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);//size of the offsets array; its entries point at flat_binder_object entries, cf. bio_put_obj(&msg, ptr);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;//points at the start of the data itself
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;//points at the start of the offsets array

    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    hexdump(msg->data0, msg->data - msg->data0);
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//send the data to the driver via ioctl

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }

        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}
3. binder_ioctl

The user-space ioctl lands in the kernel Binder driver's binder_ioctl function. This function, too, has been analyzed several times already; readers who have made it this far should be quite familiar with it.

// res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); enters the binder driver
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	//get (or create) the binder_thread for process proc
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, arg, thread);
		if (ret)
			goto err;
		break;
		......
	}
	......
}
4. binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

    //copy the binder_write_read struct from user space into kernel space
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		......
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
5. binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
                        struct binder_thread *thread,
                        binder_uintptr_t binder_buffer, size_t size,
                        binder_size_t *consumed)
{
        uint32_t cmd;
        struct binder_context *context = proc->context;
        // the data buffer; as summarized above, it consists of cmd plus a binder_transaction_data
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        // consumed is 0 for freshly sent data, so ptr points at the start of the user-space buffer
        void __user *ptr = buffer + *consumed;
        // points at the end of the data buffer
        void __user *end = buffer + size;
        // read the data sent by the client (cmd + binder_transaction_data), one command at a time
        while (ptr < end && thread->return_error.cmd == BR_OK) {
                int ret;
                
                // fetch the cmd value from the user-space buffer
                if (get_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                // advance the pointer past cmd, to the start of the binder_transaction_data
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                        atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
                        atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
                }
                // as summarized above, cmd here is BC_TRANSACTION
                switch (cmd) {
                   ......
                   /*
                BC_TRANSACTION: cmd with which a process sends data
                BR_TRANSACTION: cmd with which a process receives data sent via BC_TRANSACTION
                
                BC_REPLY: cmd with which a process replies
                BR_REPLY: cmd with which a process receives a BC_REPLY reply
                */
                    case BC_TRANSACTION:
                    case BC_REPLY: {
                            struct binder_transaction_data tr;
                            
                            // copy the binder_transaction_data from user space into kernel space
                            if (copy_from_user(&tr, ptr, sizeof(tr)))
                                    return -EFAULT;
                            // advance the pointer past the binder_transaction_data, to the start of the next cmd
                            ptr += sizeof(tr);
                            // process the binder_transaction_data
                            binder_transaction(proc, thread, &tr,
                                               cmd == BC_REPLY, 0);
                            break;
                    }
                }
        }
        ......
}

// Note: in the real kernel, get_user() is an architecture-specific macro;
// the simplified function below is only meant to illustrate its effect.
int get_user(int *val, const int __user *ptr) {
    if (copy_from_user(val, ptr, sizeof(int))) {
        return -EFAULT; // error code
    }
    return 0; // success
}
6. binder_transaction

This function has been analyzed many times by now, so we only walk through it briefly here.

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	ktime_t t_start_time = ktime_get();
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	// the data buffer sent by the client
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	binder_inner_proc_lock(proc);
	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
	binder_inner_proc_unlock(proc);

	if (reply) {//find the process to reply to
		......
	} else {
	    //1. find the target process to send to
		if (tr->target.handle) {// tr->target.handle == 0 means service_manager; otherwise some other process
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			//look up the binder_ref from the handle sent by the client
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
			    // from the binder_ref, get the target process's binder_node and binder_proc
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
		    //handle the service_manager case
			......
		}
		
		......
		
		binder_inner_proc_unlock(proc);
	}
	
	......
	
	t->work.type = BINDER_WORK_TRANSACTION;
    if (reply) {
		......
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		//push onto this thread's transaction stack
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		//queue the work on the target binder_proc or binder_thread, then wake the target process
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		......
	}
	......
}
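binder_proc_transaction() is where the work actually lands in the target process; its body is not reproduced here. As a rough sketch of its core logic (our own condensation; oneway queuing, frozen-process checks and error handling omitted):

/* Rough sketch of binder_proc_transaction() (simplified):
 *
 *   binder_inner_proc_lock(proc);                         // proc = target_proc
 *   if (!thread && !oneway)                               // no specific target thread yet
 *           thread = binder_select_thread_ilocked(proc);  // pick an idle looper thread
 *   if (thread)
 *           binder_enqueue_thread_work_ilocked(thread, &t->work); // thread->todo
 *   else
 *           binder_enqueue_work_ilocked(&t->work, &proc->todo);   // proc-wide todo
 *   binder_wakeup_thread_ilocked(proc, thread, !oneway);  // wake the target
 *   binder_inner_proc_unlock(proc);
 */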

2.2 The server is woken up and processes the data sent by the client

The server's binder_loop function runs an infinite loop that waits for data; now that data has arrived, it can read and process it.

int main(int argc, char **argv)
{
    int fd;
    struct binder_state *bs;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER;
    uint32_t handle;
	int ret;

    bs = binder_open(128*1024);
    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }

	/* add service */
	ret = svcmgr_publish(bs, svcmgr, "hello", hello_service_handler);
    if (ret) {
        fprintf(stderr, "failed to publish hello service\n");
        return -1;
    }
	ret = svcmgr_publish(bs, svcmgr, "goodbye", goodbye_service_handler);
    if (ret) {
        fprintf(stderr, "failed to publish goodbye service\n");
    }

#if 0
	while (1)
	{
		/* read data */
		/* parse data, and process */
		/* reply */
	}
#endif

	binder_set_maxthreads(bs, 10);
    
    // loop forever, waiting to read data sent by clients
    binder_loop(bs, test_server_handler);

    return 0;
}

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));
     
    // loop forever, reading data from clients
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        // read the data sent by the client (the kernel-side path is the same as when service_manager was woken up earlier, so it is not repeated here)
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        // parse the data sent by the client
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        ......
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                // msg here holds the data sent by the client; func is the server's test_server_handler
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        ......
        }
    }

    return r;
}
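Note the call to binder_send_reply() above: it is what triggers the BC_REPLY path analyzed in section 4. For reference, in the servicemanager-style binder.c the demo is based on, it looks roughly like this; note how a single write both frees the received buffer (BC_FREE_BUFFER) and sends the reply (BC_REPLY):

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;   // let the kernel reclaim the mmap'ed buffer
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;        // this lands in the reply branch of binder_transaction()
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}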

int test_server_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
	int (*handler)(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply);

	handler = (int (*)(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply))txn->target.ptr; // the server-side function pointer, stored when the service was registered with service_manager; here it is hello_service_handler
	
	return handler(bs, txn, msg, reply);
}
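It may seem surprising that txn->target.ptr is a usable function pointer. A short recap of where it comes from (our own summary of the flow across the three articles):

/* Where txn->target.ptr comes from:
 *
 *   svcmgr_publish():      bio_put_obj(&msg, handler);    // fbo.binder = handler
 *   kernel (registration): binder_node.ptr = fbo.binder;  // saved in the node
 *   kernel (this call):    tr.target.ptr = target_node->ptr; // echoed back
 *
 * The function pointer never leaves the server process: it is stored in the
 * kernel's binder_node and only ever delivered back to the process that owns
 * that node.
 */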

int hello_service_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
	/* txn->code tells us which function to call;
	 * arguments, if any, are read from msg,
	 * and results, if any, are written into reply
	 */

	/* sayhello
	 * sayhello_to
	 */
	
    uint16_t *s;
	char name[512];
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
	int i;


    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);


    switch(txn->code) {
    case HELLO_SVR_CMD_SAYHELLO:
		sayhello();
		bio_put_uint32(reply, 0); /* no exception */
        return 0;

    case HELLO_SVR_CMD_SAYHELLO_TO:
		/* pull the strings out of msg */
		s = bio_get_string16(msg, &len);  //"IHelloService"
		s = bio_get_string16(msg, &len);  // name
		if (s == NULL) {
			return -1;
		}
		if (len >= sizeof(name))  /* clamp to avoid overflowing name[512] */
			len = sizeof(name) - 1;
		for (i = 0; i < len; i++)
			name[i] = s[i];
		name[i] = '\0';

		/* call the service function to handle the client's data */
		i = sayhello_to(name);

		/* put the result into reply */
		bio_put_uint32(reply, 0); /* no exception */
		bio_put_uint32(reply, i);
		
        break;

    default:
        fprintf(stderr, "unknown code %d\n", txn->code);
        return -1;
    }

    return 0;
}

2.3 The client receives the data processed by the server

The client parses the server's result out of reply.

int sayhello_to(char *name)
{
	unsigned iodata[512/4];
	struct binder_io msg, reply;
	int ret;
	int exception;

	/* build the binder_io */
	bio_init(&msg, iodata, sizeof(iodata), 4);
	bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, "IHelloService");

	/* put in the argument */
    bio_put_string16_x(&msg, name);

	/* call binder_call */
	if (binder_call(g_bs, &msg, &reply, g_hello_handle, HELLO_SVR_CMD_SAYHELLO_TO))
		return 0;
	
	/* parse the return value out of reply */
	exception = bio_get_uint32(&reply);
	if (exception)
		ret = -1;
	else
		ret = bio_get_uint32(&reply);

	binder_done(g_bs, &msg, &reply);

	return ret;
}

III. Afterword

With this, our three-part series on Binder cross-process communication is complete. We went deep into the kernel to analyze how Binder IPC is actually implemented, and I believe these three articles give a genuinely deep understanding of the Binder IPC design. Admittedly, we covered the kernel source only in broad strokes and left many finer details unexamined, but with this foundation, digging deeper into the kernel source on your own should be considerably easier; at the very least you will no longer be flying blind, with no idea where to start.

Honestly, I don't think my analysis of the Binder kernel source is as good as it could be; there is plenty of room for improvement, and several points were not explained as clearly as they deserve. I will keep strengthening my technical skills, and I hope that one day I can rewrite this as an even more approachable analysis of the Binder driver kernel source.
