1. What IPC mechanisms does Linux provide?

Shared memory, sockets, message queues, and pipes.
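
As a quick illustration of the simplest of these (a minimal userspace sketch, not Android-specific), a pipe passes bytes between a parent and a child process:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int fds[2];
    char buf[32] = {0};

    if (pipe(fds) < 0)      /* fds[0] = read end, fds[1] = write end */
        return 1;

    if (fork() == 0) {      /* child: write a message and exit */
        close(fds[0]);
        write(fds[1], "hello", 5);
        _exit(0);
    }

    close(fds[1]);          /* parent: read what the child wrote */
    read(fds[0], buf, sizeof(buf) - 1);
    printf("parent got: %s\n", buf);
    wait(NULL);
    return 0;
}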

2. Binder vs. traditional IPC

| | Binder | Shared memory | Socket |
| --- | --- | --- | --- |
| Performance | One data copy per transaction | No copy needed | Two data copies per transaction |
| Characteristics | C/S architecture; easy to use | Complex to control; hard to use correctly | C/S architecture; a general-purpose transport, so transfer efficiency is low and overhead is high |
| Security | The kernel assigns each app a UID; supports both named and anonymous Binders | Relies on upper-layer protocols; access points are open, hence insecure | Relies on upper-layer protocols; access points are open, hence insecure |

3. Binder architecture overview

[Figure: Binder subsystem architecture]

[Figure: classes involved in Binder]

[Figure: the Binder driver]

4. Binder driver methods

4.1 binder_init()
// kernel/drivers/staging/android/binder.c
// binder_init() is invoked when the driver is initialized.
device_initcall(binder_init);

static int __init binder_init(void)
{
    int ret;
    char *device_name, *device_names;
    struct binder_device *device;
    struct hlist_node *tmp;

    // Create a single-threaded workqueue named "binder"
    binder_deferred_workqueue = create_singlethread_workqueue("binder");
    if (!binder_deferred_workqueue)
        return -ENOMEM;

    binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
    if (binder_debugfs_dir_entry_root)
        binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                        binder_debugfs_dir_entry_root);

    if (binder_debugfs_dir_entry_root) {
        debugfs_create_file("state",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_state_fops);
        debugfs_create_file("stats",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_stats_fops);
        debugfs_create_file("transactions",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_transactions_fops);
        debugfs_create_file("transaction_log",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log,
                    &binder_transaction_log_fops);
        debugfs_create_file("failed_transaction_log",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log_failed,
                    &binder_transaction_log_fops);
    }

    /*
     * Copy the module_parameter string, because we don't want to
     * tokenize it in-place.
     */
    device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
    if (!device_names) {
        ret = -ENOMEM;
        goto err_alloc_device_names_failed;
    }
    // Copy the device name(s) configured via Kconfig
    strcpy(device_names, binder_devices_param);

    while ((device_name = strsep(&device_names, ","))) {
        ret = init_binder_device(device_name);
        if (ret)
            goto err_init_binder_device_failed;
    }

    // ......

    return ret;
}

binder_init() first creates a workqueue. If debugfs is enabled, it then creates the proc/ directory and the state, stats, transactions, transaction_log, and failed_transaction_log entries under the binder debugfs directory.

It then reads the device name(s) (ANDROID_BINDER_DEVICES) from the kernel config (kernel/drivers/staging/android/Kconfig), allocates a copy of the string, and passes each comma-separated name to init_binder_device().
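
The strsep() tokenization above is easy to try in userspace. A minimal sketch (the device list "binder,hwbinder,vndbinder" is an illustrative value, not necessarily what your kernel config contains):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
    /* Illustrative device list; the real value comes from Kconfig. */
    char *names = strdup("binder,hwbinder,vndbinder");
    char *cursor = names, *name;

    /* strsep() consumes the string in place, returning one token per call. */
    while ((name = strsep(&cursor, ",")))
        printf("would call init_binder_device(\"%s\")\n", name);

    free(names);
    return 0;
}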

static int __init init_binder_device(const char *name)
{
    int ret;
    struct binder_device *binder_device;

    // Allocate the binder_device
    binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
    if (!binder_device)
        return -ENOMEM;

    // Initialize the binder_device, installing the binder_fops handlers
    binder_device->miscdev.fops = &binder_fops;
    binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
    binder_device->miscdev.name = name;

    binder_device->context.binder_context_mgr_uid = INVALID_UID;
    binder_device->context.name = name;

    // Register the misc device
    ret = misc_register(&binder_device->miscdev);
    if (ret < 0) {
        kfree(binder_device);
        return ret;
    }

    // Add binder_device->hlist to the global binder_devices list
    hlist_add_head(&binder_device->hlist, &binder_devices);

    return ret;
}

The registered file operations are:

static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};
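
These entry points are what userspace system calls land on: open("/dev/binder") reaches binder_open(), mmap() reaches binder_mmap(), and ioctl() reaches binder_ioctl(). As a minimal userspace sketch querying the driver's protocol version (the uapi header path varies across kernel versions; error handling trimmed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* BINDER_VERSION, struct binder_version */

int main(void)
{
    struct binder_version vers;
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);  /* -> binder_open() */

    if (fd < 0)
        return 1;

    if (ioctl(fd, BINDER_VERSION, &vers) == 0)         /* -> binder_ioctl() */
        printf("binder protocol version: %d\n", (int)vers.protocol_version);

    close(fd);                                         /* -> binder_release() */
    return 0;
}
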
4.2 binder_open()
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;
    struct binder_device *binder_dev;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
             current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;  // save the current task in the binder_proc
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);  // use the current nice value as the default priority
    binder_dev = container_of(filp->private_data, struct binder_device,
                  miscdev);
    proc->context = &binder_dev->context;

    binder_lock(__func__);  // take the global binder lock

    binder_stats_created(BINDER_STAT_PROC);  // bump the binder_proc creation count
    hlist_add_head(&proc->proc_node, &binder_procs);  // add proc_node to the head of the binder_procs list
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);  // init the list of delivered death notifications
    filp->private_data = proc;  // attach the binder_proc to filp so later calls can find it

    binder_unlock(__func__);  // release the lock

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];

        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        /*
         * proc debug entries are shared between contexts, so
         * this will fail if the process tries to open the driver
         * again with a different context. The printing code will
         * anyway print all contexts that a given PID has, so this
         * is not a problem.
         */
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
            binder_debugfs_dir_entry_proc,
            (void *)(unsigned long)proc->pid,
            &binder_proc_fops);
    }

    return 0;
}
4.3 binder_mmap()
static const struct vm_operations_struct binder_vm_ops = {
    .open = binder_vma_open,
    .close = binder_vma_close,
    .fault = binder_vm_fault,
};

// vma is the process's (userspace) virtual memory area
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if (proc->tsk != current)
        return -EINVAL;

    // Cap the mapping at 4MB
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE,
             "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
             proc->pid, vma->vm_start, vma->vm_end,
             (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
             (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    mutex_lock(&binder_mmap_lock);
    // Fail if this process has already mapped the driver
    if (proc->buffer) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }

    // Reserve a contiguous kernel address range (VM_IOREMAP), the same size as the user vma
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    // Point the proc's buffer at the kernel range just reserved
    proc->buffer = area->addr;
    // Offset between the two mappings: user address - kernel address
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif
    // Allocate the array of physical-page pointers, one slot per page of the vma
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    // Record the size of the mapping
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    // Allocate one physical page (PAGE_SIZE == 4KB) for now and map it into both
    // the kernel range and the user vma; more pages are allocated later on demand
    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    // Link the buffer into the buffers list
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;  // mark the buffer as free
    binder_insert_free_buffer(proc, buffer);  // insert it into the proc->free_buffers rbtree
    proc->free_async_space = proc->buffer_size / 2;  // half the space is reserved for async transactions
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;

    /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
         proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
    return 0;

    // ......
    return ret;
}
static void binder_insert_free_buffer(struct binder_proc *proc,
                      struct binder_buffer *new_buffer)
{
    struct rb_node **p = &proc->free_buffers.rb_node;
    struct rb_node *parent = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;
    size_t new_buffer_size;

    BUG_ON(!new_buffer->free);

    new_buffer_size = binder_buffer_size(proc, new_buffer);

    binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: add free buffer, size %zd, at %p\n",
             proc->pid, new_buffer_size, new_buffer);

    while (*p) {
        parent = *p;
        buffer = rb_entry(parent, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);

        // binder_buffer_size() computes the size of a free buffer
        buffer_size = binder_buffer_size(proc, buffer);

        if (new_buffer_size < buffer_size)
            p = &parent->rb_left;
        else
            p = &parent->rb_right;
    }
    rb_link_node(&new_buffer->rb_node, parent, p);
    // Insert the buffer into the proc->free_buffers rbtree (ordered by size)
    rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
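
On the user side, this mapping is set up once per process by libbinder's ProcessState right after it opens the driver. A sketch of the equivalent calls (the 1MB-minus-two-pages size mirrors ProcessState's BINDER_VM_SIZE, but treat the details as illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Roughly what ProcessState.cpp uses: 1MB minus two guard pages. */
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - (4096 * 2))

int main(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return 1;

    /* Read-only and private: the driver copies transaction data into this
     * region; userspace never writes it directly. This call lands in
     * binder_mmap() above. */
    void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
                      MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (base == MAP_FAILED)
        return 1;

    printf("binder buffer mapped at %p\n", base);
    munmap(base, BINDER_VM_SIZE);
    close(fd);
    return 0;
}
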
4.4 binder_ioctl()
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*pr_info("binder_ioctl: %d:%d %x %lx\n",
            proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);

    // Sleep (interruptibly) until binder_stop_on_user_error drops below 2
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    // Look up the binder_thread for the calling thread in this binder_proc:
    // if it is already in the proc's thread tree it is returned directly,
    // otherwise a new binder_thread is created and added to the proc
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    // Dispatch on the command
    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                 proc->pid, thread->pid);
        binder_free_thread(proc, thread);
        thread = NULL;
        break;
    case BINDER_VERSION: {
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
    // ......
}
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    // Copy the binder_write_read header (not the payload) from userspace into bwr
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);

    // If the write buffer contains data, perform the binder write
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // If the caller asked to read, perform the binder read
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        // If the proc's todo queue is not empty, wake up a waiting thread
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    // Copy the updated header back out to userspace
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}
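
From userspace, BINDER_WRITE_READ is a single ioctl() whose argument is the struct binder_write_read header: the write buffer carries BC_* commands and the read buffer receives BR_* replies. A minimal sketch of a thread registering itself as a looper, roughly what servicemanager's loop does before it starts reading (error handling omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>  /* struct binder_write_read, BC_ENTER_LOOPER */

static void enter_looper(int fd)
{
    struct binder_write_read bwr;
    uint32_t cmd = BC_ENTER_LOOPER;      /* one 32-bit command, no payload */

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (uintptr_t)&cmd;  /* the driver only copies this header */
    bwr.write_size = sizeof(cmd);
    /* read_size == 0, so binder_thread_read() is skipped on this call */

    ioctl(fd, BINDER_WRITE_READ, &bwr);
}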

5. Key data structures

5.1 file_operations

The binder_fops table was already shown in section 4.1: it maps the standard file operations (open, mmap, ioctl, poll, flush, release) onto the driver's binder_* handlers.
5.2 binder_proc

A binder_proc is created for every process that open()s the binder driver; it holds all the bookkeeping the driver needs for that process's IPC.

struct binder_proc {
    struct hlist_node proc_node;           // node in the global binder_procs list
    struct rb_root threads;                // root of the binder_thread rbtree
    struct rb_root nodes;                  // root of the binder_node rbtree
    struct rb_root refs_by_desc;           // binder_ref rbtree keyed by handle (desc)
    struct rb_root refs_by_node;           // binder_ref rbtree keyed by node ptr
    int pid;                               // pid of the process
    struct vm_area_struct *vma;            // the process's mapped virtual address area
    struct mm_struct *vma_vm_mm;           // memory descriptor of the process
    struct task_struct *tsk;               // task struct of the process
    struct files_struct *files;            // open-files struct of the process
    struct hlist_node deferred_work_node;
    int deferred_work;
    void *buffer;                          // start address of the kernel-side buffer
    ptrdiff_t user_buffer_offset;          // offset between user-space and kernel-space addresses
    struct list_head buffers;              // all buffers
    struct rb_root free_buffers;           // free buffers
    struct rb_root allocated_buffers;      // allocated buffers
    size_t free_async_space;               // free space available for async transactions
    struct page **pages;                   // array of pointers to physical pages
    size_t buffer_size;                    // size of the mapped kernel space
    uint32_t buffer_free;                  // total free buffer size
    struct list_head todo;                 // work pending for this process
    wait_queue_head_t wait;                // wait queue
    struct binder_stats stats;             // binder statistics
    struct list_head delivered_death;      // delivered death notifications
    int max_threads;                       // maximum number of threads
    int requested_threads;                 // number of requested threads
    int requested_threads_started;         // number of requested threads started
    int ready_threads;                     // number of threads ready for work
    long default_priority;                 // default priority
    struct dentry *debugfs_entry;
    struct binder_context *context;
};
5.3 binder_node
struct binder_node {
    int debug_id;                     // globally unique id assigned at creation, for debugging
    struct binder_work work;
    union {
        struct rb_node rb_node;       // while the node is alive: entry in the proc's nodes rbtree
        struct hlist_node dead_node;  // after the node dies: entry in the dead-nodes list
    };
    struct binder_proc *proc;         // the process this node belongs to (see binder_proc above)
    struct hlist_head refs;           // all binder_refs pointing at this node
    int internal_strong_refs;
    int local_weak_refs;
    int local_strong_refs;
    binder_uintptr_t ptr;             // pointer to the userspace binder object; matches flat_binder_object.binder
    binder_uintptr_t cookie;          // userspace cookie (extra data); matches flat_binder_object.cookie
    unsigned has_strong_ref:1;        // 1 bit
    unsigned pending_strong_ref:1;    // 1 bit
    unsigned has_weak_ref:1;          // 1 bit
    unsigned pending_weak_ref:1;      // 1 bit
    unsigned has_async_transaction:1; // 1 bit
    unsigned accept_fds:1;            // 1 bit
    unsigned min_priority:8;          // 8 bits, minimum priority
    struct list_head async_todo;      // queue of pending async transactions
};
5.4 binder_buffer
struct binder_buffer {
    struct list_head entry;                  // entry in proc->buffers (ordered by address)
    struct rb_node rb_node;                  // entry in free_buffers or allocated_buffers
    unsigned free:1;                         // 1 if this buffer is free
    unsigned allow_user_free:1;              // 1 if userspace may free it
    unsigned async_transaction:1;            // 1 if used by an async transaction
    unsigned debug_id:29;                    // 29-bit debug id
    struct binder_transaction *transaction;  // the transaction using this buffer
    struct binder_node *target_node;         // the binder node this buffer targets
    size_t data_size;                        // size of the data
    size_t offsets_size;                     // size of the offsets area
    size_t extra_buffers_size;
    uint8_t data[0];                         // start of the data
};

6. Registering Binder's JNI methods

6.1 Zygote startup
6.1.1 Starting the zygote process

zygote is created by the init process when it parses the init.zygote.rc file. Its executable is app_process (built from app_main.cpp), and the process runs under the name zygote.

# system/core/rootdir/init.zygote32.rc
service zygote /system/bin/app_process -Xzygote /system/bin --zygote --start-system-server
    class main
    socket zygote stream 660 root system
    onrestart write /sys/android_power/request_state wake
    onrestart write /sys/power/state on
    onrestart restart media
    onrestart restart netd
    writepid /dev/cpuset/foreground/tasks
6.1.2 app_main.cpp's main()
// frameworks/base/cmds/app_process/app_main.cpp

int main(int argc, char* const argv[])
{
    AppRuntime runtime(argv[0], computeArgBlockSize(argc, argv));
    // Process command line arguments
    // ignore argv[0]
    argc--;
    argv++;

    int i;
    for (i = 0; i < argc; i++) {
        if (argv[i][0] != '-') {
            break;
        }
        if (argv[i][1] == '-' && argv[i][2] == 0) {
            ++i; // Skip --.
            break;
        }
        runtime.addOption(strdup(argv[i]));
    }

    // Parse runtime arguments. Stop at first unrecognized option.
    bool zygote = false;
    bool startSystemServer = false;
    bool application = false;
    String8 niceName;
    String8 className;

    ++i; // Skip unused "parent dir" argument.
    // Parse the arguments passed to app_process by init.zygote32.rc
    while (i < argc) {
        const char* arg = argv[i++];
        if (strcmp(arg, "--zygote") == 0) {
            zygote = true;
            niceName = ZYGOTE_NICE_NAME;
        } else if (strcmp(arg, "--start-system-server") == 0) {
            startSystemServer = true;
        } else if (strcmp(arg, "--application") == 0) {
            application = true;
        } else if (strncmp(arg, "--nice-name=", 12) == 0) {
            niceName.setTo(arg + 12);
        } else if (strncmp(arg, "--", 2) != 0) {
            className.setTo(arg);
            break;
        } else {
            --i;
            break;
        }
    }

    Vector<String8> args;
    if (!className.isEmpty()) {
        // We're not in zygote mode, the only argument we need to pass
        // to RuntimeInit is the application argument.
        //
        // The remainder of args get passed to startup class main(). Make
        // copies of them before we overwrite them with the process name.
        args.add(application ? String8("application") : String8("tool"));
        runtime.setClassNameAndArgs(className, argc - i, argv + i);
    } else {
        // We're in zygote mode.
        maybeCreateDalvikCache();

        if (startSystemServer) {
            args.add(String8("start-system-server"));
        }

        char prop[PROP_VALUE_MAX];
        if (property_get(ABI_LIST_PROPERTY, prop, NULL) == 0) {
            LOG_ALWAYS_FATAL("app_process: Unable to determine ABI list from property %s.",
                ABI_LIST_PROPERTY);
            return 11;
        }

        String8 abiFlag("--abi-list=");
        abiFlag.append(prop);
        args.add(abiFlag);

        // In zygote mode, pass all remaining arguments to the zygote
        // main() method.
        for (; i < argc; ++i) {
            args.add(String8(argv[i]));
        }
    }

    if (!niceName.isEmpty()) {
        runtime.setArgv0(niceName.string());
        set_process_name(niceName.string());
    }

    if (zygote) {
        // Started with --zygote: run AndroidRuntime's start() with ZygoteInit
        runtime.start("com.android.internal.os.ZygoteInit", args, zygote);
    } else if (className) {
        runtime.start("com.android.internal.os.RuntimeInit", args, zygote);
    } else {
        fprintf(stderr, "Error: no class name or --zygote supplied.\n");
        app_usage();
        LOG_ALWAYS_FATAL("app_process: no class name or --zygote supplied.");
        return 10;
    }
}
6.1.3 AndroidRuntime::start()

AndroidRuntime::start() calls startReg() to perform the JNI method registration.

// frameworks/base/core/jni/AndroidRuntime.cpp
void AndroidRuntime::start(const char* className, const Vector<String8>& options, bool zygote)
{
    // ......

    // Locate the Android root directory
    const char* rootDir = getenv("ANDROID_ROOT");
    if (rootDir == NULL) {
        rootDir = "/system";
        if (!hasDir("/system")) {
            LOG_FATAL("No root directory specified, and /android does not exist.");
            return;
        }
        setenv("ANDROID_ROOT", rootDir, 1);
    }

    /* start the virtual machine */
    JniInvocation jni_invocation;
    jni_invocation.Init(NULL);
    JNIEnv* env;
    // Start the Java VM (what startVm() does is worth a separate look when
    // analyzing system startup)
    if (startVm(&mJavaVM, &env, zygote) != 0) {
        return;
    }
    onVmCreated(env);

    /*
     * Register the JNI methods; after this, the Java layer can call
     * down into native code through JNI.
     * Register android functions.
     */
    if (startReg(env) < 0) {
        ALOGE("Unable to register all android natives\n");
        return;
    }

    /*
     * We want to call main() with a String array with arguments in it.
     * At present we have two arguments, the class name and an option string.
     * Create an array to hold them.
     */
    jclass stringClass;
    jobjectArray strArray;
    jstring classNameStr;

    stringClass = env->FindClass("java/lang/String");
    assert(stringClass != NULL);
    strArray = env->NewObjectArray(options.size() + 1, stringClass, NULL);
    assert(strArray != NULL);
    classNameStr = env->NewStringUTF(className);
    assert(classNameStr != NULL);
    env->SetObjectArrayElement(strArray, 0, classNameStr);

    for (size_t i = 0; i < options.size(); ++i) {
        jstring optionsStr = env->NewStringUTF(options.itemAt(i).string());
        assert(optionsStr != NULL);
        env->SetObjectArrayElement(strArray, i + 1, optionsStr);
    }

    /*
     * Start VM. This thread becomes the main thread of the VM, and will
     * not return until the VM exits.
     */
    char* slashClassName = toSlashClassName(className);
    jclass startClass = env->FindClass(slashClassName);
    if (startClass == NULL) {
        ALOGE("JavaVM unable to locate class '%s'\n", slashClassName);
        /* keep going */
    } else {
        jmethodID startMeth = env->GetStaticMethodID(startClass, "main",
            "([Ljava/lang/String;)V");
        if (startMeth == NULL) {
            ALOGE("JavaVM unable to find main() in '%s'\n", className);
            /* keep going */
        } else {
            // This thread becomes the "AndroidRuntime" thread; uncaught Java
            // exceptions reported against AndroidRuntime exit from here.
            env->CallStaticVoidMethod(startClass, startMeth, strArray);

#if 0
            if (env->ExceptionCheck())
                threadExitUncaughtException(env);
#endif
        }
    }
    free(slashClassName);
    // ......
}

JNI registration, then, happens inside AndroidRuntime. In this article we focus on how Binder's JNI methods get registered.

int AndroidRuntime::startReg(JNIEnv* env)
{
    /*
     * This hook causes all future threads created in this process to be
     * attached to the JavaVM. (This needs to go away in favor of JNI
     * Attach calls.)
     */
    androidSetCreateThreadFunc((android_create_thread_fn) javaCreateThreadEtc);

    ALOGV("--- registering native functions ---\n");

    /*
     * Every "register" function calls one or more things that return
     * a local reference (e.g. FindClass). Because we haven't really
     * started the VM yet, they're all getting stored in the base frame
     * and never released. Use Push/Pop to manage the storage.
     */
    env->PushLocalFrame(200);

    // Register the JNI methods
    if (register_jni_procs(gRegJNI, NELEM(gRegJNI), env) < 0) {
        env->PopLocalFrame(NULL);
        return -1;
    }
    env->PopLocalFrame(NULL);

    //createJavaThread("fubar", quickTest, (void*) "hello");

    return 0;
}
static int register_jni_procs(const RegJNIRec array[], size_t count, JNIEnv* env)
{
    for (size_t i = 0; i < count; i++) {
        // mProc is a function pointer, filled in by the REG_JNI macro
        if (array[i].mProc(env) < 0) {
#ifndef NDEBUG
            ALOGD("----------!!! %s failed to load\n", array[i].mName);
#endif
            return -1;
        }
    }
    return 0;
}
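
For reference, the RegJNIRec struct and REG_JNI macro are defined in AndroidRuntime.cpp roughly as follows; the debug build also stores the function name so the failure log above can print it:

#ifdef NDEBUG
    #define REG_JNI(name)      { name }
    struct RegJNIRec {
        int (*mProc)(JNIEnv*);
    };
#else
    #define REG_JNI(name)      { name, #name }
    struct RegJNIRec {
        int (*mProc)(JNIEnv*);
        const char* mName;
    };
#endif

The gRegJNI array that drives this loop contains one entry per register function, including Binder's:
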
static const RegJNIRec gRegJNI[] = {
    // ......
    REG_JNI(register_android_os_SystemProperties),
    // Registers the Binder JNI methods
    REG_JNI(register_android_os_Binder),
    REG_JNI(register_android_os_Parcel),
    REG_JNI(register_android_nio_utils),
    // ......
};
6.2 register_android_os_Binder()
// frameworks/base/core/jni/android_util_Binder.cpp
int register_android_os_Binder(JNIEnv* env)
{
    // Bind android.os.Binder's Java methods to their native implementations
    if (int_register_android_os_Binder(env) < 0)
        return -1;
    // Bind BinderInternal's Java methods to their native implementations
    if (int_register_android_os_BinderInternal(env) < 0)
        return -1;
    // Bind BinderProxy's Java methods to their native implementations
    if (int_register_android_os_BinderProxy(env) < 0)
        return -1;

    jclass clazz = FindClassOrDie(env, "android/util/Log");
    gLogOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gLogOffsets.mLogE = GetStaticMethodIDOrDie(env, clazz, "e",
        "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Throwable;)I");

    clazz = FindClassOrDie(env, "android/os/ParcelFileDescriptor");
    gParcelFileDescriptorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gParcelFileDescriptorOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>",
        "(Ljava/io/FileDescriptor;)V");

    clazz = FindClassOrDie(env, "android/os/StrictMode");
    gStrictModeCallbackOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gStrictModeCallbackOffsets.mCallback = GetStaticMethodIDOrDie(env, clazz,
        "onBinderStrictModePolicyChange", "(I)V");

    return 0;
}

6.2.1 int_register_android_os_Binder()

// frameworks/base/core/jni/android_util_Binder.cpp
const char* const kBinderPathName = "android/os/Binder";

static const JNINativeMethod gBinderMethods[] = {
    /* name, signature, funcPtr */
    { "getCallingPid", "()I", (void*)android_os_Binder_getCallingPid },
    { "getCallingUid", "()I", (void*)android_os_Binder_getCallingUid },
    { "clearCallingIdentity", "()J", (void*)android_os_Binder_clearCallingIdentity },
    { "restoreCallingIdentity", "(J)V", (void*)android_os_Binder_restoreCallingIdentity },
    { "setThreadStrictModePolicy", "(I)V", (void*)android_os_Binder_setThreadStrictModePolicy },
    { "getThreadStrictModePolicy", "()I", (void*)android_os_Binder_getThreadStrictModePolicy },
    { "flushPendingCommands", "()V", (void*)android_os_Binder_flushPendingCommands },
    { "init", "()V", (void*)android_os_Binder_init },
    { "destroy", "()V", (void*)android_os_Binder_destroy },
    { "blockUntilThreadAvailable", "()V", (void*)android_os_Binder_blockUntilThreadAvailable }
};

static int int_register_android_os_Binder(JNIEnv* env)
{
    // Look up kBinderPathName = "android/os/Binder" and get its Class object
    jclass clazz = FindClassOrDie(env, kBinderPathName);

    // Cache the Java-level Binder class info in gBinderOffsets, giving the
    // JNI layer a channel for calling up into the Java layer
    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");

    // RegisterMethodsOrDie() installs the gBinderMethods mappings, giving the
    // Java layer a channel for calling down into the JNI layer
    return RegisterMethodsOrDie(
        env, kBinderPathName,
        gBinderMethods, NELEM(gBinderMethods));
}
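
The cached mExecTransact method ID is what lets the native side call up into Java when a transaction arrives: JavaBBinder::onTransact() (in the same file) invokes it via CallBooleanMethod. A trimmed fragment for illustration, not the complete method:

// Inside JavaBBinder::onTransact() (android_util_Binder.cpp), lightly trimmed.
// mObject is the global reference to the Java-level Binder object.
JNIEnv* env = javavm_to_jnienv(mVM);

jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
    code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);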

6.2.2 int_register_android_os_BinderInternal()

// frameworks/base/core/jni/android_util_Binder.cpp
static const JNINativeMethod gBinderInternalMethods[] = {
    /* name, signature, funcPtr */
    { "getContextObject", "()Landroid/os/IBinder;", (void*)android_os_BinderInternal_getContextObject },
    { "joinThreadPool", "()V", (void*)android_os_BinderInternal_joinThreadPool },
    { "disableBackgroundScheduling", "(Z)V", (void*)android_os_BinderInternal_disableBackgroundScheduling },
    { "handleGc", "()V", (void*)android_os_BinderInternal_handleGc }
};

const char* const kBinderInternalPathName = "com/android/internal/os/BinderInternal";

static int int_register_android_os_BinderInternal(JNIEnv* env)
{
    // Look up the com/android/internal/os/BinderInternal class
    jclass clazz = FindClassOrDie(env, kBinderInternalPathName);

    // Cache the Java-level BinderInternal class info in gBinderInternalOffsets,
    // giving the JNI layer a channel to the Java layer
    gBinderInternalOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderInternalOffsets.mForceGc = GetStaticMethodIDOrDie(env, clazz, "forceBinderGc", "()V");

    // RegisterMethodsOrDie() installs the gBinderInternalMethods mappings,
    // giving the Java layer a channel to the JNI layer
    return RegisterMethodsOrDie(
        env, kBinderInternalPathName,
        gBinderInternalMethods, NELEM(gBinderInternalMethods));
}

6.2.3 int_register_android_os_BinderProxy()

// frameworks/base/core/jni/android_util_Binder.cpp
static const JNINativeMethod gBinderProxyMethods[] = {
    /* name, signature, funcPtr */
    {"pingBinder", "()Z", (void*)android_os_BinderProxy_pingBinder},
    {"isBinderAlive", "()Z", (void*)android_os_BinderProxy_isBinderAlive},
    {"getInterfaceDescriptor", "()Ljava/lang/String;", (void*)android_os_BinderProxy_getInterfaceDescriptor},
    {"transactNative", "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z", (void*)android_os_BinderProxy_transact},
    {"linkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)V", (void*)android_os_BinderProxy_linkToDeath},
    {"unlinkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)Z", (void*)android_os_BinderProxy_unlinkToDeath},
    {"destroy", "()V", (void*)android_os_BinderProxy_destroy},
};

const char* const kBinderProxyPathName = "android/os/BinderProxy";

static int int_register_android_os_BinderProxy(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, "java/lang/Error");
    gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);

    clazz = FindClassOrDie(env, kBinderProxyPathName);
    gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
    gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
        "(Landroid/os/IBinder$DeathRecipient;)V");

    gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
    gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf",
        "Ljava/lang/ref/WeakReference;");
    gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");

    clazz = FindClassOrDie(env, "java/lang/Class");
    gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");

    return RegisterMethodsOrDie(
        env, kBinderProxyPathName,
        gBinderProxyMethods, NELEM(gBinderProxyMethods));
}
