0. Interaction Protocol

(Figure: 客户端服务端与Binder交互协议.jpg - the interaction protocol between client, server, and the Binder driver)

1. The addService Flow

1.1 SystemServer

// frameworks/base/services/java/com/android/server/SystemServer.java
public static void main(String[] args) {
    new SystemServer().run();
}
// frameworks/base/services/java/com/android/server/SystemServer.java
private void run() {
    ......

    // Prepare the main looper thread (this thread).
    android.os.Process.setThreadPriority(
        android.os.Process.THREAD_PRIORITY_FOREGROUND);
    android.os.Process.setCanSelfBackground(false);
    // Start the main looper
    Looper.prepareMainLooper();

    // Initialize native services.
    System.loadLibrary("android_servers");

    // Check whether we failed to shut down last time we tried.
    // This call may not return.
    performPendingShutdown();

    // Initialize the system context.
    createSystemContext();

    // Create the system service manager.
    mSystemServiceManager = new SystemServiceManager(mSystemContext);
    // Publish the SystemServiceManager to LocalServices
    LocalServices.addService(SystemServiceManager.class, mSystemServiceManager);

    // Start services.
    try {
        // Start the bootstrap services, AMS among them
        startBootstrapServices();
        startCoreServices();
        startOtherServices();
    } catch (Throwable ex) {
        Slog.e("System", "******************************************");
        Slog.e("System", "************ Failure starting system services", ex);
        throw ex;
    }

    // For debug builds, log event loop stalls to dropbox for analysis.
    if (StrictMode.conditionallyEnableDebugLogging()) {
        Slog.i(TAG, "Enabled StrictMode for system server main thread.");
    }

    // Loop forever.
    Looper.loop();
    throw new RuntimeException("Main thread loop unexpectedly exited");
}
private void startBootstrapServices() {
    // Wait for installd to finish starting up so that it has a chance to
    // create critical directories such as /data/user with the appropriate
    // permissions. We need this to complete before we initialize other services.
    Installer installer = mSystemServiceManager.startService(Installer.class);

    // Activity manager runs the show.
    // Construct and start AMS via reflection, then fetch the AMS instance
    mActivityManagerService = mSystemServiceManager.startService(
            ActivityManagerService.Lifecycle.class).getService();
    mActivityManagerService.setSystemServiceManager(mSystemServiceManager);
    mActivityManagerService.setInstaller(installer);

    // Set up the Application instance for the system process and get started.
    // Register AMS itself and several services it owns with ServiceManager, and start them
    mActivityManagerService.setSystemProcess();

    // The sensor service needs access to package manager service, app ops
    // service, and permissions service, therefore we start it after them.
    startSensorService();
}
public void setSystemProcess() {
    try {
        // Register AMS itself and the services it owns with ServiceManager so other processes can call them
        ServiceManager.addService(Context.ACTIVITY_SERVICE, this, /* allowIsolated= */ true,
                DUMP_FLAG_PRIORITY_CRITICAL | DUMP_FLAG_PRIORITY_NORMAL | DUMP_FLAG_PROTO);
        ServiceManager.addService(ProcessStats.SERVICE_NAME, mProcessStats);
        ServiceManager.addService("meminfo", new MemBinder(this), /* allowIsolated= */ false,
                DUMP_FLAG_PRIORITY_HIGH);
        ServiceManager.addService("gfxinfo", new GraphicsBinder(this));
        ServiceManager.addService("dbinfo", new DbBinder(this));
        if (MONITOR_CPU_USAGE) {
            ServiceManager.addService("cpuinfo", new CpuBinder(this), /* allowIsolated= */ false,
                    DUMP_FLAG_PRIORITY_CRITICAL);
        }
        ServiceManager.addService("permission", new PermissionController(this));
        ServiceManager.addService("processinfo", new ProcessInfoService(this));

        ApplicationInfo info = mContext.getPackageManager().getApplicationInfo(
                "android", STOCK_PM_FLAGS | MATCH_SYSTEM_ONLY);
        mSystemThread.installSystemApplicationInfo(info, getClass().getClassLoader());

        synchronized (this) {
            // Create the ProcessRecord, set AMS's process attributes, and update the process LRU list and OOM adj values
            ProcessRecord app = newProcessRecordLocked(info, info.processName, false, 0);
            app.persistent = true;
            app.pid = MY_PID;
            app.maxAdj = ProcessList.SYSTEM_ADJ;
            app.makeActive(mSystemThread.getApplicationThread(), mProcessStats);
            synchronized (mPidsSelfLocked) {
                mPidsSelfLocked.put(app.pid, app);
            }
            updateLruProcessLocked(app, false, null);
            updateOomAdjLocked();
        }
    } catch (PackageManager.NameNotFoundException e) {
        throw new RuntimeException(
                "Unable to find android system package", e);
    }
    ......
}

1.2 ServiceManager.addService

// frameworks/base/core/java/android/os/ServiceManager.java
public static void addService(String name, IBinder service, boolean allowIsolated,
        int dumpPriority) {
    try {
        getIServiceManager().addService(name, service, allowIsolated, dumpPriority);
    } catch (RemoteException e) {
        Log.e(TAG, "error in addService", e);
    }
}
private static IServiceManager getIServiceManager() {
    if (sServiceManager != null) {
        return sServiceManager;
    }

    // Find the service manager
    // Equivalent to: new ServiceManagerProxy(new BinderProxy())
    // BinderInternal.getContextObject() returns a BinderProxy object
    sServiceManager = ServiceManagerNative
            .asInterface(Binder.allowBlocking(BinderInternal.getContextObject()));
    return sServiceManager;
}
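The native layer wraps the same two steps in a single helper, defaultServiceManager(). A simplified sketch of it (based on frameworks/native/libs/binder/IServiceManager.cpp in older releases; the caching and retry details are trimmed, so treat the exact body as an approximation):

// Sketch of the native counterpart of getIServiceManager() (IServiceManager.cpp, simplified).
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    // getContextObject(NULL) yields a BpBinder(0); interface_cast wraps it in a BpServiceManager,
    // just like asInterface() wraps the BinderProxy in a ServiceManagerProxy on the Java side.
    gDefaultServiceManager = interface_cast<IServiceManager>(
            ProcessState::self()->getContextObject(NULL));
    return gDefaultServiceManager;
}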
1.2.1 BinderInternal.getContextObject()
// frameworks/base/core/java/com/android/internal/os/BinderInternal.java
// A JNI method; the implementation lives in the corresponding .cpp file
public static final native IBinder getContextObject();
// frameworks/base/core/jni/android_util_Binder.cpp
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    // Open the binder driver (ProcessState is a singleton), create a BpBinder(handle) object and return it
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
    return javaObjectForIBinder(env, b);
}
// If the argument is a JavaBBinder, return the Java object that was used to create it.
// Otherwise return a BinderProxy for the IBinder. If a previous call was passed the
// same IBinder, and the original BinderProxy is still alive, return the same BinderProxy.
// In other words: if val is a JavaBBinder, return the existing server-side object; otherwise return a BinderProxy
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;

    // Check whether val is a JavaBBinder; if so, return the Java object it wraps
    if (val->checkSubclass(&gBinderOffsets)) {
        // One of our own!
        jobject object = static_cast<JavaBBinder*>(val.get())->object();
        LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
        return object;
    }

    // For the rest of the function we will hold this lock, to serialize
    // looking/creation of Java proxies for native Binder proxies.
    AutoMutex _l(mProxyLock);

    // Someone else's... do we know about it?
    // Look up the BinderProxy cached in the BpBinder; on the first call it is null
    jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
    if (object != NULL) {
        jobject res = jniGetReferent(env, object);
        if (res != NULL) {
            ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
            return res;
        }
        LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!", object, val.get());
        android_atomic_dec(&gNumProxyRefs);
        val->detachObject(&gBinderProxyOffsets);
        env->DeleteGlobalRef(object);
    }

    // Create the BinderProxy object
    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
    // The if block below binds the BinderProxy and the BpBinder to each other
    if (object != NULL) {
        LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
        // The proxy holds a reference to the native object.
        // BinderProxy.mObject records the BpBinder object,
        // i.e. the BinderProxy is bound to the BpBinder
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
        val->incStrong((void*)javaObjectForIBinder);

        // The native object needs to hold a weak reference back to the
        // proxy, so we can retrieve the same proxy if it is still active.
        jobject refObject = env->NewGlobalRef(
                env->GetObjectField(object, gBinderProxyOffsets.mSelf));
        // Attach the BinderProxy information to the BpBinder's mObjects member
        val->attachObject(&gBinderProxyOffsets, refObject,
                jnienv_to_javavm(env), proxy_cleanup);

        // Also remember the death recipients registered on this proxy
        sp<DeathRecipientList> drl = new DeathRecipientList;
        drl->incStrong((void*)javaObjectForIBinder);
        // BinderProxy.mOrgue records the death-notification object
        env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));

        // Note that a new object reference has been created.
        android_atomic_inc(&gNumProxyRefs);
        incRefsCreated(env);
    }

    return object;
}
1.2.2 ServiceManagerNative.asInterface
// frameworks/base/core/java/android/os/ServiceManagerNative.java
static public IServiceManager asInterface(IBinder obj) {
    if (obj == null) {
        return null;
    }
    // obj is a BinderProxy, so queryLocalInterface() returns null by default
    IServiceManager in = (IServiceManager)obj.queryLocalInterface(descriptor);
    if (in != null) {
        return in;
    }

    // Wrap the IServiceManager BinderProxy in a ServiceManagerProxy.
    // The ServiceManagerProxy holding SM's BinderProxy is then returned to the static addService() method in ServiceManager.java.
    return new ServiceManagerProxy(obj);
}
// frameworks/base/core/java/android/os/ServiceManagerNative.java (inner class ServiceManagerProxy)
class ServiceManagerProxy implements IServiceManager {
    // mRemote is the BinderProxy object
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }
}
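For comparison, the native layer follows the same "local first, proxy otherwise" pattern; the asInterface() generated by IMPLEMENT_META_INTERFACE looks roughly like the sketch below (reconstructed from the macro, so treat it as an approximation rather than the literal source):

// Approximate expansion of IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager")
// for asInterface(); it is the native twin of the Java method above.
sp<IServiceManager> IServiceManager::asInterface(const sp<IBinder>& obj)
{
    sp<IServiceManager> intr;
    if (obj != NULL) {
        // Same-process object: use the local implementation directly.
        intr = static_cast<IServiceManager*>(
                obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            // Remote object: wrap the BpBinder in a proxy, like new ServiceManagerProxy(obj).
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}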

1.3 ServiceManagerProxy.addService()

// frameworks/base/core/java/android/os/ServiceManagerNative.java (inner class ServiceManagerProxy)
class ServiceManagerProxy implements IServiceManager {
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }

    public IBinder asBinder() {
        return mRemote;
    }

    public void addService(String name, IBinder service, boolean allowIsolated)
            throws RemoteException {
        // Package the arguments into a data Parcel
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        data.writeStrongBinder(service); // service == AMS: pack AMS into data
        data.writeInt(allowIsolated ? 1 : 0);
        // Send the data towards the binder driver through SM's BinderProxy; the calling thread will then block
        mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
        reply.recycle();
        data.recycle();
    }
}
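The native BpServiceManager proxy does the equivalent packaging before calling transact(); a simplified sketch along the lines of frameworks/native/libs/binder/IServiceManager.cpp (details such as the return-value handling are approximations):

// Sketch of the native proxy's addService(), mirroring the Java code above.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
                            bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);            // flatten the Binder object into the Parcel
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}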
1.3.1 writeStrongBinder
// frameworks/base/core/java/android/os/Parcel.java
public final void writeStrongBinder(IBinder val) {
    nativeWriteStrongBinder(mNativePtr, val);
}
// frameworks/base/core/jni/android_os_Parcel.cpp
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
    // Convert the Java-level Parcel to the native Parcel
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        // ibinderForJavaObject() resolves the IBinder to AMS's server-side BBinder, i.e. the JavaBBinder object
        const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
        if (err != NO_ERROR) {
            signalExceptionForError(env, clazz, err);
        }
    }
}
1.3.1.1 ibinderForJavaObject()
// frameworks/base/core/jni/android_util_Binder.cpp
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
    if (obj == NULL) return NULL;

    // obj is the AMS server object, so this branch is taken: fetch the JavaBBinder from the JavaBBinderHolder and return it
    if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
        JavaBBinderHolder* jbh = (JavaBBinderHolder*)
            env->GetLongField(obj, gBinderOffsets.mObject);
        return jbh != NULL ? jbh->get(env, obj) : NULL;
    }

    // For a client-side object (a BinderProxy), return the BpBinder instead
    if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
        return (IBinder*)
            env->GetLongField(obj, gBinderProxyOffsets.mObject);
    }

    ALOGW("ibinderForJavaObject: %p is not a Binder object", obj);
    return NULL;
}
// frameworks/base/core/jni/android_util_Binder.cpp (class JavaBBinderHolder)
class JavaBBinderHolder : public RefBase
{
public:
    sp<JavaBBinder> get(JNIEnv* env, jobject obj)
    {
        AutoMutex _l(mLock);
        sp<JavaBBinder> b = mBinder.promote();
        if (b == NULL) {
            // Create a JavaBBinder object on first use
            b = new JavaBBinder(env, obj);
            mBinder = b;
        }

        return b;
    }
};
1.3.1.2 parcel->writeStrongBinder
// NOTE: val here is the JavaBBinder object
// frameworks/native/libs/binder/Parcel.cpp
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        // The current process owns the Binder entity, so the local Binder is not null.
        // localBinder() returns this for the AMS object itself and NULL for a proxy;
        // AMS is registering itself here, so local == this and the else branch below runs.
        IBinder *local = binder->localBinder();
        if (!local) { // only taken when local is NULL
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Flatten the Binder object into a flat_binder_object
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local); // the cookie stores the AMS (local) pointer
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out){
    // Write the flat_binder_object into the Parcel
    return out->writeObject(flat, false);
}
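For reference, this is roughly the flat_binder_object that ends up in the Parcel; the exact layout varies across kernel versions, so the sketch below, following the older non-header layout this article is based on, should be read as an approximation:

// Approximate layout of flat_binder_object (uapi binder.h, older layout).
struct flat_binder_object {
    __u32            type;     // BINDER_TYPE_BINDER for a local entity, BINDER_TYPE_HANDLE for a reference
    __u32            flags;    // e.g. FLAT_BINDER_FLAG_ACCEPTS_FDS plus the priority mask
    union {
        binder_uintptr_t binder;   // local object: weak-ref pointer of the BBinder
        __u32            handle;   // remote object: handle of the corresponding binder_ref
    };
    binder_uintptr_t cookie;       // local object: pointer to the BBinder itself (here AMS's JavaBBinder)
};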
1.3.2 mRemote.transact
// frameworks/base/core/java/android/os/Binder.java (inner class BinderProxy)
final class BinderProxy implements IBinder {
    public native boolean pingBinder();
    public native boolean isBinderAlive();

    public IInterface queryLocalInterface(String descriptor) {
        return null;
    }

    // transact() forwards to the native BpBinder, which eventually hands the data to the driver
    public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
        return transactNative(code, data, reply, flags);
    }
}
// frameworks/base/core/jni/android_util_Binder.cpp
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
    jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    if (dataObj == NULL) {
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }

    // Convert the Java Parcel objects into native Parcel objects
    Parcel* data = parcelForJavaObject(env, dataObj);
    if (data == NULL) {
        return JNI_FALSE;
    }
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }

    // Obtain the BpBinder object:
    // gBinderProxyOffsets.mObject is the BpBinder bound to this BinderProxy earlier
    IBinder* target = (IBinder*)
        env->GetLongField(obj, gBinderProxyOffsets.mObject);
    ......

    // Call BpBinder::transact() to send the data
    status_t err = target->transact(code, *data, reply, flags);
    ......
    return JNI_FALSE;
}
// frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        // Delegate to IPCThreadState::transact() to send the data
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
// frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    // Sanity-check the data
    status_t err = data.errorCheck();

    // TF_ACCEPT_FDS = 0x10: the reply may contain file descriptors
    // TF_ONE_WAY: the call is asynchronous, no need to wait for a reply
    // TF_ROOT_OBJECT: the payload is a root object
    // TF_STATUS_CODE: the payload is a 32-bit status code
    flags |= TF_ACCEPT_FDS;

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // Stage the data and store the result in mOut.
        // (The command is only really sent to the binder driver in talkWithDriver().)
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    // A synchronous call takes the if branch, an asynchronous one the else branch.
    // waitForResponse() is where the data is actually pushed to the driver.
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    // Pack everything into a binder_transaction_data
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    // Write the command (BC_TRANSACTION) and the packed data into mOut.
    // waitForResponse() then pushes them to the driver via talkWithDriver().
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        // Loop on talkWithDriver() waiting for a result; as soon as one arrives, parse it in the switch below
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        // Once the client has finished talking to the binder driver it comes back here;
        // the client receives BR_TRANSACTION_COMPLETE
        switch (cmd) {
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    // Is the read buffer (mIn) already drained? At this point there is nothing to read.
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We do not write to mOut while a read is still pending
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data(); // fill in the size and content to write in bwr

    // This is what we'll read.
    // mOut has just been filled with data, so the else branch is taken here
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    ......

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        // The while condition will not hold, so this runs exactly once:
        // issue the BC_TRANSACTION command to the driver
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);

    return err;
}
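The bwr handed to ioctl(BINDER_WRITE_READ) is a binder_write_read; its (simplified) layout from the uapi binder.h is sketched below. One ioctl can both push commands from mOut and pull results into mIn, which is why the write_* and read_* halves exist side by side:

// Layout of binder_write_read (uapi binder.h, simplified).
struct binder_write_read {
    binder_size_t    write_size;      // bytes available in write_buffer (mOut)
    binder_size_t    write_consumed;  // bytes the driver actually consumed
    binder_uintptr_t write_buffer;    // user-space address of the command stream
    binder_size_t    read_size;       // capacity of read_buffer (mIn)
    binder_size_t    read_consumed;   // bytes the driver filled in
    binder_uintptr_t read_buffer;     // user-space address for returned commands
};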
1.3.3 Writing the data in the driver

// kernel/drivers/staging/android/binder.c
// The ioctl command is BINDER_WRITE_READ, so binder_ioctl_write_read() runs;
// the write half then lands in binder_thread_write()
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    switch (cmd) {
    case BC_TRANSACTION:
    case BC_REPLY: {
        struct binder_transaction_data tr;

        // What is copied from user space here is only the transaction header, not the payload;
        // the payload is copied in binder_transaction()
        if (copy_from_user(&tr, ptr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_transaction(proc, thread, &tr,
                   cmd == BC_REPLY, 0);
        break;
    }
    }
static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply,
                   binder_size_t extra_buffers_size)
{
    // reply is false here (cmd == BC_TRANSACTION)
    if (reply) {

    } else {
        // The target is service_manager, whose handle is 0, so the else branch is taken
        if (tr->target.handle) {

        } else {
            // Get the target's target_node; the target is service_manager,
            // so the global binder_context_mgr_node can be used directly
            target_node = context->binder_context_mgr_node;
            ......
        }
        e->to_node = target_node->debug_id;
        // target_proc is the service_manager process
        target_proc = target_node->proc;
        ......
    }

    // Locate the todo queue and wait queue of the service_manager process
    target_list = &target_proc->todo;
    target_wait = &target_proc->wait;

    // Allocate a binder_transaction (variable t) describing the work to be done; it will
    // eventually be appended to the target's todo list, so the target can pick it up when
    // it wakes. This also prepares for copying the data into the shared buffer.
    t = kzalloc(sizeof(*t), GFP_KERNEL);

    // Allocate a binder_work (variable tcomplete) marking that the calling thread has an
    // unfinished transaction; it is later added to the calling thread's own todo list
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

    // Fill in the transaction t.
    // A synchronous call takes the if branch
    if (!reply && !(tr->flags & TF_ONE_WAY))
        // Remember the calling thread in t->from so the reply can be routed back to it
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;   // the target of this transaction is the service_manager process
    t->to_thread = target_thread;
    t->code = tr->code;         // code = ADD_SERVICE_TRANSACTION for this call
    t->flags = tr->flags;       // flags = 0 for this call
    t->priority = task_nice(current);

    // Allocate a buffer on the receiver side (the service_manager process): the memory for this
    // transaction comes from the area service_manager mmap'ed against the binder driver, i.e.
    // memory shared between the receiver and the driver; t->buffer points into that shared area
    t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, extra_buffers_size, !reply && (t->flags & TF_ONE_WAY));

    // !!! This is the single real data copy of a Binder transaction !!!
    // Copy ptr.buffer and ptr.offsets of the user-space binder_transaction_data into the kernel
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) tr->data.ptr.buffer, tr->data_size)) { }
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) { }

    for (; offp < off_end; offp++) {
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            // Create a binder_ref, service_manager's reference to this Binder node (analyzed below)
            ret = binder_translate_binder(fp, t, thread);
            if (ret < 0) {
                return_error = BR_FAILED_REPLY;
                goto err_translate_failed;
            }
        } break;
        }
    }

    if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        // A synchronous call takes this branch
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        // Record this transaction for later lookup (service_manager uses it to know who
        // called, so the reply can be routed back)
        thread->transaction_stack = t;
    }

    t->work.type = BINDER_WORK_TRANSACTION;          // the target needs this type when it starts processing
    list_add_tail(&t->work.entry, target_list);      // append t to the target's todo list
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; // the calling thread sees this the next time it reads from the driver
    list_add_tail(&tcomplete->entry, &thread->todo); // the current thread has one unfinished operation
    if (target_wait) wake_up_interruptible(target_wait); // wake the target, i.e. service_manager
}

At this point wake_up_interruptible() has woken service_manager, which starts handling the transaction in the BINDER_WORK_TRANSACTION case of binder_thread_read().

// kernel/drivers/staging/android/binder.c
static int binder_translate_binder(struct flat_binder_object *fp,
                   struct binder_transaction *t,
                   struct binder_thread *thread)
{
    struct binder_node *node;
    struct binder_ref *ref;
    struct binder_proc *proc = thread->proc;
    struct binder_proc *target_proc = t->to_proc;

    node = binder_get_node(proc, fp->binder);
    if (!node) {
        node = binder_new_node(proc, fp->binder, fp->cookie);
        if (!node)
            return -ENOMEM;

        node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
    }
    ......

    // Create a binder_ref in the target process (service_manager) pointing at this node
    ref = binder_get_ref_for_node(target_proc, node);
    if (!ref)
        return -EINVAL;

    // Change the type to BINDER_TYPE_HANDLE
    if (fp->hdr.type == BINDER_TYPE_BINDER)
        fp->hdr.type = BINDER_TYPE_HANDLE;
    else
        fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
    fp->binder = 0;
    fp->handle = ref->desc;
    fp->cookie = 0;
    binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

    return 0;
}
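To make the node/ref relationship concrete, here is a heavily trimmed sketch of the two kernel structures involved (only a few representative fields are shown; the real structs carry much more bookkeeping):

// Heavily simplified: how the driver represents a Binder entity and a reference to it.
struct binder_node {                 // one per Binder entity, owned by the server's binder_proc
    struct binder_proc *proc;        // the process that owns the entity (here: system_server / AMS)
    binder_uintptr_t    ptr;         // the weak-ref pointer from flat_binder_object.binder
    binder_uintptr_t    cookie;      // the BBinder pointer from flat_binder_object.cookie
};

struct binder_ref {                  // one per (process, node) pair, owned by the referencing binder_proc
    struct binder_proc *proc;        // the process holding the reference (here: service_manager)
    struct binder_node *node;        // the entity being referenced
    uint32_t            desc;        // the handle value handed back to user space
};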
1.3.4 Reading data from the driver (service_manager has been woken up)

// kernel/drivers/staging/android/binder.c
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread)

// The read half enters binder_thread_read(); the calling thread reads its own todo list,
// and the woken service_manager does the same on its side
ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
// kernel/drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    // A binder_work was added to thread->todo earlier, so w is not empty
    // and its type is BINDER_WORK_TRANSACTION_COMPLETE
    if (!list_empty(&thread->todo)) {
        w = list_first_entry(&thread->todo, struct binder_work,
                     entry);
    }

    // Jump straight to the switch: the type is BINDER_WORK_TRANSACTION_COMPLETE
    case BINDER_WORK_TRANSACTION_COMPLETE: {
        cmd = BR_TRANSACTION_COMPLETE;
        // Write back the BR_TRANSACTION_COMPLETE command:
        // the client sent BC_TRANSACTION and the driver answers with BR_TRANSACTION_COMPLETE
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
    }

At this point the client's exchange with the driver for the send is complete: the client receives BR_TRANSACTION_COMPLETE and returns to waitForResponse() to handle the returned command.

1.3.5 Return to user space and suspend the client

Back in user space, the client handles the BR_TRANSACTION_COMPLETE command returned by the driver.

// frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult){
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;

        // mIn now contains data. talkWithDriver() runs again on the next iteration; this time
        // bwr.write_size == 0 while bwr.read_size is still > 0, so the driver goes straight into binder_thread_read()
        cmd = (uint32_t)mIn.readInt32();

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            // This is a synchronous call, so the if is not taken and the while loop continues
            if (!reply && !acquireResult) goto finish;
            break;
        }
    }
// kernel/drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) {
    // The client thread is still waiting for the server's reply, so thread->transaction_stack
    // is not NULL and wait_for_proc_work is false
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);

    ......
    if (wait_for_proc_work) {
        ......
    } else {
        if (non_block) { // the call is synchronous (blocking), so the else branch runs
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            // Suspend the client until there is thread work
            ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }
}
1.3.6 service_manager starts processing the message

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{

    while (1) {
        case BINDER_WORK_TRANSACTION: {
            // Copy the caller's request over to service_manager and adjust the various queues
            t = container_of(w, struct binder_transaction, work);
        } break;

        // target_node is the service_manager node
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;

            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            // Set the command the server side has to handle
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }

        // Send the command to user space: service_manager receives BR_TRANSACTION
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
    }

}

The binder driver has now delivered BR_TRANSACTION to service_manager, whose loop (binder_loop/binder_parse) parses the command.

// Note: we are now in servicemanager/binder.c
// frameworks/native/cmds/servicemanager/binder.c
// After starting up, SM loops forever inside binder_loop() waiting for commands
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        // Parse the returned commands
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);

    }
}
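binder_write(), used above to send BC_ENTER_LOOPER (and later to send the reply), is simply a write-only BINDER_WRITE_READ ioctl. Roughly, from the same binder.c (error handling simplified):

// Write-only helper used by service_manager (servicemanager/binder.c, simplified):
// it issues a BINDER_WRITE_READ ioctl with read_size == 0, so the driver only
// consumes the command buffer and returns without blocking for a read.
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    return ioctl(bs->fd, BINDER_WRITE_READ, &bwr) < 0 ? -1 : 0;
}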
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    while (ptr < end) {

        switch(cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;
                // Initialize the reply
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply); // the request is handled by svcmgr_handler
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); // send the reply back to the binder driver
            }
            ptr += sizeof(*txn);
            break;
        }
        ......
        }
    }

    return r;
}
// frameworks/native/cmds/servicemanager/service_manager.c
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    ......
    switch(txn->code) {
    case SVC_MGR_ADD_SERVICE:
        // Register the requested service
        ......
        if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, txn->sender_pid))
            return -1;
        break;
    }
    ......
}
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    // Check permissions: is this caller allowed to register a service?
    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }

    // Check whether the service is already registered
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si); // the service was already registered: release the old entry
        }
        si->handle = handle; // store the new handle
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) { // out of memory, cannot allocate the record
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t)); // copy the service name
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist; // svclist keeps all registered services
        svclist = si;
    }

    // Send a BC_ACQUIRE command targeting this handle to the binder driver via ioctl,
    // incrementing the strong reference count on the binder_ref
    binder_acquire(bs, handle);
    // Send a BC_REQUEST_DEATH_NOTIFICATION command to the binder driver via ioctl,
    // mainly so that memory can be cleaned up when the service dies
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
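The entries on svclist are svcinfo records; a trimmed sketch of the structure from service_manager.c (field order and types are approximate):

// Trimmed sketch of the record stored on svclist (frameworks/native/cmds/servicemanager/service_manager.c).
struct svcinfo {
    struct svcinfo *next;        // singly linked list of all registered services
    uint32_t handle;             // handle of the service's binder_ref in service_manager's process
    struct binder_death death;   // death-notification callback (svcinfo_death)
    int allow_isolated;          // may isolated processes look this service up?
    size_t len;                  // length of the name in UTF-16 code units
    uint16_t name[0];            // UTF-16 service name, allocated inline after the struct
};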

After handling the add request, service_manager sends its reply back to the binder driver to signal that processing has finished.

// frameworks/native/cmds/servicemanager/binder.c
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER; // command to free the transaction buffer
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY; // the reply command
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) { // status == 0 here, so the else branch runs
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data)); // send it to the binder driver
}

After the binder driver receives BC_REPLY via binder_ioctl_write_read()/binder_thread_write(), it calls binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0).

Just as it did for the client, the driver answers service_manager with BR_TRANSACTION_COMPLETE and suspends it; it then wakes the client via wake_up_interruptible(target_wait).

Once the client is woken up, the driver hands it BR_REPLY in the BINDER_WORK_TRANSACTION case, which ends this Binder transaction.
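Once AMS is registered, any other process can look it up through the same ServiceManager channel. A minimal native-side sketch of the lookup (the name "activity" is what AMS registered under, i.e. Context.ACTIVITY_SERVICE):

// Minimal sketch: looking up a service registered with service_manager from native code.
#include <binder/IServiceManager.h>
#include <binder/IBinder.h>

using namespace android;

sp<IBinder> getActivityManagerBinder()
{
    // defaultServiceManager() is the native counterpart of ServiceManager.getIServiceManager()
    sp<IServiceManager> sm = defaultServiceManager();
    // getService() asks service_manager for the handle and returns a BpBinder wrapping it
    return sm->getService(String16("activity"));
}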

2. Call Flow

2.1 How the service is registered with SM

getIServiceManager().addService(name, service, false);

  • getIServiceManager - new ServiceManagerProxy(new BinderProxy())
    • ServiceManagerNative.asInterface(BinderInternal.getContextObject())
      • BinderInternal.getContextObject - returns a BinderProxy object
        • ProcessState::self()->getContextObject: creates a BpBinder
        • javaObjectForIBinder - binds the BinderProxy and the BpBinder to each other
      • ServiceManagerNative.asInterface
        • returns a ServiceManagerProxy
  • addService
    • data.writeStrongBinder(service); - service == AMS - puts AMS into data
    • mRemote.transact - mRemote == BinderProxy
      • fetch the BpBinder - IPCThreadState::transact
        • 1. writeTransactionData - writes the command into mOut - cmd == BC_TRANSACTION
        • 2. waitForResponse
          • talkWithDriver - very important - a long stretch of code
            • binder_transaction
              • handle == 0 -> sm
                1. target_node
                2. proc
                3. todo, wait
                4. create t and tcomplete
                5. the one data copy
                6. binder_translate_binder -> handle
                7. thread->transaction_stack = t; -> lets sm find the client
                8. t->work.type = BINDER_WORK_TRANSACTION; - for sm - the work to do
                9. tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - for the client - so it suspends
                10. wake_up_interruptible wakes sm
        • the client is suspended
          • BR_NOOP, BR_TRANSACTION_COMPLETE
          • wait_event_freezable - suspend
        • sm handles the add-service request
          • BINDER_WORK_TRANSACTION - has to handle cmd == BR_TRANSACTION
            1. initialize the reply
            2. res = func(bs, txn, &msg, &reply); - function pointer - svcmgr_handler: gets or adds a service
              1. sm keeps all services on svclist
            3. binder_send_reply - BC_REPLY
            4. t->work.type = BINDER_WORK_TRANSACTION; - for the client
               list_add_tail(&t->work.entry, target_list);
               tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - for SM - which gets suspended
               list_add_tail(&tcomplete->entry, &thread->todo);
            5. wake_up_interruptible(target_wait); - wakes the client
        • the client is woken up
          • it was suspended in binder_thread_read() and resumes from there
          • BINDER_WORK_TRANSACTION - cmd = BR_REPLY;
2.2 How the server side (BBinder) handles onTransact
  • IPCThreadState::executeCommand
    • error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer, &reply, tr.flags);
    • JavaBBinder::onTransact - C++
    • jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact, code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags); - calls Binder.execTransact() on the Java side

binder_node - a Binder entity (node) in the driver

binder_ref - a reference to a Binder node

Binder thread pool management

  • The main Binder thread never exits; non-main pool threads may.
  • The maximum number of non-main pool threads is 15.
  • The single main thread does not count against that limit.
  • So the real upper bound is 15 + 1 + any other threads that join the pool.

ProcessState represents the process; every Binder thread runs inside an IPCThreadState, and one is created for the current thread if it does not exist yet.
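As an illustration, this is roughly how a native server process wires the two classes together (a minimal sketch; a Java system service gets an equivalent thread pool set up by the runtime):

// Minimal sketch: starting the Binder thread pool in a native server process.
#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>

using namespace android;

int main()
{
    // ProcessState is per-process: it opens the binder driver and mmaps the receive buffer.
    ProcessState::self()->startThreadPool();   // spawn the first pool thread
    // Each pool thread loops in IPCThreadState, reading commands from the driver
    // and dispatching incoming transactions to BBinder::transact().
    IPCThreadState::self()->joinThreadPool();  // the main thread joins too; normally never returns
    return 0;
}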

(Figure: Binder进程与线程.png - Binder processes and threads)

(Figure: Binder类关系图.png - Binder class relationship diagram)