0. Interaction protocol
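The diagram that originally illustrated this section is not preserved. Reconstructed from the walkthrough below, the command exchange for one `addService` call looks like this:

```
Client (system_server)        Binder driver                 service_manager
----------------------        -------------                 ---------------
BC_TRANSACTION  ---------->   queue BINDER_WORK_TRANSACTION
                              on sm's todo list,
                              wake_up_interruptible(sm)
            <-- BR_TRANSACTION_COMPLETE
(client blocks in                                           reads the work:
 waitForResponse)             --------------------------->  BR_TRANSACTION
                                                            svcmgr_handler()
                              <---------------------------  BC_REPLY
                              queue work for both sides:
            <-- BR_REPLY      BR_TRANSACTION_COMPLETE --->  (sm blocks again
(transaction done)                                           in binder_loop)
```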
1. The addService flow

1.1 SystemServer

```java
public static void main(String[] args) {
    new SystemServer().run();
}
```
```java
private void run() {
    ......
    android.os.Process.setThreadPriority(
            android.os.Process.THREAD_PRIORITY_FOREGROUND);
    android.os.Process.setCanSelfBackground(false);
    Looper.prepareMainLooper();

    System.loadLibrary("android_servers");

    performPendingShutdown();

    createSystemContext();

    mSystemServiceManager = new SystemServiceManager(mSystemContext);
    LocalServices.addService(SystemServiceManager.class, mSystemServiceManager);

    try {
        startBootstrapServices();
        startCoreServices();
        startOtherServices();
    } catch (Throwable ex) {
        Slog.e("System", "******************************************");
        Slog.e("System", "************ Failure starting system services", ex);
        throw ex;
    }

    if (StrictMode.conditionallyEnableDebugLogging()) {
        Slog.i(TAG, "Enabled StrictMode for system server main thread.");
    }

    Looper.loop();
    throw new RuntimeException("Main thread loop unexpectedly exited");
}
```
```java
private void startBootstrapServices() {
    Installer installer = mSystemServiceManager.startService(Installer.class);

    mActivityManagerService = mSystemServiceManager.startService(
            ActivityManagerService.Lifecycle.class).getService();
    mActivityManagerService.setSystemServiceManager(mSystemServiceManager);
    mActivityManagerService.setInstaller(installer);

    mActivityManagerService.setSystemProcess();

    startSensorService();
}
```
```java
public void setSystemProcess() {
    try {
        ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true,
                DUMP_FLAG_PRIORITY_CRITICAL | DUMP_FLAG_PRIORITY_NORMAL | DUMP_FLAG_PROTO);
        ServiceManager.addService(ProcessStats.SERVICE_NAME, mProcessStats);
        ServiceManager.addService("meminfo", new MemBinder(this), false,
                DUMP_FLAG_PRIORITY_HIGH);
        ServiceManager.addService("gfxinfo", new GraphicsBinder(this));
        ServiceManager.addService("dbinfo", new DbBinder(this));
        if (MONITOR_CPU_USAGE) {
            ServiceManager.addService("cpuinfo", new CpuBinder(this), false,
                    DUMP_FLAG_PRIORITY_CRITICAL);
        }
        ServiceManager.addService("permission", new PermissionController(this));
        ServiceManager.addService("processinfo", new ProcessInfoService(this));

        ApplicationInfo info = mContext.getPackageManager().getApplicationInfo(
                "android", STOCK_PM_FLAGS | MATCH_SYSTEM_ONLY);
        mSystemThread.installSystemApplicationInfo(info, getClass().getClassLoader());

        synchronized (this) {
            ProcessRecord app = newProcessRecordLocked(info, info.processName, false, 0);
            app.persistent = true;
            app.pid = MY_PID;
            app.maxAdj = ProcessList.SYSTEM_ADJ;
            app.makeActive(mSystemThread.getApplicationThread(), mProcessStats);
            synchronized (mPidsSelfLocked) {
                mPidsSelfLocked.put(app.pid, app);
            }
            updateLruProcessLocked(app, false, null);
            updateOomAdjLocked();
        }
    } catch (PackageManager.NameNotFoundException e) {
        throw new RuntimeException("Unable to find android system package", e);
    }
    ......
}
```
1.2 ServiceManager.addService

```java
public static void addService(String name, IBinder service, boolean allowIsolated,
        int dumpPriority) {
    try {
        getIServiceManager().addService(name, service, allowIsolated, dumpPriority);
    } catch (RemoteException e) {
        Log.e(TAG, "error in addService", e);
    }
}
```
```java
private static IServiceManager getIServiceManager() {
    if (sServiceManager != null) {
        return sServiceManager;
    }
    sServiceManager = ServiceManagerNative
            .asInterface(Binder.allowBlocking(BinderInternal.getContextObject()));
    return sServiceManager;
}
```
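For comparison, native code reaches the same handle-0 proxy through libbinder's `defaultServiceManager()`. A minimal sketch; the service name "demo.service" is hypothetical and used only for illustration:

```cpp
#include <binder/Binder.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>

using namespace android;

int main() {
    // defaultServiceManager() wraps BpBinder(0), the same handle-0 context
    // object that BinderInternal.getContextObject() returns on the Java path.
    sp<IServiceManager> sm = defaultServiceManager();

    // Register a local BBinder; "demo.service" is a made-up name.
    sp<IBinder> service = new BBinder();
    sm->addService(String16("demo.service"), service);
    return 0;
}
```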
1.2.1 BinderInternal.getContextObject()

```java
public static final native IBinder getContextObject();
```
```cpp
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
    return javaObjectForIBinder(env, b);
}
```
```cpp
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;

    if (val->checkSubclass(&gBinderOffsets)) {
        jobject object = static_cast<JavaBBinder*>(val.get())->object();
        LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
        return object;
    }

    AutoMutex _l(mProxyLock);

    jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
    if (object != NULL) {
        jobject res = jniGetReferent(env, object);
        if (res != NULL) {
            ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
            return res;
        }
        LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!",
                object, val.get());
        android_atomic_dec(&gNumProxyRefs);
        val->detachObject(&gBinderProxyOffsets);
        env->DeleteGlobalRef(object);
    }

    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
    if (object != NULL) {
        LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
        val->incStrong((void*)javaObjectForIBinder);

        jobject refObject = env->NewGlobalRef(
                env->GetObjectField(object, gBinderProxyOffsets.mSelf));
        val->attachObject(&gBinderProxyOffsets, refObject,
                jnienv_to_javavm(env), proxy_cleanup);

        sp<DeathRecipientList> drl = new DeathRecipientList;
        drl->incStrong((void*)javaObjectForIBinder);
        env->SetLongField(object, gBinderProxyOffsets.mOrgue,
                reinterpret_cast<jlong>(drl.get()));

        android_atomic_inc(&gNumProxyRefs);
        incRefsCreated(env);
    }

    return object;
}
```
1.2.2 ServiceManagerNative.asInterface

```java
static public IServiceManager asInterface(IBinder obj) {
    if (obj == null) {
        return null;
    }
    IServiceManager in = (IServiceManager) obj.queryLocalInterface(descriptor);
    if (in != null) {
        return in;
    }
    return new ServiceManagerProxy(obj);
}
```
```java
class ServiceManagerProxy implements IServiceManager {
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }
}
```
1.3 ServiceManagerProxy.addService()

```java
class ServiceManagerProxy implements IServiceManager {
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }

    public IBinder asBinder() {
        return mRemote;
    }

    public void addService(String name, IBinder service, boolean allowIsolated)
            throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        data.writeStrongBinder(service);
        data.writeInt(allowIsolated ? 1 : 0);
        mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
        reply.recycle();
        data.recycle();
    }
}
```
1.3.1 writeStrongBinder

```java
public final void writeStrongBinder(IBinder val) {
    nativeWriteStrongBinder(mNativePtr, val);
}
```
```cpp
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz,
        jlong nativePtr, jobject object)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
        if (err != NO_ERROR) {
            signalExceptionForError(env, clazz, err);
        }
    }
}
```
1.3.1.1 ibinderForJavaObject()

```cpp
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
    if (obj == NULL) return NULL;

    if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
        // Java Binder (a local object): hand back the JavaBBinder it holds.
        JavaBBinderHolder* jbh = (JavaBBinderHolder*)
                env->GetLongField(obj, gBinderOffsets.mObject);
        return jbh != NULL ? jbh->get(env, obj) : NULL;
    }

    if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
        // Java BinderProxy: hand back the BpBinder it wraps.
        return (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
    }

    ALOGW("ibinderForJavaObject: %p is not a Binder object", obj);
    return NULL;
}
```
```cpp
class JavaBBinderHolder : public RefBase
{
public:
    sp<JavaBBinder> get(JNIEnv* env, jobject obj)
    {
        AutoMutex _l(mLock);
        sp<JavaBBinder> b = mBinder.promote();
        if (b == NULL) {
            b = new JavaBBinder(env, obj);
            mBinder = b;
        }
        return b;
    }
};
```
1.3.1.2 parcel->writeStrongBinder

```cpp
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
```
```cpp
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}
```
```cpp
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}
```
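For reference, the `flat_binder_object` that `flatten_binder()` fills is a kernel UAPI struct. Its exact shape varies by kernel version: the userspace code above writes `obj.type` directly, while the driver code quoted in 1.3.3 reads `fp->hdr.type`; both name the same leading word. The hdr-based layout is roughly:

```c
struct binder_object_header {
    __u32 type;                   /* BINDER_TYPE_BINDER or BINDER_TYPE_HANDLE */
};

struct flat_binder_object {
    struct binder_object_header hdr;
    __u32 flags;                  /* e.g. FLAT_BINDER_FLAG_ACCEPTS_FDS */
    union {
        binder_uintptr_t binder;  /* local object: weak-ref address */
        __u32 handle;             /* remote object: driver-assigned reference */
    };
    binder_uintptr_t cookie;      /* local object: the BBinder address */
};
```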
1.3.2 mRemote.transact

```java
final class BinderProxy implements IBinder {
    public native boolean pingBinder();
    public native boolean isBinderAlive();

    public IInterface queryLocalInterface(String descriptor) {
        return null;
    }

    public boolean transact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
        return transactNative(code, data, reply, flags);
    }
}
```
```cpp
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags)
{
    if (dataObj == NULL) {
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }
    Parcel* data = parcelForJavaObject(env, dataObj);
    if (data == NULL) {
        return JNI_FALSE;
    }
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }

    IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
    ......
    status_t err = target->transact(code, *data, reply, flags);
    ......
    return JNI_FALSE;
}
```
```cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
```
```cpp
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    flags |= TF_ACCEPT_FDS;

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}
```
```cpp
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
```
```cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
}
```
```cpp
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    ......
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);

    return err;
}
```
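For reference, the `bwr` filled above is the UAPI struct behind the `BINDER_WRITE_READ` ioctl (include/uapi/linux/android/binder.h); the driver consumes `write_buffer` (our `mOut`, holding BC_TRANSACTION plus a `binder_transaction_data`) and fills `read_buffer` (our `mIn`) with BR_* commands:

```c
struct binder_write_read {
    binder_size_t    write_size;      /* bytes available in write_buffer */
    binder_size_t    write_consumed;  /* bytes the driver consumed */
    binder_uintptr_t write_buffer;    /* -> mOut.data(): BC_* commands */
    binder_size_t    read_size;       /* bytes available in read_buffer */
    binder_size_t    read_consumed;   /* bytes the driver produced */
    binder_uintptr_t read_buffer;     /* -> mIn.data(): BR_* commands */
};
```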
1.3.3 Writing the data into the driver

```c
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    switch (cmd) {
    case BC_TRANSACTION:
    case BC_REPLY: {
        struct binder_transaction_data tr;

        if (copy_from_user(&tr, ptr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
        break;
    }
    }
}
```
```c
static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply,
                   binder_size_t extra_buffers_size)
{
    if (reply) {
        ......
    } else {
        if (tr->target.handle) {
            ......
        } else {
            /* handle == 0: the target is the service manager's node */
            target_node = context->binder_context_mgr_node;
            ......
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;
        ......
    }

    target_list = &target_proc->todo;
    target_wait = &target_proc->wait;

    t = kzalloc(sizeof(*t), GFP_KERNEL);
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);

    t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size,
        extra_buffers_size, !reply && (t->flags & TF_ONE_WAY));

    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        ......
    }
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        ......
    }

    for (; offp < off_end; offp++) {
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            ret = binder_translate_binder(fp, t, thread);
            if (ret < 0) {
                return_error = BR_FAILED_REPLY;
                goto err_translate_failed;
            }
        } break;
        }
    }

    if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    }

    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
}
```
Here wake_up_interruptible wakes up service_manager, which begins handling the transaction in the BINDER_WORK_TRANSACTION case of binder_thread_read. The translation of the flattened binder object mentioned above happens in binder_translate_binder():
```c
static int binder_translate_binder(struct flat_binder_object *fp,
                   struct binder_transaction *t,
                   struct binder_thread *thread)
{
    struct binder_node *node;
    struct binder_ref *ref;
    struct binder_proc *proc = thread->proc;
    struct binder_proc *target_proc = t->to_proc;

    node = binder_get_node(proc, fp->binder);
    if (!node) {
        /* first time this BBinder crosses the driver: create its binder_node */
        node = binder_new_node(proc, fp->binder, fp->cookie);
        if (!node)
            return -ENOMEM;

        node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
    }
    ......
    ref = binder_get_ref_for_node(target_proc, node);
    if (!ref)
        return -EINVAL;

    /* rewrite the object in place: the receiver sees a handle, not a pointer */
    if (fp->hdr.type == BINDER_TYPE_BINDER)
        fp->hdr.type = BINDER_TYPE_HANDLE;
    else
        fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
    fp->binder = 0;
    fp->handle = ref->desc;
    fp->cookie = 0;
    binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

    return 0;
}
```
1.3.4 The target process reads the data in the driver (service_manager has been woken)

```c
// kernel/drivers/staging/android/binder.c
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
    ......
    ret = binder_thread_read(proc, thread, bwr.read_buffer,
                 bwr.read_size, &bwr.read_consumed,
                 filp->f_flags & O_NONBLOCK);
```
```c
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    if (!list_empty(&thread->todo)) {
        w = list_first_entry(&thread->todo, struct binder_work, entry);
    }
    ......
    case BINDER_WORK_TRANSACTION_COMPLETE: {
        cmd = BR_TRANSACTION_COMPLETE;
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
    }
}
```
At this point the client's write into the driver is complete: the client receives BR_TRANSACTION_COMPLETE and returns to waitForResponse() to process the returned command.
1.3.5 Returning to user space and suspending the client

Back in user space, the client handles the BR_TRANSACTION_COMPLETE command returned by the driver:
```cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        }
    }
}
```
Since addService expects a reply, the client then loops back through talkWithDriver() into the driver, where binder_thread_read() finds no work left on its todo list and blocks:

```c
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);
    ......
    if (wait_for_proc_work) {
        ......
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            /* the client thread sleeps here until the reply arrives */
            ret = wait_event_freezable(thread->wait,
                           binder_has_thread_work(thread));
    }
}
```
1.3.6 service_manager starts handling the message

```c
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    while (1) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;

        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;

            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }

        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
    }
}
```
At this point the binder driver delivers BR_TRANSACTION to service_manager, whose loop (binder_loop/binder_parse) picks up and parses the command:
```c
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    }
}
```
```c
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    while (ptr < end) {
        switch (cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        ......
        }
    }
    return r;
}
```
```c
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    ......
    case SVC_MGR_ADD_SERVICE:
    ......
}
```
```c
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid,
                   int allow_isolated, pid_t spid)
{
    struct svcinfo *si;

    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
              str8(s, len), handle, uid);
        return -1;
    }

    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                  str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                  str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void *) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;
    }

    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
```
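For reference, the svcinfo record that do_add_service() links into the svclist singly linked list is declared in service_manager.c roughly as:

```c
struct svcinfo
{
    struct svcinfo *next;       /* next entry in the global svclist */
    uint32_t handle;            /* the handle assigned by binder_translate_binder */
    struct binder_death death;  /* death-notification hook (svcinfo_death) */
    int allow_isolated;
    size_t len;
    uint16_t name[0];           /* UTF-16 service name, e.g. "activity" */
};
```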
After handling the add request, service_manager sends the result back to the binder driver via binder_send_reply():
```c
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}
```
When the binder driver receives BC_REPLY (again via binder_write_read() and binder_thread_write()), it calls binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0) once more. Just as it did for the client, it notifies the server side with BR_TRANSACTION_COMPLETE and suspends service_manager; it then wakes the client via wake_up_interruptible(target_wait). Once the client is awake, the driver hands it BR_REPLY from the BINDER_WORK_TRANSACTION case, which completes this Binder transaction.
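How does the driver know which thread to wake for a BC_REPLY that carries no handle? Paraphrased (not a complete listing) from the reply branch of binder_transaction(): it pops the transaction stack that was pushed in 1.3.3.

```c
/* paraphrased from binder_transaction() when reply == true */
if (reply) {
    in_reply_to = thread->transaction_stack;    /* SM's in-flight BR_TRANSACTION */
    thread->transaction_stack = in_reply_to->to_parent;
    target_thread = in_reply_to->from;          /* the client thread blocked in 1.3.5 */
    target_proc = target_thread->proc;
    target_list = &target_thread->todo;         /* BINDER_WORK_TRANSACTION goes here */
    target_wait = &target_thread->wait;         /* wake_up_interruptible() target */
}
```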
2. Call-flow summary

2.1 How a service gets registered with the ServiceManager

`getIServiceManager().addService(name, service, false);`

- getIServiceManager: new ServiceManagerProxy(new BinderProxy())
  - ServiceManagerNative.asInterface(BinderInternal.getContextObject())
  - BinderInternal.getContextObject returns a BinderProxy object
    - ProcessState::self()->getContextObject() creates a BpBinder
    - javaObjectForIBinder binds the BinderProxy and the BpBinder to each other
  - ServiceManagerNative.asInterface wraps the proxy as an IServiceManager
- addService
  - data.writeStrongBinder(service); here service == AMS, so the AMS Binder goes into the data parcel
  - mRemote.transact, where mRemote == BinderProxy
  - the native BpBinder is fetched, then IPCThreadState::transact runs:
    1. writeTransactionData: writes the command into mOut, cmd == BC_TRANSACTION
    2. waitForResponse
       - talkWithDriver: the crucial (and very long) path into the driver
- binder_transaction in the driver:
  - handle == 0, so the target is the service manager (sm)
    - target_node, target_proc
    - the target's todo and wait lists
  - creates t (the transaction) and tcomplete
  - copies the parcel data into the target's buffer
  - binder_translate_binder rewrites BINDER_TYPE_BINDER into a handle
  - thread->transaction_stack = t; lets sm find the client later for the reply
  - t->work.type = BINDER_WORK_TRANSACTION; queued for sm: the work to do
  - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; queued for the client, which will then suspend
  - wake_up_interruptible wakes sm
- the client suspends
  - it reads back BR_NOOP and BR_TRANSACTION_COMPLETE
  - wait_event_freezable then blocks it until the reply arrives
- sm handles the add-service request
  - BINDER_WORK_TRANSACTION is delivered as cmd == BR_TRANSACTION
  - the reply binder_io is initialized
  - res = func(bs, txn, &msg, &reply); func is a function pointer to svcmgr_handler, which looks up or adds a service
    - sm keeps all registered services in svclist
  - binder_send_reply writes BC_REPLY
    - t->work.type = BINDER_WORK_TRANSACTION; for the client: list_add_tail(&t->work.entry, target_list);
    - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; for sm, which suspends again: list_add_tail(&tcomplete->entry, &thread->todo);
    - wake_up_interruptible(target_wait); wakes the client
- the client is woken
  - it was suspended in binder_thread_read(); once woken it continues from there
  - BINDER_WORK_TRANSACTION is delivered as cmd = BR_REPLY;
2.2 SM-side handling (onTransact) and bookkeeping

- binder_node: the kernel object representing a Binder entity
- binder_ref: a reference (handle) to a binder_node in another process

Thread-pool management:

- the main Binder thread never exits; non-main threads may
- the maximum number of non-main (driver-spawned) threads is 15
- there is one main thread on top of that; it does not count toward the maximum
- so the real upper bound is 15 + 1 + any other threads that join
- ProcessState represents the process as a whole; every Binder thread runs through an IPCThreadState, and one is created on demand for a thread that does not have one yet (see the sketch below)
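A minimal native sketch of the thread-pool APIs these limits apply to; the count 15 is libbinder's default (DEFAULT_MAX_BINDER_THREADS), and a process can change it as shown:

```cpp
#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>

using namespace android;

int main() {
    ProcessState* proc = ProcessState::self();

    // Upper bound on driver-spawned (non-main) looper threads; default is 15.
    proc->setThreadPoolMaxThreadCount(15);

    // Spawns the one "main" Binder thread (BC_ENTER_LOOPER); it never exits
    // and is not counted against the maximum above.
    proc->startThreadPool();

    // Any other thread can also join the pool and serve incoming transactions.
    IPCThreadState::self()->joinThreadPool();
    return 0;
}
```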