Binder概要
Binder协议包含在IPC数据中,分为两类:
-
BINDER_COMMAND_PROTOCOL
:binder请求码,以”BC_“开头,简称BC码,用于从IPC层传递到Binder Driver层; -
BINDER_RETURN_PROTOCOL
:binder响应码,以”BR_“开头,简称BR码,用于从Binder Driver层传递到IPC层;
请求码 | 参数 | 说明 |
---|---|---|
BC_TRANSACTION | binder_transaction_data | Client向Binder驱动发送请求数据 |
BC_REPLY | binder_transaction_data | Server向Binder驱动发送回复数据 |
省略部分
响应码 | 参数类型 | 作用 |
---|---|---|
BR_ERROR | __s32 | 操作发生错误 |
BR_OK | 无参数 | 操作完成 |
BR_NOOP | 无参数 | 不做任何事 |
BR_SPAWN_LOOPER | 无参数 | 创建新的Looper线程 |
BR_TRANSACTION | binder_transaction_data | Binder驱动向Server端发送请求数据 |
BR_REPLY | binder_transaction_data | Binder驱动向Client端发送回复数据 |
通信模型
Binder
是Android
系统提供的一种IPC
(进程间通信)机制。由于Android
是基于Linux
内核的,因此,除了Binder外,还存在其他的IPC
机制,例如管道和socket
等。Binder
相对于其他IPC
机制来说,就更加灵活和方便了。
在基于Binder
通信的C/S
架构体系中,除了C/S
架构所包括的Client
端和Server
端外,Android
还有一个全局的ServiceManager
端,它的作用是管理系统中的各种服务(Service)。Client
、Server
和ServiceManager
这三者之间的交互关系
Server
进程要先注册一些Server
到ServiceManager
中,所以Server
是ServiceManager
的客户端,而ServiceManager
就是服务端了
Client
进程要使用某个Server
就先需要到ServiceManager
中获取该Server
的信息,所以Client
是ServiceManager
的客户端
Client
根据获得的Server
信息建立与Server
所在的Server
进程通信通道,然后直接与Server
交互了,所以Client
是Server
的客户端
1 详解MediaServer
在MediaServer
中存在许多重要的Service
,包括
-
AudioFlinger
: 音频系统中重要核心服务 -
AudioPolicyService
:音频系统中关于音频策略的重要服务 -
MediaPlayerService
:多媒体系统中重要服务 -
CameraService
:有关摄像/照相的重要服务
1.1 MediaServer的入口函数
frameworks/av/media/mediaserver/Main_mediaserver.cpp
// Entry point of the mediaserver process: set up Binder IPC state,
// register the media-related services, then join the Binder thread pool.
int main(int argc __unused, char **argv __unused)
{
signal(SIGPIPE, SIG_IGN);
//Obtain a strong reference to the per-process ProcessState instance [#1.1.1]
sp<ProcessState> proc(ProcessState::self());
//Call defaultServiceManager() to obtain an IServiceManager [#1.1.2]
sp<IServiceManager> sm(defaultServiceManager());
InitializeIcuOrDie();
//Register the MediaPlayer service [#1.2]
MediaPlayerService::instantiate();
ResourceManagerService::instantiate();
registerExtensions();
//Spawn the first binder pool thread [#1.3.1]
ProcessState::self()->startThreadPool();
//Make the main thread join the binder loop as well [#1.3.2]
IPCThreadState::self()->joinThreadPool();
}
1.1.1 ProcessState
每个进程只有一个ProcessState
.通过调用ProcessState::self()
获得实例
frameworks/native/libs/binder/ProcessState.cpp
// Returns the process-wide ProcessState singleton, creating it on first use.
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
//gProcess is a global variable defined in Static.cpp
if (gProcess != NULL) {
return gProcess;
}
//First call in this process: create the singleton
gProcess = new ProcessState; // [#1.1.1.1]
return gProcess;
}
1.1.1.1 构造函数
frameworks/native/libs/binder/ProcessState.cpp
// Constructor: opens /dev/binder and mmaps a region used to receive
// transaction data from the driver.
ProcessState::ProcessState()
//Open the binder driver
: mDriverFD(open_driver()) //[#1.1.1.2]
//Start address of the mmapped region
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
//Map a chunk of virtual address space; the driver delivers incoming
//transaction data into this region, so reading it is just a memory access.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
// *sigh*
close(mDriverFD);
mDriverFD = -1;
}
}
}
1.1.1.2 open_driver()
open_driver的作用就是打开/dev/binder这个设备,它是android在内核中专门用于完成进程间通信而设置的一个虚拟设备.
frameworks/native/libs/binder/ProcessState.cpp
// Opens /dev/binder, the virtual device Android provides for IPC, verifies
// the driver's protocol version, and configures the max binder thread count.
// Returns the fd on success, -1 on failure.
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR | O_CLOEXEC); //open /dev/binder
if (fd >= 0) {
int vers = 0;
//Query the binder driver's protocol version via ioctl
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
//Tell the driver this fd supports at most DEFAULT_MAX_BINDER_THREADS threads
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
1.1.1.3 总结
- 打开
/dev/binder
设备,这就相当于与内核的Binder
驱动有了交互的通道。 - 对返回的
fd
使用mmap
,这样Binder
驱动就会分配一块内存来接收数据。 - 由于
ProcessState
的唯一性,因此一个进程只打开设备一次。
1.1.2 defaultServiceManager()
frameworks/native/libs/binder/IServiceManager.cpp
// Returns the process-wide IServiceManager proxy, creating it lazily.
sp<IServiceManager> defaultServiceManager()
{ //singleton: fast path without the lock
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
//Create the proxy (note: in C/C++, NULL == 0)
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL)); //[#1.1.2.1]
//[#1.1.2.4] interface_cast<IServiceManager>
if (gDefaultServiceManager == NULL)
sleep(1);
}
}
return gDefaultServiceManager;
}
IServiceManager
对象时通过ProcessState
获得的。
1.1.2.1 getContextObject()
// Returns a proxy for the context manager (ServiceManager): always handle 0.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0);
}
1.1.2.2 getStrongProxyForHandle()
getStrongProxyForHandle
这个函数调用参数的名字叫handle,handle的值正是该资源项在集合中的索引.
// Returns a strong IBinder proxy for the given driver handle, creating a
// BpBinder on first use. 'handle' indexes into this process's handle table.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
//Look up the entry for this handle; lookupHandleLocked creates and returns
//a fresh entry if none exists yet.
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
// We need to create a new BpBinder if there isn't currently one, OR we
// are unable to acquire a weak reference on this current one. See comment
// in getWeakProxyForHandle() for more info about this.
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
// Special case for context manager...
// The context manager is the only object for which we create
// a BpBinder proxy without already holding a reference.
// Perform a dummy transaction to ensure the context manager
// is registered before we create the first local reference
// to it (which will occur when creating the BpBinder).
// If a local reference is created for the BpBinder when the
// context manager is not present, the driver will fail to
// provide a reference to the context manager, but the
// driver API does not return status.
//
// Note that this is not race-free if the context manager
// dies while this code runs.
//
// TODO: add a driver API to wait for context manager, or
// stop special casing handle 0 for context manager and add
// a driver API to get a handle to the context manager with
// proper reference counting.
//Ping the context manager so we only hand out a proxy when the
//ServiceManager is actually up (or still alive); otherwise return NULL.
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle); //create a BpBinder for this handle
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result; //the BpBinder proxy
}
1.1.2.3 BpBinder
frameworks/native/libs/binder/BpBinder.cpp
// BpBinder constructor: remembers the driver handle of the remote object
// and registers a weak reference for it with the driver.
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
//As seen in the common-classes chapter: set the lifetime flag so weak
//references control the object's lifetime.
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
//Tell the driver we hold a weak reference on this handle
IPCThreadState::self()->incWeakHandle(handle);
}
1.1.2.4 interface_cast
在defaultServiceManager()
函数中通过ProcessState
的getContextObject()
返回了一个BpBinder(0)
,最后通过interface_cast<IServiceManager>()
转换成了一个IServiceManager
,神奇,快打开源码。
frameworks/native/include/binder/IInterface.h
// Converts an IBinder (e.g. a BpBinder) into the requested interface type by
// delegating to the interface's asInterface() factory.
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
那么INTERFACE::asInterface(obj)
就等于是sp<IServiceManager>interface_cast(obj)
1.1.3 IServiceManager
frameworks/native/include/binder/IServiceManager.h
// Business interface of the ServiceManager: register, look up, and list
// services. The transport boilerplate is generated by the macro below.
class IServiceManager : public IInterface
{
public:
//The tricky macro: declares descriptor, asInterface(), ctor/dtor, etc.
DECLARE_META_INTERFACE(ServiceManager)
/**
* Retrieve an existing service, blocking for a few seconds
* if it doesn't yet exist.
*/
virtual sp<IBinder> getService( const String16& name) const = 0;
/**
* Retrieve an existing service, non-blocking.
*/
virtual sp<IBinder> checkService( const String16& name) const = 0;
/**
* Register a service.
*/
virtual status_t addService( const String16& name,
const sp<IBinder>& service,
bool allowIsolated = false) = 0;
/**
* Return list of all existing services.
*/
virtual Vector<String16> listServices() = 0;
enum {
GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
CHECK_SERVICE_TRANSACTION,
ADD_SERVICE_TRANSACTION,
LIST_SERVICES_TRANSACTION,
};
};
是不是找不到asInterface()
,坑爹,往下看。
1.1.3.1 DECLARE_META_INTERFACE
Android巧妙地通过DECLARE_META_INTERFACE
和IMPLEMENT_META_INTERFACE
宏,将业务和通信牢牢地钩在了一起。DECLARE_META_INTERFACE
和IMPLEMENT_META_INTERFACE
这两个宏都定义在刚才的IInterface.h
frameworks/native/include/binder/IInterface.h
// Declares the Binder interface boilerplate for I<INTERFACE>: the interface
// descriptor string, the asInterface() factory, getInterfaceDescriptor(),
// and the constructor/destructor.
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE(); \
进行宏替换
// Expansion of DECLARE_META_INTERFACE(ServiceManager):
static const android::String16 descriptor;
static android::sp<IServiceManager>
asInterface(const android::sp<android::IBinder>& obj);
virtual const android::String16& getInterfaceDescriptor() const;
IServiceManager();
virtual ~IServiceManager();
DECLARE_META_INTERFACE
是进行函数声明。实现就是IMPLEMENT_META_INTERFACE
这个宏
1.1.3.2 IMPLEMENT_META_INTERFACE
// Implements what DECLARE_META_INTERFACE declared. asInterface() first asks
// the IBinder for a local implementation (BnXxx in the same process); if
// there is none, it wraps the remote IBinder in a Bp##INTERFACE proxy.
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const android::String16 I##INTERFACE::descriptor(NAME); \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { } \
进行宏替换
// Expansion of IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager"):
const android::String16 IServiceManager::descriptor("android.os.IServiceManager");
const android::String16&
IServiceManager::getInterfaceDescriptor() const {
return IServiceManager::descriptor;
}
android::sp<IServiceManager> IServiceManager::asInterface(
const android::sp<android::IBinder>& obj){
android::sp<IServiceManager> intr;
if (obj != NULL) {
//Note the pointer cast: the macro expands to I##INTERFACE* here
intr = static_cast<IServiceManager*>(
obj->queryLocalInterface(
IServiceManager::descriptor).get());
if (intr == NULL) {
//No local (in-process) implementation: wrap the BpBinder in a proxy
intr = new BpServiceManager(obj);
}
}
return intr;
}
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
这下终于找到了。这个模板代码写的,厉害。
interface_cast
不是指针的转换,而是利用BpBinder对象作为参数新建了一个BpServiceManager
对象。又搞出一个新对象,BpBinder
,IServiceManager
还没搞定完
1.1.3.3 IServiceManager家族
- IServiceManager、BpServiceManager和BnServiceManager都与业务逻辑相关
- BnServiceManager同时从BBinder派生,表示它可以直接参与Binder通信。
- BpServiceManager虽然从BpInterface中派生,但是这条分支似乎与BpBinder没有关系。
- BnServiceManager是一个虚类,它的业务函数最终需要子类来实现。
frameworks/native/libs/binder/IServiceManager.cpp
// BpServiceManager constructor: hands the remote IBinder (the BpBinder(0))
// down to the BpInterface base.
explicit BpServiceManager(const sp<IBinder>& impl)
//base-class construction
: BpInterface<IServiceManager>(impl)
{
}
frameworks/native/include/binder/IInterface.h
// BpInterface constructor: forwards the remote IBinder to BpRefBase, which
// stores it as mRemote.
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
//store the remote binder in BpRefBase
: BpRefBase(remote)
{
}
frameworks/native/libs/binder/Binder.cpp
// BpRefBase constructor: keeps a raw pointer to the remote IBinder (here the
// BpBinder(0)) and takes strong + weak references on it.
BpRefBase::BpRefBase(const sp<IBinder>& o)
//mRemote ends up pointing at the BpBinder(0) passed down the chain
: mRemote(o.get()), mRefs(NULL), mState(0)
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this); // Removed on first IncStrong().
mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
}
}
1.1.4 总结
经过一系列的转换最终得到
一个BpBinder
对象,handle
值为0,也就是和它通信的服务端就是我们需要的ServiceManager
一个BpServiceManager
对象,它的mRemote
值为BpBinder
BpServiceManager
对象实现了IServiceManager
的业务函数,BpBinder
作为通信。
1.2 注册MediaPlayerService
在MediaServer
入口函数中,得到IServiceManager
对象后,执行了MediaPlayerService::instantiate()
来初始化MediaPlayerService
frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
// Registers a new MediaPlayerService instance with the ServiceManager under
// the name "media.player".
void MediaPlayerService::instantiate() {
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService());
}
defaultServiceManager()
返回的是一个BpServiceManager
,它是IServiceManager
的子类
// BpServiceManager::addService: packs the service name and binder into a
// Parcel and sends an ADD_SERVICE_TRANSACTION through the remote BpBinder.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated)
{
//data: outgoing payload; reply: data returned by the server
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
//remote() is the BpBinder; this ends up in BpBinder::transact()
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
BpServiceManager
的addService()
是一个业务层函数,负责把数据打包到data
中。
BpBinder
的transcat()
负责通信,把打包好的数据发送出去,并处理返回数据replay
1.2.1 通信工作
通过上面代码只知道,通信是通过BpBinder
的transcat()
完成的
// BpBinder::transact: forwards the call to IPCThreadState, which does the
// actual driver communication. Marks the proxy dead on DEAD_OBJECT.
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
//mHandle identifies the remote target of this transaction
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
实际工作完成工作的是IPCThreadState::transact()
1.2.1.1 IPCThreadState
frameworks/native/libs/binder/IPCThreadState.cpp
// Returns the per-thread IPCThreadState, stored in thread-local storage.
// Creates the TLS key on first use, then a fresh instance per thread.
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
//No instance in this thread yet: the constructor stores itself in TLS
return new IPCThreadState;
}
if (gShutdown) {
ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
return NULL;
}
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
int key_create_value = pthread_key_create(&gTLS, threadDestructor);
if (key_create_value != 0) {
pthread_mutex_unlock(&gTLSMutex);
ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
strerror(key_create_value));
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
上面的代码可以简单理解为:取出当前线程本地存储(TLS)中存放的IPCThreadState
对象;如果没有就new
一个,并放入线程本地存储,然后返回new
出来的对象
frameworks/native/libs/binder/IPCThreadState.cpp
// IPCThreadState constructor: binds itself to the current thread's TLS slot
// and prepares the two Parcels used for driver I/O.
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mMyThreadId(gettid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
//Store this object in the thread-local slot
pthread_setspecific(gTLS, this);
clearCaller();
//Two Parcels: mOut for outgoing commands, mIn for incoming replies
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}
1.2.1.2 实际的通信的transact()
frameworks/native/libs/binder/IPCThreadState.cpp
// Queues a BC_TRANSACTION for the driver and (unless one-way) blocks until
// the reply arrives via waitForResponse().
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
...//omitted
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
//BC_TRANSACTION is the command code sent TO the binder driver; the
//driver's replies use BR_-prefixed codes (defined in binder_module.h).
//Request codes and reply codes correspond pairwise.
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
...//omitted
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
...//omitted
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
先发送数据,然后等待服务端返回结果
继续跟入writeTransactionData()
frameworks/native/libs/binder/IPCThreadState.cpp
tatus_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
//和binder通信的数据结构
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
//把命令和数据写在mOut中,但是没有发送数据
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
等待回复 waitForResponse()
frameworks/native/libs/binder/IPCThreadState.cpp
// Loops talking to the driver until the transaction completes, dispatching
// any intermediate BR_ commands to executeCommand().
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
//talkWithDriver() is where the actual ioctl exchange happens
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
...//omitted
switch (cmd) {
//BR_ codes are replies from the binder driver
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
...//omitted: many other reply codes
default:
//Everything else is handled by executeCommand()
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
收到了回复码,以当前分析的注册服务来说,就应该执行到executeCommand()
上面的时候提到过
请求消息码和回应消息码是对应关系。所以我们要找到BR_TRANSACTION
frameworks/native/libs/binder/IPCThreadState.cpp
// Dispatches one BR_ command read from the driver. For BR_TRANSACTION it
// unpacks the payload, routes it to the target BBinder's transact(), and
// sends the reply back (unless the call was one-way).
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
...//omitted
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
if (curPrio > ANDROID_PRIORITY_NORMAL) {
// We have inherited a reduced priority from the caller, but do not
// want to run in that state in this process. The driver set our
// priority already (though not our scheduling class), so bounce
// it back to the default before invoking the transaction.
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
// We want to use the inherited priority from the caller.
// Ensure this thread is in the background scheduling class,
// since the driver won't modify scheduling classes for us.
// The scheduling group is reset to default by the caller
// once this method returns after the transaction is complete.
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
}
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
}
}
break;
case BR_DEAD_BINDER:
{
//The driver reports that a remote service has died
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
case BR_CLEAR_DEATH_NOTIFICATION_DONE:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->getWeakRefs()->decWeak(proxy);
} break;
case BR_FINISHED:
result = TIMED_OUT;
break;
case BR_NOOP:
break;
case BR_SPAWN_LOOPER:
//The driver asks us to spawn a new thread for binder communication
mProcess->spawnPooledThread(false);
break;
default:
printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
1.2.1.3 talkWithDriver()
frameworks/native/libs/binder/IPCThreadState.cpp
// Performs one BINDER_WRITE_READ ioctl: sends whatever is queued in mOut and
// (if requested) reads incoming data into mIn. This is the only place where
// user space actually exchanges bytes with the binder driver.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(__ANDROID__)
//Exchange data with the driver via ioctl
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
通过ioctl与mDriverFD通信,是真正与Binder驱动进行数据读写交互的过程。 主要是操作mOut和mIn变量。
1.3 后续处理
1.3.1 startThreadPool()
注册完service
后,main函数也就执行到了ProcessState::self()->startThreadPool();
frameworks/native/libs/binder/ProcessState.cpp
// Marks the thread pool as started (idempotent) and spawns the first pooled
// binder thread, flagged as the "main" pool thread.
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
没什么实际作用,调用了spawnPooledThread(true)
frameworks/native/libs/binder/ProcessState.cpp
// Spawns one PoolThread to service binder transactions. isMain marks the
// thread as a permanent pool member (BC_ENTER_LOOPER vs BC_REGISTER_LOOPER).
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
//Create a Thread object to run the binder loop
sp<Thread> t = new PoolThread(isMain); // true
t->run(name.string());
}
}
PoolThread
是ProcessState
的内部类,继承自Thread
// Inner class of ProcessState: a Thread whose loop body simply joins the
// binder thread pool on the new thread.
class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain){}
protected:
virtual bool threadLoop()
{
//Creates this thread's own IPCThreadState (via self()) and joins the
//pool with mIsMain == true; returning false ends the Thread loop.
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
1.3.2 joinThreadPool()
新创建的线程调用了这个函数,当前把isMain
为true
// Runs the binder command loop on the calling thread. isMain == true means
// the thread stays in the pool permanently (BC_ENTER_LOOPER); false means
// the driver may retire it on timeout (BC_REGISTER_LOOPER).
// (The article's excerpt dropped the first line of three multi-line log
// statements, leaving dangling argument fragments; restored here.)
void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n",
(void*)pthread_self(), getpid());
//Queue the enter-loop command; the matching reply code is BR_ENTER_LOOPER
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
// This thread may have been spawned by a thread that was in the background
// scheduling group, so first we will make sure it is in the foreground
// one to avoid performing an initial transaction in the background.
set_sched_policy(mMyThreadId, SP_FOREGROUND); //foreground scheduling policy
status_t result;
do {
processPendingDerefs(); //flush queued reference decrements
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand(); //talk to the driver and dispatch the reply
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
mProcess->mDriverFD, result);
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
(void*)pthread_self(), getpid(), (void*)result);
mOut.writeInt32(BC_EXIT_LOOPER); // this thread leaves the loop
talkWithDriver(false);
}
现在已知至少有两个线程在和Binder
通信,一个是通过startThreadPool()
创建的新的线程
mediaServer
进程本身也调用了joinThreadPool
来不断的和Binder交流信息
1.4 总结
Binder
是通信机制
业务可以基于Binder
通信,也可以使用其他的IPC
方式
通过BBinder
,BpBinder
,binder设备
,构成了IPC通信
2 ServiceManager
通过上面的分析,可以得知,defaultServiceManager()
返回的是一个BpServiceManager
,通过BpBinder
向binder
设备通信,通信的目标是handle = 0
2.1 ServiceManager的入口函数
frameworks/native/cmds/servicemanager/Service_manager.c
/* Entry point of servicemanager: open the binder device, claim the role of
 * context manager (handle 0), then loop forever serving client requests. */
int main() {
struct binder_state *bs;
//Open the binder device with a 128 KB mapping [#2.1.1]
bs = binder_open(128*1024);
if (!bs) {
return -1;
}
//Become the context manager [#2.1.2]
if (binder_become_context_manager(bs)) {
return -1;
}
/* SELinux permission setup */
selinux_enabled = is_selinux_enabled();
...//omitted
//Loop handling client requests via svcmgr_handler [#2.1.3]
binder_loop(bs, svcmgr_handler);
return 0;
}
2.1.1 打开Binder设备 (binder_open)
binder_open()
函数用于打开binder
设备
frameworks/native/cmds/servicemanager/Binder.c
/* Opens /dev/binder, checks the driver's protocol version, and mmaps
 * 'mapsize' bytes for receiving transaction data. Returns NULL on failure. */
struct binder_state *binder_open(size_t mapsize) {
struct binder_state *bs;
struct binder_version vers;
//Allocate the binder_state bookkeeping struct
bs = malloc(sizeof(*bs));
...//omitted
bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
...//omitted
//Query the driver's binder protocol version
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}
bs->mapsize = mapsize;
//Map the receive buffer
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
这里的binder_open()
和ProcessState
中的是一致的
- 打开设备
- 内存映射
只是少了分配内存。
2.1.2 BecomeContextManager
frameworks/native/cmds/servicemanager/Binder.c
/* Registers this process as the binder context manager (the object behind
 * handle 0) via the BINDER_SET_CONTEXT_MGR ioctl. Returns 0 on success. */
int binder_become_context_manager(struct binder_state *bs) {
//System call: BINDER_SET_CONTEXT_MGR with argument 0
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
2.1.3 binder_loop
frameworks/native/cmds/servicemanager/Binder.c
/* Enters the binder loop: announces BC_ENTER_LOOPER, then repeatedly reads
 * from the driver and hands each batch of commands to binder_parse(), which
 * ultimately invokes 'func' (svcmgr_handler) for transactions. */
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
//Send the BC_ENTER_LOOPER command to the binder driver
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//Blocking data exchange with the driver
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
break;
}
//Parse the commands; transactions end up in func [#2.1.4]
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
...//omitted
}
}
2.1.4 svcmgr_handler
frameworks/native/cmds/servicemanager/Service_manager.c
处理请求
/* ServiceManager's request dispatcher: validates the target and RPC header,
 * then routes get/check/add/list service requests by transaction code.
 * Returns 0 on success, -1 on any validation or lookup failure. */
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
//Check that the target is ourselves
//(BINDER_SERVICE_MANAGER is 0U)
if (txn->target.ptr != BINDER_SERVICE_MANAGER)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
//svcmgr_id holds the "android.os.IServiceManager" string; verify that the
//incoming interface token matches it byte for byte.
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
//Dispatch on the request code
switch(txn->code) {
case SVC_MGR_GET_SERVICE://handles getService
case SVC_MGR_CHECK_SERVICE://handles checkService
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
//Look up the service by name
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
case SVC_MGR_ADD_SERVICE: //handles addService
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
//Register the service
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
case SVC_MGR_LIST_SERVICES: { //handles listServices
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
不同的code
对应了不同的请求。
2.2 服务注册
上面再svcmgr_handler
中看到个serviceManager
中的各个业务函数。详细分析do_add_service
frameworks/native/cmds/servicemanager/Service_manager.c
/* Registers a service named 's' (length 'len') under driver handle 'handle'.
 * Checks registration permission, replaces an existing entry of the same
 * name, or allocates a new svcinfo and links it into svclist. Also takes a
 * driver reference on the handle and subscribes to its death notification.
 * Returns 0 on success, -1 on failure. */
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
//ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
// allow_isolated ? "allow_isolated" : "!allow_isolated", uid);
if (!handle || (len == 0) || (len > 127))
return -1;
//Check whether the caller is allowed to register services
if (!svc_can_register(s, len, spid, uid)) {
ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
str8(s, len), handle, uid);
return -1;
}
//Look for an existing entry with this name
si = find_svc(s, len);
if (si) {
//Already registered: drop the old handle and override it
if (si->handle) {
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
//Out of memory
if (!si) {
ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
str8(s, len), handle, uid);
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;//called when the service dies
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist;//prepend to svclist
svclist = si;
}
//Send BC_ACQUIRE for this handle to the binder driver via ioctl
binder_acquire(bs, handle);
//Ask the driver to notify ServiceManager when the service process exits
binder_link_to_death(bs, handle, &si->death);
return 0;
}
2.2.1 svc_can_register
frameworks/native/cmds/servicemanager/Service_manager.c
/* Returns 1 if the caller (spid/uid) may register a service of this name.
 * Ordinary apps (app id >= AID_APP) are always refused; others are checked
 * against the SELinux "add" permission. */
static int svc_can_register(const uint16_t *name, size_t name_len, pid_t spid, uid_t uid)
{
const char *perm = "add";
if (multiuser_get_app_id(uid) >= AID_APP) {
return 0; /* Don't allow apps to register services */
}
return check_mac_perms_from_lookup(spid, uid, perm, str8(name, name_len)) ? 1 : 0;
}
2.3 ServiceManager存在的意义
- ServiceManger能集中管理系统内的所有服务,它能施加权限控制,并不是任何进程都能注册服务。
- ServiceManager支持通过字符串名称来查找对应的Service。这个功能很像DNS。
- 由于各种原因,Server进程可能生死无常。如果让每个Client都去检测,压力实在太大。现在有了统一的管理机构,Client只需要查询ServiceManager,就能把握动向,得到最新信息。
3 MediaPlayerService和它的Client
3.1 查询ServiceManager
现在我们很熟悉了,需要使用某个service
就需要和ServiceManager
打交道,通过getService()
来获得service
的信息。
示例分析:
// Obtains (and caches) a proxy to the "media.player" service, polling the
// ServiceManager every 0.5 s until the service has been registered.
const sp<IMediaPlayerService>
IMediaDeathNotifier::getMediaPlayerService(){
Mutex::Autolock _l(sServiceLock);
if (sMediaPlayerService == 0) {
//Get the ServiceManager proxy
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
//Query the ServiceManager for the service; returns a BpBinder.
binder = sm->getService(String16("media.player"));
if (binder != 0) {
break;
}
usleep(500000); // 0.5 s
} while (true);
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(sDeathNotifier);
//Convert the BpBinder into a BpMediaPlayerService proxy
sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
}
return sMediaPlayerService;
}
从上面代码可知,如果服务还没注册,就需要一直等待,直到服务注册为止。
3.2 service的处理
MediaPlayerService
是驻留在MediaServer
进程中的,从前面分析mediaServer
时可知,有两个线程在和binder
通信,一个是进程本身,一个是新开的线程,在读取到binder
的信息时,都交给executeCommand()
函数处理信息。再来分析这个函数。
// Second look at executeCommand(), now from the server side: a BR_TRANSACTION
// carries a cookie pointing at the target BBinder, whose transact() is
// invoked to run the actual service code (e.g. BnMediaPlayerService).
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
case BR_OK:
break;
case BR_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
obj->incStrong(mProcess.get());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
obj->printRefs();
}
mOut.writeInt32(BC_ACQUIRE_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
...//omitted
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
if (curPrio > ANDROID_PRIORITY_NORMAL) {
// We have inherited a reduced priority from the caller, but do not
// want to run in that state in this process. The driver set our
// priority already (though not our scheduling class), so bounce
// it back to the default before invoking the transaction.
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
// We want to use the inherited priority from the caller.
// Ensure this thread is in the background scheduling class,
// since the driver won't modify scheduling classes for us.
// The scheduling group is reset to default by the caller
// once this method returns after the transaction is complete.
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
Parcel reply;
status_t error;
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
//tr.cookie is the BBinder implementing the service
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
//ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
// mCallingPid, origPid, origUid);
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
...//omitted
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
上面的函数调用了BBinder
的transact()
函数,这个函数内部调用了onTransact()
,是个虚函数。由之前的分析可知,具体实现就在IMediaPlayerService.cpp