通过上面的代码我们知道service manager的核心服务主要有4个
- do_add_service()函数:注册服务
- do_find_service()函数:查找服务
- binder_link_to_death()函数:注册服务死亡通知(服务所在进程死亡时收到回调)
- binder_send_reply()函数:将注册结果返回给Binder驱动
1 do_add_service()函数 注册服务
//service_manager.c 194行
/*
 * Register a service with servicemanager.
 *
 * bs:             binder state used to talk to the binder driver
 * s/len:          UTF-16 service name (len = number of uint16_t characters)
 * handle:         binder reference handle of the service being registered
 * uid:            caller's uid (unused in this excerpt)
 * allow_isolated: nonzero if isolated-uid processes may look this service up
 * spid:           caller's pid, used for the SELinux permission check
 *
 * Returns 0 on success, -1 on failure.
 */
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    /* Reject a null handle, an empty name, or a name longer than 127 chars. */
    if (!handle || (len == 0) || (len > 127))
        return -1;
    /* Permission check: may this caller register a service with this name
       (SELinux MAC check)? */
    if (!svc_can_register(s, len, spid)) {
        return -1;
    }
    /* Look for an existing registration under the same name. */
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            /* Name already bound: release the old handle first. */
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        /* New entry: svcinfo plus room for the name and its terminator. */
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            /* Out of memory — cannot allocate the svcinfo record. */
            return -1;
        }
        si->handle = handle;
        si->len = len;
        /* Copy the UTF-16 service name (len + 1 uint16_t elements). */
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        /* svclist is the singly linked list of all registered services. */
        si->next = svclist;
        svclist = si;
    }
    /* Send BC_ACQUIRE for this handle to the binder driver via ioctl. */
    binder_acquire(bs, handle);
    /* Send BC_REQUEST_DEATH_NOTIFICATION so the driver notifies us when the
       service dies — used for cleanup of the registration. */
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
注册服务部分主要分块内容:
- svc_can_register:检查权限:检查selinux权限是否满足
- find_svc:服务检索,根据服务名来查询匹配的服务;
- svcinfo_death:释放服务,当查询到已存在的同名的服务,则先清理该服务信息,再将当前的服务加入到服务列表svclist;
1.1 svc_can_register()函数
//service_manager.c 110行
/*
 * SELinux MAC check: may the caller (spid) "add" a service with this name?
 * Returns 1 if permitted, 0 otherwise.
 */
static int svc_can_register(const uint16_t *name, size_t name_len, pid_t spid)
{
    if (check_mac_perms_from_lookup(spid, "add", str8(name, name_len)))
        return 1;
    return 0;
}
1.2 svcinfo_death()函数
//service_manager.c 153行
/*
 * Death-notification callback for a registered service: release the binder
 * reference held for it and mark the entry as unbound (handle = 0).
 */
void svcinfo_death(struct binder_state *bs, void *ptr)
{
    struct svcinfo *svc = ptr;

    if (!svc->handle)
        return;
    binder_release(bs, svc->handle);
    svc->handle = 0;
}
1.3 bio_get_ref()函数
// framework/native/cmds/servicemanager/binder.c 627行
/*
 * Read the next object from the binder_io stream and, if it is a HANDLE
 * reference, return its handle value. Returns 0 when there is no object
 * or the object is not of type BINDER_TYPE_HANDLE.
 */
uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *fbo = _bio_get_obj(bio);

    return (fbo && fbo->type == BINDER_TYPE_HANDLE) ? fbo->handle : 0;
}
2 do_find_service() 查找服务
//service_manager.c 170行
/*
 * Look up a registered service by name.
 *
 * bs:    binder state (unused here, kept for the handler signature)
 * s/len: UTF-16 service name to search for
 * uid:   caller's uid, used for the isolated-process check
 * spid:  caller's pid, used for the SELinux "find" permission check
 *
 * Returns the service's binder handle, or 0 when the service is not
 * registered or the caller is not allowed to access it.
 */
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    /* Search svclist for a matching registration. */
    struct svcinfo *si = find_svc(s, len);
    if (!si || !si->handle) {
        return 0;
    }
    if (!si->allow_isolated) {
        /* This service refuses isolated callers: reject uids whose appid
           falls in the isolated range. */
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
            return 0;
        }
    }
    /* SELinux check: may this caller "find" this service? */
    if (!svc_can_find(s, len, spid)) {
        return 0;
    }
    /* Fixed: the comment below used a single '/', which is a C syntax error.
       The returned handle corresponds to the binder_ref desc in the driver. */
    return si->handle;
}
主要就是查询目标服务,并返回该服务所对应的handle
2.1 find_svc()函数
//service_manager.c 140行
/*
 * Walk svclist looking for a service whose UTF-16 name matches s16/len
 * exactly. Returns the matching svcinfo node, or NULL if none exists.
 */
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *cur = svclist;

    while (cur) {
        /* Match only when both length and every name character agree. */
        if (cur->len == len &&
            memcmp(cur->name, s16, len * sizeof(uint16_t)) == 0)
            return cur;
        cur = cur->next;
    }
    return NULL;
}
在svclist服务列表中,根据服务名遍历查找是否已经注册。当服务已经存在于svclist中,则返回相应的svcinfo结点,否则返回NULL。
在svcmgr_handler中当执行完do_find_service()函数后,会调用bio_put_ref()函数,将handle封装到reply.
bio_put_ref()函数
// framework/native/cmds/servicemanager/binder.c 505行
/*
 * Append a flat_binder_object of type HANDLE to the reply binder_io.
 * A nonzero handle is recorded in the offsets table (bio_alloc_obj);
 * a zero handle is written inline without an offset entry.
 */
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *fbo = handle
        ? bio_alloc_obj(bio)
        : bio_alloc(bio, sizeof(*fbo));

    if (!fbo)
        return;

    fbo->type = BINDER_TYPE_HANDLE; /* reply carries a HANDLE-type object */
    fbo->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    /* handle is the driver-side binder_ref desc (1, 2, 3, ...). */
    fbo->handle = handle;
    fbo->cookie = 0;
}
这段代码也不复杂,就是根据handle来判断分别执行bio_alloc_obj()函数和bio_alloc()函数
bio_alloc_obj()函数
// framework/native/cmds/servicemanager/binder.c 468 行
/*
 * Allocate a flat_binder_object inside the binder_io data area and record
 * its offset in the offsets table. Returns NULL (and sets BIO_F_OVERFLOW)
 * when either the data area or the offsets table is exhausted.
 */
static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
{
    struct flat_binder_object *obj = bio_alloc(bio, sizeof(*obj));

    if (!obj || !bio->offs_avail) {
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    }
    bio->offs_avail--;
    /* Store the object's byte offset from the start of the data buffer. */
    *bio->offs++ = ((char *) obj) - ((char *) bio->data0);
    return obj;
}
bio_alloc()函数
// framework/native/cmds/servicemanager/binder.c 437 行
/*
 * Carve `size` bytes (rounded up to a 4-byte multiple) out of the
 * binder_io data area. Returns the start of the region, or NULL with
 * BIO_F_OVERFLOW set when the remaining space is insufficient.
 */
static void *bio_alloc(struct binder_io *bio, size_t size)
{
    void *region;

    /* Round the request up to 4-byte alignment. */
    size = (size + 3) & ~(size_t) 3;
    if (size > bio->data_avail) {
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    }
    region = bio->data;
    bio->data += size;
    bio->data_avail -= size;
    return region;
}
3 binder_link_to_death() 函数
// framework/native/cmds/servicemanager/binder.c 305行
/*
 * Ask the binder driver to deliver a death notification for `target`.
 * The `death` record's address travels as the cookie and comes back in
 * the BR_DEAD_BINDER notification.
 */
void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
{
    struct {
        uint32_t cmd;
        struct binder_handle_cookie payload;
    } __attribute__((packed)) request = {
        .cmd = BC_REQUEST_DEATH_NOTIFICATION,
        .payload = {
            .handle = target,
            .cookie = (uintptr_t) death,
        },
    };

    binder_write(bs, &request, sizeof(request));
}
binder_write和前面讲解的binder_write一样,进入Binder driver后,直接调用binder_thread_write,处理BC_REQUEST_DEATH_NOTIFICATION命令。其中binder_ioctl_write_read()函数,前面已经讲解过了,这里就不详细讲解了
3.1 binder_thread_write() 函数
//kernel/drivers/android/binder.c 2248行
/*
 * Consume BC_* commands from a userspace write buffer.
 * Excerpt: only the BC_REQUEST_DEATH_NOTIFICATION /
 * BC_CLEAR_DEATH_NOTIFICATION case is shown; other cases (and some
 * error handling) are omitted, so the braces below do not balance.
 */
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
/* read the next command word from the user buffer */
get_user(cmd, (uint32_t __user *)ptr);
switch (cmd) {
/* **** portions omitted **** */
/* register (or clear) a death notification */
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
void __user *cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
/* fetch the target handle */
get_user(target, (uint32_t __user *)ptr);
ptr += sizeof(uint32_t);
/* fetch the death cookie (userspace binder_death pointer) */
get_user(cookie, (void __user * __user *)ptr);
ptr += sizeof(void *);
/* resolve the handle to the target service's binder_ref */
ref = binder_get_ref(proc, target);
/* NOTE(review): upstream checks ref == NULL here — elided in this excerpt */
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
/* a death notification is already registered for this ref */
if (ref->death) {
break;
}
death = kzalloc(sizeof(*death), GFP_KERNEL);
INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie;
ref->death = death;
/* if the target's process is already dead, queue the notification now */
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
/* binder thread: queue on this thread's todo list; otherwise on the
   process todo list and wake up a waiting thread */
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&ref->death->work.entry, &thread->todo);
} else {
list_add_tail(&ref->death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
}
} else {
...
}
} break;
/* **** portions omitted **** */
}
/* **** portions omitted **** */
return 0;
}
- 此方法中的proc,thread都是指当前的servicemanager进程信息,此时TODO队列有数据,则进入binder_thread_read。
- 那么问题来了,哪些场景会向队列增加BINDER_WORK_DEAD_BINDER事务?那便是当binder所在进程死亡后,会调用binder_release方法,然后调用binder_node_release,这个过程便会发出死亡通知的回调。
3.2 binder_thread_read() 函数
/*
 * Fill a userspace read buffer with BR_* work items.
 * Excerpt: only the BINDER_WORK_DEAD_BINDER delivery path is shown;
 * the opening brace, local declarations and most other cases are
 * omitted (marked with "..."), so this fragment does not compile as-is.
 */
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
...
/* Only when this thread's todo queue is empty AND its transaction_stack
   is empty does it start serving process-wide work. */
if (wait_for_proc_work) {
...
ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
...
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
/* take the global binder lock */
binder_lock(__func__);
if (wait_for_proc_work)
/* one fewer idle binder thread */
proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
/* dequeue the binder_work queued earlier; here its type is
   BINDER_WORK_DEAD_BINDER */
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
}
switch (w->type) {
case BINDER_WORK_DEAD_BINDER: {
struct binder_ref_death *death;
uint32_t cmd;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
...
else
/* this excerpt takes this branch */
cmd = BR_DEAD_BINDER;
/* copy the command out to userspace */
put_user(cmd, (uint32_t __user *)ptr);
ptr += sizeof(uint32_t);
/* the cookie is the svcinfo_death record registered earlier */
put_user(death->cookie, (binder_uintptr_t __user *)ptr);
ptr += sizeof(binder_uintptr_t);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
...
} else
/* keep the work on delivered_death until userspace confirms */
list_move(&w->entry, &proc->delivered_death);
if (cmd == BR_DEAD_BINDER)
goto done;
} break;
}
}
...
return 0;
}
- 将命令BR_DEAD_BINDER写到用户空间,此处的cookie是前面传递的svcinfo_death。当binder_loop下一次执行binder_parse的过程便会处理该消息。
- binder_parse()函数和svcinfo_death()函数上面已经说明了,这里就不详细说明了。
3.3 binder_release() 函数
//frameworks/native/cmds/servicemanager/binder.c 297行
/*
 * Send BC_RELEASE for `target` to the binder driver, dropping the strong
 * reference servicemanager holds on that handle.
 */
void binder_release(struct binder_state *bs, uint32_t target)
{
    struct {
        uint32_t cmd;
        uint32_t handle;
    } __attribute__((packed)) request = {
        .cmd = BC_RELEASE,
        .handle = target,
    };

    binder_write(bs, &request, sizeof(request));
}
向Binder Driver写入BC_RELEASE命令,最终进入Binder Driver后执行binder_dec_ref(ref,1) 来减少binder node的引用。
4 binder_send_reply() 函数 将注册结果返回给Binder驱动
//frameworks/native/cmds/servicemanager/binder.c 170行
/*
 * Send two commands to the binder driver in one write: BC_FREE_BUFFER to
 * reclaim the transaction buffer, then BC_REPLY carrying either an error
 * status (TF_STATUS_CODE) or the payload accumulated in `reply`.
 */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status) {
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) msg;

    /* Command 1: free the buffer of the transaction just handled. */
    msg.cmd_free = BC_FREE_BUFFER;
    msg.buffer = buffer_to_free;

    /* Command 2: the reply itself. */
    msg.cmd_reply = BC_REPLY;
    msg.txn.target.ptr = 0;
    msg.txn.cookie = 0;
    msg.txn.code = 0;

    if (status) {
        /* Error path: the payload is just the status code. */
        msg.txn.flags = TF_STATUS_CODE;
        msg.txn.data_size = sizeof(int);
        msg.txn.offsets_size = 0;
        msg.txn.data.ptr.buffer = (uintptr_t) &status;
        msg.txn.data.ptr.offsets = 0;
    } else {
        /* Success path: ship the data and offsets built up in `reply`. */
        msg.txn.flags = 0;
        msg.txn.data_size = reply->data - reply->data0;
        msg.txn.offsets_size = ((char *) reply->offs) - ((char *) reply->offs0);
        msg.txn.data.ptr.buffer = (uintptr_t) reply->data0;
        msg.txn.data.ptr.offsets = (uintptr_t) reply->offs0;
    }

    /* One ioctl carries both commands to the driver. */
    binder_write(bs, &msg, sizeof(msg));
}
执行binder_parse方法,先调用svcmgr_handler()函数,然后再执行binder_send_reply过程,该过程会调用binder_write进入binder驱动后,将BC_FREE_BUFFER和BC_REPLY命令协议发送给Binder驱动,向Client端发送reply,其中data数据区中保存的是TYPE为HANDLE。