lowmemkiller Analysis

Android is a multitasking system: several programs can run at the same time, which everyone is familiar with. Starting a program has a noticeable time cost, so to speed things up Android does not kill a program immediately when you exit it; the next time you run it, it can start much faster. As more and more programs are kept resident, memory inevitably runs short. lowmemorykiller steps in when system memory drops below certain thresholds and kills selected processes, so that the system always keeps a certain amount of free memory.


lowmemorykiller chooses which process to kill based on the process's importance (its oom_adj / oom_score_adj) and, among equally important processes, its memory usage.


The original Linux implementation of lowmemkiller is fairly simple. At initialization it registers lowmem_shrinker as a shrinker; kswapd then invokes lowmem_shrink during memory reclaim.
That function walks all processes looking for those whose oom_adj is greater than or
equal to min_adj, and if there are several candidates it kills the one that occupies the most memory.
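The selection rule can be distilled into a few lines. The sketch below is illustrative only; it is not the kernel code, the names (struct victim, better_victim) are made up, and locking is ignored:

/* Illustrative only: pick the victim the way lowmem_shrink does. */
struct victim {
	short adj;	/* oom_score_adj of the candidate */
	long  rss;	/* resident pages of the candidate */
};

/* Returns 1 if 'candidate' should replace 'best' as the kill target. */
static int better_victim(const struct victim *candidate,
			 const struct victim *best, short min_adj)
{
	if (candidate->adj < min_adj)
		return 0;			/* below the threshold: never kill */
	if (candidate->adj != best->adj)
		return candidate->adj > best->adj;	/* higher adj wins */
	return candidate->rss > best->rss;		/* same adj: larger RSS wins */
}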


The code involves two arrays.
The adj values compared against when selecting a process to kill:
static short lowmem_adj[6] = {
0,
1,
6,
12,
};
The free-memory thresholds: when the system's available memory falls below a given threshold, every process whose adj is greater than or equal to the corresponding lowmem_adj entry becomes a kill candidate.
static int lowmem_minfree[6] = { // in units of pages
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
16 * 1024, /* 64MB */
};
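As a worked example with 4 KB pages: if both other_free and other_file drop below 4 * 1024 pages (16 MB) but are still above 2 * 1024 pages (8 MB), the threshold loop in lowmem_shrink stops at index 2, so min_score_adj = lowmem_adj[2] = 6 and every process with oom_score_adj >= 6 becomes a candidate. The defaults above are rarely what actually runs: both arrays are exported as module parameters, and the Android framework normally rewrites them at boot through /sys/module/lowmemorykiller/parameters/adj and /sys/module/lowmemorykiller/parameters/minfree.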


The full standard implementation can be found in the Linux kernel source.
Qualcomm has modified this code in a few places, so the focus here is on the Qualcomm-platform implementation.
Qualcomm adds memory-pressure management: once memory pressure reaches a certain level, the oom_score_adj threshold used when selecting a victim is adjusted (a sketch of the notifier callback follows the init code below).
// At initialization, in addition to registering the shrinker, a vmpressure notifier is registered as well.
// The current vmpressure value is computed during memory reclaim, and lowmem_shrink then adjusts its min_score_adj threshold accordingly (via adjust_minadj).
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
vmpressure_notifier_register(&lmk_vmpr_nb);
return 0;
}
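The notifier callback itself is not shown in this excerpt. The following is only a condensed sketch of what the Qualcomm handler does, based on the msm kernel sources; the names enable_adaptive_lmk, shift_adj, vmpressure_file_min and the 90/95 thresholds come from that tree and may differ between kernel versions. The idea is simple: when vmpressure reports high pressure, set a flag that adjust_minadj() later uses to widen the kill range.

/* Condensed sketch (not verbatim kernel code) of the vmpressure callback. */
static int lmk_vmpressure_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	unsigned long pressure = action;	/* vmpressure passes the pressure value as 'action' */

	if (!enable_adaptive_lmk)
		return 0;

	if (pressure >= 95) {
		/* critical pressure: let lowmem_shrink kill lower-adj processes */
		atomic_set(&shift_adj, 1);
	} else if (pressure >= 90) {
		/* medium pressure: only widen the range if free memory and
		   reclaimable file cache are both already very low */
		int other_free = global_page_state(NR_FREE_PAGES);
		int other_file = global_page_state(NR_FILE_PAGES) -
				 global_page_state(NR_SHMEM) -
				 total_swapcache_pages();

		if (other_free < lowmem_minfree[lowmem_minfree_size - 1] &&
		    other_file < vmpressure_file_min)
			atomic_set(&shift_adj, 1);
	}

	return 0;
}

static struct notifier_block lmk_vmpr_nb = {
	.notifier_call = lmk_vmpressure_notifier,
};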


/*
NR_FILE_PAGES = global_page_state(NR_FILE_PAGES), taken from vmstat[NR_FILE_PAGES]; it is the total of all page-cache pages, which includes:
Cached
buffers
swap cache
other_file: the reclaimable part of the page cache (NR_FILE_PAGES minus shmem and swap-cache pages, see below)
other_free: the number of free pages
*/
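For example, if NR_FILE_PAGES is 60000 pages, NR_SHMEM is 8000 pages and the swap cache holds 2000 pages, then other_file = 60000 - 8000 - 2000 = 50000 pages (about 195 MB with 4 KB pages). If shmem plus swap cache were ever to exceed NR_FILE_PAGES, the code below clamps other_file to 0.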


static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
int ret = 0;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free;
int other_file;
unsigned long nr_to_scan = sc->nr_to_scan;


if (nr_to_scan > 0) {
if (mutex_lock_interruptible(&scan_mutex) < 0)
return 0;
}


other_free = global_page_state(NR_FREE_PAGES);


if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
global_page_state(NR_FILE_PAGES))
other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM) -
total_swapcache_pages();
else
other_file = 0; 


tune_lmk_param(&other_free, &other_file, sc);


if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
minfree = lowmem_minfree[i];
if (other_free < minfree && other_file < minfree) {
min_score_adj = lowmem_adj[i];
break;
}
}
if (nr_to_scan > 0) {
ret = adjust_minadj(&min_score_adj);
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
}
//rem: the total number of pages on the LRU lists (the value returned to the shrinker core)
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
    nr_to_scan, sc->gfp_mask, rem);


if (nr_to_scan > 0)
mutex_unlock(&scan_mutex);


if ((min_score_adj == OOM_SCORE_ADJ_MAX + 1) &&
(nr_to_scan > 0))
trace_almk_shrink(0, ret, other_free, other_file, 0);


return rem;
}
selected_oom_score_adj = min_score_adj;


rcu_read_lock();
for_each_process(tsk) {
struct task_struct *p;
short oom_score_adj;


if (tsk->flags & PF_KTHREAD)//kernel threads are never killed
continue;


/* if task no longer has any memory ignore it */
//the task's mm_struct has already been released, so it no longer uses any memory
if (test_task_flag(tsk, TIF_MM_RELEASED))
continue;

//a previous kill is still pending: jiffies <= lowmem_deathpending_timeout
if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
if (test_task_flag(tsk, TIF_MEMDIE)) {//the task is already being killed by the OOM path
rcu_read_unlock();
/* give the system time to free up the memory */
msleep_interruptible(20);
mutex_unlock(&scan_mutex);
return 0;
}
}
//tsk's own mm_struct may already have been released, but one of its threads may still hold a valid mm; find_lock_task_mm returns that thread (locked)
p = find_lock_task_mm(tsk);
if (!p)
continue;

// /proc/PID/oom_score_adj
//this value is the kill priority: the larger it is, the more likely the process is to be killed
oom_score_adj = p->signal->oom_score_adj;
if (oom_score_adj < min_score_adj) {
task_unlock(p);
continue;
}
tasksize = get_mm_rss(p->mm);//the process's RSS: file-backed pages plus anonymous pages
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {//keep the candidate with the highest adj; on a tie, the one with the largest RSS
if (oom_score_adj < selected_oom_score_adj)
continue;
if (oom_score_adj == selected_oom_score_adj &&
   tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
    p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
long cache_size = other_file * (long)(PAGE_SIZE / 1024);
long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
long free = other_free * (long)(PAGE_SIZE / 1024);
trace_lowmemory_kill(selected, cache_size, cache_limit, free);
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
"   to free %ldkB on behalf of '%s' (%d) because\n" \
"   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
"   Free memory is %ldkB above reserved.\n" \
"   Free CMA is %ldkB\n" \
"   Total reserve is %ldkB\n" \
"   Total free pages is %ldkB\n" \
"   Total file cache is %ldkB\n" \
"   Slab Reclaimable is %ldkB\n" \
"   Slab UnReclaimable is %ldkB\n" \
"   Total Slab is %ldkB\n" \
"   GFP mask is 0x%x\n",
    selected->comm, selected->pid,
    selected_oom_score_adj,
    selected_tasksize * (long)(PAGE_SIZE / 1024),
    current->comm, current->pid,
    cache_size, cache_limit,
    min_score_adj,
    free ,
    global_page_state(NR_FREE_CMA_PAGES) *
(long)(PAGE_SIZE / 1024),
    totalreserve_pages * (long)(PAGE_SIZE / 1024),
    global_page_state(NR_FREE_PAGES) *
(long)(PAGE_SIZE / 1024),
    global_page_state(NR_FILE_PAGES) *
(long)(PAGE_SIZE / 1024),
    global_page_state(NR_SLAB_RECLAIMABLE) *
(long)(PAGE_SIZE / 1024),
    global_page_state(NR_SLAB_UNRECLAIMABLE) *
(long)(PAGE_SIZE / 1024),
    global_page_state(NR_SLAB_RECLAIMABLE) *
(long)(PAGE_SIZE / 1024) +
    global_page_state(NR_SLAB_UNRECLAIMABLE) *
(long)(PAGE_SIZE / 1024),
    sc->gfp_mask);


if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
show_mem(SHOW_MEM_FILTER_NODES);
dump_tasks(NULL, NULL);
show_mem_call_notifiers();
}


lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
rem -= selected_tasksize;
rcu_read_unlock();
/* give the system time to free up the memory */
msleep_interruptible(20);
trace_almk_shrink(selected_tasksize, ret,
other_free, other_file, selected_oom_score_adj);
} else {
trace_almk_shrink(1, ret, other_free, other_file, 0);
rcu_read_unlock();
}


lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
    nr_to_scan, sc->gfp_mask, rem);
mutex_unlock(&scan_mutex);
return rem;
}
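adjust_minadj(), called near the top of lowmem_shrink(), is where the vmpressure state feeds back into the kill threshold. Again this is only a sketch based on the msm kernel: shift_adj is the flag set by the vmpressure callback above, and adj_max_shift and the VMPRESSURE_ADJUST_* return codes come from that tree and may differ between kernel versions.

/* Sketch (not verbatim kernel code): lower min_score_adj under memory pressure. */
static int adjust_minadj(short *min_score_adj)
{
	int ret = VMPRESSURE_NO_ADJUST;

	if (!enable_adaptive_lmk)
		return 0;

	if (atomic_read(&shift_adj) &&
	    *min_score_adj > adj_max_shift) {
		if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
			ret = VMPRESSURE_ADJUST_ENCROACH;
		else
			ret = VMPRESSURE_ADJUST_NORMAL;
		/* widen the kill range: processes down to adj_max_shift become candidates */
		*min_score_adj = adj_max_shift;
	}
	atomic_set(&shift_adj, 0);

	return ret;
}

With this in place, even when free memory is still above every lowmem_minfree threshold (so min_score_adj would otherwise stay at OOM_SCORE_ADJ_MAX + 1 and lowmem_shrink would return early), sustained vmpressure can still force a kill of background processes.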


void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
{
gfp_t gfp_mask;
struct zone *preferred_zone;
struct zonelist *zonelist;
enum zone_type high_zoneidx, classzone_idx;
unsigned long balance_gap;
int use_cma_pages;


gfp_mask = sc->gfp_mask;
adjust_gfp_mask(&gfp_mask);


zonelist = node_zonelist(0, gfp_mask);
high_zoneidx = gfp_zone(gfp_mask);
//find the first (preferred) zone in the zonelist for this allocation
first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); 
classzone_idx = zone_idx(preferred_zone);
use_cma_pages = can_use_cma_pages(gfp_mask);


//balance_gap: the smaller of the zone's low-watermark page count and present_pages / KSWAPD_ZONE_BALANCE_GAP_RATIO (rounded up)
balance_gap = min(low_wmark_pages(preferred_zone),
 (preferred_zone->present_pages +
  KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  KSWAPD_ZONE_BALANCE_GAP_RATIO);

//the current task is kswapd and the zone watermark check passes, i.e. free pages exceed the high watermark plus SWAP_CLUSTER_MAX plus balance_gap
if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
 high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
 balance_gap, 0, 0))) {
if (lmk_fast_run)//lmk_fast_run is enabled by default, so this branch is normally taken
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
      other_file, use_cma_pages);
else
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
      NULL, use_cma_pages);


if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
if (!use_cma_pages) {
*other_free -= min(
 preferred_zone->lowmem_reserve[_ZONE]
 + zone_page_state(
   preferred_zone, NR_FREE_CMA_PAGES),
 zone_page_state(
   preferred_zone, NR_FREE_PAGES));
} else {
*other_free -=
 preferred_zone->lowmem_reserve[_ZONE];
}
} else {
*other_free -= zone_page_state(preferred_zone,
     NR_FREE_PAGES);
}


lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
    "ofree %d, %d\n", *other_free, *other_file);
} else {
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
      other_file, use_cma_pages);


if (!use_cma_pages) {
*other_free -=
 zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
}


lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
    "%d\n", *other_free, *other_file);
}
}
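In short, tune_lmk_param subtracts from other_free and other_file the pages the current allocation context could not actually use: CMA free pages when the allocation is not allowed to land in CMA, the lowmem_reserve kept by lower zones, and pages in zones above the preferred zone. A rough worked example for the balance_gap computed above, assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 (its value in these kernel versions): for a zone with 262144 present pages (1 GB at 4 KB per page), (262144 + 99) / 100 = 2622 pages, so balance_gap is the smaller of 2622 pages and the zone's low watermark, and the kswapd fast path above is taken only when free pages exceed the high watermark plus SWAP_CLUSTER_MAX plus this gap.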


//walk the zonelist and adjust other_free / other_file zone by zone
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
int *other_free, int *other_file,
int use_cma_pages)
{
struct zone *zone;
struct zoneref *zoneref;
int zone_idx;


for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
zone_idx = zonelist_zone_idx(zoneref);
if (zone_idx == ZONE_MOVABLE) {
if (!use_cma_pages && other_free)
*other_free -=
   zone_page_state(zone, NR_FREE_CMA_PAGES);
continue;
}


if (zone_idx > classzone_idx) {
if (other_free != NULL)
*other_free -= zone_page_state(zone,
      NR_FREE_PAGES);
if (other_file != NULL)
*other_file -= zone_page_state(zone,
      NR_FILE_PAGES)
- zone_page_state(zone, NR_SHMEM)
- zone_page_state(zone, NR_SWAPCACHE);
} else if (zone_idx < classzone_idx) {
if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
   other_free) {
if (!use_cma_pages) {
*other_free -= min(
 zone->lowmem_reserve[classzone_idx] +
 zone_page_state(
   zone, NR_FREE_CMA_PAGES),
 zone_page_state(
   zone, NR_FREE_PAGES));
} else {
*other_free -=
 zone->lowmem_reserve[classzone_idx];
}
} else {
if (other_free)
*other_free -=
 zone_page_state(zone, NR_FREE_PAGES);
}
}
}
}