shrink_page_list()

Source: Internet | Tag: network vulnerability scanning tools | Editor: programming blog | Time: 2024/04/24 12:38

static unsigned longshrink_page_list(struct list_head *page_list, struct zone *zone,

                                  struct scan_control *sc,enum ttu_flagsttu_flags,

                                  unsigned long *ret_nr_dirty,unsigned long*ret_nr_unqueued_dirty,

                                  unsigned long *ret_nr_congested,unsignedlong *ret_nr_writeback,

                                  unsigned long *ret_nr_immediate,boolforce_reclaim)

{

       LIST_HEAD(ret_pages);

       LIST_HEAD(free_pages);

       intpgactivate = 0;

       unsignedlong nr_unqueued_dirty = 0;

       unsignedlong nr_dirty = 0;

       unsignedlong nr_congested = 0;

       unsignedlong nr_reclaimed = 0;

       unsignedlong nr_writeback = 0;

       unsignedlong nr_immediate = 0;

 

       cond_resched();

 

       while (!list_empty(page_list)) {

              structaddress_space *mapping;

              structpage *page;

              intmay_enter_fs;

              enumpage_references references = PAGEREF_RECLAIM_CLEAN;

              booldirty, writeback;

              boollazyfree = false;

              intret = SWAP_SUCCESS;

 

              cond_resched();

 

              page= lru_to_page(page_list);

              list_del(&page->lru);

 

              if (!trylock_page(page))

                     gotokeep;

 

              VM_BUG_ON_PAGE(PageActive(page),page);

              VM_BUG_ON_PAGE(page_zone(page)!= zone, page);

 

              sc->nr_scanned++;

 

              if(unlikely(!page_evictable(page)))

                     gotocull_mlocked;

 

              if (!sc->may_unmap &&page_mapped(page))

                     gotokeep_locked;

 

              /*Double the slab pressure for mapped and swapcache pages */

              if (page_mapped(page) ||PageSwapCache(page))

                     sc->nr_scanned++;

 

              may_enter_fs= (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) &&

(sc->gfp_mask & __GFP_IO));

 

              page_check_dirty_writeback(page,&dirty, &writeback);

              if(dirty || writeback)

                     nr_dirty++;

 

              if(dirty && !writeback)

                     nr_unqueued_dirty++;

 

              mapping = page_mapping(page);

              if(((dirty || writeback) && mapping &&inode_write_congested(mapping->host)) ||

                            (writeback&& PageReclaim(page)))

                     nr_congested++;

 

              if(PageWriteback(page)) {

                     /*Case 1 above */

                     if (current_is_kswapd() &&PageReclaim(page) &&

 test_bit(ZONE_WRITEBACK, &zone->flags)) {

                            nr_immediate++;

                            gotokeep_locked;

 

                     /*Case 2 above */

                     }else if (sane_reclaim(sc) || !PageReclaim(page) || !may_enter_fs) {

                            SetPageReclaim(page);

                            nr_writeback++;

                            gotokeep_locked;

 

                     /*Case 3 above */

                     } else {

                            unlock_page(page);

                            wait_on_page_writeback(page);

                            /*then go back and try same page again */

                            list_add_tail(&page->lru,page_list);

                            continue;

                     }

              }

 

              if(!force_reclaim)

                     references= page_check_references(page, sc);

 

              switch(references) {

              case PAGEREF_ACTIVATE:

                     gotoactivate_locked;

              case PAGEREF_KEEP:

                     gotokeep_locked;

              case PAGEREF_RECLAIM:

              case PAGEREF_RECLAIM_CLEAN:

                     ;/* try to reclaim the page below */

              }

 

              if (PageAnon(page) &&!PageSwapCache(page)) {

                     if(!(sc->gfp_mask & __GFP_IO))

                            gotokeep_locked;

                     if(!add_to_swap(page, page_list))

                            gotoactivate_locked;

                     lazyfree= true;

                     may_enter_fs= 1;

 

                     /*Adding to swap updated mapping */

                     mapping= page_mapping(page);

              }

 

              if (page_mapped(page) && mapping) {

                     switch (ret = try_to_unmap(page, lazyfree ?

(ttu_flags |TTU_BATCH_FLUSH | TTU_LZFREE) :

(ttu_flags |TTU_BATCH_FLUSH))) {

                     case SWAP_FAIL:

                            gotoactivate_locked;

                     case SWAP_AGAIN:

                            gotokeep_locked;

                     case SWAP_MLOCK:

                            gotocull_mlocked;

                     case SWAP_LZFREE:

                            gotolazyfree;

                     case SWAP_SUCCESS:

                            ;/* try to free the page below */

                     }

              }

 

              if (PageDirty(page)) {

                     if(page_is_file_cache(page) && (!current_is_kswapd() || !test_bit(ZONE_DIRTY,

                     &zone->flags))){

                            inc_zone_page_state(page,NR_VMSCAN_IMMEDIATE);

                            SetPageReclaim(page);

                            gotokeep_locked;

                     }

 

                     if(references == PAGEREF_RECLAIM_CLEAN)

                            gotokeep_locked;

                     if(!may_enter_fs)

                            gotokeep_locked;

                     if(!sc->may_writepage)

                            gotokeep_locked;

 

                     try_to_unmap_flush_dirty();

                     switch(pageout(page, mapping, sc)) {

                     casePAGE_KEEP:

                            gotokeep_locked;

                     casePAGE_ACTIVATE:

                            gotoactivate_locked;

                     casePAGE_SUCCESS:

                            if(PageWriteback(page))

                                   gotokeep;

                            if(PageDirty(page))

                                   gotokeep;

 

                            if(!trylock_page(page))

                                   gotokeep;

                            if(PageDirty(page) || PageWriteback(page))

                                   gotokeep_locked;

                            mapping= page_mapping(page);

                     casePAGE_CLEAN:

                            ;/* try to free the page below */

                     }

              }

 

              if (page_has_private(page)) {

                     if(!try_to_release_page(page, sc->gfp_mask))

                            gotoactivate_locked;

                     if(!mapping && page_count(page) == 1) {

                            unlock_page(page);

                            if(put_page_testzero(page))

                                   gotofree_it;

                            else{

                                   nr_reclaimed++;

                                   continue;

                            }

                     }

              }

 

lazyfree:

              if(!mapping || !__remove_mapping(mapping, page, true))

                     gotokeep_locked;

 

              __ClearPageLocked(page);

free_it:

              if(ret == SWAP_LZFREE)

                     count_vm_event(PGLAZYFREED);

 

              nr_reclaimed++;

 

              list_add(&page->lru,&free_pages);

              continue;

 

cull_mlocked:

              if (PageSwapCache(page))

                     try_to_free_swap(page);

              unlock_page(page);

              list_add(&page->lru,&ret_pages);

              continue;

 

activate_locked:

              if(PageSwapCache(page) && mem_cgroup_swap_full(page))

                     try_to_free_swap(page);

              VM_BUG_ON_PAGE(PageActive(page),page);

              SetPageActive(page);

              pgactivate++;

keep_locked:

              unlock_page(page);

keep:

              list_add(&page->lru,&ret_pages);

              VM_BUG_ON_PAGE(PageLRU(page)|| PageUnevictable(page), page);

       }

 

       mem_cgroup_uncharge_list(&free_pages);

       try_to_unmap_flush();

       free_hot_cold_page_list(&free_pages,true);

 

       list_splice(&ret_pages,page_list);

       count_vm_events(PGACTIVATE,pgactivate);

 

       *ret_nr_dirty+= nr_dirty;

       *ret_nr_congested+= nr_congested;

       *ret_nr_unqueued_dirty+= nr_unqueued_dirty;

       *ret_nr_writeback+= nr_writeback;

       *ret_nr_immediate+= nr_immediate;

       returnnr_reclaimed;

}

0 0
Original content — reader clicks