- 论坛徽章:
- 13
|
回复 2# Buddy_Zhang1
我最近这样往论坛发代码,不知道会不会被公司抓起来!我可是冒了好大危险的,你一定要帮我好好想想呀。
- /**
- * page_launder - clean dirty inactive pages, move to inactive_clean list
- * @gfp_mask: what operations we are allowed to do
- * @sync: should we wait synchronously for the cleaning of pages
- *
- * When this function is called, we are most likely low on free +
- * inactive_clean pages. Since we want to refill those pages as
- * soon as possible, we'll make two loops over the inactive list,
- * one to move the already cleaned pages to the inactive_clean lists
- * and one to (often asynchronously) clean the dirty inactive pages.
- *
- * In situations where kswapd cannot keep up, user processes will
- * end up calling this function. Since the user process needs to
- * have a page before it can continue with its allocation, we'll
- * do synchronous page flushing in that case.
- *
- * This code is heavily inspired by the FreeBSD source code. Thanks
- * go out to Matthew Dillon.
- */
- #define MAX_LAUNDER (4 * (1 << page_cluster))
- int page_launder(int gfp_mask, int sync)
- {
- int launder_loop, maxscan, cleaned_pages, maxlaunder;
- int can_get_io_locks;
- struct list_head * page_lru;
- struct page * page;
- /*
- * We can only grab the IO locks (eg. for flushing dirty
- * buffers to disk) if __GFP_IO is set.
- */
- can_get_io_locks = gfp_mask & __GFP_IO;
- launder_loop = 0;
- maxlaunder = 0;
- cleaned_pages = 0;
- dirty_page_rescan:
- spin_lock(&pagemap_lru_lock);
- maxscan = nr_inactive_dirty_pages;
- while ((page_lru = inactive_dirty_list.prev) != &inactive_dirty_list &&
- maxscan-- > 0) {
- page = list_entry(page_lru, struct page, lru);
- /* Wrong page on list?! (list corruption, should not happen) */
- if (!PageInactiveDirty(page)) {
- printk("VM: page_launder, wrong page on list.\n");
- list_del(page_lru);
- nr_inactive_dirty_pages--;
- page->zone->inactive_dirty_pages--;
- continue;
- }
- /* Page is or was in use? Move it to the active list. */
- if (PageTestandClearReferenced(page) || page->age > 0 ||
- (!page->buffers && page_count(page) > 1) ||
- page_ramdisk(page)) {
- del_page_from_inactive_dirty_list(page);
- add_page_to_active_list(page);
- continue;
- }
- /*
- * The page is locked. IO in progress?
- * Move it to the back of the list.
- */
- if (TryLockPage(page)) {
- list_del(page_lru);
- list_add(page_lru, &inactive_dirty_list);
- continue;
- }
- /*
- * Dirty swap-cache page? Write it out if
- * last copy..
- */
- if (PageDirty(page)) {
- int (*writepage)(struct page *) = page->mapping->a_ops->writepage;
- int result;
- if (!writepage)
- goto page_active;
- /* First time through? Move it to the back of the list */
- if (!launder_loop) {
- list_del(page_lru);
- list_add(page_lru, &inactive_dirty_list);
- UnlockPage(page);
- continue;
- }
- /* OK, do a physical asynchronous write to swap. */
- ClearPageDirty(page);
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
- result = writepage(page);
- page_cache_release(page);
- /* And re-start the thing.. */
- spin_lock(&pagemap_lru_lock);
- if (result != 1)
- continue;
- /* writepage refused to do anything */
- set_page_dirty(page);
- goto page_active;
- }
- /*
- * If the page has buffers, try to free the buffer mappings
- * associated with this page. If we succeed we either free
- * the page (in case it was a buffercache only page) or we
- * move the page to the inactive_clean list.
- *
- * On the first round, we should free all previously cleaned
- * buffer pages
- */
- if (page->buffers) {
- int wait, clearedbuf;
- int freed_page = 0;
- /*
- * Since we might be doing disk IO, we have to
- * drop the spinlock and take an extra reference
- * on the page so it doesn't go away from under us.
- */
- del_page_from_inactive_dirty_list(page);
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
- /* Will we do (asynchronous) IO? */
- if (launder_loop && maxlaunder == 0 && sync)
- wait = 2; /* Synchrounous IO */
- else if (launder_loop && maxlaunder-- > 0)
- wait = 1; /* Async IO */
- else
- wait = 0; /* No IO */
- /* Try to free the page buffers. */
- clearedbuf = try_to_free_buffers(page, wait);
- /*
- * Re-take the spinlock. Note that we cannot
- * unlock the page yet since we're still
- * accessing the page_struct here...
- */
- spin_lock(&pagemap_lru_lock);
- /* The buffers were not freed. */
- if (!clearedbuf) {
- add_page_to_inactive_dirty_list(page);
- /* The page was only in the buffer cache. */
- } else if (!page->mapping) {
- atomic_dec(&buffermem_pages);
- freed_page = 1;
- cleaned_pages++;
- /* The page has more users besides the cache and us. */
- } else if (page_count(page) > 2) {
- add_page_to_active_list(page);
- /* OK, we "created" a freeable page. */
- } else /* page->mapping && page_count(page) == 2 */ {
- add_page_to_inactive_clean_list(page);
- cleaned_pages++;
- }
- /*
- * Unlock the page and drop the extra reference.
- * We can only do it here because we ar accessing
- * the page struct above.
- */
- UnlockPage(page);
- page_cache_release(page);
- /*
- * If we're freeing buffer cache pages, stop when
- * we've got enough free memory.
- */
- if (freed_page && !free_shortage())
- break;
- continue;
- } else if (page->mapping && !PageDirty(page)) {
- /*
- * If a page had an extra reference in
- * deactivate_page(), we will find it here.
- * Now the page is really freeable, so we
- * move it to the inactive_clean list.
- */
- del_page_from_inactive_dirty_list(page);
- add_page_to_inactive_clean_list(page);
- UnlockPage(page);
- cleaned_pages++;
- } else {
- page_active:
- /*
- * OK, we don't know what to do with the page.
- * It's no use keeping it here, so we move it to
- * the active list.
- */
- del_page_from_inactive_dirty_list(page);
- add_page_to_active_list(page);
- UnlockPage(page);
- }
- }
- spin_unlock(&pagemap_lru_lock);
- /*
- * If we don't have enough free pages, we loop back once
- * to queue the dirty pages for writeout. When we were called
- * by a user process (that /needs/ a free page) and we didn't
- * free anything yet, we wait synchronously on the writeout of
- * MAX_SYNC_LAUNDER pages.
- *
- * We also wake up bdflush, since bdflush should, under most
- * loads, flush out the dirty pages before we have to wait on
- * IO.
- */
- if (can_get_io_locks && !launder_loop && free_shortage()) {
- launder_loop = 1;
- /* If we cleaned pages, never do synchronous IO. */
- if (cleaned_pages)
- sync = 0;
- /* We only do a few "out of order" flushes. */
- maxlaunder = MAX_LAUNDER;
- /* Kflushd takes care of the rest. */
- wakeup_bdflush(0);
- goto dirty_page_rescan;
- }
- /* Return the number of pages moved to the inactive_clean list. */
- return cleaned_pages;
- }
复制代码 |
|