diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/filemap.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/filemap.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/filemap.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/filemap.c 2006-03-12 07:20:05.000000000 -0500 @@ -111,28 +111,28 @@ : * is safe. The caller must hold a write_lock on the mapping's tree_lock. : */ :void __remove_from_page_cache(struct page *page) - 135 0.0017 0 0 0 0 3 1.4e-04 :{ /* __remove_from_page_cache total: 258 0.0032 0 0 0 0 57 0.0027 */ - 5 6.2e-05 0 0 0 0 0 0 : struct address_space *mapping = page->mapping; + 104 0.0012 0 0 0 0 10 4.5e-04 :{ /* __remove_from_page_cache total: 387 0.0044 0 0 0 0 174 0.0079 */ + 7 7.9e-05 0 0 0 0 0 0 : struct address_space *mapping = page->mapping; : - 3 3.7e-05 0 0 0 0 0 0 : radix_tree_delete(&mapping->page_tree, page->index); - 8 1.0e-04 0 0 0 0 0 0 : page->mapping = NULL; - 18 2.2e-04 0 0 0 0 14 6.7e-04 : mapping->nrpages--; + 5 5.6e-05 0 0 0 0 0 0 : radix_tree_delete(&mapping->page_tree, page->index); + 22 2.5e-04 0 0 0 0 24 0.0011 : page->mapping = NULL; + 58 6.5e-04 0 0 0 0 39 0.0018 : mapping->nrpages--; : pagecache_acct(-1); - 33 4.1e-04 0 0 0 0 9 4.3e-04 :} + 49 5.5e-04 0 0 0 0 12 5.4e-04 :} : :void remove_from_page_cache(struct page *page) - 88 0.0011 0 0 0 0 21 0.0010 :{ /* remove_from_page_cache total: 253 0.0032 0 0 0 0 116 0.0055 */ + 67 7.5e-04 0 0 0 0 11 5.0e-04 :{ /* remove_from_page_cache total: 211 0.0024 0 0 1 0.0339 84 0.0038 */ : struct address_space *mapping = page->mapping; : - 27 3.4e-04 0 0 0 0 7 3.3e-04 : BUG_ON(!PageLocked(page)); + 32 3.6e-04 0 0 1 0.0339 7 3.2e-04 : BUG_ON(!PageLocked(page)); : - 11 1.4e-04 0 0 0 0 4 1.9e-04 : write_lock_irq(&mapping->tree_lock); - 61 7.6e-04 0 0 0 0 23 0.0011 : __remove_from_page_cache(page); - 29 3.6e-04 0 0 0 0 14 6.7e-04 : write_unlock_irq(&mapping->tree_lock); - 27 3.4e-04 0 0 0 0 36 0.0017 :} + 9 1.0e-04 0 0 0 0 3 1.4e-04 : write_lock_irq(&mapping->tree_lock); + 46 5.2e-04 0 0 0 0 10 4.5e-04 : __remove_from_page_cache(page); + 26 2.9e-04 0 0 0 0 13 5.9e-04 : write_unlock_irq(&mapping->tree_lock); + 24 2.7e-04 0 0 0 0 30 0.0014 :} : :static int sync_page(void *word) - 10 1.2e-04 0 0 0 0 0 0 :{ /* sync_page total: 21 2.6e-04 0 0 0 0 1 4.8e-05 */ + 46 5.2e-04 0 0 0 0 7 3.2e-04 :{ /* sync_page total: 128 0.0014 0 0 0 0 14 6.4e-04 */ : struct address_space *mapping; : struct page *page; : @@ -161,11 +161,11 @@ : */ : smp_mb(); : mapping = page_mapping(page); - 4 5.0e-05 0 0 0 0 0 0 : if (mapping && mapping->a_ops && mapping->a_ops->sync_page) + 18 2.0e-04 0 0 0 0 2 9.1e-05 : if (mapping && mapping->a_ops && mapping->a_ops->sync_page) : mapping->a_ops->sync_page(page); : io_schedule(); : return 0; - 6 7.5e-05 0 0 0 0 1 4.8e-05 :} + 61 6.9e-04 0 0 0 0 5 2.3e-04 :} : :/** : * filemap_fdatawrite_range - start writeback against all of a mapping's @@ -232,7 +232,7 @@ : */ :static int wait_on_page_writeback_range(struct address_space *mapping, : pgoff_t start, pgoff_t end) - :{ /* wait_on_page_writeback_range total: 1 1.2e-05 0 0 0 0 0 0 */ + :{ : struct pagevec pvec; : int nr_pages; : int ret = 0; @@ -243,7 +243,7 @@ : : pagevec_init(&pvec, 0); : index = start; - 1 1.2e-05 0 0 0 0 0 0 : while ((index <= end) && + : while ((index <= end) && : (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, : PAGECACHE_TAG_WRITEBACK, : min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { @@ -338,7 +338,7 @@ : if (i_size == 0) : return 0; : - : return 
wait_on_page_writeback_range(mapping, 0, + 0 0 0 0 0 0 1 4.5e-05 : return wait_on_page_writeback_range(mapping, 0, : (i_size - 1) >> PAGE_CACHE_SHIFT); :} :EXPORT_SYMBOL(filemap_fdatawait); @@ -394,36 +394,36 @@ : */ :int add_to_page_cache(struct page *page, struct address_space *mapping, : pgoff_t offset, gfp_t gfp_mask) - 331 0.0041 0 0 0 0 113 0.0054 :{ /* add_to_page_cache total: 1620 0.0202 0 0 1 0.0351 756 0.0361 */ - 64 8.0e-04 0 0 0 0 11 5.3e-04 : int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); + 458 0.0052 0 0 0 0 151 0.0069 :{ /* add_to_page_cache total: 1790 0.0201 0 0 0 0 933 0.0424 */ + 69 7.8e-04 0 0 0 0 10 4.5e-04 : int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); : - 13 1.6e-04 0 0 0 0 2 9.6e-05 : if (error == 0) { - 8 1.0e-04 0 0 0 0 11 5.3e-04 : write_lock_irq(&mapping->tree_lock); - 378 0.0047 0 0 1 0.0351 218 0.0104 : error = radix_tree_insert(&mapping->page_tree, offset, page); - 62 7.7e-04 0 0 0 0 57 0.0027 : if (!error) { + 9 1.0e-04 0 0 0 0 6 2.7e-04 : if (error == 0) { + 7 7.9e-05 0 0 0 0 10 4.5e-04 : write_lock_irq(&mapping->tree_lock); + 397 0.0045 0 0 0 0 241 0.0109 : error = radix_tree_insert(&mapping->page_tree, offset, page); + 45 5.1e-04 0 0 0 0 46 0.0021 : if (!error) { : page_cache_get(page); : SetPageLocked(page); - 17 2.1e-04 0 0 0 0 10 4.8e-04 : page->mapping = mapping; + 16 1.8e-04 0 0 0 0 15 6.8e-04 : page->mapping = mapping; : page->index = offset; : mapping->nrpages++; : pagecache_acct(1); : } - 49 6.1e-04 0 0 0 0 8 3.8e-04 : write_unlock_irq(&mapping->tree_lock); + 100 0.0011 0 0 0 0 6 2.7e-04 : write_unlock_irq(&mapping->tree_lock); : radix_tree_preload_end(); : } : return error; - 144 0.0018 0 0 0 0 74 0.0035 :} + 154 0.0017 0 0 0 0 119 0.0054 :} : :EXPORT_SYMBOL(add_to_page_cache); : :int add_to_page_cache_lru(struct page *page, struct address_space *mapping, : pgoff_t offset, gfp_t gfp_mask) - 5 6.2e-05 0 0 0 0 7 3.3e-04 :{ /* add_to_page_cache_lru total: 68 8.5e-04 0 0 0 0 20 9.6e-04 */ - 46 5.7e-04 0 0 0 0 5 2.4e-04 : int ret = add_to_page_cache(page, mapping, offset, gfp_mask); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : if (ret == 0) - : lru_cache_add(page); + 23 2.6e-04 0 0 0 0 12 5.4e-04 :{ /* add_to_page_cache_lru total: 159 0.0018 0 0 0 0 29 0.0013 */ + 94 0.0011 0 0 0 0 10 4.5e-04 : int ret = add_to_page_cache(page, mapping, offset, gfp_mask); + 4 4.5e-05 0 0 0 0 0 0 : if (ret == 0) + 2 2.2e-05 0 0 0 0 0 0 : lru_cache_add(page); : return ret; - 16 2.0e-04 0 0 0 0 7 3.3e-04 :} + 36 4.0e-04 0 0 0 0 7 3.2e-04 :} : :/* : * In order to wait for pages to become available there must be @@ -436,11 +436,11 @@ : * collisions. 
: */ :static wait_queue_head_t *page_waitqueue(struct page *page) - 76 9.5e-04 0 0 0 0 38 0.0018 :{ /* page_waitqueue total: 2121 0.0264 0 0 0 0 617 0.0295 */ - 1104 0.0138 0 0 0 0 306 0.0146 : const struct zone *zone = page_zone(page); + 107 0.0012 0 0 0 0 51 0.0023 :{ /* page_waitqueue total: 2676 0.0301 0 0 1 0.0339 816 0.0371 */ + 1269 0.0143 0 0 0 0 318 0.0144 : const struct zone *zone = page_zone(page); : : return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; - 48 6.0e-04 0 0 0 0 10 4.8e-04 :} + 62 7.0e-04 0 0 1 0.0339 25 0.0011 :} : :static inline void wake_up_page(struct page *page, int bit) :{ @@ -448,13 +448,13 @@ :} : :void fastcall wait_on_page_bit(struct page *page, int bit_nr) - :{ + :{ /* wait_on_page_bit total: 0 0 0 0 0 0 1 4.5e-05 */ : DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); : : if (test_bit(bit_nr, &page->flags)) : __wait_on_bit(page_waitqueue(page), &wait, sync_page, : TASK_UNINTERRUPTIBLE); - :} + 0 0 0 0 0 0 1 4.5e-05 :} :EXPORT_SYMBOL(wait_on_page_bit); : :/** @@ -473,27 +473,27 @@ : * parallel wait_on_page_locked()). : */ :void fastcall unlock_page(struct page *page) - 1119 0.0139 0 0 0 0 424 0.0203 :{ /* unlock_page total: 2008 0.0250 0 0 0 0 560 0.0268 */ + 1446 0.0163 0 0 1 0.0339 557 0.0253 :{ /* unlock_page total: 2565 0.0288 0 0 1 0.0339 813 0.0369 */ : smp_mb__before_clear_bit(); - 45 5.6e-04 0 0 0 0 8 3.8e-04 : if (!TestClearPageLocked(page)) + 62 7.0e-04 0 0 0 0 22 1.0e-03 : if (!TestClearPageLocked(page)) : BUG(); : smp_mb__after_clear_bit(); : wake_up_page(page, PG_locked); - 450 0.0056 0 0 0 0 53 0.0025 :} + 492 0.0055 0 0 0 0 85 0.0039 :} :EXPORT_SYMBOL(unlock_page); : :/* : * End writeback against a page. : */ :void end_page_writeback(struct page *page) - 47 5.9e-04 0 0 0 0 13 6.2e-04 :{ /* end_page_writeback total: 137 0.0017 0 0 0 0 38 0.0018 */ - 8 1.0e-04 0 0 0 0 5 2.4e-04 : if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) { - 33 4.1e-04 0 0 0 0 2 9.6e-05 : if (!test_clear_page_writeback(page)) + 48 5.4e-04 0 0 0 0 34 0.0015 :{ /* end_page_writeback total: 129 0.0015 0 0 1 0.0339 73 0.0033 */ + 12 1.3e-04 0 0 0 0 11 5.0e-04 : if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) { + 20 2.2e-04 0 0 1 0.0339 15 6.8e-04 : if (!test_clear_page_writeback(page)) : BUG(); : } : smp_mb__after_clear_bit(); : wake_up_page(page, PG_writeback); - 26 3.2e-04 0 0 0 0 12 5.7e-04 :} + 28 3.1e-04 0 0 0 0 8 3.6e-04 :} :EXPORT_SYMBOL(end_page_writeback); : :/* @@ -505,12 +505,12 @@ : * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. : */ :void fastcall __lock_page(struct page *page) - 10 1.2e-04 0 0 0 0 0 0 :{ /* __lock_page total: 21 2.6e-04 0 0 0 0 1 4.8e-05 */ - 1 1.2e-05 0 0 0 0 0 0 : DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); + 23 2.6e-04 0 0 0 0 0 0 :{ /* __lock_page total: 77 8.7e-04 0 0 0 0 8 3.6e-04 */ + 8 9.0e-05 0 0 0 0 2 9.1e-05 : DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); : - : __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, + 2 2.2e-05 0 0 0 0 0 0 : __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, : TASK_UNINTERRUPTIBLE); - 10 1.2e-04 0 0 0 0 1 4.8e-05 :} + 43 4.8e-04 0 0 0 0 6 2.7e-04 :} :EXPORT_SYMBOL(__lock_page); : :/* @@ -518,16 +518,16 @@ : * hashed page atomically. 
: */ :struct page * find_get_page(struct address_space *mapping, unsigned long offset) - 3536 0.0441 0 0 0 0 1088 0.0520 :{ /* find_get_page total: 27385 0.3412 0 0 7 0.2459 9915 0.4737 */ + 3939 0.0443 0 0 0 0 1031 0.0468 :{ /* find_get_page total: 30603 0.3442 0 0 3 0.1018 9811 0.4456 */ : struct page *page; : - 783 0.0098 0 0 0 0 695 0.0332 : read_lock_irq(&mapping->tree_lock); - 9748 0.1215 0 0 2 0.0702 4204 0.2008 : page = radix_tree_lookup(&mapping->page_tree, offset); - 53 6.6e-04 0 0 1 0.0351 55 0.0026 : if (page) + 776 0.0087 0 0 0 0 511 0.0232 : read_lock_irq(&mapping->tree_lock); + 12228 0.1375 0 0 3 0.1018 3880 0.1762 : page = radix_tree_lookup(&mapping->page_tree, offset); + 47 5.3e-04 0 0 0 0 53 0.0024 : if (page) : page_cache_get(page); - 213 0.0027 0 0 0 0 32 0.0015 : read_unlock_irq(&mapping->tree_lock); + 222 0.0025 0 0 0 0 32 0.0015 : read_unlock_irq(&mapping->tree_lock); : return page; - 8555 0.1066 0 0 3 0.1054 2205 0.1053 :} + 8562 0.0963 0 0 0 0 2379 0.1080 :} : :EXPORT_SYMBOL(find_get_page); : @@ -535,16 +535,16 @@ : * Same as above, but trylock it instead of incrementing the count. : */ :struct page *find_trylock_page(struct address_space *mapping, unsigned long offset) - :{ + 2 2.2e-05 0 0 0 0 1 4.5e-05 :{ /* find_trylock_page total: 38 4.3e-04 0 0 0 0 23 0.0010 */ : struct page *page; : - : read_lock_irq(&mapping->tree_lock); - : page = radix_tree_lookup(&mapping->page_tree, offset); - : if (page && TestSetPageLocked(page)) + 1 1.1e-05 0 0 0 0 2 9.1e-05 : read_lock_irq(&mapping->tree_lock); + 10 1.1e-04 0 0 0 0 4 1.8e-04 : page = radix_tree_lookup(&mapping->page_tree, offset); + 6 6.7e-05 0 0 0 0 2 9.1e-05 : if (page && TestSetPageLocked(page)) : page = NULL; : read_unlock_irq(&mapping->tree_lock); : return page; - :} + 11 1.2e-04 0 0 0 0 6 2.7e-04 :} : :EXPORT_SYMBOL(find_trylock_page); : @@ -561,15 +561,15 @@ : */ :struct page *find_lock_page(struct address_space *mapping, : unsigned long offset) - 252 0.0031 0 0 0 0 172 0.0082 :{ /* find_lock_page total: 1468 0.0183 0 0 0 0 627 0.0300 */ + 296 0.0033 0 0 0 0 161 0.0073 :{ /* find_lock_page total: 1377 0.0155 0 0 0 0 585 0.0266 */ : struct page *page; : - 1 1.2e-05 0 0 0 0 0 0 : read_lock_irq(&mapping->tree_lock); + : read_lock_irq(&mapping->tree_lock); :repeat: - 259 0.0032 0 0 0 0 146 0.0070 : page = radix_tree_lookup(&mapping->page_tree, offset); - 28 3.5e-04 0 0 0 0 13 6.2e-04 : if (page) { + 218 0.0025 0 0 0 0 154 0.0070 : page = radix_tree_lookup(&mapping->page_tree, offset); + 30 3.4e-04 0 0 0 0 19 8.6e-04 : if (page) { : page_cache_get(page); - 3 3.7e-05 0 0 0 0 1 4.8e-05 : if (TestSetPageLocked(page)) { + 4 4.5e-05 0 0 0 0 0 0 : if (TestSetPageLocked(page)) { : read_unlock_irq(&mapping->tree_lock); : __lock_page(page); : read_lock_irq(&mapping->tree_lock); @@ -583,9 +583,9 @@ : } : } : } - 209 0.0026 0 0 0 0 38 0.0018 : read_unlock_irq(&mapping->tree_lock); + 198 0.0022 0 0 0 0 41 0.0019 : read_unlock_irq(&mapping->tree_lock); : return page; - 145 0.0018 0 0 0 0 47 0.0022 :} + 148 0.0017 0 0 0 0 38 0.0017 :} : :EXPORT_SYMBOL(find_lock_page); : @@ -609,20 +609,20 @@ : */ :struct page *find_or_create_page(struct address_space *mapping, : unsigned long index, gfp_t gfp_mask) - 25 3.1e-04 0 0 0 0 9 4.3e-04 :{ /* find_or_create_page total: 58 7.2e-04 0 0 0 0 21 0.0010 */ + 37 4.2e-04 0 0 0 0 12 5.4e-04 :{ /* find_or_create_page total: 79 8.9e-04 0 0 0 0 27 0.0012 */ : struct page *page, *cached_page = NULL; : int err; :repeat: - 6 7.5e-05 0 0 0 0 1 4.8e-05 : page = find_lock_page(mapping, index); - : if 
(!page) { - 2 2.5e-05 0 0 0 0 2 9.6e-05 : if (!cached_page) { + 16 1.8e-04 0 0 0 0 2 9.1e-05 : page = find_lock_page(mapping, index); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : if (!page) { + 2 2.2e-05 0 0 0 0 0 0 : if (!cached_page) { : cached_page = alloc_page(gfp_mask); : if (!cached_page) : return NULL; : } - 2 2.5e-05 0 0 0 0 0 0 : err = add_to_page_cache_lru(cached_page, mapping, + 1 1.1e-05 0 0 0 0 1 4.5e-05 : err = add_to_page_cache_lru(cached_page, mapping, : index, gfp_mask); - 16 2.0e-04 0 0 0 0 2 9.6e-05 : if (!err) { + 13 1.5e-04 0 0 0 0 2 9.1e-05 : if (!err) { : page = cached_page; : cached_page = NULL; : } else if (err == -EEXIST) @@ -631,7 +631,7 @@ : if (cached_page) : page_cache_release(cached_page); : return page; - 6 7.5e-05 0 0 0 0 5 2.4e-04 :} + 5 5.6e-05 0 0 0 0 5 2.3e-04 :} : :EXPORT_SYMBOL(find_or_create_page); : @@ -653,18 +653,18 @@ : */ :unsigned find_get_pages(struct address_space *mapping, pgoff_t start, : unsigned int nr_pages, struct page **pages) - 159 0.0020 0 0 0 0 40 0.0019 :{ /* find_get_pages total: 1128 0.0141 0 0 1 0.0351 156 0.0075 */ + 44 4.9e-04 0 0 0 0 9 4.1e-04 :{ /* find_get_pages total: 720 0.0081 0 0 0 0 85 0.0039 */ : unsigned int i; : unsigned int ret; : : read_lock_irq(&mapping->tree_lock); - 75 9.3e-04 0 0 0 0 33 0.0016 : ret = radix_tree_gang_lookup(&mapping->page_tree, + 18 2.0e-04 0 0 0 0 10 4.5e-04 : ret = radix_tree_gang_lookup(&mapping->page_tree, : (void **)pages, start, nr_pages); - 140 0.0017 0 0 0 0 10 4.8e-04 : for (i = 0; i < ret; i++) - 134 0.0017 0 0 1 0.0351 13 6.2e-04 : page_cache_get(pages[i]); - 301 0.0038 0 0 0 0 28 0.0013 : read_unlock_irq(&mapping->tree_lock); + 175 0.0020 0 0 0 0 18 8.2e-04 : for (i = 0; i < ret; i++) + 71 8.0e-04 0 0 0 0 4 1.8e-04 : page_cache_get(pages[i]); + 178 0.0020 0 0 0 0 20 9.1e-04 : read_unlock_irq(&mapping->tree_lock); : return ret; - 95 0.0012 0 0 0 0 15 7.2e-04 :} + 27 3.0e-04 0 0 0 0 4 1.8e-04 :} : :/* : * Like find_get_pages, except we only return pages which are tagged with @@ -672,20 +672,20 @@ : */ :unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, : int tag, unsigned int nr_pages, struct page **pages) - 67 8.3e-04 0 0 0 0 41 0.0020 :{ /* find_get_pages_tag total: 1545 0.0193 0 0 0 0 383 0.0183 */ + 64 7.2e-04 0 0 0 0 29 0.0013 :{ /* find_get_pages_tag total: 1313 0.0148 0 0 0 0 363 0.0165 */ : unsigned int i; : unsigned int ret; : : read_lock_irq(&mapping->tree_lock); - 251 0.0031 0 0 0 0 151 0.0072 : ret = radix_tree_gang_lookup_tag(&mapping->page_tree, + 302 0.0034 0 0 0 0 154 0.0070 : ret = radix_tree_gang_lookup_tag(&mapping->page_tree, : (void **)pages, *index, nr_pages, tag); - 138 0.0017 0 0 0 0 23 0.0011 : for (i = 0; i < ret; i++) - 294 0.0037 0 0 0 0 27 0.0013 : page_cache_get(pages[i]); - 360 0.0045 0 0 0 0 36 0.0017 : if (ret) - 19 2.4e-04 0 0 0 0 4 1.9e-04 : *index = pages[ret - 1]->index + 1; - 13 1.6e-04 0 0 0 0 6 2.9e-04 : read_unlock_irq(&mapping->tree_lock); + 70 7.9e-04 0 0 0 0 17 7.7e-04 : for (i = 0; i < ret; i++) + 264 0.0030 0 0 0 0 35 0.0016 : page_cache_get(pages[i]); + 273 0.0031 0 0 0 0 34 0.0015 : if (ret) + 10 1.1e-04 0 0 0 0 1 4.5e-05 : *index = pages[ret - 1]->index + 1; + 16 1.8e-04 0 0 0 0 4 1.8e-04 : read_unlock_irq(&mapping->tree_lock); : return ret; - 121 0.0015 0 0 0 0 71 0.0034 :} + 103 0.0012 0 0 0 0 65 0.0030 :} : :/* : * Same as grab_cache_page, but do not wait if the page is unavailable. 
@@ -736,8 +736,8 @@ : loff_t *ppos, : read_descriptor_t *desc, : read_actor_t actor) - 1201 0.0150 0 0 0 0 497 0.0237 :{ /* do_generic_mapping_read total: 16147 0.2012 0 0 1 0.0351 4205 0.2009 */ - 78 9.7e-04 0 0 0 0 65 0.0031 : struct inode *inode = mapping->host; + 1290 0.0145 0 0 1 0.0339 441 0.0200 :{ /* do_generic_mapping_read total: 16545 0.1861 0 0 3 0.1018 4148 0.1884 */ + 92 0.0010 0 0 0 0 61 0.0028 : struct inode *inode = mapping->host; : unsigned long index; : unsigned long end_index; : unsigned long offset; @@ -747,48 +747,48 @@ : loff_t isize; : struct page *cached_page; : int error; - 12 1.5e-04 0 0 0 0 3 1.4e-04 : struct file_ra_state ra = *_ra; + 14 1.6e-04 0 0 0 0 1 4.5e-05 : struct file_ra_state ra = *_ra; : : cached_page = NULL; - 65 8.1e-04 0 0 0 0 2 9.6e-05 : index = *ppos >> PAGE_CACHE_SHIFT; + 74 8.3e-04 0 0 0 0 2 9.1e-05 : index = *ppos >> PAGE_CACHE_SHIFT; : next_index = index; - 260 0.0032 0 0 0 0 37 0.0018 : prev_index = ra.prev_page; - 65 8.1e-04 0 0 0 0 20 9.6e-04 : last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; - 21 2.6e-04 0 0 0 0 0 0 : offset = *ppos & ~PAGE_CACHE_MASK; + 218 0.0025 0 0 0 0 36 0.0016 : prev_index = ra.prev_page; + 68 7.6e-04 0 0 0 0 27 0.0012 : last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; + 14 1.6e-04 0 0 0 0 0 0 : offset = *ppos & ~PAGE_CACHE_MASK; : : isize = i_size_read(inode); - 24 3.0e-04 0 0 0 0 8 3.8e-04 : if (!isize) + 31 3.5e-04 0 0 0 0 7 3.2e-04 : if (!isize) : goto out; : - 474 0.0059 0 0 0 0 217 0.0104 : end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + 545 0.0061 0 0 1 0.0339 236 0.0107 : end_index = (isize - 1) >> PAGE_CACHE_SHIFT; : for (;;) { : struct page *page; : unsigned long nr, ret; : : /* nr is the maximum number of bytes to copy from this page */ : nr = PAGE_CACHE_SIZE; - 679 0.0085 0 0 0 0 224 0.0107 : if (index >= end_index) { - 15 1.9e-04 0 0 0 0 12 5.7e-04 : if (index > end_index) + 769 0.0086 0 0 0 0 200 0.0091 : if (index >= end_index) { + 9 1.0e-04 0 0 0 0 13 5.9e-04 : if (index > end_index) : goto out; - 292 0.0036 0 0 0 0 167 0.0080 : nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; - 26 3.2e-04 0 0 0 0 5 2.4e-04 : if (nr <= offset) { + 345 0.0039 0 0 0 0 161 0.0073 : nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + 25 2.8e-04 0 0 0 0 10 4.5e-04 : if (nr <= offset) { : goto out; : } : } - 527 0.0066 0 0 0 0 249 0.0119 : nr = nr - offset; + 597 0.0067 0 0 0 0 266 0.0121 : nr = nr - offset; : - 4 5.0e-05 0 0 0 0 0 0 : cond_resched(); - 134 0.0017 0 0 0 0 44 0.0021 : if (index == next_index) - 114 0.0014 0 0 0 0 49 0.0023 : next_index = page_cache_readahead(mapping, &ra, filp, + 2 2.2e-05 0 0 0 0 1 4.5e-05 : cond_resched(); + 111 0.0012 0 0 0 0 33 0.0015 : if (index == next_index) + 135 0.0015 0 0 0 0 62 0.0028 : next_index = page_cache_readahead(mapping, &ra, filp, : index, last_index - index); : :find_page: - 1402 0.0175 0 0 1 0.0351 594 0.0284 : page = find_get_page(mapping, index); - 1 1.2e-05 0 0 0 0 0 0 : if (unlikely(page == NULL)) { + 1589 0.0179 0 0 0 0 577 0.0262 : page = find_get_page(mapping, index); + : if (unlikely(page == NULL)) { : handle_ra_miss(mapping, &ra, index); : goto no_cached_page; : } - 61 7.6e-04 0 0 0 0 21 0.0010 : if (!PageUptodate(page)) + 53 6.0e-04 0 0 0 0 16 7.3e-04 : if (!PageUptodate(page)) : goto page_not_up_to_date; :page_ok: : @@ -803,8 +803,8 @@ : * When (part of) the same page is read multiple times : * in succession, only mark it as accessed the first time. 
: */ - 115 0.0014 0 0 0 0 35 0.0017 : if (prev_index != index) - 827 0.0103 0 0 0 0 243 0.0116 : mark_page_accessed(page); + 110 0.0012 0 0 0 0 38 0.0017 : if (prev_index != index) + 705 0.0079 0 0 1 0.0339 142 0.0064 : mark_page_accessed(page); : prev_index = index; : : /* @@ -817,13 +817,13 @@ : * "pos" here (the actor routine has to update the user buffer : * pointers and the remaining count). : */ - 3496 0.0436 0 0 0 0 314 0.0150 : ret = actor(desc, page, offset, nr); - 30 3.7e-04 0 0 0 0 16 7.6e-04 : offset += ret; - 49 6.1e-04 0 0 0 0 30 0.0014 : index += offset >> PAGE_CACHE_SHIFT; - 31 3.9e-04 0 0 0 0 21 0.0010 : offset &= ~PAGE_CACHE_MASK; + 3142 0.0353 0 0 0 0 275 0.0125 : ret = actor(desc, page, offset, nr); + 32 3.6e-04 0 0 0 0 10 4.5e-04 : offset += ret; + 50 5.6e-04 0 0 0 0 40 0.0018 : index += offset >> PAGE_CACHE_SHIFT; + 24 2.7e-04 0 0 0 0 23 0.0010 : offset &= ~PAGE_CACHE_MASK; : - 541 0.0067 0 0 0 0 239 0.0114 : page_cache_release(page); - 522 0.0065 0 0 0 0 159 0.0076 : if (ret == nr && desc->count) + 518 0.0058 0 0 0 0 279 0.0127 : page_cache_release(page); + 484 0.0054 0 0 0 0 127 0.0058 : if (ret == nr && desc->count) : continue; : goto out; : @@ -832,15 +832,15 @@ : lock_page(page); : : /* Did it get unhashed before we got the lock? */ - 5 6.2e-05 0 0 0 0 0 0 : if (!page->mapping) { + 11 1.2e-04 0 0 0 0 0 0 : if (!page->mapping) { : unlock_page(page); : page_cache_release(page); : continue; : } : : /* Did somebody else fill it already? */ - : if (PageUptodate(page)) { - 1 1.2e-05 0 0 0 0 0 0 : unlock_page(page); + 1 1.1e-05 0 0 0 0 0 0 : if (PageUptodate(page)) { + 7 7.9e-05 0 0 0 0 2 9.1e-05 : unlock_page(page); : goto page_ok; : } : @@ -856,7 +856,7 @@ : goto readpage_error; : } : - : if (!PageUptodate(page)) { + 1 1.1e-05 0 0 0 0 0 0 : if (!PageUptodate(page)) { : lock_page(page); : if (!PageUptodate(page)) { : if (page->mapping == NULL) { @@ -883,8 +883,8 @@ : * another truncate extends the file - this is desired though). 
: */ : isize = i_size_read(inode); - 1 1.2e-05 0 0 0 0 0 0 : end_index = (isize - 1) >> PAGE_CACHE_SHIFT; - 1 1.2e-05 0 0 0 0 0 0 : if (unlikely(!isize || index > end_index)) { + 1 1.1e-05 0 0 0 0 0 0 : end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + : if (unlikely(!isize || index > end_index)) { : page_cache_release(page); : goto out; : } @@ -898,7 +898,7 @@ : goto out; : } : } - 1 1.2e-05 0 0 0 0 0 0 : nr = nr - offset; + : nr = nr - offset; : goto page_ok; : :readpage_error: @@ -932,37 +932,37 @@ : goto readpage; : } : - 1078 0.0134 0 0 0 0 197 0.0094 :out: - 1841 0.0229 0 0 0 0 286 0.0137 : *_ra = ra; + 1240 0.0139 0 0 0 0 226 0.0103 :out: + 1910 0.0215 0 0 0 0 332 0.0151 : *_ra = ra; : - 736 0.0092 0 0 0 0 62 0.0030 : *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; - 26 3.2e-04 0 0 0 0 0 0 : if (cached_page) + 819 0.0092 0 0 0 0 71 0.0032 : *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; + 29 3.3e-04 0 0 0 0 1 4.5e-05 : if (cached_page) : page_cache_release(cached_page); - 102 0.0013 0 0 0 0 8 3.8e-04 : if (filp) + 95 0.0011 0 0 0 0 14 6.4e-04 : if (filp) : file_accessed(filp); - 701 0.0087 0 0 0 0 314 0.0150 :} + 784 0.0088 0 0 0 0 356 0.0162 :} : :EXPORT_SYMBOL(do_generic_mapping_read); : :int file_read_actor(read_descriptor_t *desc, struct page *page, : unsigned long offset, unsigned long size) - 7254 0.0904 0 0 1 0.0351 805 0.0385 :{ /* file_read_actor total: 17166 0.2139 0 0 3 0.1054 2633 0.1258 */ + 7670 0.0863 0 0 0 0 901 0.0409 :{ /* file_read_actor total: 18457 0.2076 0 0 1 0.0339 2746 0.1247 */ : char *kaddr; : unsigned long left, count = desc->count; : - 222 0.0028 0 0 0 0 32 0.0015 : if (size > count) + 267 0.0030 0 0 0 0 40 0.0018 : if (size > count) : size = count; : : /* : * Faults on the destination of a read are common, so do it before : * taking the kmap. 
: */ - 5524 0.0688 0 0 0 0 538 0.0257 : if (!fault_in_pages_writeable(desc->arg.buf, size)) { + 6268 0.0705 0 0 1 0.0339 538 0.0244 : if (!fault_in_pages_writeable(desc->arg.buf, size)) { : kaddr = kmap_atomic(page, KM_USER0); : left = __copy_to_user_inatomic(desc->arg.buf, : kaddr + offset, size); : kunmap_atomic(kaddr, KM_USER0); - 50 6.2e-04 0 0 0 0 14 6.7e-04 : if (left == 0) + 53 6.0e-04 0 0 0 0 21 9.5e-04 : if (left == 0) : goto success; : } : @@ -976,11 +976,11 @@ : desc->error = -EFAULT; : } :success: - : desc->count = count - size; - 37 4.6e-04 0 0 0 0 6 2.9e-04 : desc->written += size; - 39 4.9e-04 0 0 0 0 3 1.4e-04 : desc->arg.buf += size; + 3 3.4e-05 0 0 0 0 0 0 : desc->count = count - size; + 42 4.7e-04 0 0 0 0 13 5.9e-04 : desc->written += size; + 44 4.9e-04 0 0 0 0 9 4.1e-04 : desc->arg.buf += size; : return size; - 294 0.0037 0 0 0 0 40 0.0019 :} + 241 0.0027 0 0 0 0 34 0.0015 :} : :/* : * This is the "read()" routine for all filesystems @@ -989,24 +989,24 @@ :ssize_t :__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, : unsigned long nr_segs, loff_t *ppos) - 319 0.0040 0 0 0 0 101 0.0048 :{ /* __generic_file_aio_read total: 5909 0.0736 0 0 0 0 2520 0.1204 */ - 915 0.0114 0 0 0 0 268 0.0128 : struct file *filp = iocb->ki_filp; + 315 0.0035 0 0 0 0 123 0.0056 :{ /* __generic_file_aio_read total: 6246 0.0702 0 0 1 0.0339 2555 0.1160 */ + 962 0.0108 0 0 0 0 288 0.0131 : struct file *filp = iocb->ki_filp; : ssize_t retval; : unsigned long seg; : size_t count; : : count = 0; - 941 0.0117 0 0 0 0 365 0.0174 : for (seg = 0; seg < nr_segs; seg++) { + 1023 0.0115 0 0 1 0.0339 354 0.0161 : for (seg = 0; seg < nr_segs; seg++) { : const struct iovec *iv = &iov[seg]; : : /* : * If any segment has a negative length, or the cumulative : * length ever wraps negative then return -EINVAL. 
: */ - 54 6.7e-04 0 0 0 0 33 0.0016 : count += iv->iov_len; - 30 3.7e-04 0 0 0 0 21 0.0010 : if (unlikely((ssize_t)(count|iv->iov_len) < 0)) + 45 5.1e-04 0 0 0 0 28 0.0013 : count += iv->iov_len; + 30 3.4e-04 0 0 0 0 26 0.0012 : if (unlikely((ssize_t)(count|iv->iov_len) < 0)) : return -EINVAL; - 518 0.0065 0 0 0 0 166 0.0079 : if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len)) + 553 0.0062 0 0 0 0 137 0.0062 : if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len)) : continue; : if (seg == 0) : return -EFAULT; @@ -1016,7 +1016,7 @@ : } : : /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ - 207 0.0026 0 0 0 0 126 0.0060 : if (filp->f_flags & O_DIRECT) { + 155 0.0017 0 0 0 0 101 0.0046 : if (filp->f_flags & O_DIRECT) { : loff_t pos = *ppos, size; : struct address_space *mapping; : struct inode *inode; @@ -1040,19 +1040,19 @@ : } : : retval = 0; - 518 0.0065 0 0 0 0 139 0.0066 : if (count) { - 635 0.0079 0 0 0 0 328 0.0157 : for (seg = 0; seg < nr_segs; seg++) { + 591 0.0066 0 0 0 0 165 0.0075 : if (count) { + 731 0.0082 0 0 0 0 332 0.0151 : for (seg = 0; seg < nr_segs; seg++) { : read_descriptor_t desc; : - 37 4.6e-04 0 0 0 0 26 0.0012 : desc.written = 0; - 200 0.0025 0 0 0 0 119 0.0057 : desc.arg.buf = iov[seg].iov_base; - 1 1.2e-05 0 0 0 0 1 4.8e-05 : desc.count = iov[seg].iov_len; - 33 4.1e-04 0 0 0 0 15 7.2e-04 : if (desc.count == 0) + 34 3.8e-04 0 0 0 0 23 0.0010 : desc.written = 0; + 227 0.0026 0 0 0 0 105 0.0048 : desc.arg.buf = iov[seg].iov_base; + 4 4.5e-05 0 0 0 0 4 1.8e-04 : desc.count = iov[seg].iov_len; + 37 4.2e-04 0 0 0 0 14 6.4e-04 : if (desc.count == 0) : continue; - 40 5.0e-04 0 0 0 0 39 0.0019 : desc.error = 0; + 39 4.4e-04 0 0 0 0 34 0.0015 : desc.error = 0; : do_generic_file_read(filp,ppos,&desc,file_read_actor); - 792 0.0099 0 0 0 0 449 0.0215 : retval += desc.written; - 21 2.6e-04 0 0 0 0 10 4.8e-04 : if (desc.error) { + 854 0.0096 0 0 0 0 486 0.0221 : retval += desc.written; + 10 1.1e-04 0 0 0 0 9 4.1e-04 : if (desc.error) { : retval = retval ?: desc.error; : break; : } @@ -1060,18 +1060,18 @@ : } :out: : return retval; - 511 0.0064 0 0 0 0 245 0.0117 :} + 499 0.0056 0 0 0 0 250 0.0114 :} : :EXPORT_SYMBOL(__generic_file_aio_read); : :ssize_t :generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) - 2031 0.0253 0 0 0 0 529 0.0253 :{ /* generic_file_aio_read total: 2902 0.0362 0 0 0 0 874 0.0418 */ - 33 4.1e-04 0 0 0 0 4 1.9e-04 : struct iovec local_iov = { .iov_base = buf, .iov_len = count }; + 2316 0.0260 0 0 1 0.0339 545 0.0248 :{ /* generic_file_aio_read total: 3180 0.0358 0 0 1 0.0339 966 0.0439 */ + 24 2.7e-04 0 0 0 0 5 2.3e-04 : struct iovec local_iov = { .iov_base = buf, .iov_len = count }; : - 29 3.6e-04 0 0 0 0 7 3.3e-04 : BUG_ON(iocb->ki_pos != pos); - 234 0.0029 0 0 0 0 48 0.0023 : return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); - 575 0.0072 0 0 0 0 286 0.0137 :} + 21 2.4e-04 0 0 0 0 7 3.2e-04 : BUG_ON(iocb->ki_pos != pos); + 217 0.0024 0 0 0 0 56 0.0025 : return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); + 602 0.0068 0 0 0 0 353 0.0160 :} : :EXPORT_SYMBOL(generic_file_aio_read); : @@ -1231,25 +1231,25 @@ : */ :struct page *filemap_nopage(struct vm_area_struct *area, : unsigned long address, int *type) - 19512 0.2431 0 0 0 0 3891 0.1859 :{ /* filemap_nopage total: 135139 1.6839 0 0 12 0.4215 19486 0.9309 */ + 32916 0.3702 0 0 3 0.1018 5734 0.2604 :{ /* filemap_nopage total: 149350 1.6797 0 0 20 0.6789 21853 0.9925 */ : int error; - 260 0.0032 0 0 0 0 49 0.0023 : struct file 
*file = area->vm_file; - 1 1.2e-05 0 0 0 0 0 0 : struct address_space *mapping = file->f_mapping; - 255 0.0032 0 0 1 0.0351 19 9.1e-04 : struct file_ra_state *ra = &file->f_ra; - 34 4.2e-04 0 0 0 0 6 2.9e-04 : struct inode *inode = mapping->host; + 270 0.0030 0 0 0 0 131 0.0059 : struct file *file = area->vm_file; + 3 3.4e-05 0 0 0 0 0 0 : struct address_space *mapping = file->f_mapping; + 289 0.0033 0 0 0 0 123 0.0056 : struct file_ra_state *ra = &file->f_ra; + 27 3.0e-04 0 0 0 0 30 0.0014 : struct inode *inode = mapping->host; : struct page *page; : unsigned long size, pgoff; : int did_readaround = 0, majmin = VM_FAULT_MINOR; : - 2091 0.0261 0 0 0 0 1125 0.0537 : pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; + 2112 0.0238 0 0 0 0 1204 0.0547 : pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; : :retry_all: : size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - 2858 0.0356 0 0 0 0 573 0.0274 : if (pgoff >= size) + 3211 0.0361 0 0 0 0 638 0.0290 : if (pgoff >= size) : goto outside_data_content; : : /* If we don't want any read-ahead, don't bother */ - 2414 0.0301 0 0 0 0 483 0.0231 : if (VM_RandomReadHint(area)) + 4176 0.0470 0 0 1 0.0339 942 0.0428 : if (VM_RandomReadHint(area)) : goto no_cached_page; : : /* @@ -1258,22 +1258,22 @@ : * : * For sequential accesses, we use the generic readahead logic. : */ - 578 0.0072 0 0 0 0 125 0.0060 : if (VM_SequentialReadHint(area)) + 671 0.0075 0 0 0 0 107 0.0049 : if (VM_SequentialReadHint(area)) : page_cache_readahead(mapping, ra, file, pgoff, 1); : : /* : * Do we have something in the page cache already? : */ :retry_find: - 6132 0.0764 0 0 0 0 2326 0.1111 : page = find_get_page(mapping, pgoff); - 1 1.2e-05 0 0 0 0 0 0 : if (!page) { + 5950 0.0669 0 0 1 0.0339 2257 0.1025 : page = find_get_page(mapping, pgoff); + 3 3.4e-05 0 0 0 0 0 0 : if (!page) { : unsigned long ra_pages; : - : if (VM_SequentialReadHint(area)) { + 1 1.1e-05 0 0 0 0 0 0 : if (VM_SequentialReadHint(area)) { : handle_ra_miss(mapping, ra, pgoff); : goto no_cached_page; : } - 1 1.2e-05 0 0 0 0 0 0 : ra->mmap_miss++; + 1 1.1e-05 0 0 0 0 0 0 : ra->mmap_miss++; : : /* : * Do we miss much more than hit in this file? If so, @@ -1288,39 +1288,39 @@ : */ : if (!did_readaround) { : majmin = VM_FAULT_MAJOR; - : inc_page_state(pgmajfault); + 2 2.2e-05 0 0 0 0 2 9.1e-05 : inc_page_state(pgmajfault); : } : did_readaround = 1; - : ra_pages = max_sane_readahead(file->f_ra.ra_pages); + 0 0 0 0 0 0 1 4.5e-05 : ra_pages = max_sane_readahead(file->f_ra.ra_pages); : if (ra_pages) { : pgoff_t start = 0; : : if (pgoff > ra_pages / 2) : start = pgoff - ra_pages / 2; - : do_page_cache_readahead(mapping, file, start, ra_pages); + 0 0 0 0 0 0 1 4.5e-05 : do_page_cache_readahead(mapping, file, start, ra_pages); : } - : page = find_get_page(mapping, pgoff); - : if (!page) + 3 3.4e-05 0 0 0 0 0 0 : page = find_get_page(mapping, pgoff); + 2 2.2e-05 0 0 0 0 0 0 : if (!page) : goto no_cached_page; : } : - 31327 0.3903 0 0 3 0.1054 4260 0.2035 : if (!did_readaround) - 145 0.0018 0 0 0 0 20 9.6e-04 : ra->mmap_hit++; + 30354 0.3414 0 0 3 0.1018 4154 0.1887 : if (!did_readaround) + 160 0.0018 0 0 0 0 22 1.0e-03 : ra->mmap_hit++; : : /* : * Ok, found a page in the page cache, now we need to check : * that it's up-to-date. : */ - 692 0.0086 0 0 0 0 199 0.0095 : if (!PageUptodate(page)) + 671 0.0075 0 0 0 0 179 0.0081 : if (!PageUptodate(page)) : goto page_not_uptodate; : :success: : /* : * Found the page and have a reference on it. 
: */ - 16103 0.2007 0 0 4 0.1405 1328 0.0634 : mark_page_accessed(page); - 22270 0.2775 0 0 3 0.1054 1224 0.0585 : if (type) - 5379 0.0670 0 0 0 0 586 0.0280 : *type = majmin; + 16508 0.1857 0 0 2 0.0679 1465 0.0665 : mark_page_accessed(page); + 21942 0.2468 0 0 6 0.2037 1226 0.0557 : if (type) + 5808 0.0653 0 0 0 0 625 0.0284 : *type = majmin; : return page; : :outside_data_content: @@ -1357,14 +1357,14 @@ : return NULL; : :page_not_uptodate: - 3 3.7e-05 0 0 0 0 0 0 : if (!did_readaround) { + 5 5.6e-05 0 0 0 0 1 4.5e-05 : if (!did_readaround) { : majmin = VM_FAULT_MAJOR; : inc_page_state(pgmajfault); : } : lock_page(page); : : /* Did it get unhashed while we waited for it? */ - 2 2.5e-05 0 0 0 0 0 0 : if (!page->mapping) { + 6 6.7e-05 0 0 0 0 0 0 : if (!page->mapping) { : unlock_page(page); : page_cache_release(page); : goto retry_all; @@ -1403,7 +1403,7 @@ : : /* Somebody else successfully read it in? */ : if (PageUptodate(page)) { - : unlock_page(page); + 5 5.6e-05 0 0 0 0 0 0 : unlock_page(page); : goto success; : } : ClearPageError(page); @@ -1423,7 +1423,7 @@ : */ : page_cache_release(page); : return NULL; - 22087 0.2752 0 0 1 0.0351 2769 0.1323 :} + 21120 0.2375 0 0 4 0.1358 2485 0.1129 :} : :EXPORT_SYMBOL(filemap_nopage); : @@ -1608,15 +1608,15 @@ :/* This is used for a general mmap of a disk file */ : :int generic_file_mmap(struct file * file, struct vm_area_struct * vma) - 234 0.0029 0 0 1 0.0351 165 0.0079 :{ /* generic_file_mmap total: 3193 0.0398 0 0 1 0.0351 1361 0.0650 */ + 217 0.0024 0 0 0 0 91 0.0041 :{ /* generic_file_mmap total: 3261 0.0367 0 0 1 0.0339 1283 0.0583 */ : struct address_space *mapping = file->f_mapping; : - 1247 0.0155 0 0 0 0 397 0.0190 : if (!mapping->a_ops->readpage) + 1295 0.0146 0 0 0 0 339 0.0154 : if (!mapping->a_ops->readpage) : return -ENOEXEC; : file_accessed(file); - 1055 0.0131 0 0 0 0 575 0.0275 : vma->vm_ops = &generic_file_vm_ops; + 993 0.0112 0 0 1 0.0339 573 0.0260 : vma->vm_ops = &generic_file_vm_ops; : return 0; - 100 0.0012 0 0 0 0 51 0.0024 :} + 97 0.0011 0 0 0 0 68 0.0031 :} : :/* : * This is for filesystems which do not implement ->writepage. @@ -1760,30 +1760,30 @@ : * remove privs : */ :int remove_suid(struct dentry *dentry) - 238 0.0030 0 0 0 0 51 0.0024 :{ /* remove_suid total: 602 0.0075 0 0 0 0 204 0.0097 */ - 70 8.7e-04 0 0 0 0 17 8.1e-04 : mode_t mode = dentry->d_inode->i_mode; + 231 0.0026 0 0 0 0 42 0.0019 :{ /* remove_suid total: 663 0.0075 0 0 0 0 153 0.0069 */ + 90 0.0010 0 0 0 0 14 6.4e-04 : mode_t mode = dentry->d_inode->i_mode; : int kill = 0; : int result = 0; : : /* suid always must be killed */ - 10 1.2e-04 0 0 0 0 6 2.9e-04 : if (unlikely(mode & S_ISUID)) + 11 1.2e-04 0 0 0 0 1 4.5e-05 : if (unlikely(mode & S_ISUID)) : kill = ATTR_KILL_SUID; : : /* : * sgid without any exec bits is just a mandatory locking mark; leave : * it alone. If some exec bits are set, it's a real sgid; kill it. 
: */ - 48 6.0e-04 0 0 0 0 19 9.1e-04 : if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) + 52 5.8e-04 0 0 0 0 13 5.9e-04 : if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) : kill |= ATTR_KILL_SGID; : - 51 6.4e-04 0 0 0 0 17 8.1e-04 : if (unlikely(kill && !capable(CAP_FSETID))) { + 45 5.1e-04 0 0 0 0 16 7.3e-04 : if (unlikely(kill && !capable(CAP_FSETID))) { : struct iattr newattrs; : : newattrs.ia_valid = ATTR_FORCE | kill; : result = notify_change(dentry, &newattrs); : } : return result; - 185 0.0023 0 0 0 0 94 0.0045 :} + 234 0.0026 0 0 0 0 67 0.0030 :} :EXPORT_SYMBOL(remove_suid); : :static inline size_t @@ -1835,23 +1835,23 @@ : */ :inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) :{ - 7 8.7e-05 0 0 0 0 8 3.8e-04 : struct inode *inode = file->f_mapping->host; - 79 9.8e-04 0 0 0 0 15 7.2e-04 : unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + 3 3.4e-05 0 0 0 0 1 4.5e-05 : struct inode *inode = file->f_mapping->host; + 109 0.0012 0 0 0 0 18 8.2e-04 : unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; : - 53 6.6e-04 0 0 0 0 24 0.0011 : if (unlikely(*pos < 0)) + 60 6.7e-04 0 0 0 0 26 0.0012 : if (unlikely(*pos < 0)) : return -EINVAL; : - 6 7.5e-05 0 0 0 0 0 0 : if (!isblk) { + 6 6.7e-05 0 0 0 0 1 4.5e-05 : if (!isblk) { : /* FIXME: this is for backwards compatibility with 2.4 */ - 7 8.7e-05 0 0 0 0 4 1.9e-04 : if (file->f_flags & O_APPEND) + 9 1.0e-04 0 0 0 0 1 4.5e-05 : if (file->f_flags & O_APPEND) : *pos = i_size_read(inode); : - 89 0.0011 0 0 0 0 12 5.7e-04 : if (limit != RLIM_INFINITY) { + 141 0.0016 0 0 0 0 14 6.4e-04 : if (limit != RLIM_INFINITY) { : if (*pos >= limit) { : send_sig(SIGXFSZ, current, 0); : return -EFBIG; : } - 6 7.5e-05 0 0 0 0 7 3.3e-04 : if (*count > limit - (typeof(limit))*pos) { + 13 1.5e-04 0 0 0 0 6 2.7e-04 : if (*count > limit - (typeof(limit))*pos) { : *count = limit - (typeof(limit))*pos; : } : } @@ -1860,7 +1860,7 @@ : /* : * LFS rule : */ - 59 7.4e-04 0 0 0 0 33 0.0016 : if (unlikely(*pos + *count > MAX_NON_LFS && + 52 5.8e-04 0 0 0 0 34 0.0015 : if (unlikely(*pos + *count > MAX_NON_LFS && : !(file->f_flags & O_LARGEFILE))) { : if (*pos >= MAX_NON_LFS) { : send_sig(SIGXFSZ, current, 0); @@ -1878,8 +1878,8 @@ : * exceeded without writing data we send a signal and return EFBIG. : * Linus frestrict idea will clean these up nicely.. 
: */ - 71 8.8e-04 0 0 0 0 19 9.1e-04 : if (likely(!isblk)) { - 45 5.6e-04 0 0 0 0 20 9.6e-04 : if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { + 79 8.9e-04 0 0 0 0 11 5.0e-04 : if (likely(!isblk)) { + 33 3.7e-04 0 0 0 0 22 1.0e-03 : if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { : if (*count || *pos > inode->i_sb->s_maxbytes) { : send_sig(SIGXFSZ, current, 0); : return -EFBIG; @@ -1899,8 +1899,8 @@ : return -ENOSPC; : } : - 43 5.4e-04 0 0 0 0 22 0.0011 : if (*pos + *count > isize) - 58 7.2e-04 0 0 0 0 8 3.8e-04 : *count = isize - *pos; + 51 5.7e-04 0 0 0 0 9 4.1e-04 : if (*pos + *count > isize) + 98 0.0011 0 0 0 0 8 3.6e-04 : *count = isize - *pos; : } : return 0; :} @@ -1950,11 +1950,11 @@ :generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, : unsigned long nr_segs, loff_t pos, loff_t *ppos, : size_t count, ssize_t written) - 240 0.0030 0 0 0 0 95 0.0045 :{ /* generic_file_buffered_write total: 6191 0.0771 0 0 2 0.0702 2373 0.1134 */ - 25 3.1e-04 0 0 0 0 21 0.0010 : struct file *file = iocb->ki_filp; - 18 2.2e-04 0 0 0 0 25 0.0012 : struct address_space * mapping = file->f_mapping; - 2 2.5e-05 0 0 0 0 1 4.8e-05 : struct address_space_operations *a_ops = mapping->a_ops; - 107 0.0013 0 0 0 0 35 0.0017 : struct inode *inode = mapping->host; + 320 0.0036 0 0 0 0 140 0.0064 :{ /* generic_file_buffered_write total: 6190 0.0696 0 0 2 0.0679 2406 0.1093 */ + 27 3.0e-04 0 0 0 0 11 5.0e-04 : struct file *file = iocb->ki_filp; + 23 2.6e-04 0 0 0 0 19 8.6e-04 : struct address_space * mapping = file->f_mapping; + 4 4.5e-05 0 0 0 0 4 1.8e-04 : struct address_space_operations *a_ops = mapping->a_ops; + 148 0.0017 0 0 0 0 41 0.0019 : struct inode *inode = mapping->host; : long status = 0; : struct page *page; : struct page *cached_page = NULL; @@ -1969,11 +1969,11 @@ : /* : * handle partial DIO write. Adjust cur_iov if needed. : */ - 24 3.0e-04 0 0 0 0 20 9.6e-04 : if (likely(nr_segs == 1)) - 97 0.0012 0 0 0 0 58 0.0028 : buf = iov->iov_base + written; + 31 3.5e-04 0 0 0 0 20 9.1e-04 : if (likely(nr_segs == 1)) + 79 8.9e-04 0 0 0 0 53 0.0024 : buf = iov->iov_base + written; : else { : filemap_set_next_iovec(&cur_iov, &iov_base, written); - 12 1.5e-04 0 0 0 0 12 5.7e-04 : buf = cur_iov->iov_base + iov_base; + 7 7.9e-05 0 0 0 0 16 7.3e-04 : buf = cur_iov->iov_base + iov_base; : } : : do { @@ -1982,10 +1982,10 @@ : unsigned long maxlen; : size_t copied; : - 241 0.0030 0 0 0 0 57 0.0027 : offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ - 29 3.6e-04 0 0 0 0 17 8.1e-04 : index = pos >> PAGE_CACHE_SHIFT; - 16 2.0e-04 0 0 0 0 16 7.6e-04 : bytes = PAGE_CACHE_SIZE - offset; - 65 8.1e-04 0 0 0 0 56 0.0027 : if (bytes > count) + 278 0.0031 0 0 0 0 63 0.0029 : offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ + 29 3.3e-04 0 0 0 0 23 0.0010 : index = pos >> PAGE_CACHE_SHIFT; + 20 2.2e-04 0 0 0 0 11 5.0e-04 : bytes = PAGE_CACHE_SIZE - offset; + 69 7.8e-04 0 0 0 0 60 0.0027 : if (bytes > count) : bytes = count; : : /* @@ -1994,19 +1994,19 @@ : * same page as we're writing to, without it being marked : * up-to-date. 
: */ - 45 5.6e-04 0 0 0 0 37 0.0018 : maxlen = cur_iov->iov_len - iov_base; - 70 8.7e-04 0 0 0 0 65 0.0031 : if (maxlen > bytes) + 42 4.7e-04 0 0 0 0 30 0.0014 : maxlen = cur_iov->iov_len - iov_base; + 53 6.0e-04 0 0 0 0 45 0.0020 : if (maxlen > bytes) : maxlen = bytes; - 19 2.4e-04 0 0 0 0 17 8.1e-04 : fault_in_pages_readable(buf, maxlen); + 29 3.3e-04 0 0 0 0 18 8.2e-04 : fault_in_pages_readable(buf, maxlen); : : page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); - 120 0.0015 0 0 0 0 34 0.0016 : if (!page) { + 117 0.0013 0 0 0 0 24 0.0011 : if (!page) { : status = -ENOMEM; : break; : } : - 1312 0.0163 0 0 0 0 267 0.0128 : status = a_ops->prepare_write(file, page, offset, offset+bytes); - 13 1.6e-04 0 0 0 0 1 4.8e-05 : if (unlikely(status)) { + 1116 0.0126 0 0 0 0 225 0.0102 : status = a_ops->prepare_write(file, page, offset, offset+bytes); + 8 9.0e-05 0 0 0 0 2 9.1e-05 : if (unlikely(status)) { : loff_t isize = i_size_read(inode); : : if (status != AOP_TRUNCATED_PAGE) @@ -2022,59 +2022,59 @@ : vmtruncate(inode, isize); : break; : } - 165 0.0021 0 0 0 0 52 0.0025 : if (likely(nr_segs == 1)) + 252 0.0028 0 0 0 0 64 0.0029 : if (likely(nr_segs == 1)) : copied = filemap_copy_from_user(page, offset, : buf, bytes); : else : copied = filemap_copy_from_user_iovec(page, offset, : cur_iov, iov_base, bytes); : flush_dcache_page(page); - 404 0.0050 0 0 1 0.0351 343 0.0164 : status = a_ops->commit_write(file, page, offset, offset+bytes); - 10 1.2e-04 0 0 0 0 8 3.8e-04 : if (status == AOP_TRUNCATED_PAGE) { + 414 0.0047 0 0 0 0 313 0.0142 : status = a_ops->commit_write(file, page, offset, offset+bytes); + 13 1.5e-04 0 0 0 0 16 7.3e-04 : if (status == AOP_TRUNCATED_PAGE) { : page_cache_release(page); : continue; : } - 31 3.9e-04 0 0 0 0 41 0.0020 : if (likely(copied > 0)) { + 36 4.0e-04 0 0 0 0 55 0.0025 : if (likely(copied > 0)) { : if (!status) - 0 0 0 0 0 0 2 9.6e-05 : status = copied; + 1 1.1e-05 0 0 0 0 0 0 : status = copied; : - 11 1.4e-04 0 0 0 0 12 5.7e-04 : if (status >= 0) { - 2 2.5e-05 0 0 0 0 1 4.8e-05 : written += status; - 7 8.7e-05 0 0 0 0 24 0.0011 : count -= status; - 14 1.7e-04 0 0 0 0 11 5.3e-04 : pos += status; - 1 1.2e-05 0 0 0 0 0 0 : buf += status; - : if (unlikely(nr_segs > 1)) { + 23 2.6e-04 0 0 0 0 22 1.0e-03 : if (status >= 0) { + 1 1.1e-05 0 0 0 0 0 0 : written += status; + 14 1.6e-04 0 0 0 0 6 2.7e-04 : count -= status; + 8 9.0e-05 0 0 0 0 13 5.9e-04 : pos += status; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : buf += status; + 2 2.2e-05 0 0 0 0 0 0 : if (unlikely(nr_segs > 1)) { : filemap_set_next_iovec(&cur_iov, : &iov_base, status); : if (count) : buf = cur_iov->iov_base + : iov_base; : } else { - 12 1.5e-04 0 0 0 0 11 5.3e-04 : iov_base += status; + 11 1.2e-04 0 0 0 0 7 3.2e-04 : iov_base += status; : } : } : } - 114 0.0014 0 0 0 0 16 7.6e-04 : if (unlikely(copied != bytes)) + 176 0.0020 0 0 0 0 17 7.7e-04 : if (unlikely(copied != bytes)) : if (status >= 0) : status = -EFAULT; - 29 3.6e-04 0 0 0 0 4 1.9e-04 : unlock_page(page); - 211 0.0026 0 0 0 0 31 0.0015 : mark_page_accessed(page); - 81 0.0010 0 0 0 0 30 0.0014 : page_cache_release(page); - 31 3.9e-04 0 0 0 0 6 2.9e-04 : if (status < 0) + 37 4.2e-04 0 0 0 0 12 5.4e-04 : unlock_page(page); + 313 0.0035 0 0 0 0 41 0.0019 : mark_page_accessed(page); + 91 0.0010 0 0 0 0 21 9.5e-04 : page_cache_release(page); + 27 3.0e-04 0 0 0 0 11 5.0e-04 : if (status < 0) : break; - 36 4.5e-04 0 0 0 0 27 0.0013 : balance_dirty_pages_ratelimited(mapping); - 9 1.1e-04 0 0 0 0 1 4.8e-05 : cond_resched(); - 33 4.1e-04 0 0 0 0 13 6.2e-04 : 
} while (count); - 113 0.0014 0 0 1 0.0351 74 0.0035 : *ppos = pos; + 41 4.6e-04 0 0 0 0 32 0.0015 : balance_dirty_pages_ratelimited(mapping); + 8 9.0e-05 0 0 0 0 1 4.5e-05 : cond_resched(); + 20 2.2e-04 0 0 0 0 13 5.9e-04 : } while (count); + 115 0.0013 0 0 0 0 83 0.0038 : *ppos = pos; : - 9 1.1e-04 0 0 0 0 3 1.4e-04 : if (cached_page) + 12 1.3e-04 0 0 0 0 5 2.3e-04 : if (cached_page) : page_cache_release(cached_page); : : /* : * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC : */ - 26 3.2e-04 0 0 0 0 12 5.7e-04 : if (likely(status >= 0)) { - 107 0.0013 0 0 0 0 65 0.0031 : if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + 25 2.8e-04 0 0 0 0 11 5.0e-04 : if (likely(status >= 0)) { + 91 0.0010 0 0 0 0 54 0.0025 : if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { : if (!a_ops->writepage || !is_sync_kiocb(iocb)) : status = generic_osync_inode(inode, mapping, : OSYNC_METADATA|OSYNC_DATA); @@ -2086,40 +2086,40 @@ : * to buffered writes (block instantiation inside i_size). So we sync : * the file data here, to try to honour O_DIRECT expectations. : */ - 120 0.0015 0 0 0 0 22 0.0011 : if (unlikely(file->f_flags & O_DIRECT) && written) + 145 0.0016 0 0 0 0 22 1.0e-03 : if (unlikely(file->f_flags & O_DIRECT) && written) : status = filemap_write_and_wait(mapping); : : pagevec_lru_add(&lru_pvec); - 88 0.0011 0 0 0 0 52 0.0025 : return written ? written : status; - 167 0.0021 0 0 0 0 65 0.0031 :} + 108 0.0012 0 0 0 0 46 0.0021 : return written ? written : status; + 156 0.0018 0 0 0 0 75 0.0034 :} :EXPORT_SYMBOL(generic_file_buffered_write); : :static ssize_t :__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, : unsigned long nr_segs, loff_t *ppos) - 126 0.0016 0 0 0 0 19 9.1e-04 :{ /* __generic_file_aio_write_nolock total: 2016 0.0251 0 0 2 0.0702 801 0.0383 */ - 48 6.0e-04 0 0 0 0 9 4.3e-04 : struct file *file = iocb->ki_filp; - 15 1.9e-04 0 0 0 0 5 2.4e-04 : struct address_space * mapping = file->f_mapping; + 154 0.0017 0 0 0 0 18 8.2e-04 :{ /* __generic_file_aio_write_nolock total: 2225 0.0250 0 0 0 0 742 0.0337 */ + 62 7.0e-04 0 0 0 0 16 7.3e-04 : struct file *file = iocb->ki_filp; + 13 1.5e-04 0 0 0 0 10 4.5e-04 : struct address_space * mapping = file->f_mapping; : size_t ocount; /* original count */ : size_t count; /* after file limit checks */ - 17 2.1e-04 0 0 0 0 2 9.6e-05 : struct inode *inode = mapping->host; + 15 1.7e-04 0 0 0 0 1 4.5e-05 : struct inode *inode = mapping->host; : unsigned long seg; : loff_t pos; : ssize_t written; : ssize_t err; : : ocount = 0; - 201 0.0025 0 0 0 0 118 0.0056 : for (seg = 0; seg < nr_segs; seg++) { + 203 0.0023 0 0 0 0 120 0.0054 : for (seg = 0; seg < nr_segs; seg++) { : const struct iovec *iv = &iov[seg]; : : /* : * If any segment has a negative length, or the cumulative : * length ever wraps negative then return -EINVAL. 
: */ - 4 5.0e-05 0 0 0 0 1 4.8e-05 : ocount += iv->iov_len; - 5 6.2e-05 0 0 0 0 6 2.9e-04 : if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) + 10 1.1e-04 0 0 0 0 1 4.5e-05 : ocount += iv->iov_len; + 6 6.7e-05 0 0 0 0 4 1.8e-04 : if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) : return -EINVAL; - 32 4.0e-04 0 0 0 0 21 0.0010 : if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) + 31 3.5e-04 0 0 0 0 19 8.6e-04 : if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) : continue; : if (seg == 0) : return -EFAULT; @@ -2129,29 +2129,29 @@ : } : : count = ocount; - 26 3.2e-04 0 0 0 0 14 6.7e-04 : pos = *ppos; + 16 1.8e-04 0 0 0 0 8 3.6e-04 : pos = *ppos; : - 80 1.0e-03 0 0 0 0 56 0.0027 : vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + 99 0.0011 0 0 0 0 57 0.0026 : vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); : : /* We can write back this queue in page reclaim */ - 182 0.0023 0 0 0 0 43 0.0021 : current->backing_dev_info = mapping->backing_dev_info; + 225 0.0025 0 0 0 0 35 0.0016 : current->backing_dev_info = mapping->backing_dev_info; : written = 0; : - 58 7.2e-04 0 0 0 0 19 9.1e-04 : err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - 39 4.9e-04 0 0 0 0 7 3.3e-04 : if (err) + 64 7.2e-04 0 0 0 0 20 9.1e-04 : err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); + 45 5.1e-04 0 0 0 0 6 2.7e-04 : if (err) : goto out; : - 13 1.6e-04 0 0 0 0 0 0 : if (count == 0) + 15 1.7e-04 0 0 0 0 2 9.1e-05 : if (count == 0) : goto out; : - 47 5.9e-04 0 0 0 0 9 4.3e-04 : err = remove_suid(file->f_dentry); - 6 7.5e-05 0 0 0 0 1 4.8e-05 : if (err) + 48 5.4e-04 0 0 0 0 6 2.7e-04 : err = remove_suid(file->f_dentry); + 4 4.5e-05 0 0 0 0 0 0 : if (err) : goto out; : - 27 3.4e-04 0 0 0 0 7 3.3e-04 : file_update_time(file); + 23 2.6e-04 0 0 0 0 9 4.1e-04 : file_update_time(file); : : /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ - 20 2.5e-04 0 0 0 0 13 6.2e-04 : if (unlikely(file->f_flags & O_DIRECT)) { + 31 3.5e-04 0 0 0 0 24 0.0011 : if (unlikely(file->f_flags & O_DIRECT)) { : written = generic_file_direct_write(iocb, iov, : &nr_segs, pos, ppos, count, ocount); : if (written < 0 || written == count) @@ -2164,12 +2164,12 @@ : count -= written; : } : - 433 0.0054 0 0 2 0.0702 224 0.0107 : written = generic_file_buffered_write(iocb, iov, nr_segs, + 371 0.0042 0 0 0 0 167 0.0076 : written = generic_file_buffered_write(iocb, iov, nr_segs, : pos, ppos, count, written); :out: - : current->backing_dev_info = NULL; - 22 2.7e-04 0 0 0 0 12 5.7e-04 : return written ? written : err; - 64 8.0e-04 0 0 0 0 35 0.0017 :} + 1 1.1e-05 0 0 0 0 0 0 : current->backing_dev_info = NULL; + 41 4.6e-04 0 0 0 0 15 6.8e-04 : return written ? 
written : err; + 68 7.6e-04 0 0 0 0 43 0.0020 :} :EXPORT_SYMBOL(generic_file_aio_write_nolock); : :/* @@ -2389,22 +2389,22 @@ : :ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf, : size_t count, loff_t pos) - 284 0.0035 0 0 0 0 52 0.0025 :{ /* generic_file_aio_write total: 1221 0.0152 0 0 0 0 311 0.0149 */ - 1 1.2e-05 0 0 0 0 0 0 : struct file *file = iocb->ki_filp; - 6 7.5e-05 0 0 0 0 0 0 : struct address_space *mapping = file->f_mapping; - 2 2.5e-05 0 0 0 0 0 0 : struct inode *inode = mapping->host; + 344 0.0039 0 0 0 0 52 0.0024 :{ /* generic_file_aio_write total: 1322 0.0149 0 0 0 0 319 0.0145 */ + 3 3.4e-05 0 0 0 0 1 4.5e-05 : struct file *file = iocb->ki_filp; + 8 9.0e-05 0 0 0 0 1 4.5e-05 : struct address_space *mapping = file->f_mapping; + 1 1.1e-05 0 0 0 0 0 0 : struct inode *inode = mapping->host; : ssize_t ret; : struct iovec local_iov = { .iov_base = (void __user *)buf, - 19 2.4e-04 0 0 0 0 2 9.6e-05 : .iov_len = count }; + 23 2.6e-04 0 0 0 0 3 1.4e-04 : .iov_len = count }; : - 11 1.4e-04 0 0 0 0 1 4.8e-05 : BUG_ON(iocb->ki_pos != pos); + 15 1.7e-04 0 0 0 0 1 4.5e-05 : BUG_ON(iocb->ki_pos != pos); : - 130 0.0016 0 0 0 0 28 0.0013 : mutex_lock(&inode->i_mutex); - 592 0.0074 0 0 0 0 102 0.0049 : ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, + 141 0.0016 0 0 0 0 31 0.0014 : mutex_lock(&inode->i_mutex); + 606 0.0068 0 0 0 0 109 0.0050 : ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, : &iocb->ki_pos); : mutex_unlock(&inode->i_mutex); : - 103 0.0013 0 0 0 0 67 0.0032 : if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { + 96 0.0011 0 0 0 0 73 0.0033 : if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { : ssize_t err; : : err = sync_page_range(inode, mapping, pos, ret); @@ -2412,7 +2412,7 @@ : ret = err; : } : return ret; - 73 9.1e-04 0 0 0 0 59 0.0028 :} + 85 9.6e-04 0 0 0 0 48 0.0022 :} :EXPORT_SYMBOL(generic_file_aio_write); : :ssize_t generic_file_write(struct file *file, const char __user *buf, @@ -2532,12 +2532,12 @@ /* * Total samples for file : "mm/filemap.c" * - * 211117 2.6306 0 0 27 0.9484 42881 2.0485 + * 231362 2.6020 0 0 34 1.1541 45118 2.0491 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/highmem.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/highmem.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/highmem.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/highmem.c 2006-03-12 07:20:06.000000000 -0500 @@ -466,7 +466,7 @@ :} : :void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) - 118 0.0015 0 0 0 0 12 5.7e-04 :{ /* blk_queue_bounce total: 277 0.0035 0 0 0 0 118 0.0056 */ + 199 0.0022 0 0 0 0 31 0.0014 :{ /* blk_queue_bounce total: 467 0.0053 0 0 0 0 127 0.0058 */ : mempool_t *pool; : : /* @@ -474,8 +474,8 @@ : * to or bigger than the highest pfn in the system -- in that case, : * don't waste time iterating over bio segments : */ - 10 1.2e-04 0 0 0 0 2 9.6e-05 : if (!(q->bounce_gfp & GFP_DMA)) { - 23 2.9e-04 0 0 0 0 3 1.4e-04 : if (q->bounce_pfn >= blk_max_pfn) + 10 1.1e-04 0 0 0 0 
0 0 : if (!(q->bounce_gfp & GFP_DMA)) { + 24 2.7e-04 0 0 0 0 1 4.5e-05 : if (q->bounce_pfn >= blk_max_pfn) : return; : pool = page_pool; : } else { @@ -487,7 +487,7 @@ : * slow path : */ : __blk_queue_bounce(q, bio_orig, pool); - 126 0.0016 0 0 0 0 101 0.0048 :} + 234 0.0026 0 0 0 0 95 0.0043 :} : :EXPORT_SYMBOL(blk_queue_bounce); : @@ -614,12 +614,12 @@ /* * Total samples for file : "mm/highmem.c" * - * 277 0.0035 0 0 0 0 118 0.0056 + * 467 0.0053 0 0 0 0 127 0.0058 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/hugetlb.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/hugetlb.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/hugetlb.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/hugetlb.c 2006-03-12 07:20:06.000000000 -0500 @@ -222,15 +222,15 @@ :#endif /* CONFIG_SYSCTL */ : :int hugetlb_report_meminfo(char *buf) - :{ /* hugetlb_report_meminfo total: 98 0.0012 0 0 0 0 11 5.3e-04 */ - 94 0.0012 0 0 0 0 9 4.3e-04 : return sprintf(buf, + :{ /* hugetlb_report_meminfo total: 91 0.0010 0 0 0 0 5 2.3e-04 */ + 83 9.3e-04 0 0 0 0 5 2.3e-04 : return sprintf(buf, : "HugePages_Total: %5lu\n" : "HugePages_Free: %5lu\n" : "Hugepagesize: %5lu kB\n", : nr_huge_pages, : free_huge_pages, : HPAGE_SIZE/1024); - 4 5.0e-05 0 0 0 0 2 9.6e-05 :} + 8 9.0e-05 0 0 0 0 0 0 :} : :int hugetlb_report_node_meminfo(int nid, char *buf) :{ @@ -248,9 +248,9 @@ : :/* Return the number pages of memory we physically have, in PAGE_SIZE units. */ :unsigned long hugetlb_total_pages(void) - 103 0.0013 0 0 0 0 5 2.4e-04 :{ /* hugetlb_total_pages total: 103 0.0013 0 0 0 0 5 2.4e-04 */ + 95 0.0011 0 0 0 0 7 3.2e-04 :{ /* hugetlb_total_pages total: 97 0.0011 0 0 0 0 7 3.2e-04 */ : return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE); - :} + 2 2.2e-05 0 0 0 0 0 0 :} : :/* : * We cannot handle pagefaults against hugetlb pages at all. 
They cause @@ -576,12 +576,12 @@ /* * Total samples for file : "mm/hugetlb.c" * - * 201 0.0025 0 0 0 0 16 7.6e-04 + * 188 0.0021 0 0 0 0 12 5.4e-04 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/memory.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/memory.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/memory.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/memory.c 2006-03-12 07:20:05.000000000 -0500 @@ -122,7 +122,7 @@ : */ :static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) :{ - 147 0.0018 0 0 0 0 45 0.0021 : struct page *page = pmd_page(*pmd); + 229 0.0026 0 0 0 0 30 0.0014 : struct page *page = pmd_page(*pmd); : pmd_clear(pmd); : pte_lock_deinit(page); : pte_free_tlb(tlb, page); @@ -139,7 +139,7 @@ : unsigned long start; : : start = addr; - 80 1.0e-03 0 0 0 0 23 0.0011 : pmd = pmd_offset(pud, addr); + 81 9.1e-04 0 0 0 0 19 8.6e-04 : pmd = pmd_offset(pud, addr); : do { : next = pmd_addr_end(addr, end); : if (pmd_none_or_clear_bad(pmd)) @@ -172,7 +172,7 @@ : unsigned long start; : : start = addr; - 316 0.0039 0 0 0 0 63 0.0030 : pud = pud_offset(pgd, addr); + 303 0.0034 0 0 0 0 37 0.0017 : pud = pud_offset(pgd, addr); : do { : next = pud_addr_end(addr, end); : if (pud_none_or_clear_bad(pud)) @@ -204,7 +204,7 @@ :void free_pgd_range(struct mmu_gather **tlb, : unsigned long addr, unsigned long end, : unsigned long floor, unsigned long ceiling) - 671 0.0084 0 0 0 0 364 0.0174 :{ /* free_pgd_range total: 4486 0.0559 0 0 0 0 2052 0.0980 */ + 669 0.0075 0 0 0 0 370 0.0168 :{ /* free_pgd_range total: 5066 0.0570 0 0 0 0 2087 0.0948 */ : pgd_t *pgd; : unsigned long next; : unsigned long start; @@ -236,46 +236,46 @@ : */ : : addr &= PMD_MASK; - 34 4.2e-04 0 0 0 0 38 0.0018 : if (addr < floor) { + 40 4.5e-04 0 0 0 0 40 0.0018 : if (addr < floor) { : addr += PMD_SIZE; - 58 7.2e-04 0 0 0 0 73 0.0035 : if (!addr) + 53 6.0e-04 0 0 0 0 78 0.0035 : if (!addr) : return; : } - 118 0.0015 0 0 0 0 68 0.0032 : if (ceiling) { + 102 0.0011 0 0 0 0 72 0.0033 : if (ceiling) { : ceiling &= PMD_MASK; - 67 8.3e-04 0 0 0 0 48 0.0023 : if (!ceiling) + 95 0.0011 0 0 0 0 63 0.0029 : if (!ceiling) : return; : } - 100 0.0012 0 0 0 0 117 0.0056 : if (end - 1 > ceiling - 1) - 79 9.8e-04 0 0 0 0 84 0.0040 : end -= PMD_SIZE; - 225 0.0028 0 0 0 0 76 0.0036 : if (addr > end - 1) + 92 0.0010 0 0 0 0 110 0.0050 : if (end - 1 > ceiling - 1) + 91 0.0010 0 0 0 0 85 0.0039 : end -= PMD_SIZE; + 241 0.0027 0 0 0 0 73 0.0033 : if (addr > end - 1) : return; : : start = addr; - 61 7.6e-04 0 0 0 0 36 0.0017 : pgd = pgd_offset((*tlb)->mm, addr); + 68 7.6e-04 0 0 0 0 36 0.0016 : pgd = pgd_offset((*tlb)->mm, addr); : do { - 33 4.1e-04 0 0 0 0 21 0.0010 : next = pgd_addr_end(addr, end); + 70 7.9e-04 0 0 0 0 17 7.7e-04 : next = pgd_addr_end(addr, end); : if (pgd_none_or_clear_bad(pgd)) : continue; - 63 7.9e-04 0 0 0 0 8 3.8e-04 : free_pud_range(*tlb, pgd, addr, next, floor, ceiling); - 59 7.4e-04 0 0 0 0 21 0.0010 : } while (pgd++, addr = next, addr != end); + 90 0.0010 0 0 0 0 17 7.7e-04 : free_pud_range(*tlb, pgd, addr, 
next, floor, ceiling); + 50 5.6e-04 0 0 0 0 20 9.1e-04 : } while (pgd++, addr = next, addr != end); : : if (!(*tlb)->fullmm) : flush_tlb_pgtables((*tlb)->mm, start, end); - 531 0.0066 0 0 0 0 345 0.0165 :} + 607 0.0068 0 0 0 0 371 0.0168 :} : :void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, : unsigned long floor, unsigned long ceiling) - 743 0.0093 0 0 0 0 236 0.0113 :{ /* free_pgtables total: 4165 0.0519 0 0 0 0 1672 0.0799 */ - 390 0.0049 0 0 0 0 121 0.0058 : while (vma) { - 32 4.0e-04 0 0 0 0 15 7.2e-04 : struct vm_area_struct *next = vma->vm_next; - 166 0.0021 0 0 0 0 78 0.0037 : unsigned long addr = vma->vm_start; + 915 0.0103 0 0 0 0 195 0.0089 :{ /* free_pgtables total: 4762 0.0536 0 0 0 0 1695 0.0770 */ + 560 0.0063 0 0 0 0 186 0.0084 : while (vma) { + 30 3.4e-04 0 0 0 0 26 0.0012 : struct vm_area_struct *next = vma->vm_next; + 241 0.0027 0 0 0 0 79 0.0036 : unsigned long addr = vma->vm_start; : : /* : * Hide vma from rmap and vmtruncate before freeing pgtables : */ - : anon_vma_unlink(vma); - 342 0.0043 0 0 0 0 172 0.0082 : unlink_file_vma(vma); + 1 1.1e-05 0 0 0 0 0 0 : anon_vma_unlink(vma); + 354 0.0040 0 0 0 0 164 0.0074 : unlink_file_vma(vma); : : if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) { : hugetlb_free_pgd_range(tlb, addr, vma->vm_end, @@ -284,40 +284,40 @@ : /* : * Optimization: gather nearby vmas into one call down : */ - 1118 0.0139 0 0 0 0 460 0.0220 : while (next && next->vm_start <= vma->vm_end + PMD_SIZE + 1066 0.0120 0 0 0 0 430 0.0195 : while (next && next->vm_start <= vma->vm_end + PMD_SIZE : && !is_hugepage_only_range(vma->vm_mm, next->vm_start, : HPAGE_SIZE)) { : vma = next; - 33 4.1e-04 0 0 0 0 15 7.2e-04 : next = vma->vm_next; - 627 0.0078 0 0 0 0 142 0.0068 : anon_vma_unlink(vma); - 188 0.0023 0 0 0 0 65 0.0031 : unlink_file_vma(vma); + 34 3.8e-04 0 0 0 0 12 5.4e-04 : next = vma->vm_next; + 704 0.0079 0 0 0 0 135 0.0061 : anon_vma_unlink(vma); + 322 0.0036 0 0 0 0 61 0.0028 : unlink_file_vma(vma); : } - 264 0.0033 0 0 0 0 146 0.0070 : free_pgd_range(tlb, addr, vma->vm_end, + 273 0.0031 0 0 0 0 147 0.0067 : free_pgd_range(tlb, addr, vma->vm_end, : floor, next? 
next->vm_start: ceiling); : } : vma = next; : } - 262 0.0033 0 0 0 0 222 0.0106 :} + 262 0.0029 0 0 0 0 260 0.0118 :} : :int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) - 351 0.0044 0 0 0 0 66 0.0032 :{ /* __pte_alloc total: 1802 0.0225 0 0 0 0 564 0.0269 */ + 319 0.0036 0 0 0 0 54 0.0025 :{ /* __pte_alloc total: 1885 0.0212 0 0 0 0 533 0.0242 */ : struct page *new = pte_alloc_one(mm, address); : if (!new) : return -ENOMEM; : - 12 1.5e-04 0 0 0 0 1 4.8e-05 : pte_lock_init(new); - 9 1.1e-04 0 0 0 0 1 4.8e-05 : spin_lock(&mm->page_table_lock); - 179 0.0022 0 0 0 0 51 0.0024 : if (pmd_present(*pmd)) { /* Another has populated it */ + 11 1.2e-04 0 0 0 0 2 9.1e-05 : pte_lock_init(new); + 4 4.5e-05 0 0 0 0 3 1.4e-04 : spin_lock(&mm->page_table_lock); + 153 0.0017 0 0 0 0 48 0.0022 : if (pmd_present(*pmd)) { /* Another has populated it */ : pte_lock_deinit(new); : pte_free(new); : } else { - 5 6.2e-05 0 0 0 0 4 1.9e-04 : mm->nr_ptes++; - 460 0.0057 0 0 0 0 91 0.0043 : inc_page_state(nr_page_table_pages); + 3 3.4e-05 0 0 0 0 0 0 : mm->nr_ptes++; + 499 0.0056 0 0 0 0 69 0.0031 : inc_page_state(nr_page_table_pages); : pmd_populate(mm, pmd, new); : } : spin_unlock(&mm->page_table_lock); : return 0; - 208 0.0026 0 0 0 0 99 0.0047 :} + 207 0.0023 0 0 0 0 80 0.0036 :} : :int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) :{ @@ -336,7 +336,7 @@ : :static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) :{ - 2229 0.0278 0 0 1 0.0351 949 0.0453 : if (file_rss) + 2387 0.0268 0 0 0 0 984 0.0447 : if (file_rss) : add_mm_counter(mm, file_rss, file_rss); : if (anon_rss) : add_mm_counter(mm, anon_rss, anon_rss); @@ -385,10 +385,10 @@ : * VM_PFNMAP range). : */ :struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) - 4604 0.0574 0 0 0 0 1722 0.0823 :{ /* vm_normal_page total: 35862 0.4469 0 0 4 0.1405 24383 1.1648 */ - 7633 0.0951 0 0 0 0 2838 0.1356 : unsigned long pfn = pte_pfn(pte); + 4748 0.0534 0 0 1 0.0339 1733 0.0787 :{ /* vm_normal_page total: 34822 0.3916 0 0 3 0.1018 23511 1.0678 */ + 8442 0.0949 0 0 1 0.0339 3104 0.1410 : unsigned long pfn = pte_pfn(pte); : - 963 0.0120 0 0 0 0 262 0.0125 : if (vma->vm_flags & VM_PFNMAP) { + 619 0.0070 0 0 0 0 244 0.0111 : if (vma->vm_flags & VM_PFNMAP) { : unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; : if (pfn == vma->vm_pgoff + off) : return NULL; @@ -404,7 +404,7 @@ : * : * Remove this test eventually! : */ - 9884 0.1232 0 0 1 0.0351 2773 0.1325 : if (unlikely(!pfn_valid(pfn))) { + 8251 0.0928 0 0 1 0.0339 2746 0.1247 : if (unlikely(!pfn_valid(pfn))) { : print_bad_pte(vma, pte, addr); : return NULL; : } @@ -416,8 +416,8 @@ : * The PAGE_ZERO() pages and various VDSO mappings can : * cause them to exist. : */ - 4015 0.0500 0 0 1 0.0351 4106 0.1962 : return pfn_to_page(pfn); - 8763 0.1092 0 0 2 0.0702 12682 0.6059 :} + 3908 0.0440 0 0 0 0 3621 0.1644 : return pfn_to_page(pfn); + 8854 0.0996 0 0 0 0 12063 0.5478 :} : :/* : * copy one vm_area from one task to the other. 
Assumes the page tables @@ -572,11 +572,11 @@ : :int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, : struct vm_area_struct *vma) - 380 0.0047 0 0 0 0 54 0.0026 :{ /* copy_page_range total: 13670 0.1703 0 0 1 0.0351 3738 0.1786 */ + 420 0.0047 0 0 0 0 58 0.0026 :{ /* copy_page_range total: 14772 0.1661 0 0 1 0.0339 3739 0.1698 */ : pgd_t *src_pgd, *dst_pgd; : unsigned long next; - 1 1.2e-05 0 0 0 0 0 0 : unsigned long addr = vma->vm_start; - 32 4.0e-04 0 0 0 0 3 1.4e-04 : unsigned long end = vma->vm_end; + 0 0 0 0 0 0 1 4.5e-05 : unsigned long addr = vma->vm_start; + 29 3.3e-04 0 0 0 0 2 9.1e-05 : unsigned long end = vma->vm_end; : : /* : * Don't copy ptes where a page fault will fill them correctly. @@ -584,39 +584,39 @@ : * readonly mappings. The tradeoff is that copy_page_range is more : * efficient than faulting. : */ - 34 4.2e-04 0 0 0 0 11 5.3e-04 : if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) { - 3 3.7e-05 0 0 0 0 1 4.8e-05 : if (!vma->anon_vma) + 43 4.8e-04 0 0 0 0 5 2.3e-04 : if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) { + 4 4.5e-05 0 0 0 0 0 0 : if (!vma->anon_vma) : return 0; : } : - 218 0.0027 0 0 0 0 96 0.0046 : if (is_vm_hugetlb_page(vma)) + 177 0.0020 0 0 0 0 124 0.0056 : if (is_vm_hugetlb_page(vma)) : return copy_hugetlb_page_range(dst_mm, src_mm, vma); : - 36 4.5e-04 0 0 0 0 35 0.0017 : dst_pgd = pgd_offset(dst_mm, addr); - 8 1.0e-04 0 0 0 0 19 9.1e-04 : src_pgd = pgd_offset(src_mm, addr); + 20 2.2e-04 0 0 0 0 27 0.0012 : dst_pgd = pgd_offset(dst_mm, addr); + 23 2.6e-04 0 0 0 0 14 6.4e-04 : src_pgd = pgd_offset(src_mm, addr); : do { - 116 0.0014 0 0 0 0 25 0.0012 : next = pgd_addr_end(addr, end); + 88 9.9e-04 0 0 0 0 30 0.0014 : next = pgd_addr_end(addr, end); : if (pgd_none_or_clear_bad(src_pgd)) : continue; : if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, : vma, addr, next)) : return -ENOMEM; - 394 0.0049 0 0 0 0 110 0.0053 : } while (dst_pgd++, src_pgd++, addr = next, addr != end); + 339 0.0038 0 0 0 0 113 0.0051 : } while (dst_pgd++, src_pgd++, addr = next, addr != end); : return 0; - 118 0.0015 0 0 0 0 66 0.0032 :} + 121 0.0014 0 0 0 0 78 0.0035 :} : :static unsigned long zap_pte_range(struct mmu_gather *tlb, : struct vm_area_struct *vma, pmd_t *pmd, : unsigned long addr, unsigned long end, : long *zap_work, struct zap_details *details) :{ - 108 0.0013 0 0 0 0 33 0.0016 : struct mm_struct *mm = tlb->mm; + 82 9.2e-04 0 0 0 0 36 0.0016 : struct mm_struct *mm = tlb->mm; : pte_t *pte; : spinlock_t *ptl; : int file_rss = 0; : int anon_rss = 0; : - 4985 0.0621 0 0 1 0.0351 2211 0.1056 : pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + 5175 0.0582 0 0 0 0 2259 0.1026 : pte = pte_offset_map_lock(mm, pmd, addr, &ptl); : do { : pte_t ptent = *pte; : if (pte_none(ptent)) { @@ -695,7 +695,7 @@ : pmd_t *pmd; : unsigned long next; : - 1095 0.0136 0 0 0 0 384 0.0183 : pmd = pmd_offset(pud, addr); + 1235 0.0139 0 0 0 0 418 0.0190 : pmd = pmd_offset(pud, addr); : do { : next = pmd_addr_end(addr, end); : if (pmd_none_or_clear_bad(pmd)) { @@ -717,7 +717,7 @@ : pud_t *pud; : unsigned long next; : - 1209 0.0151 0 0 0 0 400 0.0191 : pud = pud_offset(pgd, addr); + 1186 0.0133 0 0 0 0 375 0.0170 : pud = pud_offset(pgd, addr); : do { : next = pud_addr_end(addr, end); : if (pud_none_or_clear_bad(pud)) { @@ -739,21 +739,21 @@ : pgd_t *pgd; : unsigned long next; : - 668 0.0083 0 0 0 0 178 0.0085 : if (details && !details->check_mapping && !details->nonlinear_vma) + 840 0.0094 0 0 0 0 147 0.0067 : if (details 
&& !details->check_mapping && !details->nonlinear_vma) : details = NULL; : - 60 7.5e-04 0 0 0 0 49 0.0023 : BUG_ON(addr >= end); + 62 7.0e-04 0 0 0 0 43 0.0020 : BUG_ON(addr >= end); : tlb_start_vma(tlb, vma); - 460 0.0057 0 0 0 0 126 0.0060 : pgd = pgd_offset(vma->vm_mm, addr); + 489 0.0055 0 0 0 0 126 0.0057 : pgd = pgd_offset(vma->vm_mm, addr); : do { - 468 0.0058 0 0 0 0 173 0.0083 : next = pgd_addr_end(addr, end); + 487 0.0055 0 0 0 0 186 0.0084 : next = pgd_addr_end(addr, end); : if (pgd_none_or_clear_bad(pgd)) { : (*zap_work)--; : continue; : } : next = zap_pud_range(tlb, vma, pgd, addr, next, : zap_work, details); - 350 0.0044 0 0 0 0 110 0.0053 : } while (pgd++, addr = next, (addr != end && *zap_work > 0)); + 373 0.0042 0 0 0 0 75 0.0034 : } while (pgd++, addr = next, (addr != end && *zap_work > 0)); : tlb_end_vma(tlb, vma); : : return addr; @@ -796,66 +796,66 @@ : struct vm_area_struct *vma, unsigned long start_addr, : unsigned long end_addr, unsigned long *nr_accounted, : struct zap_details *details) - 1655 0.0206 0 0 0 0 1281 0.0612 :{ /* unmap_vmas total: 220147 2.7431 0 0 33 1.1591 83033 3.9667 */ + 1745 0.0196 0 0 0 0 1241 0.0564 :{ /* unmap_vmas total: 245835 2.7648 0 0 23 0.7807 85820 3.8976 */ : long zap_work = ZAP_BLOCK_SIZE; : unsigned long tlb_start = 0; /* For tlb_finish_mmu */ : int tlb_start_valid = 0; : unsigned long start = start_addr; - 47 5.9e-04 0 0 0 0 20 9.6e-04 : spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; - 723 0.0090 0 0 0 0 307 0.0147 : int fullmm = (*tlbp)->fullmm; + 51 5.7e-04 0 0 0 0 12 5.4e-04 : spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; + 659 0.0074 0 0 0 0 293 0.0133 : int fullmm = (*tlbp)->fullmm; : - 1856 0.0231 0 0 0 0 634 0.0303 : for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { + 2842 0.0320 0 0 1 0.0339 641 0.0291 : for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { : unsigned long end; : - 444 0.0055 0 0 0 0 97 0.0046 : start = max(vma->vm_start, start_addr); - 199 0.0025 0 0 0 0 58 0.0028 : if (start >= vma->vm_end) + 464 0.0052 0 0 0 0 86 0.0039 : start = max(vma->vm_start, start_addr); + 144 0.0016 0 0 0 0 35 0.0016 : if (start >= vma->vm_end) : continue; - 25 3.1e-04 0 0 0 0 14 6.7e-04 : end = min(vma->vm_end, end_addr); - 248 0.0031 0 0 0 0 74 0.0035 : if (end <= vma->vm_start) + 21 2.4e-04 0 0 0 0 9 4.1e-04 : end = min(vma->vm_end, end_addr); + 171 0.0019 0 0 0 0 44 0.0020 : if (end <= vma->vm_start) : continue; : - 139 0.0017 0 0 0 0 45 0.0021 : if (vma->vm_flags & VM_ACCOUNT) - 1154 0.0144 0 0 0 0 296 0.0141 : *nr_accounted += (end - start) >> PAGE_SHIFT; + 116 0.0013 0 0 0 0 40 0.0018 : if (vma->vm_flags & VM_ACCOUNT) + 1718 0.0193 0 0 0 0 293 0.0133 : *nr_accounted += (end - start) >> PAGE_SHIFT; : - 2446 0.0305 0 0 0 0 686 0.0328 : while (start != end) { + 2953 0.0332 0 0 0 0 733 0.0333 : while (start != end) { : if (!tlb_start_valid) { : tlb_start = start; : tlb_start_valid = 1; : } : - 1058 0.0132 0 0 0 0 258 0.0123 : if (unlikely(is_vm_hugetlb_page(vma))) { + 1263 0.0142 0 0 1 0.0339 258 0.0117 : if (unlikely(is_vm_hugetlb_page(vma))) { : unmap_hugepage_range(vma, start, end); : zap_work -= (end - start) / : (HPAGE_SIZE / PAGE_SIZE); : start = end; : } else - 1136 0.0142 0 0 0 0 262 0.0125 : start = unmap_page_range(*tlbp, vma, + 1676 0.0188 0 0 0 0 245 0.0111 : start = unmap_page_range(*tlbp, vma, : start, end, &zap_work, details); : - 519 0.0065 0 0 0 0 140 0.0067 : if (zap_work > 0) { - 2 2.5e-05 0 0 0 0 1 4.8e-05 : BUG_ON(start != end); + 1060 0.0119 0 0 0 0 
360 0.0163 : if (zap_work > 0) { + 2 2.2e-05 0 0 0 0 0 0 : BUG_ON(start != end); : break; : } : - 7 8.7e-05 0 0 0 0 1 4.8e-05 : tlb_finish_mmu(*tlbp, tlb_start, start); + 9 1.0e-04 0 0 0 0 0 0 : tlb_finish_mmu(*tlbp, tlb_start, start); : : if (need_resched() || : (i_mmap_lock && need_lockbreak(i_mmap_lock))) { - 0 0 0 0 0 0 1 4.8e-05 : if (i_mmap_lock) { + 3 3.4e-05 0 0 0 0 2 9.1e-05 : if (i_mmap_lock) { : *tlbp = NULL; : goto out; : } : cond_resched(); : } : - 2 2.5e-05 0 0 0 0 0 0 : *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm); + : *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm); : tlb_start_valid = 0; : zap_work = ZAP_BLOCK_SIZE; : } : } :out: : return start; /* which is now the end (or restart) address */ - 552 0.0069 0 0 0 0 179 0.0086 :} + 751 0.0084 0 0 0 0 134 0.0061 :} : :/** : * zap_page_range - remove user pages in a given range @@ -1164,16 +1164,16 @@ :} : :pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) - 97 0.0012 0 0 0 0 18 8.6e-04 :{ /* get_locked_pte total: 536 0.0067 0 0 0 0 139 0.0066 */ - 23 2.9e-04 0 0 0 0 4 1.9e-04 : pgd_t * pgd = pgd_offset(mm, addr); + 101 0.0011 0 0 0 0 24 0.0011 :{ /* get_locked_pte total: 511 0.0057 0 0 0 0 148 0.0067 */ + 30 3.4e-04 0 0 0 0 6 2.7e-04 : pgd_t * pgd = pgd_offset(mm, addr); : pud_t * pud = pud_alloc(mm, pgd, addr); - : if (pud) { + 3 3.4e-05 0 0 0 0 1 4.5e-05 : if (pud) { : pmd_t * pmd = pmd_alloc(mm, pud, addr); - 2 2.5e-05 0 0 0 0 1 4.8e-05 : if (pmd) - 110 0.0014 0 0 0 0 40 0.0019 : return pte_alloc_map_lock(mm, pmd, addr, ptl); + 2 2.2e-05 0 0 0 0 0 0 : if (pmd) + 88 9.9e-04 0 0 0 0 36 0.0016 : return pte_alloc_map_lock(mm, pmd, addr, ptl); : } : return NULL; - 132 0.0016 0 0 0 0 20 9.6e-04 :} + 82 9.2e-04 0 0 0 0 14 6.4e-04 :} : :/* : * This is the old fallback for page remapping. 
@@ -1444,23 +1444,23 @@ :static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, : unsigned long address, pte_t *page_table, pmd_t *pmd, : spinlock_t *ptl, pte_t orig_pte) - 1173 0.0146 0 0 0 0 255 0.0122 :{ /* do_wp_page total: 15500 0.1931 0 0 1 0.0351 3178 0.1518 */ + 1218 0.0137 0 0 0 0 258 0.0117 :{ /* do_wp_page total: 16788 0.1888 0 0 2 0.0679 2991 0.1358 */ : struct page *old_page, *new_page; : pte_t entry; : int ret = VM_FAULT_MINOR; : - 198 0.0025 0 0 0 0 125 0.0060 : old_page = vm_normal_page(vma, address, orig_pte); - 66 8.2e-04 0 0 0 0 17 8.1e-04 : if (!old_page) + 202 0.0023 0 0 0 0 133 0.0060 : old_page = vm_normal_page(vma, address, orig_pte); + 90 0.0010 0 0 0 0 20 9.1e-04 : if (!old_page) : goto gotten; : - 66 8.2e-04 0 0 0 0 28 0.0013 : if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { - 19 2.4e-04 0 0 0 0 7 3.3e-04 : int reuse = can_share_swap_page(old_page); - 28 3.5e-04 0 0 0 0 4 1.9e-04 : unlock_page(old_page); - 95 0.0012 0 0 0 0 17 8.1e-04 : if (reuse) { + 52 5.8e-04 0 0 0 0 42 0.0019 : if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { + 33 3.7e-04 0 0 0 0 7 3.2e-04 : int reuse = can_share_swap_page(old_page); + 35 3.9e-04 0 0 0 0 4 1.8e-04 : unlock_page(old_page); + 126 0.0014 0 0 0 0 15 6.8e-04 : if (reuse) { : flush_cache_page(vma, address, pte_pfn(orig_pte)); : entry = pte_mkyoung(orig_pte); : entry = maybe_mkwrite(pte_mkdirty(entry), vma); - 137 0.0017 0 0 0 0 49 0.0023 : ptep_set_access_flags(vma, address, page_table, entry, 1); + 161 0.0018 0 0 0 0 41 0.0019 : ptep_set_access_flags(vma, address, page_table, entry, 1); : update_mmu_cache(vma, address, entry); : lazy_mmu_prot_update(entry); : ret |= VM_FAULT_WRITE; @@ -1475,15 +1475,15 @@ :gotten: : pte_unmap_unlock(page_table, ptl); : - 136 0.0017 0 0 0 0 41 0.0020 : if (unlikely(anon_vma_prepare(vma))) + 155 0.0017 0 0 0 0 52 0.0024 : if (unlikely(anon_vma_prepare(vma))) : goto oom; - 856 0.0107 0 0 0 0 75 0.0036 : if (old_page == ZERO_PAGE(address)) { - 288 0.0036 0 0 1 0.0351 70 0.0033 : new_page = alloc_zeroed_user_highpage(vma, address); - 10 1.2e-04 0 0 0 0 0 0 : if (!new_page) + 1047 0.0118 0 0 0 0 52 0.0024 : if (old_page == ZERO_PAGE(address)) { + 229 0.0026 0 0 0 0 53 0.0024 : new_page = alloc_zeroed_user_highpage(vma, address); + 13 1.5e-04 0 0 0 0 0 0 : if (!new_page) : goto oom; : } else { - 203 0.0025 0 0 0 0 50 0.0024 : new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); - 3 3.7e-05 0 0 0 0 2 9.6e-05 : if (!new_page) + 199 0.0022 0 0 0 0 61 0.0028 : new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (!new_page) : goto oom; : cow_user_page(new_page, old_page, address); : } @@ -1491,33 +1491,33 @@ : /* : * Re-check the pte - we dropped the lock : */ - 1294 0.0161 0 0 0 0 216 0.0103 : page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - 495 0.0062 0 0 0 0 269 0.0129 : if (likely(pte_same(*page_table, orig_pte))) { - 45 5.6e-04 0 0 0 0 12 5.7e-04 : if (old_page) { - 22 2.7e-04 0 0 0 0 5 2.4e-04 : page_remove_rmap(old_page); - 102 0.0013 0 0 0 0 68 0.0032 : if (!PageAnon(old_page)) { - 54 6.7e-04 0 0 0 0 10 4.8e-04 : dec_mm_counter(mm, file_rss); + 1244 0.0140 0 0 0 0 168 0.0076 : page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + 484 0.0054 0 0 0 0 203 0.0092 : if (likely(pte_same(*page_table, orig_pte))) { + 46 5.2e-04 0 0 0 0 3 1.4e-04 : if (old_page) { + 27 3.0e-04 0 0 0 0 3 1.4e-04 : page_remove_rmap(old_page); + 92 0.0010 0 0 0 0 33 0.0015 : if (!PageAnon(old_page)) { + 66 7.4e-04 0 0 0 0 12 5.4e-04 : 
dec_mm_counter(mm, file_rss); : inc_mm_counter(mm, anon_rss); : } : } else - 150 0.0019 0 0 0 0 20 9.6e-04 : inc_mm_counter(mm, anon_rss); + 48 5.4e-04 0 0 0 0 5 2.3e-04 : inc_mm_counter(mm, anon_rss); : flush_cache_page(vma, address, pte_pfn(orig_pte)); - 155 0.0019 0 0 0 0 30 0.0014 : entry = mk_pte(new_page, vma->vm_page_prot); + 125 0.0014 0 0 0 0 34 0.0015 : entry = mk_pte(new_page, vma->vm_page_prot); : entry = maybe_mkwrite(pte_mkdirty(entry), vma); - 319 0.0040 0 0 0 0 50 0.0024 : ptep_establish(vma, address, page_table, entry); + 467 0.0053 0 0 0 0 100 0.0045 : ptep_establish(vma, address, page_table, entry); : update_mmu_cache(vma, address, entry); : lazy_mmu_prot_update(entry); - 2801 0.0349 0 0 0 0 205 0.0098 : lru_cache_add_active(new_page); - 469 0.0058 0 0 0 0 111 0.0053 : page_add_new_anon_rmap(new_page, vma, address); + 2648 0.0298 0 0 0 0 179 0.0081 : lru_cache_add_active(new_page); + 775 0.0087 0 0 0 0 101 0.0046 : page_add_new_anon_rmap(new_page, vma, address); : : /* Free the old page.. */ : new_page = old_page; : ret |= VM_FAULT_WRITE; : } - 131 0.0016 0 0 0 0 18 8.6e-04 : if (new_page) - 19 2.4e-04 0 0 0 0 2 9.6e-05 : page_cache_release(new_page); - 37 4.6e-04 0 0 0 0 6 2.9e-04 : if (old_page) - 72 9.0e-04 0 0 0 0 18 8.6e-04 : page_cache_release(old_page); + 134 0.0015 0 0 0 0 23 0.0010 : if (new_page) + 14 1.6e-04 0 0 0 0 2 9.1e-05 : page_cache_release(new_page); + 39 4.4e-04 0 0 0 0 8 3.6e-04 : if (old_page) + 63 7.1e-04 0 0 0 0 17 7.7e-04 : page_cache_release(old_page); :unlock: : pte_unmap_unlock(page_table, ptl); : return ret; @@ -1525,7 +1525,7 @@ : if (old_page) : page_cache_release(old_page); : return VM_FAULT_OOM; - 266 0.0033 0 0 0 0 51 0.0024 :} + 268 0.0030 0 0 0 0 45 0.0020 :} : :/* : * Helper functions for unmap_mapping_range(). @@ -1686,10 +1686,10 @@ : */ :void unmap_mapping_range(struct address_space *mapping, : loff_t const holebegin, loff_t const holelen, int even_cows) - :{ /* unmap_mapping_range total: 41 5.1e-04 0 0 0 0 2 9.6e-05 */ + :{ /* unmap_mapping_range total: 36 4.0e-04 0 0 0 0 7 3.2e-04 */ : struct zap_details details; : pgoff_t hba = holebegin >> PAGE_SHIFT; - 12 1.5e-04 0 0 0 0 2 9.6e-05 : pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; + 11 1.2e-04 0 0 0 0 1 4.5e-05 : pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; : : /* Check for overflow. */ : if (sizeof(holelen) > sizeof(hlen)) { @@ -1699,10 +1699,10 @@ : hlen = ULONG_MAX - hba + 1; : } : - 2 2.5e-05 0 0 0 0 0 0 : details.check_mapping = even_cows? NULL: mapping; - 1 1.2e-05 0 0 0 0 0 0 : details.nonlinear_vma = NULL; + 1 1.1e-05 0 0 0 0 0 0 : details.check_mapping = even_cows? NULL: mapping; + 1 1.1e-05 0 0 0 0 0 0 : details.nonlinear_vma = NULL; : details.first_index = hba; - 1 1.2e-05 0 0 0 0 0 0 : details.last_index = hba + hlen - 1; + : details.last_index = hba + hlen - 1; : if (details.last_index < details.first_index) : details.last_index = ULONG_MAX; : details.i_mmap_lock = &mapping->i_mmap_lock; @@ -1710,26 +1710,26 @@ : spin_lock(&mapping->i_mmap_lock); : : /* serialize i_size write against truncate_count write */ - : smp_wmb(); + 3 3.4e-05 0 0 0 0 2 9.1e-05 : smp_wmb(); : /* Protect against page faults, and endless unmapping loops */ - : mapping->truncate_count++; + 1 1.1e-05 0 0 0 0 0 0 : mapping->truncate_count++; : /* : * For archs where spin_lock has inclusive semantics like ia64 : * this smp_mb() will prevent to read pagetable contents : * before the truncate_count increment is visible to : * other cpus. 
: */ - 6 7.5e-05 0 0 0 0 0 0 : smp_mb(); - 1 1.2e-05 0 0 0 0 0 0 : if (unlikely(is_restart_addr(mapping->truncate_count))) { + 5 5.6e-05 0 0 0 0 2 9.1e-05 : smp_mb(); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (unlikely(is_restart_addr(mapping->truncate_count))) { : if (mapping->truncate_count == 0) : reset_vma_truncate_counts(mapping); : mapping->truncate_count++; : } : details.truncate_count = mapping->truncate_count; : - 7 8.7e-05 0 0 0 0 0 0 : if (unlikely(!prio_tree_empty(&mapping->i_mmap))) + 6 6.7e-05 0 0 0 0 0 0 : if (unlikely(!prio_tree_empty(&mapping->i_mmap))) : unmap_mapping_range_tree(&mapping->i_mmap, &details); - 10 1.2e-04 0 0 0 0 0 0 : if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) + 6 6.7e-05 0 0 0 0 1 4.5e-05 : if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) : unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); : spin_unlock(&mapping->i_mmap_lock); :} @@ -1744,7 +1744,7 @@ : * incomplete page. Ugly, but necessary. : */ :int vmtruncate(struct inode * inode, loff_t offset) - 11 1.4e-04 0 0 0 0 0 0 :{ /* vmtruncate total: 32 4.0e-04 0 0 0 0 3 1.4e-04 */ + 15 1.7e-04 0 0 0 0 1 4.5e-05 :{ /* vmtruncate total: 37 4.2e-04 0 0 0 0 3 1.4e-04 */ : struct address_space *mapping = inode->i_mapping; : unsigned long limit; : @@ -1754,11 +1754,11 @@ : * truncation of in-use swapfiles is disallowed - it would cause : * subsequent swapout to scribble on the now-freed blocks. : */ - 0 0 0 0 0 0 2 9.6e-05 : if (IS_SWAPFILE(inode)) + 1 1.1e-05 0 0 0 0 0 0 : if (IS_SWAPFILE(inode)) : goto out_busy; : i_size_write(inode, offset); : unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); - 8 1.0e-04 0 0 0 0 1 4.8e-05 : truncate_inode_pages(mapping, offset); + 8 9.0e-05 0 0 0 0 0 0 : truncate_inode_pages(mapping, offset); : goto out_truncate; : :do_expand: @@ -1770,16 +1770,16 @@ : i_size_write(inode, offset); : :out_truncate: - 2 2.5e-05 0 0 0 0 0 0 : if (inode->i_op && inode->i_op->truncate) + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (inode->i_op && inode->i_op->truncate) : inode->i_op->truncate(inode); : return 0; :out_sig: - : send_sig(SIGXFSZ, current, 0); + 2 2.2e-05 0 0 0 0 0 0 : send_sig(SIGXFSZ, current, 0); :out_big: : return -EFBIG; :out_busy: : return -ETXTBSY; - 11 1.4e-04 0 0 0 0 0 0 :} + 7 7.9e-05 0 0 0 0 1 4.5e-05 :} :EXPORT_SYMBOL(vmtruncate); : :int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) @@ -1818,7 +1818,7 @@ : * Caller must hold down_read on the vma->vm_mm if vma is not NULL. : */ :void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma) - :{ + 7 7.9e-05 0 0 0 0 2 9.1e-05 :{ /* swapin_readahead total: 59 6.6e-04 0 0 0 0 8 3.6e-04 */ :#ifdef CONFIG_NUMA : struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL; :#endif @@ -1830,9 +1830,9 @@ : * Get the number of handles we should do readahead io to. : */ : num = valid_swaphandles(entry, &offset); - : for (i = 0; i < num; offset++, i++) { + 18 2.0e-04 0 0 0 0 0 0 : for (i = 0; i < num; offset++, i++) { : /* Ok, do the async read-ahead now */ - : new_page = read_swap_cache_async(swp_entry(swp_type(entry), + 19 2.1e-04 0 0 0 0 5 2.3e-04 : new_page = read_swap_cache_async(swp_entry(swp_type(entry), : offset), vma, addr); : if (!new_page) : break; @@ -1842,14 +1842,14 @@ : * Find the next applicable VMA for the NUMA policy. : */ : addr += PAGE_SIZE; - : if (addr == 0) + 1 1.1e-05 0 0 0 0 1 4.5e-05 : if (addr == 0) : vma = NULL; : if (vma) { : if (addr >= vma->vm_end) { : vma = next_vma; - : next_vma = vma ? 
vma->vm_next : NULL; + 1 1.1e-05 0 0 0 0 0 0 : next_vma = vma ? vma->vm_next : NULL; : } - : if (vma && addr < vma->vm_start) + 10 1.1e-04 0 0 0 0 0 0 : if (vma && addr < vma->vm_start) : vma = NULL; : } else { : if (next_vma && addr >= next_vma->vm_start) { @@ -1859,8 +1859,8 @@ : } :#endif : } - : lru_add_drain(); /* Push any new pages onto the LRU now */ - :} + 1 1.1e-05 0 0 0 0 0 0 : lru_add_drain(); /* Push any new pages onto the LRU now */ + 2 2.2e-05 0 0 0 0 0 0 :} : :/* : * We enter with non-exclusive mmap_sem (to exclude vma changes, @@ -2216,7 +2216,7 @@ : spinlock_t *ptl; : int ret, type = VXPT_UNKNOWN; : - 1457 0.0182 0 0 1 0.0351 160 0.0076 : old_entry = entry = *pte; + 1498 0.0168 0 0 0 0 144 0.0065 : old_entry = entry = *pte; : if (!pte_present(entry)) { : if (pte_none(entry)) { : if (!vma->vm_ops || !vma->vm_ops->nopage) { @@ -2283,32 +2283,32 @@ : */ :int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, : unsigned long address, int write_access) - 14408 0.1795 0 0 1 0.0351 6250 0.2986 :{ /* __handle_mm_fault total: 399476 4.9777 0 0 38 1.3347119198 5.6944 */ + 14940 0.1680 0 0 2 0.0679 6600 0.2997 :{ /* __handle_mm_fault total: 470125 5.2873 0 0 64 2.1724131308 5.9634 */ : pgd_t *pgd; : pud_t *pud; : pmd_t *pmd; : pte_t *pte; : - 370 0.0046 0 0 0 0 161 0.0077 : __set_current_state(TASK_RUNNING); + 362 0.0041 0 0 0 0 137 0.0062 : __set_current_state(TASK_RUNNING); : - 3682 0.0459 0 0 2 0.0702 2302 0.1100 : inc_page_state(pgfault); + 3929 0.0442 0 0 0 0 2762 0.1254 : inc_page_state(pgfault); : - 2159 0.0269 0 0 0 0 1419 0.0678 : if (unlikely(is_vm_hugetlb_page(vma))) + 2313 0.0260 0 0 1 0.0339 1436 0.0652 : if (unlikely(is_vm_hugetlb_page(vma))) : return hugetlb_fault(mm, vma, address, write_access); : - 5551 0.0692 0 0 0 0 1942 0.0928 : pgd = pgd_offset(mm, address); + 5509 0.0620 0 0 1 0.0339 1868 0.0848 : pgd = pgd_offset(mm, address); : pud = pud_alloc(mm, pgd, address); - 1125 0.0140 0 0 0 0 772 0.0369 : if (!pud) + 1142 0.0128 0 0 1 0.0339 672 0.0305 : if (!pud) : return VM_FAULT_OOM; : pmd = pmd_alloc(mm, pud, address); - 1578 0.0197 0 0 0 0 715 0.0342 : if (!pmd) + 1562 0.0176 0 0 1 0.0339 602 0.0273 : if (!pmd) : return VM_FAULT_OOM; - 42907 0.5346 0 0 3 0.1054 11800 0.5637 : pte = pte_alloc_map(mm, pmd, address); - 6232 0.0777 0 0 1 0.0351 641 0.0306 : if (!pte) + 44533 0.5008 0 0 7 0.2376 12143 0.5515 : pte = pte_alloc_map(mm, pmd, address); + 6397 0.0719 0 0 0 0 650 0.0295 : if (!pte) : return VM_FAULT_OOM; : : return handle_pte_fault(mm, vma, address, pte, pmd, write_access); - 9353 0.1165 0 0 1 0.0351 3102 0.1482 :} + 7170 0.0806 0 0 1 0.0339 3199 0.1453 :} : :EXPORT_SYMBOL_GPL(__handle_mm_fault); : @@ -2318,19 +2318,19 @@ : * We've already handled the fast-path in-line. 
: */ :int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) - 247 0.0031 0 0 0 0 24 0.0011 :{ /* __pud_alloc total: 660 0.0082 0 0 0 0 145 0.0069 */ + 224 0.0025 0 0 0 0 21 9.5e-04 :{ /* __pud_alloc total: 652 0.0073 0 0 0 0 124 0.0056 */ : pud_t *new = pud_alloc_one(mm, address); - 79 9.8e-04 0 0 0 0 36 0.0017 : if (!new) + 88 9.9e-04 0 0 0 0 34 0.0015 : if (!new) : return -ENOMEM; : - 4 5.0e-05 0 0 0 0 3 1.4e-04 : spin_lock(&mm->page_table_lock); - 188 0.0023 0 0 0 0 45 0.0021 : if (pgd_present(*pgd)) /* Another has populated it */ + 4 4.5e-05 0 0 0 0 1 4.5e-05 : spin_lock(&mm->page_table_lock); + 205 0.0023 0 0 0 0 42 0.0019 : if (pgd_present(*pgd)) /* Another has populated it */ : pud_free(new); : else - 108 0.0013 0 0 0 0 24 0.0011 : pgd_populate(mm, pgd, new); + 105 0.0012 0 0 0 0 17 7.7e-04 : pgd_populate(mm, pgd, new); : spin_unlock(&mm->page_table_lock); : return 0; - 18 2.2e-04 0 0 0 0 6 2.9e-04 :} + 16 1.8e-04 0 0 0 0 6 2.7e-04 :} :#else :/* Workaround for gcc 2.96 */ :int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) @@ -2345,17 +2345,17 @@ : * We've already handled the fast-path in-line. : */ :int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) - 265 0.0033 0 0 0 0 46 0.0022 :{ /* __pmd_alloc total: 629 0.0078 0 0 0 0 212 0.0101 */ + 230 0.0026 0 0 0 0 43 0.0020 :{ /* __pmd_alloc total: 593 0.0067 0 0 0 0 189 0.0086 */ : pmd_t *new = pmd_alloc_one(mm, address); - 49 6.1e-04 0 0 0 0 46 0.0022 : if (!new) + 63 7.1e-04 0 0 0 0 42 0.0019 : if (!new) : return -ENOMEM; : - 12 1.5e-04 0 0 0 0 11 5.3e-04 : spin_lock(&mm->page_table_lock); + 9 1.0e-04 0 0 0 0 11 5.0e-04 : spin_lock(&mm->page_table_lock); :#ifndef __ARCH_HAS_4LEVEL_HACK - 58 7.2e-04 0 0 0 0 36 0.0017 : if (pud_present(*pud)) /* Another has populated it */ + 51 5.7e-04 0 0 0 0 27 0.0012 : if (pud_present(*pud)) /* Another has populated it */ : pmd_free(new); : else - 135 0.0017 0 0 0 0 41 0.0020 : pud_populate(mm, pud, new); + 122 0.0014 0 0 0 0 36 0.0016 : pud_populate(mm, pud, new); :#else : if (pgd_present(*pud)) /* Another has populated it */ : pmd_free(new); @@ -2364,7 +2364,7 @@ :#endif /* __ARCH_HAS_4LEVEL_HACK */ : spin_unlock(&mm->page_table_lock); : return 0; - 18 2.2e-04 0 0 0 0 6 2.9e-04 :} + 13 1.5e-04 0 0 0 0 7 3.2e-04 :} :#else :/* Workaround for gcc 2.96 */ :int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) @@ -2473,12 +2473,12 @@ /* * Total samples for file : "mm/memory.c" * - * 169225 2.1086 0 0 16 0.5620 68692 3.2816 + * 173916 1.9560 0 0 19 0.6449 68701 3.1201 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempolicy.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempolicy.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempolicy.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempolicy.c 2006-03-12 07:20:05.000000000 -0500 @@ -1070,27 +1070,27 @@ :/* Return effective policy for a VMA */ :static struct mempolicy * get_vma_policy(struct task_struct *task, : struct vm_area_struct *vma, unsigned long addr) - 
11772 0.1467 0 0 4 0.1405 1756 0.0839 :{ /* get_vma_policy total: 20161 0.2512 0 0 5 0.1756 4556 0.2177 */ - 1 1.2e-05 0 0 0 0 0 0 : struct mempolicy *pol = task->mempolicy; + 19248 0.2165 0 0 2 0.0679 4112 0.1867 :{ /* get_vma_policy total: 25998 0.2924 0 0 2 0.0679 6292 0.2858 */ + 2 2.2e-05 0 0 0 0 0 0 : struct mempolicy *pol = task->mempolicy; : - 679 0.0085 0 0 0 0 162 0.0077 : if (vma) { - 2087 0.0260 0 0 1 0.0351 707 0.0338 : if (vma->vm_ops && vma->vm_ops->get_policy) + 841 0.0095 0 0 0 0 149 0.0068 : if (vma) { + 1825 0.0205 0 0 0 0 515 0.0234 : if (vma->vm_ops && vma->vm_ops->get_policy) : pol = vma->vm_ops->get_policy(vma, addr); - 3006 0.0375 0 0 0 0 992 0.0474 : else if (vma->vm_policy && + 1608 0.0181 0 0 0 0 602 0.0273 : else if (vma->vm_policy && : vma->vm_policy->policy != MPOL_DEFAULT) : pol = vma->vm_policy; : } - 915 0.0114 0 0 0 0 290 0.0139 : if (!pol) + 1000 0.0112 0 0 0 0 309 0.0140 : if (!pol) : pol = &default_policy; : return pol; - 1701 0.0212 0 0 0 0 649 0.0310 :} + 1474 0.0166 0 0 0 0 605 0.0275 :} : :/* Return a zonelist representing a mempolicy */ :static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) - 2661 0.0332 0 0 0 0 1442 0.0689 :{ + 2763 0.0311 0 0 0 0 1746 0.0793 :{ : int nd; : - 4760 0.0593 0 0 3 0.1054 2927 0.1398 : switch (policy->policy) { /* zonelist_policy total: 25895 0.3227 0 0 6 0.2107 18194 0.8692 */ + 6991 0.0786 0 0 3 0.1018 4580 0.2080 : switch (policy->policy) { /* zonelist_policy total: 30016 0.3376 0 0 5 0.1697 17890 0.8125 */ : case MPOL_PREFERRED: : nd = policy->v.preferred_node; : if (nd < 0) @@ -1105,38 +1105,38 @@ : /*FALL THROUGH*/ : case MPOL_INTERLEAVE: /* should not happen */ : case MPOL_DEFAULT: - 8769 0.1093 0 0 1 0.0351 6790 0.3244 : nd = numa_node_id(); + 9408 0.1058 0 0 0 0 5526 0.2510 : nd = numa_node_id(); : break; : default: : nd = 0; : BUG(); : } - 7522 0.0937 0 0 2 0.0702 5577 0.2664 : return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); - 1460 0.0182 0 0 0 0 973 0.0465 :} + 8811 0.0991 0 0 2 0.0679 4852 0.2204 : return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); + 1456 0.0164 0 0 0 0 987 0.0448 :} : :/* Do dynamic interleaving for a process */ :static unsigned interleave_nodes(struct mempolicy *policy) - 87 0.0011 0 0 0 0 34 0.0016 :{ /* interleave_nodes total: 1234 0.0154 0 0 0 0 585 0.0279 */ + 137 0.0015 0 0 0 0 95 0.0043 :{ /* interleave_nodes total: 1415 0.0159 0 0 0 0 824 0.0374 */ : unsigned nid, next; : struct task_struct *me = current; : - 6 7.5e-05 0 0 0 0 4 1.9e-04 : nid = me->il_next; - 12 1.5e-04 0 0 0 0 0 0 : next = next_node(nid, policy->v.nodes); - 15 1.9e-04 0 0 0 0 2 9.6e-05 : if (next >= MAX_NUMNODES) - 3 3.7e-05 0 0 0 0 6 2.9e-04 : next = first_node(policy->v.nodes); - 5 6.2e-05 0 0 0 0 7 3.3e-04 : me->il_next = next; + 6 6.7e-05 0 0 0 0 3 1.4e-04 : nid = me->il_next; + 18 2.0e-04 0 0 0 0 7 3.2e-04 : next = next_node(nid, policy->v.nodes); + 17 1.9e-04 0 0 0 0 11 5.0e-04 : if (next >= MAX_NUMNODES) + 9 1.0e-04 0 0 0 0 15 6.8e-04 : next = first_node(policy->v.nodes); + 10 1.1e-04 0 0 0 0 13 5.9e-04 : me->il_next = next; : return nid; - 31 3.9e-04 0 0 0 0 27 0.0013 :} + 37 4.2e-04 0 0 0 0 39 0.0018 :} : :/* : * Depending on the memory policy provide a node from which to allocate the : * next slab entry. 
: */ :unsigned slab_node(struct mempolicy *policy) - 22 2.7e-04 0 0 0 0 7 3.3e-04 :{ - 163 0.0020 0 0 1 0.0351 46 0.0022 : switch (policy->policy) { /* slab_node total: 287 0.0036 0 0 1 0.0351 111 0.0053 */ + 30 3.4e-04 0 0 0 0 16 7.3e-04 :{ + 321 0.0036 0 0 0 0 111 0.0050 : switch (policy->policy) { /* slab_node total: 434 0.0049 0 0 0 0 213 0.0097 */ : case MPOL_INTERLEAVE: - 70 8.7e-04 0 0 0 0 32 0.0015 : return interleave_nodes(policy); + 55 6.2e-04 0 0 0 0 50 0.0023 : return interleave_nodes(policy); : : case MPOL_BIND: : /* @@ -1153,7 +1153,7 @@ : default: : return numa_node_id(); : } - 32 4.0e-04 0 0 0 0 26 0.0012 :} + 28 3.1e-04 0 0 0 0 36 0.0016 :} : :/* Do static interleaving for a VMA with known offset. */ :static unsigned offset_il_node(struct mempolicy *pol, @@ -1206,11 +1206,11 @@ : Own path because it needs to do special accounting. */ :static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, : unsigned nid) - :{ /* alloc_page_interleave total: 1 1.2e-05 0 0 0 0 0 0 */ + :{ /* alloc_page_interleave total: 2 2.2e-05 0 0 0 0 0 0 */ : struct zonelist *zl; : struct page *page; : - 1 1.2e-05 0 0 0 0 0 0 : zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); + 2 2.2e-05 0 0 0 0 0 0 : zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); : page = __alloc_pages(gfp, order, zl); : if (page && page_zone(page) == zl->zones[0]) { : zone_pcp(zl->zones[0],get_cpu())->interleave_hit++; @@ -1243,19 +1243,19 @@ : */ :struct page * :alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) - 7106 0.0885 0 0 3 0.1054 2349 0.1122 :{ /* alloc_page_vma total: 25999 0.3240 0 0 8 0.2810 10412 0.4974 */ - 2787 0.0347 0 0 0 0 606 0.0290 : struct mempolicy *pol = get_vma_policy(current, vma, addr); + 11118 0.1250 0 0 2 0.0679 2603 0.1182 :{ /* alloc_page_vma total: 33798 0.3801 0 0 4 0.1358 11938 0.5422 */ + 4650 0.0523 0 0 0 0 855 0.0388 : struct mempolicy *pol = get_vma_policy(current, vma, addr); : : cpuset_update_task_memory_state(); : - 502 0.0063 0 0 1 0.0351 243 0.0116 : if (unlikely(pol->policy == MPOL_INTERLEAVE)) { + 633 0.0071 0 0 0 0 211 0.0096 : if (unlikely(pol->policy == MPOL_INTERLEAVE)) { : unsigned nid; : : nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); : return alloc_page_interleave(gfp, 0, nid); : } - 3813 0.0475 0 0 1 0.0351 1780 0.0850 : return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol)); - 11481 0.1431 0 0 3 0.1054 5389 0.2574 :} + 4958 0.0558 0 0 1 0.0339 2458 0.1116 : return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol)); + 11773 0.1324 0 0 0 0 5745 0.2609 :} : :/** : * alloc_pages_current - Allocate pages. @@ -1277,17 +1277,17 @@ : * 2) allocating for current task (not interrupt). 
: */ :struct page *alloc_pages_current(gfp_t gfp, unsigned order) - 742 0.0092 0 0 0 0 125 0.0060 :{ /* alloc_pages_current total: 2059 0.0257 0 0 0 0 629 0.0300 */ - 73 9.1e-04 0 0 0 0 22 0.0011 : struct mempolicy *pol = current->mempolicy; + 811 0.0091 0 0 0 0 155 0.0070 :{ /* alloc_pages_current total: 2135 0.0240 0 0 0 0 674 0.0306 */ + 62 7.0e-04 0 0 0 0 24 0.0011 : struct mempolicy *pol = current->mempolicy; : - 239 0.0030 0 0 0 0 62 0.0030 : if ((gfp & __GFP_WAIT) && !in_interrupt()) + 223 0.0025 0 0 0 0 54 0.0025 : if ((gfp & __GFP_WAIT) && !in_interrupt()) : cpuset_update_task_memory_state(); - 81 0.0010 0 0 0 0 45 0.0021 : if (!pol || in_interrupt()) + 80 9.0e-04 0 0 0 0 42 0.0019 : if (!pol || in_interrupt()) : pol = &default_policy; - 111 0.0014 0 0 0 0 33 0.0016 : if (pol->policy == MPOL_INTERLEAVE) + 114 0.0013 0 0 0 0 42 0.0019 : if (pol->policy == MPOL_INTERLEAVE) : return alloc_page_interleave(gfp, order, interleave_nodes(pol)); - 465 0.0058 0 0 0 0 202 0.0097 : return __alloc_pages(gfp, order, zonelist_policy(gfp, pol)); - 341 0.0042 0 0 0 0 138 0.0066 :} + 467 0.0053 0 0 0 0 173 0.0079 : return __alloc_pages(gfp, order, zonelist_policy(gfp, pol)); + 372 0.0042 0 0 0 0 176 0.0080 :} :EXPORT_SYMBOL(alloc_pages_current); : :/* @@ -1823,12 +1823,12 @@ /* * Total samples for file : "mm/mempolicy.c" * - * 73521 0.9161 0 0 20 0.7025 33450 1.5980 + * 91358 1.0275 0 0 10 0.3394 36917 1.6766 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempool.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempool.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempool.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mempool.c 2006-03-12 07:20:05.000000000 -0500 @@ -201,7 +201,7 @@ : * fail if called from an IRQ context.) 
: */ :void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) - 826 0.0103 0 0 0 0 395 0.0189 :{ /* mempool_alloc total: 2995 0.0373 0 0 1 0.0351 837 0.0400 */ + 1152 0.0130 0 0 0 0 444 0.0202 :{ /* mempool_alloc total: 3765 0.0423 0 0 1 0.0339 972 0.0441 */ : void *element; : unsigned long flags; : wait_queue_t wait; @@ -211,14 +211,14 @@ : : gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ : gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ - 134 0.0017 0 0 0 0 82 0.0039 : gfp_mask |= __GFP_NOWARN; /* failures are OK */ + 159 0.0018 0 0 0 0 104 0.0047 : gfp_mask |= __GFP_NOWARN; /* failures are OK */ : - 1 1.2e-05 0 0 0 0 0 0 : gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); + : gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); : :repeat_alloc: : - 1564 0.0195 0 0 1 0.0351 287 0.0137 : element = pool->alloc(gfp_temp, pool->pool_data); - 2 2.5e-05 0 0 0 0 0 0 : if (likely(element != NULL)) + 1785 0.0201 0 0 0 0 311 0.0141 : element = pool->alloc(gfp_temp, pool->pool_data); + 3 3.4e-05 0 0 0 0 0 0 : if (likely(element != NULL)) : return element; : : spin_lock_irqsave(&pool->lock, flags); @@ -243,7 +243,7 @@ : finish_wait(&pool->wait, &wait); : : goto repeat_alloc; - 468 0.0058 0 0 0 0 73 0.0035 :} + 666 0.0075 0 0 1 0.0339 113 0.0051 :} :EXPORT_SYMBOL(mempool_alloc); : :/** @@ -255,11 +255,11 @@ : * this function only sleeps if the free_fn() function sleeps. : */ :void mempool_free(void *element, mempool_t *pool) - 1835 0.0229 0 0 0 0 195 0.0093 :{ /* mempool_free total: 8445 0.1052 0 0 1 0.0351 2018 0.0964 */ + 2409 0.0271 0 0 0 0 277 0.0126 :{ /* mempool_free total: 9904 0.1114 0 0 0 0 2504 0.1137 */ : unsigned long flags; : - 321 0.0040 0 0 0 0 39 0.0019 : smp_mb(); - 5094 0.0635 0 0 1 0.0351 1413 0.0675 : if (pool->curr_nr < pool->min_nr) { + 302 0.0034 0 0 0 0 69 0.0031 : smp_mb(); + 5829 0.0656 0 0 0 0 1712 0.0778 : if (pool->curr_nr < pool->min_nr) { : spin_lock_irqsave(&pool->lock, flags); : if (pool->curr_nr < pool->min_nr) { : add_element(pool, element); @@ -269,35 +269,35 @@ : } : spin_unlock_irqrestore(&pool->lock, flags); : } - 97 0.0012 0 0 0 0 85 0.0041 : pool->free(element, pool->pool_data); - 1098 0.0137 0 0 0 0 286 0.0137 :} + 153 0.0017 0 0 0 0 108 0.0049 : pool->free(element, pool->pool_data); + 1211 0.0136 0 0 0 0 338 0.0154 :} :EXPORT_SYMBOL(mempool_free); : :/* : * A commonly used alloc and free fn. 
: */ :void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) - 858 0.0107 0 0 0 0 609 0.0291 :{ /* mempool_alloc_slab total: 1212 0.0151 0 0 0 0 749 0.0358 */ + 1075 0.0121 0 0 1 0.0339 661 0.0300 :{ /* mempool_alloc_slab total: 1802 0.0203 0 0 1 0.0339 829 0.0376 */ : kmem_cache_t *mem = (kmem_cache_t *) pool_data; - 4 5.0e-05 0 0 0 0 1 4.8e-05 : return kmem_cache_alloc(mem, gfp_mask); - 350 0.0044 0 0 0 0 139 0.0066 :} + 13 1.5e-04 0 0 0 0 3 1.4e-04 : return kmem_cache_alloc(mem, gfp_mask); + 714 0.0080 0 0 0 0 165 0.0075 :} :EXPORT_SYMBOL(mempool_alloc_slab); : :void mempool_free_slab(void *element, void *pool_data) - 317 0.0039 0 0 0 0 403 0.0193 :{ /* mempool_free_slab total: 401 0.0050 0 0 0 0 475 0.0227 */ + 390 0.0044 0 0 0 0 578 0.0263 :{ /* mempool_free_slab total: 507 0.0057 0 0 0 0 649 0.0295 */ : kmem_cache_t *mem = (kmem_cache_t *) pool_data; - 1 1.2e-05 0 0 0 0 0 0 : kmem_cache_free(mem, element); - 83 0.0010 0 0 0 0 72 0.0034 :} + : kmem_cache_free(mem, element); + 117 0.0013 0 0 0 0 71 0.0032 :} :EXPORT_SYMBOL(mempool_free_slab); /* * Total samples for file : "mm/mempool.c" * - * 13053 0.1626 0 0 2 0.0702 4079 0.1949 + * 15978 0.1797 0 0 2 0.0679 4954 0.2250 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mmap.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mmap.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mmap.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mmap.c 2006-03-12 07:20:05.000000000 -0500 @@ -82,22 +82,22 @@ : * wish to use this logic. : */ :int __vm_enough_memory(long pages, int cap_sys_admin) - 1034 0.0129 0 0 0 0 148 0.0071 :{ /* __vm_enough_memory total: 3519 0.0438 0 0 1 0.0351 650 0.0311 */ + 1077 0.0121 0 0 0 0 127 0.0058 :{ /* __vm_enough_memory total: 3618 0.0407 0 0 1 0.0339 602 0.0273 */ : unsigned long free, allowed; : - 6 7.5e-05 0 0 0 0 4 1.9e-04 : vm_acct_memory(pages); + 7 7.9e-05 0 0 0 0 0 0 : vm_acct_memory(pages); : : /* : * Sometimes we want to use more memory than we have : */ - 916 0.0114 0 0 0 0 131 0.0063 : if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) + 852 0.0096 0 0 1 0.0339 140 0.0064 : if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) : return 0; : - 101 0.0013 0 0 0 0 22 0.0011 : if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { + 95 0.0011 0 0 0 0 13 5.9e-04 : if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { : unsigned long n; : : free = get_page_cache_size(); - 4 5.0e-05 0 0 0 0 2 9.6e-05 : free += nr_swap_pages; + 3 3.4e-05 0 0 0 0 1 4.5e-05 : free += nr_swap_pages; : : /* : * Any slabs which are created with the @@ -105,15 +105,15 @@ : * which are reclaimable, under pressure. 
The dentry : * cache and most inode caches should fall into this : */ - 48 6.0e-04 0 0 0 0 13 6.2e-04 : free += atomic_read(&slab_reclaim_pages); + 39 4.4e-04 0 0 0 0 16 7.3e-04 : free += atomic_read(&slab_reclaim_pages); : : /* : * Leave the last 3% for root : */ - 50 6.2e-04 0 0 0 0 12 5.7e-04 : if (!cap_sys_admin) + 85 9.6e-04 0 0 0 0 20 9.1e-04 : if (!cap_sys_admin) : free -= free / 32; : - 618 0.0077 0 0 0 0 58 0.0028 : if (free > pages) + 692 0.0078 0 0 0 0 47 0.0021 : if (free > pages) : return 0; : : /* @@ -154,7 +154,7 @@ : vm_unacct_memory(pages); : : return -ENOMEM; - 549 0.0068 0 0 0 0 156 0.0075 :} + 510 0.0057 0 0 0 0 136 0.0062 :} : :EXPORT_SYMBOL(__vm_enough_memory); : @@ -163,78 +163,78 @@ : */ :static void __remove_shared_vm_struct(struct vm_area_struct *vma, : struct file *file, struct address_space *mapping) - 221 0.0028 0 0 0 0 62 0.0030 :{ - 167 0.0021 0 0 0 0 23 0.0011 : if (vma->vm_flags & VM_DENYWRITE) /* __remove_shared_vm_struct total: 3569 0.0445 0 0 0 0 778 0.0372 */ - 622 0.0078 0 0 0 0 54 0.0026 : atomic_inc(&file->f_dentry->d_inode->i_writecount); - 1137 0.0142 0 0 0 0 189 0.0090 : if (vma->vm_flags & VM_SHARED) + 300 0.0034 0 0 0 0 60 0.0027 :{ + 168 0.0019 0 0 0 0 40 0.0018 : if (vma->vm_flags & VM_DENYWRITE) /* __remove_shared_vm_struct total: 4536 0.0510 0 0 1 0.0339 880 0.0400 */ + 657 0.0074 0 0 0 0 74 0.0034 : atomic_inc(&file->f_dentry->d_inode->i_writecount); + 1496 0.0168 0 0 1 0.0339 225 0.0102 : if (vma->vm_flags & VM_SHARED) : mapping->i_mmap_writable--; : : flush_dcache_mmap_lock(mapping); - 1155 0.0144 0 0 0 0 317 0.0151 : if (unlikely(vma->vm_flags & VM_NONLINEAR)) + 1618 0.0182 0 0 0 0 358 0.0163 : if (unlikely(vma->vm_flags & VM_NONLINEAR)) : list_del_init(&vma->shared.vm_set.list); : else - 157 0.0020 0 0 0 0 68 0.0032 : vma_prio_tree_remove(vma, &mapping->i_mmap); + 164 0.0018 0 0 0 0 62 0.0028 : vma_prio_tree_remove(vma, &mapping->i_mmap); : flush_dcache_mmap_unlock(mapping); - 110 0.0014 0 0 0 0 65 0.0031 :} + 132 0.0015 0 0 0 0 60 0.0027 :} : :/* : * Unlink a file-based vm structure from its prio_tree, to hide : * vma from rmap and vmtruncate before freeing its page tables. : */ :void unlink_file_vma(struct vm_area_struct *vma) - 1559 0.0194 0 0 0 0 355 0.0170 :{ /* unlink_file_vma total: 6180 0.0770 0 0 1 0.0351 1366 0.0653 */ - 103 0.0013 0 0 0 0 46 0.0022 : struct file *file = vma->vm_file; + 1626 0.0183 0 0 0 0 324 0.0147 :{ /* unlink_file_vma total: 7124 0.0801 0 0 0 0 1433 0.0651 */ + 111 0.0012 0 0 0 0 51 0.0023 : struct file *file = vma->vm_file; : - 41 5.1e-04 0 0 0 0 14 6.7e-04 : if (file) { - 93 0.0012 0 0 0 0 39 0.0019 : struct address_space *mapping = file->f_mapping; - 299 0.0037 0 0 0 0 91 0.0043 : spin_lock(&mapping->i_mmap_lock); - 1137 0.0142 0 0 0 0 315 0.0150 : __remove_shared_vm_struct(vma, file, mapping); + 42 4.7e-04 0 0 0 0 18 8.2e-04 : if (file) { + 110 0.0012 0 0 0 0 37 0.0017 : struct address_space *mapping = file->f_mapping; + 365 0.0041 0 0 0 0 91 0.0041 : spin_lock(&mapping->i_mmap_lock); + 1342 0.0151 0 0 0 0 382 0.0173 : __remove_shared_vm_struct(vma, file, mapping); : spin_unlock(&mapping->i_mmap_lock); : } - 2920 0.0364 0 0 1 0.0351 494 0.0236 :} + 3499 0.0394 0 0 0 0 520 0.0236 :} : :/* : * Close a vm structure and free it, returning the next. 
: */ :static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) - 651 0.0081 0 0 0 0 116 0.0055 :{ /* remove_vma total: 2702 0.0337 0 0 0 0 1190 0.0568 */ + 1225 0.0138 0 0 0 0 80 0.0036 :{ /* remove_vma total: 3671 0.0413 0 0 0 0 993 0.0451 */ : struct vm_area_struct *next = vma->vm_next; : : might_sleep(); - 488 0.0061 0 0 0 0 223 0.0107 : if (vma->vm_ops && vma->vm_ops->close) + 470 0.0053 0 0 0 0 177 0.0080 : if (vma->vm_ops && vma->vm_ops->close) : vma->vm_ops->close(vma); - 560 0.0070 0 0 0 0 243 0.0116 : if (vma->vm_file) - 66 8.2e-04 0 0 0 0 32 0.0015 : fput(vma->vm_file); - 68 8.5e-04 0 0 0 0 71 0.0034 : mpol_free(vma_policy(vma)); - 265 0.0033 0 0 0 0 172 0.0082 : kmem_cache_free(vm_area_cachep, vma); + 584 0.0066 0 0 0 0 207 0.0094 : if (vma->vm_file) + 61 6.9e-04 0 0 0 0 42 0.0019 : fput(vma->vm_file); + 118 0.0013 0 0 0 0 56 0.0025 : mpol_free(vma_policy(vma)); + 448 0.0050 0 0 0 0 140 0.0064 : kmem_cache_free(vm_area_cachep, vma); : return next; - 335 0.0042 0 0 0 0 151 0.0072 :} + 289 0.0033 0 0 0 0 131 0.0059 :} : :asmlinkage unsigned long sys_brk(unsigned long brk) - 816 0.0102 0 0 0 0 114 0.0054 :{ /* sys_brk total: 2564 0.0319 0 0 0 0 424 0.0203 */ + 846 0.0095 0 0 0 0 142 0.0064 :{ /* sys_brk total: 2674 0.0301 0 0 0 0 481 0.0218 */ : unsigned long rlim, retval; : unsigned long newbrk, oldbrk; - 9 1.1e-04 0 0 0 0 2 9.6e-05 : struct mm_struct *mm = current->mm; + 11 1.2e-04 0 0 0 0 9 4.1e-04 : struct mm_struct *mm = current->mm; : - 6 7.5e-05 0 0 0 0 0 0 : down_write(&mm->mmap_sem); + 9 1.0e-04 0 0 0 0 0 0 : down_write(&mm->mmap_sem); : - 96 0.0012 0 0 0 0 18 8.6e-04 : if (brk < mm->end_code) + 90 0.0010 0 0 0 0 22 1.0e-03 : if (brk < mm->end_code) : goto out; - 153 0.0019 0 0 0 0 55 0.0026 : newbrk = PAGE_ALIGN(brk); - 62 7.7e-04 0 0 0 0 23 0.0011 : oldbrk = PAGE_ALIGN(mm->brk); - 4 5.0e-05 0 0 0 0 1 4.8e-05 : if (oldbrk == newbrk) + 167 0.0019 0 0 0 0 56 0.0025 : newbrk = PAGE_ALIGN(brk); + 56 6.3e-04 0 0 0 0 19 8.6e-04 : oldbrk = PAGE_ALIGN(mm->brk); + 3 3.4e-05 0 0 0 0 0 0 : if (oldbrk == newbrk) : goto set_brk; : : /* Always allow shrinking brk. */ - 9 1.1e-04 0 0 0 0 5 2.4e-04 : if (brk <= mm->brk) { - 48 6.0e-04 0 0 0 0 22 0.0011 : if (!do_munmap(mm, newbrk, oldbrk-newbrk)) + 12 1.3e-04 0 0 0 0 5 2.3e-04 : if (brk <= mm->brk) { + 56 6.3e-04 0 0 0 0 21 9.5e-04 : if (!do_munmap(mm, newbrk, oldbrk-newbrk)) : goto set_brk; : goto out; : } : : /* Check against rlimit.. */ - 374 0.0047 0 0 0 0 47 0.0022 : rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; - 7 8.7e-05 0 0 0 0 1 4.8e-05 : if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) + 334 0.0038 0 0 0 0 39 0.0018 : rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; + 7 7.9e-05 0 0 0 0 2 9.1e-05 : if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) : goto out; : : /* Check against existing mmap mappings. */ @@ -242,15 +242,15 @@ : goto out; : : /* Ok, looks good - let it rip. 
*/ - 95 0.0012 0 0 0 0 28 0.0013 : if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) + 192 0.0022 0 0 0 0 42 0.0019 : if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) : goto out; :set_brk: - 5 6.2e-05 0 0 0 0 7 3.3e-04 : mm->brk = brk; + 15 1.7e-04 0 0 0 0 3 1.4e-04 : mm->brk = brk; :out: - 112 0.0014 0 0 0 0 17 8.1e-04 : retval = mm->brk; + 134 0.0015 0 0 0 0 22 1.0e-03 : retval = mm->brk; : up_write(&mm->mmap_sem); : return retval; - 185 0.0023 0 0 0 0 21 0.0010 :} + 175 0.0020 0 0 0 0 33 0.0015 :} : :#ifdef DEBUG_MM_RB :static int browse_rb(struct rb_root *root) @@ -305,50 +305,50 @@ :find_vma_prepare(struct mm_struct *mm, unsigned long addr, : struct vm_area_struct **pprev, struct rb_node ***rb_link, : struct rb_node ** rb_parent) - 512 0.0064 0 0 0 0 108 0.0052 :{ /* find_vma_prepare total: 9323 0.1162 0 0 2 0.0702 3543 0.1693 */ + 541 0.0061 0 0 0 0 114 0.0052 :{ /* find_vma_prepare total: 9649 0.1085 0 0 1 0.0339 3750 0.1703 */ : struct vm_area_struct * vma; : struct rb_node ** __rb_link, * __rb_parent, * rb_prev; : - 903 0.0113 0 0 0 0 149 0.0071 : __rb_link = &mm->mm_rb.rb_node; + 968 0.0109 0 0 0 0 167 0.0076 : __rb_link = &mm->mm_rb.rb_node; : rb_prev = __rb_parent = NULL; : vma = NULL; : - 972 0.0121 0 0 1 0.0351 414 0.0198 : while (*__rb_link) { + 1033 0.0116 0 0 0 0 447 0.0203 : while (*__rb_link) { : struct vm_area_struct *vma_tmp; : : __rb_parent = *__rb_link; - 1683 0.0210 0 0 1 0.0351 694 0.0332 : vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); + 1707 0.0192 0 0 0 0 800 0.0363 : vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); : - 833 0.0104 0 0 0 0 324 0.0155 : if (vma_tmp->vm_end > addr) { + 850 0.0096 0 0 0 0 350 0.0159 : if (vma_tmp->vm_end > addr) { : vma = vma_tmp; - 1372 0.0171 0 0 0 0 513 0.0245 : if (vma_tmp->vm_start <= addr) + 1420 0.0160 0 0 1 0.0339 546 0.0248 : if (vma_tmp->vm_start <= addr) : return vma; - 314 0.0039 0 0 0 0 161 0.0077 : __rb_link = &__rb_parent->rb_left; + 380 0.0043 0 0 0 0 172 0.0078 : __rb_link = &__rb_parent->rb_left; : } else { : rb_prev = __rb_parent; - 1969 0.0245 0 0 0 0 878 0.0419 : __rb_link = &__rb_parent->rb_right; + 1934 0.0218 0 0 0 0 860 0.0391 : __rb_link = &__rb_parent->rb_right; : } : } : - 365 0.0045 0 0 0 0 118 0.0056 : *pprev = NULL; - 76 9.5e-04 0 0 0 0 36 0.0017 : if (rb_prev) - 112 0.0014 0 0 0 0 42 0.0020 : *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); - 43 5.4e-04 0 0 0 0 13 6.2e-04 : *rb_link = __rb_link; - 24 3.0e-04 0 0 0 0 11 5.3e-04 : *rb_parent = __rb_parent; + 354 0.0040 0 0 0 0 101 0.0046 : *pprev = NULL; + 77 8.7e-04 0 0 0 0 33 0.0015 : if (rb_prev) + 102 0.0011 0 0 0 0 47 0.0021 : *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); + 53 6.0e-04 0 0 0 0 20 9.1e-04 : *rb_link = __rb_link; + 40 4.5e-04 0 0 0 0 5 2.3e-04 : *rb_parent = __rb_parent; : return vma; - 145 0.0018 0 0 0 0 82 0.0039 :} + 190 0.0021 0 0 0 0 88 0.0040 :} : :static inline void :__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, : struct vm_area_struct *prev, struct rb_node *rb_parent) :{ - 26 3.2e-04 0 0 0 0 10 4.8e-04 : if (prev) { - 143 0.0018 0 0 0 0 51 0.0024 : vma->vm_next = prev->vm_next; - 21 2.6e-04 0 0 0 0 13 6.2e-04 : prev->vm_next = vma; + 17 1.9e-04 0 0 0 0 14 6.4e-04 : if (prev) { + 124 0.0014 0 0 0 0 39 0.0018 : vma->vm_next = prev->vm_next; + 15 1.7e-04 0 0 0 0 11 5.0e-04 : prev->vm_next = vma; : } else { - 85 0.0011 0 0 0 0 10 4.8e-04 : mm->mmap = vma; - : if (rb_parent) - 36 4.5e-04 0 0 0 0 9 4.3e-04 : vma->vm_next = rb_entry(rb_parent, + 103 0.0012 0 0 0 0 8 
3.6e-04 : mm->mmap = vma; + 1 1.1e-05 0 0 0 0 2 9.1e-05 : if (rb_parent) + 23 2.6e-04 0 0 0 0 5 2.3e-04 : vma->vm_next = rb_entry(rb_parent, : struct vm_area_struct, vm_rb); : else : vma->vm_next = NULL; @@ -357,10 +357,10 @@ : :void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, : struct rb_node **rb_link, struct rb_node *rb_parent) - 559 0.0070 0 0 0 0 131 0.0063 :{ /* __vma_link_rb total: 2690 0.0335 0 0 0 0 603 0.0288 */ + 480 0.0054 0 0 0 0 136 0.0062 :{ /* __vma_link_rb total: 2469 0.0278 0 0 0 0 567 0.0258 */ : rb_link_node(&vma->vm_rb, rb_parent, rb_link); - 535 0.0067 0 0 0 0 71 0.0034 : rb_insert_color(&vma->vm_rb, &mm->mm_rb); - 461 0.0057 0 0 0 0 138 0.0066 :} + 508 0.0057 0 0 0 0 49 0.0022 : rb_insert_color(&vma->vm_rb, &mm->mm_rb); + 384 0.0043 0 0 0 0 126 0.0057 :} : :static inline void __vma_link_file(struct vm_area_struct *vma) :{ @@ -388,37 +388,37 @@ :__vma_link(struct mm_struct *mm, struct vm_area_struct *vma, : struct vm_area_struct *prev, struct rb_node **rb_link, : struct rb_node *rb_parent) - 895 0.0112 0 0 0 0 648 0.0310 :{ /* __vma_link total: 2473 0.0308 0 0 0 0 1487 0.0710 */ + 725 0.0082 0 0 1 0.0339 483 0.0219 :{ /* __vma_link total: 2335 0.0263 0 0 2 0.0679 1244 0.0565 */ : __vma_link_list(mm, vma, prev, rb_parent); - 339 0.0042 0 0 0 0 134 0.0064 : __vma_link_rb(mm, vma, rb_link, rb_parent); - 506 0.0063 0 0 0 0 426 0.0204 : __anon_vma_link(vma); - 204 0.0025 0 0 0 0 59 0.0028 :} + 376 0.0042 0 0 0 0 128 0.0058 : __vma_link_rb(mm, vma, rb_link, rb_parent); + 547 0.0062 0 0 1 0.0339 377 0.0171 : __anon_vma_link(vma); + 206 0.0023 0 0 0 0 61 0.0028 :} : :static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, : struct vm_area_struct *prev, struct rb_node **rb_link, : struct rb_node *rb_parent) - 332 0.0041 0 0 0 0 144 0.0069 :{ /* vma_link total: 5701 0.0710 0 0 1 0.0351 1736 0.0829 */ + 323 0.0036 0 0 0 0 149 0.0068 :{ /* vma_link total: 5449 0.0613 0 0 0 0 1860 0.0845 */ : struct address_space *mapping = NULL; : - 74 9.2e-04 0 0 0 0 60 0.0029 : if (vma->vm_file) - 4 5.0e-05 0 0 0 0 2 9.6e-05 : mapping = vma->vm_file->f_mapping; + 96 0.0011 0 0 0 0 45 0.0020 : if (vma->vm_file) + 3 3.4e-05 0 0 0 0 0 0 : mapping = vma->vm_file->f_mapping; : - 98 0.0012 0 0 0 0 77 0.0037 : if (mapping) { - 19 2.4e-04 0 0 0 0 17 8.1e-04 : spin_lock(&mapping->i_mmap_lock); - 209 0.0026 0 0 0 0 183 0.0087 : vma->vm_truncate_count = mapping->truncate_count; + 69 7.8e-04 0 0 0 0 63 0.0029 : if (mapping) { + 13 1.5e-04 0 0 0 0 11 5.0e-04 : spin_lock(&mapping->i_mmap_lock); + 287 0.0032 0 0 0 0 281 0.0128 : vma->vm_truncate_count = mapping->truncate_count; : } : anon_vma_lock(vma); : - 245 0.0031 0 0 0 0 239 0.0114 : __vma_link(mm, vma, prev, rb_link, rb_parent); + 262 0.0029 0 0 0 0 253 0.0115 : __vma_link(mm, vma, prev, rb_link, rb_parent); : __vma_link_file(vma); : : anon_vma_unlock(vma); - 84 0.0010 0 0 0 0 24 0.0011 : if (mapping) + 68 7.6e-04 0 0 0 0 27 0.0012 : if (mapping) : spin_unlock(&mapping->i_mmap_lock); : - 287 0.0036 0 0 0 0 47 0.0022 : mm->map_count++; + 255 0.0029 0 0 0 0 53 0.0024 : mm->map_count++; : validate_mm(mm); - 1164 0.0145 0 0 0 0 222 0.0106 :} + 1162 0.0131 0 0 0 0 191 0.0087 :} : :/* : * Helper for vma_adjust in the split_vma insert case: @@ -457,28 +457,28 @@ : */ :void vma_adjust(struct vm_area_struct *vma, unsigned long start, : unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) - 979 0.0122 0 0 1 0.0351 157 0.0075 :{ /* vma_adjust total: 12560 0.1565 0 0 2 0.0702 2505 0.1197 */ - 361 0.0045 0 0 0 0 79 
0.0038 : struct mm_struct *mm = vma->vm_mm; - 5 6.2e-05 0 0 0 0 0 0 : struct vm_area_struct *next = vma->vm_next; + 1012 0.0114 0 0 1 0.0339 159 0.0072 :{ /* vma_adjust total: 12642 0.1422 0 0 2 0.0679 2583 0.1173 */ + 428 0.0048 0 0 0 0 75 0.0034 : struct mm_struct *mm = vma->vm_mm; + 3 3.4e-05 0 0 0 0 0 0 : struct vm_area_struct *next = vma->vm_next; : struct vm_area_struct *importer = NULL; : struct address_space *mapping = NULL; : struct prio_tree_root *root = NULL; - 55 6.9e-04 0 0 0 0 49 0.0023 : struct file *file = vma->vm_file; + 55 6.2e-04 0 0 0 0 42 0.0019 : struct file *file = vma->vm_file; : struct anon_vma *anon_vma = NULL; : long adjust_next = 0; : int remove_next = 0; : - 108 0.0013 0 0 0 0 52 0.0025 : if (next && !insert) { - 72 9.0e-04 0 0 0 0 37 0.0018 : if (end >= next->vm_end) { + 111 0.0012 0 0 0 0 70 0.0032 : if (next && !insert) { + 62 7.0e-04 0 0 0 0 45 0.0020 : if (end >= next->vm_end) { : /* : * vma expands, overlapping all the next, and : * perhaps the one after too (mprotect case 6). : */ - 1 1.2e-05 0 0 0 0 0 0 :again: remove_next = 1 + (end > next->vm_end); + 0 0 0 0 0 0 1 4.5e-05 :again: remove_next = 1 + (end > next->vm_end); : end = next->vm_end; : anon_vma = next->anon_vma; : importer = vma; - 538 0.0067 0 0 1 0.0351 90 0.0043 : } else if (end > next->vm_start) { + 573 0.0064 0 0 0 0 98 0.0045 : } else if (end > next->vm_start) { : /* : * vma expands, overlapping part of the next: : * mprotect case 5 shifting the boundary up. @@ -486,24 +486,24 @@ : adjust_next = (end - next->vm_start) >> PAGE_SHIFT; : anon_vma = next->anon_vma; : importer = vma; - 117 0.0015 0 0 0 0 45 0.0021 : } else if (end < vma->vm_end) { + 93 0.0010 0 0 0 0 49 0.0022 : } else if (end < vma->vm_end) { : /* : * vma shrinks, and !insert tells it's not : * split_vma inserting another: so it must be : * mprotect case 4 shifting the boundary down. : */ : adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); - 871 0.0109 0 0 0 0 224 0.0107 : anon_vma = next->anon_vma; + 1034 0.0116 0 0 0 0 220 0.0100 : anon_vma = next->anon_vma; : importer = next; : } : } : - 11 1.4e-04 0 0 0 0 0 0 : if (file) { - 71 8.8e-04 0 0 0 0 34 0.0016 : mapping = file->f_mapping; + 13 1.5e-04 0 0 0 0 1 4.5e-05 : if (file) { + 81 9.1e-04 0 0 0 0 33 0.0015 : mapping = file->f_mapping; : if (!(vma->vm_flags & VM_NONLINEAR)) - 69 8.6e-04 0 0 0 0 16 7.6e-04 : root = &mapping->i_mmap; - 18 2.2e-04 0 0 0 0 17 8.1e-04 : spin_lock(&mapping->i_mmap_lock); - 159 0.0020 0 0 0 0 53 0.0025 : if (importer && + 65 7.3e-04 0 0 0 0 9 4.1e-04 : root = &mapping->i_mmap; + 17 1.9e-04 0 0 0 0 9 4.1e-04 : spin_lock(&mapping->i_mmap_lock); + 140 0.0016 0 0 0 0 78 0.0035 : if (importer && : vma->vm_truncate_count != next->vm_truncate_count) { : /* : * unmap_mapping_range might be in progress: @@ -511,7 +511,7 @@ : */ : importer->vm_truncate_count = 0; : } - 96 0.0012 0 0 0 0 12 5.7e-04 : if (insert) { + 86 9.7e-04 0 0 0 0 20 9.1e-04 : if (insert) { : insert->vm_truncate_count = vma->vm_truncate_count; : /* : * Put into prio_tree now, so instantiated pages @@ -527,54 +527,54 @@ : * When changing only vma->vm_end, we don't really need : * anon_vma lock: but is that case worth optimizing out? 
: */ - 1269 0.0158 0 0 0 0 184 0.0088 : if (vma->anon_vma) + 1209 0.0136 0 0 0 0 169 0.0077 : if (vma->anon_vma) : anon_vma = vma->anon_vma; - 442 0.0055 0 0 0 0 66 0.0032 : if (anon_vma) { - 468 0.0058 0 0 0 0 76 0.0036 : spin_lock(&anon_vma->lock); + 447 0.0050 0 0 0 0 91 0.0041 : if (anon_vma) { + 451 0.0051 0 0 0 0 70 0.0032 : spin_lock(&anon_vma->lock); : /* : * Easily overlooked: when mprotect shifts the boundary, : * make sure the expanding vma has anon_vma set if the : * shrinking vma had, to cover any anon pages imported. : */ - 1312 0.0163 0 0 0 0 200 0.0096 : if (importer && !importer->anon_vma) { + 1372 0.0154 0 0 0 0 221 0.0100 : if (importer && !importer->anon_vma) { : importer->anon_vma = anon_vma; : __anon_vma_link(importer); : } : } : - 939 0.0117 0 0 0 0 82 0.0039 : if (root) { + 869 0.0098 0 0 0 0 72 0.0033 : if (root) { : flush_dcache_mmap_lock(mapping); - 177 0.0022 0 0 0 0 22 0.0011 : vma_prio_tree_remove(vma, root); - 10 1.2e-04 0 0 0 0 5 2.4e-04 : if (adjust_next) + 182 0.0020 0 0 0 0 24 0.0011 : vma_prio_tree_remove(vma, root); + 10 1.1e-04 0 0 0 0 4 1.8e-04 : if (adjust_next) : vma_prio_tree_remove(next, root); : } : - 29 3.6e-04 0 0 0 0 22 0.0011 : vma->vm_start = start; - 6 7.5e-05 0 0 0 0 3 1.4e-04 : vma->vm_end = end; - 32 4.0e-04 0 0 0 0 16 7.6e-04 : vma->vm_pgoff = pgoff; - 218 0.0027 0 0 0 0 76 0.0036 : if (adjust_next) { + 41 4.6e-04 0 0 0 0 24 0.0011 : vma->vm_start = start; + 7 7.9e-05 0 0 0 0 2 9.1e-05 : vma->vm_end = end; + 26 2.9e-04 0 0 0 0 10 4.5e-04 : vma->vm_pgoff = pgoff; + 204 0.0023 0 0 0 0 78 0.0035 : if (adjust_next) { : next->vm_start += adjust_next << PAGE_SHIFT; : next->vm_pgoff += adjust_next; : } : - 136 0.0017 0 0 0 0 59 0.0028 : if (root) { - 55 6.9e-04 0 0 0 0 12 5.7e-04 : if (adjust_next) + 163 0.0018 0 0 0 0 77 0.0035 : if (root) { + 53 6.0e-04 0 0 0 0 26 0.0012 : if (adjust_next) : vma_prio_tree_insert(next, root); - 77 9.6e-04 0 0 0 0 38 0.0018 : vma_prio_tree_insert(vma, root); + 82 9.2e-04 0 0 0 0 22 1.0e-03 : vma_prio_tree_insert(vma, root); : flush_dcache_mmap_unlock(mapping); : } : - 751 0.0094 0 0 0 0 90 0.0043 : if (remove_next) { + 633 0.0071 0 0 0 0 97 0.0044 : if (remove_next) { : /* : * vma_merge has merged next into vma, and needs : * us to remove next before dropping the locks. 
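
/*
 * Editor's note (not part of the annotated source): vma_adjust() above is
 * the common endpoint of the mprotect boundary cases numbered in its
 * comments. A minimal userspace sketch that exercises it; page size and
 * reading /proc/self/maps are the only assumptions:
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Change only the middle page: two split_vma() calls, each
	 * finishing in the vma_adjust() path annotated above. */
	if (mprotect(p + page, page, PROT_READ) != 0)
		perror("mprotect");

	system("cat /proc/self/maps");	/* expect three adjacent VMAs */
	return 0;
}
/*
 * After the mprotect() the single three-page VMA shows up as three
 * entries with rw-p / r--p / rw-p protections.
 */
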
: */ : __vma_unlink(mm, next, vma); - 1 1.2e-05 0 0 0 0 0 0 : if (file) + 0 0 0 0 0 0 1 4.5e-05 : if (file) : __remove_shared_vm_struct(next, file, mapping); - 3 3.7e-05 0 0 0 0 0 0 : if (next->anon_vma) - 2 2.5e-05 0 0 0 0 0 0 : __anon_vma_merge(vma, next); - 334 0.0042 0 0 0 0 62 0.0030 : } else if (insert) { + 4 4.5e-05 0 0 0 0 0 0 : if (next->anon_vma) + 1 1.1e-05 0 0 0 0 0 0 : __anon_vma_merge(vma, next); + 289 0.0033 0 0 0 0 71 0.0032 : } else if (insert) { : /* : * split_vma has split insert from vma, and needs : * us to insert it before dropping the locks @@ -583,17 +583,17 @@ : __insert_vm_struct(mm, insert); : } : - 480 0.0060 0 0 0 0 43 0.0021 : if (anon_vma) + 495 0.0056 0 0 0 0 74 0.0034 : if (anon_vma) : spin_unlock(&anon_vma->lock); - 116 0.0014 0 0 0 0 43 0.0021 : if (mapping) + 95 0.0011 0 0 0 0 46 0.0021 : if (mapping) : spin_unlock(&mapping->i_mmap_lock); : - 93 0.0012 0 0 0 0 41 0.0020 : if (remove_next) { - 3 3.7e-05 0 0 0 0 0 0 : if (file) + 90 0.0010 0 0 0 0 21 9.5e-04 : if (remove_next) { + 1 1.1e-05 0 0 0 0 0 0 : if (file) : fput(file); - 5 6.2e-05 0 0 0 0 1 4.8e-05 : mm->map_count--; + 6 6.7e-05 0 0 0 0 2 9.1e-05 : mm->map_count--; : mpol_free(vma_policy(next)); - 7 8.7e-05 0 0 0 0 2 9.6e-05 : kmem_cache_free(vm_area_cachep, next); + 5 5.6e-05 0 0 0 0 1 4.5e-05 : kmem_cache_free(vm_area_cachep, next); : /* : * In mprotect's case 6 (see comments on vma_merge), : * we must remove another next too. It would clutter @@ -606,7 +606,7 @@ : } : : validate_mm(mm); - 469 0.0058 0 0 0 0 128 0.0061 :} + 423 0.0048 0 0 0 0 127 0.0058 :} : :/* : * If the vma has a ->close operation then the driver probably needs to release @@ -617,11 +617,11 @@ :static inline int is_mergeable_vma(struct vm_area_struct *vma, : struct file *file, unsigned long vm_flags) :{ - 921 0.0115 0 0 1 0.0351 660 0.0315 : if (vma->vm_flags != vm_flags) + 928 0.0104 0 0 0 0 758 0.0344 : if (vma->vm_flags != vm_flags) : return 0; - 120 0.0015 0 0 0 0 54 0.0026 : if (vma->vm_file != file) + 126 0.0014 0 0 0 0 49 0.0022 : if (vma->vm_file != file) : return 0; - 80 1.0e-03 0 0 0 0 33 0.0016 : if (vma->vm_ops && vma->vm_ops->close) + 70 7.9e-04 0 0 0 0 29 0.0013 : if (vma->vm_ops && vma->vm_ops->close) : return 0; : return 1; :} @@ -629,7 +629,7 @@ :static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, : struct anon_vma *anon_vma2) :{ - 135 0.0017 0 0 0 0 68 0.0032 : return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2); + 185 0.0021 0 0 0 0 64 0.0029 : return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2); :} : :/* @@ -646,14 +646,14 @@ :static int :can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, : struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) - 18 2.2e-04 0 0 0 0 2 9.6e-05 :{ - 1 1.2e-05 0 0 0 0 0 0 : if (is_mergeable_vma(vma, file, vm_flags) && + 26 2.9e-04 0 0 0 0 3 1.4e-04 :{ + : if (is_mergeable_vma(vma, file, vm_flags) && : is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { - 30 3.7e-04 0 0 0 0 9 4.3e-04 : if (vma->vm_pgoff == vm_pgoff) + 41 4.6e-04 0 0 0 0 11 5.0e-04 : if (vma->vm_pgoff == vm_pgoff) : return 1; : } : return 0; - 20 2.5e-04 0 0 0 0 1 4.8e-05 :} + 9 1.0e-04 0 0 0 0 2 9.1e-05 :} : :/* : * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) @@ -665,16 +665,16 @@ :static int :can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, : struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) - 918 0.0114 0 0 0 0 216 0.0103 :{ - 11 1.4e-04 0 0 0 0 7 3.3e-04 : if (is_mergeable_vma(vma, file, 
vm_flags) && + 981 0.0110 0 0 0 0 244 0.0111 :{ + 10 1.1e-04 0 0 0 0 3 1.4e-04 : if (is_mergeable_vma(vma, file, vm_flags) && : is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { : pgoff_t vm_pglen; : vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - 475 0.0059 0 0 0 0 313 0.0150 : if (vma->vm_pgoff + vm_pglen == vm_pgoff) + 563 0.0063 0 0 0 0 272 0.0124 : if (vma->vm_pgoff + vm_pglen == vm_pgoff) : return 1; : } : return 0; - 149 0.0019 0 0 0 0 79 0.0038 :} + 143 0.0016 0 0 0 0 59 0.0027 :} : :/* : * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out @@ -710,46 +710,46 @@ : unsigned long end, unsigned long vm_flags, : struct anon_vma *anon_vma, struct file *file, : pgoff_t pgoff, struct mempolicy *policy) - 1748 0.0218 0 0 0 0 391 0.0187 :{ /* vma_merge total: 7150 0.0891 0 0 0 0 2037 0.0973 */ - 82 0.0010 0 0 0 0 27 0.0013 : pgoff_t pglen = (end - addr) >> PAGE_SHIFT; + 1782 0.0200 0 0 0 0 415 0.0188 :{ /* vma_merge total: 7523 0.0846 0 0 0 0 2019 0.0917 */ + 91 0.0010 0 0 0 0 24 0.0011 : pgoff_t pglen = (end - addr) >> PAGE_SHIFT; : struct vm_area_struct *area, *next; : : /* : * We later require that vma->vm_flags == vm_flags, : * so this tests vma->vm_flags & VM_SPECIAL, too. : */ - 119 0.0015 0 0 0 0 25 0.0012 : if (vm_flags & VM_SPECIAL) + 137 0.0015 0 0 0 0 20 9.1e-04 : if (vm_flags & VM_SPECIAL) : return NULL; : : if (prev) - 180 0.0022 0 0 0 0 94 0.0045 : next = prev->vm_next; + 158 0.0018 0 0 0 0 81 0.0037 : next = prev->vm_next; : else : next = mm->mmap; : area = next; - 84 0.0010 0 0 0 0 32 0.0015 : if (next && next->vm_end == end) /* cases 6, 7, 8 */ - 5 6.2e-05 0 0 0 0 0 0 : next = next->vm_next; + 74 8.3e-04 0 0 0 0 38 0.0017 : if (next && next->vm_end == end) /* cases 6, 7, 8 */ + 6 6.7e-05 0 0 0 0 4 1.8e-04 : next = next->vm_next; : : /* : * Can it merge with the predecessor? : */ - 1443 0.0180 0 0 0 0 601 0.0287 : if (prev && prev->vm_end == addr && + 1542 0.0173 0 0 0 0 554 0.0252 : if (prev && prev->vm_end == addr && : mpol_equal(vma_policy(prev), policy) && : can_vma_merge_after(prev, vm_flags, : anon_vma, file, pgoff)) { : /* : * OK, it can. Can we now merge in the successor as well? : */ - 56 7.0e-04 0 0 0 0 30 0.0014 : if (next && end == next->vm_start && + 81 9.1e-04 0 0 0 0 52 0.0024 : if (next && end == next->vm_start && : mpol_equal(policy, vma_policy(next)) && : can_vma_merge_before(next, vm_flags, : anon_vma, file, pgoff+pglen) && : is_mergeable_anon_vma(prev->anon_vma, : next->anon_vma)) { : /* cases 1, 6 */ - 2 2.5e-05 0 0 0 0 1 4.8e-05 : vma_adjust(prev, prev->vm_start, + 1 1.1e-05 0 0 0 0 0 0 : vma_adjust(prev, prev->vm_start, : next->vm_end, prev->vm_pgoff, NULL); : } else /* cases 2, 5, 7 */ - 867 0.0108 0 0 0 0 108 0.0052 : vma_adjust(prev, prev->vm_start, + 1001 0.0113 0 0 0 0 145 0.0066 : vma_adjust(prev, prev->vm_start, : end, prev->vm_pgoff, NULL); : return prev; : } @@ -757,7 +757,7 @@ : /* : * Can this new request be merged in front of next? 
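
/*
 * Editor's note (illustrative, not from the source): the predecessor
 * merge above is why two exactly adjacent anonymous mappings with
 * identical flags usually collapse into a single VMA. Sketch, assuming
 * a MAP_FIXED re-map at base + page is acceptable on the test machine:
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *base = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(base + page, page);		/* shrink to one page */

	/* Same protection, same flags, prev->vm_end == addr: vma_merge()
	 * extends the old VMA instead of linking a new one. */
	if (mmap(base + page, page, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		perror("mmap MAP_FIXED");

	system("cat /proc/self/maps");	/* expect one combined entry */
	return 0;
}
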
: */ - 556 0.0069 0 0 0 0 241 0.0115 : if (next && end == next->vm_start && + 607 0.0068 0 0 0 0 214 0.0097 : if (next && end == next->vm_start && : mpol_equal(policy, vma_policy(next)) && : can_vma_merge_before(next, vm_flags, : anon_vma, file, pgoff+pglen)) { @@ -765,13 +765,13 @@ : vma_adjust(prev, prev->vm_start, : addr, prev->vm_pgoff, NULL); : else /* cases 3, 8 */ - 57 7.1e-04 0 0 0 0 20 9.6e-04 : vma_adjust(area, addr, next->vm_end, + 52 5.8e-04 0 0 0 0 20 9.1e-04 : vma_adjust(area, addr, next->vm_end, : next->vm_pgoff - pglen, NULL); : return area; : } : : return NULL; - 1749 0.0218 0 0 0 0 339 0.0162 :} + 1730 0.0195 0 0 0 0 323 0.0147 :} : :/* : * find_mergeable_anon_vma is used by anon_vma_prepare, to check @@ -782,12 +782,12 @@ : * mprotect. : */ :struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) - 115 0.0014 0 0 0 0 39 0.0019 :{ /* find_mergeable_anon_vma total: 782 0.0097 0 0 0 0 300 0.0143 */ + 75 8.4e-04 0 0 0 0 58 0.0026 :{ /* find_mergeable_anon_vma total: 714 0.0080 0 0 0 0 277 0.0126 */ : struct vm_area_struct *near; : unsigned long vm_flags; : - 15 1.9e-04 0 0 0 0 18 8.6e-04 : near = vma->vm_next; - 72 9.0e-04 0 0 0 0 10 4.8e-04 : if (!near) + 5 5.6e-05 0 0 0 0 10 4.5e-04 : near = vma->vm_next; + 57 6.4e-04 0 0 0 0 15 6.8e-04 : if (!near) : goto try_prev; : : /* @@ -796,10 +796,10 @@ : * Neither mlock nor madvise tries to remerge at present, : * so leave their flags as obstructing a merge. : */ - 3 3.7e-05 0 0 0 0 11 5.3e-04 : vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - 36 4.5e-04 0 0 0 0 30 0.0014 : vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); + 8 9.0e-05 0 0 0 0 1 4.5e-05 : vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); + 36 4.0e-04 0 0 0 0 22 1.0e-03 : vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); : - 58 7.2e-04 0 0 0 0 23 0.0011 : if (near->anon_vma && vma->vm_end == near->vm_start && + 69 7.8e-04 0 0 0 0 22 1.0e-03 : if (near->anon_vma && vma->vm_end == near->vm_start && : mpol_equal(vma_policy(vma), vma_policy(near)) && : can_vma_merge_before(near, vm_flags, : NULL, vma->vm_file, vma->vm_pgoff + @@ -813,19 +813,19 @@ : * (e.g. stash info in next's anon_vma_node when assigning : * an anon_vma, or when trying vma_merge). Another time. : */ - 218 0.0027 0 0 0 0 69 0.0033 : if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) + 215 0.0024 0 0 0 0 56 0.0025 : if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) : BUG(); - 16 2.0e-04 0 0 0 0 4 1.9e-04 : if (!near) + 23 2.6e-04 0 0 0 0 7 3.2e-04 : if (!near) : goto none; : - 3 3.7e-05 0 0 0 0 1 4.8e-05 : vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - 31 3.9e-04 0 0 0 0 14 6.7e-04 : vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); + 3 3.4e-05 0 0 0 0 1 4.5e-05 : vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); + 38 4.3e-04 0 0 0 0 8 3.6e-04 : vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); : - 90 0.0011 0 0 0 0 38 0.0018 : if (near->anon_vma && near->vm_end == vma->vm_start && + 80 9.0e-04 0 0 0 0 39 0.0018 : if (near->anon_vma && near->vm_end == vma->vm_start && : mpol_equal(vma_policy(near), vma_policy(vma)) && : can_vma_merge_after(near, vm_flags, : NULL, vma->vm_file, vma->vm_pgoff)) - 5 6.2e-05 0 0 0 0 3 1.4e-04 : return near->anon_vma; + 4 4.5e-05 0 0 0 0 2 9.1e-05 : return near->anon_vma; :none: : /* : * There's no absolute need to look only at touching neighbours: @@ -836,12 +836,12 @@ : * not trying to minimize memory used for anon_vmas. 
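
/*
 * Editor's note: find_mergeable_anon_vma() above pretends the R/W/X bits
 * match the neighbour's before testing mergeability, so VMAs differing
 * only by mprotect can still share an anon_vma. A standalone restatement
 * of the masking trick — the flag values and the helper name borrow_rwx
 * are invented for illustration, not the kernel's:
 */
#include <assert.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL		/* stands in for any other flag */

static unsigned long borrow_rwx(unsigned long vm_flags,
				unsigned long near_flags)
{
	/* Drop our own R/W/X, adopt the neighbour's. */
	unsigned long f = vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC);

	return f | (near_flags & (VM_READ | VM_WRITE | VM_EXEC));
}

int main(void)
{
	/* read-only next to read-write: the masked flags now agree */
	assert(borrow_rwx(VM_READ, VM_READ | VM_WRITE) ==
	       (VM_READ | VM_WRITE));
	/* anything outside R/W/X still blocks the merge test */
	assert(borrow_rwx(VM_READ | VM_SHARED, VM_READ) & VM_SHARED);
	return 0;
}
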
: */ : return NULL; - 119 0.0015 0 0 0 0 39 0.0019 :} + 95 0.0011 0 0 0 0 36 0.0016 :} : :#ifdef CONFIG_PROC_FS :void vm_stat_account(struct mm_struct *mm, unsigned long flags, : struct file *file, long pages) - 1352 0.0168 0 0 0 0 169 0.0081 :{ /* vm_stat_account total: 3522 0.0439 0 0 1 0.0351 845 0.0404 */ + 1923 0.0216 0 0 0 0 257 0.0117 :{ /* vm_stat_account total: 5158 0.0580 0 0 0 0 1040 0.0472 */ : const unsigned long stack_flags : = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); : @@ -853,15 +853,15 @@ : } :#endif /* CONFIG_HUGETLB */ : - 183 0.0023 0 0 0 0 66 0.0032 : if (file) { - 333 0.0041 0 0 0 0 91 0.0043 : mm->shared_vm += pages; - 170 0.0021 0 0 0 0 45 0.0021 : if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) - 124 0.0015 0 0 0 0 19 9.1e-04 : mm->exec_vm += pages; - 200 0.0025 0 0 1 0.0351 49 0.0023 : } else if (flags & stack_flags) + 159 0.0018 0 0 0 0 62 0.0028 : if (file) { + 845 0.0095 0 0 0 0 164 0.0074 : mm->shared_vm += pages; + 167 0.0019 0 0 0 0 45 0.0020 : if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + 160 0.0018 0 0 0 0 25 0.0011 : mm->exec_vm += pages; + 297 0.0033 0 0 0 0 61 0.0028 : } else if (flags & stack_flags) : mm->stack_vm += pages; - 699 0.0087 0 0 0 0 251 0.0120 : if (flags & (VM_RESERVED|VM_IO)) + 977 0.0110 0 0 0 0 251 0.0114 : if (flags & (VM_RESERVED|VM_IO)) : mm->reserved_vm += pages; - 459 0.0057 0 0 0 0 155 0.0074 :} + 629 0.0071 0 0 0 0 175 0.0079 :} :#endif /* CONFIG_PROC_FS */ : :/* @@ -871,8 +871,8 @@ :unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, : unsigned long len, unsigned long prot, : unsigned long flags, unsigned long pgoff) - 2296 0.0286 0 0 0 0 574 0.0274 :{ /* do_mmap_pgoff total: 23214 0.2893 0 0 1 0.0351 7901 0.3775 */ - 122 0.0015 0 0 0 0 28 0.0013 : struct mm_struct * mm = current->mm; + 2331 0.0262 0 0 0 0 575 0.0261 :{ /* do_mmap_pgoff total: 26609 0.2993 0 0 2 0.0679 8340 0.3788 */ + 109 0.0012 0 0 0 0 21 9.5e-04 : struct mm_struct * mm = current->mm; : struct vm_area_struct * vma, * prev; : struct inode *inode; : unsigned int vm_flags; @@ -882,14 +882,14 @@ : int accountable = 1; : unsigned long charged = 0, reqprot = prot; : - 104 0.0013 0 0 0 0 15 7.2e-04 : if (file) { + 93 0.0010 0 0 0 0 13 5.9e-04 : if (file) { : if (is_file_hugepages(file)) : accountable = 0; : - 233 0.0029 0 0 0 0 42 0.0020 : if (!file->f_op || !file->f_op->mmap) + 218 0.0025 0 0 0 0 42 0.0019 : if (!file->f_op || !file->f_op->mmap) : return -ENODEV; : - 95 0.0012 0 0 0 0 46 0.0022 : if ((prot & PROT_EXEC) && + 93 0.0010 0 0 0 0 39 0.0018 : if ((prot & PROT_EXEC) && : (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)) : return -EPERM; : } @@ -899,47 +899,47 @@ : * (the exception is when the underlying filesystem is noexec : * mounted, in which case we dont add PROT_EXEC.) : */ - 1108 0.0138 0 0 0 0 393 0.0188 : if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) + 1105 0.0124 0 0 0 0 336 0.0153 : if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) : if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))) : prot |= PROT_EXEC; : - 120 0.0015 0 0 0 0 40 0.0019 : if (!len) + 141 0.0016 0 0 0 0 59 0.0027 : if (!len) : return -EINVAL; : : /* Careful about overflows.. */ - 19 2.4e-04 0 0 0 0 3 1.4e-04 : len = PAGE_ALIGN(len); - 647 0.0081 0 0 0 0 240 0.0115 : if (!len || len > TASK_SIZE) + 8 9.0e-05 0 0 0 0 7 3.2e-04 : len = PAGE_ALIGN(len); + 639 0.0072 0 0 0 0 253 0.0115 : if (!len || len > TASK_SIZE) : return -ENOMEM; : : /* offset overflow? 
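
/*
 * Editor's note: the "offset overflow?" test just below relies on
 * unsigned wraparound — if pgoff + (len >> PAGE_SHIFT) wraps past the
 * top of the type, the sum compares below pgoff. Standalone restatement;
 * the PAGE_SHIFT of 12 and the helper name are assumptions:
 */
#include <assert.h>
#include <limits.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

static int pgoff_overflows(unsigned long pgoff, unsigned long len)
{
	/* Unsigned addition is modulo 2^bits, so a wrapped sum is
	 * strictly smaller than either operand. */
	return (pgoff + (len >> PAGE_SHIFT)) < pgoff;
}

int main(void)
{
	assert(!pgoff_overflows(0, 1UL << 20));
	assert(pgoff_overflows(ULONG_MAX - 1, 3UL << PAGE_SHIFT));
	return 0;
}
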
*/ - 392 0.0049 0 0 0 0 94 0.0045 : if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) + 436 0.0049 0 0 0 0 111 0.0050 : if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) : return -EOVERFLOW; : : /* Too many mappings? */ - 166 0.0021 0 0 0 0 75 0.0036 : if (mm->map_count > sysctl_max_map_count) + 150 0.0017 0 0 0 0 87 0.0040 : if (mm->map_count > sysctl_max_map_count) : return -ENOMEM; : : /* Obtain the address to map to. we verify (or select) it and ensure : * that it represents a valid section of the address space. : */ - 407 0.0051 0 0 0 0 274 0.0131 : addr = get_unmapped_area(file, addr, len, pgoff, flags); - 76 9.5e-04 0 0 0 0 64 0.0031 : if (addr & ~PAGE_MASK) + 418 0.0047 0 0 0 0 309 0.0140 : addr = get_unmapped_area(file, addr, len, pgoff, flags); + 86 9.7e-04 0 0 0 0 74 0.0034 : if (addr & ~PAGE_MASK) : return addr; : : /* Do simple checking here so the lower-level routines won't have : * to. we assume access permissions have been handled by the open : * of the memory object, so we don't do any here. : */ - 891 0.0111 0 0 0 0 562 0.0268 : vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + 907 0.0102 0 0 0 0 568 0.0258 : vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | : mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; : - 58 7.2e-04 0 0 0 0 17 8.1e-04 : if (flags & MAP_LOCKED) { + 49 5.5e-04 0 0 0 0 29 0.0013 : if (flags & MAP_LOCKED) { : if (!can_do_mlock()) : return -EPERM; : vm_flags |= VM_LOCKED; : } : /* mlock MCL_FUTURE? */ - 839 0.0105 0 0 0 0 301 0.0144 : if (vm_flags & VM_LOCKED) { + 838 0.0094 0 0 0 0 289 0.0131 : if (vm_flags & VM_LOCKED) { : unsigned long locked, lock_limit; : locked = len >> PAGE_SHIFT; : locked += mm->locked_vm; @@ -949,19 +949,19 @@ : return -EAGAIN; : } : - 936 0.0117 0 0 0 0 531 0.0254 : inode = file ? file->f_dentry->d_inode : NULL; + 1002 0.0113 0 0 0 0 629 0.0286 : inode = file ? file->f_dentry->d_inode : NULL; : : if (file) { - 136 0.0017 0 0 0 0 106 0.0051 : switch (flags & MAP_TYPE) { + 131 0.0015 0 0 0 0 108 0.0049 : switch (flags & MAP_TYPE) { : case MAP_SHARED: - 8 1.0e-04 0 0 0 0 1 4.8e-05 : if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) + 0 0 0 0 0 0 2 9.1e-05 : if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) : return -EACCES; : : /* : * Make sure we don't allow writing to an append-only : * file.. : */ - 0 0 0 0 0 0 1 4.8e-05 : if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) + 1 1.1e-05 0 0 0 0 0 0 : if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) : return -EACCES; : : /* @@ -972,11 +972,11 @@ : : vm_flags |= VM_SHARED | VM_MAYSHARE; : if (!(file->f_mode & FMODE_WRITE)) - 2 2.5e-05 0 0 0 0 1 4.8e-05 : vm_flags &= ~(VM_MAYWRITE | VM_SHARED); + 6 6.7e-05 0 0 0 0 0 0 : vm_flags &= ~(VM_MAYWRITE | VM_SHARED); : : /* fall through */ : case MAP_PRIVATE: - 137 0.0017 0 0 0 0 54 0.0026 : if (!(file->f_mode & FMODE_READ)) + 118 0.0013 0 0 0 0 64 0.0029 : if (!(file->f_mode & FMODE_READ)) : return -EACCES; : break; : @@ -984,7 +984,7 @@ : return -EINVAL; : } : } else { - 1234 0.0154 0 0 0 0 181 0.0086 : switch (flags & MAP_TYPE) { + 1253 0.0141 0 0 0 0 171 0.0078 : switch (flags & MAP_TYPE) { : case MAP_SHARED: : vm_flags |= VM_SHARED | VM_MAYSHARE; : break; @@ -992,7 +992,7 @@ : /* : * Set pgoff according to addr for anon_vma. 
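
/*
 * Editor's note: the MAP_SHARED case above is where a writable shared
 * mapping of a read-only descriptor fails with -EACCES, while the
 * MAP_PRIVATE fall-through only requires FMODE_READ. Userspace sketch;
 * /etc/hostname is just an arbitrary readable file:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* PROT_WRITE + MAP_SHARED on an O_RDONLY fd: EACCES. */
	if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0) == MAP_FAILED)
		perror("mmap MAP_SHARED");	/* expected to fail */

	/* Private copy-on-write only needs the fd to be readable. */
	if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE, fd, 0) == MAP_FAILED)
		perror("mmap MAP_PRIVATE");	/* expected to succeed */
	close(fd);
	return 0;
}
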
: */ - 587 0.0073 0 0 0 0 40 0.0019 : pgoff = addr >> PAGE_SHIFT; + 635 0.0071 0 0 0 0 39 0.0018 : pgoff = addr >> PAGE_SHIFT; : break; : default: : return -EINVAL; @@ -1006,30 +1006,30 @@ : /* Clear old maps */ : error = -ENOMEM; :munmap_back: - 503 0.0063 0 0 0 0 267 0.0128 : vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); - 264 0.0033 0 0 0 0 183 0.0087 : if (vma && vma->vm_start < addr + len) { - 97 0.0012 0 0 0 0 52 0.0025 : if (do_munmap(mm, addr, len)) + 618 0.0070 0 0 0 0 347 0.0158 : vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + 252 0.0028 0 0 0 0 158 0.0072 : if (vma && vma->vm_start < addr + len) { + 119 0.0013 0 0 0 0 37 0.0017 : if (do_munmap(mm, addr, len)) : return -ENOMEM; : goto munmap_back; : } : : /* Check against address space limit. */ - 818 0.0102 0 0 0 0 229 0.0109 : if (!may_expand_vm(mm, len >> PAGE_SHIFT)) + 1571 0.0177 0 0 0 0 497 0.0226 : if (!may_expand_vm(mm, len >> PAGE_SHIFT)) : return -ENOMEM; : - 73 9.1e-04 0 0 0 0 47 0.0022 : if (accountable && (!(flags & MAP_NORESERVE) || + 326 0.0037 0 0 0 0 156 0.0071 : if (accountable && (!(flags & MAP_NORESERVE) || : sysctl_overcommit_memory == OVERCOMMIT_NEVER)) { - 182 0.0023 0 0 0 0 79 0.0038 : if (vm_flags & VM_SHARED) { + 1664 0.0187 0 0 0 0 503 0.0228 : if (vm_flags & VM_SHARED) { : /* Check memory availability in shmem_file_setup? */ : vm_flags |= VM_ACCOUNT; - 155 0.0019 0 0 0 0 131 0.0063 : } else if (vm_flags & VM_WRITE) { + 202 0.0023 0 0 0 0 46 0.0021 : } else if (vm_flags & VM_WRITE) { : /* : * Private writable mapping: check memory availability : */ : charged = len >> PAGE_SHIFT; - 116 0.0014 0 0 0 0 35 0.0017 : if (security_vm_enough_memory(charged)) + 97 0.0011 0 0 0 0 31 0.0014 : if (security_vm_enough_memory(charged)) : return -ENOMEM; - 108 0.0013 0 0 0 0 52 0.0025 : vm_flags |= VM_ACCOUNT; + 64 7.2e-04 0 0 0 0 20 9.1e-04 : vm_flags |= VM_ACCOUNT; : } : } : @@ -1038,7 +1038,7 @@ : * The VM_SHARED test is necessary because shmem_zero_setup : * will create the file object for a shared anonymous map below. : */ - 874 0.0109 0 0 0 0 293 0.0140 : if (!file && !(vm_flags & VM_SHARED) && + 843 0.0095 0 0 0 0 248 0.0113 : if (!file && !(vm_flags & VM_SHARED) && : vma_merge(mm, prev, addr, addr + len, vm_flags, : NULL, NULL, pgoff, NULL)) : goto out; @@ -1048,39 +1048,39 @@ : * specific mapper. the address has already been validated, but : * not unmapped, but the maps are removed from the list. 
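
/*
 * Editor's note: the accounting block above (VM_ACCOUNT plus
 * security_vm_enough_memory) is what MAP_NORESERVE sidesteps unless
 * overcommit is OVERCOMMIT_NEVER. Sketch assuming a 64-bit address
 * space; whether the accounted request succeeds depends on the
 * machine's /proc/sys/vm/overcommit_memory setting:
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 40;		/* 1 TiB, private and writable */

	/* Charged against the commit limit (VM_ACCOUNT is set). */
	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* MAP_NORESERVE skips the charge in most overcommit modes. */
	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

	printf("accounted: %s, noreserve: %s\n",
	       a == MAP_FAILED ? "ENOMEM" : "ok",
	       b == MAP_FAILED ? "ENOMEM" : "ok");
	return 0;
}
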
: */ - 304 0.0038 0 0 1 0.0351 171 0.0082 : vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); - 114 0.0014 0 0 0 0 65 0.0031 : if (!vma) { + 345 0.0039 0 0 0 0 86 0.0039 : vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + 173 0.0019 0 0 0 0 28 0.0013 : if (!vma) { : error = -ENOMEM; : goto unacct_error; : } - 54 6.7e-04 0 0 0 0 61 0.0029 : memset(vma, 0, sizeof(*vma)); + 60 6.7e-04 0 0 0 0 4 1.8e-04 : memset(vma, 0, sizeof(*vma)); : - 35 4.4e-04 0 0 0 0 1 4.8e-05 : vma->vm_mm = mm; + 30 3.4e-04 0 0 0 0 0 0 : vma->vm_mm = mm; : vma->vm_start = addr; - 28 3.5e-04 0 0 0 0 0 0 : vma->vm_end = addr + len; - 35 4.4e-04 0 0 0 0 3 1.4e-04 : vma->vm_flags = vm_flags; - 19 2.4e-04 0 0 0 0 3 1.4e-04 : vma->vm_page_prot = protection_map[vm_flags & 0x0f]; - 52 6.5e-04 0 0 0 0 4 1.9e-04 : vma->vm_pgoff = pgoff; + 32 3.6e-04 0 0 0 0 2 9.1e-05 : vma->vm_end = addr + len; + 24 2.7e-04 0 0 0 0 1 4.5e-05 : vma->vm_flags = vm_flags; + 10 1.1e-04 0 0 0 0 0 0 : vma->vm_page_prot = protection_map[vm_flags & 0x0f]; + 65 7.3e-04 0 0 0 0 1 4.5e-05 : vma->vm_pgoff = pgoff; : - 63 7.9e-04 0 0 0 0 4 1.9e-04 : if (file) { + 57 6.4e-04 0 0 0 0 2 9.1e-05 : if (file) { : error = -EINVAL; - 347 0.0043 0 0 0 0 45 0.0021 : if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) + 369 0.0041 0 0 0 0 28 0.0013 : if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) : goto free_vma; - 128 0.0016 0 0 0 0 67 0.0032 : if (vm_flags & VM_DENYWRITE) { - 114 0.0014 0 0 0 0 19 9.1e-04 : error = deny_write_access(file); + 134 0.0015 0 0 0 0 69 0.0031 : if (vm_flags & VM_DENYWRITE) { + 133 0.0015 0 0 0 0 23 0.0010 : error = deny_write_access(file); : if (error) : goto free_vma; : correct_wcount = 1; : } - 7 8.7e-05 0 0 0 0 0 0 : vma->vm_file = file; + 4 4.5e-05 0 0 0 0 1 4.5e-05 : vma->vm_file = file; : get_file(file); - 705 0.0088 0 0 0 0 132 0.0063 : error = file->f_op->mmap(file, vma); - 157 0.0020 0 0 0 0 70 0.0033 : if (error) + 655 0.0074 0 0 0 0 162 0.0074 : error = file->f_op->mmap(file, vma); + 131 0.0015 0 0 0 0 68 0.0031 : if (error) : goto unmap_and_free_vma; - 117 0.0015 0 0 0 0 19 9.1e-04 : } else if (vm_flags & VM_SHARED) { + 141 0.0016 0 0 0 0 16 7.3e-04 : } else if (vm_flags & VM_SHARED) { : error = shmem_zero_setup(vma); : if (error) - 2 2.5e-05 0 0 0 0 1 4.8e-05 : goto free_vma; + 3 3.4e-05 0 0 0 0 1 4.5e-05 : goto free_vma; : } : : /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform @@ -1088,7 +1088,7 @@ : * that memory reservation must be checked; but that reservation : * belongs to shared memory object, not to vma: so now clear it. : */ - 105 0.0013 0 0 0 0 66 0.0032 : if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) + 113 0.0013 0 0 0 0 78 0.0035 : if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) : vma->vm_flags &= ~VM_ACCOUNT; : : /* Can addr have changed?? @@ -1096,15 +1096,15 @@ : * Answer: Yes, several device drivers can do it in their : * f_op->mmap method. 
-DaveM : */ - 29 3.6e-04 0 0 0 0 16 7.6e-04 : addr = vma->vm_start; - 32 4.0e-04 0 0 0 0 13 6.2e-04 : pgoff = vma->vm_pgoff; - 40 5.0e-04 0 0 0 0 9 4.3e-04 : vm_flags = vma->vm_flags; + 26 2.9e-04 0 0 0 0 16 7.3e-04 : addr = vma->vm_start; + 31 3.5e-04 0 0 0 0 9 4.1e-04 : pgoff = vma->vm_pgoff; + 40 4.5e-04 0 0 0 0 9 4.1e-04 : vm_flags = vma->vm_flags; : - 609 0.0076 0 0 0 0 236 0.0113 : if (!file || !vma_merge(mm, prev, addr, vma->vm_end, + 601 0.0068 0 0 1 0.0339 186 0.0084 : if (!file || !vma_merge(mm, prev, addr, vma->vm_end, : vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { - : file = vma->vm_file; - 331 0.0041 0 0 0 0 49 0.0023 : vma_link(mm, vma, prev, rb_link, rb_parent); - 796 0.0099 0 0 0 0 185 0.0088 : if (correct_wcount) + 1 1.1e-05 0 0 0 0 0 0 : file = vma->vm_file; + 307 0.0035 0 0 0 0 49 0.0022 : vma_link(mm, vma, prev, rb_link, rb_parent); + 720 0.0081 0 0 0 0 167 0.0076 : if (correct_wcount) : atomic_inc(&inode->i_writecount); : } else { : if (file) { @@ -1116,13 +1116,13 @@ : kmem_cache_free(vm_area_cachep, vma); : } :out: - 1162 0.0145 0 0 0 0 147 0.0070 : vx_vmpages_add(mm, len >> PAGE_SHIFT); - 162 0.0020 0 0 0 0 50 0.0024 : vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); - 638 0.0079 0 0 0 0 280 0.0134 : if (vm_flags & VM_LOCKED) { + 1290 0.0145 0 0 0 0 168 0.0076 : vx_vmpages_add(mm, len >> PAGE_SHIFT); + 120 0.0013 0 0 0 0 58 0.0026 : vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); + 1207 0.0136 0 0 1 0.0339 251 0.0114 : if (vm_flags & VM_LOCKED) { : vx_vmlocked_add(mm, len >> PAGE_SHIFT); : make_pages_present(addr, addr + len); : } - 150 0.0019 0 0 0 0 60 0.0029 : if (flags & MAP_POPULATE) { + 164 0.0018 0 0 0 0 54 0.0025 : if (flags & MAP_POPULATE) { : up_write(&mm->mmap_sem); : sys_remap_file_pages(addr, len, 0, : pgoff, flags & MAP_NONBLOCK); @@ -1145,7 +1145,7 @@ : if (charged) : vm_unacct_memory(charged); : return error; - 878 0.0109 0 0 0 0 524 0.0250 :} + 984 0.0111 0 0 0 0 410 0.0186 :} : :EXPORT_SYMBOL(do_mmap_pgoff); : @@ -1217,15 +1217,15 @@ :#endif : :void arch_unmap_area(struct mm_struct *mm, unsigned long addr) - 1407 0.0175 0 0 2 0.0702 538 0.0257 :{ /* arch_unmap_area total: 1968 0.0245 0 0 2 0.0702 711 0.0340 */ + 1393 0.0157 0 0 0 0 534 0.0243 :{ /* arch_unmap_area total: 1951 0.0219 0 0 1 0.0339 717 0.0326 */ : /* : * Is this a new hole at the lowest possible address? 
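
/*
 * Editor's note: the MAP_POPULATE branch of do_mmap_pgoff above drops
 * mmap_sem and prefaults through sys_remap_file_pages. On kernels where
 * prefaulting covers the mapping (behaviour varies by version and by
 * file- vs anonymous-backed memory), the effect shows up as a near-zero
 * minor-fault count on first touch; sizes here are arbitrary:
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

static long minflt(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	long before = minflt();
	memset(p, 1, len);		/* pages ideally prefaulted above */
	printf("minor faults while touching: %ld\n", minflt() - before);
	return 0;
}
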
: */ - 245 0.0031 0 0 0 0 48 0.0023 : if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { - 7 8.7e-05 0 0 0 0 2 9.6e-05 : mm->free_area_cache = addr; - 43 5.4e-04 0 0 0 0 20 9.6e-04 : mm->cached_hole_size = ~0UL; + 234 0.0026 0 0 1 0.0339 60 0.0027 : if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { + 13 1.5e-04 0 0 0 0 4 1.8e-04 : mm->free_area_cache = addr; + 31 3.5e-04 0 0 0 0 16 7.3e-04 : mm->cached_hole_size = ~0UL; : } - 142 0.0018 0 0 0 0 60 0.0029 :} + 155 0.0017 0 0 0 0 62 0.0028 :} : :/* : * This mmap-allocator allocates new areas top-down from below the @@ -1331,25 +1331,25 @@ :unsigned long :get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, : unsigned long pgoff, unsigned long flags) - 305 0.0038 0 0 0 0 219 0.0105 :{ /* get_unmapped_area total: 4893 0.0610 0 0 1 0.0351 1962 0.0937 */ + 256 0.0029 0 0 0 0 210 0.0095 :{ /* get_unmapped_area total: 4897 0.0551 0 0 0 0 1912 0.0868 */ : unsigned long ret; : - 966 0.0120 0 0 0 0 199 0.0095 : if (!(flags & MAP_FIXED)) { + 960 0.0108 0 0 0 0 198 0.0090 : if (!(flags & MAP_FIXED)) { : unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); : - 171 0.0021 0 0 0 0 186 0.0089 : get_area = current->mm->get_unmapped_area; - 165 0.0021 0 0 0 0 202 0.0097 : if (file && file->f_op && file->f_op->get_unmapped_area) - 35 4.4e-04 0 0 0 0 37 0.0018 : get_area = file->f_op->get_unmapped_area; - 328 0.0041 0 0 0 0 183 0.0087 : addr = get_area(file, addr, len, pgoff, flags); - 39 4.9e-04 0 0 0 0 7 3.3e-04 : if (IS_ERR_VALUE(addr)) + 169 0.0019 0 0 0 0 168 0.0076 : get_area = current->mm->get_unmapped_area; + 162 0.0018 0 0 0 0 203 0.0092 : if (file && file->f_op && file->f_op->get_unmapped_area) + 51 5.7e-04 0 0 0 0 28 0.0013 : get_area = file->f_op->get_unmapped_area; + 379 0.0043 0 0 0 0 186 0.0084 : addr = get_area(file, addr, len, pgoff, flags); + 54 6.1e-04 0 0 0 0 6 2.7e-04 : if (IS_ERR_VALUE(addr)) : return addr; : } : - 927 0.0116 0 0 0 0 182 0.0087 : if (addr > TASK_SIZE - len) + 839 0.0094 0 0 0 0 141 0.0064 : if (addr > TASK_SIZE - len) : return -ENOMEM; - 398 0.0050 0 0 0 0 80 0.0038 : if (addr & ~PAGE_MASK) + 426 0.0048 0 0 0 0 88 0.0040 : if (addr & ~PAGE_MASK) : return -EINVAL; - 267 0.0033 0 0 0 0 180 0.0086 : if (file && is_file_hugepages(file)) { + 280 0.0031 0 0 0 0 187 0.0085 : if (file && is_file_hugepages(file)) { : /* : * Check if the given range is hugepage aligned, and : * can be made suitable for hugepages. @@ -1366,45 +1366,45 @@ : if (ret) : return -EINVAL; : return addr; - 870 0.0108 0 0 1 0.0351 303 0.0145 :} + 930 0.0105 0 0 0 0 324 0.0147 :} : :EXPORT_SYMBOL(get_unmapped_area); : :/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ :struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) - 1681 0.0209 0 0 0 0 709 0.0339 :{ /* find_vma total: 72272 0.9005 0 0 12 0.4215 25914 1.2380 */ + 1669 0.0188 0 0 0 0 712 0.0323 :{ /* find_vma total: 81286 0.9142 0 0 6 0.2037 27527 1.2502 */ : struct vm_area_struct *vma = NULL; : - 6827 0.0851 0 0 1 0.0351 2865 0.1369 : if (mm) { + 6796 0.0764 0 0 1 0.0339 3174 0.1441 : if (mm) { : /* Check the cache first. */ : /* (Cache hit rate is typically around 35%.) 
*/ - 304 0.0038 0 0 0 0 185 0.0088 : vma = mm->mmap_cache; - 10067 0.1254 0 0 2 0.0702 4507 0.2153 : if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { + 324 0.0036 0 0 0 0 157 0.0071 : vma = mm->mmap_cache; + 11424 0.1285 0 0 2 0.0679 5132 0.2331 : if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { : struct rb_node * rb_node; : - 6826 0.0851 0 0 1 0.0351 3687 0.1761 : rb_node = mm->mm_rb.rb_node; + 7929 0.0892 0 0 0 0 3841 0.1744 : rb_node = mm->mm_rb.rb_node; : vma = NULL; : - 8813 0.1098 0 0 3 0.1054 2442 0.1167 : while (rb_node) { + 9797 0.1102 0 0 1 0.0339 2464 0.1119 : while (rb_node) { : struct vm_area_struct * vma_tmp; : - 6926 0.0863 0 0 1 0.0351 2092 0.0999 : vma_tmp = rb_entry(rb_node, + 7843 0.0882 0 0 0 0 2176 0.0988 : vma_tmp = rb_entry(rb_node, : struct vm_area_struct, vm_rb); : - 2953 0.0368 0 0 0 0 1420 0.0678 : if (vma_tmp->vm_end > addr) { + 3170 0.0357 0 0 0 0 1378 0.0626 : if (vma_tmp->vm_end > addr) { : vma = vma_tmp; - 13433 0.1674 0 0 1 0.0351 2706 0.1293 : if (vma_tmp->vm_start <= addr) + 16543 0.1861 0 0 2 0.0679 2778 0.1262 : if (vma_tmp->vm_start <= addr) : break; - 2829 0.0353 0 0 0 0 672 0.0321 : rb_node = rb_node->rb_left; + 3343 0.0376 0 0 0 0 743 0.0337 : rb_node = rb_node->rb_left; : } else - 1684 0.0210 0 0 1 0.0351 738 0.0353 : rb_node = rb_node->rb_right; + 1845 0.0207 0 0 0 0 648 0.0294 : rb_node = rb_node->rb_right; : } - 1961 0.0244 0 0 1 0.0351 497 0.0237 : if (vma) - 188 0.0023 0 0 0 0 49 0.0023 : mm->mmap_cache = vma; + 1983 0.0223 0 0 0 0 489 0.0222 : if (vma) + 265 0.0030 0 0 0 0 49 0.0022 : mm->mmap_cache = vma; : } : } : return vma; - 7780 0.0969 0 0 1 0.0351 3345 0.1598 :} + 8355 0.0940 0 0 0 0 3786 0.1719 :} : :EXPORT_SYMBOL(find_vma); : @@ -1412,36 +1412,36 @@ :struct vm_area_struct * :find_vma_prev(struct mm_struct *mm, unsigned long addr, : struct vm_area_struct **pprev) - 513 0.0064 0 0 0 0 140 0.0067 :{ /* find_vma_prev total: 6488 0.0808 0 0 1 0.0351 2698 0.1289 */ + 489 0.0055 0 0 0 0 137 0.0062 :{ /* find_vma_prev total: 6708 0.0754 0 0 1 0.0339 2800 0.1272 */ : struct vm_area_struct *vma = NULL, *prev = NULL; : struct rb_node * rb_node; - 451 0.0056 0 0 0 0 159 0.0076 : if (!mm) + 465 0.0052 0 0 0 0 144 0.0065 : if (!mm) : goto out; : : /* Guard against addr being lower than the first VMA */ - 2 2.5e-05 0 0 0 0 1 4.8e-05 : vma = mm->mmap; + : vma = mm->mmap; : : /* Go through the RB tree quickly. 
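
/*
 * Editor's note: the descent in find_vma() above answers "first VMA
 * whose vm_end lies above addr". The same query restated over a sorted
 * array, with an invented struct and helper name (first_end_above) —
 * purely an illustration of the invariant, not kernel code:
 */
#include <assert.h>
#include <stddef.h>

struct ivl { unsigned long start, end; };	/* [start, end) */

static const struct ivl *first_end_above(const struct ivl *v, size_t n,
					 unsigned long addr)
{
	const struct ivl *found = NULL;
	size_t lo = 0, hi = n;

	while (lo < hi) {		/* mirrors the rb_node loop */
		size_t mid = lo + (hi - lo) / 2;

		if (v[mid].end > addr) {
			found = &v[mid];	/* candidate, look left */
			hi = mid;
		} else
			lo = mid + 1;
	}
	return found;			/* NULL: addr above every VMA */
}

int main(void)
{
	struct ivl v[] = { { 0x1000, 0x2000 }, { 0x4000, 0x5000 } };

	assert(first_end_above(v, 2, 0x1800)->start == 0x1000); /* inside */
	assert(first_end_above(v, 2, 0x3000)->start == 0x4000); /* in hole */
	assert(first_end_above(v, 2, 0x6000) == NULL);
	return 0;
}
/*
 * Same convention as find_vma: an address in a hole returns the VMA
 * after it, so callers must still check vm_start <= addr themselves.
 */
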
*/ - 55 6.9e-04 0 0 0 0 31 0.0015 : rb_node = mm->mm_rb.rb_node; + 61 6.9e-04 0 0 0 0 28 0.0013 : rb_node = mm->mm_rb.rb_node; : - 1214 0.0151 0 0 0 0 734 0.0351 : while (rb_node) { + 1160 0.0130 0 0 0 0 758 0.0344 : while (rb_node) { : struct vm_area_struct *vma_tmp; - 586 0.0073 0 0 0 0 376 0.0180 : vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); + 535 0.0060 0 0 0 0 375 0.0170 : vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); : - 218 0.0027 0 0 0 0 105 0.0050 : if (addr < vma_tmp->vm_end) { - 759 0.0095 0 0 1 0.0351 310 0.0148 : rb_node = rb_node->rb_left; + 264 0.0030 0 0 0 0 104 0.0047 : if (addr < vma_tmp->vm_end) { + 805 0.0091 0 0 0 0 380 0.0173 : rb_node = rb_node->rb_left; : } else { : prev = vma_tmp; - 880 0.0110 0 0 0 0 399 0.0191 : if (!prev->vm_next || (addr < prev->vm_next->vm_end)) + 1034 0.0116 0 0 0 0 406 0.0184 : if (!prev->vm_next || (addr < prev->vm_next->vm_end)) : break; - 845 0.0105 0 0 0 0 169 0.0081 : rb_node = rb_node->rb_right; + 759 0.0085 0 0 0 0 174 0.0079 : rb_node = rb_node->rb_right; : } : } : - 3 3.7e-05 0 0 0 0 2 9.6e-05 :out: - 188 0.0023 0 0 0 0 42 0.0020 : *pprev = prev; - 507 0.0063 0 0 0 0 152 0.0073 : return prev ? prev->vm_next : vma; - 267 0.0033 0 0 0 0 78 0.0037 :} + 2 2.2e-05 0 0 0 0 0 0 :out: + 251 0.0028 0 0 0 0 48 0.0022 : *pprev = prev; + 584 0.0066 0 0 1 0.0339 175 0.0079 : return prev ? prev->vm_next : vma; + 299 0.0034 0 0 0 0 71 0.0032 :} : :/* : * Verify that the stack growth is acceptable and @@ -1651,20 +1651,20 @@ :static void unmap_region(struct mm_struct *mm, : struct vm_area_struct *vma, struct vm_area_struct *prev, : unsigned long start, unsigned long end) - 309 0.0039 0 0 0 0 160 0.0076 :{ /* unmap_region total: 3397 0.0423 0 0 1 0.0351 1545 0.0738 */ - 322 0.0040 0 0 0 0 67 0.0032 : struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; + 291 0.0033 0 0 0 0 141 0.0064 :{ /* unmap_region total: 3873 0.0436 0 0 0 0 1446 0.0657 */ + 392 0.0044 0 0 0 0 63 0.0029 : struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; : struct mmu_gather *tlb; - 11 1.4e-04 0 0 0 0 3 1.4e-04 : unsigned long nr_accounted = 0; + 2 2.2e-05 0 0 0 0 3 1.4e-04 : unsigned long nr_accounted = 0; : - 23 2.9e-04 0 0 0 0 44 0.0021 : lru_add_drain(); - 1 1.2e-05 0 0 0 0 0 0 : tlb = tlb_gather_mmu(mm, 0); - 121 0.0015 0 0 0 0 24 0.0011 : update_hiwater_rss(mm); - 193 0.0024 0 0 0 0 133 0.0064 : unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); + 29 3.3e-04 0 0 0 0 46 0.0021 : lru_add_drain(); + 2 2.2e-05 0 0 0 0 0 0 : tlb = tlb_gather_mmu(mm, 0); + 134 0.0015 0 0 0 0 13 5.9e-04 : update_hiwater_rss(mm); + 219 0.0025 0 0 0 0 131 0.0059 : unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); : vm_unacct_memory(nr_accounted); - 590 0.0074 0 0 1 0.0351 220 0.0105 : free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, + 776 0.0087 0 0 0 0 174 0.0079 : free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, : next? next->vm_start: 0); : tlb_finish_mmu(tlb, start, end); - 529 0.0066 0 0 0 0 361 0.0172 :} + 540 0.0061 0 0 0 0 364 0.0165 :} : :/* : * Create a list of vma's touched by the unmap, removing them from the mm's @@ -1678,21 +1678,21 @@ : struct vm_area_struct *tail_vma = NULL; : unsigned long addr; : - 32 4.0e-04 0 0 0 0 14 6.7e-04 : insertion_point = (prev ? &prev->vm_next : &mm->mmap); + 24 2.7e-04 0 0 0 0 18 8.2e-04 : insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); : do { - 64 8.0e-04 0 0 0 0 27 0.0013 : rb_erase(&vma->vm_rb, &mm->mm_rb); - 36 4.5e-04 0 0 0 0 5 2.4e-04 : mm->map_count--; + 80 9.0e-04 0 0 0 0 34 0.0015 : rb_erase(&vma->vm_rb, &mm->mm_rb); + 18 2.0e-04 0 0 0 0 7 3.2e-04 : mm->map_count--; : tail_vma = vma; - 61 7.6e-04 0 0 0 0 9 4.3e-04 : vma = vma->vm_next; - 89 0.0011 0 0 0 0 35 0.0017 : } while (vma && vma->vm_start < end); - 77 9.6e-04 0 0 0 0 21 0.0010 : *insertion_point = vma; - 44 5.5e-04 0 0 0 0 6 2.9e-04 : tail_vma->vm_next = NULL; - 34 4.2e-04 0 0 0 0 8 3.8e-04 : if (mm->unmap_area == arch_unmap_area) - 27 3.4e-04 0 0 0 0 12 5.7e-04 : addr = prev ? prev->vm_end : mm->mmap_base; + 50 5.6e-04 0 0 0 0 14 6.4e-04 : vma = vma->vm_next; + 109 0.0012 0 0 0 0 44 0.0020 : } while (vma && vma->vm_start < end); + 91 0.0010 0 0 0 0 25 0.0011 : *insertion_point = vma; + 39 4.4e-04 0 0 0 0 9 4.1e-04 : tail_vma->vm_next = NULL; + 40 4.5e-04 0 0 0 0 21 9.5e-04 : if (mm->unmap_area == arch_unmap_area) + 49 5.5e-04 0 0 0 0 13 5.9e-04 : addr = prev ? prev->vm_end : mm->mmap_base; : else : addr = vma ? vma->vm_start : mm->mmap_base; - 189 0.0024 0 0 0 0 44 0.0021 : mm->unmap_area(mm, addr); - 1 1.2e-05 0 0 0 0 0 0 : mm->mmap_cache = NULL; /* Kill the cache. */ + 187 0.0021 0 0 0 0 40 0.0018 : mm->unmap_area(mm, addr); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : mm->mmap_cache = NULL; /* Kill the cache. */ :} : :/* @@ -1701,51 +1701,51 @@ : */ :int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, : unsigned long addr, int new_below) - 472 0.0059 0 0 0 0 100 0.0048 :{ /* split_vma total: 1987 0.0248 0 0 0 0 473 0.0226 */ + 496 0.0056 0 0 0 0 108 0.0049 :{ /* split_vma total: 2123 0.0239 0 0 0 0 423 0.0192 */ : struct mempolicy *pol; : struct vm_area_struct *new; : - 6 7.5e-05 0 0 0 0 2 9.6e-05 : if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) + 5 5.6e-05 0 0 0 0 1 4.5e-05 : if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) : return -EINVAL; : - 81 0.0010 0 0 0 0 23 0.0011 : if (mm->map_count >= sysctl_max_map_count) + 66 7.4e-04 0 0 0 0 25 0.0011 : if (mm->map_count >= sysctl_max_map_count) : return -ENOMEM; : - 241 0.0030 0 0 0 0 43 0.0021 : new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); - 39 4.9e-04 0 0 0 0 14 6.7e-04 : if (!new) + 347 0.0039 0 0 0 0 35 0.0016 : new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + 84 9.4e-04 0 0 0 0 6 2.7e-04 : if (!new) : return -ENOMEM; : : /* most fields are the same, copy all, and then fixup */ - 22 2.7e-04 0 0 0 0 6 2.9e-04 : *new = *vma; + 28 3.1e-04 0 0 0 0 3 1.4e-04 : *new = *vma; : - 16 2.0e-04 0 0 0 0 1 4.8e-05 : if (new_below) - 21 2.6e-04 0 0 0 0 5 2.4e-04 : new->vm_end = addr; + 23 2.6e-04 0 0 0 0 2 9.1e-05 : if (new_below) + 33 3.7e-04 0 0 0 0 3 1.4e-04 : new->vm_end = addr; : else { - 5 6.2e-05 0 0 0 0 1 4.8e-05 : new->vm_start = addr; - 51 6.4e-04 0 0 0 0 10 4.8e-04 : new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + 7 7.9e-05 0 0 0 0 1 4.5e-05 : new->vm_start = addr; + 52 5.8e-04 0 0 0 0 6 2.7e-04 : new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); : } : - 6 7.5e-05 0 0 0 0 0 0 : pol = mpol_copy(vma_policy(vma)); - 53 6.6e-04 0 0 0 0 7 3.3e-04 : if (IS_ERR(pol)) { + 7 7.9e-05 0 0 0 0 0 0 : pol = mpol_copy(vma_policy(vma)); + 45 5.1e-04 0 0 0 0 5 2.3e-04 : if (IS_ERR(pol)) { : kmem_cache_free(vm_area_cachep, new); : return PTR_ERR(pol); : } - 75 9.3e-04 0 0 0 0 11 5.3e-04 : vma_set_policy(new, pol); + 42 4.7e-04 0 0 0 0 9 4.1e-04 : vma_set_policy(new, pol); : - 8 1.0e-04 0 0 0 0 4 1.9e-04 : if (new->vm_file) + 7 7.9e-05 0 0 0 0 4 
1.8e-04 : if (new->vm_file) : get_file(new->vm_file); : - 183 0.0023 0 0 0 0 54 0.0026 : if (new->vm_ops && new->vm_ops->open) + 228 0.0026 0 0 0 0 56 0.0025 : if (new->vm_ops && new->vm_ops->open) : new->vm_ops->open(new); : - 107 0.0013 0 0 0 0 29 0.0014 : if (new_below) - 139 0.0017 0 0 0 0 18 8.6e-04 : vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + + 104 0.0012 0 0 0 0 20 9.1e-04 : if (new_below) + 112 0.0013 0 0 0 0 33 0.0015 : vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + : ((addr - new->vm_start) >> PAGE_SHIFT), new); : else - 353 0.0044 0 0 0 0 89 0.0043 : vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + 315 0.0035 0 0 0 0 64 0.0029 : vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); : : return 0; - 81 0.0010 0 0 0 0 46 0.0022 :} + 94 0.0011 0 0 0 0 38 0.0017 :} : :/* Munmap is split into 2 main parts -- this part which finds : * what needs doing, and the areas themselves, which do the @@ -1753,25 +1753,25 @@ : * Jeremy Fitzhardinge : */ :int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) - 491 0.0061 0 0 0 0 181 0.0086 :{ /* do_munmap total: 5119 0.0638 0 0 1 0.0351 1962 0.0937 */ + 578 0.0065 0 0 0 0 204 0.0093 :{ /* do_munmap total: 5423 0.0610 0 0 1 0.0339 2063 0.0937 */ : unsigned long end; : struct vm_area_struct *vma, *prev, *last; : - 315 0.0039 0 0 0 0 189 0.0090 : if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) + 329 0.0037 0 0 0 0 211 0.0096 : if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) : return -EINVAL; : - 90 0.0011 0 0 0 0 57 0.0027 : if ((len = PAGE_ALIGN(len)) == 0) + 78 8.8e-04 0 0 0 0 54 0.0025 : if ((len = PAGE_ALIGN(len)) == 0) : return -EINVAL; : : /* Find the first overlapping VMA */ - 110 0.0014 0 0 0 0 66 0.0032 : vma = find_vma_prev(mm, start, &prev); - 57 7.1e-04 0 0 0 0 15 7.2e-04 : if (!vma) + 90 0.0010 0 0 0 0 71 0.0032 : vma = find_vma_prev(mm, start, &prev); + 57 6.4e-04 0 0 0 0 21 9.5e-04 : if (!vma) : return 0; : /* we have start < vma->vm_end */ : : /* if it doesn't overlap, we have nothing.. */ - 17 2.1e-04 0 0 0 0 9 4.3e-04 : end = start + len; - 35 4.4e-04 0 0 0 0 13 6.2e-04 : if (vma->vm_start >= end) + 23 2.6e-04 0 0 0 0 10 4.5e-04 : end = start + len; + 39 4.4e-04 0 0 1 0.0339 17 7.7e-04 : if (vma->vm_start >= end) : return 0; : : /* @@ -1781,48 +1781,48 @@ : * unmapped vm_area_struct will remain in use: so lower split_vma : * places tmp vma above, and higher split_vma places tmp vma below. : */ - 62 7.7e-04 0 0 0 0 19 9.1e-04 : if (start > vma->vm_start) { - 44 5.5e-04 0 0 0 0 20 9.6e-04 : int error = split_vma(mm, vma, start, 0); - 27 3.4e-04 0 0 0 0 23 0.0011 : if (error) + 58 6.5e-04 0 0 0 0 17 7.7e-04 : if (start > vma->vm_start) { + 56 6.3e-04 0 0 0 0 22 1.0e-03 : int error = split_vma(mm, vma, start, 0); + 29 3.3e-04 0 0 0 0 26 0.0012 : if (error) : return error; - 15 1.9e-04 0 0 0 0 7 3.3e-04 : prev = vma; + 12 1.3e-04 0 0 0 0 6 2.7e-04 : prev = vma; : } : : /* Does it split the last one? */ - 286 0.0036 0 0 0 0 76 0.0036 : last = find_vma(mm, end); - 67 8.3e-04 0 0 0 0 56 0.0027 : if (last && end > last->vm_start) { - 23 2.9e-04 0 0 0 0 9 4.3e-04 : int error = split_vma(mm, last, end, 1); - 15 1.9e-04 0 0 0 0 4 1.9e-04 : if (error) + 325 0.0037 0 0 0 0 90 0.0041 : last = find_vma(mm, end); + 57 6.4e-04 0 0 0 0 52 0.0024 : if (last && end > last->vm_start) { + 17 1.9e-04 0 0 0 0 7 3.2e-04 : int error = split_vma(mm, last, end, 1); + 9 1.0e-04 0 0 0 0 4 1.8e-04 : if (error) : return error; : } - 148 0.0018 0 0 0 0 80 0.0038 : vma = prev? 
prev->vm_next: mm->mmap; + 147 0.0017 0 0 0 0 87 0.0040 : vma = prev? prev->vm_next: mm->mmap; : : /* : * Remove the vma's, and unmap the actual pages : */ : detach_vmas_to_be_unmapped(mm, vma, prev, end); - 425 0.0053 0 0 0 0 76 0.0036 : unmap_region(mm, vma, prev, start, end); + 441 0.0050 0 0 0 0 85 0.0039 : unmap_region(mm, vma, prev, start, end); : : /* Fix up all other VM information */ : remove_vma_list(mm, vma); : : return 0; - 189 0.0024 0 0 0 0 38 0.0018 :} + 189 0.0021 0 0 0 0 40 0.0018 :} : :EXPORT_SYMBOL(do_munmap); : :asmlinkage long sys_munmap(unsigned long addr, size_t len) - 1908 0.0238 0 0 0 0 505 0.0241 :{ /* sys_munmap total: 2996 0.0373 0 0 0 0 957 0.0457 */ + 1986 0.0223 0 0 0 0 500 0.0227 :{ /* sys_munmap total: 3149 0.0354 0 0 0 0 967 0.0439 */ : int ret; - 89 0.0011 0 0 0 0 3 1.4e-04 : struct mm_struct *mm = current->mm; + 109 0.0012 0 0 0 0 8 3.6e-04 : struct mm_struct *mm = current->mm; : - 5 6.2e-05 0 0 0 0 2 9.6e-05 : profile_munmap(addr); + 3 3.4e-05 0 0 0 0 1 4.5e-05 : profile_munmap(addr); : - 8 1.0e-04 0 0 0 0 4 1.9e-04 : down_write(&mm->mmap_sem); - 105 0.0013 0 0 0 0 64 0.0031 : ret = do_munmap(mm, addr, len); + 7 7.9e-05 0 0 0 0 5 2.3e-04 : down_write(&mm->mmap_sem); + 120 0.0013 0 0 0 0 58 0.0026 : ret = do_munmap(mm, addr, len); : up_write(&mm->mmap_sem); : return ret; - 416 0.0052 0 0 0 0 32 0.0015 :} + 400 0.0045 0 0 0 0 36 0.0016 :} : :static inline void verify_mm_writelocked(struct mm_struct *mm) :{ @@ -1840,24 +1840,24 @@ : * brk-specific accounting here. : */ :unsigned long do_brk(unsigned long addr, unsigned long len) - 284 0.0035 0 0 0 0 36 0.0017 :{ /* do_brk total: 3643 0.0454 0 0 0 0 674 0.0322 */ - 52 6.5e-04 0 0 0 0 6 2.9e-04 : struct mm_struct * mm = current->mm; + 315 0.0035 0 0 0 0 46 0.0021 :{ /* do_brk total: 3857 0.0434 0 0 0 0 748 0.0340 */ + 44 4.9e-04 0 0 0 0 14 6.4e-04 : struct mm_struct * mm = current->mm; : struct vm_area_struct * vma, * prev; : unsigned long flags; : struct rb_node ** rb_link, * rb_parent; - 16 2.0e-04 0 0 0 0 3 1.4e-04 : pgoff_t pgoff = addr >> PAGE_SHIFT; + 13 1.5e-04 0 0 0 0 1 4.5e-05 : pgoff_t pgoff = addr >> PAGE_SHIFT; : - 2 2.5e-05 0 0 0 0 1 4.8e-05 : len = PAGE_ALIGN(len); - 6 7.5e-05 0 0 0 0 1 4.8e-05 : if (!len) + 7 7.9e-05 0 0 0 0 0 0 : len = PAGE_ALIGN(len); + 8 9.0e-05 0 0 0 0 4 1.8e-04 : if (!len) : return addr; : - 226 0.0028 0 0 0 0 82 0.0039 : if ((addr + len) > TASK_SIZE || (addr + len) < addr) + 256 0.0029 0 0 0 0 74 0.0034 : if ((addr + len) > TASK_SIZE || (addr + len) < addr) : return -EINVAL; : : /* : * mlock MCL_FUTURE? : */ - 33 4.1e-04 0 0 0 0 18 8.6e-04 : if (mm->def_flags & VM_LOCKED) { + 33 3.7e-04 0 0 0 0 19 8.6e-04 : if (mm->def_flags & VM_LOCKED) { : unsigned long locked, lock_limit; : locked = len >> PAGE_SHIFT; : locked += mm->locked_vm; @@ -1879,98 +1879,98 @@ : * Clear old maps. this also does some error checking for us : */ : munmap_back: - 717 0.0089 0 0 0 0 85 0.0041 : vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); - 46 5.7e-04 0 0 0 0 30 0.0014 : if (vma && vma->vm_start < addr + len) { + 687 0.0077 0 0 0 0 64 0.0029 : vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + 61 6.9e-04 0 0 0 0 30 0.0014 : if (vma && vma->vm_start < addr + len) { : if (do_munmap(mm, addr, len)) : return -ENOMEM; : goto munmap_back; : } : : /* Check against address space limits *after* clearing old maps... 
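
/*
 * Editor's note: userspace view of the do_munmap() splitting above —
 * unmapping the interior of a mapping triggers both split_vma() calls
 * and leaves two VMAs around a hole. Page size is the only assumption:
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* start > vm_start and end < vm_end: the unmapped range is
	 * detached between two freshly split neighbours. */
	if (munmap(p + page, page) != 0)
		perror("munmap");

	system("cat /proc/self/maps");	/* expect two entries, one hole */
	return 0;
}
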
*/ - 109 0.0014 0 0 0 0 37 0.0018 : if (!may_expand_vm(mm, len >> PAGE_SHIFT)) + 103 0.0012 0 0 0 0 52 0.0024 : if (!may_expand_vm(mm, len >> PAGE_SHIFT)) : return -ENOMEM; : - 208 0.0026 0 0 0 0 50 0.0024 : if (mm->map_count > sysctl_max_map_count) + 258 0.0029 0 0 0 0 53 0.0024 : if (mm->map_count > sysctl_max_map_count) : return -ENOMEM; : - 35 4.4e-04 0 0 0 0 26 0.0012 : if (security_vm_enough_memory(len >> PAGE_SHIFT) || + 42 4.7e-04 0 0 0 0 17 7.7e-04 : if (security_vm_enough_memory(len >> PAGE_SHIFT) || : !vx_vmpages_avail(mm, len >> PAGE_SHIFT)) : return -ENOMEM; : - 631 0.0079 0 0 0 0 74 0.0035 : flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + 349 0.0039 0 0 0 0 42 0.0019 : flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; : : /* Can we just expand an old private anonymous mapping? */ - 134 0.0017 0 0 0 0 35 0.0017 : if (vma_merge(mm, prev, addr, addr + len, flags, + 170 0.0019 0 0 0 0 67 0.0030 : if (vma_merge(mm, prev, addr, addr + len, flags, : NULL, NULL, pgoff, NULL)) : goto out; : : /* : * create a vma struct for an anonymous mapping : */ - 10 1.2e-04 0 0 0 0 4 1.9e-04 : vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); - 4 5.0e-05 0 0 0 0 1 4.8e-05 : if (!vma) { + 8 9.0e-05 0 0 0 0 2 9.1e-05 : vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (!vma) { : vm_unacct_memory(len >> PAGE_SHIFT); : return -ENOMEM; : } - 34 4.2e-04 0 0 0 0 6 2.9e-04 : memset(vma, 0, sizeof(*vma)); + 91 0.0010 0 0 0 0 2 9.1e-05 : memset(vma, 0, sizeof(*vma)); : : vma->vm_mm = mm; - 1 1.2e-05 0 0 0 0 0 0 : vma->vm_start = addr; + 1 1.1e-05 0 0 0 0 0 0 : vma->vm_start = addr; : vma->vm_end = addr + len; - 8 1.0e-04 0 0 0 0 1 4.8e-05 : vma->vm_pgoff = pgoff; + 4 4.5e-05 0 0 0 0 1 4.5e-05 : vma->vm_pgoff = pgoff; : vma->vm_flags = flags; - 3 3.7e-05 0 0 0 0 0 0 : vma->vm_page_prot = protection_map[flags & 0x0f]; - 8 1.0e-04 0 0 0 0 1 4.8e-05 : vma_link(mm, vma, prev, rb_link, rb_parent); + 4 4.5e-05 0 0 0 0 2 9.1e-05 : vma->vm_page_prot = protection_map[flags & 0x0f]; + 7 7.9e-05 0 0 0 0 0 0 : vma_link(mm, vma, prev, rb_link, rb_parent); :out: - 267 0.0033 0 0 0 0 44 0.0021 : vx_vmpages_add(mm, len >> PAGE_SHIFT); - 265 0.0033 0 0 0 0 44 0.0021 : if (flags & VM_LOCKED) { + 293 0.0033 0 0 0 0 42 0.0019 : vx_vmpages_add(mm, len >> PAGE_SHIFT); + 10 1.1e-04 0 0 0 0 0 0 : if (flags & VM_LOCKED) { : vx_vmlocked_add(mm, len >> PAGE_SHIFT); : make_pages_present(addr, addr + len); : } : return addr; - 232 0.0029 0 0 0 0 44 0.0021 :} + 327 0.0037 0 0 0 0 64 0.0029 :} : :EXPORT_SYMBOL(do_brk); : :/* Release all mmaps. 
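
/*
 * Editor's note: brk(2), whose tail is annotated earlier in this hunk,
 * lands in do_brk() above when the heap grows and in do_munmap() when
 * it shrinks. A quick sketch using the traditional sbrk() wrapper;
 * printed addresses are machine-specific:
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *start = sbrk(0);		/* current program break */

	sbrk(1 << 20);			/* grow: do_brk() extends the
					 * anonymous heap mapping */
	void *grown = sbrk(0);

	sbrk(-(1 << 20));		/* shrink: do_munmap() on the tail */
	void *back = sbrk(0);

	printf("break: %p -> %p -> %p\n", start, grown, back);
	return 0;
}
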
*/ :void exit_mmap(struct mm_struct *mm) - 193 0.0024 0 0 0 0 19 9.1e-04 :{ /* exit_mmap total: 2084 0.0260 0 0 1 0.0351 518 0.0247 */ + 273 0.0031 0 0 0 0 23 0.0010 :{ /* exit_mmap total: 2449 0.0275 0 0 0 0 451 0.0205 */ : struct mmu_gather *tlb; - 3 3.7e-05 0 0 0 0 0 0 : struct vm_area_struct *vma = mm->mmap; + : struct vm_area_struct *vma = mm->mmap; : unsigned long nr_accounted = 0; : unsigned long end; : - : lru_add_drain(); + 1 1.1e-05 0 0 0 0 0 0 : lru_add_drain(); : flush_cache_mm(mm); - 4 5.0e-05 0 0 0 0 3 1.4e-04 : tlb = tlb_gather_mmu(mm, 1); + 2 2.2e-05 0 0 0 0 0 0 : tlb = tlb_gather_mmu(mm, 1); : /* Don't update_hiwater_rss(mm) here, do_exit already did */ : /* Use -1 here to ensure all VMAs in the mm are unmapped */ - 14 1.7e-04 0 0 0 0 7 3.3e-04 : end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); + 11 1.2e-04 0 0 0 0 7 3.2e-04 : end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); : vm_unacct_memory(nr_accounted); - 13 1.6e-04 0 0 0 0 2 9.6e-05 : free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); + 26 2.9e-04 0 0 0 0 3 1.4e-04 : free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); : tlb_finish_mmu(tlb, 0, end); : - : set_mm_counter(mm, file_rss, 0); - 118 0.0015 0 0 0 0 9 4.3e-04 : set_mm_counter(mm, anon_rss, 0); - 37 4.6e-04 0 0 0 0 2 9.6e-05 : vx_vmpages_sub(mm, mm->total_vm); - 18 2.2e-04 0 0 0 0 0 0 : vx_vmlocked_sub(mm, mm->locked_vm); + 2 2.2e-05 0 0 0 0 0 0 : set_mm_counter(mm, file_rss, 0); + 178 0.0020 0 0 0 0 5 2.3e-04 : set_mm_counter(mm, anon_rss, 0); + 45 5.1e-04 0 0 0 0 6 2.7e-04 : vx_vmpages_sub(mm, mm->total_vm); + 2 2.2e-05 0 0 0 0 0 0 : vx_vmlocked_sub(mm, mm->locked_vm); : : /* : * Walk the list again, actually closing and freeing it, : * with preemption enabled, without holding any MM locks. : */ - 931 0.0116 0 0 1 0.0351 239 0.0114 : while (vma) - 552 0.0069 0 0 0 0 125 0.0060 : vma = remove_vma(vma); + 1119 0.0126 0 0 0 0 214 0.0097 : while (vma) + 543 0.0061 0 0 0 0 105 0.0048 : vma = remove_vma(vma); : - 40 5.0e-04 0 0 0 0 11 5.3e-04 : BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); - 29 3.6e-04 0 0 0 0 10 4.8e-04 :} + 64 7.2e-04 0 0 0 0 16 7.3e-04 : BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + 23 2.6e-04 0 0 0 0 6 2.7e-04 :} : :/* Insert vm structure into process list sorted by address : * and into the inode's i_mmap tree. If vm_file is non-NULL : * then i_mmap_lock is taken here. : */ :int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) - 167 0.0021 0 0 0 0 18 8.6e-04 :{ /* insert_vm_struct total: 365 0.0045 0 0 0 0 67 0.0032 */ + 173 0.0019 0 0 0 0 11 5.0e-04 :{ /* insert_vm_struct total: 429 0.0048 0 0 0 0 69 0.0031 */ : struct vm_area_struct * __vma, * prev; : struct rb_node ** rb_link, * rb_parent; : @@ -1986,20 +1986,20 @@ : * using the existing file pgoff checks and manipulations. : * Similarly in do_mmap_pgoff and in do_brk. 
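
/*
 * Editor's note: may_expand_vm(), annotated just below, is the
 * RLIMIT_AS gate (plus the vserver vx_vmpages_avail check). A userspace
 * probe — the 64 MiB cap and 128 MiB request are arbitrary:
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { 64UL << 20, 64UL << 20 };	/* cap VA use */

	if (setrlimit(RLIMIT_AS, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}
	/* cur + npages > lim in may_expand_vm(): ENOMEM. */
	if (mmap(NULL, 128UL << 20, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
		perror("mmap beyond RLIMIT_AS");	/* expected */
	return 0;
}
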
: */ - 1 1.2e-05 0 0 0 0 0 0 : if (!vma->vm_file) { - 1 1.2e-05 0 0 0 0 1 4.8e-05 : BUG_ON(vma->anon_vma); - 18 2.2e-04 0 0 0 0 7 3.3e-04 : vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; + 2 2.2e-05 0 0 0 0 0 0 : if (!vma->vm_file) { + 2 2.2e-05 0 0 0 0 0 0 : BUG_ON(vma->anon_vma); + 11 1.2e-04 0 0 0 0 4 1.8e-04 : vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; : } - 71 8.8e-04 0 0 0 0 9 4.3e-04 : __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent); - 3 3.7e-05 0 0 0 0 0 0 : if (__vma && __vma->vm_start < vma->vm_end) + 56 6.3e-04 0 0 0 0 7 3.2e-04 : __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent); + 3 3.4e-05 0 0 0 0 0 0 : if (__vma && __vma->vm_start < vma->vm_end) : return -ENOMEM; - 20 2.5e-04 0 0 0 0 4 1.9e-04 : if ((vma->vm_flags & VM_ACCOUNT) && + 21 2.4e-04 0 0 0 0 3 1.4e-04 : if ((vma->vm_flags & VM_ACCOUNT) && : (security_vm_enough_memory(vma_pages(vma)) || : !vx_vmpages_avail(mm, vma_pages(vma)))) : return -ENOMEM; - 67 8.3e-04 0 0 0 0 20 9.6e-04 : vma_link(mm, vma, prev, rb_link, rb_parent); + 38 4.3e-04 0 0 0 0 5 2.3e-04 : vma_link(mm, vma, prev, rb_link, rb_parent); : return 0; - 11 1.4e-04 0 0 0 0 2 9.6e-05 :} + 6 6.7e-05 0 0 0 0 5 2.3e-04 :} : :/* : * Copy the vma structure to a new location in the same mm, @@ -2007,8 +2007,8 @@ : */ :struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, : unsigned long addr, unsigned long len, pgoff_t pgoff) - :{ /* copy_vma total: 4 5.0e-05 0 0 0 0 1 4.8e-05 */ - 1 1.2e-05 0 0 0 0 1 4.8e-05 : struct vm_area_struct *vma = *vmap; + :{ /* copy_vma total: 3 3.4e-05 0 0 0 0 0 0 */ + : struct vm_area_struct *vma = *vmap; : unsigned long vma_start = vma->vm_start; : struct mm_struct *mm = vma->vm_mm; : struct vm_area_struct *new_vma, *prev; @@ -2022,8 +2022,8 @@ : if (!vma->vm_file && !vma->anon_vma) : pgoff = addr >> PAGE_SHIFT; : - 2 2.5e-05 0 0 0 0 0 0 : find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); - : new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, + : find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + 1 1.1e-05 0 0 0 0 0 0 : new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, : vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); : if (new_vma) { : /* @@ -2033,7 +2033,7 @@ : vma_start < new_vma->vm_end) : *vmap = new_vma; : } else { - : new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + 1 1.1e-05 0 0 0 0 0 0 : new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); : if (new_vma) { : *new_vma = *vma; : pol = mpol_copy(vma_policy(vma)); @@ -2042,14 +2042,14 @@ : return NULL; : } : vma_set_policy(new_vma, pol); - : new_vma->vm_start = addr; + 1 1.1e-05 0 0 0 0 0 0 : new_vma->vm_start = addr; : new_vma->vm_end = addr + len; : new_vma->vm_pgoff = pgoff; : if (new_vma->vm_file) : get_file(new_vma->vm_file); : if (new_vma->vm_ops && new_vma->vm_ops->open) : new_vma->vm_ops->open(new_vma); - 1 1.2e-05 0 0 0 0 0 0 : vma_link(mm, new_vma, prev, rb_link, rb_parent); + : vma_link(mm, new_vma, prev, rb_link, rb_parent); : } : } : return new_vma; @@ -2060,27 +2060,27 @@ : * number of pages : */ :int may_expand_vm(struct mm_struct *mm, unsigned long npages) - 138 0.0017 0 0 0 0 67 0.0032 :{ /* may_expand_vm total: 2713 0.0338 0 0 0 0 631 0.0301 */ - 815 0.0102 0 0 0 0 78 0.0037 : unsigned long cur = mm->total_vm; /* pages */ + 148 0.0017 0 0 0 0 91 0.0041 :{ /* may_expand_vm total: 4075 0.0458 0 0 1 0.0339 929 0.0422 */ + 950 0.0107 0 0 0 0 84 0.0038 : unsigned long cur = mm->total_vm; /* pages */ : unsigned long lim; : : lim = 
current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; : - 784 0.0098 0 0 0 0 273 0.0130 : if (cur + npages > lim) + 770 0.0087 0 0 0 0 318 0.0144 : if (cur + npages > lim) : return 0; - 49 6.1e-04 0 0 0 0 37 0.0018 : if (!vx_vmpages_avail(mm, npages)) + 65 7.3e-04 0 0 0 0 29 0.0013 : if (!vx_vmpages_avail(mm, npages)) : return 0; : return 1; - 847 0.0106 0 0 0 0 132 0.0063 :} + 740 0.0083 0 0 0 0 144 0.0065 :} /* * Total samples for file : "mm/mmap.c" * - * 182231 2.2707 0 0 26 0.9132 60254 2.8785 + * 200646 2.2566 0 0 17 0.5771 62704 2.8477 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mprotect.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mprotect.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mprotect.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mprotect.c 2006-03-12 07:20:05.000000000 -0500 @@ -31,7 +31,7 @@ : pte_t *pte; : spinlock_t *ptl; : - 423 0.0053 0 0 0 0 99 0.0047 : pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + 421 0.0047 0 0 0 0 83 0.0038 : pte = pte_offset_map_lock(mm, pmd, addr, &ptl); : do { : if (pte_present(*pte)) { : pte_t ptent; @@ -54,7 +54,7 @@ : pmd_t *pmd; : unsigned long next; : - 63 7.9e-04 0 0 0 0 22 0.0011 : pmd = pmd_offset(pud, addr); + 63 7.1e-04 0 0 0 0 15 6.8e-04 : pmd = pmd_offset(pud, addr); : do { : next = pmd_addr_end(addr, end); : if (pmd_none_or_clear_bad(pmd)) @@ -69,7 +69,7 @@ : pud_t *pud; : unsigned long next; : - 342 0.0043 0 0 0 0 16 7.6e-04 : pud = pud_offset(pgd, addr); + 279 0.0031 0 0 0 0 24 0.0011 : pud = pud_offset(pgd, addr); : do { : next = pud_addr_end(addr, end); : if (pud_none_or_clear_bad(pud)) @@ -178,43 +178,43 @@ : :asmlinkage long :sys_mprotect(unsigned long start, size_t len, unsigned long prot) - 36 4.5e-04 0 0 0 0 5 2.4e-04 :{ /* sys_mprotect total: 5001 0.0623 0 0 1 0.0351 2026 0.0968 */ + 46 5.2e-04 0 0 0 0 4 1.8e-04 :{ /* sys_mprotect total: 4736 0.0533 0 0 2 0.0679 1964 0.0892 */ : unsigned long vm_flags, nstart, end, tmp, reqprot; : struct vm_area_struct *vma, *prev; : int error = -EINVAL; - 5 6.2e-05 0 0 0 0 0 0 : const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); - 197 0.0025 0 0 0 0 43 0.0021 : prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); - 9 1.1e-04 0 0 0 0 1 4.8e-05 : if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ + 3 3.4e-05 0 0 0 0 2 9.1e-05 : const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); + 169 0.0019 0 0 0 0 52 0.0024 : prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); + 5 5.6e-05 0 0 0 0 2 9.1e-05 : if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ : return -EINVAL; : - 2 2.5e-05 0 0 0 0 0 0 : if (start & ~PAGE_MASK) + 2 2.2e-05 0 0 0 0 3 1.4e-04 : if (start & ~PAGE_MASK) : return -EINVAL; - 3 3.7e-05 0 0 0 0 4 1.9e-04 : if (!len) + 5 5.6e-05 0 0 0 0 2 9.1e-05 : if (!len) : return 0; : len = PAGE_ALIGN(len); - 115 0.0014 0 0 0 0 11 5.3e-04 : end = start + len; - 14 1.7e-04 0 0 0 0 4 1.9e-04 : if (end <= start) + 114 0.0013 0 0 0 0 19 8.6e-04 : end = start + len; + 13 1.5e-04 0 0 0 0 0 0 : if (end <= start) : return -ENOMEM; - 9 1.1e-04 0 0 0 0 1 4.8e-05 : if 
(prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
+ 5 5.6e-05 0 0 0 0 1 4.5e-05 : if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
 : return -EINVAL;
 :
 : reqprot = prot;
 : /*
 : * Does the application expect PROT_READ to imply PROT_EXEC:
 : */
- 33 4.1e-04 0 0 0 0 9 4.3e-04 : if (unlikely((prot & PROT_READ) &&
+ 28 3.1e-04 0 0 0 0 7 3.2e-04 : if (unlikely((prot & PROT_READ) &&
 : (current->personality & READ_IMPLIES_EXEC)))
 : prot |= PROT_EXEC;
 :
 : vm_flags = calc_vm_prot_bits(prot);
 :
- 6 7.5e-05 0 0 0 0 6 2.9e-04 : down_write(&current->mm->mmap_sem);
+ 7 7.9e-05 0 0 0 0 4 1.8e-04 : down_write(&current->mm->mmap_sem);
 :
- 89 0.0011 0 0 1 0.0351 20 9.6e-04 : vma = find_vma_prev(current->mm, start, &prev);
+ 80 9.0e-04 0 0 0 0 26 0.0012 : vma = find_vma_prev(current->mm, start, &prev);
 : error = -ENOMEM;
- : if (!vma)
+ 2 2.2e-05 0 0 0 0 0 0 : if (!vma)
 : goto out;
- 4 5.0e-05 0 0 0 0 2 9.6e-05 : if (unlikely(grows & PROT_GROWSDOWN)) {
+ 2 2.2e-05 0 0 0 0 0 0 : if (unlikely(grows & PROT_GROWSDOWN)) {
 : if (vma->vm_start >= end)
 : goto out;
 : start = vma->vm_start;
@@ -223,9 +223,9 @@
 : goto out;
 : }
 : else {
- 35 4.4e-04 0 0 0 0 10 4.8e-04 : if (vma->vm_start > start)
+ 27 3.0e-04 0 0 0 0 16 7.3e-04 : if (vma->vm_start > start)
 : goto out;
- 57 7.1e-04 0 0 0 0 14 6.7e-04 : if (unlikely(grows & PROT_GROWSUP)) {
+ 50 5.6e-04 0 0 0 0 10 4.5e-04 : if (unlikely(grows & PROT_GROWSUP)) {
 : end = vma->vm_end;
 : error = -EINVAL;
 : if (!(vma->vm_flags & VM_GROWSUP))
@@ -233,22 +233,22 @@
 : }
 : }
 : if (start > vma->vm_start)
- 27 3.4e-04 0 0 0 0 15 7.2e-04 : prev = vma;
+ 38 4.3e-04 0 0 1 0.0339 17 7.7e-04 : prev = vma;
 :
 : for (nstart = start ; ; ) {
 : unsigned long newflags;
 :
 : /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 :
- 3 3.7e-05 0 0 0 0 3 1.4e-04 : if (is_vm_hugetlb_page(vma)) {
+ 6 6.7e-05 0 0 0 0 0 0 : if (is_vm_hugetlb_page(vma)) {
 : error = -EACCES;
 : goto out;
 : }
 :
- 66 8.2e-04 0 0 0 0 20 9.6e-04 : newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+ 73 8.2e-04 0 0 0 0 16 7.3e-04 : newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 :
 : /* newflags >> 4 shift VM_MAY% in place of VM_% */
- 10 1.2e-04 0 0 0 0 1 4.8e-05 : if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+ 6 6.7e-05 0 0 0 0 2 9.1e-05 : if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 : error = -EACCES;
 : goto out;
 : }
@@ -257,17 +257,17 @@
 : if (error)
 : goto out;
 :
- 1 1.2e-05 0 0 0 0 0 0 : tmp = vma->vm_end;
- 11 1.4e-04 0 0 0 0 3 1.4e-04 : if (tmp > end)
+ 1 1.1e-05 0 0 0 0 0 0 : tmp = vma->vm_end;
+ 7 7.9e-05 0 0 0 0 0 0 : if (tmp > end)
 : tmp = end;
 : error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
 : if (error)
 : goto out;
 : nstart = tmp;
 :
- 23 2.9e-04 0 0 0 0 7 3.3e-04 : if (nstart < prev->vm_end)
+ 23 2.6e-04 0 0 0 0 9 4.1e-04 : if (nstart < prev->vm_end)
 : nstart = prev->vm_end;
- 2 2.5e-05 0 0 0 0 2 9.6e-05 : if (nstart >= end)
+ 1 1.1e-05 0 0 0 0 3 1.4e-04 : if (nstart >= end)
 : goto out;
 :
 : vma = prev->vm_next;
@@ -277,18 +277,18 @@
 : }
 : }
 :out:
- 69 8.6e-04 0 0 0 0 13 6.2e-04 : up_write(&current->mm->mmap_sem);
- 16 2.0e-04 0 0 0 0 1 4.8e-05 : return error;
- 57 7.1e-04 0 0 0 0 6 2.9e-04 :}
+ 46 5.2e-04 0 0 0 0 13 5.9e-04 : up_write(&current->mm->mmap_sem);
+ 17 1.9e-04 0 0 0 0 4 1.8e-04 : return error;
+ 42 4.7e-04 0 0 0 0 12 5.4e-04 :}
/*
 * Total samples for file : "mm/mprotect.c"
 *
- * 1727 0.0215 0 0 1 0.0351 338 0.0161
+ * 1581 0.0178 0 0 1 0.0339 346 0.0157
 */
/*
- * Command line: opannotate
--source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mremap.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mremap.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mremap.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/mremap.c 2006-03-12 07:20:06.000000000 -0500 @@ -34,11 +34,11 @@ : if (pgd_none_or_clear_bad(pgd)) : return NULL; : - 1 1.2e-05 0 0 0 0 0 0 : pud = pud_offset(pgd, addr); + 2 2.2e-05 0 0 0 0 0 0 : pud = pud_offset(pgd, addr); : if (pud_none_or_clear_bad(pud)) : return NULL; : - : pmd = pmd_offset(pud, addr); + 1 1.1e-05 0 0 0 0 0 0 : pmd = pmd_offset(pud, addr); : if (pmd_none_or_clear_bad(pmd)) : return NULL; : @@ -123,14 +123,14 @@ :static unsigned long move_page_tables(struct vm_area_struct *vma, : unsigned long old_addr, struct vm_area_struct *new_vma, : unsigned long new_addr, unsigned long len) - :{ /* move_page_tables total: 26 3.2e-04 0 0 0 0 5 2.4e-04 */ + :{ /* move_page_tables total: 25 2.8e-04 0 0 0 0 2 9.1e-05 */ : unsigned long extent, next, old_end; : pmd_t *old_pmd, *new_pmd; : - 2 2.5e-05 0 0 0 0 0 0 : old_end = old_addr + len; + 1 1.1e-05 0 0 0 0 0 0 : old_end = old_addr + len; : flush_cache_range(vma, old_addr, old_end); : - 3 3.7e-05 0 0 0 0 0 0 : for (; old_addr < old_end; old_addr += extent, new_addr += extent) { + 3 3.4e-05 0 0 0 0 0 0 : for (; old_addr < old_end; old_addr += extent, new_addr += extent) { : cond_resched(); : next = (old_addr + PMD_SIZE) & PMD_MASK; : if (next - 1 > old_end) @@ -143,16 +143,16 @@ : if (!new_pmd) : break; : next = (new_addr + PMD_SIZE) & PMD_MASK; - 1 1.2e-05 0 0 0 0 0 0 : if (extent > next - new_addr) + : if (extent > next - new_addr) : extent = next - new_addr; - 1 1.2e-05 0 0 0 0 0 0 : if (extent > LATENCY_LIMIT) + : if (extent > LATENCY_LIMIT) : extent = LATENCY_LIMIT; : move_ptes(vma, old_pmd, old_addr, old_addr + extent, : new_vma, new_pmd, new_addr); : } : : return len + old_addr - old_end; /* how much done */ - :} + 1 1.1e-05 0 0 0 0 0 0 :} : :static unsigned long move_vma(struct vm_area_struct *vma, : unsigned long old_addr, unsigned long old_len, @@ -249,20 +249,20 @@ :unsigned long do_mremap(unsigned long addr, : unsigned long old_len, unsigned long new_len, : unsigned long flags, unsigned long new_addr) - 3 3.7e-05 0 0 0 0 0 0 :{ /* do_mremap total: 18 2.2e-04 0 0 0 0 2 9.6e-05 */ + 2 2.2e-05 0 0 0 0 1 4.5e-05 :{ /* do_mremap total: 16 1.8e-04 0 0 0 0 1 4.5e-05 */ : struct mm_struct *mm = current->mm; : struct vm_area_struct *vma; : unsigned long ret = -EINVAL; : unsigned long charged = 0; : - : if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) + 1 1.1e-05 0 0 0 0 0 0 : if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) : goto out; : : if (addr & ~PAGE_MASK) : goto out; : : old_len = PAGE_ALIGN(old_len); - : new_len = PAGE_ALIGN(new_len); + 1 1.1e-05 0 0 0 0 0 0 : new_len = PAGE_ALIGN(new_len); : : /* : * We allow a zero old-len as a special case @@ -301,7 +301,7 @@ : * the unnecessary pages.. 
: * do_munmap does all the needed commit accounting
 : */
- 3 3.7e-05 0 0 0 0 0 0 : if (old_len >= new_len) {
+ 3 3.4e-05 0 0 0 0 0 0 : if (old_len >= new_len) {
 : ret = do_munmap(mm, addr+new_len, old_len - new_len);
 : if (ret && old_len != new_len)
 : goto out;
@@ -346,7 +346,7 @@
 : goto out;
 : }
 :
- : if (vma->vm_flags & VM_ACCOUNT) {
+ 1 1.1e-05 0 0 0 0 0 0 : if (vma->vm_flags & VM_ACCOUNT) {
 : charged = (new_len - old_len) >> PAGE_SHIFT;
 : if (security_vm_enough_memory(charged))
 : goto out_nc;
@@ -355,11 +355,11 @@
 : /* old_len exactly to the end of the area..
 : * And we're not relocating the area.
 : */
- : if (old_len == vma->vm_end - addr &&
+ 1 1.1e-05 0 0 0 0 0 0 : if (old_len == vma->vm_end - addr &&
 : !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
 : (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
 : unsigned long max_addr = TASK_SIZE;
- 4 5.0e-05 0 0 0 0 0 0 : if (vma->vm_next)
+ : if (vma->vm_next)
 : max_addr = vma->vm_next->vm_start;
 : /* can we just expand the current mapping? */
 : if (max_addr - addr >= new_len) {
@@ -368,7 +368,7 @@
 : vma_adjust(vma, vma->vm_start,
 : addr + new_len, vma->vm_pgoff, NULL);
 :
- 1 1.2e-05 0 0 0 0 0 0 : vx_vmpages_add(mm, pages);
+ : vx_vmpages_add(mm, pages);
 : vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
 : if (vma->vm_flags & VM_LOCKED) {
 : vx_vmlocked_add(mm, pages);
@@ -385,7 +385,7 @@
 : * we need to create a new one and move it..
 : */
 : ret = -ENOMEM;
- 2 2.5e-05 0 0 0 0 0 0 : if (flags & MREMAP_MAYMOVE) {
+ 1 1.1e-05 0 0 0 0 0 0 : if (flags & MREMAP_MAYMOVE) {
 : if (!(flags & MREMAP_FIXED)) {
 : unsigned long map_flags = 0;
 : if (vma->vm_flags & VM_MAYSHARE)
@@ -395,12 +395,12 @@
 : vma->vm_pgoff, map_flags);
 : ret = new_addr;
 : if (new_addr & ~PAGE_MASK)
- 0 0 0 0 0 0 1 4.8e-05 : goto out;
+ : goto out;
 : }
 : ret = move_vma(vma, addr, old_len, new_len, new_addr);
 : }
 :out:
- : if (ret & ~PAGE_MASK)
+ 2 2.2e-05 0 0 0 0 0 0 : if (ret & ~PAGE_MASK)
 : vm_unacct_memory(charged);
 :out_nc:
 : return ret;
@@ -409,23 +409,23 @@
 :asmlinkage unsigned long sys_mremap(unsigned long addr,
 : unsigned long old_len, unsigned long new_len,
 : unsigned long flags, unsigned long new_addr)
- 2 2.5e-05 0 0 0 0 0 0 :{ /* sys_mremap total: 3 3.7e-05 0 0 0 0 0 0 */
+ 1 1.1e-05 0 0 0 0 0 0 :{ /* sys_mremap total: 1 1.1e-05 0 0 0 0 0 0 */
 : unsigned long ret;
 :
 : down_write(&current->mm->mmap_sem);
 : ret = do_mremap(addr, old_len, new_len, flags, new_addr);
 : up_write(&current->mm->mmap_sem);
 : return ret;
- 1 1.2e-05 0 0 0 0 0 0 :}
+ :}
/*
 * Total samples for file : "mm/mremap.c"
 *
- * 24 3.0e-04 0 0 0 0 1 4.8e-05
+ * 21 2.4e-04 0 0 0 0 1 4.5e-05
 */
/*
- * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0
+ * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2
 *
 * Interpretation of command line:
 * Output annotated source file with samples
diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_alloc.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_alloc.c
--- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_alloc.c 2006-03-12 07:18:53.000000000 -0500
+++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_alloc.c 2006-03-12 07:20:05.000000000 -0500
@@ -224,14 +224,14 @@
 :}
 :
 :static inline void set_page_order(struct page *page, int order) {
- 535 0.0067 0 0 0 0 382 0.0182 :
set_page_private(page, order); + 351 0.0039 0 0 0 0 290 0.0132 : set_page_private(page, order); : __SetPagePrivate(page); :} : :static inline void rmv_page_order(struct page *page) :{ : __ClearPagePrivate(page); - 604 0.0075 0 0 0 0 422 0.0202 : set_page_private(page, 0); + 646 0.0073 0 0 0 0 428 0.0194 : set_page_private(page, 0); :} : :/* @@ -262,7 +262,7 @@ :static inline unsigned long :__find_combined_index(unsigned long page_idx, unsigned int order) :{ - 2 2.5e-05 0 0 0 0 0 0 : return (page_idx & ~(1 << order)); + 3 3.4e-05 0 0 0 0 1 4.5e-05 : return (page_idx & ~(1 << order)); :} : :/* @@ -317,18 +317,18 @@ : struct zone *zone, unsigned int order) :{ : unsigned long page_idx; - 3697 0.0461 0 0 2 0.0702 1637 0.0782 : int order_size = 1 << order; + 4523 0.0509 0 0 0 0 1800 0.0817 : int order_size = 1 << order; : - 4542 0.0566 0 0 0 0 2994 0.1430 : if (unlikely(PageCompound(page))) + 5270 0.0593 0 0 0 0 2975 0.1351 : if (unlikely(PageCompound(page))) : destroy_compound_page(page, order); : - 1764 0.0220 0 0 0 0 356 0.0170 : page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); + 1999 0.0225 0 0 0 0 502 0.0228 : page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); : - 694 0.0086 0 0 0 0 275 0.0131 : BUG_ON(page_idx & (order_size - 1)); + 819 0.0092 0 0 0 0 328 0.0149 : BUG_ON(page_idx & (order_size - 1)); : BUG_ON(bad_range(zone, page)); : - 2943 0.0367 0 0 3 0.1054 2717 0.1298 : zone->free_pages += order_size; - 1805 0.0225 0 0 0 0 974 0.0465 : while (order < MAX_ORDER-1) { + 4612 0.0519 0 0 1 0.0339 2754 0.1251 : zone->free_pages += order_size; + 1878 0.0211 0 0 1 0.0339 828 0.0376 : while (order < MAX_ORDER-1) { : unsigned long combined_idx; : struct free_area *area; : struct page *buddy; @@ -337,18 +337,18 @@ : if (!page_is_buddy(buddy, order)) : break; /* Move the buddy up one level. 
*/ : - 34 4.2e-04 0 0 1 0.0351 48 0.0023 : list_del(&buddy->lru); - 3725 0.0464 0 0 1 0.0351 1262 0.0603 : area = zone->free_area + order; - 252 0.0031 0 0 1 0.0351 17 8.1e-04 : area->nr_free--; + 27 3.0e-04 0 0 0 0 30 0.0014 : list_del(&buddy->lru); + 5305 0.0597 0 0 0 0 1226 0.0557 : area = zone->free_area + order; + 236 0.0027 0 0 0 0 19 8.6e-04 : area->nr_free--; : rmv_page_order(buddy); : combined_idx = __find_combined_index(page_idx, order); - 1236 0.0154 0 0 0 0 492 0.0235 : page = page + (combined_idx - page_idx); + 923 0.0104 0 0 0 0 392 0.0178 : page = page + (combined_idx - page_idx); : page_idx = combined_idx; - 494 0.0062 0 0 0 0 746 0.0356 : order++; + 345 0.0039 0 0 0 0 442 0.0201 : order++; : } : set_page_order(page, order); - 2371 0.0295 0 0 0 0 2329 0.1113 : list_add(&page->lru, &zone->free_area[order].free_list); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : zone->free_area[order].nr_free++; + 2471 0.0278 0 0 1 0.0339 2280 0.1035 : list_add(&page->lru, &zone->free_area[order].free_list); + 3 3.4e-05 0 0 0 0 1 4.5e-05 : zone->free_area[order].nr_free++; :} : :static inline int free_pages_check(struct page *page) @@ -390,38 +390,38 @@ : */ :static void free_pages_bulk(struct zone *zone, int count, : struct list_head *list, int order) - 222 0.0028 0 0 0 0 189 0.0090 :{ /* free_pages_bulk total: 72070 0.8980 0 0 28 0.9835 56248 2.6871 */ - 30 3.7e-04 0 0 0 0 27 0.0013 : spin_lock(&zone->lock); - 185 0.0023 0 0 0 0 147 0.0070 : zone->all_unreclaimable = 0; - 151 0.0019 0 0 0 0 23 0.0011 : zone->pages_scanned = 0; - 2109 0.0263 0 0 0 0 1818 0.0869 : while (count--) { + 255 0.0029 0 0 1 0.0339 169 0.0077 :{ /* free_pages_bulk total: 77032 0.8663 0 0 19 0.6449 49785 2.2610 */ + 23 2.6e-04 0 0 0 0 29 0.0013 : spin_lock(&zone->lock); + 342 0.0038 0 0 0 0 275 0.0125 : zone->all_unreclaimable = 0; + 202 0.0023 0 0 0 0 28 0.0013 : zone->pages_scanned = 0; + 2407 0.0271 0 0 1 0.0339 2098 0.0953 : while (count--) { : struct page *page; : - 1218 0.0152 0 0 0 0 886 0.0423 : BUG_ON(list_empty(list)); - 4296 0.0535 0 0 2 0.0702 3178 0.1518 : page = list_entry(list->prev, struct page, lru); + 1628 0.0183 0 0 0 0 1001 0.0455 : BUG_ON(list_empty(list)); + 4222 0.0475 0 0 2 0.0679 3837 0.1743 : page = list_entry(list->prev, struct page, lru); : /* have to delete it as __free_one_page list manipulates */ : list_del(&page->lru); - : __free_one_page(page, zone, order); + 1 1.1e-05 0 0 0 0 0 0 : __free_one_page(page, zone, order); : } : spin_unlock(&zone->lock); - 856 0.0107 0 0 0 0 531 0.0254 :} + 1128 0.0127 0 0 0 0 550 0.0250 :} : :static void free_one_page(struct zone *zone, struct page *page, int order) :{ : LIST_HEAD(list); - 1 1.2e-05 0 0 0 0 0 0 : list_add(&page->lru, &list); - : free_pages_bulk(zone, 1, &list, order); + 2 2.2e-05 0 0 0 0 0 0 : list_add(&page->lru, &list); + 4 4.5e-05 0 0 0 0 2 9.1e-05 : free_pages_bulk(zone, 1, &list, order); :} : :static void __free_pages_ok(struct page *page, unsigned int order) - 205 0.0026 0 0 0 0 36 0.0017 :{ /* __free_pages_ok total: 622 0.0078 0 0 0 0 113 0.0054 */ + 124 0.0014 0 0 0 0 17 7.7e-04 :{ /* __free_pages_ok total: 599 0.0067 0 0 0 0 119 0.0054 */ : unsigned long flags; : int i; : int reserved = 0; : : arch_free_page(page, order); : if (!PageHighMem(page)) - 5 6.2e-05 0 0 0 0 0 0 : mutex_debug_check_no_locks_freed(page_address(page), + 2 2.2e-05 0 0 0 0 5 2.3e-04 : mutex_debug_check_no_locks_freed(page_address(page), : PAGE_SIZE< low) { - 6860 0.0855 0 0 1 0.0351 1350 0.0645 : area--; - 6 7.5e-05 0 0 0 0 0 0 : high--; - 266 0.0033 0 0 0 0 213 0.0102 : 
size >>= 1; + 2261 0.0254 0 0 1 0.0339 1038 0.0471 : while (high > low) { + 4837 0.0544 0 0 2 0.0679 893 0.0406 : area--; + 5 5.6e-05 0 0 0 0 0 0 : high--; + 186 0.0021 0 0 0 0 157 0.0071 : size >>= 1; : BUG_ON(bad_range(zone, &page[size])); - 4 5.0e-05 0 0 0 0 1 4.8e-05 : list_add(&page[size].lru, &area->free_list); - 5 6.2e-05 0 0 0 0 2 9.6e-05 : area->nr_free++; + 8 9.0e-05 0 0 0 0 2 9.1e-05 : list_add(&page[size].lru, &area->free_list); + 2 2.2e-05 0 0 0 0 2 9.1e-05 : area->nr_free++; : set_page_order(&page[size], high); : } :} @@ -547,27 +547,27 @@ : * Call me with the zone->lock already held. : */ :static struct page *__rmqueue(struct zone *zone, unsigned int order) - 2254 0.0281 0 0 2 0.0702 1034 0.0494 :{ /* __rmqueue total: 61267 0.7634 0 0 14 0.4917 28439 1.3586 */ + 2359 0.0265 0 0 1 0.0339 1015 0.0461 :{ /* __rmqueue total: 71244 0.8012 0 0 13 0.4413 26753 1.2150 */ : struct free_area * area; : unsigned int current_order; : struct page *page; : - 6335 0.0789 0 0 2 0.0702 4307 0.2058 : for (current_order = order; current_order < MAX_ORDER; ++current_order) { - 3033 0.0378 0 0 0 0 2717 0.1298 : area = zone->free_area + current_order; - 7257 0.0904 0 0 0 0 1610 0.0769 : if (list_empty(&area->free_list)) + 6846 0.0770 0 0 2 0.0679 3451 0.1567 : for (current_order = order; current_order < MAX_ORDER; ++current_order) { + 2582 0.0290 0 0 0 0 2207 0.1002 : area = zone->free_area + current_order; + 5582 0.0628 0 0 0 0 1338 0.0608 : if (list_empty(&area->free_list)) : continue; : - 405 0.0050 0 0 1 0.0351 339 0.0162 : page = list_entry(area->free_list.next, struct page, lru); + 504 0.0057 0 0 0 0 384 0.0174 : page = list_entry(area->free_list.next, struct page, lru); : list_del(&page->lru); : rmv_page_order(page); - 3 3.7e-05 0 0 0 0 0 0 : area->nr_free--; - 7680 0.0957 0 0 5 0.1756 4216 0.2014 : zone->free_pages -= 1UL << order; + 2 2.2e-05 0 0 0 0 0 0 : area->nr_free--; + 13970 0.1571 0 0 3 0.1018 4166 0.1892 : zone->free_pages -= 1UL << order; : expand(zone, page, order, current_order, area); : return page; : } : : return NULL; - 6725 0.0838 0 0 0 0 2569 0.1227 :} + 9258 0.1041 0 0 1 0.0339 3129 0.1421 :} : :/* : * Obtain a specified number of elements from the buddy allocator, all under @@ -579,12 +579,12 @@ :{ : int i; : - 657 0.0082 0 0 0 0 111 0.0053 : spin_lock(&zone->lock); - 4510 0.0562 0 0 0 0 1208 0.0577 : for (i = 0; i < count; ++i) { - 4903 0.0611 0 0 1 0.0351 3235 0.1545 : struct page *page = __rmqueue(zone, order); - 2231 0.0278 0 0 0 0 629 0.0300 : if (unlikely(page == NULL)) + 661 0.0074 0 0 0 0 133 0.0060 : spin_lock(&zone->lock); + 3998 0.0450 0 0 0 0 968 0.0440 : for (i = 0; i < count; ++i) { + 5207 0.0586 0 0 0 0 2619 0.1189 : struct page *page = __rmqueue(zone, order); + 5278 0.0594 0 0 1 0.0339 1390 0.0631 : if (unlikely(page == NULL)) : break; - 588 0.0073 0 0 0 0 677 0.0323 : list_add_tail(&page->lru, list); + 459 0.0052 0 0 0 0 493 0.0224 : list_add_tail(&page->lru, list); : } : spin_unlock(&zone->lock); : return i; @@ -593,29 +593,29 @@ :#ifdef CONFIG_NUMA :/* Called from the slab reaper to drain remote pagesets */ :void drain_remote_pages(void) - 6 7.5e-05 0 0 0 0 0 0 :{ /* drain_remote_pages total: 40 5.0e-04 0 0 0 0 9 4.3e-04 */ + 2 2.2e-05 0 0 0 0 0 0 :{ /* drain_remote_pages total: 30 3.4e-04 0 0 0 0 5 2.3e-04 */ : struct zone *zone; : int i; : unsigned long flags; : : local_irq_save(flags); - 5 6.2e-05 0 0 0 0 1 4.8e-05 : for_each_zone(zone) { + 8 9.0e-05 0 0 0 0 0 0 : for_each_zone(zone) { : struct per_cpu_pageset *pset; : : /* Do not drain local 
pagesets */ - 0 0 0 0 0 0 1 4.8e-05 : if (zone->zone_pgdat->node_id == numa_node_id()) + 1 1.1e-05 0 0 0 0 0 0 : if (zone->zone_pgdat->node_id == numa_node_id()) : continue; : - 3 3.7e-05 0 0 0 0 1 4.8e-05 : pset = zone_pcp(zone, smp_processor_id()); - 1 1.2e-05 0 0 0 0 2 9.6e-05 : for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { + : pset = zone_pcp(zone, smp_processor_id()); + : for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { : struct per_cpu_pages *pcp; : - 4 5.0e-05 0 0 0 0 0 0 : pcp = &pset->pcp[i]; - 6 7.5e-05 0 0 0 0 1 4.8e-05 : free_pages_bulk(zone, pcp->count, &pcp->list, 0); - : pcp->count = 0; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : pcp = &pset->pcp[i]; + 6 6.7e-05 0 0 0 0 1 4.5e-05 : free_pages_bulk(zone, pcp->count, &pcp->list, 0); + 0 0 0 0 0 0 1 4.5e-05 : pcp->count = 0; : } : } - 1 1.2e-05 0 0 0 0 0 0 : local_irq_restore(flags); + 1 1.1e-05 0 0 0 0 0 0 : local_irq_restore(flags); :} :#endif : @@ -708,37 +708,37 @@ : * Free a 0-order page : */ :static void fastcall free_hot_cold_page(struct page *page, int cold) - 2239 0.0279 0 0 0 0 2381 0.1137 :{ /* free_hot_cold_page total: 45165 0.5628 0 0 11 0.3864 32370 1.5464 */ + 2226 0.0250 0 0 1 0.0339 2122 0.0964 :{ /* free_hot_cold_page total: 47347 0.5325 0 0 15 0.5092 33080 1.5023 */ : struct zone *zone = page_zone(page); : struct per_cpu_pages *pcp; : unsigned long flags; : : arch_free_page(page, 0); : - 1730 0.0216 0 0 1 0.0351 2089 0.0998 : if (PageAnon(page)) - 886 0.0110 0 0 0 0 1062 0.0507 : page->mapping = NULL; - 842 0.0105 0 0 0 0 525 0.0251 : if (free_pages_check(page)) + 1915 0.0215 0 0 2 0.0679 1943 0.0882 : if (PageAnon(page)) + 876 0.0099 0 0 0 0 1086 0.0493 : page->mapping = NULL; + 921 0.0104 0 0 0 0 486 0.0221 : if (free_pages_check(page)) : return; : : kernel_map_pages(page, 1, 0); : - 1408 0.0175 0 0 1 0.0351 278 0.0133 : pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; - 2533 0.0316 0 0 0 0 351 0.0168 : local_irq_save(flags); - 3238 0.0403 0 0 0 0 412 0.0197 : __inc_page_state(pgfree); - 7 8.7e-05 0 0 0 0 0 0 : list_add(&page->lru, &pcp->list); - 9 1.1e-04 0 0 0 0 2 9.6e-05 : pcp->count++; - 1848 0.0230 0 0 0 0 2934 0.1402 : if (pcp->count >= pcp->high) { - 512 0.0064 0 0 0 0 322 0.0154 : free_pages_bulk(zone, pcp->batch, &pcp->list, 0); - 114 0.0014 0 0 0 0 62 0.0030 : pcp->count -= pcp->batch; + 1475 0.0166 0 0 0 0 314 0.0143 : pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; + 2728 0.0307 0 0 0 0 451 0.0205 : local_irq_save(flags); + 3494 0.0393 0 0 0 0 464 0.0211 : __inc_page_state(pgfree); + 7 7.9e-05 0 0 0 0 0 0 : list_add(&page->lru, &pcp->list); + 10 1.1e-04 0 0 0 0 0 0 : pcp->count++; + 1846 0.0208 0 0 0 0 2851 0.1295 : if (pcp->count >= pcp->high) { + 523 0.0059 0 0 0 0 314 0.0143 : free_pages_bulk(zone, pcp->batch, &pcp->list, 0); + 111 0.0012 0 0 0 0 53 0.0024 : pcp->count -= pcp->batch; : } - 2749 0.0343 0 0 0 0 1360 0.0650 : local_irq_restore(flags); + 2809 0.0316 0 0 2 0.0679 1714 0.0778 : local_irq_restore(flags); : put_cpu(); - 11131 0.1387 0 0 3 0.1054 7776 0.3715 :} + 11487 0.1292 0 0 4 0.1358 7863 0.3571 :} : :void fastcall free_hot_page(struct page *page) - 38 4.7e-04 0 0 0 0 17 8.1e-04 :{ /* free_hot_page total: 192 0.0024 0 0 0 0 47 0.0022 */ - 127 0.0016 0 0 0 0 19 9.1e-04 : free_hot_cold_page(page, 0); - 27 3.4e-04 0 0 0 0 11 5.3e-04 :} + 53 6.0e-04 0 0 0 0 16 7.3e-04 :{ /* free_hot_page total: 205 0.0023 0 0 0 0 37 0.0017 */ + 123 0.0014 0 0 0 0 15 6.8e-04 : free_hot_cold_page(page, 0); + 29 3.3e-04 0 0 0 0 6 2.7e-04 :} : :void fastcall free_cold_page(struct page *page) :{ @@ -768,42 +768,42 @@ : int 
cpu; : :again: - 1202 0.0150 0 0 1 0.0351 978 0.0467 : cpu = get_cpu(); - 4239 0.0528 0 0 2 0.0702 3193 0.1525 : if (likely(order == 0)) { + 1172 0.0132 0 0 1 0.0339 1105 0.0502 : cpu = get_cpu(); + 4499 0.0506 0 0 0 0 3316 0.1506 : if (likely(order == 0)) { : struct per_cpu_pages *pcp; : - 1047 0.0130 0 0 0 0 1150 0.0549 : pcp = &zone_pcp(zone, cpu)->pcp[cold]; - 2046 0.0255 0 0 1 0.0351 1929 0.0922 : local_irq_save(flags); - 3682 0.0459 0 0 0 0 2579 0.1232 : if (!pcp->count) { - 613 0.0076 0 0 0 0 304 0.0145 : pcp->count += rmqueue_bulk(zone, 0, + 1074 0.0121 0 0 1 0.0339 1164 0.0529 : pcp = &zone_pcp(zone, cpu)->pcp[cold]; + 2225 0.0250 0 0 0 0 2075 0.0942 : local_irq_save(flags); + 3857 0.0434 0 0 1 0.0339 2471 0.1122 : if (!pcp->count) { + 660 0.0074 0 0 0 0 261 0.0119 : pcp->count += rmqueue_bulk(zone, 0, : pcp->batch, &pcp->list); - 913 0.0114 0 0 0 0 449 0.0215 : if (unlikely(!pcp->count)) + 1333 0.0150 0 0 0 0 428 0.0194 : if (unlikely(!pcp->count)) : goto failed; : } - 310 0.0039 0 0 0 0 144 0.0069 : page = list_entry(pcp->list.next, struct page, lru); + 290 0.0033 0 0 0 0 152 0.0069 : page = list_entry(pcp->list.next, struct page, lru); : list_del(&page->lru); - 562 0.0070 0 0 0 0 411 0.0196 : pcp->count--; + 613 0.0069 0 0 0 0 368 0.0167 : pcp->count--; : } else { - 26 3.2e-04 0 0 0 0 6 2.9e-04 : spin_lock_irqsave(&zone->lock, flags); - 35 4.4e-04 0 0 0 0 10 4.8e-04 : page = __rmqueue(zone, order); + 44 4.9e-04 0 0 0 0 8 3.6e-04 : spin_lock_irqsave(&zone->lock, flags); + 29 3.3e-04 0 0 0 0 7 3.2e-04 : page = __rmqueue(zone, order); : spin_unlock(&zone->lock); - 0 0 0 0 0 0 1 4.8e-05 : if (!page) + : if (!page) : goto failed; : } : - 18115 0.2257 0 0 2 0.0702 6735 0.3218 : __mod_page_state_zone(zone, pgalloc, 1 << order); + 20182 0.2270 0 0 4 0.1358 6825 0.3100 : __mod_page_state_zone(zone, pgalloc, 1 << order); : zone_statistics(zonelist, zone, cpu); - 4735 0.0590 0 0 0 0 1153 0.0551 : local_irq_restore(flags); + 5446 0.0612 0 0 0 0 1236 0.0561 : local_irq_restore(flags); : put_cpu(); : : BUG_ON(bad_range(zone, page)); : if (prep_new_page(page, order)) : goto again; : - 772 0.0096 0 0 0 0 508 0.0243 : if (gfp_flags & __GFP_ZERO) + 808 0.0091 0 0 0 0 564 0.0256 : if (gfp_flags & __GFP_ZERO) : prep_zero_page(page, order, gfp_flags); : - 1938 0.0241 0 0 0 0 241 0.0115 : if (order && (gfp_flags & __GFP_COMP)) + 2358 0.0265 0 0 0 0 291 0.0132 : if (order && (gfp_flags & __GFP_COMP)) : prep_compound_page(page, order); : return page; : @@ -827,30 +827,30 @@ : */ :int zone_watermark_ok(struct zone *z, int order, unsigned long mark, : int classzone_idx, int alloc_flags) - 13225 0.1648 0 0 2 0.0702 8335 0.3982 :{ /* zone_watermark_ok total: 30451 0.3794 0 0 7 0.2459 21537 1.0289 */ + 14118 0.1588 0 0 6 0.2037 8417 0.3823 :{ /* zone_watermark_ok total: 32883 0.3698 0 0 12 0.4073 23603 1.0719 */ : /* free_pages my go negative - that's OK */ - 1648 0.0205 0 0 0 0 1405 0.0671 : long min = mark, free_pages = z->free_pages - (1 << order) + 1; + 1803 0.0203 0 0 0 0 1558 0.0708 : long min = mark, free_pages = z->free_pages - (1 << order) + 1; : int o; : - 4594 0.0572 0 0 1 0.0351 3224 0.1540 : if (alloc_flags & ALLOC_HIGH) + 4778 0.0537 0 0 3 0.1018 3429 0.1557 : if (alloc_flags & ALLOC_HIGH) : min -= min / 2; - 946 0.0118 0 0 0 0 806 0.0385 : if (alloc_flags & ALLOC_HARDER) - : min -= min / 4; + 983 0.0111 0 0 0 0 907 0.0412 : if (alloc_flags & ALLOC_HARDER) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : min -= min / 4; : - 4669 0.0582 0 0 2 0.0702 3689 0.1762 : if (free_pages <= min + 
z->lowmem_reserve[classzone_idx]) + 5069 0.0570 0 0 0 0 4353 0.1977 : if (free_pages <= min + z->lowmem_reserve[classzone_idx]) : return 0; - 2763 0.0344 0 0 1 0.0351 2060 0.0984 : for (o = 0; o < order; o++) { + 3193 0.0359 0 0 1 0.0339 2486 0.1129 : for (o = 0; o < order; o++) { : /* At the next order, this order's pages become unavailable */ - 1 1.2e-05 0 0 0 0 3 1.4e-04 : free_pages -= z->free_area[o].nr_free << o; + 2 2.2e-05 0 0 0 0 3 1.4e-04 : free_pages -= z->free_area[o].nr_free << o; : : /* Require fewer higher order pages to be free */ - 11 1.4e-04 0 0 0 0 16 7.6e-04 : min >>= 1; + 15 1.7e-04 0 0 0 0 7 3.2e-04 : min >>= 1; : - 0 0 0 0 0 0 1 4.8e-05 : if (free_pages <= min) + 1 1.1e-05 0 0 0 0 0 0 : if (free_pages <= min) : return 0; : } : return 1; - 2594 0.0323 0 0 1 0.0351 1998 0.0955 :} + 2919 0.0328 0 0 2 0.0679 2442 0.1109 :} : :/* : * get_page_from_freeliest goes through the zonelist trying to allocate @@ -859,10 +859,10 @@ :static struct page * :get_page_from_freelist(gfp_t gfp_mask, unsigned int order, : struct zonelist *zonelist, int alloc_flags) - 14815 0.1846 0 0 4 0.1405 11292 0.5395 :{ /* get_page_from_freelist total: 158902 1.9800 0 0 34 1.1942 92515 4.4197 */ - 3 3.7e-05 0 0 0 0 0 0 : struct zone **z = zonelist->zones; + 14944 0.1681 0 0 3 0.1018 9490 0.4310 :{ /* get_page_from_freelist total: 178667 2.0094 0 0 28 0.9504 93897 4.2644 */ + 3 3.4e-05 0 0 0 0 0 0 : struct zone **z = zonelist->zones; : struct page *page = NULL; - 3324 0.0414 0 0 3 0.1054 3912 0.1869 : int classzone_idx = zone_idx(*z); + 3186 0.0358 0 0 2 0.0679 2754 0.1251 : int classzone_idx = zone_idx(*z); : : /* : * Go through the zonelist once, looking for a zone with enough free. @@ -873,28 +873,28 @@ : !cpuset_zone_allowed(*z, gfp_mask)) : continue; : - 45 5.6e-04 0 0 0 0 41 0.0020 : if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { + 193 0.0022 0 0 0 0 147 0.0067 : if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { : unsigned long mark; - 727 0.0091 0 0 0 0 873 0.0417 : if (alloc_flags & ALLOC_WMARK_MIN) - : mark = (*z)->pages_min; - 1828 0.0228 0 0 0 0 2371 0.1133 : else if (alloc_flags & ALLOC_WMARK_LOW) - 726 0.0090 0 0 0 0 1010 0.0483 : mark = (*z)->pages_low; + 765 0.0086 0 0 0 0 719 0.0327 : if (alloc_flags & ALLOC_WMARK_MIN) + 5 5.6e-05 0 0 0 0 6 2.7e-04 : mark = (*z)->pages_min; + 1746 0.0196 0 0 1 0.0339 1556 0.0707 : else if (alloc_flags & ALLOC_WMARK_LOW) + 794 0.0089 0 0 0 0 756 0.0343 : mark = (*z)->pages_low; : else : mark = (*z)->pages_high; - 11897 0.1482 0 0 3 0.1054 12101 0.5781 : if (!zone_watermark_ok(*z, order, mark, + 13699 0.1541 0 0 2 0.0679 14267 0.6479 : if (!zone_watermark_ok(*z, order, mark, : classzone_idx, alloc_flags)) - 5 6.2e-05 0 0 0 0 1 4.8e-05 : if (!zone_reclaim_mode || + 25 2.8e-04 0 0 0 0 34 0.0015 : if (!zone_reclaim_mode || : !zone_reclaim(*z, gfp_mask, order)) : continue; : } : - 749 0.0093 0 0 0 0 854 0.0408 : page = buffered_rmqueue(zonelist, *z, order, gfp_mask); + 811 0.0091 0 0 0 0 1029 0.0467 : page = buffered_rmqueue(zonelist, *z, order, gfp_mask); : if (page) { : break; : } - 44 5.5e-04 0 0 0 0 35 0.0017 : } while (*(++z) != NULL); + 289 0.0033 0 0 0 0 223 0.0101 : } while (*(++z) != NULL); : return page; - 6100 0.0760 0 0 2 0.0702 739 0.0353 :} + 5647 0.0635 0 0 2 0.0679 846 0.0384 :} : :/* : * This is the 'heart' of the zoned buddy allocator. 
@@ -902,8 +902,8 @@ :struct page * fastcall :__alloc_pages(gfp_t gfp_mask, unsigned int order, : struct zonelist *zonelist) - 5472 0.0682 0 0 1 0.0351 2521 0.1204 :{ /* __alloc_pages total: 51656 0.6437 0 0 6 0.2107 16257 0.7766 */ - 6776 0.0844 0 0 3 0.1054 4278 0.2044 : const gfp_t wait = gfp_mask & __GFP_WAIT; + 5837 0.0656 0 0 1 0.0339 2318 0.1053 :{ /* __alloc_pages total: 54542 0.6134 0 0 7 0.2376 15752 0.7154 */ + 6698 0.0753 0 0 0 0 3797 0.1724 : const gfp_t wait = gfp_mask & __GFP_WAIT; : struct zone **z; : struct page *page; : struct reclaim_state reclaim_state; @@ -915,21 +915,21 @@ : might_sleep_if(wait); : :restart: - 1649 0.0205 0 0 0 0 1027 0.0491 : z = zonelist->zones; /* the list of zones suitable for gfp_mask */ + 1605 0.0181 0 0 0 0 1009 0.0458 : z = zonelist->zones; /* the list of zones suitable for gfp_mask */ : - 27 3.4e-04 0 0 0 0 5 2.4e-04 : if (unlikely(*z == NULL)) { + 27 3.0e-04 0 0 0 0 6 2.7e-04 : if (unlikely(*z == NULL)) { : /* Should this ever happen?? */ : return NULL; : } : - 32595 0.4061 0 0 1 0.0351 5315 0.2539 : page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, + 34936 0.3929 0 0 3 0.1018 5244 0.2382 : page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, : zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); - 1173 0.0146 0 0 1 0.0351 95 0.0045 : if (page) + 1122 0.0126 0 0 1 0.0339 128 0.0058 : if (page) : goto got_pg; : : do { - : wakeup_kswapd(*z, order); - : } while (*(++z)); + 17 1.9e-04 0 0 0 0 28 0.0013 : wakeup_kswapd(*z, order); + 23 2.6e-04 0 0 0 0 28 0.0013 : } while (*(++z)); : : /* : * OK, we're below the kswapd watermark and have kicked background @@ -942,11 +942,11 @@ : * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). : */ : alloc_flags = ALLOC_WMARK_MIN; - : if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) + 11 1.2e-04 0 0 0 0 12 5.4e-04 : if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) : alloc_flags |= ALLOC_HARDER; - : if (gfp_mask & __GFP_HIGH) + 7 7.9e-05 0 0 0 0 7 3.2e-04 : if (gfp_mask & __GFP_HIGH) : alloc_flags |= ALLOC_HIGH; - : alloc_flags |= ALLOC_CPUSET; + 4 4.5e-05 0 0 0 0 12 5.4e-04 : alloc_flags |= ALLOC_CPUSET; : : /* : * Go through the zonelist again. Let __GFP_HIGH and allocations @@ -956,8 +956,8 @@ : * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. : * See also cpuset_zone_allowed() comment in kernel/cpuset.c. : */ - 2 2.5e-05 0 0 0 0 0 0 : page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); - : if (page) + 101 0.0011 0 0 0 0 14 6.4e-04 : page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); + 3 3.4e-05 0 0 0 0 0 0 : if (page) : goto got_pg; : : /* This allocation should allow future memory freeing. */ @@ -992,7 +992,7 @@ : reclaim_state.reclaimed_slab = 0; : p->reclaim_state = &reclaim_state; : - : did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); + 3 3.4e-05 0 0 0 0 2 9.1e-05 : did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); : : p->reclaim_state = NULL; : p->flags &= ~PF_MEMALLOC; @@ -1049,7 +1049,7 @@ : } :got_pg: : return page; - 3955 0.0493 0 0 0 0 3014 0.1440 :} + 4143 0.0466 0 0 2 0.0679 3145 0.1428 :} : :EXPORT_SYMBOL(__alloc_pages); : @@ -1057,61 +1057,61 @@ : * Common helper functions. 
: */ :fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) - 7 8.7e-05 0 0 0 0 2 9.6e-05 :{ /* __get_free_pages total: 427 0.0053 0 0 0 0 61 0.0029 */ + 11 1.2e-04 0 0 0 0 1 4.5e-05 :{ /* __get_free_pages total: 462 0.0052 0 0 0 0 54 0.0025 */ : struct page * page; : page = alloc_pages(gfp_mask, order); - 0 0 0 0 0 0 1 4.8e-05 : if (!page) + 0 0 0 0 0 0 1 4.5e-05 : if (!page) : return 0; - 3 3.7e-05 0 0 0 0 1 4.8e-05 : return (unsigned long) page_address(page); - 21 2.6e-04 0 0 0 0 5 2.4e-04 :} + 6 6.7e-05 0 0 0 0 3 1.4e-04 : return (unsigned long) page_address(page); + 22 2.5e-04 0 0 0 0 9 4.1e-04 :} : :EXPORT_SYMBOL(__get_free_pages); : :fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) - 41 5.1e-04 0 0 0 0 15 7.2e-04 :{ /* get_zeroed_page total: 631 0.0079 0 0 0 0 127 0.0061 */ + 48 5.4e-04 0 0 0 0 15 6.8e-04 :{ /* get_zeroed_page total: 708 0.0080 0 0 0 0 128 0.0058 */ : struct page * page; : : /* : * get_zeroed_page() returns a 32-bit address, which cannot represent : * a highmem page : */ - 300 0.0037 0 0 0 0 29 0.0014 : BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); + 357 0.0040 0 0 0 0 35 0.0016 : BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); : : page = alloc_pages(gfp_mask | __GFP_ZERO, 0); - 175 0.0022 0 0 0 0 47 0.0022 : if (page) - 27 3.4e-04 0 0 0 0 8 3.8e-04 : return (unsigned long) page_address(page); + 188 0.0021 0 0 0 0 40 0.0018 : if (page) + 34 3.8e-04 0 0 0 0 13 5.9e-04 : return (unsigned long) page_address(page); : return 0; - 15 1.9e-04 0 0 0 0 6 2.9e-04 :} + 10 1.1e-04 0 0 0 0 1 4.5e-05 :} : :EXPORT_SYMBOL(get_zeroed_page); : :void __pagevec_free(struct pagevec *pvec) - 311 0.0039 0 0 0 0 247 0.0118 :{ /* __pagevec_free total: 4735 0.0590 0 0 2 0.0702 3722 0.1778 */ - 153 0.0019 0 0 1 0.0351 212 0.0101 : int i = pagevec_count(pvec); + 281 0.0032 0 0 0 0 234 0.0106 :{ /* __pagevec_free total: 4857 0.0546 0 0 0 0 3652 0.1659 */ + 138 0.0016 0 0 0 0 202 0.0092 : int i = pagevec_count(pvec); : - 1578 0.0197 0 0 1 0.0351 935 0.0447 : while (--i >= 0) - 1543 0.0192 0 0 0 0 1297 0.0620 : free_hot_cold_page(pvec->pages[i], pvec->cold); - 1150 0.0143 0 0 0 0 1031 0.0493 :} + 1740 0.0196 0 0 0 0 1024 0.0465 : while (--i >= 0) + 1618 0.0182 0 0 0 0 1223 0.0555 : free_hot_cold_page(pvec->pages[i], pvec->cold); + 1080 0.0121 0 0 0 0 969 0.0440 :} : :fastcall void __free_pages(struct page *page, unsigned int order) - 96 0.0012 0 0 0 0 34 0.0016 :{ - 16 2.0e-04 0 0 0 0 9 4.3e-04 : if (put_page_testzero(page)) { /* __free_pages total: 273 0.0034 0 0 0 0 87 0.0042 */ - 14 1.7e-04 0 0 0 0 13 6.2e-04 : if (order == 0) - 28 3.5e-04 0 0 0 0 4 1.9e-04 : free_hot_page(page); + 114 0.0013 0 0 0 0 30 0.0014 :{ + 24 2.7e-04 0 0 0 0 12 5.4e-04 : if (put_page_testzero(page)) { /* __free_pages total: 323 0.0036 0 0 0 0 103 0.0047 */ + 22 2.5e-04 0 0 0 0 21 9.5e-04 : if (order == 0) + 20 2.2e-04 0 0 0 0 5 2.3e-04 : free_hot_page(page); : else : __free_pages_ok(page, order); : } - 36 4.5e-04 0 0 0 0 9 4.3e-04 :} + 46 5.2e-04 0 0 0 0 17 7.7e-04 :} : :EXPORT_SYMBOL(__free_pages); : :fastcall void free_pages(unsigned long addr, unsigned int order) - 25 3.1e-04 0 0 0 0 4 1.9e-04 :{ /* free_pages total: 507 0.0063 0 0 0 0 109 0.0052 */ - 178 0.0022 0 0 0 0 20 9.6e-04 : if (addr != 0) { - 121 0.0015 0 0 0 0 18 8.6e-04 : BUG_ON(!virt_addr_valid((void *)addr)); - 134 0.0017 0 0 0 0 47 0.0022 : __free_pages(virt_to_page((void *)addr), order); + 22 2.5e-04 0 0 0 0 6 2.7e-04 :{ /* free_pages total: 590 0.0066 0 0 0 0 114 0.0052 */ + 202 0.0023 0 0 0 0 21 9.5e-04 : if (addr != 0) { + 151 
0.0017 0 0 0 0 27 0.0012 : BUG_ON(!virt_addr_valid((void *)addr)); + 168 0.0019 0 0 0 0 43 0.0020 : __free_pages(virt_to_page((void *)addr), order); : } - 49 6.1e-04 0 0 0 0 20 9.6e-04 :} + 47 5.3e-04 0 0 0 0 17 7.7e-04 :} : :EXPORT_SYMBOL(free_pages); : @@ -1119,15 +1119,15 @@ : * Total amount of free (allocatable) RAM: : */ :unsigned int nr_free_pages(void) - 2 2.5e-05 0 0 0 0 0 0 :{ /* nr_free_pages total: 174 0.0022 0 0 0 0 46 0.0022 */ + 4 4.5e-05 0 0 0 0 0 0 :{ /* nr_free_pages total: 204 0.0023 0 0 0 0 31 0.0014 */ : unsigned int sum = 0; : struct zone *zone; : - 112 0.0014 0 0 0 0 11 5.3e-04 : for_each_zone(zone) - 7 8.7e-05 0 0 0 0 3 1.4e-04 : sum += zone->free_pages; + 125 0.0014 0 0 0 0 8 3.6e-04 : for_each_zone(zone) + 8 9.0e-05 0 0 0 0 1 4.5e-05 : sum += zone->free_pages; : : return sum; - 8 1.0e-04 0 0 0 0 5 2.4e-04 :} + 8 9.0e-05 0 0 0 0 3 1.4e-04 :} : :EXPORT_SYMBOL(nr_free_pages); : @@ -1215,31 +1215,31 @@ :#endif : :static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) - 2 2.5e-05 0 0 0 0 0 0 :{ + :{ : int cpu = 0; : - 155 0.0019 0 0 0 0 8 3.8e-04 : memset(ret, 0, nr * sizeof(unsigned long)); /* __get_page_state total: 502 0.0063 0 0 0 0 65 0.0031 */ + 136 0.0015 0 0 0 0 16 7.3e-04 : memset(ret, 0, nr * sizeof(unsigned long)); /* __get_page_state total: 497 0.0056 0 0 0 0 97 0.0044 */ : cpus_and(*cpumask, *cpumask, cpu_online_map); : : cpu = first_cpu(*cpumask); - 133 0.0017 0 0 0 0 17 8.1e-04 : while (cpu < NR_CPUS) { + 133 0.0015 0 0 0 0 21 9.5e-04 : while (cpu < NR_CPUS) { : unsigned long *in, *out, off; : : if (!cpu_isset(cpu, *cpumask)) : continue; : - 5 6.2e-05 0 0 0 0 2 9.6e-05 : in = (unsigned long *)&per_cpu(page_states, cpu); + 3 3.4e-05 0 0 0 0 2 9.1e-05 : in = (unsigned long *)&per_cpu(page_states, cpu); : : cpu = next_cpu(cpu, *cpumask); : - 37 4.6e-04 0 0 0 0 2 9.6e-05 : if (likely(cpu < NR_CPUS)) - 19 2.4e-04 0 0 0 0 0 0 : prefetch(&per_cpu(page_states, cpu)); + 46 5.2e-04 0 0 0 0 6 2.7e-04 : if (likely(cpu < NR_CPUS)) + 17 1.9e-04 0 0 0 0 3 1.4e-04 : prefetch(&per_cpu(page_states, cpu)); : - 9 1.1e-04 0 0 0 0 12 5.7e-04 : out = (unsigned long *)ret; - 90 0.0011 0 0 0 0 10 4.8e-04 : for (off = 0; off < nr; off++) - 16 2.0e-04 0 0 0 0 7 3.3e-04 : *out++ += *in++; + 7 7.9e-05 0 0 0 0 3 1.4e-04 : out = (unsigned long *)ret; + 87 9.8e-04 0 0 0 0 23 0.0010 : for (off = 0; off < nr; off++) + 18 2.0e-04 0 0 0 0 6 2.7e-04 : *out++ += *in++; : } - 5 6.2e-05 0 0 0 0 2 9.6e-05 :} + 5 5.6e-05 0 0 0 0 3 1.4e-04 :} : :void get_page_state_node(struct page_state *ret, int node) :{ @@ -1253,15 +1253,15 @@ :} : :void get_page_state(struct page_state *ret) - 6 7.5e-05 0 0 0 0 0 0 :{ /* get_page_state total: 113 0.0014 0 0 0 0 2 9.6e-05 */ + 6 6.7e-05 0 0 0 0 1 4.5e-05 :{ /* get_page_state total: 139 0.0016 0 0 0 0 13 5.9e-04 */ : int nr; - 104 0.0013 0 0 0 0 2 9.6e-05 : cpumask_t mask = CPU_MASK_ALL; + 124 0.0014 0 0 0 0 12 5.4e-04 : cpumask_t mask = CPU_MASK_ALL; : : nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); : nr /= sizeof(unsigned long); : - : __get_page_state(ret, nr + 1, &mask); - 3 3.7e-05 0 0 0 0 0 0 :} + 2 2.2e-05 0 0 0 0 0 0 : __get_page_state(ret, nr + 1, &mask); + 7 7.9e-05 0 0 0 0 0 0 :} : :void get_full_page_state(struct page_state *ret) :{ @@ -1271,90 +1271,90 @@ :} : :unsigned long read_page_state_offset(unsigned long offset) - :{ /* read_page_state_offset total: 63 7.9e-04 0 0 0 0 15 7.2e-04 */ + 5 5.6e-05 0 0 0 0 0 0 :{ /* read_page_state_offset total: 492 0.0055 0 0 0 0 141 0.0064 */ : unsigned long ret = 0; : int cpu; 
: - : for_each_online_cpu(cpu) { + 9 1.0e-04 0 0 0 0 4 1.8e-04 : for_each_online_cpu(cpu) { : unsigned long in; : - 7 8.7e-05 0 0 0 0 3 1.4e-04 : in = (unsigned long)&per_cpu(page_states, cpu) + offset; + 72 8.1e-04 0 0 0 0 27 0.0012 : in = (unsigned long)&per_cpu(page_states, cpu) + offset; : ret += *((unsigned long *)in); : } : return ret; - 8 1.0e-04 0 0 0 0 2 9.6e-05 :} + 42 4.7e-04 0 0 0 0 8 3.6e-04 :} : :void __mod_page_state_offset(unsigned long offset, unsigned long delta) - 3824 0.0476 0 0 0 0 867 0.0414 :{ /* __mod_page_state_offset total: 38591 0.4809 0 0 4 0.1405 12089 0.5775 */ + 4061 0.0457 0 0 0 0 739 0.0336 :{ /* __mod_page_state_offset total: 39156 0.4404 0 0 7 0.2376 11081 0.5032 */ : void *ptr; : - 20377 0.2539 0 0 3 0.1054 6680 0.3191 : ptr = &__get_cpu_var(page_states); - 3 3.7e-05 0 0 0 0 0 0 : *(unsigned long *)(ptr + offset) += delta; - 14387 0.1793 0 0 1 0.0351 4542 0.2170 :} + 21061 0.2369 0 0 4 0.1358 6001 0.2725 : ptr = &__get_cpu_var(page_states); + 3 3.4e-05 0 0 0 0 0 0 : *(unsigned long *)(ptr + offset) += delta; + 14031 0.1578 0 0 3 0.1018 4341 0.1971 :} :EXPORT_SYMBOL(__mod_page_state_offset); : :void mod_page_state_offset(unsigned long offset, unsigned long delta) - 8765 0.1092 0 0 1 0.0351 4117 0.1967 :{ /* mod_page_state_offset total: 45541 0.5675 0 0 7 0.2459 26089 1.2463 */ + 9159 0.1030 0 0 3 0.1018 4477 0.2033 :{ /* mod_page_state_offset total: 48186 0.5419 0 0 8 0.2716 26732 1.2140 */ : unsigned long flags; : void *ptr; : - 4649 0.0579 0 0 1 0.0351 2602 0.1243 : local_irq_save(flags); - 8648 0.1078 0 0 0 0 4080 0.1949 : ptr = &__get_cpu_var(page_states); - : *(unsigned long *)(ptr + offset) += delta; - 1222 0.0152 0 0 0 0 869 0.0415 : local_irq_restore(flags); - 22257 0.2773 0 0 5 0.1756 14421 0.6889 :} + 5338 0.0600 0 0 0 0 2955 0.1342 : local_irq_save(flags); + 9229 0.1038 0 0 0 0 4347 0.1974 : ptr = &__get_cpu_var(page_states); + 1 1.1e-05 0 0 0 0 0 0 : *(unsigned long *)(ptr + offset) += delta; + 1365 0.0154 0 0 0 0 872 0.0396 : local_irq_restore(flags); + 23094 0.2597 0 0 5 0.1697 14081 0.6395 :} :EXPORT_SYMBOL(mod_page_state_offset); : :void __get_zone_counts(unsigned long *active, unsigned long *inactive, : unsigned long *free, struct pglist_data *pgdat) - 0 0 0 0 0 0 1 4.8e-05 :{ /* __get_zone_counts total: 600 0.0075 0 0 0 0 38 0.0018 */ + 2 2.2e-05 0 0 0 0 0 0 :{ /* __get_zone_counts total: 664 0.0075 0 0 0 0 68 0.0031 */ : struct zone *zones = pgdat->node_zones; : int i; : - 84 0.0010 0 0 0 0 9 4.3e-04 : *active = 0; - : *inactive = 0; - 4 5.0e-05 0 0 0 0 1 4.8e-05 : *free = 0; - 9 1.1e-04 0 0 0 0 0 0 : for (i = 0; i < MAX_NR_ZONES; i++) { - 225 0.0028 0 0 0 0 19 9.1e-04 : *active += zones[i].nr_active; - 2 2.5e-05 0 0 0 0 0 0 : *inactive += zones[i].nr_inactive; - 2 2.5e-05 0 0 0 0 2 9.6e-05 : *free += zones[i].free_pages; + 74 8.3e-04 0 0 0 0 8 3.6e-04 : *active = 0; + 1 1.1e-05 0 0 0 0 0 0 : *inactive = 0; + 2 2.2e-05 0 0 0 0 0 0 : *free = 0; + 10 1.1e-04 0 0 0 0 2 9.1e-05 : for (i = 0; i < MAX_NR_ZONES; i++) { + 254 0.0029 0 0 0 0 29 0.0013 : *active += zones[i].nr_active; + : *inactive += zones[i].nr_inactive; + 7 7.9e-05 0 0 0 0 4 1.8e-04 : *free += zones[i].free_pages; : } - 274 0.0034 0 0 0 0 6 2.9e-04 :} + 314 0.0035 0 0 0 0 25 0.0011 :} : :void get_zone_counts(unsigned long *active, : unsigned long *inactive, unsigned long *free) - 135 0.0017 0 0 0 0 6 2.9e-04 :{ /* get_zone_counts total: 289 0.0036 0 0 0 0 27 0.0013 */ + 129 0.0015 0 0 0 0 7 3.2e-04 :{ /* get_zone_counts total: 284 0.0032 0 0 0 0 33 0.0015 */ : struct 
pglist_data *pgdat;
 :
 : *active = 0;
- 5 6.2e-05 0 0 0 0 1 4.8e-05 : *inactive = 0;
+ 4 4.5e-05 0 0 0 0 0 0 : *inactive = 0;
 : *free = 0;
- 66 8.2e-04 0 0 0 0 7 3.3e-04 : for_each_pgdat(pgdat) {
+ 83 9.3e-04 0 0 0 0 9 4.1e-04 : for_each_pgdat(pgdat) {
 : unsigned long l, m, n;
- 24 3.0e-04 0 0 0 0 1 4.8e-05 : __get_zone_counts(&l, &m, &n, pgdat);
- 2 2.5e-05 0 0 0 0 1 4.8e-05 : *active += l;
- 4 5.0e-05 0 0 0 0 0 0 : *inactive += m;
- 2 2.5e-05 0 0 0 0 1 4.8e-05 : *free += n;
+ 11 1.2e-04 0 0 0 0 3 1.4e-04 : __get_zone_counts(&l, &m, &n, pgdat);
+ 3 3.4e-05 0 0 0 0 1 4.5e-05 : *active += l;
+ 2 2.2e-05 0 0 0 0 1 4.5e-05 : *inactive += m;
+ 1 1.1e-05 0 0 0 0 1 4.5e-05 : *free += n;
 : }
- 51 6.4e-04 0 0 0 0 10 4.8e-04 :}
+ 51 5.7e-04 0 0 0 0 11 5.0e-04 :}
 :
 :void si_meminfo(struct sysinfo *val)
- 151 0.0019 0 0 0 0 5 2.4e-04 :{ /* si_meminfo total: 202 0.0025 0 0 0 0 13 6.2e-04 */
- 2 2.5e-05 0 0 0 0 0 0 : val->totalram = totalram_pages;
- 1 1.2e-05 0 0 0 0 0 0 : val->sharedram = 0;
- 3 3.7e-05 0 0 0 0 1 4.8e-05 : val->freeram = nr_free_pages();
- 11 1.4e-04 0 0 0 0 3 1.4e-04 : val->bufferram = nr_blockdev_pages();
+ 122 0.0014 0 0 0 0 6 2.7e-04 :{ /* si_meminfo total: 181 0.0020 0 0 0 0 12 5.4e-04 */
+ 1 1.1e-05 0 0 0 0 0 0 : val->totalram = totalram_pages;
+ : val->sharedram = 0;
+ 4 4.5e-05 0 0 0 0 1 4.5e-05 : val->freeram = nr_free_pages();
+ 7 7.9e-05 0 0 0 0 1 4.5e-05 : val->bufferram = nr_blockdev_pages();
 :#ifdef CONFIG_HIGHMEM
 : val->totalhigh = totalhigh_pages;
 : val->freehigh = nr_free_highpages();
 :#else
 : val->totalhigh = 0;
- 2 2.5e-05 0 0 0 0 0 0 : val->freehigh = 0;
+ : val->freehigh = 0;
 :#endif
 : val->mem_unit = PAGE_SIZE;
- 3 3.7e-05 0 0 0 0 0 0 : if (vx_flags(VXF_VIRT_MEM, 0))
+ 6 6.7e-05 0 0 0 0 2 9.1e-05 : if (vx_flags(VXF_VIRT_MEM, 0))
 : vx_vsi_meminfo(val);
- 29 3.6e-04 0 0 0 0 4 1.9e-04 :}
+ 41 4.6e-04 0 0 0 0 2 9.1e-05 :}
 :
 :EXPORT_SYMBOL(si_meminfo);
 :
@@ -2738,12 +2738,12 @@
/*
 * Total samples for file : "mm/page_alloc.c"
 *
- * 377305 4.7014 0 0 71 2.4939 202368 9.6677
+ * 406881 4.5760 0 0 78 2.6477 200951 9.1263
 */
/*
- * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0
+ * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2
 *
 * Interpretation of command line:
 * Output annotated source file with samples
diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_io.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_io.c
--- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_io.c 2006-03-12 07:18:54.000000000 -0500
+++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page_io.c 2006-03-12 07:20:05.000000000 -0500
@@ -21,50 +21,50 @@
 :
 :static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
 : struct page *page, bio_end_io_t end_io)
- :{ /* get_swap_bio total: 4 5.0e-05 0 0 0 0 0 0 */
+ 35 3.9e-04 0 0 0 0 16 7.3e-04 :{ /* get_swap_bio total: 226 0.0025 0 0 0 0 79 0.0036 */
 : struct bio *bio;
 :
- 1 1.2e-05 0 0 0 0 0 0 : bio = bio_alloc(gfp_flags, 1);
- : if (bio) {
+ 2 2.2e-05 0 0 0 0 3 1.4e-04 : bio = bio_alloc(gfp_flags, 1);
+ 21 2.4e-04 0 0 0 0 1 4.5e-05 : if (bio) {
 : struct swap_info_struct *sis;
 : swp_entry_t entry = { .val = index, };
 :
- : sis = get_swap_info_struct(swp_type(entry));
- 1 1.2e-05 0 0 0 0 0 0 : bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
+ 16 1.8e-04 0 0 0 0 0
0 : sis = get_swap_info_struct(swp_type(entry)); + 79 8.9e-04 0 0 0 0 5 2.3e-04 : bio->bi_sector = map_swap_page(sis, swp_offset(entry)) * : (PAGE_SIZE >> 9); : bio->bi_bdev = sis->bdev; - : bio->bi_io_vec[0].bv_page = page; - : bio->bi_io_vec[0].bv_len = PAGE_SIZE; + 2 2.2e-05 0 0 0 0 0 0 : bio->bi_io_vec[0].bv_page = page; + 6 6.7e-05 0 0 0 0 5 2.3e-04 : bio->bi_io_vec[0].bv_len = PAGE_SIZE; : bio->bi_io_vec[0].bv_offset = 0; - 1 1.2e-05 0 0 0 0 0 0 : bio->bi_vcnt = 1; + 7 7.9e-05 0 0 0 0 4 1.8e-04 : bio->bi_vcnt = 1; : bio->bi_idx = 0; - : bio->bi_size = PAGE_SIZE; - : bio->bi_end_io = end_io; + 9 1.0e-04 0 0 0 0 16 7.3e-04 : bio->bi_size = PAGE_SIZE; + 2 2.2e-05 0 0 0 0 0 0 : bio->bi_end_io = end_io; : } : return bio; - 1 1.2e-05 0 0 0 0 0 0 :} + 47 5.3e-04 0 0 0 0 29 0.0013 :} : :static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err) - 3 3.7e-05 0 0 0 0 0 0 :{ /* end_swap_bio_write total: 3 3.7e-05 0 0 0 0 0 0 */ + 176 0.0020 0 0 0 0 47 0.0021 :{ /* end_swap_bio_write total: 263 0.0030 0 0 0 0 55 0.0025 */ : const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); : struct page *page = bio->bi_io_vec[0].bv_page; : - : if (bio->bi_size) + 3 3.4e-05 0 0 0 0 0 0 : if (bio->bi_size) : return 1; : : if (!uptodate) : SetPageError(page); : end_page_writeback(page); - : bio_put(bio); + 72 8.1e-04 0 0 0 0 7 3.2e-04 : bio_put(bio); : return 0; - :} + 11 1.2e-04 0 0 0 0 0 0 :} : :static int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err) - :{ + 73 8.2e-04 0 0 0 0 14 6.4e-04 :{ /* end_swap_bio_read total: 102 0.0011 0 0 0 0 27 0.0012 */ : const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); : struct page *page = bio->bi_io_vec[0].bv_page; : - : if (bio->bi_size) + 0 0 0 0 0 0 1 4.5e-05 : if (bio->bi_size) : return 1; : : if (!uptodate) { @@ -73,25 +73,25 @@ : } else { : SetPageUptodate(page); : } - : unlock_page(page); - : bio_put(bio); + 11 1.2e-04 0 0 0 0 3 1.4e-04 : unlock_page(page); + 14 1.6e-04 0 0 0 0 7 3.2e-04 : bio_put(bio); : return 0; - :} + 3 3.4e-05 0 0 0 0 2 9.1e-05 :} : :/* : * We may have stale swap cache pages in memory: notice : * them here and get rid of the unnecessary final write. 
: */ :int swap_writepage(struct page *page, struct writeback_control *wbc) - :{ /* swap_writepage total: 0 0 0 0 0 0 1 4.8e-05 */ + 4 4.5e-05 0 0 0 0 0 0 :{ /* swap_writepage total: 100 0.0011 0 0 0 0 53 0.0024 */ : struct bio *bio; : int ret = 0, rw = WRITE; : - : if (remove_exclusive_swap_page(page)) { + 2 2.2e-05 0 0 0 0 3 1.4e-04 : if (remove_exclusive_swap_page(page)) { : unlock_page(page); : goto out; : } - : bio = get_swap_bio(GFP_NOIO, page_private(page), page, + 14 1.6e-04 0 0 0 0 19 8.6e-04 : bio = get_swap_bio(GFP_NOIO, page_private(page), page, : end_swap_bio_write); : if (bio == NULL) { : set_page_dirty(page); @@ -99,35 +99,35 @@ : ret = -ENOMEM; : goto out; : } - : if (wbc->sync_mode == WB_SYNC_ALL) + 2 2.2e-05 0 0 0 0 3 1.4e-04 : if (wbc->sync_mode == WB_SYNC_ALL) : rw |= (1 << BIO_RW_SYNC); - : inc_page_state(pswpout); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : inc_page_state(pswpout); : set_page_writeback(page); - : unlock_page(page); - : submit_bio(rw, bio); + 40 4.5e-04 0 0 0 0 21 9.5e-04 : unlock_page(page); + 4 4.5e-05 0 0 0 0 0 0 : submit_bio(rw, bio); :out: : return ret; - :} + 29 3.3e-04 0 0 0 0 4 1.8e-04 :} : :int swap_readpage(struct file *file, struct page *page) - :{ + 15 1.7e-04 0 0 0 0 2 9.1e-05 :{ /* swap_readpage total: 50 5.6e-04 0 0 0 0 11 5.0e-04 */ : struct bio *bio; : int ret = 0; : - : BUG_ON(!PageLocked(page)); + 1 1.1e-05 0 0 0 0 0 0 : BUG_ON(!PageLocked(page)); : ClearPageUptodate(page); - : bio = get_swap_bio(GFP_KERNEL, page_private(page), page, + 12 1.3e-04 0 0 0 0 6 2.7e-04 : bio = get_swap_bio(GFP_KERNEL, page_private(page), page, : end_swap_bio_read); - : if (bio == NULL) { + 8 9.0e-05 0 0 0 0 0 0 : if (bio == NULL) { : unlock_page(page); : ret = -ENOMEM; : goto out; : } - : inc_page_state(pswpin); - : submit_bio(READ, bio); + 3 3.4e-05 0 0 0 0 0 0 : inc_page_state(pswpin); + 2 2.2e-05 0 0 0 0 0 0 : submit_bio(READ, bio); :out: : return ret; - :} + 4 4.5e-05 0 0 0 0 3 1.4e-04 :} : :#ifdef CONFIG_SOFTWARE_SUSPEND :/* @@ -163,12 +163,12 @@ /* * Total samples for file : "mm/page_io.c" * - * 7 8.7e-05 0 0 0 0 0 0 + * 731 0.0082 0 0 0 0 222 0.0101 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page-writeback.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page-writeback.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page-writeback.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/page-writeback.c 2006-03-12 07:20:05.000000000 -0500 @@ -108,12 +108,12 @@ :}; : :static void get_writeback_state(struct writeback_state *wbs) - 10 1.2e-04 0 0 0 0 0 0 :{ /* get_writeback_state total: 11 1.4e-04 0 0 0 0 0 0 */ - : wbs->nr_dirty = read_page_state(nr_dirty); - : wbs->nr_unstable = read_page_state(nr_unstable); - 1 1.2e-05 0 0 0 0 0 0 : wbs->nr_mapped = read_page_state(nr_mapped); - : wbs->nr_writeback = read_page_state(nr_writeback); - :} + 31 3.5e-04 0 0 0 0 2 9.1e-05 :{ /* get_writeback_state total: 42 4.7e-04 0 0 0 0 10 4.5e-04 */ + 1 1.1e-05 0 0 0 0 1 4.5e-05 : wbs->nr_dirty = read_page_state(nr_dirty); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : wbs->nr_unstable = 
read_page_state(nr_unstable); + : wbs->nr_mapped = read_page_state(nr_mapped); + 4 4.5e-05 0 0 0 0 4 1.8e-04 : wbs->nr_writeback = read_page_state(nr_writeback); + 2 2.2e-05 0 0 0 0 2 9.1e-05 :} : :/* : * Work out the current dirty-memory clamping and background writeout @@ -135,13 +135,13 @@ :static void :get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty, : struct address_space *mapping) - 8 1.0e-04 0 0 0 0 1 4.8e-05 :{ /* get_dirty_limits total: 26 3.2e-04 0 0 0 0 4 1.9e-04 */ + 19 2.1e-04 0 0 0 0 3 1.4e-04 :{ /* get_dirty_limits total: 217 0.0024 0 0 0 0 45 0.0020 */ : int background_ratio; /* Percentages */ : int dirty_ratio; : int unmapped_ratio; : long background; : long dirty; - : unsigned long available_memory = total_pages; + 1 1.1e-05 0 0 0 0 0 0 : unsigned long available_memory = total_pages; : struct task_struct *tsk; : : get_writeback_state(wbs); @@ -159,7 +159,7 @@ : unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages; : : dirty_ratio = vm_dirty_ratio; - 7 8.7e-05 0 0 0 0 1 4.8e-05 : if (dirty_ratio > unmapped_ratio / 2) + 95 0.0011 0 0 0 0 27 0.0012 : if (dirty_ratio > unmapped_ratio / 2) : dirty_ratio = unmapped_ratio / 2; : : if (dirty_ratio < 5) @@ -167,18 +167,18 @@ : : background_ratio = dirty_background_ratio; : if (background_ratio >= dirty_ratio) - : background_ratio = dirty_ratio / 2; + 19 2.1e-04 0 0 0 0 6 2.7e-04 : background_ratio = dirty_ratio / 2; : - 1 1.2e-05 0 0 0 0 0 0 : background = (background_ratio * available_memory) / 100; - 4 5.0e-05 0 0 0 0 2 9.6e-05 : dirty = (dirty_ratio * available_memory) / 100; + 1 1.1e-05 0 0 0 0 0 0 : background = (background_ratio * available_memory) / 100; + 73 8.2e-04 0 0 0 0 9 4.1e-04 : dirty = (dirty_ratio * available_memory) / 100; : tsk = current; : if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { : background += background / 4; : dirty += dirty / 4; : } : *pbackground = background; - 4 5.0e-05 0 0 0 0 0 0 : *pdirty = dirty; - 2 2.5e-05 0 0 0 0 0 0 :} + 5 5.6e-05 0 0 0 0 0 0 : *pdirty = dirty; + 3 3.4e-05 0 0 0 0 0 0 :} : :/* : * balance_dirty_pages() must be called by processes which are generating dirty @@ -194,7 +194,7 @@ : long background_thresh; : long dirty_thresh; : unsigned long pages_written = 0; - : unsigned long write_chunk = sync_writeback_pages(); + 1 1.1e-05 0 0 0 0 0 0 : unsigned long write_chunk = sync_writeback_pages(); : : struct backing_dev_info *bdi = mapping->backing_dev_info; : @@ -204,12 +204,12 @@ : .sync_mode = WB_SYNC_NONE, : .older_than_this = NULL, : .nr_to_write = write_chunk, - : }; + 1 1.1e-05 0 0 0 0 0 0 : }; : : get_dirty_limits(&wbs, &background_thresh, : &dirty_thresh, mapping); - 1 1.2e-05 0 0 0 0 0 0 : nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable; - 0 0 0 0 0 0 1 4.8e-05 : if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) + 1 1.1e-05 0 0 0 0 0 0 : nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable; + : if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) : break; : : if (!dirty_exceeded) @@ -238,7 +238,7 @@ : if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded) : dirty_exceeded = 0; : - 3 3.7e-05 0 0 0 0 0 0 : if (writeback_in_progress(bdi)) + 3 3.4e-05 0 0 0 0 0 0 : if (writeback_in_progress(bdi)) : return; /* pdflush is already working this queue */ : : /* @@ -268,48 +268,48 @@ : * from overshooting the limit by (ratelimit_pages) each. 
: */ :void balance_dirty_pages_ratelimited(struct address_space *mapping) - 81 0.0010 0 0 0 0 38 0.0018 :{ /* balance_dirty_pages_ratelimited total: 677 0.0084 0 0 0 0 173 0.0083 */ + 67 7.5e-04 0 0 0 0 27 0.0012 :{ /* balance_dirty_pages_ratelimited total: 760 0.0085 0 0 1 0.0339 157 0.0071 */ : static DEFINE_PER_CPU(int, ratelimits) = 0; : long ratelimit; : : ratelimit = ratelimit_pages; - 139 0.0017 0 0 0 0 20 9.6e-04 : if (dirty_exceeded) + 168 0.0019 0 0 0 0 27 0.0012 : if (dirty_exceeded) : ratelimit = 8; : : /* : * Check the rate limiting. Also, we do not want to throttle real-time : * tasks in balance_dirty_pages(). Period. : */ - 80 1.0e-03 0 0 0 0 32 0.0015 : if (get_cpu_var(ratelimits)++ >= ratelimit) { - 1 1.2e-05 0 0 0 0 0 0 : __get_cpu_var(ratelimits) = 0; + 69 7.8e-04 0 0 1 0.0339 22 1.0e-03 : if (get_cpu_var(ratelimits)++ >= ratelimit) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : __get_cpu_var(ratelimits) = 0; : put_cpu_var(ratelimits); : balance_dirty_pages(mapping); : return; : } : put_cpu_var(ratelimits); - 372 0.0046 0 0 0 0 82 0.0039 :} + 449 0.0050 0 0 0 0 80 0.0036 :} :EXPORT_SYMBOL(balance_dirty_pages_ratelimited); : :void throttle_vm_writeout(void) - 0 0 0 0 0 0 1 4.8e-05 :{ /* throttle_vm_writeout total: 2 2.5e-05 0 0 0 0 2 9.6e-05 */ + 19 2.1e-04 0 0 0 0 4 1.8e-04 :{ /* throttle_vm_writeout total: 83 9.3e-04 0 0 0 0 23 0.0010 */ : struct writeback_state wbs; : long background_thresh; : long dirty_thresh; : : for ( ; ; ) { - : get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); : : /* : * Boost the allowable dirty threshold a bit for page : * allocators so they don't get DoS'ed by heavy writers : */ - 2 2.5e-05 0 0 0 0 1 4.8e-05 : dirty_thresh += dirty_thresh / 10; /* wheeee... */ + 53 6.0e-04 0 0 0 0 17 7.7e-04 : dirty_thresh += dirty_thresh / 10; /* wheeee... */ : - : if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh) + 2 2.2e-05 0 0 0 0 0 0 : if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh) : break; : blk_congestion_wait(WRITE, HZ/10); : } - :} + 5 5.6e-05 0 0 0 0 1 4.5e-05 :} : : :/* @@ -317,7 +317,7 @@ : * memory is less than the background threshold, or until we're all clean. 
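balance_dirty_pages_ratelimited() above spends most of its samples on the per-CPU counter test, which is the point of the function: the expensive balance_dirty_pages() call is amortized over many page dirtyings. A single-threaded sketch of the gate follows — the per-CPU machinery is elided and 32 is an assumed stand-in for the computed ratelimit_pages.

#include <stdio.h>

/* Single-CPU model of the ratelimit gate in
 * balance_dirty_pages_ratelimited(): only every Nth call falls
 * through to the expensive balancing path, and the budget shrinks
 * to 8 once the dirty threshold has been exceeded. */
static int ratelimits;          /* per-CPU variable in the kernel */
static int dirty_exceeded;      /* set once thresholds are crossed */

static int should_balance(void)
{
    long ratelimit = dirty_exceeded ? 8 : 32;   /* 32 is illustrative */
    if (ratelimits++ >= ratelimit) {
        ratelimits = 0;
        return 1;               /* caller would run balance_dirty_pages() */
    }
    return 0;
}

int main(void)
{
    int balanced = 0;
    for (int i = 0; i < 1000; i++)
        balanced += should_balance();
    printf("balanced %d times out of 1000 page dirtyings\n", balanced);
    return 0;
}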
: */ :static void background_writeout(unsigned long _min_pages) - :{ /* background_writeout total: 5 6.2e-05 0 0 0 0 0 0 */ + :{ /* background_writeout total: 8 9.0e-05 0 0 0 0 4 1.8e-04 */ : long min_pages = _min_pages; : struct writeback_control wbc = { : .bdi = NULL, @@ -325,15 +325,15 @@ : .older_than_this = NULL, : .nr_to_write = 0, : .nonblocking = 1, - : }; + 4 4.5e-05 0 0 0 0 2 9.1e-05 : }; : : for ( ; ; ) { : struct writeback_state wbs; : long background_thresh; : long dirty_thresh; : - 2 2.5e-05 0 0 0 0 0 0 : get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); - : if (wbs.nr_dirty + wbs.nr_unstable < background_thresh + 1 1.1e-05 0 0 0 0 0 0 : get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); + 1 1.1e-05 0 0 0 0 0 0 : if (wbs.nr_dirty + wbs.nr_unstable < background_thresh : && min_pages <= 0) : break; : wbc.encountered_congestion = 0; @@ -341,10 +341,10 @@ : wbc.pages_skipped = 0; : writeback_inodes(&wbc); : min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; - 1 1.2e-05 0 0 0 0 0 0 : if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { + 1 1.1e-05 0 0 0 0 0 0 : if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { : /* Wrote less than expected */ : blk_congestion_wait(WRITE, HZ/10); - 2 2.5e-05 0 0 0 0 0 0 : if (!wbc.encountered_congestion) + 1 1.1e-05 0 0 0 0 2 9.1e-05 : if (!wbc.encountered_congestion) : break; : } : } @@ -388,7 +388,7 @@ : * all dirty pages if they are all attached to "old" mappings. : */ :static void wb_kupdate(unsigned long arg) - :{ /* wb_kupdate total: 14 1.7e-04 0 0 0 0 2 9.6e-05 */ + :{ /* wb_kupdate total: 15 1.7e-04 0 0 0 0 1 4.5e-05 */ : unsigned long oldest_jif; : unsigned long start_jif; : unsigned long next_jif; @@ -401,27 +401,27 @@ : .nr_to_write = 0, : .nonblocking = 1, : .for_kupdate = 1, - 4 5.0e-05 0 0 0 0 1 4.8e-05 : }; + 1 1.1e-05 0 0 0 0 0 0 : }; : : sync_supers(); : : get_writeback_state(&wbs); - : oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100; : start_jif = jiffies; : next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100; - : nr_to_write = wbs.nr_dirty + wbs.nr_unstable + + 1 1.1e-05 0 0 0 0 0 0 : nr_to_write = wbs.nr_dirty + wbs.nr_unstable + : (inodes_stat.nr_inodes - inodes_stat.nr_unused); : while (nr_to_write > 0) { - 5 6.2e-05 0 0 0 0 0 0 : wbc.encountered_congestion = 0; + 5 5.6e-05 0 0 0 0 0 0 : wbc.encountered_congestion = 0; : wbc.nr_to_write = MAX_WRITEBACK_PAGES; : writeback_inodes(&wbc); - 1 1.2e-05 0 0 0 0 0 0 : if (wbc.nr_to_write > 0) { + 1 1.1e-05 0 0 0 0 0 0 : if (wbc.nr_to_write > 0) { : if (wbc.encountered_congestion) - 1 1.2e-05 0 0 0 0 1 4.8e-05 : blk_congestion_wait(WRITE, HZ/10); + : blk_congestion_wait(WRITE, HZ/10); : else : break; /* All the old data is written */ : } - 3 3.7e-05 0 0 0 0 0 0 : nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; + 6 6.7e-05 0 0 0 0 0 0 : nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; : } : if (time_before(next_jif, jiffies + HZ)) : next_jif = jiffies + HZ; @@ -446,8 +446,8 @@ :} : :static void wb_timer_fn(unsigned long unused) - :{ /* wb_timer_fn total: 1 1.2e-05 0 0 0 0 0 0 */ - 1 1.2e-05 0 0 0 0 0 0 : if (pdflush_operation(wb_kupdate, 0) < 0) + :{ + : if (pdflush_operation(wb_kupdate, 0) < 0) : mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */ :} : @@ -550,19 +550,19 @@ :} : :int do_writepages(struct address_space *mapping, struct writeback_control *wbc) - 88 0.0011 0 0 0 0 22 0.0011 :{ /* do_writepages total: 325 0.0040 0 0 
0 0 97 0.0046 */ + 64 7.2e-04 0 0 0 0 4 1.8e-04 :{ /* do_writepages total: 271 0.0030 0 0 0 0 72 0.0033 */ : int ret; : - 55 6.9e-04 0 0 0 0 11 5.3e-04 : if (wbc->nr_to_write <= 0) + 56 6.3e-04 0 0 0 0 12 5.4e-04 : if (wbc->nr_to_write <= 0) : return 0; : wbc->for_writepages = 1; - 115 0.0014 0 0 0 0 21 0.0010 : if (mapping->a_ops->writepages) + 101 0.0011 0 0 0 0 15 6.8e-04 : if (mapping->a_ops->writepages) : ret = mapping->a_ops->writepages(mapping, wbc); : else : ret = generic_writepages(mapping, wbc); - 3 3.7e-05 0 0 0 0 2 9.6e-05 : wbc->for_writepages = 0; + 5 5.6e-05 0 0 0 0 0 0 : wbc->for_writepages = 0; : return ret; - 62 7.7e-04 0 0 0 0 41 0.0020 :} + 43 4.8e-04 0 0 0 0 41 0.0019 :} : :/** : * write_one_page - write out a single page and optionally wait on I/O @@ -620,33 +620,33 @@ : * mapping by re-checking page_mapping() insode tree_lock. : */ :int __set_page_dirty_nobuffers(struct page *page) - 144 0.0018 0 0 0 0 51 0.0024 :{ /* __set_page_dirty_nobuffers total: 1446 0.0180 0 0 1 0.0351 540 0.0258 */ + 245 0.0028 0 0 0 0 55 0.0025 :{ /* __set_page_dirty_nobuffers total: 1565 0.0176 0 0 1 0.0339 551 0.0250 */ : int ret = 0; : - 39 4.9e-04 0 0 0 0 28 0.0013 : if (!TestSetPageDirty(page)) { + 45 5.1e-04 0 0 0 0 25 0.0011 : if (!TestSetPageDirty(page)) { : struct address_space *mapping = page_mapping(page); : struct address_space *mapping2; : - 62 7.7e-04 0 0 0 0 76 0.0036 : if (mapping) { - 1 1.2e-05 0 0 0 0 0 0 : write_lock_irq(&mapping->tree_lock); + 75 8.4e-04 0 0 0 0 85 0.0039 : if (mapping) { + 1 1.1e-05 0 0 0 0 0 0 : write_lock_irq(&mapping->tree_lock); : mapping2 = page_mapping(page); - 144 0.0018 0 0 0 0 31 0.0015 : if (mapping2) { /* Race with truncate? */ - 6 7.5e-05 0 0 0 0 2 9.6e-05 : BUG_ON(mapping2 != mapping); - 57 7.1e-04 0 0 0 0 34 0.0016 : if (mapping_cap_account_dirty(mapping)) - 18 2.2e-04 0 0 0 0 1 4.8e-05 : inc_page_state(nr_dirty); - 25 3.1e-04 0 0 0 0 8 3.8e-04 : radix_tree_tag_set(&mapping->page_tree, + 157 0.0018 0 0 0 0 31 0.0014 : if (mapping2) { /* Race with truncate? */ + 9 1.0e-04 0 0 0 0 1 4.5e-05 : BUG_ON(mapping2 != mapping); + 61 6.9e-04 0 0 0 0 33 0.0015 : if (mapping_cap_account_dirty(mapping)) + 16 1.8e-04 0 0 0 0 1 4.5e-05 : inc_page_state(nr_dirty); + 38 4.3e-04 0 0 0 0 7 3.2e-04 : radix_tree_tag_set(&mapping->page_tree, : page_index(page), PAGECACHE_TAG_DIRTY); : } - 313 0.0039 0 0 0 0 74 0.0035 : write_unlock_irq(&mapping->tree_lock); - 57 7.1e-04 0 0 1 0.0351 64 0.0031 : if (mapping->host) { + 257 0.0029 0 0 1 0.0339 114 0.0052 : write_unlock_irq(&mapping->tree_lock); + 67 7.5e-04 0 0 0 0 49 0.0022 : if (mapping->host) { : /* !PageAnon && !swapper_space */ - 10 1.2e-04 0 0 0 0 6 2.9e-04 : __mark_inode_dirty(mapping->host, + 12 1.3e-04 0 0 0 0 7 3.2e-04 : __mark_inode_dirty(mapping->host, : I_DIRTY_PAGES); : } : } : } : return ret; - 47 5.9e-04 0 0 0 0 7 3.3e-04 :} + 57 6.4e-04 0 0 0 0 8 3.6e-04 :} :EXPORT_SYMBOL(__set_page_dirty_nobuffers); : :/* @@ -666,19 +666,19 @@ : * just fall through and assume that it wants buffer_heads. 
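do_writepages() in the hunk above, and set_page_dirty() in the next one, share the same idiom: indirect dispatch through the address_space operations table, with a generic fallback when the filesystem supplies no method. A freestanding model of the pattern, with stand-in types (assumed names, not the kernel's full definitions):

#include <stdio.h>

/* Model of the a_ops dispatch in do_writepages(): prefer the
 * filesystem-provided method, fall back to the generic one. */
struct writeback_control { long nr_to_write; };
struct address_space_ops {
    int (*writepages)(struct writeback_control *wbc);  /* may be NULL */
};

static int generic_writepages(struct writeback_control *wbc)
{
    printf("generic path, nr_to_write=%ld\n", wbc->nr_to_write);
    return 0;
}

static int do_writepages(struct address_space_ops *a_ops,
                         struct writeback_control *wbc)
{
    if (wbc->nr_to_write <= 0)
        return 0;               /* nothing asked for: cheap early exit */
    if (a_ops->writepages)
        return a_ops->writepages(wbc);
    return generic_writepages(wbc);
}

int main(void)
{
    struct address_space_ops ops = { .writepages = NULL };
    struct writeback_control wbc = { .nr_to_write = 1024 };
    return do_writepages(&ops, &wbc);
}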
: */ :int fastcall set_page_dirty(struct page *page) - :{ + 10 1.1e-04 0 0 0 0 0 0 :{ : struct address_space *mapping = page_mapping(page); : : if (likely(mapping)) { - : int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; + 1 1.1e-05 0 0 0 0 3 1.4e-04 : int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; : if (spd) - : return (*spd)(page); - : return __set_page_dirty_buffers(page); + 2 2.2e-05 0 0 0 0 0 0 : return (*spd)(page); + 6 6.7e-05 0 0 0 0 2 9.1e-05 : return __set_page_dirty_buffers(page); : } : if (!PageDirty(page)) : SetPageDirty(page); : return 0; - :} + 6 6.7e-05 0 0 0 0 1 4.5e-05 :} :EXPORT_SYMBOL(set_page_dirty); : :/* @@ -707,26 +707,26 @@ : * Returns true if the page was previously dirty. : */ :int test_clear_page_dirty(struct page *page) - 107 0.0013 0 0 0 0 41 0.0020 :{ /* test_clear_page_dirty total: 1198 0.0149 0 0 0 0 455 0.0217 */ + 195 0.0022 0 0 0 0 36 0.0016 :{ /* test_clear_page_dirty total: 1850 0.0208 0 0 0 0 753 0.0342 */ : struct address_space *mapping = page_mapping(page); : unsigned long flags; : - 44 5.5e-04 0 0 0 0 7 3.3e-04 : if (mapping) { - 466 0.0058 0 0 0 0 185 0.0088 : write_lock_irqsave(&mapping->tree_lock, flags); - 10 1.2e-04 0 0 0 0 6 2.9e-04 : if (TestClearPageDirty(page)) { - 34 4.2e-04 0 0 0 0 4 1.9e-04 : radix_tree_tag_clear(&mapping->page_tree, + 50 5.6e-04 0 0 0 0 22 1.0e-03 : if (mapping) { + 637 0.0072 0 0 0 0 282 0.0128 : write_lock_irqsave(&mapping->tree_lock, flags); + 14 1.6e-04 0 0 0 0 3 1.4e-04 : if (TestClearPageDirty(page)) { + 25 2.8e-04 0 0 0 0 3 1.4e-04 : radix_tree_tag_clear(&mapping->page_tree, : page_index(page), : PAGECACHE_TAG_DIRTY); - 25 3.1e-04 0 0 0 0 20 9.6e-04 : write_unlock_irqrestore(&mapping->tree_lock, flags); - 41 5.1e-04 0 0 0 0 20 9.6e-04 : if (mapping_cap_account_dirty(mapping)) - 5 6.2e-05 0 0 0 0 0 0 : dec_page_state(nr_dirty); + 26 2.9e-04 0 0 0 0 16 7.3e-04 : write_unlock_irqrestore(&mapping->tree_lock, flags); + 64 7.2e-04 0 0 0 0 38 0.0017 : if (mapping_cap_account_dirty(mapping)) + 6 6.7e-05 0 0 0 0 1 4.5e-05 : dec_page_state(nr_dirty); : return 1; : } - 213 0.0027 0 0 0 0 115 0.0055 : write_unlock_irqrestore(&mapping->tree_lock, flags); + 238 0.0027 0 0 0 0 233 0.0106 : write_unlock_irqrestore(&mapping->tree_lock, flags); : return 0; : } : return TestClearPageDirty(page); - 90 0.0011 0 0 0 0 18 8.6e-04 :} + 334 0.0038 0 0 0 0 54 0.0025 :} :EXPORT_SYMBOL(test_clear_page_dirty); : :/* @@ -744,67 +744,67 @@ : * unfortunate, but it only exists while the page is locked. 
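test_clear_page_dirty() above keeps three pieces of state in step under tree_lock: the per-page dirty flag, the radix-tree dirty tag, and the nr_dirty page state. A single-threaded model of just that bookkeeping (the locking and the mapping_cap_account_dirty() check are elided):

#include <stdio.h>
#include <stdbool.h>

/* Model of test_clear_page_dirty() bookkeeping: the page flag, the
 * tree tag and the global counter must move together. */
struct page { bool dirty; bool tree_tag_dirty; };
static long nr_dirty;

static int test_clear_page_dirty(struct page *page)
{
    if (page->dirty) {                  /* TestClearPageDirty() */
        page->dirty = false;
        page->tree_tag_dirty = false;   /* radix_tree_tag_clear() */
        nr_dirty--;                     /* dec_page_state(nr_dirty) */
        return 1;                       /* page was previously dirty */
    }
    return 0;
}

int main(void)
{
    struct page p = { .dirty = true, .tree_tag_dirty = true };
    nr_dirty = 1;
    printf("was dirty: %d, nr_dirty now %ld\n",
           test_clear_page_dirty(&p), nr_dirty);
    return 0;
}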
: */ :int clear_page_dirty_for_io(struct page *page) - 7 8.7e-05 0 0 0 0 1 4.8e-05 :{ /* clear_page_dirty_for_io total: 178 0.0022 0 0 0 0 31 0.0015 */ + 30 3.4e-04 0 0 0 0 4 1.8e-04 :{ /* clear_page_dirty_for_io total: 191 0.0021 0 0 0 0 49 0.0022 */ : struct address_space *mapping = page_mapping(page); : : if (mapping) { - 8 1.0e-04 0 0 0 0 0 0 : if (TestClearPageDirty(page)) { - 22 2.7e-04 0 0 0 0 4 1.9e-04 : if (mapping_cap_account_dirty(mapping)) - 23 2.9e-04 0 0 0 0 10 4.8e-04 : dec_page_state(nr_dirty); + 16 1.8e-04 0 0 0 0 3 1.4e-04 : if (TestClearPageDirty(page)) { + 24 2.7e-04 0 0 0 0 13 5.9e-04 : if (mapping_cap_account_dirty(mapping)) + 29 3.3e-04 0 0 0 0 12 5.4e-04 : dec_page_state(nr_dirty); : return 1; : } : return 0; : } : return TestClearPageDirty(page); - 66 8.2e-04 0 0 0 0 3 1.4e-04 :} + 26 2.9e-04 0 0 0 0 5 2.3e-04 :} :EXPORT_SYMBOL(clear_page_dirty_for_io); : :int test_clear_page_writeback(struct page *page) - 184 0.0023 0 0 0 0 21 0.0010 :{ /* test_clear_page_writeback total: 816 0.0102 0 0 1 0.0351 405 0.0193 */ + 322 0.0036 0 0 0 0 30 0.0014 :{ /* test_clear_page_writeback total: 975 0.0110 0 0 0 0 420 0.0191 */ : struct address_space *mapping = page_mapping(page); : int ret; : - 6 7.5e-05 0 0 0 0 4 1.9e-04 : if (mapping) { + 5 5.6e-05 0 0 0 0 2 9.1e-05 : if (mapping) { : unsigned long flags; : - 281 0.0035 0 0 0 0 119 0.0057 : write_lock_irqsave(&mapping->tree_lock, flags); - 10 1.2e-04 0 0 0 0 1 4.8e-05 : ret = TestClearPageWriteback(page); + 252 0.0028 0 0 0 0 119 0.0054 : write_lock_irqsave(&mapping->tree_lock, flags); + 16 1.8e-04 0 0 0 0 2 9.1e-05 : ret = TestClearPageWriteback(page); : if (ret) - 4 5.0e-05 0 0 0 0 2 9.6e-05 : radix_tree_tag_clear(&mapping->page_tree, + 4 4.5e-05 0 0 0 0 1 4.5e-05 : radix_tree_tag_clear(&mapping->page_tree, : page_index(page), : PAGECACHE_TAG_WRITEBACK); - 164 0.0020 0 0 1 0.0351 182 0.0087 : write_unlock_irqrestore(&mapping->tree_lock, flags); + 193 0.0022 0 0 0 0 195 0.0089 : write_unlock_irqrestore(&mapping->tree_lock, flags); : } else { : ret = TestClearPageWriteback(page); : } : return ret; - 31 3.9e-04 0 0 0 0 24 0.0011 :} + 43 4.8e-04 0 0 0 0 34 0.0015 :} : :int test_set_page_writeback(struct page *page) - 55 6.9e-04 0 0 0 0 60 0.0029 :{ /* test_set_page_writeback total: 915 0.0114 0 0 0 0 514 0.0246 */ + 56 6.3e-04 0 0 0 0 63 0.0029 :{ /* test_set_page_writeback total: 848 0.0095 0 0 0 0 523 0.0238 */ : struct address_space *mapping = page_mapping(page); : int ret; : - 8 1.0e-04 0 0 0 0 7 3.3e-04 : if (mapping) { + 6 6.7e-05 0 0 0 0 7 3.2e-04 : if (mapping) { : unsigned long flags; : - 448 0.0056 0 0 0 0 172 0.0082 : write_lock_irqsave(&mapping->tree_lock, flags); - 10 1.2e-04 0 0 0 0 3 1.4e-04 : ret = TestSetPageWriteback(page); + 310 0.0035 0 0 0 0 110 0.0050 : write_lock_irqsave(&mapping->tree_lock, flags); + 19 2.1e-04 0 0 0 0 3 1.4e-04 : ret = TestSetPageWriteback(page); : if (!ret) - 9 1.1e-04 0 0 0 0 1 4.8e-05 : radix_tree_tag_set(&mapping->page_tree, + 12 1.3e-04 0 0 0 0 1 4.5e-05 : radix_tree_tag_set(&mapping->page_tree, : page_index(page), : PAGECACHE_TAG_WRITEBACK); - 114 0.0014 0 0 0 0 35 0.0017 : if (!PageDirty(page)) - 8 1.0e-04 0 0 0 0 4 1.9e-04 : radix_tree_tag_clear(&mapping->page_tree, + 61 6.9e-04 0 0 0 0 26 0.0012 : if (!PageDirty(page)) + 6 6.7e-05 0 0 0 0 7 3.2e-04 : radix_tree_tag_clear(&mapping->page_tree, : page_index(page), : PAGECACHE_TAG_DIRTY); - 123 0.0015 0 0 0 0 188 0.0090 : write_unlock_irqrestore(&mapping->tree_lock, flags); + 175 0.0020 0 0 0 0 232 0.0105 : 
write_unlock_irqrestore(&mapping->tree_lock, flags); : } else { : ret = TestSetPageWriteback(page); : } : return ret; : - 25 3.1e-04 0 0 0 0 15 7.2e-04 :} + 34 3.8e-04 0 0 0 0 17 7.7e-04 :} :EXPORT_SYMBOL(test_set_page_writeback); : :/* @@ -812,25 +812,25 @@ : * passed tag. : */ :int mapping_tagged(struct address_space *mapping, int tag) - 37 4.6e-04 0 0 0 0 13 6.2e-04 :{ /* mapping_tagged total: 312 0.0039 0 0 0 0 193 0.0092 */ + 36 4.0e-04 0 0 0 0 13 5.9e-04 :{ /* mapping_tagged total: 278 0.0031 0 0 0 0 183 0.0083 */ : unsigned long flags; : int ret; : : read_lock_irqsave(&mapping->tree_lock, flags); - 111 0.0014 0 0 0 0 44 0.0021 : ret = radix_tree_tagged(&mapping->page_tree, tag); + 112 0.0013 0 0 0 0 60 0.0027 : ret = radix_tree_tagged(&mapping->page_tree, tag); : read_unlock_irqrestore(&mapping->tree_lock, flags); : return ret; - 164 0.0020 0 0 0 0 136 0.0065 :} + 130 0.0015 0 0 0 0 110 0.0050 :} :EXPORT_SYMBOL(mapping_tagged); /* * Total samples for file : "mm/page-writeback.c" * - * 4935 0.0615 0 0 2 0.0702 2125 0.1015 + * 5964 0.0671 0 0 2 0.0679 2491 0.1131 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/pdflush.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/pdflush.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/pdflush.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/pdflush.c 2006-03-12 07:20:06.000000000 -0500 @@ -169,7 +169,7 @@ : * been observed. This is just paranoia). : */ :static int pdflush(void *dummy) - :{ /* pdflush total: 11 1.4e-04 0 0 0 0 0 0 */ + :{ /* pdflush total: 8 9.0e-05 0 0 0 0 0 0 */ : struct pdflush_work my_work; : cpumask_t cpus_allowed; : @@ -198,16 +198,16 @@ : * payload to it. 
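pdflush_operation(), whose hunk follows, hands an (fn, arg0) pair to an idle pdflush thread and reports failure when none is parked on pdflush_list. A userspace sketch of the handoff, with a singly linked stand-in for the kernel list; note the kernel BUG()s on a NULL fn rather than returning, which this model simplifies.

#include <stdio.h>

/* Model of pdflush_operation(): pop an idle worker, give it work,
 * report failure when the idle list is empty. */
struct pdflush_work {
    void (*fn)(unsigned long);
    unsigned long arg0;
    struct pdflush_work *next;  /* stands in for the kernel list_head */
};
static struct pdflush_work *pdflush_list;   /* idle workers */

static int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
{
    struct pdflush_work *pdf = pdflush_list;
    if (!fn)
        return -1;              /* the kernel BUG()s here instead */
    if (!pdf)
        return -1;              /* no idle worker: caller retries later */
    pdflush_list = pdf->next;
    pdf->fn = fn;
    pdf->arg0 = arg0;           /* the worker thread would now run fn(arg0) */
    return 0;
}

static void wb_kupdate(unsigned long arg) { (void)arg; }

int main(void)
{
    struct pdflush_work idle = { 0 };
    pdflush_list = &idle;
    printf("first: %d, second: %d\n",
           pdflush_operation(wb_kupdate, 0),
           pdflush_operation(wb_kupdate, 0));
    return 0;
}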
: */ :int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0) - :{ /* pdflush_operation total: 5 6.2e-05 0 0 0 0 0 0 */ + :{ /* pdflush_operation total: 9 1.0e-04 0 0 0 0 1 4.5e-05 */ : unsigned long flags; : int ret = 0; : - 1 1.2e-05 0 0 0 0 0 0 : if (fn == NULL) + 2 2.2e-05 0 0 0 0 0 0 : if (fn == NULL) : BUG(); /* Hard to diagnose if it's deferred */ : : spin_lock_irqsave(&pdflush_lock, flags); - : if (list_empty(&pdflush_list)) { - : spin_unlock_irqrestore(&pdflush_lock, flags); + 1 1.1e-05 0 0 0 0 0 0 : if (list_empty(&pdflush_list)) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : spin_unlock_irqrestore(&pdflush_lock, flags); : ret = -1; : } else { : struct pdflush_work *pdf; @@ -242,12 +242,12 @@ /* * Total samples for file : "mm/pdflush.c" * - * 1 1.2e-05 0 0 0 0 0 0 + * 5 5.6e-05 0 0 0 0 1 4.5e-05 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/prio_tree.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/prio_tree.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/prio_tree.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/prio_tree.c 2006-03-12 07:20:05.000000000 -0500 @@ -73,60 +73,60 @@ : * Note that it just happens to work correctly on i_mmap_nonlinear too. : */ :void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old) - 54 6.7e-04 0 0 0 0 14 6.7e-04 :{ + 36 4.0e-04 0 0 0 0 14 6.4e-04 :{ : /* Leave these BUG_ONs till prio_tree patch stabilizes */ - 431 0.0054 0 0 0 0 94 0.0045 : BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old)); /* vma_prio_tree_add total: 2947 0.0367 0 0 0 0 471 0.0225 */ - 1327 0.0165 0 0 0 0 104 0.0050 : BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old)); + 472 0.0053 0 0 0 0 82 0.0037 : BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old)); /* vma_prio_tree_add total: 3084 0.0347 0 0 0 0 465 0.0211 */ + 1437 0.0162 0 0 0 0 116 0.0053 : BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old)); : - 0 0 0 0 0 0 1 4.8e-05 : vma->shared.vm_set.head = NULL; - 127 0.0016 0 0 0 0 9 4.3e-04 : vma->shared.vm_set.parent = NULL; + 1 1.1e-05 0 0 0 0 0 0 : vma->shared.vm_set.head = NULL; + 120 0.0013 0 0 0 0 9 4.1e-04 : vma->shared.vm_set.parent = NULL; : - 190 0.0024 0 0 0 0 43 0.0021 : if (!old->shared.vm_set.parent) - 208 0.0026 0 0 0 0 25 0.0012 : list_add(&vma->shared.vm_set.list, + 214 0.0024 0 0 0 0 46 0.0021 : if (!old->shared.vm_set.parent) + 287 0.0032 0 0 0 0 35 0.0016 : list_add(&vma->shared.vm_set.list, : &old->shared.vm_set.list); - 184 0.0023 0 0 0 0 65 0.0031 : else if (old->shared.vm_set.head) - 14 1.7e-04 0 0 0 0 9 4.3e-04 : list_add_tail(&vma->shared.vm_set.list, + 169 0.0019 0 0 0 0 68 0.0031 : else if (old->shared.vm_set.head) + 8 9.0e-05 0 0 0 0 6 2.7e-04 : list_add_tail(&vma->shared.vm_set.list, : &old->shared.vm_set.head->shared.vm_set.list); : else { : INIT_LIST_HEAD(&vma->shared.vm_set.list); - : vma->shared.vm_set.head = old; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : vma->shared.vm_set.head = old; : old->shared.vm_set.head = vma; : } - 235 0.0029 0 0 0 0 42 0.0020 :} + 217 0.0024 0 0 0 0 34 0.0015 :} : :void vma_prio_tree_insert(struct vm_area_struct *vma, : struct 
prio_tree_root *root) - 541 0.0067 0 0 0 0 302 0.0144 :{ /* vma_prio_tree_insert total: 2191 0.0273 0 0 0 0 883 0.0422 */ + 595 0.0067 0 0 0 0 280 0.0127 :{ /* vma_prio_tree_insert total: 2038 0.0229 0 0 1 0.0339 827 0.0376 */ : struct prio_tree_node *ptr; : struct vm_area_struct *old; : - 27 3.4e-04 0 0 0 0 20 9.6e-04 : vma->shared.vm_set.head = NULL; + 35 3.9e-04 0 0 0 0 20 9.1e-04 : vma->shared.vm_set.head = NULL; : - 67 8.3e-04 0 0 0 0 33 0.0016 : ptr = raw_prio_tree_insert(root, &vma->shared.prio_tree_node); - 1246 0.0155 0 0 0 0 352 0.0168 : if (ptr != (struct prio_tree_node *) &vma->shared.prio_tree_node) { + 61 6.9e-04 0 0 0 0 38 0.0017 : ptr = raw_prio_tree_insert(root, &vma->shared.prio_tree_node); + 1049 0.0118 0 0 0 0 318 0.0144 : if (ptr != (struct prio_tree_node *) &vma->shared.prio_tree_node) { : old = prio_tree_entry(ptr, struct vm_area_struct, : shared.prio_tree_node); - 141 0.0018 0 0 0 0 55 0.0026 : vma_prio_tree_add(vma, old); + 147 0.0017 0 0 0 0 47 0.0021 : vma_prio_tree_add(vma, old); : } - 169 0.0021 0 0 0 0 121 0.0058 :} + 151 0.0017 0 0 1 0.0339 124 0.0056 :} : :void vma_prio_tree_remove(struct vm_area_struct *vma, : struct prio_tree_root *root) - 2348 0.0293 0 0 0 0 993 0.0474 :{ /* vma_prio_tree_remove total: 4640 0.0578 0 0 0 0 1436 0.0686 */ + 2888 0.0325 0 0 0 0 949 0.0431 :{ /* vma_prio_tree_remove total: 5400 0.0607 0 0 0 0 1401 0.0636 */ : struct vm_area_struct *node, *head, *new_head; : - 119 0.0015 0 0 0 0 29 0.0014 : if (!vma->shared.vm_set.head) { - 444 0.0055 0 0 0 0 88 0.0042 : if (!vma->shared.vm_set.parent) + 161 0.0018 0 0 0 0 33 0.0015 : if (!vma->shared.vm_set.head) { + 535 0.0060 0 0 0 0 77 0.0035 : if (!vma->shared.vm_set.parent) : list_del_init(&vma->shared.vm_set.list); : else - 201 0.0025 0 0 0 0 41 0.0020 : raw_prio_tree_remove(root, &vma->shared.prio_tree_node); + 196 0.0022 0 0 0 0 36 0.0016 : raw_prio_tree_remove(root, &vma->shared.prio_tree_node); : } else { : /* Leave this BUG_ON till prio_tree patch stabilizes */ - 75 9.3e-04 0 0 0 0 12 5.7e-04 : BUG_ON(vma->shared.vm_set.head->shared.vm_set.head != vma); - 21 2.6e-04 0 0 0 0 5 2.4e-04 : if (vma->shared.vm_set.parent) { + 105 0.0012 0 0 0 0 5 2.3e-04 : BUG_ON(vma->shared.vm_set.head->shared.vm_set.head != vma); + 51 5.7e-04 0 0 0 0 4 1.8e-04 : if (vma->shared.vm_set.parent) { : head = vma->shared.vm_set.head; - 7 8.7e-05 0 0 0 0 1 4.8e-05 : if (!list_empty(&head->shared.vm_set.list)) { - : new_head = list_entry( + 21 2.4e-04 0 0 0 0 1 4.5e-05 : if (!list_empty(&head->shared.vm_set.list)) { + 4 4.5e-05 0 0 0 0 0 0 : new_head = list_entry( : head->shared.vm_set.list.next, : struct vm_area_struct, : shared.vm_set.list); @@ -134,16 +134,16 @@ : } else : new_head = NULL; : - 4 5.0e-05 0 0 0 0 0 0 : raw_prio_tree_replace(root, &vma->shared.prio_tree_node, + 14 1.6e-04 0 0 0 0 0 0 : raw_prio_tree_replace(root, &vma->shared.prio_tree_node, : &head->shared.prio_tree_node); - : head->shared.vm_set.head = new_head; + 4 4.5e-05 0 0 0 0 0 0 : head->shared.vm_set.head = new_head; : if (new_head) - : new_head->shared.vm_set.head = head; + 1 1.1e-05 0 0 0 0 0 0 : new_head->shared.vm_set.head = head; : : } else { : node = vma->shared.vm_set.head; - 23 2.9e-04 0 0 0 0 5 2.4e-04 : if (!list_empty(&vma->shared.vm_set.list)) { - 2 2.5e-05 0 0 0 0 0 0 : new_head = list_entry( + 22 2.5e-04 0 0 0 0 6 2.7e-04 : if (!list_empty(&vma->shared.vm_set.list)) { + 1 1.1e-05 0 0 0 0 0 0 : new_head = list_entry( : vma->shared.vm_set.list.next, : struct vm_area_struct, : shared.vm_set.list); @@ -151,10 +151,10 @@ : 
node->shared.vm_set.head = new_head; : new_head->shared.vm_set.head = node; : } else - 1 1.2e-05 0 0 0 0 0 0 : node->shared.vm_set.head = NULL; + : node->shared.vm_set.head = NULL; : } : } - 535 0.0067 0 0 0 0 138 0.0066 :} + 560 0.0063 0 0 0 0 150 0.0068 :} : :/* : * Helper function to enumerate vmas that map a given file page or a set of @@ -163,11 +163,11 @@ : */ :struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, : struct prio_tree_iter *iter) - :{ + 15 1.7e-04 0 0 0 0 16 7.3e-04 :{ /* vma_prio_tree_next total: 260 0.0029 0 0 0 0 117 0.0053 */ : struct prio_tree_node *ptr; : struct vm_area_struct *next; : - : if (!vma) { + 38 4.3e-04 0 0 0 0 11 5.0e-04 : if (!vma) { : /* : * First call is with NULL vma : */ @@ -181,39 +181,39 @@ : return NULL; : } : - : if (vma->shared.vm_set.parent) { - : if (vma->shared.vm_set.head) { + 12 1.3e-04 0 0 0 0 7 3.2e-04 : if (vma->shared.vm_set.parent) { + 5 5.6e-05 0 0 0 0 0 0 : if (vma->shared.vm_set.head) { : next = vma->shared.vm_set.head; : prefetch(next->shared.vm_set.list.next); : return next; : } : } else { - : next = list_entry(vma->shared.vm_set.list.next, + 43 4.8e-04 0 0 0 0 11 5.0e-04 : next = list_entry(vma->shared.vm_set.list.next, : struct vm_area_struct, shared.vm_set.list); - : if (!next->shared.vm_set.head) { + 40 4.5e-04 0 0 0 0 10 4.5e-04 : if (!next->shared.vm_set.head) { : prefetch(next->shared.vm_set.list.next); : return next; : } : } : - : ptr = prio_tree_next(iter); - : if (ptr) { + 1 1.1e-05 0 0 0 0 2 9.1e-05 : ptr = prio_tree_next(iter); + 2 2.2e-05 0 0 0 0 0 0 : if (ptr) { : next = prio_tree_entry(ptr, struct vm_area_struct, : shared.prio_tree_node); : prefetch(next->shared.vm_set.head); : return next; : } else : return NULL; - :} + 76 8.5e-04 0 0 0 0 43 0.0020 :} /* * Total samples for file : "mm/prio_tree.c" * - * 8741 0.1089 0 0 0 0 2601 0.1243 + * 9795 0.1102 0 0 1 0.0339 2599 0.1180 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/readahead.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/readahead.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/readahead.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/readahead.c 2006-03-12 07:20:05.000000000 -0500 @@ -34,10 +34,10 @@ : */ :void :file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) - 1340 0.0167 0 0 0 0 274 0.0131 :{ - 168 0.0021 0 0 0 0 121 0.0058 : ra->ra_pages = mapping->backing_dev_info->ra_pages; /* file_ra_state_init total: 1618 0.0202 0 0 0 0 503 0.0240 */ - 26 3.2e-04 0 0 0 0 50 0.0024 : ra->prev_page = -1; - 84 0.0010 0 0 0 0 58 0.0028 :} + 1518 0.0171 0 0 0 0 334 0.0152 :{ + 203 0.0023 0 0 0 0 113 0.0051 : ra->ra_pages = mapping->backing_dev_info->ra_pages; /* file_ra_state_init total: 1834 0.0206 0 0 0 0 543 0.0247 */ + 33 3.7e-04 0 0 0 0 49 0.0022 : ra->prev_page = -1; + 80 9.0e-04 0 0 0 0 47 0.0021 :} : :/* : * Return max readahead size for this inode in number-of-pages. 
@@ -90,14 +90,14 @@ :{ : unsigned long max = get_max_readahead(ra); : unsigned long min = get_min_readahead(ra); - 1 1.2e-05 0 0 0 0 0 0 : unsigned long cur = ra->size; + 1 1.1e-05 0 0 0 0 0 0 : unsigned long cur = ra->size; : unsigned long newsize; : : if (ra->flags & RA_FLAG_MISS) { : ra->flags &= ~RA_FLAG_MISS; : newsize = max((cur - 2), min); - 2 2.5e-05 0 0 0 0 0 0 : } else if (cur < max / 16) { - 12 1.5e-04 0 0 0 0 1 4.8e-05 : newsize = 4 * cur; + : } else if (cur < max / 16) { + 15 1.7e-04 0 0 0 0 2 9.1e-05 : newsize = 4 * cur; : } else { : newsize = 2 * cur; : } @@ -258,56 +258,56 @@ :static int :__do_page_cache_readahead(struct address_space *mapping, struct file *filp, : pgoff_t offset, unsigned long nr_to_read) - 899 0.0112 0 0 0 0 364 0.0174 :{ /* __do_page_cache_readahead total: 10818 0.1348 0 0 1 0.0351 2853 0.1363 */ - 3 3.7e-05 0 0 0 0 0 0 : struct inode *inode = mapping->host; + 917 0.0103 0 0 1 0.0339 342 0.0155 :{ /* __do_page_cache_readahead total: 11295 0.1270 0 0 2 0.0679 2895 0.1315 */ + 3 3.4e-05 0 0 0 0 1 4.5e-05 : struct inode *inode = mapping->host; : struct page *page; : unsigned long end_index; /* The last page we want to read */ - 24 3.0e-04 0 0 0 0 21 0.0010 : LIST_HEAD(page_pool); + 37 4.2e-04 0 0 1 0.0339 12 5.4e-04 : LIST_HEAD(page_pool); : int page_idx; : int ret = 0; : loff_t isize = i_size_read(inode); : - 102 0.0013 0 0 0 0 32 0.0015 : if (isize == 0) + 100 0.0011 0 0 0 0 46 0.0021 : if (isize == 0) : goto out; : - 32 4.0e-04 0 0 0 0 14 6.7e-04 : end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + 20 2.2e-04 0 0 0 0 16 7.3e-04 : end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); : : /* : * Preallocate as many pages as we will need. : */ - 1336 0.0166 0 0 0 0 522 0.0249 : read_lock_irq(&mapping->tree_lock); - 2038 0.0254 0 0 0 0 343 0.0164 : for (page_idx = 0; page_idx < nr_to_read; page_idx++) { - 1257 0.0157 0 0 0 0 196 0.0094 : pgoff_t page_offset = offset + page_idx; + 1409 0.0158 0 0 0 0 485 0.0220 : read_lock_irq(&mapping->tree_lock); + 2153 0.0242 0 0 0 0 363 0.0165 : for (page_idx = 0; page_idx < nr_to_read; page_idx++) { + 1073 0.0121 0 0 0 0 210 0.0095 : pgoff_t page_offset = offset + page_idx; : - 211 0.0026 0 0 1 0.0351 78 0.0037 : if (page_offset > end_index) + 148 0.0017 0 0 0 0 75 0.0034 : if (page_offset > end_index) : break; : - 1772 0.0221 0 0 0 0 323 0.0154 : page = radix_tree_lookup(&mapping->page_tree, page_offset); - 269 0.0034 0 0 0 0 120 0.0057 : if (page) + 1991 0.0224 0 0 0 0 350 0.0159 : page = radix_tree_lookup(&mapping->page_tree, page_offset); + 284 0.0032 0 0 0 0 120 0.0054 : if (page) : continue; : - 10 1.2e-04 0 0 0 0 3 1.4e-04 : read_unlock_irq(&mapping->tree_lock); + 13 1.5e-04 0 0 0 0 3 1.4e-04 : read_unlock_irq(&mapping->tree_lock); : page = page_cache_alloc_cold(mapping); - 1 1.2e-05 0 0 0 0 0 0 : read_lock_irq(&mapping->tree_lock); - 15 1.9e-04 0 0 0 0 18 8.6e-04 : if (!page) + : read_lock_irq(&mapping->tree_lock); + 30 3.4e-04 0 0 0 0 38 0.0017 : if (!page) : break; : page->index = page_offset; - 3 3.7e-05 0 0 0 0 0 0 : list_add(&page->lru, &page_pool); - 5 6.2e-05 0 0 0 0 1 4.8e-05 : ret++; + 5 5.6e-05 0 0 0 0 3 1.4e-04 : list_add(&page->lru, &page_pool); + 1 1.1e-05 0 0 0 0 0 0 : ret++; : } - 1006 0.0125 0 0 0 0 341 0.0163 : read_unlock_irq(&mapping->tree_lock); + 970 0.0109 0 0 0 0 295 0.0134 : read_unlock_irq(&mapping->tree_lock); : : /* : * Now start the IO. We ignore I/O errors - if the page is not : * uptodate then the caller will launch readpage again, and : * will then handle the error. 
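Most of __do_page_cache_readahead()'s samples above sit on the radix_tree_lookup() probe: the loop touches the page cache once per candidate page and allocates only for misses. A compact model of that probe-then-allocate pass, with a flat array standing in for the radix tree (sizes are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define CACHE_SIZE 64

/* Model of the preallocation loop in __do_page_cache_readahead():
 * one lookup per candidate offset, allocate only on a miss. */
static bool page_cached[CACHE_SIZE];    /* stands in for the radix tree */

static int do_readahead(unsigned offset, unsigned nr_to_read,
                        unsigned end_index)
{
    int ret = 0;
    for (unsigned i = 0; i < nr_to_read; i++) {
        unsigned page_offset = offset + i;
        if (page_offset > end_index)
            break;                      /* never read past EOF */
        if (page_cached[page_offset])   /* radix_tree_lookup() hit */
            continue;
        page_cached[page_offset] = true;/* page_cache_alloc_cold() + add */
        ret++;
    }
    return ret;                         /* pages actually queued for IO */
}

int main(void)
{
    page_cached[3] = page_cached[4] = true;     /* pretend two hits */
    printf("queued %d of 8 pages\n", do_readahead(0, 8, CACHE_SIZE - 1));
    return 0;
}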
: */ - 147 0.0018 0 0 0 0 122 0.0058 : if (ret) + 161 0.0018 0 0 0 0 152 0.0069 : if (ret) : read_pages(mapping, filp, &page_pool, ret); - 843 0.0105 0 0 0 0 112 0.0054 : BUG_ON(!list_empty(&page_pool)); + 1066 0.0120 0 0 0 0 130 0.0059 : BUG_ON(!list_empty(&page_pool)); :out: : return ret; - 533 0.0066 0 0 0 0 158 0.0075 :} + 518 0.0058 0 0 0 0 141 0.0064 :} : :/* : * Chunk the readahead into 2 megabyte units, so that we don't pin too much @@ -372,12 +372,12 @@ : */ :int do_page_cache_readahead(struct address_space *mapping, struct file *filp, : pgoff_t offset, unsigned long nr_to_read) - 1 1.2e-05 0 0 0 0 0 0 :{ /* do_page_cache_readahead total: 1 1.2e-05 0 0 0 0 0 0 */ - : if (bdi_read_congested(mapping->backing_dev_info)) + 8 9.0e-05 0 0 0 0 0 0 :{ /* do_page_cache_readahead total: 12 1.3e-04 0 0 0 0 0 0 */ + 2 2.2e-05 0 0 0 0 0 0 : if (bdi_read_congested(mapping->backing_dev_info)) : return -1; : : return __do_page_cache_readahead(mapping, filp, offset, nr_to_read); - :} + 1 1.1e-05 0 0 0 0 0 0 :} : :/* : * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block' @@ -390,30 +390,30 @@ :blockable_page_cache_readahead(struct address_space *mapping, struct file *filp, : pgoff_t offset, unsigned long nr_to_read, : struct file_ra_state *ra, int block) - 203 0.0025 0 0 0 0 97 0.0046 :{ /* blockable_page_cache_readahead total: 4583 0.0571 0 0 1 0.0351 902 0.0431 */ + 194 0.0022 0 0 0 0 88 0.0040 :{ /* blockable_page_cache_readahead total: 4386 0.0493 0 0 0 0 932 0.0423 */ : int actual; : - 1025 0.0128 0 0 0 0 414 0.0198 : if (!block && bdi_read_congested(mapping->backing_dev_info)) + 1240 0.0139 0 0 0 0 390 0.0177 : if (!block && bdi_read_congested(mapping->backing_dev_info)) : return 0; : - 308 0.0038 0 0 0 0 75 0.0036 : actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read); + 303 0.0034 0 0 0 0 102 0.0046 : actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read); : : return check_ra_success(ra, nr_to_read, actual); - 948 0.0118 0 0 1 0.0351 126 0.0060 :} + 909 0.0102 0 0 0 0 149 0.0068 :} : :static int make_ahead_window(struct address_space *mapping, struct file *filp, : struct file_ra_state *ra, int force) - 13 1.6e-04 0 0 0 0 6 2.9e-04 :{ /* make_ahead_window total: 44 5.5e-04 0 0 0 0 11 5.3e-04 */ + 18 2.0e-04 0 0 0 0 3 1.4e-04 :{ /* make_ahead_window total: 47 5.3e-04 0 0 0 0 7 3.2e-04 */ : int block, ret; : - 1 1.2e-05 0 0 0 0 0 0 : ra->ahead_size = get_next_ra_size(ra); - 2 2.5e-05 0 0 0 0 0 0 : ra->ahead_start = ra->start + ra->size; + 1 1.1e-05 0 0 0 0 0 0 : ra->ahead_size = get_next_ra_size(ra); + : ra->ahead_start = ra->start + ra->size; : : block = force || (ra->prev_page >= ra->ahead_start); - 5 6.2e-05 0 0 0 0 2 9.6e-05 : ret = blockable_page_cache_readahead(mapping, filp, + 0 0 0 0 0 0 1 4.5e-05 : ret = blockable_page_cache_readahead(mapping, filp, : ra->ahead_start, ra->ahead_size, ra, block); : - 5 6.2e-05 0 0 0 0 0 0 : if (!ret && !force) { + 3 3.4e-05 0 0 0 0 0 0 : if (!ret && !force) { : /* A read failure in blocking mode, implies pages are : * all cached. So we can safely assume we have taken : * care of all the pages requested in this call. 
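make_ahead_window(), shown around this point, places the next ahead window immediately after the current one and submits it through blockable_page_cache_readahead(), which refuses to pile work onto a congested queue unless blocking was forced. A sketch of that control flow; get_next_ra_size() is reduced to a plain doubling, which is an assumed simplification.

#include <stdio.h>
#include <stdbool.h>

/* Model of make_ahead_window(): place the ahead window after the
 * current window; skip the IO when non-blocking and congested. */
struct file_ra_state {
    unsigned long start, size;
    unsigned long ahead_start, ahead_size;
    unsigned long prev_page;
};
static bool bdi_read_congested = true;  /* stands in for the bdi check */

static int blockable_readahead(unsigned long off, unsigned long nr, int block)
{
    (void)off;                  /* offset unused in this toy model */
    if (!block && bdi_read_congested)
        return 0;               /* don't pile onto a congested queue */
    return (int)nr;             /* pretend all pages were submitted */
}

static int make_ahead_window(struct file_ra_state *ra, int force)
{
    ra->ahead_size = ra->size * 2;      /* get_next_ra_size(), simplified */
    ra->ahead_start = ra->start + ra->size;
    int block = force || ra->prev_page >= ra->ahead_start;
    return blockable_readahead(ra->ahead_start, ra->ahead_size, block);
}

int main(void)
{
    struct file_ra_state ra = { .start = 0, .size = 16, .prev_page = 8 };
    printf("non-forced, congested: %d pages\n", make_ahead_window(&ra, 0));
    printf("forced: %d pages\n", make_ahead_window(&ra, 1));
    return 0;
}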
@@ -431,7 +431,7 @@ : } : : return ret; - 3 3.7e-05 0 0 0 0 2 9.6e-05 :} + 9 1.0e-04 0 0 0 0 1 4.5e-05 :} : :/** : * page_cache_readahead - generic adaptive readahead @@ -454,7 +454,7 @@ :unsigned long :page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, : struct file *filp, pgoff_t offset, unsigned long req_size) - 1538 0.0192 0 0 0 0 453 0.0216 :{ /* page_cache_readahead total: 4746 0.0591 0 0 0 0 1831 0.0875 */ + 1697 0.0191 0 0 0 0 494 0.0224 :{ /* page_cache_readahead total: 5150 0.0579 0 0 0 0 1803 0.0819 */ : unsigned long max, newsize; : int sequential; : @@ -462,31 +462,31 @@ : * We avoid doing extra work and bogusly perturbing the readahead : * window expansion logic. : */ - 70 8.7e-04 0 0 0 0 29 0.0014 : if (offset == ra->prev_page && --req_size) - 48 6.0e-04 0 0 0 0 44 0.0021 : ++offset; + 65 7.3e-04 0 0 0 0 24 0.0011 : if (offset == ra->prev_page && --req_size) + 33 3.7e-04 0 0 0 0 38 0.0017 : ++offset; : : /* Note that prev_page == -1 if it is a first read */ - 101 0.0013 0 0 0 0 54 0.0026 : sequential = (offset == ra->prev_page + 1); - 3 3.7e-05 0 0 0 0 0 0 : ra->prev_page = offset; + 103 0.0012 0 0 0 0 35 0.0016 : sequential = (offset == ra->prev_page + 1); + 1 1.1e-05 0 0 0 0 0 0 : ra->prev_page = offset; : : max = get_max_readahead(ra); - 42 5.2e-04 0 0 0 0 27 0.0013 : newsize = min(req_size, max); + 30 3.4e-04 0 0 0 0 26 0.0012 : newsize = min(req_size, max); : : /* No readahead or sub-page sized read or file already in cache */ - 603 0.0075 0 0 0 0 246 0.0118 : if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)) + 694 0.0078 0 0 0 0 263 0.0119 : if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)) : goto out; : - 80 1.0e-03 0 0 0 0 74 0.0035 : ra->prev_page += newsize - 1; + 90 0.0010 0 0 0 0 68 0.0031 : ra->prev_page += newsize - 1; : : /* : * Special case - first read at start of file. We'll assume it's : * a whole-file read and grow the window fast. Or detect first : * sequential access : */ - 54 6.7e-04 0 0 0 0 58 0.0028 : if (sequential && ra->size == 0) { - : ra->size = get_init_ra_size(newsize, max); - 19 2.4e-04 0 0 0 0 38 0.0018 : ra->start = offset; - 213 0.0027 0 0 0 0 86 0.0041 : if (!blockable_page_cache_readahead(mapping, filp, offset, + 50 5.6e-04 0 0 0 0 42 0.0019 : if (sequential && ra->size == 0) { + 3 3.4e-05 0 0 0 0 0 0 : ra->size = get_init_ra_size(newsize, max); + 27 3.0e-04 0 0 0 0 32 0.0015 : ra->start = offset; + 203 0.0023 0 0 0 0 89 0.0040 : if (!blockable_page_cache_readahead(mapping, filp, offset, : ra->size, ra, 1)) : goto out; : @@ -498,8 +498,8 @@ : * IOs,* thus preventing stalls. so issue the ahead window : * immediately. 
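page_cache_readahead() above classifies each call by comparing the offset against the last page touched: prev_page == -1 marks a first read, offset == prev_page + 1 marks a sequential one, and anything else resets the window. A model of just that classification:

#include <stdio.h>

/* Model of the access classification at the top of
 * page_cache_readahead(): prev_page == -1 means first read. */
struct ra { long prev_page, size; };

static const char *classify(struct ra *ra, long offset)
{
    int sequential = (offset == ra->prev_page + 1);
    ra->prev_page = offset;
    if (sequential && ra->size == 0)
        return "first sequential read: grow window fast";
    if (!sequential)
        return "random read: reset window, read just this request";
    return "sequential continuation: keep pipelining ahead window";
}

int main(void)
{
    struct ra ra = { .prev_page = -1, .size = 0 };
    printf("%s\n", classify(&ra, 0));   /* offset 0 follows prev -1 */
    ra.size = 16;
    printf("%s\n", classify(&ra, 1));
    printf("%s\n", classify(&ra, 500));
    return 0;
}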
: */ - 455 0.0057 0 0 0 0 186 0.0089 : if (req_size >= max) - 47 5.9e-04 0 0 0 0 11 5.3e-04 : make_ahead_window(mapping, filp, ra, 1); + 508 0.0057 0 0 0 0 187 0.0085 : if (req_size >= max) + 49 5.5e-04 0 0 0 0 14 6.4e-04 : make_ahead_window(mapping, filp, ra, 1); : : goto out; : } @@ -511,7 +511,7 @@ : */ : if (!sequential) { : ra_off(ra); - 16 2.0e-04 0 0 0 0 2 9.6e-05 : blockable_page_cache_readahead(mapping, filp, offset, + 6 6.7e-05 0 0 0 0 5 2.3e-04 : blockable_page_cache_readahead(mapping, filp, offset, : newsize, ra, 1); : goto out; : } @@ -521,8 +521,8 @@ : * occurence (ie we have an existing window) : */ : - 27 3.4e-04 0 0 0 0 14 6.7e-04 : if (ra->ahead_start == 0) { /* no ahead window yet */ - 8 1.0e-04 0 0 0 0 4 1.9e-04 : if (!make_ahead_window(mapping, filp, ra, 0)) + 24 2.7e-04 0 0 0 0 15 6.8e-04 : if (ra->ahead_start == 0) { /* no ahead window yet */ + 2 2.2e-05 0 0 0 0 2 9.1e-05 : if (!make_ahead_window(mapping, filp, ra, 0)) : goto out; : } : /* @@ -532,15 +532,15 @@ : * we get called back on the first page of the ahead window which : * will allow us to submit more IO. : */ - 6 7.5e-05 0 0 0 0 4 1.9e-04 : if (ra->prev_page >= ra->ahead_start) { + 6 6.7e-05 0 0 0 0 5 2.3e-04 : if (ra->prev_page >= ra->ahead_start) { : ra->start = ra->ahead_start; - 1 1.2e-05 0 0 0 0 3 1.4e-04 : ra->size = ra->ahead_size; - 0 0 0 0 0 0 1 4.8e-05 : make_ahead_window(mapping, filp, ra, 0); + : ra->size = ra->ahead_size; + 1 1.1e-05 0 0 0 0 0 0 : make_ahead_window(mapping, filp, ra, 0); : } : - 148 0.0018 0 0 0 0 32 0.0015 :out: + 166 0.0019 0 0 0 0 33 0.0015 :out: : return ra->prev_page + 1; - 616 0.0077 0 0 0 0 187 0.0089 :} + 761 0.0086 0 0 0 0 159 0.0072 :} : :/* : * handle_ra_miss() is called when it is known that a page which should have @@ -564,23 +564,23 @@ : * sensible upper limit. : */ :unsigned long max_sane_readahead(unsigned long nr) - :{ /* max_sane_readahead total: 1 1.2e-05 0 0 0 0 0 0 */ + 7 7.9e-05 0 0 0 0 0 0 :{ /* max_sane_readahead total: 7 7.9e-05 0 0 0 0 0 0 */ : unsigned long active; : unsigned long inactive; : unsigned long free; : : __get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id())); : return min(nr, (inactive + free) / 2); - 1 1.2e-05 0 0 0 0 0 0 :} + :} /* * Total samples for file : "mm/readahead.c" * - * 18749 0.2336 0 0 2 0.0702 5547 0.2650 + * 19963 0.2245 0 0 2 0.0679 5592 0.2540 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/rmap.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/rmap.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/rmap.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/rmap.c 2006-03-12 07:20:05.000000000 -0500 @@ -81,102 +81,102 @@ : :/* This must be called under the mmap_sem. 
*/ :int anon_vma_prepare(struct vm_area_struct *vma) - 18223 0.2271 0 0 3 0.1054 4929 0.2355 :{ /* anon_vma_prepare total: 29395 0.3663 0 0 3 0.1054 9486 0.4532 */ + 18713 0.2105 0 0 1 0.0339 5796 0.2632 :{ /* anon_vma_prepare total: 33543 0.3772 0 0 1 0.0339 8997 0.4086 */ : struct anon_vma *anon_vma = vma->anon_vma; : : might_sleep(); - 1202 0.0150 0 0 0 0 376 0.0180 : if (unlikely(!anon_vma)) { - 27 3.4e-04 0 0 0 0 11 5.3e-04 : struct mm_struct *mm = vma->vm_mm; + 3226 0.0363 0 0 0 0 825 0.0375 : if (unlikely(!anon_vma)) { + 6 6.7e-05 0 0 0 0 3 1.4e-04 : struct mm_struct *mm = vma->vm_mm; : struct anon_vma *allocated, *locked; : - 218 0.0027 0 0 0 0 65 0.0031 : anon_vma = find_mergeable_anon_vma(vma); - 1 1.2e-05 0 0 0 0 6 2.9e-04 : if (anon_vma) { + 334 0.0038 0 0 0 0 74 0.0034 : anon_vma = find_mergeable_anon_vma(vma); + 0 0 0 0 0 0 1 4.5e-05 : if (anon_vma) { : allocated = NULL; : locked = anon_vma; : spin_lock(&locked->lock); : } else { : anon_vma = anon_vma_alloc(); - 37 4.6e-04 0 0 0 0 9 4.3e-04 : if (unlikely(!anon_vma)) + 46 5.2e-04 0 0 0 0 3 1.4e-04 : if (unlikely(!anon_vma)) : return -ENOMEM; : allocated = anon_vma; : locked = NULL; : } : : /* page_table_lock to protect against threads */ - 3 3.7e-05 0 0 0 0 2 9.6e-05 : spin_lock(&mm->page_table_lock); - 302 0.0038 0 0 0 0 45 0.0021 : if (likely(!vma->anon_vma)) { + : spin_lock(&mm->page_table_lock); + 305 0.0034 0 0 0 0 23 0.0010 : if (likely(!vma->anon_vma)) { : vma->anon_vma = anon_vma; - 78 9.7e-04 0 0 0 0 6 2.9e-04 : list_add(&vma->anon_vma_node, &anon_vma->head); + 141 0.0016 0 0 0 0 9 4.1e-04 : list_add(&vma->anon_vma_node, &anon_vma->head); : allocated = NULL; : } : spin_unlock(&mm->page_table_lock); : - 20 2.5e-04 0 0 0 0 4 1.9e-04 : if (locked) + 10 1.1e-04 0 0 0 0 0 0 : if (locked) : spin_unlock(&locked->lock); - 24 3.0e-04 0 0 0 0 0 0 : if (unlikely(allocated)) + 43 4.8e-04 0 0 0 0 2 9.1e-05 : if (unlikely(allocated)) : anon_vma_free(allocated); : } : return 0; - 6957 0.0867 0 0 0 0 3209 0.1533 :} + 9325 0.1049 0 0 0 0 1751 0.0795 :} : :void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) :{ - 30 3.7e-04 0 0 0 0 2 9.6e-05 : BUG_ON(vma->anon_vma != next->anon_vma); /* __anon_vma_merge total: 39 4.9e-04 0 0 0 0 2 9.6e-05 */ - : list_del(&next->anon_vma_node); - :} + 31 3.5e-04 0 0 0 0 4 1.8e-04 : BUG_ON(vma->anon_vma != next->anon_vma); /* __anon_vma_merge total: 36 4.0e-04 0 0 0 0 5 2.3e-04 */ + 1 1.1e-05 0 0 0 0 0 0 : list_del(&next->anon_vma_node); + 1 1.1e-05 0 0 0 0 0 0 :} : :void __anon_vma_link(struct vm_area_struct *vma) - 588 0.0073 0 0 0 0 80 0.0038 :{ - 991 0.0123 0 0 0 0 614 0.0293 : struct anon_vma *anon_vma = vma->anon_vma; /* __anon_vma_link total: 1919 0.0239 0 0 0 0 772 0.0369 */ + 620 0.0070 0 0 0 0 110 0.0050 :{ + 1057 0.0119 0 0 0 0 643 0.0292 : struct anon_vma *anon_vma = vma->anon_vma; /* __anon_vma_link total: 2052 0.0231 0 0 0 0 851 0.0386 */ : - 46 5.7e-04 0 0 0 0 14 6.7e-04 : if (anon_vma) { - 33 4.1e-04 0 0 0 0 4 1.9e-04 : list_add(&vma->anon_vma_node, &anon_vma->head); + 37 4.2e-04 0 0 0 0 13 5.9e-04 : if (anon_vma) { + 29 3.3e-04 0 0 0 0 8 3.6e-04 : list_add(&vma->anon_vma_node, &anon_vma->head); : validate_anon_vma(vma); : } - 243 0.0030 0 0 0 0 56 0.0027 :} + 292 0.0033 0 0 0 0 69 0.0031 :} : :void anon_vma_link(struct vm_area_struct *vma) - 137 0.0017 0 0 0 0 19 9.1e-04 :{ /* anon_vma_link total: 434 0.0054 0 0 0 0 91 0.0043 */ - 7 8.7e-05 0 0 0 0 1 4.8e-05 : struct anon_vma *anon_vma = vma->anon_vma; + 143 0.0016 0 0 0 0 18 8.2e-04 :{ /* anon_vma_link total: 
526 0.0059 0 0 0 0 134 0.0061 */ + 11 1.2e-04 0 0 0 0 0 0 : struct anon_vma *anon_vma = vma->anon_vma; : - 16 2.0e-04 0 0 0 0 12 5.7e-04 : if (anon_vma) { - 63 7.9e-04 0 0 0 0 20 9.6e-04 : spin_lock(&anon_vma->lock); - 9 1.1e-04 0 0 0 0 2 9.6e-05 : list_add(&vma->anon_vma_node, &anon_vma->head); + 22 2.5e-04 0 0 0 0 15 6.8e-04 : if (anon_vma) { + 88 9.9e-04 0 0 0 0 33 0.0015 : spin_lock(&anon_vma->lock); + 8 9.0e-05 0 0 0 0 4 1.8e-04 : list_add(&vma->anon_vma_node, &anon_vma->head); : validate_anon_vma(vma); : spin_unlock(&anon_vma->lock); : } - 96 0.0012 0 0 0 0 11 5.3e-04 :} + 89 0.0010 0 0 0 0 22 1.0e-03 :} : :void anon_vma_unlink(struct vm_area_struct *vma) - 567 0.0071 0 0 0 0 195 0.0093 :{ /* anon_vma_unlink total: 5045 0.0629 0 0 2 0.0702 1253 0.0599 */ - 110 0.0014 0 0 0 0 55 0.0026 : struct anon_vma *anon_vma = vma->anon_vma; + 644 0.0072 0 0 0 0 172 0.0078 :{ /* anon_vma_unlink total: 5589 0.0629 0 0 1 0.0339 1207 0.0548 */ + 117 0.0013 0 0 0 0 43 0.0020 : struct anon_vma *anon_vma = vma->anon_vma; : int empty; : - 189 0.0024 0 0 1 0.0351 93 0.0044 : if (!anon_vma) + 162 0.0018 0 0 0 0 95 0.0043 : if (!anon_vma) : return; : - 758 0.0094 0 0 0 0 236 0.0113 : spin_lock(&anon_vma->lock); + 828 0.0093 0 0 0 0 202 0.0092 : spin_lock(&anon_vma->lock); : validate_anon_vma(vma); - 242 0.0030 0 0 1 0.0351 107 0.0051 : list_del(&vma->anon_vma_node); + 272 0.0031 0 0 0 0 85 0.0039 : list_del(&vma->anon_vma_node); : : /* We must garbage collect the anon_vma if it's empty */ : empty = list_empty(&anon_vma->head); : spin_unlock(&anon_vma->lock); : - 461 0.0057 0 0 0 0 102 0.0049 : if (empty) + 571 0.0064 0 0 0 0 120 0.0054 : if (empty) : anon_vma_free(anon_vma); - 1642 0.0205 0 0 0 0 337 0.0161 :} + 1911 0.0215 0 0 1 0.0339 344 0.0156 :} : :static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags) - 4 5.0e-05 0 0 0 0 2 9.6e-05 :{ /* anon_vma_ctor total: 9 1.1e-04 0 0 0 0 5 2.4e-04 */ - 2 2.5e-05 0 0 0 0 2 9.6e-05 : if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == + 3 3.4e-05 0 0 0 0 2 9.1e-05 :{ /* anon_vma_ctor total: 9 1.0e-04 0 0 0 0 4 1.8e-04 */ + 1 1.1e-05 0 0 0 0 2 9.1e-05 : if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == : SLAB_CTOR_CONSTRUCTOR) { : struct anon_vma *anon_vma = data; : - 2 2.5e-05 0 0 0 0 0 0 : spin_lock_init(&anon_vma->lock); - 1 1.2e-05 0 0 0 0 0 0 : INIT_LIST_HEAD(&anon_vma->head); + 4 4.5e-05 0 0 0 0 0 0 : spin_lock_init(&anon_vma->lock); + 1 1.1e-05 0 0 0 0 0 0 : INIT_LIST_HEAD(&anon_vma->head); : } - 0 0 0 0 0 0 1 4.8e-05 :} + :} : :void __init anon_vma_init(void) :{ @@ -189,23 +189,23 @@ : * tricky: page_lock_anon_vma rely on RCU to guard against the races. 
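page_lock_anon_vma(), whose hunk follows, recovers the anon_vma from page->mapping, where the rmap code overloads the low pointer bit (PAGE_MAPPING_ANON) as the anonymous marker. A userspace model of the tagged-pointer decode — RCU and the spin_lock are elided, and the struct bodies are stand-ins:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1UL

/* Model of the page->mapping overload used by the rmap code: anonymous
 * pages store (anon_vma | 1) where file pages store an address_space
 * pointer, whose alignment guarantees the low bit is clear. */
struct anon_vma { int dummy; };
struct page { void *mapping; };

static struct anon_vma *page_anon_vma(struct page *page)
{
    uintptr_t m = (uintptr_t)page->mapping;
    if (!(m & PAGE_MAPPING_ANON))
        return NULL;                    /* file-backed page */
    return (struct anon_vma *)(m - PAGE_MAPPING_ANON);
}

int main(void)
{
    static struct anon_vma av;
    struct page anon_page = { (void *)((uintptr_t)&av | PAGE_MAPPING_ANON) };
    struct page file_page = { (void *)&av };
    printf("anon: %p, file: %p\n",
           (void *)page_anon_vma(&anon_page),
           (void *)page_anon_vma(&file_page));
    return 0;
}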
: */ :static struct anon_vma *page_lock_anon_vma(struct page *page) - :{ + 170 0.0019 0 0 0 0 105 0.0048 :{ /* page_lock_anon_vma total: 2155 0.0242 0 0 0 0 1838 0.0835 */ : struct anon_vma *anon_vma = NULL; : unsigned long anon_mapping; : : rcu_read_lock(); - : anon_mapping = (unsigned long) page->mapping; + 67 7.5e-04 0 0 0 0 49 0.0022 : anon_mapping = (unsigned long) page->mapping; : if (!(anon_mapping & PAGE_MAPPING_ANON)) : goto out; : if (!page_mapped(page)) : goto out; : - : anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); - : spin_lock(&anon_vma->lock); + 66 7.4e-04 0 0 0 0 59 0.0027 : anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); + 1306 0.0147 0 0 0 0 1092 0.0496 : spin_lock(&anon_vma->lock); :out: : rcu_read_unlock(); : return anon_vma; - :} + 465 0.0052 0 0 0 0 448 0.0203 :} : :#ifdef CONFIG_MIGRATION :/* @@ -246,10 +246,10 @@ : pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); : unsigned long address; : - : address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); - : if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { + 7 7.9e-05 0 0 0 0 5 2.3e-04 : address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + 203 0.0023 0 0 0 0 37 0.0017 : if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { : /* page should be within any vma from prio_tree_next */ - : BUG_ON(!PageAnon(page)); + 21 2.4e-04 0 0 0 0 5 2.3e-04 : BUG_ON(!PageAnon(page)); : return -EFAULT; : } : return address; @@ -281,7 +281,7 @@ : */ :pte_t *page_check_address(struct page *page, struct mm_struct *mm, : unsigned long address, spinlock_t **ptlp) - :{ /* page_check_address total: 1 1.2e-05 0 0 0 0 0 0 */ + 697 0.0078 0 0 0 0 393 0.0178 :{ /* page_check_address total: 24473 0.2752 0 0 4 0.1358 7677 0.3487 */ : pgd_t *pgd; : pud_t *pud; : pmd_t *pmd; @@ -289,33 +289,33 @@ : spinlock_t *ptl; : : pgd = pgd_offset(mm, address); - : if (!pgd_present(*pgd)) + 485 0.0055 0 0 0 0 91 0.0041 : if (!pgd_present(*pgd)) : return NULL; : : pud = pud_offset(pgd, address); - : if (!pud_present(*pud)) + 1404 0.0158 0 0 0 0 484 0.0220 : if (!pud_present(*pud)) : return NULL; : : pmd = pmd_offset(pud, address); - : if (!pmd_present(*pmd)) + 216 0.0024 0 0 0 0 75 0.0034 : if (!pmd_present(*pmd)) : return NULL; : - : pte = pte_offset_map(pmd, address); + 155 0.0017 0 0 0 0 109 0.0050 : pte = pte_offset_map(pmd, address); : /* Make a quick check before getting the lock */ - : if (!pte_present(*pte)) { + 147 0.0017 0 0 0 0 128 0.0058 : if (!pte_present(*pte)) { : pte_unmap(pte); : return NULL; : } : - 1 1.2e-05 0 0 0 0 0 0 : ptl = pte_lockptr(mm, pmd); - : spin_lock(ptl); - : if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { - : *ptlp = ptl; + 6797 0.0764 0 0 2 0.0679 3109 0.1412 : ptl = pte_lockptr(mm, pmd); + 1612 0.0181 0 0 0 0 442 0.0201 : spin_lock(ptl); + 7808 0.0878 0 0 1 0.0339 1883 0.0855 : if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { + 73 8.2e-04 0 0 0 0 13 5.9e-04 : *ptlp = ptl; : return pte; : } : pte_unmap_unlock(pte, ptl); : return NULL; - :} + 4550 0.0512 0 0 1 0.0339 865 0.0393 :} : :/* : * Subfunctions of page_referenced: page_referenced_one called @@ -323,35 +323,35 @@ : */ :static int page_referenced_one(struct page *page, : struct vm_area_struct *vma, unsigned int *mapcount) - 2 2.5e-05 0 0 0 0 0 0 :{ /* page_referenced_one total: 5 6.2e-05 0 0 0 0 4 1.9e-04 */ - 1 1.2e-05 0 0 0 0 3 1.4e-04 : struct mm_struct *mm = vma->vm_mm; + 543 0.0061 0 0 0 0 125 0.0057 :{ /* page_referenced_one total: 
12703 0.1429 0 0 2 0.0679 4454 0.2023 */ + 3687 0.0415 0 0 0 0 1813 0.0823 : struct mm_struct *mm = vma->vm_mm; : unsigned long address; : pte_t *pte; : spinlock_t *ptl; : int referenced = 0; : : address = vma_address(page, vma); - : if (address == -EFAULT) + 53 6.0e-04 0 0 0 0 26 0.0012 : if (address == -EFAULT) : goto out; : - : pte = page_check_address(page, mm, address, &ptl); - : if (!pte) + 105 0.0012 0 0 0 0 35 0.0016 : pte = page_check_address(page, mm, address, &ptl); + 866 0.0097 0 0 1 0.0339 250 0.0114 : if (!pte) : goto out; : - 1 1.2e-05 0 0 0 0 0 0 : if (ptep_clear_flush_young(vma, address, pte)) + 3391 0.0381 0 0 1 0.0339 895 0.0406 : if (ptep_clear_flush_young(vma, address, pte)) : referenced++; : : /* Pretend the page is referenced if the task has the : swap token and is in the middle of a page fault. */ - : if (mm != current->mm && has_swap_token(mm) && + 193 0.0022 0 0 0 0 85 0.0039 : if (mm != current->mm && has_swap_token(mm) && : rwsem_is_locked(&mm->mmap_sem)) - : referenced++; + 19 2.1e-04 0 0 0 0 4 1.8e-04 : referenced++; : - : (*mapcount)--; + 1190 0.0134 0 0 0 0 162 0.0074 : (*mapcount)--; : pte_unmap_unlock(pte, ptl); - :out: + 8 9.0e-05 0 0 0 0 1 4.5e-05 :out: : return referenced; - 1 1.2e-05 0 0 0 0 0 0 :} + 1377 0.0155 0 0 0 0 483 0.0219 :} : :static int page_referenced_anon(struct page *page) :{ @@ -386,9 +386,9 @@ : * This function is only called from page_referenced for object-based pages. : */ :static int page_referenced_file(struct page *page) - :{ + 4 4.5e-05 0 0 0 0 0 0 :{ /* page_referenced_file total: 684 0.0077 0 0 0 0 156 0.0071 */ : unsigned int mapcount; - : struct address_space *mapping = page->mapping; + 0 0 0 0 0 0 1 4.5e-05 : struct address_space *mapping = page->mapping; : pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); : struct vm_area_struct *vma; : struct prio_tree_iter iter; @@ -407,7 +407,7 @@ : * structure at mapping cannot be freed and reused yet, : * so we can safely take mapping->i_mmap_lock. : */ - : BUG_ON(!PageLocked(page)); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : BUG_ON(!PageLocked(page)); : : spin_lock(&mapping->i_mmap_lock); : @@ -415,22 +415,22 @@ : * i_mmap_lock does not stabilize mapcount at all, but mapcount : * is more likely to be accurate if we note it after spinning. : */ - : mapcount = page_mapcount(page); + 1 1.1e-05 0 0 0 0 0 0 : mapcount = page_mapcount(page); : - : vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { - : if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) + 275 0.0031 0 0 0 0 50 0.0023 : vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + 29 3.3e-04 0 0 0 0 8 3.6e-04 : if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) : == (VM_LOCKED|VM_MAYSHARE)) { : referenced++; : break; : } - : referenced += page_referenced_one(page, vma, &mapcount); - : if (!mapcount) + 59 6.6e-04 0 0 0 0 13 5.9e-04 : referenced += page_referenced_one(page, vma, &mapcount); + 295 0.0033 0 0 0 0 78 0.0035 : if (!mapcount) : break; : } : : spin_unlock(&mapping->i_mmap_lock); : return referenced; - :} + 11 1.2e-04 0 0 0 0 2 9.1e-05 :} : :/** : * page_referenced - test if the page was referenced @@ -441,7 +441,7 @@ : * returns the number of ptes which referenced the page. 
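page_referenced_file() above walks every vma that maps the page, but the page_mapcount() snapshot bounds the walk: once as many ptes have been probed as there are known mappings, the loop stops early. A model of the bounded accumulation, where the per-vma accessed bits are assumed inputs:

#include <stdio.h>

/* Model of the mapcount-bounded loop in page_referenced_file():
 * each probed pte decrements the remaining-mapping budget. */
static int page_referenced_one(int pte_young, unsigned *mapcount)
{
    (*mapcount)--;              /* one fewer mapping left to find */
    return pte_young ? 1 : 0;   /* ptep_clear_flush_young() result */
}

int main(void)
{
    int young[] = { 1, 0, 1, 0, 0 };    /* per-vma accessed bits (assumed) */
    unsigned mapcount = 3;              /* page_mapcount() snapshot */
    int referenced = 0;

    for (int i = 0; i < 5; i++) {
        referenced += page_referenced_one(young[i], &mapcount);
        if (!mapcount)
            break;                      /* all known ptes accounted for */
    }
    printf("referenced by %d ptes\n", referenced);
    return 0;
}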
: */ :int page_referenced(struct page *page, int is_locked) - 5 6.2e-05 0 0 0 0 2 9.6e-05 :{ /* page_referenced total: 21 2.6e-04 0 0 0 0 12 5.7e-04 */ + 1405 0.0158 0 0 0 0 485 0.0220 :{ /* page_referenced total: 5930 0.0667 0 0 0 0 2092 0.0950 */ : int referenced = 0; : : if (page_test_and_clear_young(page)) @@ -450,21 +450,21 @@ : if (TestClearPageReferenced(page)) : referenced++; : - 4 5.0e-05 0 0 0 0 4 1.9e-04 : if (page_mapped(page) && page->mapping) { + 272 0.0031 0 0 0 0 150 0.0068 : if (page_mapped(page) && page->mapping) { : if (PageAnon(page)) - : referenced += page_referenced_anon(page); - : else if (is_locked) - : referenced += page_referenced_file(page); + 197 0.0022 0 0 0 0 80 0.0036 : referenced += page_referenced_anon(page); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : else if (is_locked) + 1 1.1e-05 0 0 0 0 1 4.5e-05 : referenced += page_referenced_file(page); : else if (TestSetPageLocked(page)) : referenced++; : else { : if (page->mapping) : referenced += page_referenced_file(page); - : unlock_page(page); + 9 1.0e-04 0 0 0 0 3 1.4e-04 : unlock_page(page); : } : } : return referenced; - 5 6.2e-05 0 0 0 0 0 0 :} + 636 0.0072 0 0 0 0 328 0.0149 :} : :/** : * page_set_anon_rmap - setup new anonymous rmap @@ -474,21 +474,21 @@ : */ :static void __page_set_anon_rmap(struct page *page, : struct vm_area_struct *vma, unsigned long address) - 3573 0.0445 0 0 1 0.0351 873 0.0417 :{ /* __page_set_anon_rmap total: 38703 0.4823 0 0 4 0.1405 11335 0.5415 */ - 8785 0.1095 0 0 0 0 2775 0.1326 : struct anon_vma *anon_vma = vma->anon_vma; + 3212 0.0361 0 0 1 0.0339 808 0.0367 :{ /* __page_set_anon_rmap total: 53059 0.5967 0 0 4 0.1358 11286 0.5126 */ + 16773 0.1886 0 0 1 0.0339 2960 0.1344 : struct anon_vma *anon_vma = vma->anon_vma; : - 5914 0.0737 0 0 2 0.0702 1200 0.0573 : BUG_ON(!anon_vma); + 11972 0.1346 0 0 1 0.0339 1435 0.0652 : BUG_ON(!anon_vma); : anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - 765 0.0095 0 0 0 0 100 0.0048 : page->mapping = (struct address_space *) anon_vma; + 783 0.0088 0 0 0 0 95 0.0043 : page->mapping = (struct address_space *) anon_vma; : - 562 0.0070 0 0 0 0 51 0.0024 : page->index = linear_page_index(vma, address); + 403 0.0045 0 0 0 0 43 0.0020 : page->index = linear_page_index(vma, address); : : /* : * nr_mapped state can be updated without turning off : * interrupts because it is not modified via interrupt. 
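The rmap add/remove hunks that follow all pivot on the _mapcount convention: the counter is biased to start at -1, so the first mapper (atomic_inc_and_test()) and the last unmapper (atomic_add_negative()) are both detected as zero-crossings, which is exactly when nr_mapped moves. A non-atomic model of that lifecycle:

#include <stdio.h>

/* Model of the _mapcount convention used by the rmap paths below:
 * the counter starts at -1, so the first mapper and the last
 * unmapper are both detected by a zero-crossing. */
struct page { int _mapcount; };
static long nr_mapped;

static void page_add_rmap(struct page *p)
{
    if (++p->_mapcount == 0)    /* atomic_inc_and_test() */
        nr_mapped++;            /* first mapping of this page */
}

static void page_remove_rmap(struct page *p)
{
    if (--p->_mapcount < 0)     /* atomic_add_negative(-1, ...) */
        nr_mapped--;            /* last mapping went away */
}

int main(void)
{
    struct page p = { ._mapcount = -1 };
    page_add_rmap(&p);
    page_add_rmap(&p);
    page_remove_rmap(&p);
    page_remove_rmap(&p);
    printf("nr_mapped=%ld, mapcount=%d\n", nr_mapped, p._mapcount);
    return 0;
}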
: */ - 3618 0.0451 0 0 0 0 600 0.0287 : __inc_page_state(nr_mapped); - 14887 0.1855 0 0 1 0.0351 5701 0.2724 :} + 3699 0.0416 0 0 0 0 518 0.0235 : __inc_page_state(nr_mapped); + 15693 0.1765 0 0 1 0.0339 5396 0.2451 :} : :/** : * page_add_anon_rmap - add pte mapping to an anonymous page @@ -500,11 +500,11 @@ : */ :void page_add_anon_rmap(struct page *page, : struct vm_area_struct *vma, unsigned long address) - :{ - : if (atomic_inc_and_test(&page->_mapcount)) + 50 5.6e-04 0 0 0 0 3 1.4e-04 :{ /* page_add_anon_rmap total: 61 6.9e-04 0 0 0 0 4 1.8e-04 */ + 2 2.2e-05 0 0 0 0 0 0 : if (atomic_inc_and_test(&page->_mapcount)) : __page_set_anon_rmap(page, vma, address); : /* else checking page index and mapping is racy */ - :} + 9 1.0e-04 0 0 0 0 1 4.5e-05 :} : :/* : * page_add_new_anon_rmap - add pte mapping to a new anonymous page @@ -517,10 +517,10 @@ : */ :void page_add_new_anon_rmap(struct page *page, : struct vm_area_struct *vma, unsigned long address) - 1813 0.0226 0 0 0 0 336 0.0161 :{ /* page_add_new_anon_rmap total: 8982 0.1119 0 0 1 0.0351 2943 0.1406 */ - 4059 0.0506 0 0 0 0 645 0.0308 : atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ - 3 3.7e-05 0 0 0 0 0 0 : __page_set_anon_rmap(page, vma, address); - 3107 0.0387 0 0 1 0.0351 1962 0.0937 :} + 819 0.0092 0 0 0 0 177 0.0080 :{ /* page_add_new_anon_rmap total: 9526 0.1071 0 0 0 0 2646 0.1202 */ + 4049 0.0455 0 0 0 0 578 0.0263 : atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ + 13 1.5e-04 0 0 0 0 0 0 : __page_set_anon_rmap(page, vma, address); + 4645 0.0522 0 0 0 0 1891 0.0859 :} : :/** : * page_add_file_rmap - add pte mapping to a file page @@ -529,13 +529,13 @@ : * The caller needs to hold the pte lock. : */ :void page_add_file_rmap(struct page *page) - 3552 0.0443 0 0 2 0.0702 799 0.0382 :{ /* page_add_file_rmap total: 34542 0.4304 0 0 3 0.1054 8361 0.3994 */ - 2 2.5e-05 0 0 0 0 0 0 : BUG_ON(PageAnon(page)); - 16015 0.1996 0 0 1 0.0351 3804 0.1817 : BUG_ON(!pfn_valid(page_to_pfn(page))); - : - 5468 0.0681 0 0 0 0 806 0.0385 : if (atomic_inc_and_test(&page->_mapcount)) - 599 0.0075 0 0 0 0 196 0.0094 : __inc_page_state(nr_mapped); - 7461 0.0930 0 0 0 0 2480 0.1185 :} + 4587 0.0516 0 0 0 0 767 0.0348 :{ /* page_add_file_rmap total: 43528 0.4895 0 0 4 0.1358 7303 0.3317 */ + 5 5.6e-05 0 0 0 0 0 0 : BUG_ON(PageAnon(page)); + 22404 0.2520 0 0 1 0.0339 3143 0.1427 : BUG_ON(!pfn_valid(page_to_pfn(page))); + : + 2132 0.0240 0 0 0 0 346 0.0157 : if (atomic_inc_and_test(&page->_mapcount)) + 1650 0.0186 0 0 0 0 279 0.0127 : __inc_page_state(nr_mapped); + 11535 0.1297 0 0 3 0.1018 2507 0.1139 :} : :/** : * page_remove_rmap - take down pte mapping from a page @@ -544,16 +544,16 @@ : * The caller needs to hold the pte lock. : */ :void page_remove_rmap(struct page *page) - 3740 0.0466 0 0 0 0 3327 0.1589 :{ /* page_remove_rmap total: 17855 0.2225 0 0 1 0.0351 15620 0.7462 */ - 1495 0.0186 0 0 0 0 1890 0.0903 : if (atomic_add_negative(-1, &page->_mapcount)) { - 2133 0.0266 0 0 0 0 1444 0.0690 : if (page_mapcount(page) < 0) { + 3740 0.0421 0 0 3 0.1018 3164 0.1437 :{ /* page_remove_rmap total: 18267 0.2054 0 0 5 0.1697 14491 0.6581 */ + 1416 0.0159 0 0 0 0 1768 0.0803 : if (atomic_add_negative(-1, &page->_mapcount)) { + 2305 0.0259 0 0 0 0 1261 0.0573 : if (page_mapcount(page) < 0) { : printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! 
(%d)\n", page_mapcount(page)); : printk (KERN_EMERG " page->flags = %lx\n", page->flags); : printk (KERN_EMERG " page->count = %x\n", page_count(page)); : printk (KERN_EMERG " page->mapping = %p\n", page->mapping); : } : - 1223 0.0152 0 0 0 0 1774 0.0847 : BUG_ON(page_mapcount(page) < 0); + 1323 0.0149 0 0 0 0 1502 0.0682 : BUG_ON(page_mapcount(page) < 0); : /* : * It would be tidy to reset the PageAnon mapping here, : * but that might overwrite a racing page_add_anon_rmap @@ -565,9 +565,9 @@ : */ : if (page_test_and_clear_dirty(page)) : set_page_dirty(page); - 1900 0.0237 0 0 0 0 652 0.0311 : __dec_page_state(nr_mapped); + 2021 0.0227 0 0 0 0 633 0.0287 : __dec_page_state(nr_mapped); : } - 6705 0.0835 0 0 1 0.0351 6084 0.2907 :} + 6825 0.0768 0 0 2 0.0679 5818 0.2642 :} : :/* : * Subfunctions of try_to_unmap: try_to_unmap_one called @@ -575,7 +575,7 @@ : */ :static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, : int ignore_refs) - 0 0 0 0 0 0 2 9.6e-05 :{ /* try_to_unmap_one total: 2 2.5e-05 0 0 0 0 4 1.9e-04 */ + 36 4.0e-04 0 0 0 0 20 9.1e-04 :{ /* try_to_unmap_one total: 324 0.0036 0 0 0 0 143 0.0065 */ : struct mm_struct *mm = vma->vm_mm; : unsigned long address; : pte_t *pte; @@ -587,8 +587,8 @@ : if (address == -EFAULT) : goto out; : - : pte = page_check_address(page, mm, address, &ptl); - : if (!pte) + 19 2.1e-04 0 0 0 0 8 3.6e-04 : pte = page_check_address(page, mm, address, &ptl); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : if (!pte) : goto out; : : /* @@ -605,43 +605,43 @@ : : /* Nuke the page table entry. */ : flush_cache_page(vma, address, page_to_pfn(page)); - : pteval = ptep_clear_flush(vma, address, pte); + 1 1.1e-05 0 0 0 0 0 0 : pteval = ptep_clear_flush(vma, address, pte); : : /* Move the dirty bit to the physical page now the pte is gone. */ - : if (pte_dirty(pteval)) + 52 5.8e-04 0 0 0 0 6 2.7e-04 : if (pte_dirty(pteval)) : set_page_dirty(page); : : /* Update high watermark before we lower rss */ - : update_hiwater_rss(mm); + 1 1.1e-05 0 0 0 0 2 9.1e-05 : update_hiwater_rss(mm); : - : if (PageAnon(page)) { + 9 1.0e-04 0 0 0 0 5 2.3e-04 : if (PageAnon(page)) { : swp_entry_t entry = { .val = page_private(page) }; : /* : * Store the swap location in the pte. : * See handle_pte_fault() ... 
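Aside: the rmap functions above share one counting protocol: page->_mapcount starts at -1, atomic_inc_and_test() fires exactly when the first mapping takes the count to 0, and atomic_add_negative(-1, ...) fires exactly when the last unmap takes it back to -1, so nr_mapped is touched only at those two edges. A self-contained C11 sketch of the protocol; the helper names mimic the kernel's, the bodies are userspace stand-ins built on <stdatomic.h>:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* atomic_inc_and_test() semantics: true iff the increment produced 0. */
static int inc_and_test(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1 == 0;
}

/* atomic_add_negative() semantics: true iff the result is negative. */
static int add_negative(int i, atomic_int *v)
{
	return atomic_fetch_add(v, i) + i < 0;
}

int main(void)
{
	atomic_int mapcount = -1;	/* _mapcount starts at -1 */
	int nr_mapped = 0;

	if (inc_and_test(&mapcount)) nr_mapped++;	/* 1st pte: -1 -> 0, fires */
	if (inc_and_test(&mapcount)) nr_mapped++;	/* 2nd pte:  0 -> 1, silent */
	if (add_negative(-1, &mapcount)) nr_mapped--;	/* 1 -> 0, silent */
	if (add_negative(-1, &mapcount)) nr_mapped--;	/* 0 -> -1, last unmap */

	assert(nr_mapped == 0 && atomic_load(&mapcount) == -1);
	printf("mapcount protocol ok\n");
	return 0;
}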
: */ - : BUG_ON(!PageSwapCache(page)); - : swap_duplicate(entry); - : if (list_empty(&mm->mmlist)) { + 3 3.4e-05 0 0 0 0 2 9.1e-05 : BUG_ON(!PageSwapCache(page)); + 9 1.0e-04 0 0 0 0 2 9.1e-05 : swap_duplicate(entry); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (list_empty(&mm->mmlist)) { : spin_lock(&mmlist_lock); : if (list_empty(&mm->mmlist)) : list_add(&mm->mmlist, &init_mm.mmlist); : spin_unlock(&mmlist_lock); : } : set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); - 0 0 0 0 0 0 1 4.8e-05 : BUG_ON(pte_file(*pte)); - : dec_mm_counter(mm, anon_rss); + 1 1.1e-05 0 0 0 0 4 1.8e-04 : BUG_ON(pte_file(*pte)); + 2 2.2e-05 0 0 0 0 8 3.6e-04 : dec_mm_counter(mm, anon_rss); : } else : dec_mm_counter(mm, file_rss); : - : page_remove_rmap(page); - : page_cache_release(page); + 7 7.9e-05 0 0 0 0 3 1.4e-04 : page_remove_rmap(page); + 25 2.8e-04 0 0 0 0 16 7.3e-04 : page_cache_release(page); : :out_unmap: : pte_unmap_unlock(pte, ptl); :out: : return ret; - :} + 9 1.0e-04 0 0 0 0 1 4.5e-05 :} : :/* : * objrmap doesn't work for nonlinear VMAs because the assumption that @@ -860,30 +860,30 @@ : * SWAP_FAIL - the page is unswappable : */ :int try_to_unmap(struct page *page, int ignore_refs) - :{ /* try_to_unmap total: 2 2.5e-05 0 0 0 0 0 0 */ + 7 7.9e-05 0 0 0 0 3 1.4e-04 :{ /* try_to_unmap total: 144 0.0016 0 0 0 0 67 0.0030 */ : int ret; : : BUG_ON(!PageLocked(page)); : - : if (PageAnon(page)) + 3 3.4e-05 0 0 0 0 0 0 : if (PageAnon(page)) : ret = try_to_unmap_anon(page, ignore_refs); : else : ret = try_to_unmap_file(page, ignore_refs); : - : if (!page_mapped(page)) + 3 3.4e-05 0 0 0 0 2 9.1e-05 : if (!page_mapped(page)) : ret = SWAP_SUCCESS; : return ret; - :} + 11 1.2e-04 0 0 0 0 4 1.8e-04 :} : /* * Total samples for file : "mm/rmap.c" * - * 130728 1.6289 0 0 14 0.4917 48138 2.2997 + * 202238 2.2745 0 0 21 0.7128 60144 2.7315 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/shmem.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/shmem.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/shmem.c 1969-12-31 19:00:00.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/shmem.c 2006-03-12 07:20:06.000000000 -0500 @@ -0,0 +1,2380 @@ + :/* + : * Resizable virtual memory filesystem for Linux. + : * + : * Copyright (C) 2000 Linus Torvalds. + : * 2000 Transmeta Corp. + : * 2000-2001 Christoph Rohland + : * 2000-2001 SAP AG + : * 2002 Red Hat Inc. + : * Copyright (C) 2002-2005 Hugh Dickins. + : * Copyright (C) 2002-2005 VERITAS Software Corporation. + : * Copyright (C) 2004 Andi Kleen, SuSE Labs + : * + : * Extended attribute support for tmpfs: + : * Copyright (c) 2004, Luke Kenneth Casson Leighton + : * Copyright (c) 2004 Red Hat, Inc., James Morris + : * + : * This file is released under the GPL. + : */ + : + :/* + : * This virtual memory filesystem is heavily based on the ramfs. It + : * extends ramfs by the ability to use swap and honor resource limits + : * which makes it a completely usable filesystem. 
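Aside: stepping back to the per-file summary for mm/rmap.c above — each count is paired with its share of the whole profile, so the session totals fall out by division. A small sketch of that arithmetic, assuming column one is the event count and column two its percentage of all samples in the session (both values taken from the summary lines):

#include <stdio.h>

int main(void)
{
	/* "Total samples for file" lines: count, percent-of-session */
	double count0 = 130728, pct0 = 1.6289;	/* session rc12x0 */
	double count1 = 202238, pct1 = 2.2745;	/* session rc12x2 */

	printf("rc12x0 session total ~= %.0f samples\n", count0 / (pct0 / 100.0));
	printf("rc12x2 session total ~= %.0f samples\n", count1 / (pct1 / 100.0));
	printf("rmap.c samples grew %.1f%% between sessions\n",
	       100.0 * (count1 - count0) / count0);
	return 0;
}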
+ : */ + : + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + :#include + : + :/* This magic number is used in glibc for posix shared memory */ + : + :#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) + :#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) + :#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) + : + :#define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) + :#define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT) + : + :#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) + : + :/* info->flags needs VM_flags to handle pagein/truncate races efficiently */ + :#define SHMEM_PAGEIN VM_READ + :#define SHMEM_TRUNCATE VM_WRITE + : + :/* Definition to limit shmem_truncate's steps between cond_rescheds */ + :#define LATENCY_LIMIT 64 + : + :/* Pretend that each entry is of this size in directory's i_size */ + :#define BOGO_DIRENT_SIZE 20 + : + :/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ + :enum sgp_type { + : SGP_QUICK, /* don't try more than file page cache lookup */ + : SGP_READ, /* don't exceed i_size, don't allocate page */ + : SGP_CACHE, /* don't exceed i_size, may allocate page */ + : SGP_WRITE, /* may exceed i_size, may allocate page */ + :}; + : + :static int shmem_getpage(struct inode *inode, unsigned long idx, + : struct page **pagep, enum sgp_type sgp, int *type); + : + :static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) + :{ + : /* + : * The above definition of ENTRIES_PER_PAGE, and the use of + : * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: + : * might be reconsidered if it ever diverges from PAGE_SIZE. + : */ + : return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT); + :} + : + :static inline void shmem_dir_free(struct page *page) + :{ + : __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT); + :} + : + :static struct page **shmem_dir_map(struct page *page) + :{ + : return (struct page **)kmap_atomic(page, KM_USER0); + :} + : + :static inline void shmem_dir_unmap(struct page **dir) + :{ + : kunmap_atomic(dir, KM_USER0); + :} + : + :static swp_entry_t *shmem_swp_map(struct page *page) + :{ + : return (swp_entry_t *)kmap_atomic(page, KM_USER1); + :} + : + :static inline void shmem_swp_balance_unmap(void) + :{ + : /* + : * When passing a pointer to an i_direct entry, to code which + : * also handles indirect entries and so will shmem_swp_unmap, + : * we must arrange for the preempt count to remain in balance. + : * What kmap_atomic of a lowmem page does depends on config + : * and architecture, so pretend to kmap_atomic some lowmem page. + : */ + : (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); + :} + : + :static inline void shmem_swp_unmap(swp_entry_t *entry) + :{ + : kunmap_atomic(entry, KM_USER1); + :} + : + :static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) + :{ + : return sb->s_fs_info; + :} + : + :/* + : * shmem_file_setup pre-accounts the whole fixed size of a VM object, + : * for shared memory and for shared anonymous (/dev/zero) mappings + : * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), + : * consistent with the pre-accounting of private mappings ... 
+ : */ + :static inline int shmem_acct_size(unsigned long flags, loff_t size) + :{ + : return (flags & VM_ACCOUNT)? + : security_vm_enough_memory(VM_ACCT(size)): 0; + :} + : + :static inline void shmem_unacct_size(unsigned long flags, loff_t size) + :{ + : if (flags & VM_ACCOUNT) + : vm_unacct_memory(VM_ACCT(size)); + :} + : + :/* + : * ... whereas tmpfs objects are accounted incrementally as + : * pages are allocated, in order to allow huge sparse files. + : * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, + : * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. + : */ + :static inline int shmem_acct_block(unsigned long flags) + :{ + : return (flags & VM_ACCOUNT)? + : 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE)); + :} + : + :static inline void shmem_unacct_blocks(unsigned long flags, long pages) + :{ + : if (!(flags & VM_ACCOUNT)) + : vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); + :} + : + :static struct super_operations shmem_ops; + :static struct address_space_operations shmem_aops; + :static struct file_operations shmem_file_operations; + :static struct inode_operations shmem_inode_operations; + :static struct inode_operations shmem_dir_inode_operations; + :static struct vm_operations_struct shmem_vm_ops; + : + :static struct backing_dev_info shmem_backing_dev_info __read_mostly = { + : .ra_pages = 0, /* No readahead */ + : .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, + : .unplug_io_fn = default_unplug_io_fn, + :}; + : + :static LIST_HEAD(shmem_swaplist); + :static DEFINE_SPINLOCK(shmem_swaplist_lock); + : + :static void shmem_free_blocks(struct inode *inode, long pages) + :{ + : struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + : if (sbinfo->max_blocks) { + : spin_lock(&sbinfo->stat_lock); + : sbinfo->free_blocks += pages; + : inode->i_blocks -= pages*BLOCKS_PER_PAGE; + : spin_unlock(&sbinfo->stat_lock); + : } + :} + : + :/* + : * shmem_recalc_inode - recalculate the size of an inode + : * + : * @inode: inode to recalc + : * + : * We have to calculate the free blocks since the mm can drop + : * undirtied hole pages behind our back. + : * + : * But normally info->alloced == inode->i_mapping->nrpages + info->swapped + : * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) + : * + : * It has to be called with the spinlock held. + : */ + :static void shmem_recalc_inode(struct inode *inode) + 4 4.5e-05 0 0 0 0 0 0 :{ /* shmem_recalc_inode total: 4 4.5e-05 0 0 0 0 1 4.5e-05 */ + : struct shmem_inode_info *info = SHMEM_I(inode); + : long freed; + : + : freed = info->alloced - info->swapped - inode->i_mapping->nrpages; + 0 0 0 0 0 0 1 4.5e-05 : if (freed > 0) { + : info->alloced -= freed; + : shmem_unacct_blocks(info->flags, freed); + : shmem_free_blocks(inode, freed); + : } + :} + : + :/* + : * shmem_swp_entry - find the swap vector position in the info structure + : * + : * @info: info structure for the inode + : * @index: index of the page to find + : * @page: optional page to add to the structure. Has to be preset to + : * all zeros + : * + : * If there is no space allocated yet it will return NULL when + : * page is NULL, else it will use the page for the needed block, + : * setting it to NULL on return to indicate that it has been used. + : * + : * The swap vector is organized the following way: + : * + : * There are SHMEM_NR_DIRECT entries directly stored in the + : * shmem_inode_info structure. So small files do not need an addional + : * allocation. 
+ : * + : * For pages with index > SHMEM_NR_DIRECT there is the pointer + : * i_indirect which points to a page which holds in the first half + : * doubly indirect blocks, in the second half triple indirect blocks: + : * + : * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the + : * following layout (for SHMEM_NR_DIRECT == 16): + : * + : * i_indirect -> dir --> 16-19 + : * | +-> 20-23 + : * | + : * +-->dir2 --> 24-27 + : * | +-> 28-31 + : * | +-> 32-35 + : * | +-> 36-39 + : * | + : * +-->dir3 --> 40-43 + : * +-> 44-47 + : * +-> 48-51 + : * +-> 52-55 + : */ + :static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page) + 2 2.2e-05 0 0 0 0 0 0 :{ /* shmem_swp_entry total: 2 2.2e-05 0 0 0 0 1 4.5e-05 */ + : unsigned long offset; + : struct page **dir; + : struct page *subdir; + : + : if (index < SHMEM_NR_DIRECT) { + : shmem_swp_balance_unmap(); + : return info->i_direct+index; + : } + : if (!info->i_indirect) { + : if (page) { + : info->i_indirect = *page; + : *page = NULL; + : } + : return NULL; /* need another page */ + : } + : + : index -= SHMEM_NR_DIRECT; + : offset = index % ENTRIES_PER_PAGE; + : index /= ENTRIES_PER_PAGE; + : dir = shmem_dir_map(info->i_indirect); + : + : if (index >= ENTRIES_PER_PAGE/2) { + : index -= ENTRIES_PER_PAGE/2; + : dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE; + : index %= ENTRIES_PER_PAGE; + : subdir = *dir; + : if (!subdir) { + : if (page) { + : *dir = *page; + : *page = NULL; + : } + : shmem_dir_unmap(dir); + : return NULL; /* need another page */ + : } + : shmem_dir_unmap(dir); + : dir = shmem_dir_map(subdir); + : } + : + : dir += index; + : subdir = *dir; + : if (!subdir) { + : if (!page || !(subdir = *page)) { + : shmem_dir_unmap(dir); + : return NULL; /* need a page */ + : } + : *dir = subdir; + : *page = NULL; + : } + : shmem_dir_unmap(dir); + : return shmem_swp_map(subdir) + offset; + :} + : + :static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) + 2 2.2e-05 0 0 0 0 0 0 :{ /* shmem_swp_set total: 3 3.4e-05 0 0 0 0 0 0 */ + 1 1.1e-05 0 0 0 0 0 0 : long incdec = value? 1: -1; + : + : entry->val = value; + : info->swapped += incdec; + : if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { + : struct page *page = kmap_atomic_to_page(entry); + : set_page_private(page, page_private(page) + incdec); + : } + :} + : + :/* + : * shmem_swp_alloc - get the position of the swap entry for the page. + : * If it does not exist allocate the entry. + : * + : * @info: info structure for the inode + : * @index: index of the page to find + : * @sgp: check and recheck i_size? skip allocation? + : */ + :static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp) + :{ + : struct inode *inode = &info->vfs_inode; + : struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + : struct page *page = NULL; + : swp_entry_t *entry; + : + : if (sgp != SGP_WRITE && + : ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + : return ERR_PTR(-EINVAL); + : + : while (!(entry = shmem_swp_entry(info, index, &page))) { + : if (sgp == SGP_READ) + : return shmem_swp_map(ZERO_PAGE(0)); + : /* + : * Test free_blocks against 1 not 0, since we have 1 data + : * page (and perhaps indirect index pages) yet to allocate: + : * a waste to allocate index if we cannot allocate data. 
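Aside: the walk in shmem_swp_entry above is easiest to follow with the artificial numbers from its own comment, SHMEM_NR_DIRECT == 16 and ENTRIES_PER_PAGE == 4. A standalone sketch of just the index arithmetic, printing where a page index lands in the direct / doubly indirect / triple indirect layout; the output can be checked against the diagram above:

#include <stdio.h>

#define NR_DIRECT 16	/* SHMEM_NR_DIRECT in the comment's example */
#define PER_PAGE  4	/* ENTRIES_PER_PAGE in the comment's example */

static void locate(unsigned long index)
{
	unsigned long orig = index, offset;

	if (index < NR_DIRECT) {
		printf("%3lu: i_direct[%lu]\n", orig, index);
		return;
	}
	index -= NR_DIRECT;
	offset = index % PER_PAGE;	/* entry within the leaf page */
	index /= PER_PAGE;		/* which leaf page */
	if (index < PER_PAGE / 2)
		printf("%3lu: doubly indirect, dir slot %lu, entry %lu\n",
		       orig, index, offset);
	else {
		index -= PER_PAGE / 2;
		printf("%3lu: triple indirect, dir%lu, subpage %lu, entry %lu\n",
		       orig, index / PER_PAGE + 2, index % PER_PAGE, offset);
	}
}

int main(void)
{
	locate(5);	/* i_direct[5] */
	locate(17);	/* "dir --> 16-19" */
	locate(26);	/* "dir2 --> 24-27" */
	locate(44);	/* "dir3 ... 44-47" */
	return 0;
}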
+ : */ + : if (sbinfo->max_blocks) { + : spin_lock(&sbinfo->stat_lock); + : if (sbinfo->free_blocks <= 1) { + : spin_unlock(&sbinfo->stat_lock); + : return ERR_PTR(-ENOSPC); + : } + : sbinfo->free_blocks--; + : inode->i_blocks += BLOCKS_PER_PAGE; + : spin_unlock(&sbinfo->stat_lock); + : } + : + : spin_unlock(&info->lock); + : page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO); + : if (page) + : set_page_private(page, 0); + : spin_lock(&info->lock); + : + : if (!page) { + : shmem_free_blocks(inode, 1); + : return ERR_PTR(-ENOMEM); + : } + : if (sgp != SGP_WRITE && + : ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + : entry = ERR_PTR(-EINVAL); + : break; + : } + : if (info->next_index <= index) + : info->next_index = index + 1; + : } + : if (page) { + : /* another task gave its page, or truncated the file */ + : shmem_free_blocks(inode, 1); + : shmem_dir_free(page); + : } + : if (info->next_index <= index && !IS_ERR(entry)) + : info->next_index = index + 1; + : return entry; + :} + : + :/* + : * shmem_free_swp - free some swap entries in a directory + : * + : * @dir: pointer to the directory + : * @edir: pointer after last entry of the directory + : */ + :static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir) + :{ + : swp_entry_t *ptr; + : int freed = 0; + : + : for (ptr = dir; ptr < edir; ptr++) { + : if (ptr->val) { + : free_swap_and_cache(*ptr); + : *ptr = (swp_entry_t){0}; + : freed++; + : } + : } + : return freed; + :} + : + :static int shmem_map_and_free_swp(struct page *subdir, + : int offset, int limit, struct page ***dir) + :{ + : swp_entry_t *ptr; + : int freed = 0; + : + : ptr = shmem_swp_map(subdir); + : for (; offset < limit; offset += LATENCY_LIMIT) { + : int size = limit - offset; + : if (size > LATENCY_LIMIT) + : size = LATENCY_LIMIT; + : freed += shmem_free_swp(ptr+offset, ptr+offset+size); + : if (need_resched()) { + : shmem_swp_unmap(ptr); + : if (*dir) { + : shmem_dir_unmap(*dir); + : *dir = NULL; + : } + : cond_resched(); + : ptr = shmem_swp_map(subdir); + : } + : } + : shmem_swp_unmap(ptr); + : return freed; + :} + : + :static void shmem_free_pages(struct list_head *next) + :{ + : struct page *page; + : int freed = 0; + : + : do { + : page = container_of(next, struct page, lru); + : next = next->next; + : shmem_dir_free(page); + : freed++; + : if (freed >= LATENCY_LIMIT) { + : cond_resched(); + : freed = 0; + : } + : } while (next); + :} + : + :static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) + :{ + : struct shmem_inode_info *info = SHMEM_I(inode); + : unsigned long idx; + : unsigned long size; + : unsigned long limit; + : unsigned long stage; + : unsigned long diroff; + : struct page **dir; + : struct page *topdir; + : struct page *middir; + : struct page *subdir; + : swp_entry_t *ptr; + : LIST_HEAD(pages_to_free); + : long nr_pages_to_free = 0; + : long nr_swaps_freed = 0; + : int offset; + : int freed; + : int punch_hole = 0; + : + : inode->i_ctime = inode->i_mtime = CURRENT_TIME; + : idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + : if (idx >= info->next_index) + : return; + : + : spin_lock(&info->lock); + : info->flags |= SHMEM_TRUNCATE; + : if (likely(end == (loff_t) -1)) { + : limit = info->next_index; + : info->next_index = idx; + : } else { + : limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + : if (limit > info->next_index) + : limit = info->next_index; + : punch_hole = 1; + : } + : + : topdir = info->i_indirect; + : if (topdir && idx <= SHMEM_NR_DIRECT && 
!punch_hole) { + : info->i_indirect = NULL; + : nr_pages_to_free++; + : list_add(&topdir->lru, &pages_to_free); + : } + : spin_unlock(&info->lock); + : + : if (info->swapped && idx < SHMEM_NR_DIRECT) { + : ptr = info->i_direct; + : size = limit; + : if (size > SHMEM_NR_DIRECT) + : size = SHMEM_NR_DIRECT; + : nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size); + : } + : if (!topdir) + : goto done2; + : + : BUG_ON(limit <= SHMEM_NR_DIRECT); + : limit -= SHMEM_NR_DIRECT; + : idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; + : offset = idx % ENTRIES_PER_PAGE; + : idx -= offset; + : + : dir = shmem_dir_map(topdir); + : stage = ENTRIES_PER_PAGEPAGE/2; + : if (idx < ENTRIES_PER_PAGEPAGE/2) { + : middir = topdir; + : diroff = idx/ENTRIES_PER_PAGE; + : } else { + : dir += ENTRIES_PER_PAGE/2; + : dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE; + : while (stage <= idx) + : stage += ENTRIES_PER_PAGEPAGE; + : middir = *dir; + : if (*dir) { + : diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % + : ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; + : if (!diroff && !offset) { + : *dir = NULL; + : nr_pages_to_free++; + : list_add(&middir->lru, &pages_to_free); + : } + : shmem_dir_unmap(dir); + : dir = shmem_dir_map(middir); + : } else { + : diroff = 0; + : offset = 0; + : idx = stage; + : } + : } + : + : for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) { + : if (unlikely(idx == stage)) { + : shmem_dir_unmap(dir); + : dir = shmem_dir_map(topdir) + + : ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; + : while (!*dir) { + : dir++; + : idx += ENTRIES_PER_PAGEPAGE; + : if (idx >= limit) + : goto done1; + : } + : stage = idx + ENTRIES_PER_PAGEPAGE; + : middir = *dir; + : *dir = NULL; + : nr_pages_to_free++; + : list_add(&middir->lru, &pages_to_free); + : shmem_dir_unmap(dir); + : cond_resched(); + : dir = shmem_dir_map(middir); + : diroff = 0; + : } + : subdir = dir[diroff]; + : if (subdir && page_private(subdir)) { + : size = limit - idx; + : if (size > ENTRIES_PER_PAGE) + : size = ENTRIES_PER_PAGE; + : freed = shmem_map_and_free_swp(subdir, + : offset, size, &dir); + : if (!dir) + : dir = shmem_dir_map(middir); + : nr_swaps_freed += freed; + : if (offset) + : spin_lock(&info->lock); + : set_page_private(subdir, page_private(subdir) - freed); + : if (offset) + : spin_unlock(&info->lock); + : if (!punch_hole) + : BUG_ON(page_private(subdir) > offset); + : } + : if (offset) + : offset = 0; + : else if (subdir && !page_private(subdir)) { + : dir[diroff] = NULL; + : nr_pages_to_free++; + : list_add(&subdir->lru, &pages_to_free); + : } + : } + :done1: + : shmem_dir_unmap(dir); + :done2: + : if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { + : /* + : * Call truncate_inode_pages again: racing shmem_unuse_inode + : * may have swizzled a page in from swap since vmtruncate or + : * generic_delete_inode did it, before we lowered next_index. + : * Also, though shmem_getpage checks i_size before adding to + : * cache, no recheck after: so fix the narrow window there too. + : */ + : truncate_inode_pages_range(inode->i_mapping, start, end); + : } + : + : spin_lock(&info->lock); + : info->flags &= ~SHMEM_TRUNCATE; + : info->swapped -= nr_swaps_freed; + : if (nr_pages_to_free) + : shmem_free_blocks(inode, nr_pages_to_free); + : shmem_recalc_inode(inode); + : spin_unlock(&info->lock); + : + : /* + : * Empty swap vector directory pages to be freed? 
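Aside: note how shmem_truncate_range above computed its starting index: idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT rounds the byte offset up to a whole page, so a partial final page is never freed by this loop (the partial page is dealt with separately; see shmem_notify_change). A quick sketch of the round-up, assuming 4096-byte pages:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed: 4096-byte pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long starts[] = { 0, 1, 4095, 4096, 4097 };
	unsigned i;

	for (i = 0; i < sizeof(starts) / sizeof(starts[0]); i++) {
		unsigned long idx =
			(starts[i] + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		printf("truncate from byte %4lu -> first freed index %lu\n",
		       starts[i], idx);
	}
	return 0;
}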
+ : */ + : if (!list_empty(&pages_to_free)) { + : pages_to_free.prev->next = NULL; + : shmem_free_pages(pages_to_free.next); + : } + :} + : + :static void shmem_truncate(struct inode *inode) + :{ + : shmem_truncate_range(inode, inode->i_size, (loff_t)-1); + :} + : + :static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) + :{ + : struct inode *inode = dentry->d_inode; + : struct page *page = NULL; + : int error; + : + : if (attr->ia_valid & ATTR_SIZE) { + : if (attr->ia_size < inode->i_size) { + : /* + : * If truncating down to a partial page, then + : * if that page is already allocated, hold it + : * in memory until the truncation is over, so + : * truncate_partial_page cannnot miss it were + : * it assigned to swap. + : */ + : if (attr->ia_size & (PAGE_CACHE_SIZE-1)) { + : (void) shmem_getpage(inode, + : attr->ia_size>>PAGE_CACHE_SHIFT, + : &page, SGP_READ, NULL); + : } + : /* + : * Reset SHMEM_PAGEIN flag so that shmem_truncate can + : * detect if any pages might have been added to cache + : * after truncate_inode_pages. But we needn't bother + : * if it's being fully truncated to zero-length: the + : * nrpages check is efficient enough in that case. + : */ + : if (attr->ia_size) { + : struct shmem_inode_info *info = SHMEM_I(inode); + : spin_lock(&info->lock); + : info->flags &= ~SHMEM_PAGEIN; + : spin_unlock(&info->lock); + : } + : } + : } + : + : error = inode_change_ok(inode, attr); + : if (!error) + : error = inode_setattr(inode, attr); + : if (page) + : page_cache_release(page); + : return error; + :} + : + :static void shmem_delete_inode(struct inode *inode) + :{ + : struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + : struct shmem_inode_info *info = SHMEM_I(inode); + : + : if (inode->i_op->truncate == shmem_truncate) { + : truncate_inode_pages(inode->i_mapping, 0); + : shmem_unacct_size(info->flags, inode->i_size); + : inode->i_size = 0; + : shmem_truncate(inode); + : if (!list_empty(&info->swaplist)) { + : spin_lock(&shmem_swaplist_lock); + : list_del_init(&info->swaplist); + : spin_unlock(&shmem_swaplist_lock); + : } + : } + : BUG_ON(inode->i_blocks); + : if (sbinfo->max_inodes) { + : spin_lock(&sbinfo->stat_lock); + : sbinfo->free_inodes++; + : spin_unlock(&sbinfo->stat_lock); + : } + : clear_inode(inode); + :} + : + :static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir) + :{ + : swp_entry_t *ptr; + : + : for (ptr = dir; ptr < edir; ptr++) { + : if (ptr->val == entry.val) + : return ptr - dir; + : } + : return -1; + :} + : + :static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) + :{ + : struct inode *inode; + : unsigned long idx; + : unsigned long size; + : unsigned long limit; + : unsigned long stage; + : struct page **dir; + : struct page *subdir; + : swp_entry_t *ptr; + : int offset; + : + : idx = 0; + : ptr = info->i_direct; + : spin_lock(&info->lock); + : limit = info->next_index; + : size = limit; + : if (size > SHMEM_NR_DIRECT) + : size = SHMEM_NR_DIRECT; + : offset = shmem_find_swp(entry, ptr, ptr+size); + : if (offset >= 0) { + : shmem_swp_balance_unmap(); + : goto found; + : } + : if (!info->i_indirect) + : goto lost2; + : + : dir = shmem_dir_map(info->i_indirect); + : stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2; + : + : for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) { + : if (unlikely(idx == stage)) { + : shmem_dir_unmap(dir-1); + : dir = shmem_dir_map(info->i_indirect) + + : ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; + : while 
(!*dir) { + : dir++; + : idx += ENTRIES_PER_PAGEPAGE; + : if (idx >= limit) + : goto lost1; + : } + : stage = idx + ENTRIES_PER_PAGEPAGE; + : subdir = *dir; + : shmem_dir_unmap(dir); + : dir = shmem_dir_map(subdir); + : } + : subdir = *dir; + : if (subdir && page_private(subdir)) { + : ptr = shmem_swp_map(subdir); + : size = limit - idx; + : if (size > ENTRIES_PER_PAGE) + : size = ENTRIES_PER_PAGE; + : offset = shmem_find_swp(entry, ptr, ptr+size); + : if (offset >= 0) { + : shmem_dir_unmap(dir); + : goto found; + : } + : shmem_swp_unmap(ptr); + : } + : } + :lost1: + : shmem_dir_unmap(dir-1); + :lost2: + : spin_unlock(&info->lock); + : return 0; + :found: + : idx += offset; + : inode = &info->vfs_inode; + : if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) { + : info->flags |= SHMEM_PAGEIN; + : shmem_swp_set(info, ptr + offset, 0); + : } + : shmem_swp_unmap(ptr); + : spin_unlock(&info->lock); + : /* + : * Decrement swap count even when the entry is left behind: + : * try_to_unuse will skip over mms, then reincrement count. + : */ + : swap_free(entry); + : return 1; + :} + : + :/* + : * shmem_unuse() search for an eventually swapped out shmem page. + : */ + :int shmem_unuse(swp_entry_t entry, struct page *page) + :{ + : struct list_head *p, *next; + : struct shmem_inode_info *info; + : int found = 0; + : + : spin_lock(&shmem_swaplist_lock); + : list_for_each_safe(p, next, &shmem_swaplist) { + : info = list_entry(p, struct shmem_inode_info, swaplist); + : if (!info->swapped) + : list_del_init(&info->swaplist); + : else if (shmem_unuse_inode(info, entry, page)) { + : /* move head to start search for next from here */ + : list_move_tail(&shmem_swaplist, &info->swaplist); + : found = 1; + : break; + : } + : } + : spin_unlock(&shmem_swaplist_lock); + : return found; + :} + : + :/* + : * Move the page from the page cache to the swap cache. 
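Aside: the "move head to start search for next from here" trick in shmem_unuse above exploits the kernel's circular doubly-linked lists: re-inserting the head sentinel just before the entry that matched makes the next list_for_each begin at that entry. A self-contained sketch with a minimal list implementation (the kernel recovers the containing struct with container_of; the first-member cast below is the shortcut equivalent):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct node { struct list_head link; int id; };	/* link must stay first */

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

/* Insert e just before head, i.e. at the tail of head's list. */
static void list_add_tail_before(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

int main(void)
{
	struct list_head head, *p;
	struct node n[4];
	int i;

	list_init(&head);
	for (i = 0; i < 4; i++) {
		n[i].id = i;
		list_add_tail_before(&n[i].link, &head);	/* order 0 1 2 3 */
	}

	/* Pretend the scan matched node 2: rotate the head sentinel so
	 * the next scan starts there, as list_move_tail does above. */
	list_del_entry(&head);
	list_add_tail_before(&head, &n[2].link);

	for (p = head.next; p != &head; p = p->next)
		printf("%d ", ((struct node *)p)->id);
	printf("\n");	/* prints: 2 3 0 1 */
	return 0;
}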
+ : */ + :static int shmem_writepage(struct page *page, struct writeback_control *wbc) + :{ /* shmem_writepage total: 12 1.3e-04 0 0 0 0 5 2.3e-04 */ + : struct shmem_inode_info *info; + : swp_entry_t *entry, swap; + : struct address_space *mapping; + : unsigned long index; + : struct inode *inode; + : + : BUG_ON(!PageLocked(page)); + : BUG_ON(page_mapped(page)); + : + : mapping = page->mapping; + : index = page->index; + : inode = mapping->host; + : info = SHMEM_I(inode); + : if (info->flags & VM_LOCKED) + : goto redirty; + : swap = get_swap_page(); + 0 0 0 0 0 0 1 4.5e-05 : if (!swap.val) + : goto redirty; + : + : spin_lock(&info->lock); + : shmem_recalc_inode(inode); + 0 0 0 0 0 0 1 4.5e-05 : if (index >= info->next_index) { + : BUG_ON(!(info->flags & SHMEM_TRUNCATE)); + : goto unlock; + : } + : entry = shmem_swp_entry(info, index, NULL); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : BUG_ON(!entry); + : BUG_ON(entry->val); + : + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (move_to_swap_cache(page, swap) == 0) { + : shmem_swp_set(info, entry, swap.val); + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : if (list_empty(&info->swaplist)) { + : spin_lock(&shmem_swaplist_lock); + : /* move instead of add in case we're racing */ + : list_move_tail(&info->swaplist, &shmem_swaplist); + : spin_unlock(&shmem_swaplist_lock); + : } + 1 1.1e-05 0 0 0 0 1 4.5e-05 : unlock_page(page); + : return 0; + : } + : + : shmem_swp_unmap(entry); + :unlock: + : spin_unlock(&info->lock); + : swap_free(swap); + :redirty: + : set_page_dirty(page); + : return AOP_WRITEPAGE_ACTIVATE; /* Return with the page locked */ + 2 2.2e-05 0 0 0 0 0 0 :} + : + :#ifdef CONFIG_NUMA + :static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) + :{ + : char *nodelist = strchr(value, ':'); + : int err = 1; + : + : if (nodelist) { + : /* NUL-terminate policy string */ + : *nodelist++ = '\0'; + : if (nodelist_parse(nodelist, *policy_nodes)) + : goto out; + : } + : if (!strcmp(value, "default")) { + : *policy = MPOL_DEFAULT; + : /* Don't allow a nodelist */ + : if (!nodelist) + : err = 0; + : } else if (!strcmp(value, "prefer")) { + : *policy = MPOL_PREFERRED; + : /* Insist on a nodelist of one node only */ + : if (nodelist) { + : char *rest = nodelist; + : while (isdigit(*rest)) + : rest++; + : if (!*rest) + : err = 0; + : } + : } else if (!strcmp(value, "bind")) { + : *policy = MPOL_BIND; + : /* Insist on a nodelist */ + : if (nodelist) + : err = 0; + : } else if (!strcmp(value, "interleave")) { + : *policy = MPOL_INTERLEAVE; + : /* Default to nodes online if no nodelist */ + : if (!nodelist) + : *policy_nodes = node_online_map; + : err = 0; + : } + :out: + : /* Restore string for error message */ + : if (nodelist) + : *--nodelist = ':'; + : return err; + :} + : + :static struct page *shmem_swapin_async(struct shared_policy *p, + : swp_entry_t entry, unsigned long idx) + :{ + : struct page *page; + : struct vm_area_struct pvma; + : + : /* Create a pseudo vma that just contains the policy */ + : memset(&pvma, 0, sizeof(struct vm_area_struct)); + : pvma.vm_end = PAGE_SIZE; + : pvma.vm_pgoff = idx; + : pvma.vm_policy = mpol_shared_policy_lookup(p, idx); + : page = read_swap_cache_async(entry, &pvma, 0); + : mpol_free(pvma.vm_policy); + : return page; + :} + : + :struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, + : unsigned long idx) + :{ + : struct shared_policy *p = &info->policy; + : int i, num; + : struct page *page; + : unsigned long offset; + : + : num = valid_swaphandles(entry, &offset); + : for 
(i = 0; i < num; offset++, i++) { + : page = shmem_swapin_async(p, + : swp_entry(swp_type(entry), offset), idx); + : if (!page) + : break; + : page_cache_release(page); + : } + : lru_add_drain(); /* Push any new pages onto the LRU now */ + : return shmem_swapin_async(p, entry, idx); + :} + : + :static struct page * + :shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, + : unsigned long idx) + :{ + : struct vm_area_struct pvma; + : struct page *page; + : + : memset(&pvma, 0, sizeof(struct vm_area_struct)); + : pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + : pvma.vm_pgoff = idx; + : pvma.vm_end = PAGE_SIZE; + : page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0); + : mpol_free(pvma.vm_policy); + : return page; + :} + :#else + :static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) + :{ + : return 1; + :} + : + :static inline struct page * + :shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) + :{ + : swapin_readahead(entry, 0, NULL); + : return read_swap_cache_async(entry, NULL, 0); + :} + : + :static inline struct page * + :shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) + :{ + : return alloc_page(gfp | __GFP_ZERO); + :} + :#endif + : + :/* + : * shmem_getpage - either get the page from swap or allocate a new one + : * + : * If we allocate a new one we do not mark it dirty. That's up to the + : * vm. If we swap it in we mark it dirty since we also free the swap + : * entry since a page cannot live in both the swap and page cache + : */ + :static int shmem_getpage(struct inode *inode, unsigned long idx, + : struct page **pagep, enum sgp_type sgp, int *type) + :{ + : struct address_space *mapping = inode->i_mapping; + : struct shmem_inode_info *info = SHMEM_I(inode); + : struct shmem_sb_info *sbinfo; + : struct page *filepage = *pagep; + : struct page *swappage; + : swp_entry_t *entry; + : swp_entry_t swap; + : int error; + : + : if (idx >= SHMEM_MAX_INDEX) + : return -EFBIG; + : /* + : * Normally, filepage is NULL on entry, and either found + : * uptodate immediately, or allocated and zeroed, or read + : * in under swappage, which is then assigned to filepage. + : * But shmem_prepare_write passes in a locked filepage, + : * which may be found not uptodate by other callers too, + : * and may need to be copied from the swappage read in. + : */ + :repeat: + : if (!filepage) + : filepage = find_lock_page(mapping, idx); + : if (filepage && PageUptodate(filepage)) + : goto done; + : error = 0; + : if (sgp == SGP_QUICK) + : goto failed; + : + : spin_lock(&info->lock); + : shmem_recalc_inode(inode); + : entry = shmem_swp_alloc(info, idx, sgp); + : if (IS_ERR(entry)) { + : spin_unlock(&info->lock); + : error = PTR_ERR(entry); + : goto failed; + : } + : swap = *entry; + : + : if (swap.val) { + : /* Look it up and read it in.. 
*/ + : swappage = lookup_swap_cache(swap); + : if (!swappage) { + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : /* here we actually do the io */ + : if (type && *type == VM_FAULT_MINOR) { + : inc_page_state(pgmajfault); + : *type = VM_FAULT_MAJOR; + : } + : swappage = shmem_swapin(info, swap, idx); + : if (!swappage) { + : spin_lock(&info->lock); + : entry = shmem_swp_alloc(info, idx, sgp); + : if (IS_ERR(entry)) + : error = PTR_ERR(entry); + : else { + : if (entry->val == swap.val) + : error = -ENOMEM; + : shmem_swp_unmap(entry); + : } + : spin_unlock(&info->lock); + : if (error) + : goto failed; + : goto repeat; + : } + : wait_on_page_locked(swappage); + : page_cache_release(swappage); + : goto repeat; + : } + : + : /* We have to do this with page locked to prevent races */ + : if (TestSetPageLocked(swappage)) { + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : wait_on_page_locked(swappage); + : page_cache_release(swappage); + : goto repeat; + : } + : if (!PageSwapCache(swappage)) { + : /* Page migration has occured */ + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : unlock_page(swappage); + : page_cache_release(swappage); + : goto repeat; + : } + : if (PageWriteback(swappage)) { + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : wait_on_page_writeback(swappage); + : unlock_page(swappage); + : page_cache_release(swappage); + : goto repeat; + : } + : if (!PageUptodate(swappage)) { + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : unlock_page(swappage); + : page_cache_release(swappage); + : error = -EIO; + : goto failed; + : } + : + : if (filepage) { + : shmem_swp_set(info, entry, 0); + : shmem_swp_unmap(entry); + : delete_from_swap_cache(swappage); + : spin_unlock(&info->lock); + : copy_highpage(filepage, swappage); + : unlock_page(swappage); + : page_cache_release(swappage); + : flush_dcache_page(filepage); + : SetPageUptodate(filepage); + : set_page_dirty(filepage); + : swap_free(swap); + : } else if (!(error = move_from_swap_cache( + : swappage, idx, mapping))) { + : info->flags |= SHMEM_PAGEIN; + : shmem_swp_set(info, entry, 0); + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : filepage = swappage; + : swap_free(swap); + : } else { + : shmem_swp_unmap(entry); + : spin_unlock(&info->lock); + : unlock_page(swappage); + : page_cache_release(swappage); + : if (error == -ENOMEM) { + : /* let kswapd refresh zone for GFP_ATOMICs */ + : blk_congestion_wait(WRITE, HZ/50); + : } + : goto repeat; + : } + : } else if (sgp == SGP_READ && !filepage) { + : shmem_swp_unmap(entry); + : filepage = find_get_page(mapping, idx); + : if (filepage && + : (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { + : spin_unlock(&info->lock); + : wait_on_page_locked(filepage); + : page_cache_release(filepage); + : filepage = NULL; + : goto repeat; + : } + : spin_unlock(&info->lock); + : } else { + : shmem_swp_unmap(entry); + : sbinfo = SHMEM_SB(inode->i_sb); + : if (sbinfo->max_blocks) { + : spin_lock(&sbinfo->stat_lock); + : if (sbinfo->free_blocks == 0 || + : shmem_acct_block(info->flags)) { + : spin_unlock(&sbinfo->stat_lock); + : spin_unlock(&info->lock); + : error = -ENOSPC; + : goto failed; + : } + : sbinfo->free_blocks--; + : inode->i_blocks += BLOCKS_PER_PAGE; + : spin_unlock(&sbinfo->stat_lock); + : } else if (shmem_acct_block(info->flags)) { + : spin_unlock(&info->lock); + : error = -ENOSPC; + : goto failed; + : } + : + : if (!filepage) { + : spin_unlock(&info->lock); + : filepage = 
shmem_alloc_page(mapping_gfp_mask(mapping), + : info, + : idx); + : if (!filepage) { + : shmem_unacct_blocks(info->flags, 1); + : shmem_free_blocks(inode, 1); + : error = -ENOMEM; + : goto failed; + : } + : + : spin_lock(&info->lock); + : entry = shmem_swp_alloc(info, idx, sgp); + : if (IS_ERR(entry)) + : error = PTR_ERR(entry); + : else { + : swap = *entry; + : shmem_swp_unmap(entry); + : } + : if (error || swap.val || 0 != add_to_page_cache_lru( + : filepage, mapping, idx, GFP_ATOMIC)) { + : spin_unlock(&info->lock); + : page_cache_release(filepage); + : shmem_unacct_blocks(info->flags, 1); + : shmem_free_blocks(inode, 1); + : filepage = NULL; + : if (error) + : goto failed; + : goto repeat; + : } + : info->flags |= SHMEM_PAGEIN; + : } + : + : info->alloced++; + : spin_unlock(&info->lock); + : flush_dcache_page(filepage); + : SetPageUptodate(filepage); + : } + :done: + : if (*pagep != filepage) { + : unlock_page(filepage); + : *pagep = filepage; + : } + : return 0; + : + :failed: + : if (*pagep != filepage) { + : unlock_page(filepage); + : page_cache_release(filepage); + : } + : return error; + :} + : + :struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type) + :{ + : struct inode *inode = vma->vm_file->f_dentry->d_inode; + : struct page *page = NULL; + : unsigned long idx; + : int error; + : + : idx = (address - vma->vm_start) >> PAGE_SHIFT; + : idx += vma->vm_pgoff; + : idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT; + : if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode)) + : return NOPAGE_SIGBUS; + : + : error = shmem_getpage(inode, idx, &page, SGP_CACHE, type); + : if (error) + : return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS; + : + : mark_page_accessed(page); + : return page; + :} + : + :static int shmem_populate(struct vm_area_struct *vma, + : unsigned long addr, unsigned long len, + : pgprot_t prot, unsigned long pgoff, int nonblock) + :{ + : struct inode *inode = vma->vm_file->f_dentry->d_inode; + : struct mm_struct *mm = vma->vm_mm; + : enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE; + : unsigned long size; + : + : size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; + : if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size) + : return -EINVAL; + : + : while ((long) len > 0) { + : struct page *page = NULL; + : int err; + : /* + : * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE + : */ + : err = shmem_getpage(inode, pgoff, &page, sgp, NULL); + : if (err) + : return err; + : /* Page may still be null, but only if nonblock was set. */ + : if (page) { + : mark_page_accessed(page); + : err = install_page(mm, vma, addr, page, prot); + : if (err) { + : page_cache_release(page); + : return err; + : } + : } else if (vma->vm_flags & VM_NONLINEAR) { + : /* No page was found just because we can't read it in + : * now (being here implies nonblock != 0), but the page + : * may exist, so set the PTE to fault it in later. 
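Aside: shmem_nopage above converts a faulting virtual address into a file page index in three steps: offset within the vma in small pages, plus the vma's file offset, then rescaled to page-cache pages. A standalone sketch of the arithmetic, with PAGE_SHIFT == PAGE_CACHE_SHIFT == 12 assumed (so the final shift is by zero) and the example values invented:

#include <stdio.h>

#define PAGE_SHIFT	 12
#define PAGE_CACHE_SHIFT 12

static unsigned long fault_index(unsigned long address,
				 unsigned long vm_start,
				 unsigned long vm_pgoff)
{
	unsigned long idx = (address - vm_start) >> PAGE_SHIFT;

	idx += vm_pgoff;			/* vma may not start at file page 0 */
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;	/* no-op when the sizes match */
	return idx;
}

int main(void)
{
	/* vma mapped at 0x40000000 covering the file from page 3 on;
	 * a fault at 0x40002080 is 2 pages in, so file page 5. */
	printf("idx = %lu\n", fault_index(0x40002080UL, 0x40000000UL, 3));
	return 0;
}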
*/ + : err = install_file_pte(mm, vma, addr, pgoff, prot); + : if (err) + : return err; + : } + : + : len -= PAGE_SIZE; + : addr += PAGE_SIZE; + : pgoff++; + : } + : return 0; + :} + : + :#ifdef CONFIG_NUMA + :int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) + :{ + : struct inode *i = vma->vm_file->f_dentry->d_inode; + : return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); + :} + : + :struct mempolicy * + :shmem_get_policy(struct vm_area_struct *vma, unsigned long addr) + :{ + : struct inode *i = vma->vm_file->f_dentry->d_inode; + : unsigned long idx; + : + : idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + : return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); + :} + :#endif + : + :int shmem_lock(struct file *file, int lock, struct user_struct *user) + :{ + : struct inode *inode = file->f_dentry->d_inode; + : struct shmem_inode_info *info = SHMEM_I(inode); + : int retval = -ENOMEM; + : + : spin_lock(&info->lock); + : if (lock && !(info->flags & VM_LOCKED)) { + : if (!user_shm_lock(inode->i_size, user)) + : goto out_nomem; + : info->flags |= VM_LOCKED; + : } + : if (!lock && (info->flags & VM_LOCKED) && user) { + : user_shm_unlock(inode->i_size, user); + : info->flags &= ~VM_LOCKED; + : } + : retval = 0; + :out_nomem: + : spin_unlock(&info->lock); + : return retval; + :} + : + :int shmem_mmap(struct file *file, struct vm_area_struct *vma) + :{ + : file_accessed(file); + : vma->vm_ops = &shmem_vm_ops; + : return 0; + :} + : + :static struct inode * + :shmem_get_inode(struct super_block *sb, int mode, dev_t dev) + :{ + : struct inode *inode; + : struct shmem_inode_info *info; + : struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + : + : if (sbinfo->max_inodes) { + : spin_lock(&sbinfo->stat_lock); + : if (!sbinfo->free_inodes) { + : spin_unlock(&sbinfo->stat_lock); + : return NULL; + : } + : sbinfo->free_inodes--; + : spin_unlock(&sbinfo->stat_lock); + : } + : + : inode = new_inode(sb); + : if (inode) { + : inode->i_mode = mode; + : inode->i_uid = current->fsuid; + : inode->i_gid = current->fsgid; + : inode->i_blksize = PAGE_CACHE_SIZE; + : inode->i_blocks = 0; + : inode->i_mapping->a_ops = &shmem_aops; + : inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; + : inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + : info = SHMEM_I(inode); + : memset(info, 0, (char *)inode - (char *)info); + : spin_lock_init(&info->lock); + : INIT_LIST_HEAD(&info->swaplist); + : + : switch (mode & S_IFMT) { + : default: + : init_special_inode(inode, mode, dev); + : break; + : case S_IFREG: + : inode->i_op = &shmem_inode_operations; + : inode->i_fop = &shmem_file_operations; + : mpol_shared_policy_init(&info->policy, sbinfo->policy, + : &sbinfo->policy_nodes); + : break; + : case S_IFDIR: + : inode->i_nlink++; + : /* Some things misbehave if size == 0 on a directory */ + : inode->i_size = 2 * BOGO_DIRENT_SIZE; + : inode->i_op = &shmem_dir_inode_operations; + : inode->i_fop = &simple_dir_operations; + : break; + : case S_IFLNK: + : /* + : * Must not load anything in the rbtree, + : * mpol_free_shared_policy will not be called. 
+ : */ + : mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, + : NULL); + : break; + : } + : } else if (sbinfo->max_inodes) { + : spin_lock(&sbinfo->stat_lock); + : sbinfo->free_inodes++; + : spin_unlock(&sbinfo->stat_lock); + : } + : return inode; + :} + : + :#ifdef CONFIG_TMPFS + :static struct inode_operations shmem_symlink_inode_operations; + :static struct inode_operations shmem_symlink_inline_operations; + : + :/* + : * Normally tmpfs makes no use of shmem_prepare_write, but it + : * lets a tmpfs file be used read-write below the loop driver. + : */ + :static int + :shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to) + :{ + : struct inode *inode = page->mapping->host; + : return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL); + :} + : + :static ssize_t + :shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) + :{ + : struct inode *inode = file->f_dentry->d_inode; + : loff_t pos; + : unsigned long written; + : ssize_t err; + : + : if ((ssize_t) count < 0) + : return -EINVAL; + : + : if (!access_ok(VERIFY_READ, buf, count)) + : return -EFAULT; + : + : mutex_lock(&inode->i_mutex); + : + : pos = *ppos; + : written = 0; + : + : err = generic_write_checks(file, &pos, &count, 0); + : if (err || !count) + : goto out; + : + : err = remove_suid(file->f_dentry); + : if (err) + : goto out; + : + : inode->i_ctime = inode->i_mtime = CURRENT_TIME; + : + : do { + : struct page *page = NULL; + : unsigned long bytes, index, offset; + : char *kaddr; + : int left; + : + : offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ + : index = pos >> PAGE_CACHE_SHIFT; + : bytes = PAGE_CACHE_SIZE - offset; + : if (bytes > count) + : bytes = count; + : + : /* + : * We don't hold page lock across copy from user - + : * what would it guard against? - so no deadlock here. + : * But it still may be a good idea to prefault below. + : */ + : + : err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL); + : if (err) + : break; + : + : left = bytes; + : if (PageHighMem(page)) { + : volatile unsigned char dummy; + : __get_user(dummy, buf); + : __get_user(dummy, buf + bytes - 1); + : + : kaddr = kmap_atomic(page, KM_USER0); + : left = __copy_from_user_inatomic(kaddr + offset, + : buf, bytes); + : kunmap_atomic(kaddr, KM_USER0); + : } + : if (left) { + : kaddr = kmap(page); + : left = __copy_from_user(kaddr + offset, buf, bytes); + : kunmap(page); + : } + : + : written += bytes; + : count -= bytes; + : pos += bytes; + : buf += bytes; + : if (pos > inode->i_size) + : i_size_write(inode, pos); + : + : flush_dcache_page(page); + : set_page_dirty(page); + : mark_page_accessed(page); + : page_cache_release(page); + : + : if (left) { + : pos -= left; + : written -= left; + : err = -EFAULT; + : break; + : } + : + : /* + : * Our dirty pages are not counted in nr_dirty, + : * and we do not attempt to balance dirty pages. 
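Aside: the copy loop in shmem_file_write above never crosses a page boundary: each pass copies min(space left in the current page, bytes remaining), so an unaligned write becomes a short head copy, whole pages, and a short tail. A standalone sketch of the chunking, assuming PAGE_CACHE_SIZE == 4096:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL	/* assumed page size */

int main(void)
{
	unsigned long long pos = 4090;	/* 6 bytes before a page boundary */
	unsigned long count = 5000;

	while (count) {
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned long bytes = PAGE_CACHE_SIZE - offset;

		if (bytes > count)
			bytes = count;
		printf("page %llu: offset %4lu, copy %4lu bytes\n",
		       pos / PAGE_CACHE_SIZE, offset, bytes);
		pos += bytes;
		count -= bytes;
	}
	return 0;	/* 6 + 4096 + 898 = 5000 bytes in three passes */
}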
+ : */ + : + : cond_resched(); + : } while (count); + : + : *ppos = pos; + : if (written) + : err = written; + :out: + : mutex_unlock(&inode->i_mutex); + : return err; + :} + : + :static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) + :{ + : struct inode *inode = filp->f_dentry->d_inode; + : struct address_space *mapping = inode->i_mapping; + : unsigned long index, offset; + : + : index = *ppos >> PAGE_CACHE_SHIFT; + : offset = *ppos & ~PAGE_CACHE_MASK; + : + : for (;;) { + : struct page *page = NULL; + : unsigned long end_index, nr, ret; + : loff_t i_size = i_size_read(inode); + : + : end_index = i_size >> PAGE_CACHE_SHIFT; + : if (index > end_index) + : break; + : if (index == end_index) { + : nr = i_size & ~PAGE_CACHE_MASK; + : if (nr <= offset) + : break; + : } + : + : desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL); + : if (desc->error) { + : if (desc->error == -EINVAL) + : desc->error = 0; + : break; + : } + : + : /* + : * We must evaluate after, since reads (unlike writes) + : * are called without i_mutex protection against truncate + : */ + : nr = PAGE_CACHE_SIZE; + : i_size = i_size_read(inode); + : end_index = i_size >> PAGE_CACHE_SHIFT; + : if (index == end_index) { + : nr = i_size & ~PAGE_CACHE_MASK; + : if (nr <= offset) { + : if (page) + : page_cache_release(page); + : break; + : } + : } + : nr -= offset; + : + : if (page) { + : /* + : * If users can be writing to this page using arbitrary + : * virtual addresses, take care about potential aliasing + : * before reading the page on the kernel side. + : */ + : if (mapping_writably_mapped(mapping)) + : flush_dcache_page(page); + : /* + : * Mark the page accessed if we read the beginning. + : */ + : if (!offset) + : mark_page_accessed(page); + : } else { + : page = ZERO_PAGE(0); + : page_cache_get(page); + : } + : + : /* + : * Ok, we have the page, and it's up-to-date, so + : * now we can copy it to user space... + : * + : * The actor routine returns how many bytes were actually used.. + : * NOTE! This may not be the same as how much of a user buffer + : * we filled up (we may be padding etc), so we can only update + : * "pos" here (the actor routine has to update the user buffer + : * pointers and the remaining count). 
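Aside: the "must evaluate after" recheck in do_shmem_file_read above bounds every copy at EOF: on the final page only i_size modulo the page size is valid, and anything past end_index reads as nothing. A standalone sketch of that bound, assuming 4096-byte pages:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed: 4096-byte pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

/* Bytes readable from (index, offset) in a file of i_size bytes. */
static unsigned long readable(unsigned long long i_size,
			      unsigned long index, unsigned long offset)
{
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned long nr = PAGE_CACHE_SIZE;

	if (index > end_index)
		return 0;
	if (index == end_index) {
		nr = i_size & ~PAGE_CACHE_MASK;	/* bytes on the last page */
		if (nr <= offset)
			return 0;
	}
	return nr - offset;
}

int main(void)
{
	/* 10000-byte file: pages 0-1 full, page 2 holds 1808 bytes */
	printf("%lu\n", readable(10000, 1, 100));	/* 3996 */
	printf("%lu\n", readable(10000, 2, 0));		/* 1808 */
	printf("%lu\n", readable(10000, 3, 0));		/* 0 */
	return 0;
}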
+ : */ + : ret = actor(desc, page, offset, nr); + : offset += ret; + : index += offset >> PAGE_CACHE_SHIFT; + : offset &= ~PAGE_CACHE_MASK; + : + : page_cache_release(page); + : if (ret != nr || !desc->count) + : break; + : + : cond_resched(); + : } + : + : *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; + : file_accessed(filp); + :} + : + :static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) + :{ + : read_descriptor_t desc; + : + : if ((ssize_t) count < 0) + : return -EINVAL; + : if (!access_ok(VERIFY_WRITE, buf, count)) + : return -EFAULT; + : if (!count) + : return 0; + : + : desc.written = 0; + : desc.count = count; + : desc.arg.buf = buf; + : desc.error = 0; + : + : do_shmem_file_read(filp, ppos, &desc, file_read_actor); + : if (desc.written) + : return desc.written; + : return desc.error; + :} + : + :static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos, + : size_t count, read_actor_t actor, void *target) + :{ + : read_descriptor_t desc; + : + : if (!count) + : return 0; + : + : desc.written = 0; + : desc.count = count; + : desc.arg.data = target; + : desc.error = 0; + : + : do_shmem_file_read(in_file, ppos, &desc, actor); + : if (desc.written) + : return desc.written; + : return desc.error; + :} + : + :static int shmem_statfs(struct super_block *sb, struct kstatfs *buf) + :{ + : struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + : + : buf->f_type = TMPFS_SUPER_MAGIC; + : buf->f_bsize = PAGE_CACHE_SIZE; + : buf->f_namelen = NAME_MAX; + : spin_lock(&sbinfo->stat_lock); + : if (sbinfo->max_blocks) { + : buf->f_blocks = sbinfo->max_blocks; + : buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; + : } + : if (sbinfo->max_inodes) { + : buf->f_files = sbinfo->max_inodes; + : buf->f_ffree = sbinfo->free_inodes; + : } + : /* else leave those fields 0 like simple_statfs */ + : spin_unlock(&sbinfo->stat_lock); + : return 0; + :} + : + :/* + : * File creation. Allocate an inode, and we're done.. + : */ + :static int + :shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) + :{ + : struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); + : int error = -ENOSPC; + : + : if (inode) { + : error = security_inode_init_security(inode, dir, NULL, NULL, + : NULL); + : if (error) { + : if (error != -EOPNOTSUPP) { + : iput(inode); + : return error; + : } + : error = 0; + : } + : if (dir->i_mode & S_ISGID) { + : inode->i_gid = dir->i_gid; + : if (S_ISDIR(mode)) + : inode->i_mode |= S_ISGID; + : } + : dir->i_size += BOGO_DIRENT_SIZE; + : dir->i_ctime = dir->i_mtime = CURRENT_TIME; + : d_instantiate(dentry, inode); + : dget(dentry); /* Extra count - pin the dentry in core */ + : } + : return error; + :} + : + :static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) + :{ + : int error; + : + : if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) + : return error; + : dir->i_nlink++; + : return 0; + :} + : + :static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, + : struct nameidata *nd) + :{ + : return shmem_mknod(dir, dentry, mode | S_IFREG, 0); + :} + : + :/* + : * Link a file.. 
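Aside: shmem_file_read and shmem_file_sendfile above differ only in the actor they hand to do_shmem_file_read — the same page-walking loop feeds either a copy to a user buffer or a sendfile target through a function pointer, and the actor reports how many bytes it consumed. A simplified userspace sketch of the pattern, with the descriptor trimmed to the fields the sketch uses:

#include <stdio.h>
#include <string.h>

typedef struct {
	size_t written;	/* bytes delivered so far */
	size_t count;	/* bytes still wanted */
	char *buf;	/* destination (for the copy actor) */
} read_descriptor_t;

typedef size_t (*read_actor_t)(read_descriptor_t *, const char *, size_t);

/* file_read_actor analogue: copy into the caller's buffer. */
static size_t copy_actor(read_descriptor_t *desc, const char *data, size_t nr)
{
	if (nr > desc->count)
		nr = desc->count;
	memcpy(desc->buf + desc->written, data, nr);
	desc->written += nr;
	desc->count -= nr;
	return nr;	/* bytes actually consumed */
}

int main(void)
{
	char out[16] = { 0 };
	read_descriptor_t desc = { 0, sizeof(out) - 1, out };
	read_actor_t actor = copy_actor;	/* could be a sendfile actor */

	actor(&desc, "hello ", 6);
	actor(&desc, "world", 5);
	printf("%s (%zu bytes)\n", out, desc.written);
	return 0;
}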
 + :	 */
 + :static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 + :{
 + :	struct inode *inode = old_dentry->d_inode;
 + :	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 + :
 + :	/*
 + :	 * No ordinary (disk based) filesystem counts links as inodes;
 + :	 * but each new link needs a new dentry, pinning lowmem, and
 + :	 * tmpfs dentries cannot be pruned until they are unlinked.
 + :	 */
 + :	if (sbinfo->max_inodes) {
 + :		spin_lock(&sbinfo->stat_lock);
 + :		if (!sbinfo->free_inodes) {
 + :			spin_unlock(&sbinfo->stat_lock);
 + :			return -ENOSPC;
 + :		}
 + :		sbinfo->free_inodes--;
 + :		spin_unlock(&sbinfo->stat_lock);
 + :	}
 + :
 + :	dir->i_size += BOGO_DIRENT_SIZE;
 + :	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 + :	inode->i_nlink++;
 + :	atomic_inc(&inode->i_count);	/* New dentry reference */
 + :	dget(dentry);		/* Extra pinning count for the created dentry */
 + :	d_instantiate(dentry, inode);
 + :	return 0;
 + :}
 + :
 + :static int shmem_unlink(struct inode *dir, struct dentry *dentry)
 + :{
 + :	struct inode *inode = dentry->d_inode;
 + :
 + :	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
 + :		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 + :		if (sbinfo->max_inodes) {
 + :			spin_lock(&sbinfo->stat_lock);
 + :			sbinfo->free_inodes++;
 + :			spin_unlock(&sbinfo->stat_lock);
 + :		}
 + :	}
 + :
 + :	dir->i_size -= BOGO_DIRENT_SIZE;
 + :	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 + :	inode->i_nlink--;
 + :	dput(dentry);	/* Undo the count from "create" - this does all the work */
 + :	return 0;
 + :}
 + :
 + :static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 + :{
 + :	if (!simple_empty(dentry))
 + :		return -ENOTEMPTY;
 + :
 + :	dir->i_nlink--;
 + :	return shmem_unlink(dir, dentry);
 + :}
 + :
 + :/*
 + : * The VFS layer already does all the dentry stuff for rename;
 + : * we just have to decrement the usage count for the target if
 + : * it exists so that the VFS layer correctly frees it when it
 + : * gets overwritten.
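Aside: tmpfs directory sizes are pure bookkeeping. shmem_get_inode starts every directory at 2 * BOGO_DIRENT_SIZE ("some things misbehave if size == 0 on a directory"), and shmem_mknod, shmem_link and shmem_unlink above add or subtract BOGO_DIRENT_SIZE per name; nothing is actually stored. A trivial sketch of the resulting i_size:

#include <stdio.h>

#define BOGO_DIRENT_SIZE 20	/* pretend size of one directory entry */

int main(void)
{
	long i_size = 2 * BOGO_DIRENT_SIZE;	/* freshly created directory */

	i_size += BOGO_DIRENT_SIZE;	/* shmem_mknod: create a file */
	i_size += BOGO_DIRENT_SIZE;	/* shmem_link: hard-link it */
	i_size -= BOGO_DIRENT_SIZE;	/* shmem_unlink: drop one name */
	printf("directory i_size = %ld\n", i_size);	/* 60 */
	return 0;
}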
+ : */ + :static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) + :{ + : struct inode *inode = old_dentry->d_inode; + : int they_are_dirs = S_ISDIR(inode->i_mode); + : + : if (!simple_empty(new_dentry)) + : return -ENOTEMPTY; + : + : if (new_dentry->d_inode) { + : (void) shmem_unlink(new_dir, new_dentry); + : if (they_are_dirs) + : old_dir->i_nlink--; + : } else if (they_are_dirs) { + : old_dir->i_nlink--; + : new_dir->i_nlink++; + : } + : + : old_dir->i_size -= BOGO_DIRENT_SIZE; + : new_dir->i_size += BOGO_DIRENT_SIZE; + : old_dir->i_ctime = old_dir->i_mtime = + : new_dir->i_ctime = new_dir->i_mtime = + : inode->i_ctime = CURRENT_TIME; + : return 0; + :} + : + :static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) + :{ + : int error; + : int len; + : struct inode *inode; + : struct page *page = NULL; + : char *kaddr; + : struct shmem_inode_info *info; + : + : len = strlen(symname) + 1; + : if (len > PAGE_CACHE_SIZE) + : return -ENAMETOOLONG; + : + : inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); + : if (!inode) + : return -ENOSPC; + : + : error = security_inode_init_security(inode, dir, NULL, NULL, + : NULL); + : if (error) { + : if (error != -EOPNOTSUPP) { + : iput(inode); + : return error; + : } + : error = 0; + : } + : + : info = SHMEM_I(inode); + : inode->i_size = len-1; + : if (len <= (char *)inode - (char *)info) { + : /* do it inline */ + : memcpy(info, symname, len); + : inode->i_op = &shmem_symlink_inline_operations; + : } else { + : error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); + : if (error) { + : iput(inode); + : return error; + : } + : inode->i_op = &shmem_symlink_inode_operations; + : kaddr = kmap_atomic(page, KM_USER0); + : memcpy(kaddr, symname, len); + : kunmap_atomic(kaddr, KM_USER0); + : set_page_dirty(page); + : page_cache_release(page); + : } + : if (dir->i_mode & S_ISGID) + : inode->i_gid = dir->i_gid; + : dir->i_size += BOGO_DIRENT_SIZE; + : dir->i_ctime = dir->i_mtime = CURRENT_TIME; + : d_instantiate(dentry, inode); + : dget(dentry); + : return 0; + :} + : + :static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) + :{ + : nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); + : return NULL; + :} + : + :static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) + :{ + : struct page *page = NULL; + : int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); + : nd_set_link(nd, res ? 
ERR_PTR(res) : kmap(page)); + : return page; + :} + : + :static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + :{ + : if (!IS_ERR(nd_get_link(nd))) { + : struct page *page = cookie; + : kunmap(page); + : mark_page_accessed(page); + : page_cache_release(page); + : } + :} + : + :static struct inode_operations shmem_symlink_inline_operations = { + : .readlink = generic_readlink, + : .follow_link = shmem_follow_link_inline, + :}; + : + :static struct inode_operations shmem_symlink_inode_operations = { + : .truncate = shmem_truncate, + : .readlink = generic_readlink, + : .follow_link = shmem_follow_link, + : .put_link = shmem_put_link, + :}; + : + :static int shmem_parse_options(char *options, int *mode, uid_t *uid, + : gid_t *gid, unsigned long *blocks, unsigned long *inodes, + : int *policy, nodemask_t *policy_nodes) + :{ + : char *this_char, *value, *rest; + : + : while (options != NULL) { + : this_char = options; + : for (;;) { + : /* + : * NUL-terminate this option: unfortunately, + : * mount options form a comma-separated list, + : * but mpol's nodelist may also contain commas. + : */ + : options = strchr(options, ','); + : if (options == NULL) + : break; + : options++; + : if (!isdigit(*options)) { + : options[-1] = '\0'; + : break; + : } + : } + : if (!*this_char) + : continue; + : if ((value = strchr(this_char,'=')) != NULL) { + : *value++ = 0; + : } else { + : printk(KERN_ERR + : "tmpfs: No value for mount option '%s'\n", + : this_char); + : return 1; + : } + : + : if (!strcmp(this_char,"size")) { + : unsigned long long size; + : size = memparse(value,&rest); + : if (*rest == '%') { + : size <<= PAGE_SHIFT; + : size *= totalram_pages; + : do_div(size, 100); + : rest++; + : } + : if (*rest) + : goto bad_val; + : *blocks = size >> PAGE_CACHE_SHIFT; + : } else if (!strcmp(this_char,"nr_blocks")) { + : *blocks = memparse(value,&rest); + : if (*rest) + : goto bad_val; + : } else if (!strcmp(this_char,"nr_inodes")) { + : *inodes = memparse(value,&rest); + : if (*rest) + : goto bad_val; + : } else if (!strcmp(this_char,"mode")) { + : if (!mode) + : continue; + : *mode = simple_strtoul(value,&rest,8); + : if (*rest) + : goto bad_val; + : } else if (!strcmp(this_char,"uid")) { + : if (!uid) + : continue; + : *uid = simple_strtoul(value,&rest,0); + : if (*rest) + : goto bad_val; + : } else if (!strcmp(this_char,"gid")) { + : if (!gid) + : continue; + : *gid = simple_strtoul(value,&rest,0); + : if (*rest) + : goto bad_val; + : } else if (!strcmp(this_char,"mpol")) { + : if (shmem_parse_mpol(value,policy,policy_nodes)) + : goto bad_val; + : } else { + : printk(KERN_ERR "tmpfs: Bad mount option %s\n", + : this_char); + : return 1; + : } + : } + : return 0; + : + :bad_val: + : printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", + : value, this_char); + : return 1; + : + :} + : + :static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) + :{ + : struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + : unsigned long max_blocks = sbinfo->max_blocks; + : unsigned long max_inodes = sbinfo->max_inodes; + : int policy = sbinfo->policy; + : nodemask_t policy_nodes = sbinfo->policy_nodes; + : unsigned long blocks; + : unsigned long inodes; + : int error = -EINVAL; + : + : if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, + : &max_inodes, &policy, &policy_nodes)) + : return error; + : + : spin_lock(&sbinfo->stat_lock); + : blocks = sbinfo->max_blocks - sbinfo->free_blocks; + : inodes = sbinfo->max_inodes - sbinfo->free_inodes; + : if 
(max_blocks < blocks) + : goto out; + : if (max_inodes < inodes) + : goto out; + : /* + : * Those tests also disallow limited->unlimited while any are in + : * use, so i_blocks will always be zero when max_blocks is zero; + : * but we must separately disallow unlimited->limited, because + : * in that case we have no record of how much is already in use. + : */ + : if (max_blocks && !sbinfo->max_blocks) + : goto out; + : if (max_inodes && !sbinfo->max_inodes) + : goto out; + : + : error = 0; + : sbinfo->max_blocks = max_blocks; + : sbinfo->free_blocks = max_blocks - blocks; + : sbinfo->max_inodes = max_inodes; + : sbinfo->free_inodes = max_inodes - inodes; + : sbinfo->policy = policy; + : sbinfo->policy_nodes = policy_nodes; + :out: + : spin_unlock(&sbinfo->stat_lock); + : return error; + :} + :#endif + : + :static void shmem_put_super(struct super_block *sb) + :{ + : kfree(sb->s_fs_info); + : sb->s_fs_info = NULL; + :} + : + :static int shmem_fill_super(struct super_block *sb, + : void *data, int silent) + :{ + : struct inode *inode; + : struct dentry *root; + : int mode = S_IRWXUGO | S_ISVTX; + : uid_t uid = current->fsuid; + : gid_t gid = current->fsgid; + : int err = -ENOMEM; + : struct shmem_sb_info *sbinfo; + : unsigned long blocks = 0; + : unsigned long inodes = 0; + : int policy = MPOL_DEFAULT; + : nodemask_t policy_nodes = node_online_map; + : + :#ifdef CONFIG_TMPFS + : /* + : * Per default we only allow half of the physical ram per + : * tmpfs instance, limiting inodes to one per page of lowmem; + : * but the internal instance is left unlimited. + : */ + : if (!(sb->s_flags & MS_NOUSER)) { + : blocks = totalram_pages / 2; + : inodes = totalram_pages - totalhigh_pages; + : if (inodes > blocks) + : inodes = blocks; + : if (shmem_parse_options(data, &mode, &uid, &gid, &blocks, + : &inodes, &policy, &policy_nodes)) + : return -EINVAL; + : } + :#else + : sb->s_flags |= MS_NOUSER; + :#endif + : + : /* Round up to L1_CACHE_BYTES to resist false sharing */ + : sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), + : L1_CACHE_BYTES), GFP_KERNEL); + : if (!sbinfo) + : return -ENOMEM; + : + : spin_lock_init(&sbinfo->stat_lock); + : sbinfo->max_blocks = blocks; + : sbinfo->free_blocks = blocks; + : sbinfo->max_inodes = inodes; + : sbinfo->free_inodes = inodes; + : sbinfo->policy = policy; + : sbinfo->policy_nodes = policy_nodes; + : + : sb->s_fs_info = sbinfo; + : sb->s_maxbytes = SHMEM_MAX_BYTES; + : sb->s_blocksize = PAGE_CACHE_SIZE; + : sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + : sb->s_magic = TMPFS_SUPER_MAGIC; + : sb->s_op = &shmem_ops; + : + : inode = shmem_get_inode(sb, S_IFDIR | mode, 0); + : if (!inode) + : goto failed; + : inode->i_uid = uid; + : inode->i_gid = gid; + : root = d_alloc_root(inode); + : if (!root) + : goto failed_iput; + : sb->s_root = root; + : return 0; + : + :failed_iput: + : iput(inode); + :failed: + : shmem_put_super(sb); + : return err; + :} + : + :static kmem_cache_t *shmem_inode_cachep; + : + :static struct inode *shmem_alloc_inode(struct super_block *sb) + :{ + : struct shmem_inode_info *p; + : p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL); + : if (!p) + : return NULL; + : return &p->vfs_inode; + :} + : + :static void shmem_destroy_inode(struct inode *inode) + :{ + : if ((inode->i_mode & S_IFMT) == S_IFREG) { + : /* only struct inode is valid if it's an inline symlink */ + : mpol_free_shared_policy(&SHMEM_I(inode)->policy); + : } + : kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); + :} + : + :static void 
init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) + :{ + : struct shmem_inode_info *p = (struct shmem_inode_info *) foo; + : + : if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == + : SLAB_CTOR_CONSTRUCTOR) { + : inode_init_once(&p->vfs_inode); + : } + :} + : + :static int init_inodecache(void) + :{ + : shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", + : sizeof(struct shmem_inode_info), + : 0, 0, init_once, NULL); + : if (shmem_inode_cachep == NULL) + : return -ENOMEM; + : return 0; + :} + : + :static void destroy_inodecache(void) + :{ + : if (kmem_cache_destroy(shmem_inode_cachep)) + : printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n"); + :} + : + :static struct address_space_operations shmem_aops = { + : .writepage = shmem_writepage, + : .set_page_dirty = __set_page_dirty_nobuffers, + :#ifdef CONFIG_TMPFS + : .prepare_write = shmem_prepare_write, + : .commit_write = simple_commit_write, + :#endif + :}; + : + :static struct file_operations shmem_file_operations = { + : .mmap = shmem_mmap, + :#ifdef CONFIG_TMPFS + : .llseek = generic_file_llseek, + : .read = shmem_file_read, + : .write = shmem_file_write, + : .fsync = simple_sync_file, + : .sendfile = shmem_file_sendfile, + :#endif + :}; + : + :static struct inode_operations shmem_inode_operations = { + : .truncate = shmem_truncate, + : .setattr = shmem_notify_change, + : .truncate_range = shmem_truncate_range, + :}; + : + :static struct inode_operations shmem_dir_inode_operations = { + :#ifdef CONFIG_TMPFS + : .create = shmem_create, + : .lookup = simple_lookup, + : .link = shmem_link, + : .unlink = shmem_unlink, + : .symlink = shmem_symlink, + : .mkdir = shmem_mkdir, + : .rmdir = shmem_rmdir, + : .mknod = shmem_mknod, + : .rename = shmem_rename, + :#endif + :}; + : + :static struct super_operations shmem_ops = { + : .alloc_inode = shmem_alloc_inode, + : .destroy_inode = shmem_destroy_inode, + :#ifdef CONFIG_TMPFS + : .statfs = shmem_statfs, + : .remount_fs = shmem_remount_fs, + :#endif + : .delete_inode = shmem_delete_inode, + : .drop_inode = generic_delete_inode, + : .put_super = shmem_put_super, + :}; + : + :static struct vm_operations_struct shmem_vm_ops = { + : .nopage = shmem_nopage, + : .populate = shmem_populate, + :#ifdef CONFIG_NUMA + : .set_policy = shmem_set_policy, + : .get_policy = shmem_get_policy, + :#endif + :}; + : + : + :static struct super_block *shmem_get_sb(struct file_system_type *fs_type, + : int flags, const char *dev_name, void *data) + :{ + : return get_sb_nodev(fs_type, flags, data, shmem_fill_super); + :} + : + :static struct file_system_type tmpfs_fs_type = { + : .owner = THIS_MODULE, + : .name = "tmpfs", + : .get_sb = shmem_get_sb, + : .kill_sb = kill_litter_super, + :}; + :static struct vfsmount *shm_mnt; + : + :static int __init init_tmpfs(void) + :{ + : int error; + : + : error = init_inodecache(); + : if (error) + : goto out3; + : + : error = register_filesystem(&tmpfs_fs_type); + : if (error) { + : printk(KERN_ERR "Could not register tmpfs\n"); + : goto out2; + : } + :#ifdef CONFIG_TMPFS + : devfs_mk_dir("shm"); + :#endif + : shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER, + : tmpfs_fs_type.name, NULL); + : if (IS_ERR(shm_mnt)) { + : error = PTR_ERR(shm_mnt); + : printk(KERN_ERR "Could not kern_mount tmpfs\n"); + : goto out1; + : } + : return 0; + : + :out1: + : unregister_filesystem(&tmpfs_fs_type); + :out2: + : destroy_inodecache(); + :out3: + : shm_mnt = ERR_PTR(error); + : return error; + :} + :module_init(init_tmpfs) + : + :/* + : * 
shmem_file_setup - get an unlinked file living in tmpfs + : * + : * @name: name for dentry (to be seen in /proc//maps + : * @size: size to be set for the file + : * + : */ + :struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) + :{ + : int error; + : struct file *file; + : struct inode *inode; + : struct dentry *dentry, *root; + : struct qstr this; + : + : if (IS_ERR(shm_mnt)) + : return (void *)shm_mnt; + : + : if (size < 0 || size > SHMEM_MAX_BYTES) + : return ERR_PTR(-EINVAL); + : + : if (shmem_acct_size(flags, size)) + : return ERR_PTR(-ENOMEM); + : + : error = -ENOMEM; + : this.name = name; + : this.len = strlen(name); + : this.hash = 0; /* will go */ + : root = shm_mnt->mnt_root; + : dentry = d_alloc(root, &this); + : if (!dentry) + : goto put_memory; + : + : error = -ENFILE; + : file = get_empty_filp(); + : if (!file) + : goto put_dentry; + : + : error = -ENOSPC; + : inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); + : if (!inode) + : goto close_file; + : + : SHMEM_I(inode)->flags = flags & VM_ACCOUNT; + : d_instantiate(dentry, inode); + : inode->i_size = size; + : inode->i_nlink = 0; /* It is unlinked */ + : file->f_vfsmnt = mntget(shm_mnt); + : file->f_dentry = dentry; + : file->f_mapping = inode->i_mapping; + : file->f_op = &shmem_file_operations; + : file->f_mode = FMODE_WRITE | FMODE_READ; + : return file; + : + :close_file: + : put_filp(file); + :put_dentry: + : dput(dentry); + :put_memory: + : shmem_unacct_size(flags, size); + : return ERR_PTR(error); + :} + : + :/* + : * shmem_zero_setup - setup a shared anonymous mapping + : * + : * @vma: the vma to be mmapped is prepared by do_mmap_pgoff + : */ + :int shmem_zero_setup(struct vm_area_struct *vma) + :{ + : struct file *file; + : loff_t size = vma->vm_end - vma->vm_start; + : + : file = shmem_file_setup("dev/zero", size, vma->vm_flags); + : if (IS_ERR(file)) + : return PTR_ERR(file); + : + : if (vma->vm_file) + : fput(vma->vm_file); + : vma->vm_file = file; + : vma->vm_ops = &shmem_vm_ops; + : return 0; + :} +/* + * Total samples for file : "mm/shmem.c" + * + * 18 2.0e-04 0 0 0 0 6 2.7e-04 + */ + + +/* + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 + * + * Interpretation of command line: + * Output annotated source file with samples + * Output all files + * + * CPU: AMD64 processors, speed 2600 MHz (estimated) + * Counted CPU_CLK_UNHALTED events (Cycles outside of halt state) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted HARDWARE_INTERRUPTS events (Number of taken hardware interrupts) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted MISALIGNED_DATA_REFS events (Misaligned data references) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted RETIRED_INSNS events (Retired instructions (includes exceptions, interrupts, re-syncs)) with a unit mask of 0x00 (No unit mask) count 10000 + */ diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab.c 2006-03-12 07:20:05.000000000 -0500 @@ -684,7 +684,7 @@ : :static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) :{ - 4 5.0e-05 0 0 0 0 3 1.4e-04 : return cachep->array[smp_processor_id()]; + 5 5.6e-05 0 0 0 0 6 2.7e-04 : return 
cachep->array[smp_processor_id()]; :} : :static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags) @@ -875,32 +875,32 @@ : :static void __drain_alien_cache(struct kmem_cache *cachep, : struct array_cache *ac, int node) - 200 0.0025 0 0 0 0 23 0.0011 :{ /* __drain_alien_cache total: 422 0.0053 0 0 0 0 97 0.0046 */ - 51 6.4e-04 0 0 0 0 13 6.2e-04 : struct kmem_list3 *rl3 = cachep->nodelists[node]; + 154 0.0017 0 0 0 0 12 5.4e-04 :{ /* __drain_alien_cache total: 403 0.0045 0 0 0 0 86 0.0039 */ + 61 6.9e-04 0 0 0 0 7 3.2e-04 : struct kmem_list3 *rl3 = cachep->nodelists[node]; : - 7 8.7e-05 0 0 0 0 1 4.8e-05 : if (ac->avail) { - 38 4.7e-04 0 0 0 0 2 9.6e-05 : spin_lock(&rl3->list_lock); - 36 4.5e-04 0 0 0 0 20 9.6e-04 : free_block(cachep, ac->entry, ac->avail, node); - 6 7.5e-05 0 0 0 0 8 3.8e-04 : ac->avail = 0; + 4 4.5e-05 0 0 0 0 0 0 : if (ac->avail) { + 41 4.6e-04 0 0 0 0 3 1.4e-04 : spin_lock(&rl3->list_lock); + 54 6.1e-04 0 0 0 0 26 0.0012 : free_block(cachep, ac->entry, ac->avail, node); + 9 1.0e-04 0 0 0 0 5 2.3e-04 : ac->avail = 0; : spin_unlock(&rl3->list_lock); : } - 79 9.8e-04 0 0 0 0 27 0.0013 :} + 70 7.9e-04 0 0 0 0 26 0.0012 :} : :static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien) - 12 1.5e-04 0 0 0 0 5 2.4e-04 :{ /* drain_alien_cache total: 1628 0.0203 0 0 1 0.0351 257 0.0123 */ + 15 1.7e-04 0 0 0 0 3 1.4e-04 :{ /* drain_alien_cache total: 1399 0.0157 0 0 1 0.0339 227 0.0103 */ : int i = 0; : struct array_cache *ac; : unsigned long flags; : : for_each_online_node(i) { - 52 6.5e-04 0 0 0 0 7 3.3e-04 : ac = alien[i]; - 7 8.7e-05 0 0 0 0 3 1.4e-04 : if (ac) { - 92 0.0011 0 0 0 0 15 7.2e-04 : spin_lock_irqsave(&ac->lock, flags); - 212 0.0026 0 0 0 0 21 0.0010 : __drain_alien_cache(cachep, ac, i); - 7 8.7e-05 0 0 0 0 4 1.9e-04 : spin_unlock_irqrestore(&ac->lock, flags); + 43 4.8e-04 0 0 0 0 18 8.2e-04 : ac = alien[i]; + 1 1.1e-05 0 0 0 0 6 2.7e-04 : if (ac) { + 64 7.2e-04 0 0 0 0 11 5.0e-04 : spin_lock_irqsave(&ac->lock, flags); + 193 0.0022 0 0 1 0.0339 14 6.4e-04 : __drain_alien_cache(cachep, ac, i); + 7 7.9e-05 0 0 0 0 4 1.8e-04 : spin_unlock_irqrestore(&ac->lock, flags); : } : } - 14 1.7e-04 0 0 0 0 16 7.6e-04 :} + 12 1.3e-04 0 0 0 0 8 3.6e-04 :} :#else : :#define drain_alien_cache(cachep, alien) do { } while (0) @@ -1349,23 +1349,23 @@ : * Interface to system's page release. 
: */ :static void kmem_freepages(struct kmem_cache *cachep, void *addr) - 2 2.5e-05 0 0 0 0 1 4.8e-05 :{ /* kmem_freepages total: 184 0.0023 0 0 0 0 71 0.0034 */ - 26 3.2e-04 0 0 0 0 5 2.4e-04 : unsigned long i = (1 << cachep->gfporder); - 13 1.6e-04 0 0 0 0 5 2.4e-04 : struct page *page = virt_to_page(addr); + 4 4.5e-05 0 0 0 0 0 0 :{ /* kmem_freepages total: 320 0.0036 0 0 0 0 68 0.0031 */ + 49 5.5e-04 0 0 0 0 4 1.8e-04 : unsigned long i = (1 << cachep->gfporder); + 24 2.7e-04 0 0 0 0 3 1.4e-04 : struct page *page = virt_to_page(addr); : const unsigned long nr_freed = i; : - 27 3.4e-04 0 0 0 0 8 3.8e-04 : while (i--) { - 18 2.2e-04 0 0 0 0 1 4.8e-05 : if (!TestClearPageSlab(page)) + 41 4.6e-04 0 0 0 0 12 5.4e-04 : while (i--) { + 22 2.5e-04 0 0 0 0 5 2.3e-04 : if (!TestClearPageSlab(page)) : BUG(); - 19 2.4e-04 0 0 0 0 2 9.6e-05 : page++; + 29 3.3e-04 0 0 0 0 2 9.1e-05 : page++; : } - 18 2.2e-04 0 0 0 0 4 1.9e-04 : sub_page_state(nr_slab, nr_freed); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : if (current->reclaim_state) - : current->reclaim_state->reclaimed_slab += nr_freed; - 11 1.4e-04 0 0 0 0 1 4.8e-05 : free_pages((unsigned long)addr, cachep->gfporder); - 15 1.9e-04 0 0 0 0 10 4.8e-04 : if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - 6 7.5e-05 0 0 0 0 5 2.4e-04 : atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); - 15 1.9e-04 0 0 0 0 15 7.2e-04 :} + 32 3.6e-04 0 0 0 0 4 1.8e-04 : sub_page_state(nr_slab, nr_freed); + 3 3.4e-05 0 0 0 0 0 0 : if (current->reclaim_state) + 2 2.2e-05 0 0 0 0 0 0 : current->reclaim_state->reclaimed_slab += nr_freed; + 20 2.2e-04 0 0 0 0 1 4.5e-05 : free_pages((unsigned long)addr, cachep->gfporder); + 29 3.3e-04 0 0 0 0 11 5.0e-04 : if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + 6 6.7e-05 0 0 0 0 6 2.7e-04 : atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); + 33 3.7e-04 0 0 0 0 11 5.0e-04 :} : :static void kmem_rcu_free(struct rcu_head *head) :{ @@ -1565,11 +1565,11 @@ :#else :static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) :{ - 5 6.2e-05 0 0 0 0 0 0 : if (cachep->dtor) { + 2 2.2e-05 0 0 0 0 0 0 : if (cachep->dtor) { : int i; - : for (i = 0; i < cachep->num; i++) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : for (i = 0; i < cachep->num; i++) { : void *objp = slabp->s_mem + cachep->buffer_size * i; - 7 8.7e-05 0 0 0 0 4 1.9e-04 : (cachep->dtor) (objp, cachep, 0); + 8 9.0e-05 0 0 0 0 4 1.8e-04 : (cachep->dtor) (objp, cachep, 0); : } : } :} @@ -1581,11 +1581,11 @@ : * The cache-lock is not held/needed. 
: */ :static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) - 18 2.2e-04 0 0 0 0 3 1.4e-04 :{ /* slab_destroy total: 81 0.0010 0 0 0 0 26 0.0012 */ - 1 1.2e-05 0 0 0 0 0 0 : void *addr = slabp->s_mem - slabp->colouroff; + 25 2.8e-04 0 0 0 0 6 2.7e-04 :{ /* slab_destroy total: 116 0.0013 0 0 0 0 30 0.0014 */ + : void *addr = slabp->s_mem - slabp->colouroff; : : slab_destroy_objs(cachep, slabp); - 27 3.4e-04 0 0 0 0 9 4.3e-04 : if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { + 31 3.5e-04 0 0 0 0 5 2.3e-04 : if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { : struct slab_rcu *slab_rcu; : : slab_rcu = (struct slab_rcu *)slabp; @@ -1593,11 +1593,11 @@ : slab_rcu->addr = addr; : call_rcu(&slab_rcu->head, kmem_rcu_free); : } else { - 4 5.0e-05 0 0 0 0 4 1.9e-04 : kmem_freepages(cachep, addr); - 9 1.1e-04 0 0 0 0 3 1.4e-04 : if (OFF_SLAB(cachep)) - : kmem_cache_free(cachep->slabp_cache, slabp); + 10 1.1e-04 0 0 0 0 3 1.4e-04 : kmem_freepages(cachep, addr); + 24 2.7e-04 0 0 0 0 3 1.4e-04 : if (OFF_SLAB(cachep)) + 1 1.1e-05 0 0 0 0 2 9.1e-05 : kmem_cache_free(cachep->slabp_cache, slabp); : } - 8 1.0e-04 0 0 0 0 3 1.4e-04 :} + 11 1.2e-04 0 0 0 0 5 2.3e-04 :} : :/* For setting up all the kmem_list3s for cache whose buffer_size is same : as size of kmem_list3. */ @@ -2221,16 +2221,16 @@ : : if (OFF_SLAB(cachep)) { : /* Slab management obj is off-slab. */ - 1 1.2e-05 0 0 0 0 0 0 : slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); + 2 2.2e-05 0 0 0 0 0 0 : slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); : if (!slabp) : return NULL; : } else { - : slabp = objp + colour_off; - 11 1.4e-04 0 0 0 0 2 9.6e-05 : colour_off += cachep->slab_size; + 0 0 0 0 0 0 1 4.5e-05 : slabp = objp + colour_off; + 2 2.2e-05 0 0 0 0 1 4.5e-05 : colour_off += cachep->slab_size; : } - 6 7.5e-05 0 0 0 0 2 9.6e-05 : slabp->inuse = 0; - : slabp->colouroff = colour_off; - 12 1.5e-04 0 0 0 0 4 1.9e-04 : slabp->s_mem = objp + colour_off; + 13 1.5e-04 0 0 0 0 0 0 : slabp->inuse = 0; + 0 0 0 0 0 0 1 4.5e-05 : slabp->colouroff = colour_off; + 10 1.1e-04 0 0 0 0 0 0 : slabp->s_mem = objp + colour_off; : : return slabp; :} @@ -2301,25 +2301,25 @@ :} : :static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid) - 386 0.0048 0 0 0 0 332 0.0159 :{ - 402 0.0050 0 0 0 0 260 0.0124 : void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size); /* slab_get_obj total: 950 0.0118 0 0 0 0 800 0.0382 */ + 363 0.0041 0 0 0 0 326 0.0148 :{ + 437 0.0049 0 0 0 0 296 0.0134 : void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size); /* slab_get_obj total: 1003 0.0113 0 0 0 0 862 0.0391 */ : kmem_bufctl_t next; : - 15 1.9e-04 0 0 0 0 12 5.7e-04 : slabp->inuse++; + 13 1.5e-04 0 0 0 0 5 2.3e-04 : slabp->inuse++; : next = slab_bufctl(slabp)[slabp->free]; :#if DEBUG : slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; : WARN_ON(slabp->nodeid != nodeid); :#endif - 59 7.4e-04 0 0 0 0 72 0.0034 : slabp->free = next; + 80 9.0e-04 0 0 0 0 83 0.0038 : slabp->free = next; : : return objp; - 88 0.0011 0 0 0 0 124 0.0059 :} + 110 0.0012 0 0 0 0 152 0.0069 :} : :static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp, : int nodeid) :{ - 269 0.0034 0 0 0 0 134 0.0064 : unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size; + 384 0.0043 0 0 0 0 217 0.0099 : unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size; : :#if DEBUG : /* Verify that the slab belongs to the intended node */ @@ -2331,8 +2331,8 @@ : BUG(); : } :#endif - 9585 
0.1194 0 0 3 0.1054 2420 0.1156 : slab_bufctl(slabp)[objnr] = slabp->free; - 60 7.5e-04 0 0 0 0 2 9.6e-05 : slabp->free = objnr; + 11725 0.1319 0 0 1 0.0339 3054 0.1387 : slab_bufctl(slabp)[objnr] = slabp->free; + 66 7.4e-04 0 0 0 0 2 9.1e-05 : slabp->free = objnr; : slabp->inuse--; :} : @@ -2343,12 +2343,12 @@ : : /* Nasty!!!!!! I hope this is OK. */ : i = 1 << cachep->gfporder; - 108 0.0013 0 0 0 0 16 7.6e-04 : page = virt_to_page(objp); + 99 0.0011 0 0 0 0 11 5.0e-04 : page = virt_to_page(objp); : do { : page_set_cache(page, cachep); : page_set_slab(page, slabp); - 2 2.5e-05 0 0 0 0 1 4.8e-05 : page++; - 7 8.7e-05 0 0 0 0 3 1.4e-04 : } while (--i); + 6 6.7e-05 0 0 0 0 1 4.5e-05 : page++; + 7 7.9e-05 0 0 0 0 1 4.5e-05 : } while (--i); :} : :/* @@ -2356,7 +2356,7 @@ : * kmem_cache_alloc() when there are no active objs left in a cache. : */ :static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) - 77 9.6e-04 0 0 0 0 17 8.1e-04 :{ /* cache_grow total: 1563 0.0195 0 0 0 0 631 0.0301 */ + 64 7.2e-04 0 0 0 0 31 0.0014 :{ /* cache_grow total: 1672 0.0188 0 0 0 0 708 0.0322 */ : struct slab *slabp; : void *objp; : size_t offset; @@ -2367,14 +2367,14 @@ : /* Be lazy and only check for valid flags here, : * keeping it out of the critical path in kmem_cache_alloc(). : */ - 2 2.5e-05 0 0 0 0 0 0 : if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) + 3 3.4e-05 0 0 0 0 1 4.5e-05 : if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) : BUG(); - 4 5.0e-05 0 0 0 0 2 9.6e-05 : if (flags & SLAB_NO_GROW) + 8 9.0e-05 0 0 0 0 2 9.1e-05 : if (flags & SLAB_NO_GROW) : return 0; : : ctor_flags = SLAB_CTOR_CONSTRUCTOR; - 3 3.7e-05 0 0 0 0 1 4.8e-05 : local_flags = (flags & SLAB_LEVEL_MASK); - 35 4.4e-04 0 0 0 0 5 2.4e-04 : if (!(local_flags & __GFP_WAIT)) + 1 1.1e-05 0 0 0 0 1 4.5e-05 : local_flags = (flags & SLAB_LEVEL_MASK); + 40 4.5e-04 0 0 0 0 16 7.3e-04 : if (!(local_flags & __GFP_WAIT)) : /* : * Not allowed to sleep. Need to tell a constructor about : * this - it might need to know... @@ -2383,20 +2383,20 @@ : : /* Take the l3 list lock to change the colour_next on this node */ : check_irq_off(); - 2 2.5e-05 0 0 0 0 0 0 : l3 = cachep->nodelists[nodeid]; - 3 3.7e-05 0 0 0 0 1 4.8e-05 : spin_lock(&l3->list_lock); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : l3 = cachep->nodelists[nodeid]; + 5 5.6e-05 0 0 0 0 0 0 : spin_lock(&l3->list_lock); : : /* Get colour for the slab, and cal the next value. */ - 57 7.1e-04 0 0 0 0 13 6.2e-04 : offset = l3->colour_next; + 51 5.7e-04 0 0 0 0 16 7.3e-04 : offset = l3->colour_next; : l3->colour_next++; - 32 4.0e-04 0 0 0 0 13 6.2e-04 : if (l3->colour_next >= cachep->colour) - 0 0 0 0 0 0 1 4.8e-05 : l3->colour_next = 0; + 33 3.7e-04 0 0 0 0 10 4.5e-04 : if (l3->colour_next >= cachep->colour) + 1 1.1e-05 0 0 0 0 0 0 : l3->colour_next = 0; : spin_unlock(&l3->list_lock); : - 33 4.1e-04 0 0 0 0 6 2.9e-04 : offset *= cachep->colour_off; + 24 2.7e-04 0 0 0 0 6 2.7e-04 : offset *= cachep->colour_off; : - 2 2.5e-05 0 0 0 0 4 1.9e-04 : if (local_flags & __GFP_WAIT) - 3 3.7e-05 0 0 0 0 2 9.6e-05 : local_irq_enable(); + 7 7.9e-05 0 0 0 0 4 1.8e-04 : if (local_flags & __GFP_WAIT) + 3 3.4e-05 0 0 0 0 1 4.5e-05 : local_irq_enable(); : : /* : * The test for missing atomic flag is performed here, rather than @@ -2409,27 +2409,27 @@ : /* Get mem for the objs. 
: * Attempt to allocate a physical page from 'nodeid', : */ - 30 3.7e-04 0 0 0 0 7 3.3e-04 : if (!(objp = kmem_getpages(cachep, flags, nodeid))) + 49 5.5e-04 0 0 0 0 2 9.1e-05 : if (!(objp = kmem_getpages(cachep, flags, nodeid))) : goto failed; : : /* Get slab management. */ - 0 0 0 0 0 0 2 9.6e-05 : if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags))) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags))) : goto opps1; : - 3 3.7e-05 0 0 0 0 0 0 : slabp->nodeid = nodeid; + 2 2.2e-05 0 0 0 0 0 0 : slabp->nodeid = nodeid; : set_slab_attr(cachep, slabp, objp); : : cache_init_objs(cachep, slabp, ctor_flags); : - 1 1.2e-05 0 0 0 0 1 4.8e-05 : if (local_flags & __GFP_WAIT) - : local_irq_disable(); + 3 3.4e-05 0 0 0 0 2 9.1e-05 : if (local_flags & __GFP_WAIT) + 1 1.1e-05 0 0 0 0 0 0 : local_irq_disable(); : check_irq_off(); - 15 1.9e-04 0 0 0 0 19 9.1e-04 : spin_lock(&l3->list_lock); + 17 1.9e-04 0 0 0 0 19 8.6e-04 : spin_lock(&l3->list_lock); : : /* Make slab active. */ - 34 4.2e-04 0 0 0 0 12 5.7e-04 : list_add_tail(&slabp->list, &(l3->slabs_free)); + 34 3.8e-04 0 0 0 0 12 5.4e-04 : list_add_tail(&slabp->list, &(l3->slabs_free)); : STATS_INC_GROWN(cachep); - 2 2.5e-05 0 0 0 0 0 0 : l3->free_objects += cachep->num; + 2 2.2e-05 0 0 0 0 0 0 : l3->free_objects += cachep->num; : spin_unlock(&l3->list_lock); : return 1; : opps1: @@ -2438,7 +2438,7 @@ : if (local_flags & __GFP_WAIT) : local_irq_disable(); : return 0; - 53 6.6e-04 0 0 0 0 2 9.6e-05 :} + 48 5.4e-04 0 0 0 0 5 2.3e-04 :} : :#if DEBUG : @@ -2573,7 +2573,7 @@ :#endif : :static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) - 177 0.0022 0 0 0 0 23 0.0011 :{ /* cache_alloc_refill total: 5093 0.0635 0 0 1 0.0351 1541 0.0736 */ + 177 0.0020 0 0 0 0 24 0.0011 :{ /* cache_alloc_refill total: 5317 0.0598 0 0 0 0 1696 0.0770 */ : int batchcount; : struct kmem_list3 *l3; : struct array_cache *ac; @@ -2581,86 +2581,86 @@ : check_irq_off(); : ac = cpu_cache_get(cachep); : retry: - 4 5.0e-05 0 0 0 0 5 2.4e-04 : batchcount = ac->batchcount; - 29 3.6e-04 0 0 0 0 9 4.3e-04 : if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { + 10 1.1e-04 0 0 0 0 1 4.5e-05 : batchcount = ac->batchcount; + 19 2.1e-04 0 0 0 0 3 1.4e-04 : if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { : /* if there was little recent activity on this : * cache, then perform only a partial refill. : * Otherwise we could generate refill bouncing. 
: */ : batchcount = BATCHREFILL_LIMIT; : } - 96 0.0012 0 0 0 0 14 6.7e-04 : l3 = cachep->nodelists[numa_node_id()]; + 93 0.0010 0 0 0 0 23 0.0010 : l3 = cachep->nodelists[numa_node_id()]; : - 22 2.7e-04 0 0 0 0 2 9.6e-05 : BUG_ON(ac->avail > 0 || !l3); - 62 7.7e-04 0 0 0 0 8 3.8e-04 : spin_lock(&l3->list_lock); + 26 2.9e-04 0 0 0 0 6 2.7e-04 : BUG_ON(ac->avail > 0 || !l3); + 47 5.3e-04 0 0 0 0 5 2.3e-04 : spin_lock(&l3->list_lock); : - 583 0.0073 0 0 0 0 124 0.0059 : if (l3->shared) { + 480 0.0054 0 0 0 0 118 0.0054 : if (l3->shared) { : struct array_cache *shared_array = l3->shared; - 4 5.0e-05 0 0 0 0 1 4.8e-05 : if (shared_array->avail) { + 9 1.0e-04 0 0 0 0 4 1.8e-04 : if (shared_array->avail) { : if (batchcount > shared_array->avail) - 1 1.2e-05 0 0 0 0 0 0 : batchcount = shared_array->avail; + 2 2.2e-05 0 0 0 0 0 0 : batchcount = shared_array->avail; : shared_array->avail -= batchcount; - 3 3.7e-05 0 0 0 0 0 0 : ac->avail = batchcount; - 1238 0.0154 0 0 1 0.0351 62 0.0030 : memcpy(ac->entry, + 2 2.2e-05 0 0 0 0 0 0 : ac->avail = batchcount; + 1301 0.0146 0 0 0 0 65 0.0030 : memcpy(ac->entry, : &(shared_array->entry[shared_array->avail]), : sizeof(void *) * batchcount); - : shared_array->touched = 1; + 2 2.2e-05 0 0 0 0 0 0 : shared_array->touched = 1; : goto alloc_done; : } : } - 266 0.0033 0 0 0 0 29 0.0014 : while (batchcount > 0) { + 278 0.0031 0 0 0 0 26 0.0012 : while (batchcount > 0) { : struct list_head *entry; : struct slab *slabp; : /* Get slab alloc is to come from. */ - 1 1.2e-05 0 0 0 0 3 1.4e-04 : entry = l3->slabs_partial.next; - 98 0.0012 0 0 0 0 21 0.0010 : if (entry == &l3->slabs_partial) { + 3 3.4e-05 0 0 0 0 2 9.1e-05 : entry = l3->slabs_partial.next; + 92 0.0010 0 0 0 0 13 5.9e-04 : if (entry == &l3->slabs_partial) { : l3->free_touched = 1; - 2 2.5e-05 0 0 0 0 0 0 : entry = l3->slabs_free.next; - 50 6.2e-04 0 0 0 0 9 4.3e-04 : if (entry == &l3->slabs_free) + 2 2.2e-05 0 0 0 0 0 0 : entry = l3->slabs_free.next; + 39 4.4e-04 0 0 0 0 5 2.3e-04 : if (entry == &l3->slabs_free) : goto must_grow; : } : - 120 0.0015 0 0 0 0 20 9.6e-04 : slabp = list_entry(entry, struct slab, list); + 116 0.0013 0 0 0 0 20 9.1e-04 : slabp = list_entry(entry, struct slab, list); : check_slabp(cachep, slabp); : check_spinlock_acquired(cachep); - 1258 0.0157 0 0 0 0 463 0.0221 : while (slabp->inuse < cachep->num && batchcount--) { + 1395 0.0157 0 0 0 0 479 0.0218 : while (slabp->inuse < cachep->num && batchcount--) { : STATS_INC_ALLOCED(cachep); : STATS_INC_ACTIVE(cachep); : STATS_SET_HIGH(cachep); : - 433 0.0054 0 0 0 0 482 0.0230 : ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, - 66 8.2e-04 0 0 0 0 9 4.3e-04 : numa_node_id()); + 435 0.0049 0 0 0 0 557 0.0253 : ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, + 86 9.7e-04 0 0 0 0 23 0.0010 : numa_node_id()); : } : check_slabp(cachep, slabp); : : /* move slabp to correct slabp list: */ : list_del(&slabp->list); - 29 3.6e-04 0 0 0 0 29 0.0014 : if (slabp->free == BUFCTL_END) - 5 6.2e-05 0 0 0 0 0 0 : list_add(&slabp->list, &l3->slabs_full); + 30 3.4e-04 0 0 0 0 36 0.0016 : if (slabp->free == BUFCTL_END) + 11 1.2e-04 0 0 0 0 0 0 : list_add(&slabp->list, &l3->slabs_full); : else : list_add(&slabp->list, &l3->slabs_partial); : } : : must_grow: - 107 0.0013 0 0 0 0 12 5.7e-04 : l3->free_objects -= ac->avail; + 105 0.0012 0 0 0 0 19 8.6e-04 : l3->free_objects -= ac->avail; : alloc_done: : spin_unlock(&l3->list_lock); : - 66 8.2e-04 0 0 0 0 6 2.9e-04 : if (unlikely(!ac->avail)) { + 78 8.8e-04 0 0 0 0 4 1.8e-04 : if (unlikely(!ac->avail)) { 
: int x; - 21 2.6e-04 0 0 0 0 5 2.4e-04 : x = cache_grow(cachep, flags, numa_node_id()); + 28 3.1e-04 0 0 0 0 15 6.8e-04 : x = cache_grow(cachep, flags, numa_node_id()); : : // cache_grow can reenable interrupts, then ac could change. : ac = cpu_cache_get(cachep); - 1 1.2e-05 0 0 0 0 0 0 : if (!x && ac->avail == 0) // no objects in sight? abort + : if (!x && ac->avail == 0) // no objects in sight? abort : return NULL; : - 10 1.2e-04 0 0 0 0 4 1.9e-04 : if (!ac->avail) // objects refilled by interrupt? + 11 1.2e-04 0 0 0 0 5 2.3e-04 : if (!ac->avail) // objects refilled by interrupt? : goto retry; : } - 34 4.2e-04 0 0 0 0 5 2.4e-04 : ac->touched = 1; - 44 5.5e-04 0 0 0 0 9 4.3e-04 : return ac->entry[--ac->avail]; - 20 2.5e-04 0 0 0 0 17 8.1e-04 :} + 47 5.3e-04 0 0 0 0 15 6.8e-04 : ac->touched = 1; + 72 8.1e-04 0 0 0 0 12 5.4e-04 : return ac->entry[--ac->avail]; + 21 2.4e-04 0 0 0 0 12 5.4e-04 :} : :static inline void :cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags) @@ -2771,24 +2771,24 @@ : * A interface to enable slab creation on nodeid : */ :static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) - 22 2.7e-04 0 0 0 0 15 7.2e-04 :{ /* __cache_alloc_node total: 1048 0.0131 0 0 0 0 334 0.0160 */ + 35 3.9e-04 0 0 0 0 20 9.1e-04 :{ /* __cache_alloc_node total: 1353 0.0152 0 0 0 0 458 0.0208 */ : struct list_head *entry; : struct slab *slabp; : struct kmem_list3 *l3; : void *obj; : int x; : - 8 1.0e-04 0 0 0 0 4 1.9e-04 : l3 = cachep->nodelists[nodeid]; - 38 4.7e-04 0 0 0 0 16 7.6e-04 : BUG_ON(!l3); + 19 2.1e-04 0 0 0 0 11 5.0e-04 : l3 = cachep->nodelists[nodeid]; + 36 4.0e-04 0 0 0 0 15 6.8e-04 : BUG_ON(!l3); : : retry: : check_irq_off(); - 8 1.0e-04 0 0 0 0 5 2.4e-04 : spin_lock(&l3->list_lock); - 30 3.7e-04 0 0 0 0 27 0.0013 : entry = l3->slabs_partial.next; - 13 1.6e-04 0 0 0 0 0 0 : if (entry == &l3->slabs_partial) { + 14 1.6e-04 0 0 0 0 9 4.1e-04 : spin_lock(&l3->list_lock); + 47 5.3e-04 0 0 0 0 47 0.0021 : entry = l3->slabs_partial.next; + 21 2.4e-04 0 0 0 0 4 1.8e-04 : if (entry == &l3->slabs_partial) { : l3->free_touched = 1; : entry = l3->slabs_free.next; - 1 1.2e-05 0 0 0 0 0 0 : if (entry == &l3->slabs_free) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (entry == &l3->slabs_free) : goto must_grow; : } : @@ -2800,17 +2800,17 @@ : STATS_INC_ACTIVE(cachep); : STATS_SET_HIGH(cachep); : - 35 4.4e-04 0 0 0 0 9 4.3e-04 : BUG_ON(slabp->inuse == cachep->num); + 40 4.5e-04 0 0 0 0 16 7.3e-04 : BUG_ON(slabp->inuse == cachep->num); : - 27 3.4e-04 0 0 0 0 15 7.2e-04 : obj = slab_get_obj(cachep, slabp, nodeid); + 48 5.4e-04 0 0 0 0 19 8.6e-04 : obj = slab_get_obj(cachep, slabp, nodeid); : check_slabp(cachep, slabp); : vx_slab_alloc(cachep, flags); - 19 2.4e-04 0 0 0 0 3 1.4e-04 : l3->free_objects--; + 18 2.0e-04 0 0 0 0 3 1.4e-04 : l3->free_objects--; : /* move slabp to correct slabp list: */ : list_del(&slabp->list); : : if (slabp->free == BUFCTL_END) { - 10 1.2e-04 0 0 0 0 2 9.6e-05 : list_add(&slabp->list, &l3->slabs_full); + 20 2.2e-04 0 0 0 0 4 1.8e-04 : list_add(&slabp->list, &l3->slabs_full); : } else { : list_add(&slabp->list, &l3->slabs_partial); : } @@ -2820,15 +2820,15 @@ : : must_grow: : spin_unlock(&l3->list_lock); - : x = cache_grow(cachep, flags, nodeid); + 1 1.1e-05 0 0 0 0 0 0 : x = cache_grow(cachep, flags, nodeid); : - 1 1.2e-05 0 0 0 0 0 0 : if (!x) + 1 1.1e-05 0 0 0 0 0 0 : if (!x) : return NULL; : : goto retry; : done: : return obj; - 129 0.0016 0 0 0 0 61 0.0029 :} + 151 0.0017 0 0 0 0 54 0.0025 :} :#endif : :/* @@ -2836,32 +2836,32 @@ 
: */ :static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, : int node) - 153 0.0019 0 0 0 0 28 0.0013 :{ /* free_block total: 13822 0.1722 0 0 5 0.1756 3463 0.1654 */ + 181 0.0020 0 0 0 0 36 0.0016 :{ /* free_block total: 16795 0.1889 0 0 1 0.0339 4400 0.1998 */ : int i; : struct kmem_list3 *l3; : : // printk("·· free_block(%x) = %dx%x\n", cachep->gfpflags, nr_objects, cachep->objsize); - 264 0.0033 0 0 0 0 14 6.7e-04 : for (i = 0; i < nr_objects; i++) { - 98 0.0012 0 0 0 0 3 1.4e-04 : void *objp = objpp[i]; + 386 0.0043 0 0 0 0 33 0.0015 : for (i = 0; i < nr_objects; i++) { + 154 0.0017 0 0 0 0 9 4.1e-04 : void *objp = objpp[i]; : struct slab *slabp; : : slabp = virt_to_slab(objp); - 123 0.0015 0 0 0 0 35 0.0017 : l3 = cachep->nodelists[node]; + 146 0.0016 0 0 0 0 51 0.0023 : l3 = cachep->nodelists[node]; : list_del(&slabp->list); : check_spinlock_acquired_node(cachep, node); : check_slabp(cachep, slabp); : slab_put_obj(cachep, slabp, objp, node); : STATS_DEC_ACTIVE(cachep); - 50 6.2e-04 0 0 0 0 1 4.8e-05 : l3->free_objects++; + 76 8.5e-04 0 0 0 0 4 1.8e-04 : l3->free_objects++; : check_slabp(cachep, slabp); : : /* fixup slab chains */ - 2 2.5e-05 0 0 0 0 0 0 : if (slabp->inuse == 0) { - 35 4.4e-04 0 0 0 0 7 3.3e-04 : if (l3->free_objects > l3->free_limit) { - 2 2.5e-05 0 0 0 0 0 0 : l3->free_objects -= cachep->num; - 7 8.7e-05 0 0 0 0 2 9.6e-05 : slab_destroy(cachep, slabp); + 1 1.1e-05 0 0 0 0 0 0 : if (slabp->inuse == 0) { + 53 6.0e-04 0 0 0 0 6 2.7e-04 : if (l3->free_objects > l3->free_limit) { + 2 2.2e-05 0 0 0 0 0 0 : l3->free_objects -= cachep->num; + 5 5.6e-05 0 0 0 0 4 1.8e-04 : slab_destroy(cachep, slabp); : } else { - 3 3.7e-05 0 0 0 0 0 0 : list_add(&slabp->list, &l3->slabs_free); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : list_add(&slabp->list, &l3->slabs_free); : } : } else { : /* Unconditionally move a slab to the end of the @@ -2871,35 +2871,35 @@ : list_add_tail(&slabp->list, &l3->slabs_partial); : } : } - 194 0.0024 0 0 0 0 40 0.0019 :} + 167 0.0019 0 0 0 0 40 0.0018 :} : :static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) - 65 8.1e-04 0 0 0 0 9 4.3e-04 :{ /* cache_flusharray total: 1522 0.0190 0 0 0 0 103 0.0049 */ + 104 0.0012 0 0 0 0 14 6.4e-04 :{ /* cache_flusharray total: 1655 0.0186 0 0 0 0 127 0.0058 */ : int batchcount; : struct kmem_list3 *l3; - 1 1.2e-05 0 0 0 0 1 4.8e-05 : int node = numa_node_id(); + 5 5.6e-05 0 0 0 0 0 0 : int node = numa_node_id(); : - 1 1.2e-05 0 0 0 0 1 4.8e-05 : batchcount = ac->batchcount; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : batchcount = ac->batchcount; :#if DEBUG : BUG_ON(!batchcount || batchcount > ac->avail); :#endif : check_irq_off(); : l3 = cachep->nodelists[node]; - 6 7.5e-05 0 0 0 0 0 0 : spin_lock(&l3->list_lock); - 103 0.0013 0 0 0 0 15 7.2e-04 : if (l3->shared) { + 4 4.5e-05 0 0 0 0 3 1.4e-04 : spin_lock(&l3->list_lock); + 104 0.0012 0 0 0 0 20 9.1e-04 : if (l3->shared) { : struct array_cache *shared_array = l3->shared; - 1 1.2e-05 0 0 0 0 0 0 : int max = shared_array->limit - shared_array->avail; - 1 1.2e-05 0 0 0 0 0 0 : if (max) { + : int max = shared_array->limit - shared_array->avail; + 1 1.1e-05 0 0 0 0 0 0 : if (max) { : if (batchcount > max) : batchcount = max; - 1269 0.0158 0 0 0 0 71 0.0034 : memcpy(&(shared_array->entry[shared_array->avail]), + 1343 0.0151 0 0 0 0 77 0.0035 : memcpy(&(shared_array->entry[shared_array->avail]), : ac->entry, sizeof(void *) * batchcount); - 2 2.5e-05 0 0 0 0 0 0 : shared_array->avail += batchcount; + 7 7.9e-05 0 0 0 0 1 4.5e-05 : 
shared_array->avail += batchcount; : goto free_done; : } : } : - 42 5.2e-04 0 0 0 0 2 9.6e-05 : free_block(cachep, ac->entry, batchcount, node); + 51 5.7e-04 0 0 0 0 4 1.8e-04 : free_block(cachep, ac->entry, batchcount, node); : free_done: :#if STATS : { @@ -2920,10 +2920,10 @@ : } :#endif : spin_unlock(&l3->list_lock); - 9 1.1e-04 0 0 0 0 1 4.8e-05 : ac->avail -= batchcount; - 6 7.5e-05 0 0 0 0 2 9.6e-05 : memmove(ac->entry, &(ac->entry[batchcount]), + 9 1.0e-04 0 0 0 0 4 1.8e-04 : ac->avail -= batchcount; + 7 7.9e-05 0 0 0 0 1 4.5e-05 : memmove(ac->entry, &(ac->entry[batchcount]), : sizeof(void *) * ac->avail); - 14 1.7e-04 0 0 0 0 0 0 :} + 19 2.1e-04 0 0 0 0 1 4.5e-05 :} : :/* : * __cache_free @@ -2993,9 +2993,9 @@ : * if the cache has no available objects. : */ :void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) - 8052 0.1003 0 0 0 0 2378 0.1136 :{ /* kmem_cache_alloc total: 66047 0.8230 0 0 9 0.3161 20041 0.9574 */ + 7789 0.0876 0 0 2 0.0679 2149 0.0976 :{ /* kmem_cache_alloc total: 77564 0.8723 0 0 11 0.3734 23537 1.0689 */ : return __cache_alloc(cachep, flags, __builtin_return_address(0)); - 4418 0.0551 0 0 0 0 2119 0.1012 :} + 4854 0.0546 0 0 0 0 2558 0.1162 :} :EXPORT_SYMBOL(kmem_cache_alloc); : :/** @@ -3076,7 +3076,7 @@ :EXPORT_SYMBOL(kmem_cache_alloc_node); : :void *kmalloc_node(size_t size, gfp_t flags, int node) - :{ + 1 1.1e-05 0 0 0 0 0 0 :{ /* kmalloc_node total: 1 1.1e-05 0 0 0 0 0 0 */ : struct kmem_cache *cachep; : : cachep = kmem_find_general_cachep(size, flags); @@ -3127,9 +3127,9 @@ :#ifndef CONFIG_DEBUG_SLAB : :void *__kmalloc(size_t size, gfp_t flags) - 221 0.0028 0 0 0 0 31 0.0015 :{ /* __kmalloc total: 1246 0.0155 0 0 0 0 172 0.0082 */ + 217 0.0024 0 0 0 0 32 0.0015 :{ /* __kmalloc total: 1460 0.0164 0 0 1 0.0339 242 0.0110 */ : return __do_kmalloc(size, flags, NULL); - 21 2.6e-04 0 0 0 0 10 4.8e-04 :} + 47 5.3e-04 0 0 0 0 16 7.3e-04 :} :EXPORT_SYMBOL(__kmalloc); : :#else @@ -3200,13 +3200,13 @@ : * cache. : */ :void kmem_cache_free(struct kmem_cache *cachep, void *objp) - 4296 0.0535 0 0 0 0 1191 0.0569 :{ /* kmem_cache_free total: 91349 1.1383 0 0 14 0.4917 23768 1.1355 */ + 5064 0.0570 0 0 1 0.0339 1210 0.0550 :{ /* kmem_cache_free total: 109249 1.2287 0 0 22 0.7468 25825 1.1729 */ : unsigned long flags; : - 5464 0.0681 0 0 0 0 2050 0.0979 : local_irq_save(flags); + 5678 0.0639 0 0 0 0 2034 0.0924 : local_irq_save(flags); : __cache_free(cachep, objp); - 940 0.0117 0 0 0 0 236 0.0113 : local_irq_restore(flags); - 42148 0.5252 0 0 8 0.2810 9318 0.4451 :} + 1294 0.0146 0 0 0 0 245 0.0111 : local_irq_restore(flags); + 44986 0.5059 0 0 10 0.3394 8761 0.3979 :} :EXPORT_SYMBOL(kmem_cache_free); : :/** @@ -3219,19 +3219,19 @@ : * or you will run into trouble. 
: */ :void kfree(const void *objp) - 1602 0.0200 0 0 0 0 518 0.0247 :{ /* kfree total: 6313 0.0787 0 0 0 0 1980 0.0946 */ + 1765 0.0199 0 0 1 0.0339 513 0.0233 :{ /* kfree total: 7597 0.0854 0 0 1 0.0339 2005 0.0911 */ : struct kmem_cache *c; : unsigned long flags; : - 1030 0.0128 0 0 0 0 200 0.0096 : if (unlikely(!objp)) + 1201 0.0135 0 0 0 0 223 0.0101 : if (unlikely(!objp)) : return; - 42 5.2e-04 0 0 0 0 18 8.6e-04 : local_irq_save(flags); + 59 6.6e-04 0 0 0 0 25 0.0011 : local_irq_save(flags); : kfree_debugcheck(objp); : c = virt_to_cache(objp); - 7 8.7e-05 0 0 0 0 2 9.6e-05 : mutex_debug_check_no_locks_freed(objp, obj_size(c)); + 6 6.7e-05 0 0 0 0 3 1.4e-04 : mutex_debug_check_no_locks_freed(objp, obj_size(c)); : __cache_free(c, (void *)objp); - 111 0.0014 0 0 0 0 14 6.7e-04 : local_irq_restore(flags); - 2339 0.0291 0 0 0 0 942 0.0450 :} + 133 0.0015 0 0 0 0 10 4.5e-04 : local_irq_restore(flags); + 2829 0.0318 0 0 0 0 894 0.0406 :} :EXPORT_SYMBOL(kfree); : :#ifdef CONFIG_SMP @@ -3444,23 +3444,23 @@ : :static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, : int force, int node) - 20 2.5e-04 0 0 0 0 12 5.7e-04 :{ /* drain_array_locked total: 379 0.0047 0 0 0 0 59 0.0028 */ + 20 2.2e-04 0 0 0 0 7 3.2e-04 :{ /* drain_array_locked total: 316 0.0036 0 0 0 0 51 0.0023 */ : int tofree; : : check_spinlock_acquired_node(cachep, node); - 71 8.8e-04 0 0 0 0 3 1.4e-04 : if (ac->touched && !force) { - 118 0.0015 0 0 0 0 11 5.3e-04 : ac->touched = 0; - 84 0.0010 0 0 0 0 12 5.7e-04 : } else if (ac->avail) { - 47 5.9e-04 0 0 0 0 6 2.9e-04 : tofree = force ? ac->avail : (ac->limit + 4) / 5; - 2 2.5e-05 0 0 0 0 0 0 : if (tofree > ac->avail) { - : tofree = (ac->avail + 1) / 2; - : } - : free_block(cachep, ac->entry, tofree, node); - 2 2.5e-05 0 0 0 0 0 0 : ac->avail -= tofree; - : memmove(ac->entry, &(ac->entry[tofree]), + 45 5.1e-04 0 0 0 0 4 1.8e-04 : if (ac->touched && !force) { + 111 0.0012 0 0 0 0 11 5.0e-04 : ac->touched = 0; + 52 5.8e-04 0 0 0 0 8 3.6e-04 : } else if (ac->avail) { + 48 5.4e-04 0 0 0 0 3 1.4e-04 : tofree = force ? ac->avail : (ac->limit + 4) / 5; + : if (tofree > ac->avail) { + 1 1.1e-05 0 0 0 0 0 0 : tofree = (ac->avail + 1) / 2; + : } + 1 1.1e-05 0 0 0 0 0 0 : free_block(cachep, ac->entry, tofree, node); + : ac->avail -= tofree; + 2 2.2e-05 0 0 0 0 0 0 : memmove(ac->entry, &(ac->entry[tofree]), : sizeof(void *) * ac->avail); : } - 35 4.4e-04 0 0 0 0 15 7.2e-04 :} + 36 4.0e-04 0 0 0 0 18 8.2e-04 :} : :/** : * cache_reap - Reclaim memory from caches. @@ -3475,62 +3475,62 @@ : * try again on the next iteration. : */ :static void cache_reap(void *unused) - :{ /* cache_reap total: 853 0.0106 0 0 0 0 162 0.0077 */ + :{ /* cache_reap total: 750 0.0084 0 0 0 0 140 0.0064 */ : struct list_head *walk; : struct kmem_list3 *l3; : - 7 8.7e-05 0 0 0 0 0 0 : if (!mutex_trylock(&cache_chain_mutex)) { + 8 9.0e-05 0 0 0 0 1 4.5e-05 : if (!mutex_trylock(&cache_chain_mutex)) { : /* Give up. Setup the next iteration. 
*/ : schedule_delayed_work(&__get_cpu_var(reap_work), : REAPTIMEOUT_CPUC); : return; : } : - 4 5.0e-05 0 0 0 0 1 4.8e-05 : list_for_each(walk, &cache_chain) { + 2 2.2e-05 0 0 0 0 0 0 : list_for_each(walk, &cache_chain) { : struct kmem_cache *searchp; : struct list_head *p; : int tofree; : struct slab *slabp; : - 7 8.7e-05 0 0 0 0 1 4.8e-05 : searchp = list_entry(walk, struct kmem_cache, next); + 10 1.1e-04 0 0 0 0 4 1.8e-04 : searchp = list_entry(walk, struct kmem_cache, next); : - 2 2.5e-05 0 0 0 0 0 0 : if (searchp->flags & SLAB_NO_REAP) + 2 2.2e-05 0 0 0 0 0 0 : if (searchp->flags & SLAB_NO_REAP) : goto next; : : check_irq_on(); : - 1 1.2e-05 0 0 0 0 0 0 : l3 = searchp->nodelists[numa_node_id()]; - 155 0.0019 0 0 0 0 19 9.1e-04 : if (l3->alien) - 2 2.5e-05 0 0 0 0 2 9.6e-05 : drain_alien_cache(searchp, l3->alien); - 8 1.0e-04 0 0 0 0 7 3.3e-04 : spin_lock_irq(&l3->list_lock); + 5 5.6e-05 0 0 0 0 2 9.1e-05 : l3 = searchp->nodelists[numa_node_id()]; + 133 0.0015 0 0 0 0 8 3.6e-04 : if (l3->alien) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : drain_alien_cache(searchp, l3->alien); + 4 4.5e-05 0 0 0 0 4 1.8e-04 : spin_lock_irq(&l3->list_lock); : - 1 1.2e-05 0 0 0 0 4 1.9e-04 : drain_array_locked(searchp, cpu_cache_get(searchp), 0, - 30 3.7e-04 0 0 0 0 22 0.0011 : numa_node_id()); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : drain_array_locked(searchp, cpu_cache_get(searchp), 0, + 23 2.6e-04 0 0 0 0 13 5.9e-04 : numa_node_id()); : - 13 1.6e-04 0 0 0 0 9 4.3e-04 : if (time_after(l3->next_reap, jiffies)) + 12 1.3e-04 0 0 0 0 11 5.0e-04 : if (time_after(l3->next_reap, jiffies)) : goto next_unlock; : - 151 0.0019 0 0 0 0 14 6.7e-04 : l3->next_reap = jiffies + REAPTIMEOUT_LIST3; + 153 0.0017 0 0 0 0 9 4.1e-04 : l3->next_reap = jiffies + REAPTIMEOUT_LIST3; : : if (l3->shared) - 7 8.7e-05 0 0 0 0 2 9.6e-05 : drain_array_locked(searchp, l3->shared, 0, + 7 7.9e-05 0 0 0 0 2 9.1e-05 : drain_array_locked(searchp, l3->shared, 0, : numa_node_id()); : - 128 0.0016 0 0 0 0 16 7.6e-04 : if (l3->free_touched) { - 5 6.2e-05 0 0 0 0 1 4.8e-05 : l3->free_touched = 0; + 121 0.0014 0 0 0 0 10 4.5e-04 : if (l3->free_touched) { + 3 3.4e-05 0 0 0 0 2 9.1e-05 : l3->free_touched = 0; : goto next_unlock; : } : - 152 0.0019 0 0 0 0 51 0.0024 : tofree = + 136 0.0015 0 0 0 0 58 0.0026 : tofree = : (l3->free_limit + 5 * searchp->num - : 1) / (5 * searchp->num); : do { - 3 3.7e-05 0 0 0 0 0 0 : p = l3->slabs_free.next; - 2 2.5e-05 0 0 0 0 0 0 : if (p == &(l3->slabs_free)) + : p = l3->slabs_free.next; + 2 2.2e-05 0 0 0 0 0 0 : if (p == &(l3->slabs_free)) : break; : : slabp = list_entry(p, struct slab, list); - 2 2.5e-05 0 0 0 0 0 0 : BUG_ON(slabp->inuse); + 0 0 0 0 0 0 1 4.5e-05 : BUG_ON(slabp->inuse); : list_del(&slabp->list); : STATS_INC_REAPED(searchp); : @@ -3543,18 +3543,18 @@ : spin_unlock_irq(&l3->list_lock); : slab_destroy(searchp, slabp); : spin_lock_irq(&l3->list_lock); - 1 1.2e-05 0 0 0 0 0 0 : } while (--tofree > 0); + : } while (--tofree > 0); : next_unlock: - 2 2.5e-05 0 0 0 0 1 4.8e-05 : spin_unlock_irq(&l3->list_lock); + 1 1.1e-05 0 0 0 0 0 0 : spin_unlock_irq(&l3->list_lock); : next: : cond_resched(); : } : check_irq_on(); - 1 1.2e-05 0 0 0 0 0 0 : mutex_unlock(&cache_chain_mutex); + 3 3.4e-05 0 0 0 0 0 0 : mutex_unlock(&cache_chain_mutex); : drain_remote_pages(); : /* Setup the next iteration */ - 1 1.2e-05 0 0 0 0 0 0 : schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); - 1 1.2e-05 0 0 0 0 0 0 :} + 2 2.2e-05 0 0 0 0 0 0 : schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); + 1 1.1e-05 0 0 0 0 0 0 :} 
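The reap path annotated above is driven by two heuristics: cache_reap() reschedules itself every REAPTIMEOUT_CPUC and skips any list that was reaped within REAPTIMEOUT_LIST3, while drain_array_locked() spares an array for one interval after it has been touched and otherwise trims about a fifth of its limit, capped at half of what is actually cached. A minimal user-space sketch of that drain rule follows; model_array_cache and drain_model are illustrative names, not kernel API, and the subtraction on avail stands in for free_block() plus the memmove().

    #include <stdio.h>

    /* Model of the per-CPU array drained by drain_array_locked() above. */
    struct model_array_cache {
            unsigned int avail;     /* objects currently cached */
            unsigned int limit;     /* capacity of the array */
            int touched;            /* used since the last reap tick? */
    };

    static unsigned int drain_model(struct model_array_cache *ac, int force)
    {
            unsigned int tofree = 0;

            if (ac->touched && !force) {
                    ac->touched = 0;        /* recently used: spare it one interval */
            } else if (ac->avail) {
                    /* free a fifth of the limit, rounded up ... */
                    tofree = force ? ac->avail : (ac->limit + 4) / 5;
                    /* ... but never more than half of what is there */
                    if (tofree > ac->avail)
                            tofree = (ac->avail + 1) / 2;
                    ac->avail -= tofree;    /* models free_block() + memmove() */
            }
            return tofree;
    }

    int main(void)
    {
            struct model_array_cache ac = { 30, 120, 1 };
            int tick;

            for (tick = 1; tick <= 5; tick++) {
                    unsigned int freed = drain_model(&ac, 0);
                    printf("tick %d: freed %2u, avail now %2u\n",
                           tick, freed, ac.avail);
            }
            return 0;
    }

Applied to an idle cache each tick, the rule first trims in fixed limit/5 steps (24 objects here) and then halves whatever remains (3, 2, 1), so a burst of cached objects decays geometrically across reap intervals instead of being dropped all at once.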
: :#ifdef CONFIG_PROC_FS : @@ -3808,12 +3808,12 @@ /* * Total samples for file : "mm/slab.c" * - * 91867 1.1447 0 0 12 0.4215 24740 1.1819 + * 100031 1.1250 0 0 16 0.5431 25374 1.1524 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab_vs.h annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab_vs.h --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab_vs.h 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/slab_vs.h 2006-03-12 07:20:06.000000000 -0500 @@ -4,7 +4,7 @@ :{ : int what = cachep->gfpflags & GFP_ZONEMASK; : - 6 7.5e-05 0 0 0 0 0 0 : if (!current->vx_info) + 7 7.9e-05 0 0 0 0 1 4.5e-05 : if (!current->vx_info) : return; : : atomic_add(cachep->buffer_size, ¤t->vx_info->cacct.slab[what]); @@ -24,12 +24,12 @@ /* * Total samples for file : "mm/slab_vs.h" * - * 6 7.5e-05 0 0 0 0 0 0 + * 7 7.9e-05 0 0 0 0 1 4.5e-05 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap.c 2006-03-12 07:20:05.000000000 -0500 @@ -46,12 +46,12 @@ :} : :void put_page(struct page *page) - 1469 0.0183 0 0 1 0.0351 256 0.0122 :{ - 468 0.0058 0 0 1 0.0351 134 0.0064 : if (unlikely(PageCompound(page))) + 1882 0.0212 0 0 0 0 250 0.0114 :{ + 664 0.0075 0 0 0 0 164 0.0074 : if (unlikely(PageCompound(page))) : put_compound_page(page); - 666 0.0083 0 0 0 0 114 0.0054 : else if (put_page_testzero(page)) - : __page_cache_release(page); - 763 0.0095 0 0 1 0.0351 109 0.0052 :} + 839 0.0094 0 0 0 0 152 0.0069 : else if (put_page_testzero(page)) + 1 1.1e-05 0 0 0 0 0 0 : __page_cache_release(page); + 1330 0.0150 0 0 0 0 176 0.0080 :} :EXPORT_SYMBOL(put_page); : :/* @@ -70,48 +70,48 @@ : * Returns zero if it cleared PG_writeback. 
: */ :int rotate_reclaimable_page(struct page *page) - :{ /* rotate_reclaimable_page total: 7 8.7e-05 0 0 0 0 2 9.6e-05 */ + 38 4.3e-04 0 0 0 0 2 9.1e-05 :{ /* rotate_reclaimable_page total: 416 0.0047 0 0 0 0 100 0.0045 */ : struct zone *zone; : unsigned long flags; : - : if (PageLocked(page)) + 3 3.4e-05 0 0 0 0 0 0 : if (PageLocked(page)) : return 1; - 3 3.7e-05 0 0 0 0 0 0 : if (PageDirty(page)) + 156 0.0018 0 0 0 0 12 5.4e-04 : if (PageDirty(page)) : return 1; : if (PageActive(page)) : return 1; - : if (!PageLRU(page)) + 3 3.4e-05 0 0 0 0 0 0 : if (!PageLRU(page)) : return 1; : : zone = page_zone(page); - : spin_lock_irqsave(&zone->lru_lock, flags); - 1 1.2e-05 0 0 0 0 0 0 : if (PageLRU(page) && !PageActive(page)) { + 132 0.0015 0 0 0 0 63 0.0029 : spin_lock_irqsave(&zone->lru_lock, flags); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (PageLRU(page) && !PageActive(page)) { : list_del(&page->lru); - 0 0 0 0 0 0 1 4.8e-05 : list_add_tail(&page->lru, &zone->inactive_list); - : inc_page_state(pgrotated); + 1 1.1e-05 0 0 0 0 2 9.1e-05 : list_add_tail(&page->lru, &zone->inactive_list); + 1 1.1e-05 0 0 0 0 0 0 : inc_page_state(pgrotated); : } - : if (!test_clear_page_writeback(page)) + 29 3.3e-04 0 0 0 0 3 1.4e-04 : if (!test_clear_page_writeback(page)) : BUG(); - 2 2.5e-05 0 0 0 0 0 0 : spin_unlock_irqrestore(&zone->lru_lock, flags); + 19 2.1e-04 0 0 0 0 2 9.1e-05 : spin_unlock_irqrestore(&zone->lru_lock, flags); : return 0; - :} + 10 1.1e-04 0 0 0 0 3 1.4e-04 :} : :/* : * FIXME: speed this up? : */ :void fastcall activate_page(struct page *page) - 138 0.0017 0 0 0 0 30 0.0014 :{ /* activate_page total: 1709 0.0213 0 0 1 0.0351 332 0.0159 */ + 157 0.0018 0 0 0 0 36 0.0016 :{ /* activate_page total: 1705 0.0192 0 0 1 0.0339 367 0.0167 */ : struct zone *zone = page_zone(page); : - 4 5.0e-05 0 0 0 0 0 0 : spin_lock_irq(&zone->lru_lock); - 43 5.4e-04 0 0 0 0 6 2.9e-04 : if (PageLRU(page) && !PageActive(page)) { + 5 5.6e-05 0 0 0 0 4 1.8e-04 : spin_lock_irq(&zone->lru_lock); + 38 4.3e-04 0 0 0 0 5 2.3e-04 : if (PageLRU(page) && !PageActive(page)) { : del_page_from_inactive_list(zone, page); : SetPageActive(page); : add_page_to_active_list(zone, page); : inc_page_state(pgactivate); : } - 130 0.0016 0 0 0 0 35 0.0017 : spin_unlock_irq(&zone->lru_lock); - 58 7.2e-04 0 0 0 0 20 9.6e-04 :} + 170 0.0019 0 0 0 0 52 0.0024 : spin_unlock_irq(&zone->lru_lock); + 50 5.6e-04 0 0 0 0 17 7.7e-04 :} : :/* : * Mark a page as having seen activity. 
@@ -121,14 +121,14 @@ : * active,unreferenced -> active,referenced : */ :void fastcall mark_page_accessed(struct page *page) - 13529 0.1686 0 0 1 0.0351 4034 0.1927 :{ /* mark_page_accessed total: 29783 0.3711 0 0 4 0.1405 10036 0.4794 */ - 3616 0.0451 0 0 1 0.0351 1205 0.0576 : if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) { - 3 3.7e-05 0 0 0 0 2 9.6e-05 : activate_page(page); + 13467 0.1515 0 0 1 0.0339 4054 0.1841 :{ /* mark_page_accessed total: 30144 0.3390 0 0 5 0.1697 10341 0.4696 */ + 4026 0.0453 0 0 0 0 1405 0.0638 : if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : activate_page(page); : ClearPageReferenced(page); - 3394 0.0423 0 0 0 0 1086 0.0519 : } else if (!PageReferenced(page)) { + 3814 0.0429 0 0 2 0.0679 1325 0.0602 : } else if (!PageReferenced(page)) { : SetPageReferenced(page); : } - 8814 0.1098 0 0 2 0.0702 3612 0.1726 :} + 8322 0.0936 0 0 2 0.0679 3425 0.1555 :} : :EXPORT_SYMBOL(mark_page_accessed); : @@ -140,42 +140,42 @@ :static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, }; : :void fastcall lru_cache_add(struct page *page) - 1 1.2e-05 0 0 0 0 0 0 :{ /* lru_cache_add total: 56 7.0e-04 0 0 0 0 14 6.7e-04 */ - 38 4.7e-04 0 0 0 0 6 2.9e-04 : struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); + :{ /* lru_cache_add total: 82 9.2e-04 0 0 0 0 18 8.2e-04 */ + 54 6.1e-04 0 0 0 0 10 4.5e-04 : struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); : : page_cache_get(page); - 0 0 0 0 0 0 2 9.6e-05 : if (!pagevec_add(pvec, page)) - : __pagevec_lru_add(pvec); + 2 2.2e-05 0 0 0 0 0 0 : if (!pagevec_add(pvec, page)) + 1 1.1e-05 0 0 0 0 0 0 : __pagevec_lru_add(pvec); : put_cpu_var(lru_add_pvecs); - 6 7.5e-05 0 0 0 0 4 1.9e-04 :} + 10 1.1e-04 0 0 0 0 3 1.4e-04 :} : :void fastcall lru_cache_add_active(struct page *page) - 1700 0.0212 0 0 0 0 200 0.0096 :{ /* lru_cache_add_active total: 56573 0.7049 0 0 9 0.3161 11620 0.5551 */ - 37502 0.4673 0 0 6 0.2107 6188 0.2956 : struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs); + 1281 0.0144 0 0 0 0 185 0.0084 :{ /* lru_cache_add_active total: 103503 1.1640 0 0 11 0.3734 13296 0.6038 */ + 79899 0.8986 0 0 8 0.2716 7768 0.3528 : struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs); : : page_cache_get(page); - 1955 0.0244 0 0 0 0 750 0.0358 : if (!pagevec_add(pvec, page)) - 4957 0.0618 0 0 0 0 971 0.0464 : __pagevec_lru_add_active(pvec); + 1142 0.0128 0 0 0 0 488 0.0222 : if (!pagevec_add(pvec, page)) + 7604 0.0855 0 0 2 0.0679 932 0.0423 : __pagevec_lru_add_active(pvec); : put_cpu_var(lru_add_active_pvecs); - 7601 0.0947 0 0 3 0.1054 2466 0.1178 :} + 9205 0.1035 0 0 1 0.0339 2341 0.1063 :} : :static void __lru_add_drain(int cpu) - 180 0.0022 0 0 0 0 109 0.0052 :{ /* __lru_add_drain total: 1686 0.0210 0 0 0 0 846 0.0404 */ - 626 0.0078 0 0 0 0 243 0.0116 : struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); + 211 0.0024 0 0 0 0 97 0.0044 :{ /* __lru_add_drain total: 1913 0.0215 0 0 0 0 854 0.0388 */ + 758 0.0085 0 0 0 0 254 0.0115 : struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); : : /* CPU is dead, so no locking needed. 
*/ - 56 7.0e-04 0 0 0 0 8 3.8e-04 : if (pagevec_count(pvec)) - 4 5.0e-05 0 0 0 0 1 4.8e-05 : __pagevec_lru_add(pvec); - 230 0.0029 0 0 0 0 115 0.0055 : pvec = &per_cpu(lru_add_active_pvecs, cpu); - 39 4.9e-04 0 0 0 0 53 0.0025 : if (pagevec_count(pvec)) - 5 6.2e-05 0 0 0 0 4 1.9e-04 : __pagevec_lru_add_active(pvec); - 546 0.0068 0 0 0 0 313 0.0150 :} + 67 7.5e-04 0 0 0 0 18 8.2e-04 : if (pagevec_count(pvec)) + 1 1.1e-05 0 0 0 0 0 0 : __pagevec_lru_add(pvec); + 286 0.0032 0 0 0 0 141 0.0064 : pvec = &per_cpu(lru_add_active_pvecs, cpu); + 36 4.0e-04 0 0 0 0 40 0.0018 : if (pagevec_count(pvec)) + 3 3.4e-05 0 0 0 0 6 2.7e-04 : __pagevec_lru_add_active(pvec); + 551 0.0062 0 0 0 0 298 0.0135 :} : :void lru_add_drain(void) - 558 0.0070 0 0 0 0 126 0.0060 :{ /* lru_add_drain total: 1116 0.0139 0 0 0 0 704 0.0336 */ - 31 3.9e-04 0 0 0 0 10 4.8e-04 : __lru_add_drain(get_cpu()); + 622 0.0070 0 0 0 0 161 0.0073 :{ /* lru_add_drain total: 1261 0.0142 0 0 0 0 731 0.0332 */ + 23 2.6e-04 0 0 0 0 10 4.5e-04 : __lru_add_drain(get_cpu()); : put_cpu(); - 527 0.0066 0 0 0 0 568 0.0271 :} + 616 0.0069 0 0 0 0 560 0.0254 :} : :#ifdef CONFIG_NUMA :static void lru_add_drain_per_cpu(void *dummy) @@ -208,19 +208,19 @@ : * freed via pagevecs. But it gets used by networking. : */ :void fastcall __page_cache_release(struct page *page) - 3 3.7e-05 0 0 0 0 0 0 :{ /* __page_cache_release total: 14 1.7e-04 0 0 0 0 0 0 */ + 6 6.7e-05 0 0 0 0 2 9.1e-05 :{ /* __page_cache_release total: 170 0.0019 0 0 0 0 37 0.0017 */ : unsigned long flags; : struct zone *zone = page_zone(page); : - 7 8.7e-05 0 0 0 0 0 0 : spin_lock_irqsave(&zone->lru_lock, flags); + 40 4.5e-04 0 0 0 0 3 1.4e-04 : spin_lock_irqsave(&zone->lru_lock, flags); : if (TestClearPageLRU(page)) : del_page_from_lru(zone, page); - : if (page_count(page) != 0) + 2 2.2e-05 0 0 0 0 0 0 : if (page_count(page) != 0) : page = NULL; - : spin_unlock_irqrestore(&zone->lru_lock, flags); - : if (page) + 1 1.1e-05 0 0 0 0 0 0 : spin_unlock_irqrestore(&zone->lru_lock, flags); + 46 5.2e-04 0 0 0 0 13 5.9e-04 : if (page) : free_hot_page(page); - :} + 2 2.2e-05 0 0 0 0 4 1.8e-04 :} : :EXPORT_SYMBOL(__page_cache_release); : @@ -237,17 +237,17 @@ : * via the LRU. If it did, give up: shrink_cache will free it. 
: */ :void release_pages(struct page **pages, int nr, int cold) - 529 0.0066 0 0 0 0 472 0.0225 :{ /* release_pages total: 104602 1.3034 0 0 27 0.9484 47282 2.2588 */ + 588 0.0066 0 0 1 0.0339 492 0.0223 :{ /* release_pages total: 123441 1.3883 0 0 24 0.8147 48347 2.1957 */ : int i; : struct pagevec pages_to_free; : struct zone *zone = NULL; : : pagevec_init(&pages_to_free, cold); - 10438 0.1301 0 0 1 0.0351 7440 0.3554 : for (i = 0; i < nr; i++) { - 3426 0.0427 0 0 0 0 2567 0.1226 : struct page *page = pages[i]; + 10836 0.1219 0 0 3 0.1018 6922 0.3144 : for (i = 0; i < nr; i++) { + 3498 0.0393 0 0 1 0.0339 2496 0.1134 : struct page *page = pages[i]; : struct zone *pagezone; : - 3550 0.0442 0 0 0 0 922 0.0440 : if (unlikely(PageCompound(page))) { + 4163 0.0468 0 0 0 0 1041 0.0473 : if (unlikely(PageCompound(page))) { : if (zone) { : spin_unlock_irq(&zone->lru_lock); : zone = NULL; @@ -256,32 +256,32 @@ : continue; : } : - 10174 0.1268 0 0 2 0.0702 6290 0.3005 : if (!put_page_testzero(page)) + 10498 0.1181 0 0 2 0.0679 6478 0.2942 : if (!put_page_testzero(page)) : continue; : : pagezone = page_zone(page); - 3744 0.0467 0 0 0 0 1013 0.0484 : if (pagezone != zone) { - 210 0.0026 0 0 0 0 90 0.0043 : if (zone) - 2 2.5e-05 0 0 0 0 0 0 : spin_unlock_irq(&zone->lru_lock); + 4881 0.0549 0 0 1 0.0339 1223 0.0555 : if (pagezone != zone) { + 397 0.0045 0 0 1 0.0339 118 0.0054 : if (zone) + 14 1.6e-04 0 0 0 0 2 9.1e-05 : spin_unlock_irq(&zone->lru_lock); : zone = pagezone; - 218 0.0027 0 0 0 0 148 0.0071 : spin_lock_irq(&zone->lru_lock); + 251 0.0028 0 0 0 0 122 0.0055 : spin_lock_irq(&zone->lru_lock); : } - 721 0.0090 0 0 0 0 100 0.0048 : if (TestClearPageLRU(page)) + 834 0.0094 0 0 0 0 139 0.0063 : if (TestClearPageLRU(page)) : del_page_from_lru(zone, page); - 156 0.0019 0 0 0 0 72 0.0034 : if (page_count(page) == 0) { - 3718 0.0463 0 0 1 0.0351 3059 0.1461 : if (!pagevec_add(&pages_to_free, page)) { - 2737 0.0341 0 0 1 0.0351 1093 0.0522 : spin_unlock_irq(&zone->lru_lock); + 158 0.0018 0 0 0 0 72 0.0033 : if (page_count(page) == 0) { + 3696 0.0416 0 0 1 0.0339 2745 0.1247 : if (!pagevec_add(&pages_to_free, page)) { + 3357 0.0378 0 0 1 0.0339 1103 0.0501 : spin_unlock_irq(&zone->lru_lock); : __pagevec_free(&pages_to_free); : pagevec_reinit(&pages_to_free); : zone = NULL; /* No lock is held */ : } : } : } - 6056 0.0755 0 0 3 0.1054 2961 0.1415 : if (zone) - 108 0.0013 0 0 0 0 62 0.0030 : spin_unlock_irq(&zone->lru_lock); + 6574 0.0739 0 0 1 0.0339 3134 0.1423 : if (zone) + 108 0.0012 0 0 0 0 66 0.0030 : spin_unlock_irq(&zone->lru_lock); : : pagevec_free(&pages_to_free); - 1207 0.0150 0 0 0 0 368 0.0176 :} + 1267 0.0142 0 0 0 0 383 0.0174 :} : :/* : * The pages which we're about to release may be in the deferred lru-addition @@ -294,11 +294,11 @@ : * mutual recursion. : */ :void __pagevec_release(struct pagevec *pvec) - 32 4.0e-04 0 0 0 0 23 0.0011 :{ /* __pagevec_release total: 109 0.0014 0 0 0 0 74 0.0035 */ + 73 8.2e-04 0 0 0 0 76 0.0035 :{ /* __pagevec_release total: 296 0.0033 0 0 0 0 215 0.0098 */ : lru_add_drain(); - 63 7.9e-04 0 0 0 0 32 0.0015 : release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); + 183 0.0021 0 0 0 0 113 0.0051 : release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); : pagevec_reinit(pvec); - 11 1.4e-04 0 0 0 0 19 9.1e-04 :} + 36 4.0e-04 0 0 0 0 24 0.0011 :} : :EXPORT_SYMBOL(__pagevec_release); : @@ -308,79 +308,79 @@ : * This function reinitialises the caller's pagevec. 
: */ :void __pagevec_release_nonlru(struct pagevec *pvec) - 7 8.7e-05 0 0 0 0 0 0 :{ /* __pagevec_release_nonlru total: 24 3.0e-04 0 0 0 0 12 5.7e-04 */ + 25 2.8e-04 0 0 0 0 7 3.2e-04 :{ /* __pagevec_release_nonlru total: 283 0.0032 0 0 0 0 189 0.0086 */ : int i; : struct pagevec pages_to_free; : : pagevec_init(&pages_to_free, pvec->cold); - : for (i = 0; i < pagevec_count(pvec); i++) { - 5 6.2e-05 0 0 0 0 2 9.6e-05 : struct page *page = pvec->pages[i]; + 3 3.4e-05 0 0 0 0 2 9.1e-05 : for (i = 0; i < pagevec_count(pvec); i++) { + 74 8.3e-04 0 0 0 0 31 0.0014 : struct page *page = pvec->pages[i]; : - 1 1.2e-05 0 0 0 0 0 0 : BUG_ON(PageLRU(page)); - 1 1.2e-05 0 0 0 0 0 0 : if (put_page_testzero(page)) + 24 2.7e-04 0 0 0 0 10 4.5e-04 : BUG_ON(PageLRU(page)); + 21 2.4e-04 0 0 0 0 16 7.3e-04 : if (put_page_testzero(page)) : pagevec_add(&pages_to_free, page); : } : pagevec_free(&pages_to_free); : pagevec_reinit(pvec); - 2 2.5e-05 0 0 0 0 2 9.6e-05 :} + 8 9.0e-05 0 0 0 0 7 3.2e-04 :} : :/* : * Add the passed pages to the LRU, then drop the caller's refcount : * on them. Reinitialises the caller's pagevec. : */ :void __pagevec_lru_add(struct pagevec *pvec) - 283 0.0035 0 0 0 0 87 0.0042 :{ /* __pagevec_lru_add total: 1855 0.0231 0 0 0 0 524 0.0250 */ + 342 0.0038 0 0 0 0 116 0.0053 :{ /* __pagevec_lru_add total: 2093 0.0235 0 0 0 0 548 0.0249 */ : int i; : struct zone *zone = NULL; : - 157 0.0020 0 0 0 0 17 8.1e-04 : for (i = 0; i < pagevec_count(pvec); i++) { - 184 0.0023 0 0 0 0 72 0.0034 : struct page *page = pvec->pages[i]; + 195 0.0022 0 0 0 0 62 0.0028 : for (i = 0; i < pagevec_count(pvec); i++) { + 212 0.0024 0 0 0 0 49 0.0022 : struct page *page = pvec->pages[i]; : struct zone *pagezone = page_zone(page); : - 117 0.0015 0 0 0 0 15 7.2e-04 : if (pagezone != zone) { - 21 2.6e-04 0 0 0 0 7 3.3e-04 : if (zone) + 117 0.0013 0 0 0 0 12 5.4e-04 : if (pagezone != zone) { + 21 2.4e-04 0 0 0 0 4 1.8e-04 : if (zone) : spin_unlock_irq(&zone->lru_lock); : zone = pagezone; - 395 0.0049 0 0 0 0 107 0.0051 : spin_lock_irq(&zone->lru_lock); + 448 0.0050 0 0 0 0 107 0.0049 : spin_lock_irq(&zone->lru_lock); : } - 71 8.8e-04 0 0 0 0 27 0.0013 : if (TestSetPageLRU(page)) + 110 0.0012 0 0 0 0 29 0.0013 : if (TestSetPageLRU(page)) : BUG(); : add_page_to_inactive_list(zone, page); : } - 198 0.0025 0 0 0 0 52 0.0025 : if (zone) - 4 5.0e-05 0 0 0 0 1 4.8e-05 : spin_unlock_irq(&zone->lru_lock); - 45 5.6e-04 0 0 0 0 24 0.0011 : release_pages(pvec->pages, pvec->nr, pvec->cold); + 233 0.0026 0 0 0 0 54 0.0025 : if (zone) + 9 1.0e-04 0 0 0 0 4 1.8e-04 : spin_unlock_irq(&zone->lru_lock); + 57 6.4e-04 0 0 0 0 18 8.2e-04 : release_pages(pvec->pages, pvec->nr, pvec->cold); : pagevec_reinit(pvec); - 42 5.2e-04 0 0 0 0 17 8.1e-04 :} + 39 4.4e-04 0 0 0 0 16 7.3e-04 :} : :EXPORT_SYMBOL(__pagevec_lru_add); : :void __pagevec_lru_add_active(struct pagevec *pvec) - 4808 0.0599 0 0 2 0.0702 1036 0.0495 :{ /* __pagevec_lru_add_active total: 56275 0.7012 0 0 9 0.3161 16003 0.7645 */ + 6474 0.0728 0 0 0 0 1372 0.0623 :{ /* __pagevec_lru_add_active total: 79080 0.8894 0 0 12 0.4073 16878 0.7665 */ : int i; : struct zone *zone = NULL; : - 137 0.0017 0 0 0 0 36 0.0017 : for (i = 0; i < pagevec_count(pvec); i++) { - 4555 0.0568 0 0 1 0.0351 719 0.0343 : struct page *page = pvec->pages[i]; + 138 0.0016 0 0 0 0 31 0.0014 : for (i = 0; i < pagevec_count(pvec); i++) { + 5738 0.0645 0 0 2 0.0679 794 0.0361 : struct page *page = pvec->pages[i]; : struct zone *pagezone = page_zone(page); : - 2016 0.0251 0 0 0 0 339 0.0162 : if (pagezone != zone) 
{ - 537 0.0067 0 0 0 0 97 0.0046 : if (zone) - 0 0 0 0 0 0 1 4.8e-05 : spin_unlock_irq(&zone->lru_lock); + 3330 0.0375 0 0 0 0 533 0.0242 : if (pagezone != zone) { + 683 0.0077 0 0 0 0 150 0.0068 : if (zone) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : spin_unlock_irq(&zone->lru_lock); : zone = pagezone; - 2942 0.0367 0 0 0 0 561 0.0268 : spin_lock_irq(&zone->lru_lock); + 3404 0.0383 0 0 0 0 546 0.0248 : spin_lock_irq(&zone->lru_lock); : } - 1857 0.0231 0 0 1 0.0351 334 0.0160 : if (TestSetPageLRU(page)) + 2335 0.0263 0 0 0 0 375 0.0170 : if (TestSetPageLRU(page)) : BUG(); - 2029 0.0253 0 0 0 0 743 0.0355 : if (TestSetPageActive(page)) + 2139 0.0241 0 0 0 0 639 0.0290 : if (TestSetPageActive(page)) : BUG(); : add_page_to_active_list(zone, page); : } - 3199 0.0399 0 0 0 0 1061 0.0507 : if (zone) - 40 5.0e-04 0 0 0 0 3 1.4e-04 : spin_unlock_irq(&zone->lru_lock); - 332 0.0041 0 0 0 0 311 0.0149 : release_pages(pvec->pages, pvec->nr, pvec->cold); + 3292 0.0370 0 0 0 0 1098 0.0499 : if (zone) + 42 4.7e-04 0 0 0 0 1 4.5e-05 : spin_unlock_irq(&zone->lru_lock); + 347 0.0039 0 0 0 0 264 0.0120 : release_pages(pvec->pages, pvec->nr, pvec->cold); : pagevec_reinit(pvec); - 245 0.0031 0 0 0 0 270 0.0129 :} + 261 0.0029 0 0 0 0 242 0.0110 :} : :/* : * Try to drop buffers from the pages in a pagevec @@ -417,20 +417,20 @@ : */ :unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, : pgoff_t start, unsigned nr_pages) - 74 9.2e-04 0 0 0 0 33 0.0016 :{ /* pagevec_lookup total: 168 0.0021 0 0 0 0 55 0.0026 */ - 79 9.8e-04 0 0 0 0 12 5.7e-04 : pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); + 46 5.2e-04 0 0 0 0 6 2.7e-04 :{ /* pagevec_lookup total: 98 0.0011 0 0 0 0 14 6.4e-04 */ + 45 5.1e-04 0 0 0 0 5 2.3e-04 : pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); : return pagevec_count(pvec); - 15 1.9e-04 0 0 0 0 10 4.8e-04 :} + 7 7.9e-05 0 0 0 0 3 1.4e-04 :} : :EXPORT_SYMBOL(pagevec_lookup); : :unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, : pgoff_t *index, int tag, unsigned nr_pages) - 42 5.2e-04 0 0 0 0 9 4.3e-04 :{ /* pagevec_lookup_tag total: 134 0.0017 0 0 0 0 38 0.0018 */ - 58 7.2e-04 0 0 0 0 21 0.0010 : pvec->nr = find_get_pages_tag(mapping, index, tag, + 38 4.3e-04 0 0 0 0 17 7.7e-04 :{ /* pagevec_lookup_tag total: 126 0.0014 0 0 0 0 42 0.0019 */ + 56 6.3e-04 0 0 0 0 20 9.1e-04 : pvec->nr = find_get_pages_tag(mapping, index, tag, : nr_pages, pvec->pages); : return pagevec_count(pvec); - 34 4.2e-04 0 0 0 0 8 3.8e-04 :} + 32 3.6e-04 0 0 0 0 5 2.3e-04 :} : :EXPORT_SYMBOL(pagevec_lookup_tag); : @@ -444,18 +444,18 @@ :static DEFINE_PER_CPU(long, committed_space) = 0; : :void vm_acct_memory(long pages) - 133 0.0017 0 0 0 0 34 0.0016 :{ /* vm_acct_memory total: 3206 0.0399 0 0 0 0 631 0.0301 */ + 127 0.0014 0 0 0 0 48 0.0022 :{ /* vm_acct_memory total: 4483 0.0504 0 0 1 0.0339 769 0.0349 */ : long *local; : : preempt_disable(); - 1223 0.0152 0 0 0 0 189 0.0090 : local = &__get_cpu_var(committed_space); - 273 0.0034 0 0 0 0 81 0.0039 : *local += pages; - 617 0.0077 0 0 0 0 142 0.0068 : if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { + 1396 0.0157 0 0 0 0 218 0.0099 : local = &__get_cpu_var(committed_space); + 580 0.0065 0 0 0 0 106 0.0048 : *local += pages; + 963 0.0108 0 0 1 0.0339 185 0.0084 : if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { : atomic_add(*local, &vm_committed_space); - 254 0.0032 0 0 0 0 54 0.0026 : *local = 0; + 293 0.0033 0 0 0 0 49 0.0022 : *local = 0; : } : preempt_enable(); - 695 0.0087 0 
0 0 0 131 0.0063 :} + 1115 0.0125 0 0 0 0 163 0.0074 :} : :#ifdef CONFIG_HOTPLUG_CPU : @@ -479,22 +479,22 @@ : :#ifdef CONFIG_SMP :void percpu_counter_mod(struct percpu_counter *fbc, long amount) - 410 0.0051 0 0 0 0 64 0.0031 :{ /* percpu_counter_mod total: 1315 0.0164 0 0 0 0 252 0.0120 */ + 485 0.0055 0 0 1 0.0339 60 0.0027 :{ /* percpu_counter_mod total: 1550 0.0174 0 0 1 0.0339 275 0.0125 */ : long count; : long *pcount; - : int cpu = get_cpu(); + 4 4.5e-05 0 0 0 0 0 0 : int cpu = get_cpu(); : - 68 8.5e-04 0 0 0 0 24 0.0011 : pcount = per_cpu_ptr(fbc->counters, cpu); - 24 3.0e-04 0 0 0 0 8 3.8e-04 : count = *pcount + amount; - 165 0.0021 0 0 0 0 28 0.0013 : if (count >= FBC_BATCH || count <= -FBC_BATCH) { - : spin_lock(&fbc->lock); - 49 6.1e-04 0 0 0 0 9 4.3e-04 : fbc->count += count; + 84 9.4e-04 0 0 0 0 12 5.4e-04 : pcount = per_cpu_ptr(fbc->counters, cpu); + 44 4.9e-04 0 0 0 0 10 4.5e-04 : count = *pcount + amount; + 218 0.0025 0 0 0 0 41 0.0019 : if (count >= FBC_BATCH || count <= -FBC_BATCH) { + 0 0 0 0 0 0 1 4.5e-05 : spin_lock(&fbc->lock); + 49 5.5e-04 0 0 0 0 15 6.8e-04 : fbc->count += count; : spin_unlock(&fbc->lock); : count = 0; : } - 3 3.7e-05 0 0 0 0 1 4.8e-05 : *pcount = count; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : *pcount = count; : put_cpu(); - 596 0.0074 0 0 0 0 117 0.0056 :} + 663 0.0075 0 0 0 0 133 0.0060 :} :EXPORT_SYMBOL(percpu_counter_mod); :#endif : @@ -519,12 +519,12 @@ /* * Total samples for file : "mm/swap.c" * - * 165815 2.0661 0 0 28 0.9835 56428 2.6957 + * 225412 2.5351 0 0 32 1.0862 58925 2.6761 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swapfile.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swapfile.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swapfile.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swapfile.c 2006-03-12 07:20:05.000000000 -0500 @@ -58,13 +58,13 @@ :static DECLARE_RWSEM(swap_unplug_sem); : :void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) - :{ + :{ /* swap_unplug_io_fn total: 43 4.8e-04 0 0 0 0 9 4.1e-04 */ : swp_entry_t entry; : : down_read(&swap_unplug_sem); : entry.val = page_private(page); : if (PageSwapCache(page)) { - : struct block_device *bdev = swap_info[swp_type(entry)].bdev; + 0 0 0 0 0 0 1 4.5e-05 : struct block_device *bdev = swap_info[swp_type(entry)].bdev; : struct backing_dev_info *bdi; : : /* @@ -77,11 +77,11 @@ : */ : WARN_ON(page_count(page) <= 1); : - : bdi = bdev->bd_inode->i_mapping->backing_dev_info; + 13 1.5e-04 0 0 0 0 3 1.4e-04 : bdi = bdev->bd_inode->i_mapping->backing_dev_info; : blk_run_backing_dev(bdi, page); : } : up_read(&swap_unplug_sem); - :} + 2 2.2e-05 0 0 0 0 1 4.5e-05 :} : :#define SWAPFILE_CLUSTER 256 :#define LATENCY_LIMIT 256 @@ -101,10 +101,10 @@ : * But we do now try to find an empty cluster. 
-Andrea : */ : - : si->flags += SWP_SCANNING; - : if (unlikely(!si->cluster_nr)) { + 1 1.1e-05 0 0 0 0 0 0 : si->flags += SWP_SCANNING; + 7 7.9e-05 0 0 0 0 3 1.4e-04 : if (unlikely(!si->cluster_nr)) { : si->cluster_nr = SWAPFILE_CLUSTER - 1; - : if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) + 1 1.1e-05 0 0 0 0 2 9.1e-05 : if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) : goto lowest; : spin_unlock(&swap_lock); : @@ -112,16 +112,16 @@ : last_in_cluster = offset + SWAPFILE_CLUSTER - 1; : : /* Locate the first empty (unaligned) cluster */ - : for (; last_in_cluster <= si->highest_bit; offset++) { - 1 1.2e-05 0 0 0 0 0 0 : if (si->swap_map[offset]) + 90 0.0010 0 0 0 0 99 0.0045 : for (; last_in_cluster <= si->highest_bit; offset++) { + 103 0.0012 0 0 0 0 131 0.0059 : if (si->swap_map[offset]) : last_in_cluster = offset + SWAPFILE_CLUSTER; - : else if (offset == last_in_cluster) { + 5 5.6e-05 0 0 0 0 4 1.8e-04 : else if (offset == last_in_cluster) { : spin_lock(&swap_lock); : si->cluster_next = offset-SWAPFILE_CLUSTER-1; : goto cluster; : } - : if (unlikely(--latency_ration < 0)) { - : cond_resched(); + 73 8.2e-04 0 0 0 0 69 0.0031 : if (unlikely(--latency_ration < 0)) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : cond_resched(); : latency_ration = LATENCY_LIMIT; : } : } @@ -129,33 +129,33 @@ : goto lowest; : } : - : si->cluster_nr--; + 13 1.5e-04 0 0 0 0 5 2.3e-04 : si->cluster_nr--; :cluster: : offset = si->cluster_next; - : if (offset > si->highest_bit) + 4 4.5e-05 0 0 0 0 2 9.1e-05 : if (offset > si->highest_bit) :lowest: offset = si->lowest_bit; - :checks: if (!(si->flags & SWP_WRITEOK)) + 3 3.4e-05 0 0 0 0 1 4.5e-05 :checks: if (!(si->flags & SWP_WRITEOK)) : goto no_page; - : if (!si->highest_bit) + 0 0 0 0 0 0 2 9.1e-05 : if (!si->highest_bit) : goto no_page; - 1 1.2e-05 0 0 0 0 0 0 : if (!si->swap_map[offset]) { - : if (offset == si->lowest_bit) + 12 1.3e-04 0 0 0 0 1 4.5e-05 : if (!si->swap_map[offset]) { + 9 1.0e-04 0 0 0 0 2 9.1e-05 : if (offset == si->lowest_bit) : si->lowest_bit++; - : if (offset == si->highest_bit) + 12 1.3e-04 0 0 0 0 0 0 : if (offset == si->highest_bit) : si->highest_bit--; - : si->inuse_pages++; - : if (si->inuse_pages == si->pages) { + 3 3.4e-05 0 0 0 0 0 0 : si->inuse_pages++; + 1 1.1e-05 0 0 0 0 0 0 : if (si->inuse_pages == si->pages) { : si->lowest_bit = si->max; : si->highest_bit = 0; : } : si->swap_map[offset] = 1; - : si->cluster_next = offset + 1; + 3 3.4e-05 0 0 0 0 0 0 : si->cluster_next = offset + 1; : si->flags -= SWP_SCANNING; : return offset; : } : : spin_unlock(&swap_lock); - : while (++offset <= si->highest_bit) { + 1 1.1e-05 0 0 0 0 0 0 : while (++offset <= si->highest_bit) { : if (!si->swap_map[offset]) { : spin_lock(&swap_lock); : goto checks; @@ -174,34 +174,34 @@ :} : :swp_entry_t get_swap_page(void) - 1 1.2e-05 0 0 0 0 1 4.8e-05 :{ /* get_swap_page total: 5 6.2e-05 0 0 0 0 1 4.8e-05 */ + 3 3.4e-05 0 0 0 0 2 9.1e-05 :{ /* get_swap_page total: 468 0.0053 0 0 0 0 384 0.0174 */ : struct swap_info_struct *si; : pgoff_t offset; : int type, next; : int wrapped = 0; : - : spin_lock(&swap_lock); - : if (nr_swap_pages <= 0) + 7 7.9e-05 0 0 0 0 2 9.1e-05 : spin_lock(&swap_lock); + 42 4.7e-04 0 0 0 0 12 5.4e-04 : if (nr_swap_pages <= 0) : goto noswap; : nr_swap_pages--; : - 2 2.5e-05 0 0 0 0 0 0 : for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { - : si = swap_info + type; + 19 2.1e-04 0 0 0 0 6 2.7e-04 : for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { + 8 9.0e-05 0 0 0 0 6 2.7e-04 : si = swap_info + type; : next 
= si->next; - : if (next < 0 || + 6 6.7e-05 0 0 0 0 12 5.4e-04 : if (next < 0 || : (!wrapped && si->prio != swap_info[next].prio)) { - : next = swap_list.head; - : wrapped++; + 6 6.7e-05 0 0 0 0 2 9.1e-05 : next = swap_list.head; + 2 2.2e-05 0 0 0 0 0 0 : wrapped++; : } : - : if (!si->highest_bit) + 2 2.2e-05 0 0 0 0 2 9.1e-05 : if (!si->highest_bit) : continue; - : if (!(si->flags & SWP_WRITEOK)) + 3 3.4e-05 0 0 0 0 0 0 : if (!(si->flags & SWP_WRITEOK)) : continue; : - : swap_list.next = next; + 6 6.7e-05 0 0 0 0 3 1.4e-04 : swap_list.next = next; : offset = scan_swap_map(si); - : if (offset) { + 1 1.1e-05 0 0 0 0 0 0 : if (offset) { : spin_unlock(&swap_lock); : return swp_entry(type, offset); : } @@ -212,7 +212,7 @@ :noswap: : spin_unlock(&swap_lock); : return (swp_entry_t) {0}; - :} + 10 1.1e-04 0 0 0 0 8 3.6e-04 :} : :swp_entry_t get_swap_page_of_type(int type) :{ @@ -235,24 +235,24 @@ :} : :static struct swap_info_struct * swap_info_get(swp_entry_t entry) - :{ /* swap_info_get total: 2 2.5e-05 0 0 0 0 0 0 */ + 43 4.8e-04 0 0 0 0 15 6.8e-04 :{ /* swap_info_get total: 319 0.0036 0 0 0 0 97 0.0044 */ : struct swap_info_struct * p; : unsigned long offset, type; : - : if (!entry.val) + 21 2.4e-04 0 0 0 0 11 5.0e-04 : if (!entry.val) : goto out; - : type = swp_type(entry); + 13 1.5e-04 0 0 0 0 4 1.8e-04 : type = swp_type(entry); : if (type >= nr_swapfiles) : goto bad_nofile; - : p = & swap_info[type]; - : if (!(p->flags & SWP_USED)) + 23 2.6e-04 0 0 0 0 5 2.3e-04 : p = & swap_info[type]; + 13 1.5e-04 0 0 0 0 3 1.4e-04 : if (!(p->flags & SWP_USED)) : goto bad_device; : offset = swp_offset(entry); - : if (offset >= p->max) + 24 2.7e-04 0 0 0 0 1 4.5e-05 : if (offset >= p->max) : goto bad_offset; - : if (!p->swap_map[offset]) + 4 4.5e-05 0 0 0 0 0 0 : if (!p->swap_map[offset]) : goto bad_free; - 1 1.2e-05 0 0 0 0 0 0 : spin_lock(&swap_lock); + 102 0.0011 0 0 0 0 22 1.0e-03 : spin_lock(&swap_lock); : return p; : :bad_free: @@ -268,43 +268,43 @@ : printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); :out: : return NULL; - 1 1.2e-05 0 0 0 0 0 0 :} + 68 7.6e-04 0 0 0 0 35 0.0016 :} : :static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) - :{ - : int count = p->swap_map[offset]; + 30 3.4e-04 0 0 0 0 1 4.5e-05 :{ + 7 7.9e-05 0 0 0 0 1 4.5e-05 : int count = p->swap_map[offset]; /* swap_entry_free total: 93 0.0010 0 0 0 0 37 0.0017 */ : : if (count < SWAP_MAP_MAX) { - : count--; + 1 1.1e-05 0 0 0 0 3 1.4e-04 : count--; : p->swap_map[offset] = count; - : if (!count) { - : if (offset < p->lowest_bit) + 11 1.2e-04 0 0 0 0 3 1.4e-04 : if (!count) { + 4 4.5e-05 0 0 0 0 5 2.3e-04 : if (offset < p->lowest_bit) : p->lowest_bit = offset; - : if (offset > p->highest_bit) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (offset > p->highest_bit) : p->highest_bit = offset; - : if (p->prio > swap_info[swap_list.next].prio) + 10 1.1e-04 0 0 0 0 6 2.7e-04 : if (p->prio > swap_info[swap_list.next].prio) : swap_list.next = p - swap_info; - : nr_swap_pages++; + 4 4.5e-05 0 0 0 0 7 3.2e-04 : nr_swap_pages++; : p->inuse_pages--; : } : } : return count; - :} + 24 2.7e-04 0 0 0 0 10 4.5e-04 :} : :/* : * Caller has made sure that the swapdevice corresponding to entry : * is still around or has not been recycled. 
: */ :void swap_free(swp_entry_t entry) - :{ + 58 6.5e-04 0 0 0 0 12 5.4e-04 :{ /* swap_free total: 109 0.0012 0 0 0 0 45 0.0020 */ : struct swap_info_struct * p; : - : p = swap_info_get(entry); - : if (p) { - : swap_entry_free(p, swp_offset(entry)); + 5 5.6e-05 0 0 0 0 1 4.5e-05 : p = swap_info_get(entry); + 10 1.1e-04 0 0 0 0 4 1.8e-04 : if (p) { + 4 4.5e-05 0 0 0 0 2 9.1e-05 : swap_entry_free(p, swp_offset(entry)); : spin_unlock(&swap_lock); : } - :} + 30 3.4e-04 0 0 0 0 26 0.0012 :} : :/* : * How many references to page are currently swapped out? @@ -330,47 +330,47 @@ : * if there are no other references to it. : */ :int can_share_swap_page(struct page *page) - 2232 0.0278 0 0 0 0 378 0.0181 :{ /* can_share_swap_page total: 2579 0.0321 0 0 0 0 429 0.0205 */ + 2519 0.0283 0 0 0 0 426 0.0193 :{ /* can_share_swap_page total: 2936 0.0330 0 0 0 0 484 0.0220 */ : int count; : - 65 8.1e-04 0 0 0 0 5 2.4e-04 : BUG_ON(!PageLocked(page)); + 52 5.8e-04 0 0 0 0 0 0 : BUG_ON(!PageLocked(page)); : count = page_mapcount(page); - 11 1.4e-04 0 0 0 0 8 3.8e-04 : if (count <= 1 && PageSwapCache(page)) - 178 0.0022 0 0 0 0 25 0.0012 : count += page_swapcount(page); + 23 2.6e-04 0 0 0 0 5 2.3e-04 : if (count <= 1 && PageSwapCache(page)) + 214 0.0024 0 0 0 0 31 0.0014 : count += page_swapcount(page); : return count == 1; - 36 4.5e-04 0 0 0 0 3 1.4e-04 :} + 41 4.6e-04 0 0 0 0 5 2.3e-04 :} : :/* : * Work out if there are any other processes sharing this : * swap cache page. Free it if you can. Return success. : */ :int remove_exclusive_swap_page(struct page *page) - :{ /* remove_exclusive_swap_page total: 1 1.2e-05 0 0 0 0 3 1.4e-04 */ + 13 1.5e-04 0 0 0 0 2 9.1e-05 :{ /* remove_exclusive_swap_page total: 116 0.0013 0 0 0 0 75 0.0034 */ : int retval; : struct swap_info_struct * p; : swp_entry_t entry; : - : BUG_ON(PagePrivate(page)); - : BUG_ON(!PageLocked(page)); + 5 5.6e-05 0 0 0 0 1 4.5e-05 : BUG_ON(PagePrivate(page)); + 3 3.4e-05 0 0 0 0 0 0 : BUG_ON(!PageLocked(page)); : - : if (!PageSwapCache(page)) + 3 3.4e-05 0 0 0 0 1 4.5e-05 : if (!PageSwapCache(page)) : return 0; - : if (PageWriteback(page)) + 1 1.1e-05 0 0 0 0 0 0 : if (PageWriteback(page)) : return 0; - 0 0 0 0 0 0 1 4.8e-05 : if (page_count(page) != 2) /* 2: us + cache */ + 9 1.0e-04 0 0 0 0 2 9.1e-05 : if (page_count(page) != 2) /* 2: us + cache */ : return 0; : : entry.val = page_private(page); - : p = swap_info_get(entry); - 0 0 0 0 0 0 1 4.8e-05 : if (!p) + 6 6.7e-05 0 0 0 0 0 0 : p = swap_info_get(entry); + 11 1.2e-04 0 0 0 0 13 5.9e-04 : if (!p) : return 0; : : /* Is the only swap cache user the cache itself? */ : retval = 0; - 1 1.2e-05 0 0 0 0 1 4.8e-05 : if (p->swap_map[swp_offset(entry)] == 1) { + 5 5.6e-05 0 0 0 0 9 4.1e-04 : if (p->swap_map[swp_offset(entry)] == 1) { : /* Recheck the page count with the swapcache lock held.. */ - : write_lock_irq(&swapper_space.tree_lock); - : if ((page_count(page) == 2) && !PageWriteback(page)) { + 1 1.1e-05 0 0 0 0 0 0 : write_lock_irq(&swapper_space.tree_lock); + 3 3.4e-05 0 0 0 0 4 1.8e-04 : if ((page_count(page) == 2) && !PageWriteback(page)) { : __delete_from_swap_cache(page); : SetPageDirty(page); : retval = 1; @@ -379,33 +379,33 @@ : } : spin_unlock(&swap_lock); : - : if (retval) { + 10 1.1e-04 0 0 0 0 3 1.4e-04 : if (retval) { : swap_free(entry); : page_cache_release(page); : } : : return retval; - :} + 14 1.6e-04 0 0 0 0 20 9.1e-04 :} : :/* : * Free the swap entry like above, but also try to : * free the page cache entry if it is the last user. 
: */ :void free_swap_and_cache(swp_entry_t entry) - :{ + 7 7.9e-05 0 0 0 0 9 4.1e-04 :{ /* free_swap_and_cache total: 114 0.0013 0 0 0 0 38 0.0017 */ : struct swap_info_struct * p; : struct page *page = NULL; : : p = swap_info_get(entry); : if (p) { - : if (swap_entry_free(p, swp_offset(entry)) == 1) - : page = find_trylock_page(&swapper_space, entry.val); + 2 2.2e-05 0 0 0 0 6 2.7e-04 : if (swap_entry_free(p, swp_offset(entry)) == 1) + 0 0 0 0 0 0 2 9.1e-05 : page = find_trylock_page(&swapper_space, entry.val); : spin_unlock(&swap_lock); : } - : if (page) { + 11 1.2e-04 0 0 0 0 0 0 : if (page) { : int one_user; : - : BUG_ON(PagePrivate(page)); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : BUG_ON(PagePrivate(page)); : page_cache_get(page); : one_user = (page_count(page) == 2); : /* Only cache user (+us), or swap space full? Free it! */ @@ -413,10 +413,10 @@ : delete_from_swap_cache(page); : SetPageDirty(page); : } - : unlock_page(page); - : page_cache_release(page); + 4 4.5e-05 0 0 0 0 4 1.8e-04 : unlock_page(page); + 2 2.2e-05 0 0 0 0 0 0 : page_cache_release(page); : } - :} + 6 6.7e-05 0 0 0 0 3 1.4e-04 :} : :/* : * No need to decide whether this PTE shares the swap entry with others, @@ -866,14 +866,14 @@ : * corresponds to page offset `offset'. : */ :sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) - :{ - : struct swap_extent *se = sis->curr_swap_extent; + 7 7.9e-05 0 0 0 0 0 0 :{ + 4 4.5e-05 0 0 0 0 0 0 : struct swap_extent *se = sis->curr_swap_extent; /* map_swap_page total: 68 7.6e-04 0 0 0 0 8 3.6e-04 */ : struct swap_extent *start_se = se; : : for ( ; ; ) { : struct list_head *lh; : - : if (se->start_page <= offset && + 52 5.8e-04 0 0 0 0 5 2.3e-04 : if (se->start_page <= offset && : offset < (se->start_page + se->nr_pages)) { : return se->start_block + (offset - se->start_page); : } @@ -884,7 +884,7 @@ : sis->curr_swap_extent = se; : BUG_ON(se == start_se); /* It *must* be present */ : } - :} + 5 5.6e-05 0 0 0 0 3 1.4e-04 :} : :/* : * Free all of a swapdev's extent information @@ -1621,23 +1621,23 @@ :} : :void si_swapinfo(struct sysinfo *val) - 149 0.0019 0 0 0 0 6 2.9e-04 :{ /* si_swapinfo total: 364 0.0045 0 0 0 0 26 0.0012 */ + 133 0.0015 0 0 0 0 7 3.2e-04 :{ /* si_swapinfo total: 327 0.0037 0 0 0 0 35 0.0016 */ : unsigned int i; : unsigned long nr_to_be_unused = 0; : : spin_lock(&swap_lock); - 138 0.0017 0 0 0 0 9 4.3e-04 : for (i = 0; i < nr_swapfiles; i++) { - 28 3.5e-04 0 0 0 0 4 1.9e-04 : if (!(swap_info[i].flags & SWP_USED) || + 130 0.0015 0 0 0 0 8 3.6e-04 : for (i = 0; i < nr_swapfiles; i++) { + 25 2.8e-04 0 0 0 0 8 3.6e-04 : if (!(swap_info[i].flags & SWP_USED) || : (swap_info[i].flags & SWP_WRITEOK)) : continue; - 38 4.7e-04 0 0 0 0 3 1.4e-04 : nr_to_be_unused += swap_info[i].inuse_pages; + 28 3.1e-04 0 0 0 0 2 9.1e-05 : nr_to_be_unused += swap_info[i].inuse_pages; : } - 6 7.5e-05 0 0 0 0 0 0 : val->freeswap = nr_swap_pages + nr_to_be_unused; + 6 6.7e-05 0 0 0 0 2 9.1e-05 : val->freeswap = nr_swap_pages + nr_to_be_unused; : val->totalswap = total_swap_pages + nr_to_be_unused; : spin_unlock(&swap_lock); - : if (vx_flags(VXF_VIRT_MEM, 0)) + 0 0 0 0 0 0 5 2.3e-04 : if (vx_flags(VXF_VIRT_MEM, 0)) : vx_vsi_swapinfo(val); - 4 5.0e-05 0 0 0 0 4 1.9e-04 :} + 5 5.6e-05 0 0 0 0 3 1.4e-04 :} : :/* : * Verify that a swap entry is valid and increment its swap map count. @@ -1646,26 +1646,26 @@ : * "permanent", but will be reclaimed by the next swapoff. 
: */ :int swap_duplicate(swp_entry_t entry) - :{ + 25 2.8e-04 0 0 0 0 9 4.1e-04 :{ : struct swap_info_struct * p; : unsigned long offset, type; : int result = 0; : : type = swp_type(entry); - : if (type >= nr_swapfiles) /* swap_duplicate total: 2 2.5e-05 0 0 0 0 0 0 */ + 3 3.4e-05 0 0 0 0 3 1.4e-04 : if (type >= nr_swapfiles) /* swap_duplicate total: 107 0.0012 0 0 0 0 37 0.0017 */ : goto bad_file; : p = type + swap_info; : offset = swp_offset(entry); : - : spin_lock(&swap_lock); - 1 1.2e-05 0 0 0 0 0 0 : if (offset < p->max && p->swap_map[offset]) { - : if (p->swap_map[offset] < SWAP_MAP_MAX - 1) { - : p->swap_map[offset]++; + 2 2.2e-05 0 0 0 0 0 0 : spin_lock(&swap_lock); + 32 3.6e-04 0 0 0 0 12 5.4e-04 : if (offset < p->max && p->swap_map[offset]) { + 2 2.2e-05 0 0 0 0 0 0 : if (p->swap_map[offset] < SWAP_MAP_MAX - 1) { + 1 1.1e-05 0 0 0 0 0 0 : p->swap_map[offset]++; : result = 1; : } else if (p->swap_map[offset] <= SWAP_MAP_MAX) { : if (swap_overflow++ < 5) : printk(KERN_WARNING "swap_dup: swap entry overflow\n"); - : p->swap_map[offset] = SWAP_MAP_MAX; + 16 1.8e-04 0 0 0 0 1 4.5e-05 : p->swap_map[offset] = SWAP_MAP_MAX; : result = 1; : } : } @@ -1676,11 +1676,11 @@ :bad_file: : printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); : goto out; - 1 1.2e-05 0 0 0 0 0 0 :} + 9 1.0e-04 0 0 0 0 3 1.4e-04 :} : :struct swap_info_struct * :get_swap_info_struct(unsigned type) - :{ + 43 4.8e-04 0 0 0 0 1 4.5e-05 :{ /* get_swap_info_struct total: 43 4.8e-04 0 0 0 0 1 4.5e-05 */ : return &swap_info[type]; :} : @@ -1689,43 +1689,43 @@ : * reference on the swaphandle, it doesn't matter if it becomes unused. : */ :int valid_swaphandles(swp_entry_t entry, unsigned long *offset) - :{ + 1 1.1e-05 0 0 0 0 0 0 :{ /* valid_swaphandles total: 44 4.9e-04 0 0 0 0 8 3.6e-04 */ : int ret = 0, i = 1 << page_cluster; : unsigned long toff; - : struct swap_info_struct *swapdev = swp_type(entry) + swap_info; + 18 2.0e-04 0 0 0 0 0 0 : struct swap_info_struct *swapdev = swp_type(entry) + swap_info; : - : if (!page_cluster) /* no readahead */ + 1 1.1e-05 0 0 0 0 0 0 : if (!page_cluster) /* no readahead */ : return 0; - : toff = (swp_offset(entry) >> page_cluster) << page_cluster; + 1 1.1e-05 0 0 0 0 0 0 : toff = (swp_offset(entry) >> page_cluster) << page_cluster; : if (!toff) /* first page is swap header */ : toff++, i--; : *offset = toff; : - : spin_lock(&swap_lock); + 1 1.1e-05 0 0 0 0 0 0 : spin_lock(&swap_lock); : do { : /* Don't read-ahead past the end of the swap area */ - : if (toff >= swapdev->max) + 18 2.0e-04 0 0 0 0 4 1.8e-04 : if (toff >= swapdev->max) : break; : /* Don't read in free or bad pages */ : if (!swapdev->swap_map[toff]) : break; - : if (swapdev->swap_map[toff] == SWAP_MAP_BAD) + 4 4.5e-05 0 0 0 0 0 0 : if (swapdev->swap_map[toff] == SWAP_MAP_BAD) : break; : toff++; : ret++; : } while (--i); : spin_unlock(&swap_lock); : return ret; - :} + 0 0 0 0 0 0 4 1.8e-04 :} /* * Total samples for file : "mm/swapfile.c" * - * 2895 0.0361 0 0 0 0 449 0.0215 + * 4522 0.0509 0 0 0 0 1187 0.0539 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap_state.c 
annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap_state.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap_state.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/swap_state.c 2006-03-12 07:20:05.000000000 -0500 @@ -70,29 +70,29 @@ : */ :static int __add_to_swap_cache(struct page *page, swp_entry_t entry, : gfp_t gfp_mask) - :{ /* __add_to_swap_cache total: 4 5.0e-05 0 0 0 0 2 9.6e-05 */ + 14 1.6e-04 0 0 0 0 11 5.0e-04 :{ /* __add_to_swap_cache total: 342 0.0038 0 0 0 0 153 0.0069 */ : int error; : - : BUG_ON(PageSwapCache(page)); - : BUG_ON(PagePrivate(page)); - : error = radix_tree_preload(gfp_mask); - : if (!error) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : BUG_ON(PageSwapCache(page)); + 13 1.5e-04 0 0 0 0 6 2.7e-04 : BUG_ON(PagePrivate(page)); + 12 1.3e-04 0 0 0 0 1 4.5e-05 : error = radix_tree_preload(gfp_mask); + 5 5.6e-05 0 0 0 0 4 1.8e-04 : if (!error) { : write_lock_irq(&swapper_space.tree_lock); - 2 2.5e-05 0 0 0 0 2 9.6e-05 : error = radix_tree_insert(&swapper_space.page_tree, + 51 5.7e-04 0 0 0 0 56 0.0025 : error = radix_tree_insert(&swapper_space.page_tree, : entry.val, page); - : if (!error) { + 8 9.0e-05 0 0 0 0 4 1.8e-04 : if (!error) { : page_cache_get(page); : SetPageLocked(page); : SetPageSwapCache(page); - : set_page_private(page, entry.val); + 1 1.1e-05 0 0 0 0 2 9.1e-05 : set_page_private(page, entry.val); : total_swapcache_pages++; : pagecache_acct(1); : } - : write_unlock_irq(&swapper_space.tree_lock); + 24 2.7e-04 0 0 0 0 2 9.1e-05 : write_unlock_irq(&swapper_space.tree_lock); : radix_tree_preload_end(); : } : return error; - :} + 26 2.9e-04 0 0 0 0 16 7.3e-04 :} : :static int add_to_swap_cache(struct page *page, swp_entry_t entry) :{ @@ -121,19 +121,19 @@ : * been verified to be in the swap cache. : */ :void __delete_from_swap_cache(struct page *page) - :{ /* __delete_from_swap_cache total: 1 1.2e-05 0 0 0 0 0 0 */ - : BUG_ON(!PageLocked(page)); - : BUG_ON(!PageSwapCache(page)); - : BUG_ON(PageWriteback(page)); - : BUG_ON(PagePrivate(page)); + 12 1.3e-04 0 0 0 0 1 4.5e-05 :{ /* __delete_from_swap_cache total: 140 0.0016 0 0 0 0 73 0.0033 */ + 4 4.5e-05 0 0 0 0 0 0 : BUG_ON(!PageLocked(page)); + 13 1.5e-04 0 0 0 0 8 3.6e-04 : BUG_ON(!PageSwapCache(page)); + 3 3.4e-05 0 0 0 0 2 9.1e-05 : BUG_ON(PageWriteback(page)); + 4 4.5e-05 0 0 0 0 3 1.4e-04 : BUG_ON(PagePrivate(page)); : - : radix_tree_delete(&swapper_space.page_tree, page_private(page)); - : set_page_private(page, 0); + 3 3.4e-05 0 0 0 0 4 1.8e-04 : radix_tree_delete(&swapper_space.page_tree, page_private(page)); + 8 9.0e-05 0 0 0 0 2 9.1e-05 : set_page_private(page, 0); : ClearPageSwapCache(page); - 1 1.2e-05 0 0 0 0 0 0 : total_swapcache_pages--; + 49 5.5e-04 0 0 0 0 20 9.1e-04 : total_swapcache_pages--; : pagecache_acct(-1); - : INC_CACHE_INFO(del_total); - :} + 2 2.2e-05 0 0 0 0 1 4.5e-05 : INC_CACHE_INFO(del_total); + 5 5.6e-05 0 0 0 0 4 1.8e-04 :} : :/** : * add_to_swap - allocate swap space for a page @@ -143,16 +143,16 @@ : * swap cache. Caller needs to hold the page lock. 
: */ :int add_to_swap(struct page * page, gfp_t gfp_mask) - 1 1.2e-05 0 0 0 0 0 0 :{ /* add_to_swap total: 4 5.0e-05 0 0 0 0 0 0 */ + 18 2.0e-04 0 0 0 0 4 1.8e-04 :{ /* add_to_swap total: 173 0.0019 0 0 0 0 31 0.0014 */ : swp_entry_t entry; : int err; : - : if (!PageLocked(page)) + 14 1.6e-04 0 0 0 0 3 1.4e-04 : if (!PageLocked(page)) : BUG(); : : for (;;) { - : entry = get_swap_page(); - : if (!entry.val) + 0 0 0 0 0 0 1 4.5e-05 : entry = get_swap_page(); + 2 2.2e-05 0 0 0 0 2 9.1e-05 : if (!entry.val) : return 0; : : /* @@ -166,14 +166,14 @@ : /* : * Add it to the swap cache and mark it dirty : */ - : err = __add_to_swap_cache(page, entry, + 4 4.5e-05 0 0 0 0 2 9.1e-05 : err = __add_to_swap_cache(page, entry, : gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN); : - 2 2.5e-05 0 0 0 0 0 0 : switch (err) { + 90 0.0010 0 0 0 0 8 3.6e-04 : switch (err) { : case 0: /* Success */ : SetPageUptodate(page); : SetPageDirty(page); - 1 1.2e-05 0 0 0 0 0 0 : INC_CACHE_INFO(add_total); + 28 3.1e-04 0 0 0 0 7 3.2e-04 : INC_CACHE_INFO(add_total); : return 1; : case -EEXIST: : /* Raced with "speculative" read_swap_cache_async */ @@ -186,7 +186,7 @@ : return 0; : } : } - :} + 7 7.9e-05 0 0 0 0 3 1.4e-04 :} : :/* : * This must be called only on pages that have @@ -195,32 +195,32 @@ : * the caller has a reference on the page. : */ :void delete_from_swap_cache(struct page *page) - :{ + :{ /* delete_from_swap_cache total: 17 1.9e-04 0 0 0 0 9 4.1e-04 */ : swp_entry_t entry; : : entry.val = page_private(page); : : write_lock_irq(&swapper_space.tree_lock); - : __delete_from_swap_cache(page); - : write_unlock_irq(&swapper_space.tree_lock); + 10 1.1e-04 0 0 0 0 3 1.4e-04 : __delete_from_swap_cache(page); + 2 2.2e-05 0 0 0 0 2 9.1e-05 : write_unlock_irq(&swapper_space.tree_lock); : - : swap_free(entry); - : page_cache_release(page); - :} + 0 0 0 0 0 0 4 1.8e-04 : swap_free(entry); + 1 1.1e-05 0 0 0 0 0 0 : page_cache_release(page); + 3 3.4e-05 0 0 0 0 0 0 :} : :/* : * Strange swizzling function only for use by shmem_writepage : */ :int move_to_swap_cache(struct page *page, swp_entry_t entry) - :{ /* move_to_swap_cache total: 1 1.2e-05 0 0 0 0 0 0 */ - 1 1.2e-05 0 0 0 0 0 0 : int err = __add_to_swap_cache(page, entry, GFP_ATOMIC); + :{ /* move_to_swap_cache total: 11 1.2e-04 0 0 0 0 4 1.8e-04 */ + 6 6.7e-05 0 0 0 0 0 0 : int err = __add_to_swap_cache(page, entry, GFP_ATOMIC); : if (!err) { : remove_from_page_cache(page); - : page_cache_release(page); /* pagecache ref */ - : if (!swap_duplicate(entry)) + 2 2.2e-05 0 0 0 0 1 4.5e-05 : page_cache_release(page); /* pagecache ref */ + 2 2.2e-05 0 0 0 0 2 9.1e-05 : if (!swap_duplicate(entry)) : BUG(); : SetPageDirty(page); - : INC_CACHE_INFO(add_total); + 1 1.1e-05 0 0 0 0 1 4.5e-05 : INC_CACHE_INFO(add_total); : } else if (err == -EEXIST) : INC_CACHE_INFO(exist_race); : return err; @@ -273,21 +273,21 @@ : * them. They are removed from the LRU and freed if this is their last use. 
: */ :void free_pages_and_swap_cache(struct page **pages, int nr) - 647 0.0081 0 0 0 0 56 0.0027 :{ /* free_pages_and_swap_cache total: 16489 0.2055 0 0 7 0.2459 8982 0.4291 */ + 780 0.0088 0 0 0 0 62 0.0028 :{ /* free_pages_and_swap_cache total: 18298 0.2058 0 0 1 0.0339 8592 0.3902 */ : struct page **pagep = pages; : - 382 0.0048 0 0 0 0 94 0.0045 : lru_add_drain(); - 786 0.0098 0 0 0 0 323 0.0154 : while (nr) { - 537 0.0067 0 0 0 0 316 0.0151 : int todo = min(nr, PAGEVEC_SIZE); + 391 0.0044 0 0 0 0 79 0.0036 : lru_add_drain(); + 728 0.0082 0 0 0 0 257 0.0117 : while (nr) { + 572 0.0064 0 0 0 0 316 0.0144 : int todo = min(nr, PAGEVEC_SIZE); : int i; : - 3741 0.0466 0 0 0 0 2462 0.1176 : for (i = 0; i < todo; i++) - 2074 0.0258 0 0 1 0.0351 1208 0.0577 : free_swap_cache(pagep[i]); - 4705 0.0586 0 0 5 0.1756 2578 0.1232 : release_pages(pagep, todo, 0); - 128 0.0016 0 0 0 0 22 0.0011 : pagep += todo; - 447 0.0056 0 0 0 0 284 0.0136 : nr -= todo; + 4013 0.0451 0 0 0 0 2359 0.1071 : for (i = 0; i < todo; i++) + 2386 0.0268 0 0 0 0 1186 0.0539 : free_swap_cache(pagep[i]); + 5557 0.0625 0 0 0 0 2502 0.1136 : release_pages(pagep, todo, 0); + 140 0.0016 0 0 0 0 24 0.0011 : pagep += todo; + 504 0.0057 0 0 0 0 248 0.0113 : nr -= todo; : } - 196 0.0024 0 0 0 0 131 0.0063 :} + 218 0.0025 0 0 0 0 110 0.0050 :} : :/* : * Lookup a swap entry in the swap cache. A found page will be returned @@ -296,17 +296,17 @@ : * lock before returning. : */ :struct page * lookup_swap_cache(swp_entry_t entry) - :{ + 1 1.1e-05 0 0 0 0 0 0 :{ /* lookup_swap_cache total: 100 0.0011 0 0 0 0 10 4.5e-04 */ : struct page *page; : - : page = find_get_page(&swapper_space, entry.val); + 36 4.0e-04 0 0 0 0 2 9.1e-05 : page = find_get_page(&swapper_space, entry.val); : : if (page) - : INC_CACHE_INFO(find_success); + 15 1.7e-04 0 0 0 0 1 4.5e-05 : INC_CACHE_INFO(find_success); : : INC_CACHE_INFO(find_total); : return page; - :} + 48 5.4e-04 0 0 0 0 7 3.2e-04 :} : :/* : * Locate a page of swap in physical memory, reserving swap cache space @@ -316,7 +316,7 @@ : */ :struct page *read_swap_cache_async(swp_entry_t entry, : struct vm_area_struct *vma, unsigned long addr) - :{ + 13 1.5e-04 0 0 0 0 3 1.4e-04 :{ /* read_swap_cache_async total: 87 9.8e-04 0 0 0 0 20 9.1e-04 */ : struct page *found_page, *new_page = NULL; : int err; : @@ -326,16 +326,16 @@ : * called after lookup_swap_cache() failed, re-calling : * that would confuse statistics. : */ - : found_page = find_get_page(&swapper_space, entry.val); + 2 2.2e-05 0 0 0 0 0 0 : found_page = find_get_page(&swapper_space, entry.val); : if (found_page) : break; : : /* : * Get a new page to read into from swap. : */ - : if (!new_page) { - : new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr); - : if (!new_page) + 12 1.3e-04 0 0 0 0 2 9.1e-05 : if (!new_page) { + 7 7.9e-05 0 0 0 0 1 4.5e-05 : new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr); + 1 1.1e-05 0 0 0 0 2 9.1e-05 : if (!new_page) : break; /* Out of memory */ : } : @@ -355,24 +355,24 @@ : * Initiate read into locked page and return. 
 : */
 : lru_cache_add_active(new_page);
- : swap_readpage(NULL, new_page);
+ 6 6.7e-05 0 0 0 0 3 1.4e-04 : swap_readpage(NULL, new_page);
 : return new_page;
 : }
 : } while (err != -ENOENT && err != -ENOMEM);
 :
- : if (new_page)
+ 7 7.9e-05 0 0 0 0 1 4.5e-05 : if (new_page)
 : page_cache_release(new_page);
 : return found_page;
- :}
+ 17 1.9e-04 0 0 0 0 4 1.8e-04 :}
/*
 * Total samples for file : "mm/swap_state.c"
 *
- * 13651 0.1701 0 0 6 0.2107 7476 0.3571
+ * 15902 0.1788 0 0 0 0 7360 0.3343
 */
/*
- * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0
+ * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2
 *
 * Interpretation of command line:
 * Output annotated source file with samples
diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/thrash.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/thrash.c
--- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/thrash.c 1969-12-31 19:00:00.000000000 -0500
+++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/thrash.c 2006-03-12 07:20:06.000000000 -0500
@@ -0,0 +1,126 @@
+ :/*
+ : * mm/thrash.c
+ : *
+ : * Copyright (C) 2004, Red Hat, Inc.
+ : * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
+ : * Released under the GPL, see the file COPYING for details.
+ : *
+ : * Simple token based thrashing protection, using the algorithm
+ : * described in: http://www.cs.wm.edu/~sjiang/token.pdf
+ : */
+ :#include <linux/jiffies.h>
+ :#include <linux/mm.h>
+ :#include <linux/sched.h>
+ :#include <linux/swap.h>
+ :
+ :static DEFINE_SPINLOCK(swap_token_lock);
+ :static unsigned long swap_token_timeout;
+ :static unsigned long swap_token_check;
+ :struct mm_struct * swap_token_mm = &init_mm;
+ :
+ :#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
+ :#define SWAP_TOKEN_TIMEOUT (300 * HZ)
+ :/*
+ : * Currently disabled; Needs further code to work at HZ * 300.
+ : */
+ :unsigned long swap_token_default_timeout = SWAP_TOKEN_TIMEOUT;
+ :
+ :/*
+ : * Take the token away if the process had no page faults
+ : * in the last interval, or if it has held the token for
+ : * too long.
+ : */
+ :#define SWAP_TOKEN_ENOUGH_RSS 1
+ :#define SWAP_TOKEN_TIMED_OUT 2
+ :static int should_release_swap_token(struct mm_struct *mm)
+ :{
+ : int ret = 0;
+ : if (!mm->recent_pagein)
+ : ret = SWAP_TOKEN_ENOUGH_RSS;
+ : else if (time_after(jiffies, swap_token_timeout))
+ : ret = SWAP_TOKEN_TIMED_OUT;
+ 1 1.1e-05 0 0 0 0 0 0 : mm->recent_pagein = 0;
+ : return ret;
+ :}
+ :
+ :/*
+ : * Try to grab the swapout protection token. We only try to
+ : * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
+ : * SMP lock contention and to check that the process that held
+ : * the token before is no longer thrashing.
+ : */
+ :void grab_swap_token(void)
+ 16 1.8e-04 0 0 0 0 2 9.1e-05 :{ /* grab_swap_token total: 44 4.9e-04 0 0 0 0 6 2.7e-04 */
+ : struct mm_struct *mm;
+ : int reason;
+ :
+ : /* We have the token. Let others know we still need it. */
+ : if (has_swap_token(current->mm)) {
+ : current->mm->recent_pagein = 1;
+ : if (unlikely(!swap_token_default_timeout))
+ : disable_swap_token();
+ : return;
+ : }
+ :
+ 9 1.0e-04 0 0 0 0 2 9.1e-05 : if (time_after(jiffies, swap_token_check)) {
+ :
+ : if (!swap_token_default_timeout) {
+ : swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
+ : return;
+ : }
+ :
+ : /* ... or if we recently held the token. 
*/ + : if (time_before(jiffies, current->mm->swap_token_time)) + : return; + : + 1 1.1e-05 0 0 0 0 0 0 : if (!spin_trylock(&swap_token_lock)) + : return; + : + : swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + : + : mm = swap_token_mm; + : if ((reason = should_release_swap_token(mm))) { + : unsigned long eligible = jiffies; + : if (reason == SWAP_TOKEN_TIMED_OUT) { + : eligible += swap_token_default_timeout; + : } + 1 1.1e-05 0 0 0 0 0 0 : mm->swap_token_time = eligible; + : swap_token_timeout = jiffies + swap_token_default_timeout; + : swap_token_mm = current->mm; + : } + : spin_unlock(&swap_token_lock); + : } + : return; + 15 1.7e-04 0 0 0 0 2 9.1e-05 :} + : + :/* Called on process exit. */ + :void __put_swap_token(struct mm_struct *mm) + :{ + : spin_lock(&swap_token_lock); + : if (likely(mm == swap_token_mm)) { + : mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + : swap_token_mm = &init_mm; + : swap_token_check = jiffies; + : } + : spin_unlock(&swap_token_lock); + :} +/* + * Total samples for file : "mm/thrash.c" + * + * 43 4.8e-04 0 0 0 0 6 2.7e-04 + */ + + +/* + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 + * + * Interpretation of command line: + * Output annotated source file with samples + * Output all files + * + * CPU: AMD64 processors, speed 2600 MHz (estimated) + * Counted CPU_CLK_UNHALTED events (Cycles outside of halt state) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted HARDWARE_INTERRUPTS events (Number of taken hardware interrupts) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted MISALIGNED_DATA_REFS events (Misaligned data references) with a unit mask of 0x00 (No unit mask) count 10000 + * Counted RETIRED_INSNS events (Retired instructions (includes exceptions, interrupts, re-syncs)) with a unit mask of 0x00 (No unit mask) count 10000 + */ diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/truncate.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/truncate.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/truncate.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/truncate.c 2006-03-12 07:20:05.000000000 -0500 @@ -35,19 +35,19 @@ : */ :static void :truncate_complete_page(struct address_space *mapping, struct page *page) - 59 7.4e-04 0 0 0 0 11 5.3e-04 :{ /* truncate_complete_page total: 452 0.0056 0 0 0 0 122 0.0058 */ + 48 5.4e-04 0 0 0 0 7 3.2e-04 :{ /* truncate_complete_page total: 278 0.0031 0 0 0 0 94 0.0043 */ : if (page->mapping != mapping) : return; : - 5 6.2e-05 0 0 0 0 3 1.4e-04 : if (PagePrivate(page)) - 8 1.0e-04 0 0 0 0 0 0 : do_invalidatepage(page, 0); + 2 2.2e-05 0 0 0 0 4 1.8e-04 : if (PagePrivate(page)) + 13 1.5e-04 0 0 0 0 4 1.8e-04 : do_invalidatepage(page, 0); : : clear_page_dirty(page); : ClearPageUptodate(page); : ClearPageMappedToDisk(page); - 12 1.5e-04 0 0 0 0 1 4.8e-05 : remove_from_page_cache(page); - 70 8.7e-04 0 0 0 0 23 0.0011 : page_cache_release(page); /* pagecache ref */ - 64 8.0e-04 0 0 0 0 32 0.0015 :} + 6 6.7e-05 0 0 0 0 0 0 : remove_from_page_cache(page); + 54 6.1e-04 0 0 0 0 30 0.0014 : page_cache_release(page); /* pagecache ref */ + 18 2.0e-04 0 0 0 0 7 3.2e-04 :} : :/* : * This is for invalidate_inode_pages(). 
That function can be called at @@ -60,14 +60,14 @@ : */ :static int :invalidate_complete_page(struct address_space *mapping, struct page *page) - :{ - : if (page->mapping != mapping) + 0 0 0 0 0 0 2 9.1e-05 :{ /* invalidate_complete_page total: 14 1.6e-04 0 0 0 0 4 1.8e-04 */ + 1 1.1e-05 0 0 0 0 0 0 : if (page->mapping != mapping) : return 0; : : if (PagePrivate(page) && !try_to_release_page(page, 0)) : return 0; : - : write_lock_irq(&mapping->tree_lock); + 7 7.9e-05 0 0 0 0 0 0 : write_lock_irq(&mapping->tree_lock); : if (PageDirty(page)) { : write_unlock_irq(&mapping->tree_lock); : return 0; @@ -75,11 +75,11 @@ : : BUG_ON(PagePrivate(page)); : __remove_from_page_cache(page); - : write_unlock_irq(&mapping->tree_lock); + 1 1.1e-05 0 0 0 0 0 0 : write_unlock_irq(&mapping->tree_lock); : ClearPageUptodate(page); - : page_cache_release(page); /* pagecache ref */ + 1 1.1e-05 0 0 0 0 0 0 : page_cache_release(page); /* pagecache ref */ : return 1; - :} + 1 1.1e-05 0 0 0 0 1 4.5e-05 :} : :/** : * truncate_inode_pages - truncate range of pages specified by start and @@ -107,64 +107,64 @@ : */ :void truncate_inode_pages_range(struct address_space *mapping, : loff_t lstart, loff_t lend) - 122 0.0015 0 0 0 0 20 9.6e-04 :{ /* truncate_inode_pages_range total: 1135 0.0141 0 0 0 0 283 0.0135 */ - : const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; + 140 0.0016 0 0 0 0 19 8.6e-04 :{ /* truncate_inode_pages_range total: 889 0.0100 0 0 0 0 197 0.0089 */ + 1 1.1e-05 0 0 0 0 0 0 : const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; : pgoff_t end; - 4 5.0e-05 0 0 0 0 10 4.8e-04 : const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); + 3 3.4e-05 0 0 0 0 3 1.4e-04 : const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); : struct pagevec pvec; : pgoff_t next; : int i; : - 11 1.4e-04 0 0 0 0 4 1.9e-04 : if (mapping->nrpages == 0) + 13 1.5e-04 0 0 0 0 4 1.8e-04 : if (mapping->nrpages == 0) : return; : - 20 2.5e-04 0 0 0 0 7 3.3e-04 : BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); - 5 6.2e-05 0 0 0 0 2 9.6e-05 : end = (lend >> PAGE_CACHE_SHIFT); + 11 1.2e-04 0 0 0 0 0 0 : BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); + 2 2.2e-05 0 0 0 0 0 0 : end = (lend >> PAGE_CACHE_SHIFT); : : pagevec_init(&pvec, 0); : next = start; - 71 8.8e-04 0 0 0 0 17 8.1e-04 : while (next <= end && + 56 6.3e-04 0 0 0 0 8 3.6e-04 : while (next <= end && : pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { - 15 1.9e-04 0 0 0 0 3 1.4e-04 : for (i = 0; i < pagevec_count(&pvec); i++) { - 15 1.9e-04 0 0 0 0 1 4.8e-05 : struct page *page = pvec.pages[i]; - 12 1.5e-04 0 0 0 0 3 1.4e-04 : pgoff_t page_index = page->index; + 8 9.0e-05 0 0 0 0 1 4.5e-05 : for (i = 0; i < pagevec_count(&pvec); i++) { + 10 1.1e-04 0 0 0 0 7 3.2e-04 : struct page *page = pvec.pages[i]; + 5 5.6e-05 0 0 0 0 1 4.5e-05 : pgoff_t page_index = page->index; : - 136 0.0017 0 0 0 0 24 0.0011 : if (page_index > end) { + 123 0.0014 0 0 0 0 43 0.0020 : if (page_index > end) { : next = page_index; : break; : } : - 8 1.0e-04 0 0 0 0 0 0 : if (page_index > next) + 10 1.1e-04 0 0 0 0 0 0 : if (page_index > next) : next = page_index; - 4 5.0e-05 0 0 0 0 0 0 : next++; - 49 6.1e-04 0 0 0 0 3 1.4e-04 : if (TestSetPageLocked(page)) + : next++; + 10 1.1e-04 0 0 0 0 3 1.4e-04 : if (TestSetPageLocked(page)) : continue; - 6 7.5e-05 0 0 0 0 2 9.6e-05 : if (PageWriteback(page)) { + 4 4.5e-05 0 0 0 0 2 9.1e-05 : if (PageWriteback(page)) { : unlock_page(page); : continue; : } - 28 3.5e-04 0 0 0 0 3 1.4e-04 : 
truncate_complete_page(mapping, page); - 188 0.0023 0 0 0 0 87 0.0042 : unlock_page(page); + 5 5.6e-05 0 0 0 0 2 9.1e-05 : truncate_complete_page(mapping, page); + 111 0.0012 0 0 0 0 55 0.0025 : unlock_page(page); : } : pagevec_release(&pvec); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : cond_resched(); + : cond_resched(); : } : - 9 1.1e-04 0 0 0 0 1 4.8e-05 : if (partial) { + 3 3.4e-05 0 0 0 0 0 0 : if (partial) { : struct page *page = find_lock_page(mapping, start - 1); : if (page) { : wait_on_page_writeback(page); : truncate_partial_page(page, partial); : unlock_page(page); - 2 2.5e-05 0 0 0 0 0 0 : page_cache_release(page); + 1 1.1e-05 0 0 0 0 0 0 : page_cache_release(page); : } : } : : next = start; : for ( ; ; ) { - 39 4.9e-04 0 0 0 0 7 3.3e-04 : cond_resched(); - 10 1.2e-04 0 0 0 0 11 5.3e-04 : if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { - 1 1.2e-05 0 0 0 0 4 1.9e-04 : if (next == start) + 31 3.5e-04 0 0 0 0 4 1.8e-04 : cond_resched(); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (next == start) : break; : next = start; : continue; @@ -188,7 +188,7 @@ : } : pagevec_release(&pvec); : } - 190 0.0024 0 0 0 0 35 0.0017 :} + 224 0.0025 0 0 0 0 27 0.0012 :} :EXPORT_SYMBOL(truncate_inode_pages_range); : :/** @@ -199,9 +199,9 @@ : * Called under (and serialised by) inode->i_mutex. : */ :void truncate_inode_pages(struct address_space *mapping, loff_t lstart) - 7 8.7e-05 0 0 0 0 7 3.3e-04 :{ /* truncate_inode_pages total: 204 0.0025 0 0 0 0 49 0.0023 */ - 146 0.0018 0 0 0 0 14 6.7e-04 : truncate_inode_pages_range(mapping, lstart, (loff_t)-1); - 51 6.4e-04 0 0 0 0 28 0.0013 :} + 8 9.0e-05 0 0 0 0 6 2.7e-04 :{ /* truncate_inode_pages total: 178 0.0020 0 0 0 0 24 0.0011 */ + 150 0.0017 0 0 0 0 8 3.6e-04 : truncate_inode_pages_range(mapping, lstart, (loff_t)-1); + 20 2.2e-04 0 0 0 0 10 4.5e-04 :} :EXPORT_SYMBOL(truncate_inode_pages); : :/** @@ -219,16 +219,16 @@ : */ :unsigned long invalidate_mapping_pages(struct address_space *mapping, : pgoff_t start, pgoff_t end) - :{ + 1 1.1e-05 0 0 0 0 1 4.5e-05 :{ /* invalidate_mapping_pages total: 17 1.9e-04 0 0 0 0 3 1.4e-04 */ : struct pagevec pvec; : pgoff_t next = start; : unsigned long ret = 0; : int i; : : pagevec_init(&pvec, 0); - : while (next <= end && + 2 2.2e-05 0 0 0 0 0 0 : while (next <= end && : pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { - : for (i = 0; i < pagevec_count(&pvec); i++) { + 0 0 0 0 0 0 1 4.5e-05 : for (i = 0; i < pagevec_count(&pvec); i++) { : struct page *page = pvec.pages[i]; : : if (TestSetPageLocked(page)) { @@ -237,26 +237,26 @@ : } : if (page->index > next) : next = page->index; - : next++; - : if (PageDirty(page) || PageWriteback(page)) + 1 1.1e-05 0 0 0 0 0 0 : next++; + 1 1.1e-05 0 0 0 0 0 0 : if (PageDirty(page) || PageWriteback(page)) : goto unlock; : if (page_mapped(page)) : goto unlock; - : ret += invalidate_complete_page(mapping, page); + 5 5.6e-05 0 0 0 0 1 4.5e-05 : ret += invalidate_complete_page(mapping, page); :unlock: : unlock_page(page); - : if (next > end) + 1 1.1e-05 0 0 0 0 0 0 : if (next > end) : break; : } : pagevec_release(&pvec); : } : return ret; - :} + 1 1.1e-05 0 0 0 0 0 0 :} : :unsigned long invalidate_inode_pages(struct address_space *mapping) - :{ - : return invalidate_mapping_pages(mapping, 0, ~0UL); - :} + :{ /* invalidate_inode_pages total: 1 1.1e-05 0 0 0 0 1 4.5e-05 */ + 1 1.1e-05 0 0 0 0 0 0 : return invalidate_mapping_pages(mapping, 0, ~0UL); + 0 0 0 0 0 0 1 4.5e-05 :} : 
:EXPORT_SYMBOL(invalidate_inode_pages); : @@ -357,12 +357,12 @@ /* * Total samples for file : "mm/truncate.c" * - * 1368 0.0170 0 0 0 0 364 0.0174 + * 1118 0.0126 0 0 0 0 264 0.0120 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/util.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/util.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/util.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/util.c 2006-03-12 07:20:06.000000000 -0500 @@ -23,29 +23,29 @@ : * @gfp: the GFP mask used in the kmalloc() call when allocating memory : */ :char *kstrdup(const char *s, gfp_t gfp) - 6 7.5e-05 0 0 0 0 0 0 :{ /* kstrdup total: 39 4.9e-04 0 0 0 0 6 2.9e-04 */ + 1 1.1e-05 0 0 0 0 1 4.5e-05 :{ /* kstrdup total: 35 3.9e-04 0 0 0 0 7 3.2e-04 */ : size_t len; : char *buf; : - 11 1.4e-04 0 0 0 0 1 4.8e-05 : if (!s) + 8 9.0e-05 0 0 0 0 1 4.5e-05 : if (!s) : return NULL; : - 7 8.7e-05 0 0 0 0 2 9.6e-05 : len = strlen(s) + 1; + 4 4.5e-05 0 0 0 0 3 1.4e-04 : len = strlen(s) + 1; : buf = kmalloc(len, gfp); - 1 1.2e-05 0 0 0 0 0 0 : if (buf) - 12 1.5e-04 0 0 0 0 3 1.4e-04 : memcpy(buf, s, len); + 4 4.5e-05 0 0 0 0 0 0 : if (buf) + 15 1.7e-04 0 0 0 0 1 4.5e-05 : memcpy(buf, s, len); : return buf; - 2 2.5e-05 0 0 0 0 0 0 :} + 3 3.4e-05 0 0 0 0 1 4.5e-05 :} :EXPORT_SYMBOL(kstrdup); /* * Total samples for file : "mm/util.c" * - * 39 4.9e-04 0 0 0 0 6 2.9e-04 + * 35 3.9e-04 0 0 0 0 7 3.2e-04 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples diff -NurpP annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/vmscan.c annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/vmscan.c --- annotated-0/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/vmscan.c 2006-03-12 07:18:54.000000000 -0500 +++ annotated-1/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/mm/vmscan.c 2006-03-12 07:20:05.000000000 -0500 @@ -184,26 +184,26 @@ : * Returns the number of slab objects which we shrunk. 
: */ :int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) - 0 0 0 0 0 0 1 4.8e-05 :{ /* shrink_slab total: 38 4.7e-04 0 0 0 0 6 2.9e-04 */ + 4 4.5e-05 0 0 0 0 1 4.5e-05 :{ /* shrink_slab total: 348 0.0039 0 0 0 0 64 0.0029 */ : struct shrinker *shrinker; : int ret = 0; : - 1 1.2e-05 0 0 0 0 0 0 : if (scanned == 0) + 18 2.0e-04 0 0 0 0 1 4.5e-05 : if (scanned == 0) : scanned = SWAP_CLUSTER_MAX; : - : if (!down_read_trylock(&shrinker_rwsem)) + 3 3.4e-05 0 0 0 0 0 0 : if (!down_read_trylock(&shrinker_rwsem)) : return 1; /* Assume we'll be able to shrink next time */ : - 5 6.2e-05 0 0 0 0 0 0 : list_for_each_entry(shrinker, &shrinker_list, list) { + 41 4.6e-04 0 0 0 0 1 4.5e-05 : list_for_each_entry(shrinker, &shrinker_list, list) { : unsigned long long delta; : unsigned long total_scan; - 1 1.2e-05 0 0 0 0 0 0 : unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); + 21 2.4e-04 0 0 0 0 6 2.7e-04 : unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); : - 1 1.2e-05 0 0 0 0 0 0 : delta = (4 * scanned) / shrinker->seeks; + 6 6.7e-05 0 0 0 0 0 0 : delta = (4 * scanned) / shrinker->seeks; : delta *= max_pass; : do_div(delta, lru_pages + 1); - 24 3.0e-04 0 0 0 0 4 1.9e-04 : shrinker->nr += delta; - : if (shrinker->nr < 0) { + 202 0.0023 0 0 0 0 49 0.0022 : shrinker->nr += delta; + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (shrinker->nr < 0) { : printk(KERN_ERR "%s: nr=%ld\n", : __FUNCTION__, shrinker->nr); : shrinker->nr = max_pass; @@ -214,24 +214,24 @@ : * never try to free more than twice the estimate number of : * freeable entries. : */ - 2 2.5e-05 0 0 0 0 0 0 : if (shrinker->nr > max_pass * 2) + 1 1.1e-05 0 0 0 0 0 0 : if (shrinker->nr > max_pass * 2) : shrinker->nr = max_pass * 2; : - : total_scan = shrinker->nr; - : shrinker->nr = 0; + 3 3.4e-05 0 0 0 0 0 0 : total_scan = shrinker->nr; + 4 4.5e-05 0 0 0 0 0 0 : shrinker->nr = 0; : - : while (total_scan >= SHRINK_BATCH) { + 2 2.2e-05 0 0 0 0 0 0 : while (total_scan >= SHRINK_BATCH) { : long this_scan = SHRINK_BATCH; : int shrink_ret; : int nr_before; : : nr_before = (*shrinker->shrinker)(0, gfp_mask); : shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); - : if (shrink_ret == -1) + 1 1.1e-05 0 0 0 0 0 0 : if (shrink_ret == -1) : break; : if (shrink_ret < nr_before) : ret += nr_before - shrink_ret; - : mod_page_state(slabs_scanned, this_scan); + 1 1.1e-05 0 0 0 0 0 0 : mod_page_state(slabs_scanned, this_scan); : total_scan -= this_scan; : : cond_resched(); @@ -241,7 +241,7 @@ : } : up_read(&shrinker_rwsem); : return ret; - 2 2.5e-05 0 0 0 0 1 4.8e-05 :} + 11 1.2e-04 0 0 0 0 2 9.1e-05 :} : :/* Called without lock on whether page is mapped, so answer is unstable */ :static inline int page_mapping_inuse(struct page *page) @@ -309,7 +309,7 @@ : * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). : */ :static pageout_t pageout(struct page *page, struct address_space *mapping) - :{ /* pageout total: 2 2.5e-05 0 0 0 0 2 9.6e-05 */ + 11 1.2e-04 0 0 0 0 8 3.6e-04 :{ /* pageout total: 305 0.0034 0 0 0 0 120 0.0054 */ : /* : * If the page is dirty, only perform writeback if that write : * will be non-blocking. To prevent this allocation from being @@ -327,9 +327,9 @@ : * congestion state of the swapdevs. Easy to fix, if needed. : * See swapfile.c:page_queue_congested(). 
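/*
 * The gate this comment is describing is may_write_to_queue(), called a
 * few lines below: reclaim only issues writepage against a backing
 * device whose request queue is not already congested, while dedicated
 * writeback threads are allowed through regardless.  A minimal sketch
 * of such a gate, with invented names and types:
 */
#include <stdbool.h>

struct bdi_stub {
        bool write_congested;
};

/* 'is_flusher' stands in for the kernel's kswapd/pdflush checks */
static bool may_write(const struct bdi_stub *bdi, bool is_flusher)
{
        if (is_flusher)
                return true;    /* writeback daemons may always write */
        return !bdi->write_congested;
}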
: */ - : if (!is_page_cache_freeable(page)) + 10 1.1e-04 0 0 0 0 0 0 : if (!is_page_cache_freeable(page)) : return PAGE_KEEP; - : if (!mapping) { + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (!mapping) { : /* : * Some data journaling orphaned pages can have : * page->mapping == NULL while being dirty with clean buffers. @@ -343,29 +343,29 @@ : } : return PAGE_KEEP; : } - : if (mapping->a_ops->writepage == NULL) + 7 7.9e-05 0 0 0 0 3 1.4e-04 : if (mapping->a_ops->writepage == NULL) : return PAGE_ACTIVATE; - : if (!may_write_to_queue(mapping->backing_dev_info)) + 6 6.7e-05 0 0 0 0 4 1.8e-04 : if (!may_write_to_queue(mapping->backing_dev_info)) : return PAGE_KEEP; : - : if (clear_page_dirty_for_io(page)) { + 98 0.0011 0 0 0 0 39 0.0018 : if (clear_page_dirty_for_io(page)) { : int res; : struct writeback_control wbc = { : .sync_mode = WB_SYNC_NONE, : .nr_to_write = SWAP_CLUSTER_MAX, : .nonblocking = 1, : .for_reclaim = 1, - 0 0 0 0 0 0 1 4.8e-05 : }; + 106 0.0012 0 0 0 0 42 0.0019 : }; : : SetPageReclaim(page); - 2 2.5e-05 0 0 0 0 1 4.8e-05 : res = mapping->a_ops->writepage(page, &wbc); - : if (res < 0) + 18 2.0e-04 0 0 0 0 5 2.3e-04 : res = mapping->a_ops->writepage(page, &wbc); + 1 1.1e-05 0 0 0 0 0 0 : if (res < 0) : handle_write_error(mapping, page, res); - : if (res == AOP_WRITEPAGE_ACTIVATE) { + 3 3.4e-05 0 0 0 0 2 9.1e-05 : if (res == AOP_WRITEPAGE_ACTIVATE) { : ClearPageReclaim(page); : return PAGE_ACTIVATE; : } - : if (!PageWriteback(page)) { + 6 6.7e-05 0 0 0 0 3 1.4e-04 : if (!PageWriteback(page)) { : /* synchronous write or broken a_ops? */ : ClearPageReclaim(page); : } @@ -374,44 +374,44 @@ : } : : return PAGE_CLEAN; - :} + 5 5.6e-05 0 0 0 0 1 4.5e-05 :} : :static int remove_mapping(struct address_space *mapping, struct page *page) - 2 2.5e-05 0 0 0 0 2 9.6e-05 :{ /* remove_mapping total: 85 0.0011 0 0 0 0 33 0.0016 */ - : if (!mapping) + 18 2.0e-04 0 0 0 0 23 0.0010 :{ /* remove_mapping total: 848 0.0095 0 0 0 0 520 0.0236 */ + 31 3.5e-04 0 0 0 0 5 2.3e-04 : if (!mapping) : return 0; /* truncate got there first */ : - : write_lock_irq(&mapping->tree_lock); + 7 7.9e-05 0 0 0 0 0 0 : write_lock_irq(&mapping->tree_lock); : : /* : * The non-racy check for busy page. It is critical to check : * PageDirty _after_ making sure that the page is freeable and : * not in use by anybody. 
(pagecache + us == 2) : */ - 1 1.2e-05 0 0 0 0 0 0 : if (unlikely(page_count(page) != 2)) + 17 1.9e-04 0 0 0 0 9 4.1e-04 : if (unlikely(page_count(page) != 2)) : goto cannot_free; : smp_rmb(); - : if (unlikely(PageDirty(page))) + 8 9.0e-05 0 0 0 0 0 0 : if (unlikely(PageDirty(page))) : goto cannot_free; : - : if (PageSwapCache(page)) { + 10 1.1e-04 0 0 0 0 1 4.5e-05 : if (PageSwapCache(page)) { : swp_entry_t swap = { .val = page_private(page) }; - : __delete_from_swap_cache(page); - : write_unlock_irq(&mapping->tree_lock); - : swap_free(swap); + 2 2.2e-05 0 0 0 0 0 0 : __delete_from_swap_cache(page); + 23 2.6e-04 0 0 0 0 4 1.8e-04 : write_unlock_irq(&mapping->tree_lock); + 2 2.2e-05 0 0 0 0 8 3.6e-04 : swap_free(swap); : __put_page(page); /* The pagecache ref */ : return 1; : } : : __remove_from_page_cache(page); - 10 1.2e-04 0 0 0 0 1 4.8e-05 : write_unlock_irq(&mapping->tree_lock); + 33 3.7e-04 0 0 0 0 10 4.5e-04 : write_unlock_irq(&mapping->tree_lock); : __put_page(page); : return 1; : :cannot_free: : write_unlock_irq(&mapping->tree_lock); : return 0; - 6 7.5e-05 0 0 0 0 4 1.9e-04 :} + 125 0.0014 0 0 0 0 90 0.0041 :} : :/* : * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed @@ -1070,19 +1070,19 @@ : */ :static int isolate_lru_pages(int nr_to_scan, struct list_head *src, : struct list_head *dst, int *scanned) - 1 1.2e-05 0 0 0 0 0 0 :{ /* isolate_lru_pages total: 1192 0.0149 0 0 0 0 147 0.0070 */ + 32 3.6e-04 0 0 0 0 9 4.1e-04 :{ /* isolate_lru_pages total: 22505 0.2531 0 0 5 0.1697 2834 0.1287 */ : int nr_taken = 0; : struct page *page; : int scan = 0; : - 425 0.0053 0 0 0 0 37 0.0018 : while (scan++ < nr_to_scan && !list_empty(src)) { - 4 5.0e-05 0 0 0 0 1 4.8e-05 : page = lru_to_page(src); - 7 8.7e-05 0 0 0 0 2 9.6e-05 : prefetchw_prev_lru_page(page, src, flags); + 6917 0.0778 0 0 0 0 1081 0.0491 : while (scan++ < nr_to_scan && !list_empty(src)) { + 97 0.0011 0 0 0 0 18 8.2e-04 : page = lru_to_page(src); + 216 0.0024 0 0 0 0 25 0.0011 : prefetchw_prev_lru_page(page, src, flags); : - 16 2.0e-04 0 0 0 0 3 1.4e-04 : if (!TestClearPageLRU(page)) + 510 0.0057 0 0 0 0 68 0.0031 : if (!TestClearPageLRU(page)) : BUG(); : list_del(&page->lru); - : if (get_page_testone(page)) { + 193 0.0022 0 0 0 0 28 0.0013 : if (get_page_testone(page)) { : /* : * It is being freed elsewhere : */ @@ -1092,13 +1092,13 @@ : continue; : } else { : list_add(&page->lru, dst); - 2 2.5e-05 0 0 0 0 0 0 : nr_taken++; + 70 7.9e-04 0 0 0 0 7 3.2e-04 : nr_taken++; : } : } : - 10 1.2e-04 0 0 0 0 3 1.4e-04 : *scanned = scan; + 262 0.0029 0 0 0 0 42 0.0019 : *scanned = scan; : return nr_taken; - 28 3.5e-04 0 0 0 0 5 2.4e-04 :} + 636 0.0072 0 0 1 0.0339 122 0.0055 :} : :/* : * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed @@ -1112,55 +1112,55 @@ : pagevec_init(&pvec, 1); : : lru_add_drain(); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : spin_lock_irq(&zone->lru_lock); - 1 1.2e-05 0 0 0 0 0 0 : while (max_scan > 0) { + 16 1.8e-04 0 0 0 0 6 2.7e-04 : spin_lock_irq(&zone->lru_lock); + 18 2.0e-04 0 0 0 0 1 4.5e-05 : while (max_scan > 0) { : struct page *page; : int nr_taken; : int nr_scan; : int nr_freed; : - : nr_taken = isolate_lru_pages(sc->swap_cluster_max, + 5 5.6e-05 0 0 0 0 1 4.5e-05 : nr_taken = isolate_lru_pages(sc->swap_cluster_max, : &zone->inactive_list, : &page_list, &nr_scan); - 1 1.2e-05 0 0 0 0 0 0 : zone->nr_inactive -= nr_taken; - : zone->pages_scanned += nr_scan; + 2 2.2e-05 0 0 0 0 1 4.5e-05 : zone->nr_inactive -= nr_taken; + 1 1.1e-05 0 0 0 0 0 0 : zone->pages_scanned += nr_scan; 
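/*
 * The "(pagecache + us == 2)" test above is the freeing invariant: the
 * page cache holds one reference and the isolating caller holds the
 * other, so a page_count() above two means another user appeared and
 * the page must not be freed.  isolate_lru_pages() runs the mirror
 * image of this test, get_page_testone(), refusing pages whose count
 * shows they are already being torn down.  A C11-atomics sketch of that
 * try-get shape (illustrative; the kernel's _count uses a biased
 * encoding, not this one):
 */
#include <stdatomic.h>
#include <stdbool.h>

/* take a reference unless the count already dropped to zero */
static bool try_get(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* being freed elsewhere */
}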
: spin_unlock_irq(&zone->lru_lock); : - : if (nr_taken == 0) + 3 3.4e-05 0 0 0 0 1 4.5e-05 : if (nr_taken == 0) : goto done; : : max_scan -= nr_scan; : nr_freed = shrink_list(&page_list, sc); : : local_irq_disable(); - : if (current_is_kswapd()) { - : __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); - : __mod_page_state(kswapd_steal, nr_freed); + 2 2.2e-05 0 0 0 0 0 0 : if (current_is_kswapd()) { + 6 6.7e-05 0 0 0 0 1 4.5e-05 : __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : __mod_page_state(kswapd_steal, nr_freed); : } else - : __mod_page_state_zone(zone, pgscan_direct, nr_scan); - 1 1.2e-05 0 0 0 0 2 9.6e-05 : __mod_page_state_zone(zone, pgsteal, nr_freed); + 1 1.1e-05 0 0 0 0 0 0 : __mod_page_state_zone(zone, pgscan_direct, nr_scan); + 20 2.2e-04 0 0 0 0 2 9.1e-05 : __mod_page_state_zone(zone, pgsteal, nr_freed); : - : spin_lock(&zone->lru_lock); + 27 3.0e-04 0 0 0 0 17 7.7e-04 : spin_lock(&zone->lru_lock); : /* : * Put back any unfreeable pages. : */ - : while (!list_empty(&page_list)) { - : page = lru_to_page(&page_list); - 0 0 0 0 0 0 1 4.8e-05 : if (TestSetPageLRU(page)) + 27 3.0e-04 0 0 0 0 5 2.3e-04 : while (!list_empty(&page_list)) { + 12 1.3e-04 0 0 0 0 8 3.6e-04 : page = lru_to_page(&page_list); + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (TestSetPageLRU(page)) : BUG(); : list_del(&page->lru); - : if (PageActive(page)) + 5 5.6e-05 0 0 0 0 4 1.8e-04 : if (PageActive(page)) : add_page_to_active_list(zone, page); : else : add_page_to_inactive_list(zone, page); : if (!pagevec_add(&pvec, page)) { - : spin_unlock_irq(&zone->lru_lock); - : __pagevec_release(&pvec); + 4 4.5e-05 0 0 0 0 4 1.8e-04 : spin_unlock_irq(&zone->lru_lock); + 1 1.1e-05 0 0 0 0 0 0 : __pagevec_release(&pvec); : spin_lock_irq(&zone->lru_lock); : } : } : } - : spin_unlock_irq(&zone->lru_lock); + 1 1.1e-05 0 0 0 0 3 1.4e-04 : spin_unlock_irq(&zone->lru_lock); :done: : pagevec_release(&pvec); :} @@ -1188,10 +1188,10 @@ : int pgmoved; : int pgdeactivate = 0; : int pgscanned; - : int nr_pages = sc->nr_to_scan; - 1 1.2e-05 0 0 0 0 0 0 : LIST_HEAD(l_hold); /* The pages which were snipped off */ - : LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ - : LIST_HEAD(l_active); /* Pages to go onto the active_list */ + 7 7.9e-05 0 0 0 0 0 0 : int nr_pages = sc->nr_to_scan; + 3 3.4e-05 0 0 0 0 2 9.1e-05 : LIST_HEAD(l_hold); /* The pages which were snipped off */ + 1 1.1e-05 0 0 0 0 0 0 : LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ + 6 6.7e-05 0 0 0 0 6 2.7e-04 : LIST_HEAD(l_active); /* Pages to go onto the active_list */ : struct page *page; : struct pagevec pvec; : int reclaim_mapped = 0; @@ -1205,7 +1205,7 @@ : * `distress' is a measure of how much trouble we're having : * reclaiming pages. 0 -> no problems. 100 -> great trouble. : */ - : distress = 100 >> zone->prev_priority; + 3 3.4e-05 0 0 0 0 3 1.4e-04 : distress = 100 >> zone->prev_priority; : : /* : * The point of this algorithm is to decide when to start @@ -1233,87 +1233,87 @@ : * Now use this metric to decide whether to start moving mapped : * memory onto the inactive list. 
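/*
 * The metric the comment refers to is swap_tendency, computed in the
 * part of refill_inactive_zone() not shown in this diff as
 * mapped_ratio / 2 + distress + sc->swappiness, with distress the
 * 100 >> prev_priority value calculated above.  Worked example with
 * assumed numbers: at the default swappiness of 60, prev_priority 12
 * (distress 0) and half of memory mapped (mapped_ratio 50, so 25), the
 * tendency is 85 and mapped pages stay on the active list; once reclaim
 * struggles and prev_priority falls to 1, distress becomes 50, the sum
 * reaches 135 >= 100, and reclaim_mapped flips to 1.
 */
static int swap_tendency(int mapped_ratio, int prev_priority, int swappiness)
{
        int distress = 100 >> prev_priority;

        return mapped_ratio / 2 + distress + swappiness;
}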
: */ - 13 1.6e-04 0 0 0 0 5 2.4e-04 : if (swap_tendency >= 100) + 369 0.0041 0 0 0 0 86 0.0039 : if (swap_tendency >= 100) : reclaim_mapped = 1; : } : - 1 1.2e-05 0 0 0 0 0 0 : lru_add_drain(); - 0 0 0 0 0 0 1 4.8e-05 : spin_lock_irq(&zone->lru_lock); - 7 8.7e-05 0 0 0 0 3 1.4e-04 : pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, + 2 2.2e-05 0 0 0 0 0 0 : lru_add_drain(); + 10 1.1e-04 0 0 0 0 6 2.7e-04 : spin_lock_irq(&zone->lru_lock); + 121 0.0014 0 0 0 0 58 0.0026 : pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, : &l_hold, &pgscanned); - : zone->pages_scanned += pgscanned; - : zone->nr_active -= pgmoved; - : spin_unlock_irq(&zone->lru_lock); - : - 13 1.6e-04 0 0 0 0 9 4.3e-04 : while (!list_empty(&l_hold)) { - 8 1.0e-04 0 0 0 0 9 4.3e-04 : cond_resched(); - 1 1.2e-05 0 0 0 0 1 4.8e-05 : page = lru_to_page(&l_hold); + 2 2.2e-05 0 0 0 0 0 0 : zone->pages_scanned += pgscanned; + 5 5.6e-05 0 0 0 0 14 6.4e-04 : zone->nr_active -= pgmoved; + 4 4.5e-05 0 0 0 0 1 4.5e-05 : spin_unlock_irq(&zone->lru_lock); + : + 172 0.0019 0 0 0 0 82 0.0037 : while (!list_empty(&l_hold)) { + 76 8.5e-04 0 0 0 0 25 0.0011 : cond_resched(); + 83 9.3e-04 0 0 0 0 39 0.0018 : page = lru_to_page(&l_hold); : list_del(&page->lru); - 8 1.0e-04 0 0 0 0 9 4.3e-04 : if (page_mapped(page)) { - 1 1.2e-05 0 0 0 0 0 0 : if (!reclaim_mapped || + 510 0.0057 0 0 0 0 221 0.0100 : if (page_mapped(page)) { + 3370 0.0379 0 0 0 0 1035 0.0470 : if (!reclaim_mapped || : (total_swap_pages == 0 && PageAnon(page)) || : page_referenced(page, 0)) { - : list_add(&page->lru, &l_active); + 10 1.1e-04 0 0 0 0 7 3.2e-04 : list_add(&page->lru, &l_active); : continue; : } : } - 3 3.7e-05 0 0 0 0 5 2.4e-04 : list_add(&page->lru, &l_inactive); + 38 4.3e-04 0 0 0 0 24 0.0011 : list_add(&page->lru, &l_inactive); : } : : pagevec_init(&pvec, 1); : pgmoved = 0; - 2 2.5e-05 0 0 0 0 5 2.4e-04 : spin_lock_irq(&zone->lru_lock); - 10 1.2e-04 0 0 0 0 0 0 : while (!list_empty(&l_inactive)) { - 4 5.0e-05 0 0 0 0 0 0 : page = lru_to_page(&l_inactive); - 11 1.4e-04 0 0 0 0 2 9.6e-05 : prefetchw_prev_lru_page(page, &l_inactive, flags); - 3 3.7e-05 0 0 0 0 12 5.7e-04 : if (TestSetPageLRU(page)) + 314 0.0035 0 0 0 0 114 0.0052 : spin_lock_irq(&zone->lru_lock); + 52 5.8e-04 0 0 0 0 7 3.2e-04 : while (!list_empty(&l_inactive)) { + 22 2.5e-04 0 0 0 0 12 5.4e-04 : page = lru_to_page(&l_inactive); + 60 6.7e-04 0 0 0 0 8 3.6e-04 : prefetchw_prev_lru_page(page, &l_inactive, flags); + 46 5.2e-04 0 0 0 0 39 0.0018 : if (TestSetPageLRU(page)) : BUG(); - 2 2.5e-05 0 0 0 0 3 1.4e-04 : if (!TestClearPageActive(page)) + 17 1.9e-04 0 0 0 0 13 5.9e-04 : if (!TestClearPageActive(page)) : BUG(); : list_move(&page->lru, &zone->inactive_list); : pgmoved++; - 3 3.7e-05 0 0 0 0 1 4.8e-05 : if (!pagevec_add(&pvec, page)) { + 19 2.1e-04 0 0 0 0 1 4.5e-05 : if (!pagevec_add(&pvec, page)) { : zone->nr_inactive += pgmoved; - 10 1.2e-04 0 0 0 0 5 2.4e-04 : spin_unlock_irq(&zone->lru_lock); + 26 2.9e-04 0 0 0 0 9 4.1e-04 : spin_unlock_irq(&zone->lru_lock); : pgdeactivate += pgmoved; : pgmoved = 0; - 0 0 0 0 0 0 2 9.6e-05 : if (buffer_heads_over_limit) + 3 3.4e-05 0 0 0 0 5 2.3e-04 : if (buffer_heads_over_limit) : pagevec_strip(&pvec); - 1 1.2e-05 0 0 0 0 0 0 : __pagevec_release(&pvec); - 5 6.2e-05 0 0 0 0 0 0 : spin_lock_irq(&zone->lru_lock); + 3 3.4e-05 0 0 0 0 1 4.5e-05 : __pagevec_release(&pvec); + 30 3.4e-04 0 0 0 0 13 5.9e-04 : spin_lock_irq(&zone->lru_lock); : } : } - : zone->nr_inactive += pgmoved; - : pgdeactivate += pgmoved; - 1 1.2e-05 0 0 0 0 0 0 : if 
(buffer_heads_over_limit) { + 17 1.9e-04 0 0 0 0 3 1.4e-04 : zone->nr_inactive += pgmoved; + 1 1.1e-05 0 0 0 0 0 0 : pgdeactivate += pgmoved; + 8 9.0e-05 0 0 0 0 0 0 : if (buffer_heads_over_limit) { : spin_unlock_irq(&zone->lru_lock); : pagevec_strip(&pvec); : spin_lock_irq(&zone->lru_lock); : } : : pgmoved = 0; - 2 2.5e-05 0 0 0 0 1 4.8e-05 : while (!list_empty(&l_active)) { - : page = lru_to_page(&l_active); - 0 0 0 0 0 0 3 1.4e-04 : prefetchw_prev_lru_page(page, &l_active, flags); - 2 2.5e-05 0 0 0 0 2 9.6e-05 : if (TestSetPageLRU(page)) + 264 0.0030 0 0 0 0 46 0.0021 : while (!list_empty(&l_active)) { + 104 0.0012 0 0 0 0 44 0.0020 : page = lru_to_page(&l_active); + 113 0.0013 0 0 0 0 81 0.0037 : prefetchw_prev_lru_page(page, &l_active, flags); + 74 8.3e-04 0 0 0 0 46 0.0021 : if (TestSetPageLRU(page)) : BUG(); - 0 0 0 0 0 0 2 9.6e-05 : BUG_ON(!PageActive(page)); - : list_move(&page->lru, &zone->active_list); + 111 0.0012 0 0 0 0 117 0.0053 : BUG_ON(!PageActive(page)); + 1 1.1e-05 0 0 0 0 0 0 : list_move(&page->lru, &zone->active_list); : pgmoved++; - 2 2.5e-05 0 0 0 0 0 0 : if (!pagevec_add(&pvec, page)) { - 1 1.2e-05 0 0 0 0 4 1.9e-04 : zone->nr_active += pgmoved; + 178 0.0020 0 0 0 0 15 6.8e-04 : if (!pagevec_add(&pvec, page)) { + 164 0.0018 0 0 0 0 106 0.0048 : zone->nr_active += pgmoved; : pgmoved = 0; - : spin_unlock_irq(&zone->lru_lock); - 0 0 0 0 0 0 1 4.8e-05 : __pagevec_release(&pvec); - 2 2.5e-05 0 0 0 0 1 4.8e-05 : spin_lock_irq(&zone->lru_lock); + 1 1.1e-05 0 0 0 0 0 0 : spin_unlock_irq(&zone->lru_lock); + 24 2.7e-04 0 0 0 0 40 0.0018 : __pagevec_release(&pvec); + 162 0.0018 0 0 0 0 40 0.0018 : spin_lock_irq(&zone->lru_lock); : } : } - : zone->nr_active += pgmoved; + 14 1.6e-04 0 0 0 0 3 1.4e-04 : zone->nr_active += pgmoved; : spin_unlock(&zone->lru_lock); : - 1 1.2e-05 0 0 0 0 0 0 : __mod_page_state_zone(zone, pgrefill, pgscanned); - : __mod_page_state(pgdeactivate, pgdeactivate); - 0 0 0 0 0 0 1 4.8e-05 : local_irq_enable(); + 30 3.4e-04 0 0 0 0 5 2.3e-04 : __mod_page_state_zone(zone, pgrefill, pgscanned); + 6 6.7e-05 0 0 0 0 0 0 : __mod_page_state(pgdeactivate, pgdeactivate); + 2 2.2e-05 0 0 0 0 3 1.4e-04 : local_irq_enable(); : : pagevec_release(&pvec); :} @@ -1323,7 +1323,7 @@ : */ :static void :shrink_zone(struct zone *zone, struct scan_control *sc) - 1 1.2e-05 0 0 0 0 0 0 :{ /* shrink_zone total: 444 0.0055 0 0 0 0 221 0.0106 */ + 14 1.6e-04 0 0 0 0 1 4.5e-05 :{ /* shrink_zone total: 15552 0.1749 0 0 1 0.0339 6013 0.2731 */ : unsigned long nr_active; : unsigned long nr_inactive; : @@ -1333,40 +1333,40 @@ : * Add one to `nr_to_scan' just to make sure that the kernel will : * slowly sift through the active list. 
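/*
 * The "add one" the comment describes is visible at the top of the next
 * hunk: every call banks (zone->nr_active >> sc->priority) + 1 into a
 * per-zone counter that is only spent once it reaches
 * sc->swap_cluster_max.  With assumed numbers: nr_active = 100000 at
 * priority 12 banks 100000>>12 + 1 = 25 per call, so with a batch of 32
 * the first call scans nothing and the second scans the accumulated 50.
 * The + 1 guarantees progress even when nr_active >> priority is zero.
 * Illustrative sketch of that credit scheme:
 */
static unsigned long scan_credit;

static unsigned long take_scan_batch(unsigned long nr_active, int priority,
                                     unsigned long batch)
{
        unsigned long n;

        scan_credit += (nr_active >> priority) + 1;
        if (scan_credit < batch)
                return 0;               /* keep banking */
        n = scan_credit;
        scan_credit = 0;
        return n;                       /* scan this many now */
}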
: */ - : zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; - : nr_active = zone->nr_scan_active; - : if (nr_active >= sc->swap_cluster_max) - : zone->nr_scan_active = 0; + 3 3.4e-05 0 0 0 0 5 2.3e-04 : zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; + 1 1.1e-05 0 0 0 0 0 0 : nr_active = zone->nr_scan_active; + 11 1.2e-04 0 0 0 0 6 2.7e-04 : if (nr_active >= sc->swap_cluster_max) + 0 0 0 0 0 0 1 4.5e-05 : zone->nr_scan_active = 0; : else : nr_active = 0; : - 1 1.2e-05 0 0 0 0 0 0 : zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; + 18 2.0e-04 0 0 0 0 3 1.4e-04 : zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; : nr_inactive = zone->nr_scan_inactive; - : if (nr_inactive >= sc->swap_cluster_max) - 1 1.2e-05 0 0 0 0 0 0 : zone->nr_scan_inactive = 0; + 12 1.3e-04 0 0 0 0 3 1.4e-04 : if (nr_inactive >= sc->swap_cluster_max) + 7 7.9e-05 0 0 0 0 2 9.1e-05 : zone->nr_scan_inactive = 0; : else : nr_inactive = 0; : - 4 5.0e-05 0 0 0 0 1 4.8e-05 : while (nr_active || nr_inactive) { - 0 0 0 0 0 0 1 4.8e-05 : if (nr_active) { - : sc->nr_to_scan = min(nr_active, + 37 4.2e-04 0 0 0 0 24 0.0011 : while (nr_active || nr_inactive) { + 17 1.9e-04 0 0 0 0 4 1.8e-04 : if (nr_active) { + 2 2.2e-05 0 0 0 0 2 9.1e-05 : sc->nr_to_scan = min(nr_active, : (unsigned long)sc->swap_cluster_max); - 0 0 0 0 0 0 1 4.8e-05 : nr_active -= sc->nr_to_scan; + : nr_active -= sc->nr_to_scan; : refill_inactive_zone(zone, sc); : } : - : if (nr_inactive) { - : sc->nr_to_scan = min(nr_inactive, + 7 7.9e-05 0 0 0 0 2 9.1e-05 : if (nr_inactive) { + 2 2.2e-05 0 0 0 0 2 9.1e-05 : sc->nr_to_scan = min(nr_inactive, : (unsigned long)sc->swap_cluster_max); : nr_inactive -= sc->nr_to_scan; : shrink_cache(zone, sc); : } : } : - : throttle_vm_writeout(); + 4 4.5e-05 0 0 0 0 1 4.5e-05 : throttle_vm_writeout(); : : atomic_dec(&zone->reclaim_in_progress); - 1 1.2e-05 0 0 0 0 0 0 :} + 5 5.6e-05 0 0 0 0 0 0 :} : :/* : * This is the direct reclaim path, for page-allocating processes. We only @@ -1423,7 +1423,7 @@ : * allocation attempt will fail. 
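/*
 * Shape of the direct-reclaim ladder in try_to_free_pages() below:
 * priority counts down from DEF_PRIORITY (12) to 0, each pass scanning
 * a doubling fraction of the LRU (nr >> priority), bailing out as soon
 * as swap_cluster_max pages have been reclaimed, and waking pdflush
 * once total_scanned exceeds one and a half times that (48 pages for
 * the default 32).  An illustrative reduction, with a hypothetical
 * scan_some() standing in for a real shrink pass:
 */
#define DEF_PRIORITY 12

static unsigned long scan_some(unsigned long n)
{
        return n / 64;                  /* pretend 1/64 of scanned frees */
}

static int reclaim(unsigned long lru_pages, unsigned long want)
{
        unsigned long got = 0;
        int prio;

        for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                got += scan_some(lru_pages >> prio);
                if (got >= want)
                        return 1;       /* freed enough, stop early */
        }
        return 0;                       /* every pass came up short */
}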
: */ :int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) - :{ + 1 1.1e-05 0 0 0 0 1 4.5e-05 :{ /* try_to_free_pages total: 40 4.5e-04 0 0 0 0 6 2.7e-04 */ : int priority; : int ret = 0; : int total_scanned = 0, total_reclaimed = 0; @@ -1433,23 +1433,23 @@ : int i; : : sc.gfp_mask = gfp_mask; - : sc.may_writepage = !laptop_mode; + 3 3.4e-05 0 0 0 0 0 0 : sc.may_writepage = !laptop_mode; : sc.may_swap = 1; : : inc_page_state(allocstall); : - : for (i = 0; zones[i] != NULL; i++) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : for (i = 0; zones[i] != NULL; i++) { : struct zone *zone = zones[i]; : : if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) : continue; : : zone->temp_priority = DEF_PRIORITY; - : lru_pages += zone->nr_active + zone->nr_inactive; + 3 3.4e-05 0 0 0 0 0 0 : lru_pages += zone->nr_active + zone->nr_inactive; : } : : for (priority = DEF_PRIORITY; priority >= 0; priority--) { - : sc.nr_mapped = read_page_state(nr_mapped); + 0 0 0 0 0 0 1 4.5e-05 : sc.nr_mapped = read_page_state(nr_mapped); : sc.nr_scanned = 0; : sc.nr_reclaimed = 0; : sc.priority = priority; @@ -1458,13 +1458,13 @@ : disable_swap_token(); : shrink_caches(zones, &sc); : shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); - : if (reclaim_state) { + 1 1.1e-05 0 0 0 0 0 0 : if (reclaim_state) { : sc.nr_reclaimed += reclaim_state->reclaimed_slab; : reclaim_state->reclaimed_slab = 0; : } : total_scanned += sc.nr_scanned; : total_reclaimed += sc.nr_reclaimed; - : if (total_reclaimed >= sc.swap_cluster_max) { + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (total_reclaimed >= sc.swap_cluster_max) { : ret = 1; : goto out; : } @@ -1476,7 +1476,7 @@ : * that's undesirable in laptop mode, where we *want* lumpy : * writeout. So in laptop mode, write out the whole world. : */ - : if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { + 6 6.7e-05 0 0 0 0 0 0 : if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { : wakeup_pdflush(laptop_mode ? 0 : total_scanned); : sc.may_writepage = 1; : } @@ -1486,7 +1486,7 @@ : blk_congestion_wait(WRITE, HZ/10); : } :out: - : for (i = 0; zones[i] != 0; i++) { + 1 1.1e-05 0 0 0 0 0 0 : for (i = 0; zones[i] != 0; i++) { : struct zone *zone = zones[i]; : : if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) @@ -1495,7 +1495,7 @@ : zone->prev_priority = zone->temp_priority; : } : return ret; - :} + 1 1.1e-05 0 0 0 0 0 0 :} : :/* : * For kswapd, balance_pgdat() will work across all this node's zones until @@ -1523,57 +1523,57 @@ : * across the zones. 
: */ :static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order) - :{ /* balance_pgdat total: 15 1.9e-04 0 0 0 0 4 1.9e-04 */ + 10 1.1e-04 0 0 0 0 0 0 :{ /* balance_pgdat total: 200 0.0022 0 0 0 0 57 0.0026 */ : int to_free = nr_pages; : int all_zones_ok; : int priority; : int i; : int total_scanned, total_reclaimed; - : struct reclaim_state *reclaim_state = current->reclaim_state; + 2 2.2e-05 0 0 0 0 0 0 : struct reclaim_state *reclaim_state = current->reclaim_state; : struct scan_control sc; : :loop_again: : total_scanned = 0; : total_reclaimed = 0; - : sc.gfp_mask = GFP_KERNEL; - : sc.may_writepage = !laptop_mode; + 1 1.1e-05 0 0 0 0 0 0 : sc.gfp_mask = GFP_KERNEL; + 4 4.5e-05 0 0 0 0 1 4.5e-05 : sc.may_writepage = !laptop_mode; : sc.may_swap = 1; - 1 1.2e-05 0 0 0 0 0 0 : sc.nr_mapped = read_page_state(nr_mapped); + : sc.nr_mapped = read_page_state(nr_mapped); : - : inc_page_state(pageoutrun); + 0 0 0 0 0 0 1 4.5e-05 : inc_page_state(pageoutrun); : - 2 2.5e-05 0 0 0 0 1 4.8e-05 : for (i = 0; i < pgdat->nr_zones; i++) { + 11 1.2e-04 0 0 0 0 0 0 : for (i = 0; i < pgdat->nr_zones; i++) { : struct zone *zone = pgdat->node_zones + i; : - : zone->temp_priority = DEF_PRIORITY; + 1 1.1e-05 0 0 0 0 1 4.5e-05 : zone->temp_priority = DEF_PRIORITY; : } : - : for (priority = DEF_PRIORITY; priority >= 0; priority--) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : for (priority = DEF_PRIORITY; priority >= 0; priority--) { : int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ : unsigned long lru_pages = 0; : : /* The swap token gets in the way of swapout... */ - : if (!priority) + 2 2.2e-05 0 0 0 0 0 0 : if (!priority) : disable_swap_token(); : : all_zones_ok = 1; : - : if (nr_pages == 0) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : if (nr_pages == 0) { : /* : * Scan in the highmem->dma direction for the highest : * zone which needs scanning : */ - 1 1.2e-05 0 0 0 0 0 0 : for (i = pgdat->nr_zones - 1; i >= 0; i--) { - : struct zone *zone = pgdat->node_zones + i; + 5 5.6e-05 0 0 0 0 1 4.5e-05 : for (i = pgdat->nr_zones - 1; i >= 0; i--) { + 2 2.2e-05 0 0 0 0 0 0 : struct zone *zone = pgdat->node_zones + i; : : if (!populated_zone(zone)) : continue; : - 1 1.2e-05 0 0 0 0 0 0 : if (zone->all_unreclaimable && + 0 0 0 0 0 0 3 1.4e-04 : if (zone->all_unreclaimable && : priority != DEF_PRIORITY) : continue; : - : if (!zone_watermark_ok(zone, order, + 4 4.5e-05 0 0 0 0 1 4.5e-05 : if (!zone_watermark_ok(zone, order, : zone->pages_high, 0, 0)) { : end_zone = i; : goto scan; @@ -1583,11 +1583,11 @@ : } else { : end_zone = pgdat->nr_zones - 1; : } - 1 1.2e-05 0 0 0 0 0 0 :scan: - : for (i = 0; i <= end_zone; i++) { - : struct zone *zone = pgdat->node_zones + i; + 4 4.5e-05 0 0 0 0 1 4.5e-05 :scan: + 15 1.7e-04 0 0 0 0 4 1.8e-04 : for (i = 0; i <= end_zone; i++) { + 1 1.1e-05 0 0 0 0 1 4.5e-05 : struct zone *zone = pgdat->node_zones + i; : - 2 2.5e-05 0 0 0 0 0 0 : lru_pages += zone->nr_active + zone->nr_inactive; + 3 3.4e-05 0 0 0 0 1 4.5e-05 : lru_pages += zone->nr_active + zone->nr_inactive; : } : : /* @@ -1599,38 +1599,38 @@ : * pages behind kswapd's direction of progress, which would : * cause too much scanning of the lower zones. 
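/*
 * The strategy the comments around the scan: label describe: walk zones
 * from highest (highmem) down to find the highest one failing its
 * pages_high watermark, then reclaim from ZONE_DMA up to that zone, so
 * no free pages are left behind kswapd's direction of travel.  Minimal
 * sketch with invented helpers:
 */
#include <stdbool.h>

/* hypothetical stand-ins for zone_watermark_ok() and shrink_zone() */
static bool watermark_ok(int zone) { return zone != 1; }
static void reclaim_zone(int zone) { (void)zone; }

static void balance(int nr_zones)
{
        int i, end_zone = -1;

        for (i = nr_zones - 1; i >= 0; i--)
                if (!watermark_ok(i)) {
                        end_zone = i;   /* highest zone needing work */
                        break;
                }
        if (end_zone < 0)
                return;                 /* all zones are balanced */
        for (i = 0; i <= end_zone; i++)
                reclaim_zone(i);        /* sweep upward, DMA first */
}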
: */ - 2 2.5e-05 0 0 0 0 0 0 : for (i = 0; i <= end_zone; i++) { - : struct zone *zone = pgdat->node_zones + i; + 39 4.4e-04 0 0 0 0 6 2.7e-04 : for (i = 0; i <= end_zone; i++) { + 4 4.5e-05 0 0 0 0 6 2.7e-04 : struct zone *zone = pgdat->node_zones + i; : int nr_slab; : : if (!populated_zone(zone)) : continue; : - 0 0 0 0 0 0 1 4.8e-05 : if (zone->all_unreclaimable && priority != DEF_PRIORITY) + 2 2.2e-05 0 0 0 0 0 0 : if (zone->all_unreclaimable && priority != DEF_PRIORITY) : continue; : - : if (nr_pages == 0) { /* Not software suspend */ - 0 0 0 0 0 0 1 4.8e-05 : if (!zone_watermark_ok(zone, order, + 2 2.2e-05 0 0 0 0 3 1.4e-04 : if (nr_pages == 0) { /* Not software suspend */ + 2 2.2e-05 0 0 0 0 1 4.5e-05 : if (!zone_watermark_ok(zone, order, : zone->pages_high, end_zone, 0)) : all_zones_ok = 0; : } : zone->temp_priority = priority; - : if (zone->prev_priority > priority) + 1 1.1e-05 0 0 0 0 0 0 : if (zone->prev_priority > priority) : zone->prev_priority = priority; : sc.nr_scanned = 0; : sc.nr_reclaimed = 0; - : sc.priority = priority; - : sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; - : shrink_zone(zone, &sc); + 1 1.1e-05 0 0 0 0 0 0 : sc.priority = priority; + 0 0 0 0 0 0 1 4.5e-05 : sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; + 3 3.4e-05 0 0 0 0 3 1.4e-04 : shrink_zone(zone, &sc); : reclaim_state->reclaimed_slab = 0; - 3 3.7e-05 0 0 0 0 0 0 : nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, + 38 4.3e-04 0 0 0 0 5 2.3e-04 : nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, : lru_pages); : sc.nr_reclaimed += reclaim_state->reclaimed_slab; - : total_reclaimed += sc.nr_reclaimed; - : total_scanned += sc.nr_scanned; + 2 2.2e-05 0 0 0 0 1 4.5e-05 : total_reclaimed += sc.nr_reclaimed; + 1 1.1e-05 0 0 0 0 0 0 : total_scanned += sc.nr_scanned; : if (zone->all_unreclaimable) : continue; - : if (nr_slab == 0 && zone->pages_scanned >= + 1 1.1e-05 0 0 0 0 0 0 : if (nr_slab == 0 && zone->pages_scanned >= : (zone->nr_active + zone->nr_inactive) * 4) : zone->all_unreclaimable = 1; : /* @@ -1638,20 +1638,20 @@ : * the reclaim ratio is low, start doing writepage : * even in laptop mode : */ - : if (total_scanned > SWAP_CLUSTER_MAX * 2 && + 7 7.9e-05 0 0 0 0 4 1.8e-04 : if (total_scanned > SWAP_CLUSTER_MAX * 2 && : total_scanned > total_reclaimed+total_reclaimed/2) - : sc.may_writepage = 1; + 1 1.1e-05 0 0 0 0 0 0 : sc.may_writepage = 1; : } - : if (nr_pages && to_free > total_reclaimed) + 5 5.6e-05 0 0 0 0 1 4.5e-05 : if (nr_pages && to_free > total_reclaimed) : continue; /* swsusp: need to do more work */ - : if (all_zones_ok) + 1 1.1e-05 0 0 0 0 0 0 : if (all_zones_ok) : break; /* kswapd: all done */ : /* : * OK, kswapd is getting into trouble. Take a nap, then take : * another pass across the zones. : */ : if (total_scanned && priority < DEF_PRIORITY - 2) - : blk_congestion_wait(WRITE, HZ/10); + 1 1.1e-05 0 0 0 0 0 0 : blk_congestion_wait(WRITE, HZ/10); : : /* : * We do this so kswapd doesn't build up large priorities for @@ -1659,22 +1659,22 @@ : * matches the direct reclaim path behaviour in terms of impact : * on zone->*_priority. 
: */ - : if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) + 8 9.0e-05 0 0 0 0 3 1.4e-04 : if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) : break; : } - 2 2.5e-05 0 0 0 0 1 4.8e-05 :out: - : for (i = 0; i < pgdat->nr_zones; i++) { - : struct zone *zone = pgdat->node_zones + i; + 1 1.1e-05 0 0 0 0 0 0 :out: + 7 7.9e-05 0 0 0 0 5 2.3e-04 : for (i = 0; i < pgdat->nr_zones; i++) { + 0 0 0 0 0 0 1 4.5e-05 : struct zone *zone = pgdat->node_zones + i; : : zone->prev_priority = zone->temp_priority; : } - : if (!all_zones_ok) { + 4 4.5e-05 0 0 0 0 0 0 : if (!all_zones_ok) { : cond_resched(); : goto loop_again; : } : : return total_reclaimed; - :} + 2 2.2e-05 0 0 0 0 0 0 :} : :/* : * The background pageout daemon, started as a kernel thread @@ -1690,7 +1690,7 @@ : * (most normal use), this basically shouldn't matter. : */ :static int kswapd(void *p) - :{ /* kswapd total: 1 1.2e-05 0 0 0 0 0 0 */ + :{ /* kswapd total: 17 1.9e-04 0 0 0 0 2 9.1e-05 */ : unsigned long order; : pg_data_t *pgdat = (pg_data_t*)p; : struct task_struct *tsk = current; @@ -1726,7 +1726,7 @@ : : try_to_freeze(); : - : prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); + 3 3.4e-05 0 0 0 0 0 0 : prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); : new_order = pgdat->kswapd_max_order; : pgdat->kswapd_max_order = 0; : if (order < new_order) { @@ -1739,9 +1739,9 @@ : schedule(); : order = pgdat->kswapd_max_order; : } - 1 1.2e-05 0 0 0 0 0 0 : finish_wait(&pgdat->kswapd_wait, &wait); + 10 1.1e-04 0 0 0 0 2 9.1e-05 : finish_wait(&pgdat->kswapd_wait, &wait); : - : balance_pgdat(pgdat, 0, order); + 1 1.1e-05 0 0 0 0 0 0 : balance_pgdat(pgdat, 0, order); : } : return 0; :} @@ -1750,23 +1750,23 @@ : * A zone is low on free memory, so wake its kswapd task to service it. : */ :void wakeup_kswapd(struct zone *zone, int order) - :{ + 38 4.3e-04 0 0 0 0 31 0.0014 :{ /* wakeup_kswapd total: 108 0.0012 0 0 0 0 123 0.0056 */ : pg_data_t *pgdat; : - : if (!populated_zone(zone)) + 1 1.1e-05 0 0 0 0 0 0 : if (!populated_zone(zone)) : return; : - : pgdat = zone->zone_pgdat; - : if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) + 3 3.4e-05 0 0 0 0 8 3.6e-04 : pgdat = zone->zone_pgdat; + 13 1.5e-04 0 0 0 0 16 7.3e-04 : if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) : return; - : if (pgdat->kswapd_max_order < order) + 2 2.2e-05 0 0 0 0 6 2.7e-04 : if (pgdat->kswapd_max_order < order) : pgdat->kswapd_max_order = order; : if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) : return; - : if (!waitqueue_active(&pgdat->kswapd_wait)) + 28 3.1e-04 0 0 0 0 32 0.0015 : if (!waitqueue_active(&pgdat->kswapd_wait)) : return; - : wake_up_interruptible(&pgdat->kswapd_wait); - :} + 2 2.2e-05 0 0 0 0 0 0 : wake_up_interruptible(&pgdat->kswapd_wait); + 21 2.4e-04 0 0 0 0 30 0.0014 :} : :#ifdef CONFIG_PM :/* @@ -1952,12 +1952,12 @@ /* * Total samples for file : "mm/vmscan.c" * - * 696 0.0087 0 0 0 0 164 0.0078 + * 17101 0.1923 0 0 1 0.0339 4397 0.1997 */ /* - * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-0 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x0 + * Command line: opannotate --source --search-dirs=/src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/ --output-dir=annotated-1 /src/linux-2.6.16-rc5-vs2.1.1-rc12-X2/vmlinux session:rc12x2 * * Interpretation of command line: * Output annotated source file with samples
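/*
 * Read side by side, the per-file footers tell the story of these two
 * sessions: mm/truncate.c (1368 -> 1118 samples) and mm/util.c
 * (39 -> 35) barely move, while mm/vmscan.c jumps from 696 samples
 * (0.0087%) to 17101 (0.1923%), with isolate_lru_pages alone going from
 * 1192 to 22505 and shrink_zone from 444 to 15552.  Whatever else
 * distinguishes session rc12x0 from rc12x2, the second run spent
 * roughly 25 times as many cycles walking LRU lists in page reclaim,
 * and wakeup_kswapd() went from unsampled to 108 samples.  That wakeup
 * is gated before kswapd is disturbed, as the hunk above shows; a
 * minimal sketch of the gate, with invented types:
 */
#include <stdbool.h>

struct zone_stub {
        long free_pages, pages_low;
        bool kswapd_waiting;
};

static void maybe_wake_kswapd(struct zone_stub *z)
{
        if (z->free_pages > z->pages_low)
                return;                 /* still above the low watermark */
        if (!z->kswapd_waiting)
                return;                 /* kswapd already awake and working */
        z->kswapd_waiting = false;      /* wake_up_interruptible() here */
}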