From 78afd5612deb8268bafc8b6507d72341d5ed9aac Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 22 Mar 2011 16:33:12 -0700
Subject: mm: add __GFP_OTHER_NODE flag

Add a new __GFP_OTHER_NODE flag to tell the low level numa statistics in
zone_statistics() that an allocation is on behalf of another thread.  This
way the local and remote counters can be still correct, even when
background daemons like khugepaged are changing memory mappings.

This only affects the accounting, but I think it's worth doing that right
to avoid confusing users.

I first tried to just pass down the right node, but this required a lot of
changes to pass down this parameter and at least one addition of a 10th
argument to a 9 argument function.  Using the flag is a lot less intrusive.

Open: should be also used for migration?

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andi Kleen
Cc: Andrea Arcangeli
Reviewed-by: KAMEZAWA Hiroyuki
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/vmstat.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/vmstat.h')

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 833e676d6d92..461c0119664f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -220,12 +220,12 @@ static inline unsigned long node_page_state(int node,
 		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zone *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl,_z) do { } while (0)
+#define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
--
cgit v1.2.3-71-gd317
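The mm/vmstat.c side of this patch is not included in the excerpt above (the
diff view is limited to include/linux/vmstat.h).  As a rough sketch of how the
new gfp argument can be consumed, assuming the existing NUMA_HIT, NUMA_MISS,
NUMA_FOREIGN, NUMA_LOCAL and NUMA_OTHER zone counters and the
__inc_zone_state() helper (none of which are shown in this excerpt),
zone_statistics() can account an allocation flagged with __GFP_OTHER_NODE as
"local" to the preferred zone's node, i.e. the node the work is done on behalf
of, rather than to the node the calling thread happens to run on:

/*
 * Sketch only; the actual mm/vmstat.c hunk is not part of this excerpt.
 * With __GFP_OTHER_NODE set, "local" is judged against the preferred
 * zone's node instead of the node of the currently running thread
 * (e.g. khugepaged collapsing pages on behalf of another process).
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}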
From 81ab4201fb7d91d6b0cd9ad5b4b16776e4bed145 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 14 Apr 2011 15:22:06 -0700
Subject: mm: add VM counters for transparent hugepages

I found it difficult to make sense of transparent huge pages without
having any counters for its actions.  Add some counters to vmstat for
allocation of transparent hugepages and fallback to smaller pages.

Optional patch, but useful for development and understanding the system.

Contains improvements from Andrea Arcangeli and Johannes Weiner

[akpm@linux-foundation.org: coding-style fixes]
[hannes@cmpxchg.org: fix vmstat_text[] entries]
Signed-off-by: Andi Kleen
Acked-by: Andrea Arcangeli
Reviewed-by: KAMEZAWA Hiroyuki
Signed-off-by: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/vmstat.h |  7 +++++++
 mm/huge_memory.c       | 25 +++++++++++++++++++++----
 mm/vmstat.c            |  9 +++++++++
 3 files changed, 37 insertions(+), 4 deletions(-)

(limited to 'include/linux/vmstat.h')

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 461c0119664f..2b3831b58aa4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
 		UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		THP_FAULT_ALLOC,
+		THP_FAULT_FALLBACK,
+		THP_COLLAPSE_ALLOC,
+		THP_COLLAPSE_ALLOC_FAILED,
+		THP_SPLIT,
+#endif
 		NR_VM_EVENT_ITEMS
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..1722683bde23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -680,8 +680,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma,
 					  haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +912,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1395,7 @@ int split_huge_page(struct page *page)
 	BUG_ON(!PageSwapBacked(page));
 
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1790,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2159,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2203,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
 
 	return hpage;
@@ -2210,8 +2224,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8cb0f0a703e5..897ea9e88238 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -948,7 +948,16 @@ static const char * const vmstat_text[] = {
 	"unevictable_pgs_cleared",
 	"unevictable_pgs_stranded",
 	"unevictable_pgs_mlockfreed",
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	"thp_fault_alloc",
+	"thp_fault_fallback",
+	"thp_collapse_alloc",
+	"thp_collapse_alloc_failed",
+	"thp_split",
 #endif
+
+#endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
--
cgit v1.2.3-71-gd317
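With the second patch applied, the new entries added to vmstat_text[] appear
as the thp_* lines of /proc/vmstat on kernels built with
CONFIG_TRANSPARENT_HUGEPAGE.  The following minimal userspace sketch (not part
of either patch; it only relies on the standard "name value" per-line format
of /proc/vmstat) prints the five counters introduced above:

#include <stdio.h>
#include <string.h>

/*
 * Print the transparent-hugepage event counters exported via
 * /proc/vmstat.  Each line is a "name value" pair; the five counters
 * added by the patch above all share the "thp_" prefix.
 */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "thp_", 4) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}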