Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 0ca1634

Browse files
hnaz authored and torvalds committed

thp: mincore transparent hugepage support

Handle transparent huge page pmd entries natively instead of splitting
them into subpages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

1 parent f2d6bfe commit 0ca1634

File tree

3 files changed

+35
-1
lines changed

3 files changed

+35
-1
lines changed

‎include/linux/huge_mm.h‎

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change

@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,

‎mm/huge_memory.c‎

Lines changed: 25 additions & 0 deletions
Original file line number | Diff line number | Diff line change

@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned char *vec)
+{
+	int ret = 0;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		ret = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		if (unlikely(!ret))
+			wait_split_huge_page(vma->anon_vma, pmd);
+		else {
+			/*
+			 * All logical pages in the range are present
+			 * if backed by a huge page.
+			 */
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,

‎mm/mincore.c‎

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change

@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+				vec += (next - addr) >> PAGE_SHIFT;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
 		else

0 commit comments

Comments (0)

[8]ページ先頭

©2009-2025 Movatter.jp