
Commit 8510e69

JoonsooKim authored and torvalds committed
mm/page_alloc: fix memalloc_nocma_{save/restore} APIs
Currently, the memalloc_nocma_{save/restore} APIs, which prevent allocation from the CMA area, are implemented via current_gfp_context(). This implementation has two problems.

First, it doesn't work for the allocation fastpath. The fastpath uses the original gfp_mask, since current_gfp_context() was introduced to control reclaim and is only applied on the slowpath. As a result, CMA memory can still be handed out through the fastpath even when the memalloc_nocma_{save/restore} APIs are in use. There is currently just one user of these APIs, and it has a fallback method to prevent an actual problem.

Second, clearing __GFP_MOVABLE in current_gfp_context() has the side effect of also excluding ZONE_MOVABLE memory from the allocation target.

To fix these problems, this patch changes the implementation to exclude the CMA area at page-allocation time. The main point of the change is to use alloc_flags: alloc_flags is what controls the allocation itself, so it is the right place to exclude the CMA area.

Fixes: d7fefcc ("mm/cma: add PF flag to force non cma alloc")
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Link: http://lkml.kernel.org/r/1595468942-29687-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 182f3d7 · commit 8510e69
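For context, the APIs being fixed are used roughly as follows. This is a minimal editorial sketch, not part of the commit; it assumes a sleepable context and uses the memalloc_nocma_save()/memalloc_nocma_restore() helpers from include/linux/sched/mm.h, which set and clear PF_MEMALLOC_NOCMA around an allocation:

	unsigned int noncma_flags;
	struct page *page;

	/* Mark the task so that its allocations avoid CMA pageblocks. */
	noncma_flags = memalloc_nocma_save();

	/*
	 * Before this patch, only the slowpath honored PF_MEMALLOC_NOCMA
	 * (via current_gfp_context()), so this call could still return a
	 * CMA page from the fastpath.  After this patch, ALLOC_CMA is
	 * simply never set for the task, so both the fast and slow paths
	 * skip the CMA free lists.
	 */
	page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);

	memalloc_nocma_restore(noncma_flags);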

2 files changed: +22 −17 lines changed


include/linux/sched/mm.h

Lines changed: 1 addition & 7 deletions
@@ -175,12 +175,10 @@ static inline bool in_vfork(struct task_struct *tsk)
  * Applies per-task gfp context to the given allocation flags.
  * PF_MEMALLOC_NOIO implies GFP_NOIO
  * PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-	if (unlikely(current->flags &
-		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+	if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
 		/*
 		 * NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it makes precedence
@@ -189,10 +187,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 			flags &= ~(__GFP_IO | __GFP_FS);
 		else if (current->flags & PF_MEMALLOC_NOFS)
 			flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
-		if (current->flags & PF_MEMALLOC_NOCMA)
-			flags &= ~__GFP_MOVABLE;
-#endif
 	}
 	return flags;
 }

mm/page_alloc.c

Lines changed: 21 additions & 10 deletions
@@ -2785,7 +2785,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 		 * allocating from CMA when over half of the zone's free memory
 		 * is in the CMA area.
 		 */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
 			page = __rmqueue_cma_fallback(zone, order);
@@ -2796,7 +2796,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
-		if (migratetype == MIGRATE_MOVABLE)
+		if (alloc_flags & ALLOC_CMA)
 			page = __rmqueue_cma_fallback(zone, order);
 
 		if (!page && __rmqueue_fallback(zone, order, migratetype,
@@ -3687,6 +3687,20 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	return alloc_flags;
 }
 
+static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
+					unsigned int alloc_flags)
+{
+#ifdef CONFIG_CMA
+	unsigned int pflags = current->flags;
+
+	if (!(pflags & PF_MEMALLOC_NOCMA) &&
+			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+
+#endif
+	return alloc_flags;
+}
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -4333,10 +4347,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
+	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+
 	return alloc_flags;
 }
 
@@ -4637,7 +4649,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
 	if (reserve_flags)
-		alloc_flags = reserve_flags;
+		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
 
 	/*
 	 * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4714,7 +4726,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (tsk_is_oom_victim(current) &&
-	    (alloc_flags == ALLOC_OOM ||
+	    (alloc_flags & ALLOC_OOM ||
 	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;
 
@@ -4806,8 +4818,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
+	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
 
 	return true;
 }
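For readability, here is the new current_alloc_flags() helper from the hunk above once more, with editorial comments added (the comments are not part of the patch) explaining how it addresses both problems described in the changelog:

static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
					unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	unsigned int pflags = current->flags;

	/*
	 * ALLOC_CMA is set only when the allocation is movable AND the
	 * task has not entered memalloc_nocma_save().  Because this is
	 * folded into alloc_flags (via prepare_alloc_pages() on the
	 * fastpath and gfp_to_alloc_flags()/reserve_flags handling on
	 * the slowpath), PF_MEMALLOC_NOCMA is now honored on the
	 * fastpath as well, and __GFP_MOVABLE is left untouched, so
	 * ZONE_MOVABLE remains an eligible allocation target.
	 */
	if (!(pflags & PF_MEMALLOC_NOCMA) &&
			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}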

0 commit comments

