Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings
forked from torvalds/linux

Commit 5a27aa8

Browse files
vwool authored and torvalds committed
z3fold: add kref refcounting
With both coming and already present locking optimizations, introducing kref to reference-count z3fold objects is the right thing to do. Moreover, it makes the buddied list no longer necessary, and allows for a simpler handling of headless pages.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170131214650.8ea78033d91ded233f552bc0@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reviewed-by: Dan Streetman <ddstreet@ieee.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 2f1e5e4 commit 5a27aa8

File tree

1 file changed

+69
-86
lines changed

1 file changed

+69
-86
lines changed

mm/z3fold.c

Lines changed: 69 additions & 86 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ enum buddy {
5252
*z3fold page, except for HEADLESS pages
5353
* @buddy:links the z3fold page into the relevant list in the pool
5454
* @page_lock:per-page lock
55+
* @refcount:reference cound for the z3fold page
5556
* @first_chunks:the size of the first buddy in chunks, 0 if free
5657
* @middle_chunks:the size of the middle buddy in chunks, 0 if free
5758
* @last_chunks:the size of the last buddy in chunks, 0 if free
@@ -60,6 +61,7 @@ enum buddy {
6061
structz3fold_header {
6162
structlist_headbuddy;
6263
spinlock_tpage_lock;
64+
structkrefrefcount;
6365
unsigned shortfirst_chunks;
6466
unsigned shortmiddle_chunks;
6567
unsigned shortlast_chunks;
@@ -95,8 +97,6 @@ struct z3fold_header {
9597
* @unbuddied:array of lists tracking z3fold pages that contain 2- buddies;
9698
*the lists each z3fold page is added to depends on the size of
9799
*its free region.
98-
* @buddied:list tracking the z3fold pages that contain 3 buddies;
99-
*these z3fold pages are full
100100
* @lru:list tracking the z3fold pages in LRU order by most recently
101101
*added buddy.
102102
* @pages_nr:number of z3fold pages in the pool.
@@ -109,7 +109,6 @@ struct z3fold_header {
109109
structz3fold_pool {
110110
spinlock_tlock;
111111
structlist_headunbuddied[NCHUNKS];
112-
structlist_headbuddied;
113112
structlist_headlru;
114113
atomic64_tpages_nr;
115114
conststructz3fold_ops*ops;
@@ -121,8 +120,7 @@ struct z3fold_pool {
121120
* Internal z3fold page flags
122121
*/
123122
enumz3fold_page_flags {
124-
UNDER_RECLAIM=0,
125-
PAGE_HEADLESS,
123+
PAGE_HEADLESS=0,
126124
MIDDLE_CHUNK_MAPPED,
127125
};
128126

@@ -146,11 +144,11 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
146144
structz3fold_header*zhdr=page_address(page);
147145

148146
INIT_LIST_HEAD(&page->lru);
149-
clear_bit(UNDER_RECLAIM,&page->private);
150147
clear_bit(PAGE_HEADLESS,&page->private);
151148
clear_bit(MIDDLE_CHUNK_MAPPED,&page->private);
152149

153150
spin_lock_init(&zhdr->page_lock);
151+
kref_init(&zhdr->refcount);
154152
zhdr->first_chunks=0;
155153
zhdr->middle_chunks=0;
156154
zhdr->last_chunks=0;
@@ -161,9 +159,24 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
161159
}
162160

163161
/* Resets the struct page fields and frees the page */
164-
staticvoidfree_z3fold_page(structz3fold_header*zhdr)
162+
staticvoidfree_z3fold_page(structpage*page)
165163
{
166-
__free_page(virt_to_page(zhdr));
164+
__free_page(page);
165+
}
166+
167+
staticvoidrelease_z3fold_page(structkref*ref)
168+
{
169+
structz3fold_header*zhdr;
170+
structpage*page;
171+
172+
zhdr=container_of(ref,structz3fold_header,refcount);
173+
page=virt_to_page(zhdr);
174+
175+
if (!list_empty(&zhdr->buddy))
176+
list_del(&zhdr->buddy);
177+
if (!list_empty(&page->lru))
178+
list_del(&page->lru);
179+
free_z3fold_page(page);
167180
}
168181

169182
/* Lock a z3fold page */
@@ -178,7 +191,6 @@ static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
178191
spin_unlock(&zhdr->page_lock);
179192
}
180193

181-
182194
/*
183195
* Encodes the handle of a particular buddy within a z3fold page
184196
* Pool lock should be held as this function accesses first_num
@@ -257,7 +269,6 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
257269
spin_lock_init(&pool->lock);
258270
for_each_unbuddied_list(i,0)
259271
INIT_LIST_HEAD(&pool->unbuddied[i]);
260-
INIT_LIST_HEAD(&pool->buddied);
261272
INIT_LIST_HEAD(&pool->lru);
262273
atomic64_set(&pool->pages_nr,0);
263274
pool->ops=ops;
@@ -378,6 +389,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
378389
spin_unlock(&pool->lock);
379390
continue;
380391
}
392+
kref_get(&zhdr->refcount);
381393
list_del_init(&zhdr->buddy);
382394
spin_unlock(&pool->lock);
383395

@@ -394,10 +406,12 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
394406
elseif (zhdr->middle_chunks==0)
395407
bud=MIDDLE;
396408
else {
409+
z3fold_page_unlock(zhdr);
397410
spin_lock(&pool->lock);
398-
list_add(&zhdr->buddy,&pool->buddied);
411+
if (kref_put(&zhdr->refcount,
412+
release_z3fold_page))
413+
atomic64_dec(&pool->pages_nr);
399414
spin_unlock(&pool->lock);
400-
z3fold_page_unlock(zhdr);
401415
pr_err("No free chunks in unbuddied\n");
402416
WARN_ON(1);
403417
continue;
@@ -438,9 +452,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
438452
/* Add to unbuddied list */
439453
freechunks=num_free_chunks(zhdr);
440454
list_add(&zhdr->buddy,&pool->unbuddied[freechunks]);
441-
}else {
442-
/* Add to buddied list */
443-
list_add(&zhdr->buddy,&pool->buddied);
444455
}
445456

446457
headless:
@@ -504,52 +515,29 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
504515
}
505516
}
506517

507-
if (test_bit(UNDER_RECLAIM,&page->private)) {
508-
/* z3fold page is under reclaim, reclaim will free */
509-
if (bud!=HEADLESS)
510-
z3fold_page_unlock(zhdr);
511-
return;
512-
}
513-
514-
/* Remove from existing buddy list */
515-
if (bud!=HEADLESS) {
516-
spin_lock(&pool->lock);
517-
/*
518-
* this object may have been removed from its list by
519-
* z3fold_alloc(). In that case we just do nothing,
520-
* z3fold_alloc() will allocate an object and add the page
521-
* to the relevant list.
522-
*/
523-
if (!list_empty(&zhdr->buddy)) {
524-
list_del(&zhdr->buddy);
525-
}else {
526-
spin_unlock(&pool->lock);
527-
z3fold_page_unlock(zhdr);
528-
return;
529-
}
530-
spin_unlock(&pool->lock);
531-
}
532-
533-
if (bud==HEADLESS||
534-
(zhdr->first_chunks==0&&zhdr->middle_chunks==0&&
535-
zhdr->last_chunks==0)) {
536-
/* z3fold page is empty, free */
518+
if (bud==HEADLESS) {
537519
spin_lock(&pool->lock);
538520
list_del(&page->lru);
539521
spin_unlock(&pool->lock);
540-
clear_bit(PAGE_HEADLESS,&page->private);
541-
if (bud!=HEADLESS)
542-
z3fold_page_unlock(zhdr);
543-
free_z3fold_page(zhdr);
522+
free_z3fold_page(page);
544523
atomic64_dec(&pool->pages_nr);
545524
}else {
546-
z3fold_compact_page(zhdr);
547-
/* Add to the unbuddied list */
525+
if (zhdr->first_chunks!=0||zhdr->middle_chunks!=0||
526+
zhdr->last_chunks!=0) {
527+
z3fold_compact_page(zhdr);
528+
/* Add to the unbuddied list */
529+
spin_lock(&pool->lock);
530+
if (!list_empty(&zhdr->buddy))
531+
list_del(&zhdr->buddy);
532+
freechunks=num_free_chunks(zhdr);
533+
list_add(&zhdr->buddy,&pool->unbuddied[freechunks]);
534+
spin_unlock(&pool->lock);
535+
}
536+
z3fold_page_unlock(zhdr);
548537
spin_lock(&pool->lock);
549-
freechunks=num_free_chunks(zhdr);
550-
list_add(&zhdr->buddy,&pool->unbuddied[freechunks]);
538+
if (kref_put(&zhdr->refcount,release_z3fold_page))
539+
atomic64_dec(&pool->pages_nr);
551540
spin_unlock(&pool->lock);
552-
z3fold_page_unlock(zhdr);
553541
}
554542

555543
}
@@ -608,13 +596,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
608596
return-EINVAL;
609597
}
610598
page=list_last_entry(&pool->lru,structpage,lru);
611-
list_del(&page->lru);
599+
list_del_init(&page->lru);
612600

613-
/* Protect z3fold page against free */
614-
set_bit(UNDER_RECLAIM,&page->private);
615601
zhdr=page_address(page);
616602
if (!test_bit(PAGE_HEADLESS,&page->private)) {
617-
list_del(&zhdr->buddy);
603+
if (!list_empty(&zhdr->buddy))
604+
list_del_init(&zhdr->buddy);
605+
kref_get(&zhdr->refcount);
618606
spin_unlock(&pool->lock);
619607
z3fold_page_lock(zhdr);
620608
/*
@@ -655,30 +643,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
655643
gotonext;
656644
}
657645
next:
658-
if (!test_bit(PAGE_HEADLESS,&page->private))
659-
z3fold_page_lock(zhdr);
660-
clear_bit(UNDER_RECLAIM,&page->private);
661-
if ((test_bit(PAGE_HEADLESS,&page->private)&&ret==0)||
662-
(zhdr->first_chunks==0&&zhdr->last_chunks==0&&
663-
zhdr->middle_chunks==0)) {
664-
/*
665-
* All buddies are now free, free the z3fold page and
666-
* return success.
667-
*/
668-
if (!test_and_clear_bit(PAGE_HEADLESS,&page->private))
669-
z3fold_page_unlock(zhdr);
670-
free_z3fold_page(zhdr);
671-
atomic64_dec(&pool->pages_nr);
672-
return0;
673-
}elseif (!test_bit(PAGE_HEADLESS,&page->private)) {
674-
if (zhdr->first_chunks!=0&&
675-
zhdr->last_chunks!=0&&
676-
zhdr->middle_chunks!=0) {
677-
/* Full, add to buddied list */
678-
spin_lock(&pool->lock);
679-
list_add(&zhdr->buddy,&pool->buddied);
680-
spin_unlock(&pool->lock);
646+
if (test_bit(PAGE_HEADLESS,&page->private)) {
647+
if (ret==0) {
648+
free_z3fold_page(page);
649+
return0;
681650
}else {
651+
spin_lock(&pool->lock);
652+
}
653+
}else {
654+
z3fold_page_lock(zhdr);
655+
if ((zhdr->first_chunks||zhdr->last_chunks||
656+
zhdr->middle_chunks)&&
657+
!(zhdr->first_chunks&&zhdr->last_chunks&&
658+
zhdr->middle_chunks)) {
682659
z3fold_compact_page(zhdr);
683660
/* add to unbuddied list */
684661
spin_lock(&pool->lock);
@@ -687,13 +664,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
687664
&pool->unbuddied[freechunks]);
688665
spin_unlock(&pool->lock);
689666
}
690-
}
691-
692-
if (!test_bit(PAGE_HEADLESS,&page->private))
693667
z3fold_page_unlock(zhdr);
668+
spin_lock(&pool->lock);
669+
if (kref_put(&zhdr->refcount,release_z3fold_page)) {
670+
atomic64_dec(&pool->pages_nr);
671+
return0;
672+
}
673+
}
694674

695-
spin_lock(&pool->lock);
696-
/* add to beginning of LRU */
675+
/*
676+
* Add to the beginning of LRU.
677+
* Pool lock has to be kept here to ensure the page has
678+
* not already been released
679+
*/
697680
list_add(&page->lru,&pool->lru);
698681
}
699682
spin_unlock(&pool->lock);

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp