Skip to content

Commit d6c1ea1

Browse files
aagitsfrothwell authored and committed
mm: gup: pack has_pinned in MMF_HAS_PINNED
has_pinned 32bit can be packed in the MMF_HAS_PINNED bit as a noop cleanup. Any atomic_inc/dec to the mm cacheline shared by all threads in pin-fast would reintroduce a loss of SMP scalability to pin-fast, so there's no future potential usefulness to keep an atomic in the mm for this. set_bit(MMF_HAS_PINNED) will be theoretically a bit slower than WRITE_ONCE (atomic_set is equivalent to WRITE_ONCE), but the set_bit (just like atomic_set after this commit) has to be still issued only once per "mm", so the difference between the two will be lost in the noise. will-it-scale "mmap2" shows no change in performance with enterprise config as expected. will-it-scale "pin_fast" retains the > 4000% SMP scalability performance improvement against upstream as expected. This is a noop as far as overall performance and SMP scalability are concerned. [[email protected]: fix build for task_mmu.c, introduce mm_set_has_pinned_flag, fix comments] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Andrea Arcangeli <[email protected]> Signed-off-by: Peter Xu <[email protected]> Reviewed-by: John Hubbard <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Jan Kara <[email protected]> Cc: Jann Horn <[email protected]> Cc: Jason Gunthorpe <[email protected]> Cc: Kirill Shutemov <[email protected]> Cc: Kirill Tkhai <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Oleg Nesterov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Stephen Rothwell <[email protected]>
1 parent 21ffd7b commit d6c1ea1

File tree

6 files changed

+25
-17
lines changed

6 files changed

+25
-17
lines changed

fs/proc/task_mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1047,7 +1047,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
10471047
return false;
10481048
if (!is_cow_mapping(vma->vm_flags))
10491049
return false;
1050-
if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
1050+
if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
10511051
return false;
10521052
page = vm_normal_page(vma, addr, pte);
10531053
if (!page)

include/linux/mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1341,7 +1341,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
13411341
if (!is_cow_mapping(vma->vm_flags))
13421342
return false;
13431343

1344-
if (!atomic_read(&vma->vm_mm->has_pinned))
1344+
if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
13451345
return false;
13461346

13471347
return page_maybe_dma_pinned(page);

include/linux/mm_types.h

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -435,16 +435,6 @@ struct mm_struct {
435435
*/
436436
atomic_t mm_count;
437437

438-
/**
439-
* @has_pinned: Whether this mm has pinned any pages. This can
440-
* be either replaced in the future by @pinned_vm when it
441-
* becomes stable, or grow into a counter on its own. We're
442-
* aggresive on this bit now - even if the pinned pages were
443-
* unpinned later on, we'll still keep this bit set for the
444-
* lifecycle of this mm just for simplicity.
445-
*/
446-
atomic_t has_pinned;
447-
448438
/**
449439
* @write_protect_seq: Locked when any thread is write
450440
* protecting pages mapped by this mm to enforce a later COW,

include/linux/sched/coredump.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
7373
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
7474
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
7575
#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
76+
/*
77+
* MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
78+
* replaced in the future by mm.pinned_vm when it becomes stable, or grow into
79+
* a counter on its own. We're aggresive on this bit for now: even if the
80+
* pinned pages were unpinned later on, we'll still keep this bit set for the
81+
* lifecycle of this mm, just for simplicity.
82+
*/
83+
#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
7684
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
7785

7886
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\

kernel/fork.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1029,7 +1029,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
10291029
mm_pgtables_bytes_init(mm);
10301030
mm->map_count = 0;
10311031
mm->locked_vm = 0;
1032-
atomic_set(&mm->has_pinned, 0);
10331032
atomic64_set(&mm->pinned_vm, 0);
10341033
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
10351034
spin_lock_init(&mm->page_table_lock);

mm/gup.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1270,6 +1270,17 @@ int fixup_user_fault(struct mm_struct *mm,
12701270
}
12711271
EXPORT_SYMBOL_GPL(fixup_user_fault);
12721272

1273+
/*
1274+
* Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
1275+
* lifecycle. Avoid setting the bit unless necessary, or it might cause write
1276+
* cache bouncing on large SMP machines for concurrent pinned gups.
1277+
*/
1278+
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
1279+
{
1280+
if (!test_bit(MMF_HAS_PINNED, mm_flags))
1281+
set_bit(MMF_HAS_PINNED, mm_flags);
1282+
}
1283+
12731284
/*
12741285
* Please note that this function, unlike __get_user_pages will not
12751286
* return 0 for nr_pages > 0 without FOLL_NOWAIT
@@ -1292,8 +1303,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
12921303
BUG_ON(*locked != 1);
12931304
}
12941305

1295-
if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
1296-
atomic_set(&mm->has_pinned, 1);
1306+
if (flags & FOLL_PIN)
1307+
mm_set_has_pinned_flag(&mm->flags);
12971308

12981309
/*
12991310
* FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
@@ -2613,8 +2624,8 @@ static int internal_get_user_pages_fast(unsigned long start,
26132624
FOLL_FAST_ONLY)))
26142625
return -EINVAL;
26152626

2616-
if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
2617-
atomic_set(&current->mm->has_pinned, 1);
2627+
if (gup_flags & FOLL_PIN)
2628+
mm_set_has_pinned_flag(&current->mm->flags);
26182629

26192630
if (!(gup_flags & FOLL_FAST_ONLY))
26202631
might_lock_read(&current->mm->mmap_lock);

0 commit comments

Comments (0)