Skip to content

Commit 31defc3

Browse files
tzussman authored and akpm00 committed
userfaultfd: remove (VM_)BUG_ON()s
BUG_ON() is deprecated [1]. Convert all the BUG_ON()s and VM_BUG_ON()s to use VM_WARN_ON_ONCE(). There are a few additional cases that are converted or modified: - Convert the printk(KERN_WARNING ...) in handle_userfault() to use pr_warn(). - Convert the WARN_ON_ONCE()s in move_pages() to use VM_WARN_ON_ONCE(), as the relevant conditions are already checked in validate_range() in move_pages()'s caller. - Convert the VM_WARN_ON()'s in move_pages() to VM_WARN_ON_ONCE(). These cases should never happen and are similar to those in mfill_atomic() and mfill_atomic_hugetlb(), which were previously BUG_ON()s. move_pages() was added later than those functions and makes use of VM_WARN_ON() as a replacement for the deprecated BUG_ON(), but VM_WARN_ON_ONCE() is likely a better direct replacement. - Convert the WARN_ON() for !VM_MAYWRITE in userfaultfd_unregister() and userfaultfd_register_range() to VM_WARN_ON_ONCE(). This condition is enforced in userfaultfd_register() so it should never happen, and can be converted to a debug check. [1] https://www.kernel.org/doc/html/v6.15/process/coding-style.html#use-warn-rather-than-bug Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Tal Zussman <[email protected]> Cc: Al Viro <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Christian Brauner <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: Jan Kara <[email protected]> Cc: Jason A. Donenfeld <[email protected]> Cc: Peter Xu <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 23ec90e commit 31defc3

File tree

2 files changed

+62
-65
lines changed

2 files changed

+62
-65
lines changed

fs/userfaultfd.c

Lines changed: 29 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -165,14 +165,14 @@ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
165165
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
166166
{
167167
if (refcount_dec_and_test(&ctx->refcount)) {
168-
VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
169-
VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
170-
VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
171-
VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
172-
VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
173-
VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
174-
VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
175-
VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
168+
VM_WARN_ON_ONCE(spin_is_locked(&ctx->fault_pending_wqh.lock));
169+
VM_WARN_ON_ONCE(waitqueue_active(&ctx->fault_pending_wqh));
170+
VM_WARN_ON_ONCE(spin_is_locked(&ctx->fault_wqh.lock));
171+
VM_WARN_ON_ONCE(waitqueue_active(&ctx->fault_wqh));
172+
VM_WARN_ON_ONCE(spin_is_locked(&ctx->event_wqh.lock));
173+
VM_WARN_ON_ONCE(waitqueue_active(&ctx->event_wqh));
174+
VM_WARN_ON_ONCE(spin_is_locked(&ctx->fd_wqh.lock));
175+
VM_WARN_ON_ONCE(waitqueue_active(&ctx->fd_wqh));
176176
mmdrop(ctx->mm);
177177
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
178178
}
@@ -383,12 +383,12 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
383383
if (!ctx)
384384
goto out;
385385

386-
BUG_ON(ctx->mm != mm);
386+
VM_WARN_ON_ONCE(ctx->mm != mm);
387387

388388
/* Any unrecognized flag is a bug. */
389-
VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
389+
VM_WARN_ON_ONCE(reason & ~__VM_UFFD_FLAGS);
390390
/* 0 or > 1 flags set is a bug; we expect exactly 1. */
391-
VM_BUG_ON(!reason || (reason & (reason - 1)));
391+
VM_WARN_ON_ONCE(!reason || (reason & (reason - 1)));
392392

393393
if (ctx->features & UFFD_FEATURE_SIGBUS)
394394
goto out;
@@ -411,12 +411,11 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
411411
* to be sure not to return SIGBUS erroneously on
412412
* nowait invocations.
413413
*/
414-
BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
414+
VM_WARN_ON_ONCE(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
415415
#ifdef CONFIG_DEBUG_VM
416416
if (printk_ratelimit()) {
417-
printk(KERN_WARNING
418-
"FAULT_FLAG_ALLOW_RETRY missing %x\n",
419-
vmf->flags);
417+
pr_warn("FAULT_FLAG_ALLOW_RETRY missing %x\n",
418+
vmf->flags);
420419
dump_stack();
421420
}
422421
#endif
@@ -602,7 +601,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
602601
*/
603602
out:
604603
atomic_dec(&ctx->mmap_changing);
605-
VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
604+
VM_WARN_ON_ONCE(atomic_read(&ctx->mmap_changing) < 0);
606605
userfaultfd_ctx_put(ctx);
607606
}
608607

@@ -710,7 +709,7 @@ void dup_userfaultfd_fail(struct list_head *fcs)
710709
struct userfaultfd_ctx *ctx = fctx->new;
711710

712711
atomic_dec(&octx->mmap_changing);
713-
VM_BUG_ON(atomic_read(&octx->mmap_changing) < 0);
712+
VM_WARN_ON_ONCE(atomic_read(&octx->mmap_changing) < 0);
714713
userfaultfd_ctx_put(octx);
715714
userfaultfd_ctx_put(ctx);
716715

@@ -1317,8 +1316,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
13171316
do {
13181317
cond_resched();
13191318

1320-
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1321-
!!(cur->vm_flags & __VM_UFFD_FLAGS));
1319+
VM_WARN_ON_ONCE(!!cur->vm_userfaultfd_ctx.ctx ^
1320+
!!(cur->vm_flags & __VM_UFFD_FLAGS));
13221321

13231322
/* check not compatible vmas */
13241323
ret = -EINVAL;
@@ -1372,7 +1371,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
13721371

13731372
found = true;
13741373
} for_each_vma_range(vmi, cur, end);
1375-
BUG_ON(!found);
1374+
VM_WARN_ON_ONCE(!found);
13761375

13771376
ret = userfaultfd_register_range(ctx, vma, vm_flags, start, end,
13781377
wp_async);
@@ -1464,8 +1463,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
14641463
do {
14651464
cond_resched();
14661465

1467-
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1468-
!!(cur->vm_flags & __VM_UFFD_FLAGS));
1466+
VM_WARN_ON_ONCE(!!cur->vm_userfaultfd_ctx.ctx ^
1467+
!!(cur->vm_flags & __VM_UFFD_FLAGS));
14691468

14701469
/*
14711470
* Prevent unregistering through a different userfaultfd than
@@ -1487,7 +1486,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
14871486

14881487
found = true;
14891488
} for_each_vma_range(vmi, cur, end);
1490-
BUG_ON(!found);
1489+
VM_WARN_ON_ONCE(!found);
14911490

14921491
vma_iter_set(&vmi, start);
14931492
prev = vma_prev(&vmi);
@@ -1504,7 +1503,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
15041503

15051504
VM_WARN_ON_ONCE(vma->vm_userfaultfd_ctx.ctx != ctx);
15061505
VM_WARN_ON_ONCE(!vma_can_userfault(vma, vma->vm_flags, wp_async));
1507-
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1506+
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE));
15081507

15091508
if (vma->vm_start > start)
15101509
start = vma->vm_start;
@@ -1569,7 +1568,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
15691568
* len == 0 means wake all and we don't want to wake all here,
15701569
* so check it again to be sure.
15711570
*/
1572-
VM_BUG_ON(!range.len);
1571+
VM_WARN_ON_ONCE(!range.len);
15731572

15741573
wake_userfault(ctx, &range);
15751574
ret = 0;
@@ -1626,7 +1625,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
16261625
return -EFAULT;
16271626
if (ret < 0)
16281627
goto out;
1629-
BUG_ON(!ret);
1628+
VM_WARN_ON_ONCE(!ret);
16301629
/* len == 0 would wake all */
16311630
range.len = ret;
16321631
if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
@@ -1681,7 +1680,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
16811680
if (ret < 0)
16821681
goto out;
16831682
/* len == 0 would wake all */
1684-
BUG_ON(!ret);
1683+
VM_WARN_ON_ONCE(!ret);
16851684
range.len = ret;
16861685
if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
16871686
range.start = uffdio_zeropage.range.start;
@@ -1793,7 +1792,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
17931792
goto out;
17941793

17951794
/* len == 0 would wake all */
1796-
BUG_ON(!ret);
1795+
VM_WARN_ON_ONCE(!ret);
17971796
range.len = ret;
17981797
if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
17991798
range.start = uffdio_continue.range.start;
@@ -1850,7 +1849,7 @@ static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long
18501849
goto out;
18511850

18521851
/* len == 0 would wake all */
1853-
BUG_ON(!ret);
1852+
VM_WARN_ON_ONCE(!ret);
18541853
range.len = ret;
18551854
if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
18561855
range.start = uffdio_poison.range.start;
@@ -2111,7 +2110,7 @@ static int new_userfaultfd(int flags)
21112110
struct file *file;
21122111
int fd;
21132112

2114-
BUG_ON(!current->mm);
2113+
VM_WARN_ON_ONCE(!current->mm);
21152114

21162115
/* Check the UFFD_* constants for consistency. */
21172116
BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);

mm/userfaultfd.c

Lines changed: 33 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -561,7 +561,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
561561
}
562562

563563
while (src_addr < src_start + len) {
564-
BUG_ON(dst_addr >= dst_start + len);
564+
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
565565

566566
/*
567567
* Serialize via vma_lock and hugetlb_fault_mutex.
@@ -602,7 +602,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
602602
if (unlikely(err == -ENOENT)) {
603603
up_read(&ctx->map_changing_lock);
604604
uffd_mfill_unlock(dst_vma);
605-
BUG_ON(!folio);
605+
VM_WARN_ON_ONCE(!folio);
606606

607607
err = copy_folio_from_user(folio,
608608
(const void __user *)src_addr, true);
@@ -614,7 +614,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
614614
dst_vma = NULL;
615615
goto retry;
616616
} else
617-
BUG_ON(folio);
617+
VM_WARN_ON_ONCE(folio);
618618

619619
if (!err) {
620620
dst_addr += vma_hpagesize;
@@ -635,9 +635,9 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
635635
out:
636636
if (folio)
637637
folio_put(folio);
638-
BUG_ON(copied < 0);
639-
BUG_ON(err > 0);
640-
BUG_ON(!copied && !err);
638+
VM_WARN_ON_ONCE(copied < 0);
639+
VM_WARN_ON_ONCE(err > 0);
640+
VM_WARN_ON_ONCE(!copied && !err);
641641
return copied ? copied : err;
642642
}
643643
#else /* !CONFIG_HUGETLB_PAGE */
@@ -711,12 +711,12 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
711711
/*
712712
* Sanitize the command parameters:
713713
*/
714-
BUG_ON(dst_start & ~PAGE_MASK);
715-
BUG_ON(len & ~PAGE_MASK);
714+
VM_WARN_ON_ONCE(dst_start & ~PAGE_MASK);
715+
VM_WARN_ON_ONCE(len & ~PAGE_MASK);
716716

717717
/* Does the address range wrap, or is the span zero-sized? */
718-
BUG_ON(src_start + len <= src_start);
719-
BUG_ON(dst_start + len <= dst_start);
718+
VM_WARN_ON_ONCE(src_start + len <= src_start);
719+
VM_WARN_ON_ONCE(dst_start + len <= dst_start);
720720

721721
src_addr = src_start;
722722
dst_addr = dst_start;
@@ -775,7 +775,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
775775
while (src_addr < src_start + len) {
776776
pmd_t dst_pmdval;
777777

778-
BUG_ON(dst_addr >= dst_start + len);
778+
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
779779

780780
dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
781781
if (unlikely(!dst_pmd)) {
@@ -818,7 +818,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
818818

819819
up_read(&ctx->map_changing_lock);
820820
uffd_mfill_unlock(dst_vma);
821-
BUG_ON(!folio);
821+
VM_WARN_ON_ONCE(!folio);
822822

823823
kaddr = kmap_local_folio(folio, 0);
824824
err = copy_from_user(kaddr,
@@ -832,7 +832,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
832832
flush_dcache_folio(folio);
833833
goto retry;
834834
} else
835-
BUG_ON(folio);
835+
VM_WARN_ON_ONCE(folio);
836836

837837
if (!err) {
838838
dst_addr += PAGE_SIZE;
@@ -852,9 +852,9 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
852852
out:
853853
if (folio)
854854
folio_put(folio);
855-
BUG_ON(copied < 0);
856-
BUG_ON(err > 0);
857-
BUG_ON(!copied && !err);
855+
VM_WARN_ON_ONCE(copied < 0);
856+
VM_WARN_ON_ONCE(err > 0);
857+
VM_WARN_ON_ONCE(!copied && !err);
858858
return copied ? copied : err;
859859
}
860860

@@ -940,11 +940,11 @@ int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
940940
/*
941941
* Sanitize the command parameters:
942942
*/
943-
BUG_ON(start & ~PAGE_MASK);
944-
BUG_ON(len & ~PAGE_MASK);
943+
VM_WARN_ON_ONCE(start & ~PAGE_MASK);
944+
VM_WARN_ON_ONCE(len & ~PAGE_MASK);
945945

946946
/* Does the address range wrap, or is the span zero-sized? */
947-
BUG_ON(start + len <= start);
947+
VM_WARN_ON_ONCE(start + len <= start);
948948

949949
mmap_read_lock(dst_mm);
950950

@@ -1738,15 +1738,13 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
17381738
ssize_t moved = 0;
17391739

17401740
/* Sanitize the command parameters. */
1741-
if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1742-
WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1743-
WARN_ON_ONCE(len & ~PAGE_MASK))
1744-
goto out;
1741+
VM_WARN_ON_ONCE(src_start & ~PAGE_MASK);
1742+
VM_WARN_ON_ONCE(dst_start & ~PAGE_MASK);
1743+
VM_WARN_ON_ONCE(len & ~PAGE_MASK);
17451744

17461745
/* Does the address range wrap, or is the span zero-sized? */
1747-
if (WARN_ON_ONCE(src_start + len <= src_start) ||
1748-
WARN_ON_ONCE(dst_start + len <= dst_start))
1749-
goto out;
1746+
VM_WARN_ON_ONCE(src_start + len < src_start);
1747+
VM_WARN_ON_ONCE(dst_start + len < dst_start);
17501748

17511749
err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
17521750
if (err)
@@ -1896,9 +1894,9 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
18961894
up_read(&ctx->map_changing_lock);
18971895
uffd_move_unlock(dst_vma, src_vma);
18981896
out:
1899-
VM_WARN_ON(moved < 0);
1900-
VM_WARN_ON(err > 0);
1901-
VM_WARN_ON(!moved && !err);
1897+
VM_WARN_ON_ONCE(moved < 0);
1898+
VM_WARN_ON_ONCE(err > 0);
1899+
VM_WARN_ON_ONCE(!moved && !err);
19021900
return moved ? moved : err;
19031901
}
19041902

@@ -1985,10 +1983,10 @@ int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
19851983
for_each_vma_range(vmi, vma, end) {
19861984
cond_resched();
19871985

1988-
BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
1989-
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1990-
vma->vm_userfaultfd_ctx.ctx != ctx);
1991-
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1986+
VM_WARN_ON_ONCE(!vma_can_userfault(vma, vm_flags, wp_async));
1987+
VM_WARN_ON_ONCE(vma->vm_userfaultfd_ctx.ctx &&
1988+
vma->vm_userfaultfd_ctx.ctx != ctx);
1989+
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE));
19921990

19931991
/*
19941992
* Nothing to do: this vma is already registered into this
@@ -2064,8 +2062,8 @@ void userfaultfd_release_all(struct mm_struct *mm,
20642062
prev = NULL;
20652063
for_each_vma(vmi, vma) {
20662064
cond_resched();
2067-
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
2068-
!!(vma->vm_flags & __VM_UFFD_FLAGS));
2065+
VM_WARN_ON_ONCE(!!vma->vm_userfaultfd_ctx.ctx ^
2066+
!!(vma->vm_flags & __VM_UFFD_FLAGS));
20692067
if (vma->vm_userfaultfd_ctx.ctx != ctx) {
20702068
prev = vma;
20712069
continue;

0 commit comments

Comments
 (0)