88#include <linux/srcu.h>
99#include <linux/interval_tree.h>
1010
11- struct mmu_notifier_mm ;
11+ struct mmu_notifier_subscriptions ;
1212struct mmu_notifier ;
1313struct mmu_notifier_range ;
1414struct mmu_interval_notifier ;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
7373 * through the gart alias address, so leading to memory
7474 * corruption.
7575 */
76- void (* release )(struct mmu_notifier * mn ,
76+ void (* release )(struct mmu_notifier * subscription ,
7777 struct mm_struct * mm );
7878
7979 /*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
8585 * Start-end is necessary in case the secondary MMU is mapping the page
8686 * at a smaller granularity than the primary MMU.
8787 */
88- int (* clear_flush_young )(struct mmu_notifier * mn ,
88+ int (* clear_flush_young )(struct mmu_notifier * subscription ,
8989 struct mm_struct * mm ,
9090 unsigned long start ,
9191 unsigned long end );
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
9595 * latter, it is supposed to test-and-clear the young/accessed bitflag
9696 * in the secondary pte, but it may omit flushing the secondary tlb.
9797 */
98- int (* clear_young )(struct mmu_notifier * mn ,
98+ int (* clear_young )(struct mmu_notifier * subscription ,
9999 struct mm_struct * mm ,
100100 unsigned long start ,
101101 unsigned long end );
@@ -106,15 +106,15 @@ struct mmu_notifier_ops {
106106 * frequently used without actually clearing the flag or tearing
107107 * down the secondary mapping on the page.
108108 */
109- int (* test_young )(struct mmu_notifier * mn ,
109+ int (* test_young )(struct mmu_notifier * subscription ,
110110 struct mm_struct * mm ,
111111 unsigned long address );
112112
113113 /*
114114 * change_pte is called in cases that pte mapping to page is changed:
115115 * for example, when ksm remaps pte to point to a new shared page.
116116 */
117- void (* change_pte )(struct mmu_notifier * mn ,
117+ void (* change_pte )(struct mmu_notifier * subscription ,
118118 struct mm_struct * mm ,
119119 unsigned long address ,
120120 pte_t pte );
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
169169 * invalidate_range_end.
170170 *
171171 */
172- int (* invalidate_range_start )(struct mmu_notifier * mn ,
172+ int (* invalidate_range_start )(struct mmu_notifier * subscription ,
173173 const struct mmu_notifier_range * range );
174- void (* invalidate_range_end )(struct mmu_notifier * mn ,
174+ void (* invalidate_range_end )(struct mmu_notifier * subscription ,
175175 const struct mmu_notifier_range * range );
176176
177177 /*
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
192192 * of what was passed to invalidate_range_start()/end(), if
193193 * called between those functions.
194194 */
195- void (* invalidate_range )(struct mmu_notifier * mn , struct mm_struct * mm ,
196- unsigned long start , unsigned long end );
195+ void (* invalidate_range )(struct mmu_notifier * subscription ,
196+ struct mm_struct * mm ,
197+ unsigned long start ,
198+ unsigned long end );
197199
198200 /*
199201 * These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
206208 * and cannot sleep.
207209 */
208210 struct mmu_notifier * (* alloc_notifier )(struct mm_struct * mm );
209- void (* free_notifier )(struct mmu_notifier * mn );
211+ void (* free_notifier )(struct mmu_notifier * subscription );
210212};
211213
212214/*
@@ -235,7 +237,7 @@ struct mmu_notifier {
235237 * was required but mmu_notifier_range_blockable(range) is false.
236238 */
struct mmu_interval_notifier_ops {
	/*
	 * invalidate() - notifies the subscription that part or all of its
	 * VA range has been invalidated. The callback receives the current
	 * invalidation sequence (@cur_seq), which it must publish via
	 * mmu_interval_set_seq(). Returns false only when blocking was
	 * required but mmu_notifier_range_blockable(range) is false.
	 */
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
265267
/*
 * Returns nonzero when at least one mmu notifier subscription has been
 * registered on @mm (i.e. mm->notifier_subscriptions is non-NULL).
 */
static inline int mm_has_notifiers(struct mm_struct *mm)
{
	/* unlikely(): hints the pointer is normally NULL (no subscribers). */
	return unlikely(mm->notifier_subscriptions);
}
270272
271273struct mmu_notifier * mmu_notifier_get_locked (const struct mmu_notifier_ops * ops ,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
280282 up_write (& mm -> mmap_sem );
281283 return ret ;
282284}
283- void mmu_notifier_put (struct mmu_notifier * mn );
285+ void mmu_notifier_put (struct mmu_notifier * subscription );
284286void mmu_notifier_synchronize (void );
285287
286- extern int mmu_notifier_register (struct mmu_notifier * mn ,
288+ extern int mmu_notifier_register (struct mmu_notifier * subscription ,
287289 struct mm_struct * mm );
288- extern int __mmu_notifier_register (struct mmu_notifier * mn ,
290+ extern int __mmu_notifier_register (struct mmu_notifier * subscription ,
289291 struct mm_struct * mm );
290- extern void mmu_notifier_unregister (struct mmu_notifier * mn ,
292+ extern void mmu_notifier_unregister (struct mmu_notifier * subscription ,
291293 struct mm_struct * mm );
292294
293- unsigned long mmu_interval_read_begin (struct mmu_interval_notifier * mni );
294- int mmu_interval_notifier_insert (struct mmu_interval_notifier * mni ,
295+ unsigned long
296+ mmu_interval_read_begin (struct mmu_interval_notifier * interval_sub );
297+ int mmu_interval_notifier_insert (struct mmu_interval_notifier * interval_sub ,
295298 struct mm_struct * mm , unsigned long start ,
296299 unsigned long length ,
297300 const struct mmu_interval_notifier_ops * ops );
298301int mmu_interval_notifier_insert_locked (
299- struct mmu_interval_notifier * mni , struct mm_struct * mm ,
302+ struct mmu_interval_notifier * interval_sub , struct mm_struct * mm ,
300303 unsigned long start , unsigned long length ,
301304 const struct mmu_interval_notifier_ops * ops );
302- void mmu_interval_notifier_remove (struct mmu_interval_notifier * mni );
305+ void mmu_interval_notifier_remove (struct mmu_interval_notifier * interval_sub );
303306
304307/**
305308 * mmu_interval_set_seq - Save the invalidation sequence
306- * @mni - The mni passed to invalidate
309+ * @interval_sub - The subscription passed to invalidate
307310 * @cur_seq - The cur_seq passed to the invalidate() callback
308311 *
309312 * This must be called unconditionally from the invalidate callback of a
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
314317 * If the caller does not call mmu_interval_read_begin() or
315318 * mmu_interval_read_retry() then this call is not required.
316319 */
/*
 * Save the invalidation sequence for @interval_sub. Must be called
 * unconditionally from the invalidate() callback (see the kernel-doc
 * above); not required if the caller never uses mmu_interval_read_begin()
 * or mmu_interval_read_retry().
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	/* Pairs with the READ_ONCE() in mmu_interval_check_retry(). */
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
322326
323327/**
324328 * mmu_interval_read_retry - End a read side critical section against a VA range
325- * mni : The range
329+ * interval_sub : The subscription
326330 * seq: The return of the paired mmu_interval_read_begin()
327331 *
328332 * This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
334338 * Returns true if an invalidation collided with this critical section, and
335339 * the caller should retry.
336340 */
337- static inline bool mmu_interval_read_retry (struct mmu_interval_notifier * mni ,
338- unsigned long seq )
341+ static inline bool
342+ mmu_interval_read_retry (struct mmu_interval_notifier * interval_sub ,
343+ unsigned long seq )
339344{
340- return mni -> invalidate_seq != seq ;
345+ return interval_sub -> invalidate_seq != seq ;
341346}
342347
343348/**
344349 * mmu_interval_check_retry - Test if a collision has occurred
345- * mni : The range
350+ * interval_sub : The subscription
346351 * seq: The return of the matching mmu_interval_read_begin()
347352 *
348353 * This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
357362 * This call can be used as part of loops and other expensive operations to
358363 * expedite a retry.
359364 */
360- static inline bool mmu_interval_check_retry (struct mmu_interval_notifier * mni ,
361- unsigned long seq )
365+ static inline bool
366+ mmu_interval_check_retry (struct mmu_interval_notifier * interval_sub ,
367+ unsigned long seq )
362368{
363369 /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
364- return READ_ONCE (mni -> invalidate_seq ) != seq ;
370+ return READ_ONCE (interval_sub -> invalidate_seq ) != seq ;
365371}
366372
367- extern void __mmu_notifier_mm_destroy (struct mm_struct * mm );
373+ extern void __mmu_notifier_subscriptions_destroy (struct mm_struct * mm );
368374extern void __mmu_notifier_release (struct mm_struct * mm );
369375extern int __mmu_notifier_clear_flush_young (struct mm_struct * mm ,
370376 unsigned long start ,
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
480486 __mmu_notifier_invalidate_range (mm , start , end );
481487}
482488
/* Initialize a new mm: no notifier subscriptions exist yet. */
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}
487493
/*
 * Tear down the mm's notifier state; the destroy helper is only invoked
 * when a subscriptions structure was actually allocated.
 */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}
493499
494500
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
692698{
693699}
694700
/*
 * No-op stub when MMU notifier support is compiled out — presumably the
 * !CONFIG_MMU_NOTIFIER branch; confirm against the enclosing #ifdef,
 * which is outside this view.
 */
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
698704
/*
 * No-op stub when MMU notifier support is compiled out — presumably the
 * !CONFIG_MMU_NOTIFIER branch; confirm against the enclosing #ifdef,
 * which is outside this view.
 */
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
702708
0 commit comments