@@ -135,7 +135,6 @@ static void page_cache_delete(struct address_space *mapping,
 	}
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
-	VM_BUG_ON_FOLIO(nr != 1 && shadow, folio);
 
 	xas_store(&xas, shadow);
 	xas_init_marks(&xas);
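
A note on the dropped assertion: with multi-index entries, one XArray slot now spans every index of a large folio, so a single store of a shadow entry can stand in for all of its pages, and `nr != 1 && shadow` is no longer an invalid state. A minimal sketch of the idea using the real `XA_STATE_ORDER()` and `xas_store()` APIs (locking and accounting omitted):

```c
/* Sketch: replace a large folio's single multi-index entry with a
 * shadow value.  One xas_store() covers all 2^order indices. */
XA_STATE_ORDER(xas, &mapping->i_pages, folio->index, folio_order(folio));

xas_store(&xas, shadow);	/* the whole folio's slot, not one page */
xas_init_marks(&xas);
```
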
@@ -286,7 +285,7 @@ static void page_cache_delete_batch(struct address_space *mapping,
 				struct folio_batch *fbatch)
 {
 	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
-	int total_pages = 0;
+	long total_pages = 0;
 	int i = 0;
 	struct folio *folio;
 
@@ -313,18 +312,12 @@ static void page_cache_delete_batch(struct address_space *mapping,
 
 		WARN_ON_ONCE(!folio_test_locked(folio));
 
-		if (folio->index == xas.xa_index)
-			folio->mapping = NULL;
+		folio->mapping = NULL;
 		/* Leave folio->index set: truncation lookup relies on it */
 
-		/*
-		 * Move to the next folio in the batch if this is a regular
-		 * folio or the index is of the last sub-page of this folio.
-		 */
-		if (folio->index + folio_nr_pages(folio) - 1 == xas.xa_index)
-			i++;
+		i++;
 		xas_store(&xas, NULL);
-		total_pages++;
+		total_pages += folio_nr_pages(folio);
 	}
 	mapping->nrpages -= total_pages;
 }
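
The batch loop now advances one folio per iteration (`i++` unconditionally) because each folio, large or small, occupies exactly one multi-index entry; the per-page bookkeeping moves into `total_pages += folio_nr_pages(folio)`, and the counter becomes `long` to match the base-page arithmetic on `mapping->nrpages`. A toy illustration of the accounting, assuming hypothetical order-9 folios:

```c
/* Illustration only: a full batch of 15 order-9 folios removes
 * 15 * 512 = 7680 base pages in just 15 loop iterations. */
long total_pages = 0;
unsigned int i;

for (i = 0; i < folio_batch_count(fbatch); i++)
	total_pages += folio_nr_pages(fbatch->folios[i]);
```
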
@@ -2089,24 +2082,27 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
-		goto next;
+		continue;
 unlock:
 		folio_unlock(folio);
 put:
 		folio_put(folio);
-next:
-		if (!xa_is_value(folio) && folio_test_large(folio)) {
-			xas_set(&xas, folio->index + folio_nr_pages(folio));
-			/* Did we wrap on 32-bit? */
-			if (!xas.xa_index)
-				break;
-		}
 	}
 	rcu_read_unlock();
 
 	return folio_batch_count(fbatch);
 }
 
+static inline
+bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
+{
+	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
+		return false;
+	if (index >= max)
+		return false;
+	return index < folio->index + folio_nr_pages(folio) - 1;
+}
+
 /**
  * find_get_pages_range - gang pagecache lookup
  * @mapping: The address_space to search
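
The new `folio_more_pages()` answers one question for the lookup loops below: is there another subpage of this same folio, below both the folio's last index and the caller's limit? Hugetlb folios are excluded because they are indexed as a single unit. A worked example with hypothetical values:

```c
/* An order-4 folio at index 64 spans indices 64..79. */
folio_more_pages(folio, 70, 128);	/* true:  70 < 64 + 16 - 1 */
folio_more_pages(folio, 79, 128);	/* false: 79 is the last subpage */
folio_more_pages(folio, 70, 70);	/* false: caller's limit reached */
```
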
@@ -2145,11 +2141,17 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 		if (xa_is_value(folio))
 			continue;
 
+again:
 		pages[ret] = folio_file_page(folio, xas.xa_index);
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
+		if (folio_more_pages(folio, xas.xa_index, end)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 	}
 
 	/*
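
The `again:` loop hands out each subpage of a large folio as its own entry in `pages[]`. The lookup took one reference on the folio, and `folio_ref_inc()` adds one more per extra subpage, so every returned page can be dropped independently. A hypothetical caller, to show the contract (error handling omitted):

```c
/* Hypothetical caller: each returned page carries its own reference. */
struct page *pages[16];
pgoff_t start = 0;
unsigned int i, n;

n = find_get_pages_range(mapping, &start, ULONG_MAX, 16, pages);
for (i = 0; i < n; i++) {
	/* ... use pages[i] ... */
	put_page(pages[i]);	/* drop that page's reference */
}
```
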
@@ -2207,9 +2209,15 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 		if (unlikely(folio != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = &folio->page;
+again:
+		pages[ret] = folio_file_page(folio, xas.xa_index);
 		if (++ret == nr_pages)
 			break;
+		if (folio_more_pages(folio, xas.xa_index, ULONG_MAX)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 		continue;
put_page:
 		folio_put(folio);
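
The contiguous variant has no end index, hence `ULONG_MAX` as the bound, and `folio_file_page()` replaces the old `&folio->page` so callers get the subpage matching `xas.xa_index` rather than always the head page. Roughly what the helper does (paraphrased; the hugetlb special case is omitted):

```c
/* Paraphrase of folio_file_page(): pick the subpage for @index.
 * Folios are naturally aligned, so masking by nr_pages - 1 yields
 * the offset of @index within the folio. */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
```
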
@@ -2334,8 +2342,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			break;
 		if (folio_test_readahead(folio))
 			break;
-		xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
-		xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
+		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
 		continue;
put_folio:
 		folio_put(folio);
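
`xas_advance()` replaces the open-coded pair of assignments. Besides being shorter, it computes the offset with the shift of the node currently being walked instead of `xas.xa_shift` (the order the state was declared with), which is the correct shift once multi-index entries are in play. Its definition is roughly:

```c
/* From <linux/xarray.h> (roughly): skip ahead to @index, keeping
 * xa_offset consistent with the node the cursor is currently in. */
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}
```
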
@@ -3284,6 +3291,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
+again:
 		page = folio_file_page(folio, xas.xa_index);
 		if (PageHWPoison(page))
 			goto unlock;
@@ -3305,9 +3313,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		do_set_pte(vmf, page, addr);
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, addr, vmf->pte);
+		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 		folio_unlock(folio);
 		continue;
unlock:
+		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
+			xas.xa_index++;
+			goto again;
+		}
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
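
The two `folio_more_pages()` blocks differ deliberately in their reference handling: on the mapped path each installed PTE owns a page reference, so `folio_ref_inc()` takes a new one before moving to the next subpage, while on the `unlock:` path no PTE was installed and the existing reference is simply carried forward. In sketch form:

```c
/* Per-folio reference accounting in the loop above (sketch):
 *
 *   lookup:              +1 reference
 *   each extra mapped
 *   subpage:             +1 via folio_ref_inc()
 *   each installed PTE:  owns 1 reference
 *   unlock: exit path:   -1 via folio_put()
 *
 * A folio whose every subpage is mapped transfers all of its
 * references to the page tables; a skipped subpage (e.g. HWPoison)
 * reuses the reference it entered the iteration with.
 */
```
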