@@ -140,12 +140,13 @@ static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
140140 u32 * cur_out )
141141{
142142 const u32 sectorsize = fs_info -> sectorsize ;
143+ const u32 min_folio_shift = PAGE_SHIFT + fs_info -> block_min_order ;
143144 u32 sector_bytes_left ;
144145 u32 orig_out ;
145146 struct folio * cur_folio ;
146147 char * kaddr ;
147148
148- if ((* cur_out / PAGE_SIZE ) >= max_nr_folio )
149+ if ((* cur_out >> min_folio_shift ) >= max_nr_folio )
149150 return - E2BIG ;
150151
151152 /*
@@ -154,18 +155,17 @@ static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
154155 */
155156 ASSERT ((* cur_out / sectorsize ) == (* cur_out + LZO_LEN - 1 ) / sectorsize );
156157
157- cur_folio = out_folios [* cur_out / PAGE_SIZE ];
158+ cur_folio = out_folios [* cur_out >> min_folio_shift ];
158159 /* Allocate a new page */
159160 if (!cur_folio ) {
160161 cur_folio = btrfs_alloc_compr_folio (fs_info );
161162 if (!cur_folio )
162163 return - ENOMEM ;
163- out_folios [* cur_out / PAGE_SIZE ] = cur_folio ;
164+ out_folios [* cur_out >> min_folio_shift ] = cur_folio ;
164165 }
165166
166- kaddr = kmap_local_folio (cur_folio , 0 );
167- write_compress_length (kaddr + offset_in_page (* cur_out ),
168- compressed_size );
167+ kaddr = kmap_local_folio (cur_folio , offset_in_folio (cur_folio , * cur_out ));
168+ write_compress_length (kaddr , compressed_size );
169169 * cur_out += LZO_LEN ;
170170
171171 orig_out = * cur_out ;
@@ -177,20 +177,20 @@ static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,
177177
178178 kunmap_local (kaddr );
179179
180- if ((* cur_out / PAGE_SIZE ) >= max_nr_folio )
180+ if ((* cur_out >> min_folio_shift ) >= max_nr_folio )
181181 return - E2BIG ;
182182
183- cur_folio = out_folios [* cur_out / PAGE_SIZE ];
183+ cur_folio = out_folios [* cur_out >> min_folio_shift ];
184184 /* Allocate a new page */
185185 if (!cur_folio ) {
186186 cur_folio = btrfs_alloc_compr_folio (fs_info );
187187 if (!cur_folio )
188188 return - ENOMEM ;
189- out_folios [* cur_out / PAGE_SIZE ] = cur_folio ;
189+ out_folios [* cur_out >> min_folio_shift ] = cur_folio ;
190190 }
191191 kaddr = kmap_local_folio (cur_folio , 0 );
192192
193- memcpy (kaddr + offset_in_page ( * cur_out ),
193+ memcpy (kaddr + offset_in_folio ( cur_folio , * cur_out ),
194194 compressed_data + * cur_out - orig_out , copy_len );
195195
196196 * cur_out += copy_len ;
@@ -221,6 +221,7 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
221221 struct btrfs_fs_info * fs_info = inode -> root -> fs_info ;
222222 struct workspace * workspace = list_entry (ws , struct workspace , list );
223223 const u32 sectorsize = fs_info -> sectorsize ;
224+ const u32 min_folio_size = btrfs_min_folio_size (fs_info );
224225 struct address_space * mapping = inode -> vfs_inode .i_mapping ;
225226 struct folio * folio_in = NULL ;
226227 char * sizes_ptr ;
@@ -287,8 +288,8 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
287288 goto out ;
288289 }
289290
290- /* Check if we have reached page boundary */
291- if (PAGE_ALIGNED (cur_in )) {
291+ /* Check if we have reached folio boundary. */
292+ if (IS_ALIGNED (cur_in , min_folio_size )) {
292293 folio_put (folio_in );
293294 folio_in = NULL ;
294295 }
@@ -305,7 +306,7 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
305306out :
306307 if (folio_in )
307308 folio_put (folio_in );
308- * out_folios = DIV_ROUND_UP (cur_out , PAGE_SIZE );
309+ * out_folios = DIV_ROUND_UP (cur_out , min_folio_size );
309310 return ret ;
310311}
311312
@@ -317,15 +318,16 @@ int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
317318static void copy_compressed_segment (struct compressed_bio * cb ,
318319 char * dest , u32 len , u32 * cur_in )
319320{
321+ struct btrfs_fs_info * fs_info = cb_to_fs_info (cb );
322+ const u32 min_folio_shift = PAGE_SHIFT + fs_info -> block_min_order ;
320323 u32 orig_in = * cur_in ;
321324
322325 while (* cur_in < orig_in + len ) {
323- struct folio * cur_folio ;
324- u32 copy_len = min_t (u32 , PAGE_SIZE - offset_in_page ( * cur_in ) ,
325- orig_in + len - * cur_in );
326+ struct folio * cur_folio = cb -> compressed_folios [ * cur_in >> min_folio_shift ] ;
327+ u32 copy_len = min_t (u32 , orig_in + len - * cur_in ,
328+ folio_size ( cur_folio ) - offset_in_folio ( cur_folio , * cur_in ) );
326329
327330 ASSERT (copy_len );
328- cur_folio = cb -> compressed_folios [* cur_in / PAGE_SIZE ];
329331
330332 memcpy_from_folio (dest + * cur_in - orig_in , cur_folio ,
331333 offset_in_folio (cur_folio , * cur_in ), copy_len );
@@ -339,6 +341,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
339341 struct workspace * workspace = list_entry (ws , struct workspace , list );
340342 const struct btrfs_fs_info * fs_info = cb -> bbio .inode -> root -> fs_info ;
341343 const u32 sectorsize = fs_info -> sectorsize ;
344+ const u32 min_folio_shift = PAGE_SHIFT + fs_info -> block_min_order ;
342345 char * kaddr ;
343346 int ret ;
344347 /* Compressed data length, can be unaligned */
@@ -385,10 +388,10 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
385388 */
386389 ASSERT (cur_in / sectorsize ==
387390 (cur_in + LZO_LEN - 1 ) / sectorsize );
388- cur_folio = cb -> compressed_folios [cur_in / PAGE_SIZE ];
391+ cur_folio = cb -> compressed_folios [cur_in >> min_folio_shift ];
389392 ASSERT (cur_folio );
390393 kaddr = kmap_local_folio (cur_folio , 0 );
391- seg_len = read_compress_length (kaddr + offset_in_page ( cur_in ));
394+ seg_len = read_compress_length (kaddr + offset_in_folio ( cur_folio , cur_in ));
392395 kunmap_local (kaddr );
393396 cur_in += LZO_LEN ;
394397