@@ -639,6 +639,42 @@ static void SlowCopyContainerAnnotations(uptr old_storage_beg,
   }
 }
 
+// This function is basically the same as SlowCopyContainerAnnotations,
+// but it goes through the elements in reverse order.
+static void SlowRCopyContainerAnnotations(uptr old_storage_beg,
+                                          uptr old_storage_end,
+                                          uptr new_storage_beg,
+                                          uptr new_storage_end) {
+  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+  uptr new_internal_beg = RoundDownTo(new_storage_beg, granularity);
+  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+  uptr old_ptr = old_storage_end;
+  uptr new_ptr = new_storage_end;
+
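+  // Walk the new buffer granule by granule, starting from its last byte and
+  // moving towards new_storage_beg, deriving each granule's annotation from
+  // the corresponding bytes of the old buffer.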
+  while (new_ptr > new_storage_beg) {
+    uptr granule_begin = RoundDownTo(new_ptr - 1, granularity);
+    uptr unpoisoned_bytes = 0;
+
+    for (; new_ptr != granule_begin && new_ptr != new_storage_beg;
+         --new_ptr, --old_ptr) {
+      if (unpoisoned_bytes == 0 && !AddressIsPoisoned(old_ptr - 1)) {
+        unpoisoned_bytes = new_ptr - granule_begin;
+      }
+    }
+
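+    // If this is the last granule and the byte right past the new container
+    // is not poisoned, the granule has to stay fully addressable, so its
+    // current annotation is kept.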
+    if (new_ptr >= new_internal_end && !AddressIsPoisoned(new_storage_end)) {
+      continue;
+    }
+
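+    // Annotate the granule with the accessible prefix that was found
+    // (possibly empty). If every copied byte is poisoned and the granule also
+    // holds memory from in front of the container, shrink the current
+    // annotation so that at most the bytes before new_storage_beg stay
+    // accessible.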
+    if (granule_begin == new_ptr || unpoisoned_bytes != 0) {
+      AnnotateContainerGranuleAccessibleBytes(granule_begin, unpoisoned_bytes);
+    } else if (!AddressIsPoisoned(new_storage_beg)) {
+      AnnotateContainerGranuleAccessibleBytes(granule_begin,
+                                              new_storage_beg - granule_begin);
+    }
+  }
+}
+
 // This function copies ASan memory annotations (poisoned/unpoisoned states)
 // from one buffer to another.
 // Its main purpose is to help with relocating trivially relocatable objects,
@@ -678,9 +714,61 @@ void __sanitizer_copy_contiguous_container_annotations(
         &stack);
   }
 
-  if (old_storage_beg == old_storage_end)
+  if (old_storage_beg == old_storage_end || old_storage_beg == new_storage_beg)
     return;
+  // The only edge cases involve edge granules when the container starts or
+  // ends within a granule. We already know that the container's start and end
+  // points lie in different granules.
+  uptr old_external_end = RoundUpTo(old_storage_end, granularity);
+  if (old_storage_beg < new_storage_beg &&
+      new_storage_beg <= old_external_end) {
+    // In this case we have to copy the elements in reverse order, because the
+    // destination buffer starts in the middle of the source buffer (or shares
+    // its first granule with it).
+    // It may still be possible to optimize this, but the reverse order has to
+    // be kept.
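+    // If the buffers are not identically aligned within their granules, or
+    // the new container fits in a single granule, shadow bytes cannot be
+    // copied directly and the byte-by-byte reversed copy is used instead.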
+    if (old_storage_beg % granularity != new_storage_beg % granularity ||
+        WithinOneGranule(new_storage_beg, new_storage_end)) {
+      SlowRCopyContainerAnnotations(old_storage_beg, old_storage_end,
+                                    new_storage_beg, new_storage_end);
+      return;
+    }
 
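+    // Otherwise the annotations are rewritten back to front: the last granule
+    // is handled first, then the internal granules (via the shadow memory),
+    // then the first granule, so that the old annotations are read before
+    // they can be overwritten.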
+    uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+    if (new_internal_end != new_storage_end &&
+        AddressIsPoisoned(new_storage_end)) {
+      // Last granule
+      uptr old_internal_end = RoundDownTo(old_storage_end, granularity);
+      if (AddressIsPoisoned(old_storage_end)) {
+        CopyGranuleAnnotation(new_internal_end, old_internal_end);
+      } else {
+        AnnotateContainerGranuleAccessibleBytes(
+            new_internal_end, old_storage_end - old_internal_end);
+      }
+    }
+
+    uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
+    if (new_internal_end > new_internal_beg) {
+      uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
+      __builtin_memmove((u8 *)MemToShadow(new_internal_beg),
+                        (u8 *)MemToShadow(old_internal_beg),
+                        (new_internal_end - new_internal_beg) / granularity);
+    }
+
+    if (new_internal_beg != new_storage_beg) {
+      // First granule
+      uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
+      uptr old_external_beg = RoundDownTo(old_storage_beg, granularity);
+      if (!AddressIsPoisoned(old_storage_beg)) {
+        CopyGranuleAnnotation(new_external_beg, old_external_beg);
+      } else if (!AddressIsPoisoned(new_storage_beg)) {
+        AnnotateContainerGranuleAccessibleBytes(
+            new_external_beg, new_storage_beg - new_external_beg);
+      }
+    }
+    return;
+  }
+
+  // Simple copy of annotations of all internal granules.
   if (old_storage_beg % granularity != new_storage_beg % granularity ||
       WithinOneGranule(new_storage_beg, new_storage_end)) {
     SlowCopyContainerAnnotations(old_storage_beg, old_storage_end,
@@ -689,16 +777,6 @@ void __sanitizer_copy_contiguous_container_annotations(
   }
 
   uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
-  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
-  if (new_internal_end > new_internal_beg) {
-    uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
-    __builtin_memcpy((u8 *)MemToShadow(new_internal_beg),
-                     (u8 *)MemToShadow(old_internal_beg),
-                     (new_internal_end - new_internal_beg) / granularity);
-  }
-  // The only remaining cases involve edge granules when the container starts or
-  // ends within a granule. We already know that the container's start and end
-  // points lie in different granules.
   if (new_internal_beg != new_storage_beg) {
     // First granule
     uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
@@ -710,6 +788,15 @@ void __sanitizer_copy_contiguous_container_annotations(
           new_external_beg, new_storage_beg - new_external_beg);
     }
   }
+
+  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+  if (new_internal_end > new_internal_beg) {
+    uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
+    __builtin_memmove((u8 *)MemToShadow(new_internal_beg),
+                      (u8 *)MemToShadow(old_internal_beg),
+                      (new_internal_end - new_internal_beg) / granularity);
+  }
+
   if (new_internal_end != new_storage_end &&
       AddressIsPoisoned(new_storage_end)) {
     // Last granule
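
For context, a minimal sketch (not part of this patch) of how a container implementation might call the new interface when relocating trivially relocatable elements; relocate_storage and its parameters are illustrative only, and the call assumes the bytes themselves have already been moved:

#include <sanitizer/common_interface_defs.h>
#include <cstring>

// Hypothetical helper: relocate [old_beg, old_end) to new_beg and make the
// destination's container annotations match the source's.
static void relocate_storage(char *old_beg, char *old_end, char *new_beg) {
  size_t size = static_cast<size_t>(old_end - old_beg);
  memmove(new_beg, old_beg, size);  // the buffers may overlap
  __sanitizer_copy_contiguous_container_annotations(old_beg, old_end, new_beg,
                                                    new_beg + size);
}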