
Commit 93ab376

Add support for overlapping containers
Adds support for overlapping containers. Adds a test case for that.
1 parent 1c89e0f commit 93ab376
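
For orientation, a minimal usage sketch of the interface this commit extends (not part of the commit): copying ASan container annotations when the destination range overlaps the source range, e.g. after shifting a container a few bytes within the same allocation. It assumes an -fsanitize=address build and the declarations from <sanitizer/asan_interface.h> and <sanitizer/common_interface_defs.h>; the buffer size and offsets are illustrative only.

// Sketch only: overlapping source/destination ranges inside one allocation.
// Build with: clang++ -fsanitize=address example.cpp
#include <sanitizer/asan_interface.h>
#include <sanitizer/common_interface_defs.h>

int main() {
  char *buf = new char[64];
  char *old_beg = buf + 4, *old_end = buf + 36;
  char *new_beg = buf + 12, *new_end = buf + 44;  // overlaps [old_beg, old_end)

  // Mark the tail of the "old" container range as unused (poisoned).
  __asan_poison_memory_region(old_beg + 16, old_end - (old_beg + 16));

  // With this commit, the call below also handles overlapping ranges;
  // it copies the poisoned/unpoisoned pattern onto the "new" range.
  __sanitizer_copy_contiguous_container_annotations(old_beg, old_end, new_beg,
                                                    new_end);

  __asan_unpoison_memory_region(buf, 64);
  delete[] buf;
  return 0;
}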

2 files changed: +173 -16 lines changed


compiler-rt/lib/asan/asan_poisoning.cpp

Lines changed: 98 additions & 11 deletions
@@ -639,6 +639,42 @@ static void SlowCopyContainerAnnotations(uptr old_storage_beg,
   }
 }
 
+// This function is basically the same as SlowCopyContainerAnnotations,
+// but goes through elements in reversed order
+static void SlowRCopyContainerAnnotations(uptr old_storage_beg,
+                                          uptr old_storage_end,
+                                          uptr new_storage_beg,
+                                          uptr new_storage_end) {
+  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+  uptr new_internal_beg = RoundDownTo(new_storage_beg, granularity);
+  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+  uptr old_ptr = old_storage_end;
+  uptr new_ptr = new_storage_end;
+
+  while (new_ptr > new_storage_beg) {
+    uptr granule_begin = RoundDownTo(new_ptr - 1, granularity);
+    uptr unpoisoned_bytes = 0;
+
+    for (; new_ptr != granule_begin && new_ptr != new_storage_beg;
+         --new_ptr, --old_ptr) {
+      if (unpoisoned_bytes == 0 && !AddressIsPoisoned(old_ptr - 1)) {
+        unpoisoned_bytes = new_ptr - granule_begin;
+      }
+    }
+
+    if (new_ptr >= new_internal_end && !AddressIsPoisoned(new_storage_end)) {
+      continue;
+    }
+
+    if (granule_begin == new_ptr || unpoisoned_bytes != 0) {
+      AnnotateContainerGranuleAccessibleBytes(granule_begin, unpoisoned_bytes);
+    } else if (!AddressIsPoisoned(new_storage_beg)) {
+      AnnotateContainerGranuleAccessibleBytes(granule_begin,
+                                              new_storage_beg - granule_begin);
+    }
+  }
+}
+
 // This function copies ASan memory annotations (poisoned/unpoisoned states)
 // from one buffer to another.
 // It's main purpose is to help with relocating trivially relocatable objects,
@@ -678,9 +714,61 @@ void __sanitizer_copy_contiguous_container_annotations(
         &stack);
   }
 
-  if (old_storage_beg == old_storage_end)
+  if (old_storage_beg == old_storage_end || old_storage_beg == new_storage_beg)
     return;
+  // The only edge cases involve edge granules when the container starts or
+  // ends within a granule. We already know that the container's start and end
+  // points lie in different granules.
+  uptr old_external_end = RoundUpTo(old_storage_end, granularity);
+  if (old_storage_beg < new_storage_beg &&
+      new_storage_beg <= old_external_end) {
+    // In this case, we have to copy elements in reversed order, because
+    // destination buffer starts in the middle of the source buffer (or shares
+    // first granule with it).
+    // It still may be possible to optimize, but reversed order has to be kept.
+    if (old_storage_beg % granularity != new_storage_beg % granularity ||
+        WithinOneGranule(new_storage_beg, new_storage_end)) {
+      SlowRCopyContainerAnnotations(old_storage_beg, old_storage_end,
+                                    new_storage_beg, new_storage_end);
+      return;
+    }
 
+    uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+    if (new_internal_end != new_storage_end &&
+        AddressIsPoisoned(new_storage_end)) {
+      // Last granule
+      uptr old_internal_end = RoundDownTo(old_storage_end, granularity);
+      if (AddressIsPoisoned(old_storage_end)) {
+        CopyGranuleAnnotation(new_internal_end, old_internal_end);
+      } else {
+        AnnotateContainerGranuleAccessibleBytes(
+            new_internal_end, old_storage_end - old_internal_end);
+      }
+    }
+
+    uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
+    if (new_internal_end > new_internal_beg) {
+      uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
+      __builtin_memmove((u8 *)MemToShadow(new_internal_beg),
+                        (u8 *)MemToShadow(old_internal_beg),
+                        (new_internal_end - new_internal_beg) / granularity);
+    }
+
+    if (new_internal_beg != new_storage_beg) {
+      // First granule
+      uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
+      uptr old_external_beg = RoundDownTo(old_storage_beg, granularity);
+      if (!AddressIsPoisoned(old_storage_beg)) {
+        CopyGranuleAnnotation(new_external_beg, old_external_beg);
+      } else if (!AddressIsPoisoned(new_storage_beg)) {
+        AnnotateContainerGranuleAccessibleBytes(
+            new_external_beg, new_storage_beg - new_external_beg);
+      }
+    }
+    return;
+  }
+
+  // Simple copy of annotations of all internal granules.
   if (old_storage_beg % granularity != new_storage_beg % granularity ||
       WithinOneGranule(new_storage_beg, new_storage_end)) {
     SlowCopyContainerAnnotations(old_storage_beg, old_storage_end,
@@ -689,16 +777,6 @@ void __sanitizer_copy_contiguous_container_annotations(
   }
 
   uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
-  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
-  if (new_internal_end > new_internal_beg) {
-    uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
-    __builtin_memcpy((u8 *)MemToShadow(new_internal_beg),
-                     (u8 *)MemToShadow(old_internal_beg),
-                     (new_internal_end - new_internal_beg) / granularity);
-  }
-  // The only remaining cases involve edge granules when the container starts or
-  // ends within a granule. We already know that the container's start and end
-  // points lie in different granules.
   if (new_internal_beg != new_storage_beg) {
     // First granule
     uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
@@ -710,6 +788,15 @@ void __sanitizer_copy_contiguous_container_annotations(
           new_external_beg, new_storage_beg - new_external_beg);
     }
   }
+
+  uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
+  if (new_internal_end > new_internal_beg) {
+    uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
+    __builtin_memmove((u8 *)MemToShadow(new_internal_beg),
+                      (u8 *)MemToShadow(old_internal_beg),
+                      (new_internal_end - new_internal_beg) / granularity);
+  }
+
   if (new_internal_end != new_storage_end &&
       AddressIsPoisoned(new_storage_end)) {
     // Last granule
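
Why the new overlapping path copies back to front: when the destination begins inside the source, a forward pass would overwrite source annotations before they have been read, which is the same reason the granule-wise copy above switches from __builtin_memcpy to __builtin_memmove. The stand-alone illustration below is not ASan internals; the array and the CopyBackward helper are stand-ins for per-granule shadow values, used only to show that the reversed loop preserves the source pattern.

#include <cassert>
#include <cstddef>

// Copy n entries from src to dst within the same array, highest index first,
// so an overlapping destination with dst > src never clobbers unread input.
void CopyBackward(unsigned char *shadow, size_t src, size_t dst, size_t n) {
  for (size_t i = n; i > 0; --i)
    shadow[dst + i - 1] = shadow[src + i - 1];
}

int main() {
  unsigned char shadow[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  CopyBackward(shadow, /*src=*/0, /*dst=*/2, /*n=*/6);  // destination overlaps source
  for (size_t i = 0; i < 6; ++i)
    assert(shadow[2 + i] == static_cast<unsigned char>(i + 1));  // pattern intact
  return 0;
}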

compiler-rt/test/asan/TestCases/move_container_annotations.cpp

Lines changed: 75 additions & 5 deletions
@@ -111,7 +111,7 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
         assert(!__asan_address_is_poisoned(cur));
       }
     }
-    //In every granule, poisoned memory should be after last expected unpoisoned.
+
     char *next;
     for (cur = new_beg; cur + kGranularity <= new_end; cur = next) {
       next = RoundUp(cur + 1);
@@ -124,15 +124,14 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
       }
     }
     // [cur; new_end) is not checked yet.
-    // If new_buffer were not poisoned, it cannot be poisoned and we can ignore
-    // a separate check.
+    // If new_buffer were not poisoned, it cannot be poisoned.
     // If new_buffer were poisoned, it should be same as earlier.
-    if (cur < new_end && poison_new) {
+    if (cur < new_end) {
       size_t unpoisoned = count_unpoisoned(poison_states, new_end - cur);
       if (unpoisoned > 0) {
         assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
       }
-      if (cur + unpoisoned < new_end) {
+      if (cur + unpoisoned < new_end && poison_new) {
         assert(__asan_address_is_poisoned(cur + unpoisoned));
       }
     }
@@ -148,13 +147,84 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
   delete[] new_buffer;
 }
 
+void TestOverlappingContainers(size_t capacity, size_t off_old, size_t off_new,
+                               int poison_buffers) {
+  size_t buffer_size = capacity + off_old + off_new + kGranularity * 3;
+  char *buffer = new char[buffer_size];
+  char *buffer_end = buffer + buffer_size;
+  bool poison_whole = poison_buffers % 2 == 1;
+  bool poison_new = poison_buffers / 2 == 1;
+  char *old_beg = buffer + kGranularity + off_old;
+  char *new_beg = buffer + kGranularity + off_new;
+  char *old_end = old_beg + capacity;
+  char *new_end = new_beg + capacity;
+
+  for (int i = 0; i < 35; i++) {
+    if (poison_whole)
+      __asan_poison_memory_region(buffer, buffer_size);
+    if (poison_new)
+      __asan_poison_memory_region(new_beg, new_end - new_beg);
+
+    RandomPoison(old_beg, old_end);
+    std::deque<int> poison_states = GetPoisonedState(old_beg, old_end);
+    __sanitizer_copy_contiguous_container_annotations(old_beg, old_end, new_beg,
+                                                      new_end);
+    // This variable is used only when buffer ends in the middle of a granule.
+    bool can_modify_last_granule = __asan_address_is_poisoned(new_end);
+
+    // If whole buffer were poisoned, expected state of memory before first container
+    // is undetermined.
+    // If old buffer were not poisoned, that memory should still be unpoisoned.
+    char *cur;
+    if (!poison_whole) {
+      for (cur = buffer; cur < old_beg && cur < new_beg; ++cur) {
+        assert(!__asan_address_is_poisoned(cur));
+      }
+    }
+
+    // Memory after end of both containers should be the same as at the beginning.
+    for (cur = (old_end > new_end) ? old_end : new_end; cur < buffer_end;
+         ++cur) {
+      assert(__asan_address_is_poisoned(cur) == poison_whole);
+    }
+
+    char *next;
+    for (cur = new_beg; cur + kGranularity <= new_end; cur = next) {
+      next = RoundUp(cur + 1);
+      size_t unpoisoned = count_unpoisoned(poison_states, next - cur);
+      if (unpoisoned > 0) {
+        assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
+      }
+      if (cur + unpoisoned < next) {
+        assert(__asan_address_is_poisoned(cur + unpoisoned));
+      }
+    }
+    // [cur; new_end) is not checked yet, if container ends in the middle of a granule.
+    // It can be poisoned, only if non-container bytes in that granule were poisoned.
+    // Otherwise, it should be unpoisoned.
+    if (cur < new_end) {
+      size_t unpoisoned = count_unpoisoned(poison_states, new_end - cur);
+      if (unpoisoned > 0) {
+        assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
+      }
+      if (cur + unpoisoned < new_end && can_modify_last_granule) {
+        assert(__asan_address_is_poisoned(cur + unpoisoned));
+      }
+    }
+  }
+
+  __asan_unpoison_memory_region(buffer, buffer_size);
+  delete[] buffer;
+}
+
 int main(int argc, char **argv) {
   int n = argc == 1 ? 64 : atoi(argv[1]);
   for (size_t j = 0; j < kGranularity + 2; j++) {
     for (size_t k = 0; k < kGranularity + 2; k++) {
       for (int i = 0; i <= n; i++) {
         for (int poison = 0; poison < 4; ++poison) {
           TestNonOverlappingContainers(i, j, k, poison);
+          TestOverlappingContainers(i, j, k, poison);
         }
       }
     }
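
The test's per-granule assertions lean on the fact that ASan can mark a granule as partially addressable: within one granule, an unpoisoned prefix may be followed by a poisoned suffix. Below is a small sketch of what that looks like, independent of the test above; it assumes the standard container annotation API from <sanitizer/common_interface_defs.h>, an -fsanitize=address build, and the default 8-byte shadow granularity.

#include <cassert>
#include <sanitizer/asan_interface.h>
#include <sanitizer/common_interface_defs.h>

int main() {
  // operator new[] returns storage aligned well past the 8-byte granule size
  // assumed here, so [beg, end) is granule-aligned.
  char *beg = new char[32];
  char *end = beg + 32;

  // Mark only the first 13 bytes as in use; granule [beg+8, beg+16) becomes a
  // partially addressable edge granule like the ones the test checks.
  __sanitizer_annotate_contiguous_container(beg, end, /*old_mid=*/end,
                                            /*new_mid=*/beg + 13);
  assert(!__asan_address_is_poisoned(beg + 12));  // last in-use byte
  assert(__asan_address_is_poisoned(beg + 13));   // first unused byte

  // Restore full addressability before freeing the storage.
  __sanitizer_annotate_contiguous_container(beg, end, /*old_mid=*/beg + 13,
                                            /*new_mid=*/end);
  delete[] beg;
  return 0;
}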
