11 changes: 8 additions & 3 deletions compiler/rustc_codegen_cranelift/src/constant.rs
@@ -5,7 +5,9 @@ use std::cmp::Ordering;
use cranelift_module::*;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{AllocId, GlobalAlloc, Scalar, read_target_uint};
use rustc_middle::mir::interpret::{
AllocId, GlobalAlloc, PointerArithmetic, Scalar, read_target_uint,
};
use rustc_middle::ty::{ExistentialTraitRef, ScalarInt};

use crate::prelude::*;
@@ -138,8 +140,11 @@ pub(crate) fn codegen_const_value<'tcx>(
let base_addr = match fx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
if alloc.inner().len() == 0 {
assert_eq!(offset, Size::ZERO);
fx.bcx.ins().iconst(fx.pointer_type, alloc.inner().align.bytes() as i64)
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
fx.bcx.ins().iconst(
fx.pointer_type,
fx.tcx.truncate_to_target_usize(val) as i64,
)
} else {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
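All three backend changes compute the same address: for a zero-length allocation the base is just the allocation's alignment (a dangling but well-aligned pointer), and any constant offset is now folded in with wrapping arithmetic and truncated to the target's pointer width instead of being asserted to be zero. A minimal stand-alone sketch of that arithmetic — the helper below is illustrative, not compiler code, and the mask merely models `truncate_to_target_usize` for a given pointer width:

fn zst_const_address(align_bytes: u64, offset_bytes: u64, pointer_bytes: u32) -> u64 {
    // The "allocation" has no bytes, so its base address is just its alignment
    // (i.e. ptr::dangling). The offset is folded in with wrapping arithmetic...
    let val = align_bytes.wrapping_add(offset_bytes);
    // ...and then truncated to the target's usize width (a no-op on 64-bit targets).
    if pointer_bytes >= 8 { val } else { val & ((1u64 << (pointer_bytes * 8)) - 1) }
}

fn main() {
    // &() is 1-aligned; +2 bytes gives address 3 (const A in the new test).
    assert_eq!(zst_const_address(1, 2, 8), 3);
    // &Foo is 4-aligned; +usize::MAX wraps around to 3 on a 64-bit target (const B).
    assert_eq!(zst_const_address(4, u64::MAX, 8), 3);
    // &Foo is 4-aligned; +2 gives 6 (const C).
    assert_eq!(zst_const_address(4, 2, 8), 6);
}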
6 changes: 3 additions & 3 deletions compiler/rustc_codegen_gcc/src/common.rs
@@ -5,7 +5,7 @@ use rustc_codegen_ssa::traits::{
BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
};
use rustc_middle::mir::Mutability;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::layout::LayoutOf;

use crate::context::CodegenCx;
@@ -247,8 +247,8 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
assert_eq!(offset.bytes(), 0);
let val = self.const_usize(alloc.inner().align.bytes());
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
let val = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
self.context.new_cast(None, val, ty)
} else {
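The `truncate_to_target_usize` call is what keeps this correct on targets whose pointers are narrower than the `u64` the compiler carries these values in: the wrap-around has to happen at the target's usize width, not at 64 bits. A hedged illustration for a hypothetical 32-bit target, written as plain Rust rather than compiler API:

fn main() {
    let align: u64 = 4;                        // &Foo is 4-aligned
    let offset: u64 = u32::MAX as u64;         // what usize::MAX would be on a 32-bit target
    let full = align.wrapping_add(offset);     // 0x1_0000_0003 when computed in u64
    let truncated = full & (u32::MAX as u64);  // truncate to the 32-bit pointer width
    assert_eq!(full, 0x1_0000_0003);
    assert_eq!(truncated, 3);                  // the wrap-around lands on 3, as on 64-bit
}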
6 changes: 3 additions & 3 deletions compiler/rustc_codegen_llvm/src/common.rs
@@ -12,7 +12,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hashes::Hash128;
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::DllImport;
use tracing::debug;
@@ -281,8 +281,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
assert_eq!(offset.bytes(), 0);
let llval = self.const_usize(alloc.inner().align.bytes());
let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
let llval = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
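In cg_llvm (and cg_gcc above) the freshly computed usize is then either handed back as a plain integer constant or cast to a pointer, depending on the scalar's primitive kind. A rough stand-alone model of that branch — the enum and function names are illustrative only, not the real `ConstCodegenMethods` API:

enum Primitive { Pointer, Int }

fn emit_zst_const(prim: Primitive, addr: u64) -> String {
    match prim {
        // the real backends call LLVMConstIntToPtr / gcc's new_cast here
        Primitive::Pointer => format!("inttoptr {addr}"),
        // otherwise the integer constant is used directly
        Primitive::Int => format!("{addr}"),
    }
}

fn main() {
    assert_eq!(emit_zst_const(Primitive::Pointer, 3), "inttoptr 3");
    assert_eq!(emit_zst_const(Primitive::Int, 3), "3");
}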
18 changes: 18 additions & 0 deletions tests/ui/consts/zst_no_llvm_alloc.rs
@@ -1,10 +1,21 @@
//@ run-pass

// We need some non-1 alignment to test we use the alignment of the type in the compiler.
#[repr(align(4))]
struct Foo;

static FOO: Foo = Foo;

// This tests for regression of https://github.com/rust-lang/rust/issues/147516
//
// THe compiler will codegen `&Zst` without creating a real allocation, just a properly aligned
Review comment (Member) — suggested change: fix the typo in the line above, "THe compiler" → "The compiler".

// `usize` (i.e., ptr::dangling). However, code can add an arbitrary offset from that base
// allocation. We confirm here that we correctly codegen that offset combined with the necessary
// alignment of the base &() as a 1-ZST and &Foo as a 4-ZST.
const A: *const () = (&() as *const ()).wrapping_byte_add(2);
const B: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(usize::MAX);
const C: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(2);

fn main() {
// There's no stable guarantee that these are true.
// However, we want them to be true so that our LLVM IR and runtime are a bit faster:
@@ -15,6 +26,13 @@ fn main() {
let x: &'static Foo = &Foo;
assert_eq!(x as *const Foo as usize, 4);

// * A 1-aligned ZST (1-ZST) is placed at 0x1. Then offsetting that by 2 results in 3.
// * Foo is a 4-aligned ZST, so is placed at 0x4. +2 = 6
// * Foo is a 4-aligned ZST, so is placed at 0x4. +usize::MAX = -1 (same bit pattern) = 3
assert_eq!(A.addr(), 3);
assert_eq!(B.addr(), 3);
assert_eq!(C.addr(), 6);

// The exact addresses returned by these library functions are not necessarily stable guarantees
// but for now we assert that we're still matching.
#[allow(dangling_pointers_from_temporaries)]