From b85cfea046403962b3b9d535b0b838fc87809ff0 Mon Sep 17 00:00:00 2001 From: Daniel Date: Sun, 23 Mar 2025 14:25:21 +0200 Subject: [PATCH] Added comments when using unsafe. --- nalgebra-glm/src/common.rs | 4 ++++ nalgebra-lapack/src/cholesky.rs | 4 ++++ nalgebra-lapack/src/eigen.rs | 2 ++ nalgebra-lapack/src/generalized_eigenvalues.rs | 2 ++ nalgebra-lapack/src/hessenberg.rs | 3 +++ nalgebra-lapack/src/lu.rs | 2 ++ nalgebra-lapack/src/qr.rs | 4 ++++ nalgebra-lapack/src/qz.rs | 2 ++ nalgebra-lapack/src/schur.rs | 2 ++ nalgebra-lapack/src/svd.rs | 4 +++- nalgebra-lapack/src/symmetric_eigen.rs | 2 ++ nalgebra-sparse/src/csc.rs | 2 ++ nalgebra-sparse/src/csr.rs | 2 ++ nalgebra-sparse/src/factorization/cholesky.rs | 2 ++ nalgebra-sparse/src/ops/serial/cs.rs | 2 ++ 15 files changed, 38 insertions(+), 1 deletion(-) diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index f41fafbba..a2dc5a83f 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -133,6 +133,8 @@ pub fn clamp_vec( /// /// The floating-point value's bit-level representation is preserved. /// +/// Using unsafe is sound because the bitwise representation of f32 fits in i32 +/// /// # See also: /// /// * [`float_bits_to_int_vec()`] @@ -167,6 +169,8 @@ pub fn float_bits_to_int_vec(v: &TVec) -> TVec { /// /// The floating-point value's bit-level representation is preserved. /// +/// Using unsafe is sound because the bitwise representation of f32 fits in i32 +/// /// # See also: /// /// * [`float_bits_to_int()`] diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 462cdddb3..c3b8c143b 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -161,6 +161,7 @@ where lapack_check!(info); // Copy lower triangle to upper triangle. 
+ // Using unsafe to ensure the bounds i and j are always valid indices. for i in 0..dim { for j in i + 1..dim { unsafe { *self.l.get_unchecked_mut((i, j)) = *self.l.get_unchecked((j, i)) }; @@ -196,6 +197,9 @@ pub trait CholeskyScalar: Scalar + Copy { fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); } +/// This macro uses unsafe to manually ensure memory safety for external functions +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur +/// Also, it helps with pointer dereferencing macro_rules! cholesky_scalar_impl( ($N: ty, $xpotrf: path, $xpotrs: path, $xpotri: path) => ( impl CholeskyScalar for $N { diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 6ccf8804e..3456366b0 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -370,6 +370,8 @@ pub trait EigenScalar: Scalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xgeev +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! real_eigensystem_scalar_impl ( ($N: ty, $xgeev: path) => ( impl EigenScalar for $N { diff --git a/nalgebra-lapack/src/generalized_eigenvalues.rs b/nalgebra-lapack/src/generalized_eigenvalues.rs index 33312868f..7852b8f73 100644 --- a/nalgebra-lapack/src/generalized_eigenvalues.rs +++ b/nalgebra-lapack/src/generalized_eigenvalues.rs @@ -289,6 +289,8 @@ pub trait GeneralizedEigenScalar: Scalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xggev +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules!
generalized_eigen_scalar_impl ( ($N: ty, $xggev: path) => ( impl GeneralizedEigenScalar for $N { diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 80117afd5..9d7f073be 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -187,6 +187,8 @@ pub trait HessenbergReal: HessenbergScalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xgehrd +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! hessenberg_scalar_impl( ($N: ty, $xgehrd: path) => ( impl HessenbergScalar for $N { @@ -209,6 +211,7 @@ macro_rules! hessenberg_scalar_impl( ) ); +/// This macro uses unsafe to manually ensure memory safety for external function xorghr macro_rules! hessenberg_real_impl( ($N: ty, $xorghr: path) => ( impl HessenbergReal for $N { diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index f38e675ca..d4fc87b1d 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -344,6 +344,8 @@ pub trait LUScalar: Scalar + Copy { fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external functions +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! lup_scalar_impl( ($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => ( impl LUScalar for $N { diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 404642155..a185c9b1c 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -217,6 +217,8 @@ pub trait QRReal: QRScalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xgeqrf +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! 
qr_scalar_impl( ($N: ty, $xgeqrf: path) => ( impl QRScalar for $N { @@ -239,6 +241,8 @@ macro_rules! qr_scalar_impl( ) ); +/// This macro uses unsafe to manually ensure memory safety for external function xorgqr +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! qr_real_impl( ($N: ty, $xorgqr: path) => ( impl QRReal for $N { diff --git a/nalgebra-lapack/src/qz.rs b/nalgebra-lapack/src/qz.rs index ec113e970..748819f82 100644 --- a/nalgebra-lapack/src/qz.rs +++ b/nalgebra-lapack/src/qz.rs @@ -253,6 +253,8 @@ pub trait QZScalar: Scalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xgges +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! qz_scalar_impl ( ($N: ty, $xgges: path) => ( impl QZScalar for $N { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 1cc486f9b..b9ba0132f 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -202,6 +202,8 @@ pub trait SchurScalar: Scalar { ) -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xgees +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! real_eigensystem_scalar_impl ( ($N: ty, $xgees: path) => ( impl SchurScalar for $N { diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 1d2f2e248..7d879af7d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -105,7 +105,9 @@ macro_rules! 
svd_impl( let mut lwork = -1 as i32; let mut info = 0; let mut iwork = vec![0; 8 * cmp::min(nrows.value(), ncols.value())]; - + + // Using unsafe to manually ensure memory safety for external function lapack_func + // Rust cannot check the slices' or raw pointers' safety unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), lda, &mut s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(), diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 6fda636ca..10635906f 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -186,6 +186,8 @@ pub trait SymmetricEigenScalar: Scalar { -> i32; } +/// This macro uses unsafe to manually ensure memory safety for external function xsyev +/// For incorrectly sized and initialized matrices and arrays, undefined behavior will occur macro_rules! real_eigensystem_scalar_impl ( ($N: ty, $xsyev: path) => ( impl SymmetricEigenScalar for $N { diff --git a/nalgebra-sparse/src/csc.rs b/nalgebra-sparse/src/csc.rs index de4af43d6..aceba01e9 100644 --- a/nalgebra-sparse/src/csc.rs +++ b/nalgebra-sparse/src/csc.rs @@ -204,6 +204,8 @@ impl CscMatrix { true, ); + // Using unsafe for efficiency and performance reasons in a custom memory layout + // SparsityPattern assumes offset and indices are valid; may lead to undefined behavior match result { Ok(()) => { let pattern = unsafe { diff --git a/nalgebra-sparse/src/csr.rs b/nalgebra-sparse/src/csr.rs index 7a38aeeb0..3bb55964c 100644 --- a/nalgebra-sparse/src/csr.rs +++ b/nalgebra-sparse/src/csr.rs @@ -205,6 +205,8 @@ impl CsrMatrix { true, ); + // Using unsafe for efficiency and performance reasons in a custom memory layout + // SparsityPattern assumes offset and indices are valid; may lead to undefined behavior match result { Ok(()) => { let pattern = unsafe { diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs
index f84e621f1..e6cd22439 100644 --- a/nalgebra-sparse/src/factorization/cholesky.rs +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -197,6 +197,8 @@ impl CscCholesky { self.work_c.clear(); self.work_c.extend_from_slice(self.l_factor.col_offsets()); + // Using unsafe to ensure validity of raw pointers and slices obtained with get_unchecked + // Also, data integrity is enforced in case the matrix is not positive definite unsafe { for k in 0..n { // Scatter the k-th column of the original matrix with the values provided. diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index ec2690358..16b3c7589 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -44,6 +44,7 @@ where let alpha_aik = alpha.clone() * a_ik.clone(); for (j, b_kj) in b_lane_k.minor_indices().iter().zip(b_lane_k.values()) { // use a dense scatter vector to accumulate non-zeros quickly + // using unsafe to ensure *j index and get_unchecked memory access are always valid unsafe { *scratchpad_values.get_unchecked_mut(*j) += alpha_aik.clone() * b_kj.clone(); } @@ -51,6 +52,7 @@ where } //Get indices from C pattern and gather from the dense scratchpad_values + //Using unsafe to ensure *index is always valid let (indices, values) = c_lane_i.indices_and_values_mut(); values .iter_mut()