clippy: enforce safety block comments

BUG=b:316174930
TEST=none

Change-Id: I5c7811b2c548155aa003e4b71a54bbc16e2f2588
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5120567
Commit-Queue: Vikram Auradkar <auradkar@google.com>
Reviewed-by: Dennis Kempin <denniskempin@google.com>
This commit is contained in:
Vikram Auradkar 2023-12-12 20:59:50 +00:00 committed by crosvm LUCI
parent 90c2ff0432
commit 2768f223ee
241 changed files with 2668 additions and 578 deletions

View file

@ -25,4 +25,5 @@ rustflags = [
"-Aclippy::unreadable_literal",
"-Aclippy::useless_let_if_seq",
"-Aclippy::useless_transmute",
"-Dclippy::undocumented_unsafe_blocks",
]

View file

@ -258,6 +258,7 @@ impl AudioMemoryMapping {
warn!("Accessing unallocated region");
return &mut self.zero_buffer;
}
// SAFETY:
// safe because the region returned is owned by self.memory_mapping
unsafe { slice::from_raw_parts_mut(self.memory_mapping.as_ptr().add(offset), len) }
}

View file

@ -37,6 +37,7 @@ use std::cmp::min;
/// let layout = Layout::from_size_align(size, mem::align_of::<Header>()).unwrap();
/// let mut allocation = LayoutAllocation::zeroed(layout);
///
/// // SAFETY:
/// // Safe to obtain an exclusive reference because there are no other
/// // references to the allocation yet and all-zero is a valid bit pattern for
/// // our header.
@ -57,10 +58,9 @@ impl LayoutAllocation {
/// incompatible with its type, for example an uninitialized bool or enum.
pub fn uninitialized(layout: Layout) -> Self {
let ptr = if layout.size() > 0 {
unsafe {
// Safe as long as we guarantee layout.size() > 0.
alloc(layout)
}
// SAFETY:
// Safe as long as we guarantee layout.size() > 0.
unsafe { alloc(layout) }
} else {
layout.align() as *mut u8
};
@ -77,10 +77,9 @@ impl LayoutAllocation {
/// one of the fields has type NonZeroUsize.
pub fn zeroed(layout: Layout) -> Self {
let ptr = if layout.size() > 0 {
unsafe {
// Safe as long as we guarantee layout.size() > 0.
alloc_zeroed(layout)
}
// SAFETY:
// Safe as long as we guarantee layout.size() > 0.
unsafe { alloc_zeroed(layout) }
} else {
layout.align() as *mut u8
};
@ -159,8 +158,9 @@ impl LayoutAllocation {
impl Drop for LayoutAllocation {
fn drop(&mut self) {
if self.layout.size() > 0 {
// SAFETY:
// Safe as long as we guarantee layout.size() > 0.
unsafe {
// Safe as long as we guarantee layout.size() > 0.
dealloc(self.ptr, self.layout);
}
}
@ -178,6 +178,8 @@ mod tests {
fn test_as_slice_u32() {
let layout = Layout::from_size_align(size_of::<u32>() * 15, align_of::<u32>()).unwrap();
let allocation = LayoutAllocation::zeroed(layout);
// SAFETY:
// Slice less than the allocation size, which will return a slice of only the requested length.
let slice: &[u32] = unsafe { allocation.as_slice(15) };
assert_eq!(slice.len(), 15);
assert_eq!(slice[0], 0);
@ -189,6 +191,7 @@ mod tests {
let layout = Layout::from_size_align(size_of::<u32>() * 15, align_of::<u32>()).unwrap();
let allocation = LayoutAllocation::zeroed(layout);
// SAFETY:
// Slice less than the allocation size, which will return a slice of only the requested length.
let slice: &[u32] = unsafe { allocation.as_slice(5) };
assert_eq!(slice.len(), 5);
@ -199,6 +202,7 @@ mod tests {
let layout = Layout::from_size_align(size_of::<u32>() * 15, align_of::<u32>()).unwrap();
let allocation = LayoutAllocation::zeroed(layout);
// SAFETY:
// Slice more than the allocation size, which will clamp the returned slice len to the limit.
let slice: &[u32] = unsafe { allocation.as_slice(100) };
assert_eq!(slice.len(), 15);
@ -210,6 +214,7 @@ mod tests {
let layout = Layout::from_size_align(size_of::<u32>() * 15 + 2, align_of::<u32>()).unwrap();
let allocation = LayoutAllocation::zeroed(layout);
// SAFETY:
// Slice as many u32s as possible, which should return a slice that only includes the full
// u32s, not the trailing 2 bytes.
let slice: &[u32] = unsafe { allocation.as_slice(100) };

View file

@ -111,6 +111,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor {
/// TODO(b/191800567): this API has sharp edges on Windows. We should evaluate making some
/// adjustments to smooth those edges.
fn try_from(rd: &dyn AsRawDescriptor) -> std::result::Result<Self, Self::Error> {
// SAFETY:
// Safe because the underlying raw descriptor is guaranteed valid by rd's existence.
//
// Note that we are cloning the underlying raw descriptor since we have no guarantee of
@ -129,6 +130,7 @@ impl TryFrom<&dyn AsRawDescriptor> for SafeDescriptor {
impl From<File> for SafeDescriptor {
fn from(f: File) -> SafeDescriptor {
// SAFETY:
// Safe because we own the File at this point.
unsafe { SafeDescriptor::from_raw_descriptor(f.into_raw_descriptor()) }
}

View file

@ -398,7 +398,9 @@ pub mod with_as_descriptor {
{
super::deserialize_descriptor(de)
.map(IntoRawDescriptor::into_raw_descriptor)
.map(|rd| unsafe { T::from_raw_descriptor(rd) })
.map(|rd|
// SAFETY: rd is expected to be valid for the duration of the call.
unsafe { T::from_raw_descriptor(rd) })
}
}
@ -462,9 +464,9 @@ mod tests {
use super::super::SerializeDescriptors;
fn deserialize<T: DeserializeOwned>(json: &str, descriptors: &[RawDescriptor]) -> T {
let safe_descriptors = descriptors
.iter()
.map(|&v| unsafe { SafeDescriptor::from_raw_descriptor(v) });
let safe_descriptors = descriptors.iter().map(|&v|
// SAFETY: `descriptor` is expected to be valid.
unsafe { SafeDescriptor::from_raw_descriptor(v) });
deserialize_with_descriptors(|| serde_json::from_str(json), safe_descriptors).unwrap()
}

View file

@ -35,6 +35,7 @@ pub struct IoBufMut<'a> {
impl<'a> IoBufMut<'a> {
pub fn new(buf: &mut [u8]) -> IoBufMut<'a> {
// SAFETY:
// Safe because buf's memory is of the supplied length, and
// guaranteed to exist for the lifetime of the returned value.
unsafe { Self::from_raw_parts(buf.as_mut_ptr(), buf.len()) }
@ -74,6 +75,7 @@ impl<'a> IoBufMut<'a> {
self.iobuf.set_len(self.len() - count);
// SAFETY:
// Safe because we've checked that `count <= self.len()` so both the starting and resulting
// pointer are within the bounds of the allocation.
self.iobuf.set_ptr(unsafe { self.as_mut_ptr().add(count) });
@ -114,6 +116,7 @@ impl<'a> IoBufMut<'a> {
#[allow(clippy::wrong_self_convention)]
#[inline]
pub fn as_iobufs<'slice>(iovs: &'slice [IoBufMut<'_>]) -> &'slice [IoBuf] {
    let data = iovs.as_ptr() as *const IoBuf;
    let len = iovs.len();
    // SAFETY:
    // `IoBufMut` is ABI-compatible with `IoBuf`, so reinterpreting the same
    // `len` elements starting at `data` as `IoBuf`s is sound.
    unsafe { slice::from_raw_parts(data, len) }
}
@ -121,6 +124,7 @@ impl<'a> IoBufMut<'a> {
/// Converts a mutable slice of `IoBufMut`s into a mutable slice of `IoBuf`s.
#[inline]
pub fn as_iobufs_mut<'slice>(iovs: &'slice mut [IoBufMut<'_>]) -> &'slice mut [IoBuf] {
    let data = iovs.as_mut_ptr() as *mut IoBuf;
    let len = iovs.len();
    // SAFETY:
    // `IoBufMut` is ABI-compatible with `IoBuf`, so the same `len` elements
    // may be viewed mutably as `IoBuf`s.
    unsafe { slice::from_raw_parts_mut(data, len) }
}
@ -138,11 +142,13 @@ impl<'a> AsMut<IoBuf> for IoBufMut<'a> {
}
}
// SAFETY:
// It's safe to implement Send + Sync for this type for the same reason that `std::io::IoSliceMut`
// is Send + Sync. Internally, it contains a pointer and a length. The integer length is safely Send
// + Sync. There's nothing wrong with sending a pointer between threads and de-referencing the
// pointer requires an unsafe block anyway. See also https://github.com/rust-lang/rust/pull/70342.
unsafe impl<'a> Send for IoBufMut<'a> {}
// SAFETY: See the comments on the `Send` impl above; the same reasoning applies to `Sync`.
unsafe impl<'a> Sync for IoBufMut<'a> {}
impl<'a> Debug for IoBufMut<'a> {

View file

@ -135,6 +135,7 @@ impl MemoryMapping {
match self.mapping.size().checked_sub(offset) {
Some(size_past_offset) => {
let bytes_copied = min(size_past_offset, buf.len());
// SAFETY:
// The bytes_copied equation above ensures we don't copy bytes out of range of
// either buf or this slice. We also know that the buffers do not overlap because
// slices can never occupy the same memory as a volatile slice.
@ -151,6 +152,7 @@ impl MemoryMapping {
match self.size().checked_sub(offset) {
Some(size_past_offset) => {
let bytes_copied = min(size_past_offset, buf.len());
// SAFETY:
// The bytes_copied equation above ensures we don't copy bytes out of range of
// either buf or this slice. We also know that the buffers do not overlap because
// slices can never occupy the same memory as a volatile slice.
@ -182,6 +184,7 @@ impl MemoryMapping {
/// ```
pub fn write_obj<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
self.mapping.range_end(offset, size_of::<T>())?;
// SAFETY:
// This is safe because we checked the bounds above.
unsafe {
write_unaligned(self.as_ptr().add(offset) as *mut T, val);
@ -210,6 +213,7 @@ impl MemoryMapping {
/// ```
pub fn read_obj<T: FromBytes>(&self, offset: usize) -> Result<T> {
self.mapping.range_end(offset, size_of::<T>())?;
// SAFETY:
// This is safe because by definition Copy types can have their bits set arbitrarily and
// still be valid.
unsafe {
@ -242,6 +246,7 @@ impl MemoryMapping {
// Make sure writes to memory have been committed before performing I/O that could
// potentially depend on them.
fence(Ordering::SeqCst);
// SAFETY:
// This is safe because we checked the bounds above.
unsafe {
write_volatile(self.as_ptr().add(offset) as *mut T, val);
@ -273,6 +278,7 @@ impl MemoryMapping {
/// ```
pub fn read_obj_volatile<T: FromBytes>(&self, offset: usize) -> Result<T> {
self.mapping.range_end(offset, size_of::<T>())?;
// SAFETY:
// This is safe because by definition Copy types can have their bits set arbitrarily and
// still be valid.
unsafe {
@ -410,6 +416,7 @@ impl VolatileMemory for MemoryMapping {
offset,
})?;
// SAFETY:
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
@ -422,6 +429,7 @@ impl VolatileMemory for MemoryMapping {
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
pub unsafe trait MappedRegion: Send + Sync {
// SAFETY:
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
fn as_ptr(&self) -> *mut u8;
@ -456,6 +464,7 @@ pub unsafe trait MappedRegion: Send + Sync {
}
}
// SAFETY:
// Safe because it exclusively forwards calls to a safe implementation.
unsafe impl MappedRegion for MemoryMapping {
fn as_ptr(&self) -> *mut u8 {
@ -473,6 +482,10 @@ pub struct ExternalMapping {
pub size: usize,
}
// SAFETY:
// `ptr`..`ptr+size` is an mmaped region and is owned by this object. Caller
// needs to ensure that the region is not unmapped during the `MappedRegion`'s
// lifetime.
unsafe impl MappedRegion for ExternalMapping {
/// used for passing this region to ioctls for setting guest memory.
fn as_ptr(&self) -> *mut u8 {

View file

@ -84,7 +84,9 @@ impl AcpiNotifyEvent {
// https://github.com/rust-lang/rust/issues/79089,
// before using device_class further cast it to u8.
let device_class: &[u8; 20usize] =
// SAFETY: trivially safe
unsafe { ::std::mem::transmute(&acpi_event.device_class) };
// SAFETY: trivially safe
let bus_id: &[u8; 15usize] = unsafe { ::std::mem::transmute(&acpi_event.bus_id) };
Ok(AcpiNotifyEvent {

View file

@ -20,9 +20,10 @@ extern "C" {
/// Drops all capabilities (permitted, inheritable, and effective) from the current process.
pub fn drop_capabilities() -> Result<()> {
// SAFETY:
// Safe because we do not actually manipulate any memory handled by libcap
// and we check errors.
unsafe {
// Safe because we do not actually manipulate any memory handled by libcap
// and we check errors.
let caps = cap_init();
if caps.is_null() {
return errno_result();

View file

@ -13,8 +13,10 @@ impl PartialEq for SafeDescriptor {
return true;
}
// SAFETY:
// safe because we only use the return value and libc says it's always successful
let pid = unsafe { libc::getpid() };
// SAFETY:
// safe because we are passing everything by value and checking the return value
let ret = unsafe {
libc::syscall(

View file

@ -55,21 +55,24 @@ impl EventExt for crate::Event {
impl PlatformEvent {
/// Creates a new blocking eventfd with an initial value of 0.
pub fn new() -> Result<PlatformEvent> {
    // SAFETY:
    // Safe because eventfd merely allocates a new eventfd for this process and
    // the error case is handled below.
    let raw_fd = unsafe { eventfd(0, 0) };
    if raw_fd < 0 {
        return errno_result();
    }
    // SAFETY:
    // Safe because `raw_fd` was checked for success above, so the kernel has
    // given us an fd that we now own.
    let event_handle = unsafe { SafeDescriptor::from_raw_descriptor(raw_fd) };
    Ok(PlatformEvent { event_handle })
}
/// See `EventExt::write_count`.
pub fn write_count(&self, v: u64) -> Result<()> {
// SAFETY:
// This is safe because we made this fd and the pointer we pass can not overflow because we
// give the syscall's size parameter properly.
let ret = unsafe {
@ -88,9 +91,10 @@ impl PlatformEvent {
/// See `EventExt::read_count`.
pub fn read_count(&self) -> Result<u64> {
let mut buf: u64 = 0;
// SAFETY:
// This is safe because we made this fd and the pointer we pass can not overflow because
// we give the syscall's size parameter properly.
let ret = unsafe {
// This is safe because we made this fd and the pointer we pass can not overflow because
// we give the syscall's size parameter properly.
read(
self.as_raw_descriptor(),
&mut buf as *mut u64 as *mut c_void,
@ -121,6 +125,7 @@ impl PlatformEvent {
revents: 0,
};
let timeoutspec: libc::timespec = duration_to_timespec(timeout);
// SAFETY:
// Safe because this only modifies |pfd| and we check the return value
let ret = unsafe {
libc::ppoll(

View file

@ -21,6 +21,7 @@ fn lseek(fd: &dyn AsRawDescriptor, offset: u64, option: LseekOption) -> Result<u
LseekOption::Data => libc::SEEK_DATA,
LseekOption::Hole => libc::SEEK_HOLE,
};
// SAFETY:
// safe because this doesn't modify any memory.
let ret = unsafe { libc::lseek64(fd.as_raw_descriptor(), offset as i64, whence) };
if ret < 0 {

View file

@ -24,6 +24,7 @@ pub enum FileFlags {
impl FileFlags {
pub fn from_file(file: &dyn AsRawDescriptor) -> Result<FileFlags> {
// SAFETY:
// Trivially safe because fcntl with the F_GETFL command is totally safe and we check for
// error.
let flags = unsafe { fcntl(file.as_raw_descriptor(), F_GETFL) };

View file

@ -15,9 +15,11 @@ use crate::syscall;
#[allow(clippy::unnecessary_cast)]
pub fn get_filesystem_type(file: &File) -> Result<i64> {
let mut statfs_buf = MaybeUninit::<libc::statfs64>::uninit();
// SAFETY:
// Safe because we just got the memory space with exact required amount and
// passing that on.
syscall!(unsafe { fstatfs64(file.as_raw_fd(), statfs_buf.as_mut_ptr()) })?;
// SAFETY:
// Safe because the kernel guarantees the struct is initialized.
let statfs_buf = unsafe { statfs_buf.assume_init() };
Ok(statfs_buf.f_type as i64)

View file

@ -120,6 +120,7 @@ impl Syslog for PlatformSyslog {
// libraries in use that hard depend on libc's syslogger. Remove this and go back to making the
// connection directly once minjail is ready.
fn openlog_and_get_socket() -> Result<UnixDatagram, Error> {
// SAFETY:
// closelog first in case there was already a file descriptor open. Safe because it takes no
// arguments and just closes an open file descriptor. Does nothing if the file descriptor
// was not already open.
@ -137,6 +138,7 @@ fn openlog_and_get_socket() -> Result<UnixDatagram, Error> {
.map_err(Error::GetLowestFd)?
.as_raw_fd();
// SAFETY: See comments for each unsafe line in the block.
unsafe {
// Safe because openlog accesses no pointers because `ident` is null, only valid flags are
// used, and it returns no error.
@ -152,6 +154,7 @@ fn openlog_and_get_socket() -> Result<UnixDatagram, Error> {
}
fn get_localtime() -> tm {
// SAFETY: See comments for each unsafe line in the block.
unsafe {
// Safe because tm is just a struct of plain data.
let mut tm: tm = mem::zeroed();

View file

@ -60,6 +60,7 @@ impl dyn MappedRegion {
pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
validate_includes_range(self.size(), offset, size)?;
// SAFETY:
// Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
// are correct, and we've validated that `offset`..`offset+size` is in the range owned by
// this `MappedRegion`.
@ -86,11 +87,13 @@ pub struct MemoryMapping {
size: usize,
}
// SAFETY:
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
// SAFETY: See the safety comments on the `Send` impl above; the same reasoning applies to `Sync`.
unsafe impl Sync for MemoryMapping {}
impl MemoryMapping {
@ -108,6 +111,7 @@ impl MemoryMapping {
/// * `size` - Size of memory region in bytes.
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
// SAFETY:
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
unsafe { MemoryMapping::try_mmap(None, size, prot.into(), None) }
@ -161,9 +165,10 @@ impl MemoryMapping {
prot: Protection,
populate: bool,
) -> Result<MemoryMapping> {
// SAFETY:
// This is safe because we are creating an anonymous mapping in a place not already used
// by any other area in this process.
unsafe {
// This is safe because we are creating an anonymous mapping in a place not already used
// by any other area in this process.
MemoryMapping::try_mmap_populate(None, size, prot.into(), Some((fd, offset)), populate)
}
}
@ -256,6 +261,8 @@ impl MemoryMapping {
}
// Map private for read-only seal. See below for upstream relax of the restriction.
// - https://lore.kernel.org/bpf/20231013103208.kdffpyerufr4ygnw@quack3/T/
// SAFETY:
// Safe because no third parameter is expected and we check the return result.
let seals = unsafe { libc::fcntl(fd.as_raw_descriptor(), libc::F_GET_SEALS) };
if (seals >= 0) && (seals & libc::F_SEAL_WRITE != 0) {
flags &= !libc::MAP_SHARED;
@ -288,6 +295,7 @@ impl MemoryMapping {
/// Madvise the kernel to unmap on fork.
pub fn use_dontfork(&self) -> Result<()> {
// SAFETY:
// This is safe because we call madvise with a valid address and size, and we check the
// return value.
let ret = unsafe {
@ -314,6 +322,7 @@ impl MemoryMapping {
return Ok(());
}
// SAFETY:
// This is safe because we call madvise with a valid address and size, and we check the
// return value.
let ret = unsafe {
@ -332,6 +341,7 @@ impl MemoryMapping {
/// Calls msync with MS_SYNC on the mapping.
pub fn msync(&self) -> Result<()> {
// SAFETY:
// This is safe since we use the exact address and length of a known
// good memory mapping.
let ret = unsafe {
@ -352,6 +362,8 @@ impl MemoryMapping {
pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
self.range_end(mem_offset, count)
.map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
// SAFETY: Safe because all the args to madvise are valid and the return
// value is checked.
let ret = unsafe {
// madvising away the region is the same as the guest changing it.
// Next time it is read, it may return zero pages.
@ -384,6 +396,7 @@ impl MemoryMapping {
// Validation
self.range_end(mem_offset, count)
.map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
// SAFETY:
// Safe because populating the pages from the backed file does not affect the Rust memory
// safety.
let ret = unsafe {
@ -418,6 +431,7 @@ impl MemoryMapping {
// Validation
self.range_end(mem_offset, count)
.map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
// SAFETY:
// Safe because dropping the page cache does not affect the Rust memory safety.
let ret = unsafe {
libc::madvise(
@ -448,6 +462,7 @@ impl MemoryMapping {
self.range_end(mem_offset, count)
.map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
let addr = self.addr as usize + mem_offset;
// SAFETY:
// Safe because MLOCK_ONFAULT only affects the swap behavior of the kernel, so it has no
// impact on rust semantics.
let ret = unsafe { libc::mlock2(addr as *mut _, count, libc::MLOCK_ONFAULT) };
@ -479,6 +494,7 @@ impl MemoryMapping {
// Validation
self.range_end(mem_offset, count)
.map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
// SAFETY:
// Safe because munlock(2) does not affect the Rust memory safety.
let ret = unsafe { libc::munlock((self.addr as usize + mem_offset) as *mut _, count) };
if ret < 0 {
@ -498,6 +514,7 @@ impl MemoryMapping {
}
}
// SAFETY:
// Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
// be unmapped until it's Dropped.
unsafe impl MappedRegion for MemoryMapping {
@ -512,6 +529,7 @@ unsafe impl MappedRegion for MemoryMapping {
impl Drop for MemoryMapping {
fn drop(&mut self) {
// SAFETY:
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
@ -527,11 +545,13 @@ pub struct MemoryMappingArena {
size: usize,
}
// SAFETY:
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMappingArena {}
// SAFETY: See the safety comments on the `Send` impl above; the same reasoning applies to `Sync`.
unsafe impl Sync for MemoryMappingArena {}
impl MemoryMappingArena {
@ -635,6 +655,7 @@ impl MemoryMappingArena {
}
validate_includes_range(self.size(), offset, size)?;
// SAFETY:
// This is safe since the range has been validated.
let mmap = unsafe {
match fd {
@ -665,6 +686,7 @@ impl MemoryMappingArena {
}
}
// SAFETY:
// Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that
// won't be unmapped until it's Dropped.
unsafe impl MappedRegion for MemoryMappingArena {
@ -712,6 +734,7 @@ impl From<CrateMemoryMapping> for MemoryMappingArena {
impl Drop for MemoryMappingArena {
fn drop(&mut self) {
// SAFETY:
// This is safe because we own this memory range, and nobody else is holding a reference to
// it.
unsafe {
@ -902,6 +925,7 @@ mod tests {
fn slice_addr() {
let m = MemoryMappingBuilder::new(5).build().unwrap();
let s = m.get_slice(2, 3).unwrap();
// SAFETY: all addresses are known to exist.
assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
}

View file

@ -124,6 +124,7 @@ pub type Mode = libc::mode_t;
/// elsewhere.
#[inline(always)]
pub fn getpid() -> Pid {
    // SAFETY:
    // Safe because SYS_getpid is a valid syscall number and the getpid syscall
    // can never fail.
    let raw_pid = unsafe { syscall(SYS_getpid as c_long) };
    raw_pid as Pid
}
@ -131,12 +132,14 @@ pub fn getpid() -> Pid {
/// Safe wrapper for the getppid Linux system call.
#[inline(always)]
pub fn getppid() -> Pid {
    // SAFETY:
    // Safe because this syscall can never fail and we give it a valid syscall number.
    unsafe { syscall(SYS_getppid as c_long) as Pid }
}
/// Safe wrapper for the gettid Linux system call.
pub fn gettid() -> Pid {
    // SAFETY:
    // Calling the gettid() syscall is always safe.
    unsafe { syscall(SYS_gettid as c_long) as Pid }
}
@ -144,6 +147,7 @@ pub fn gettid() -> Pid {
/// Safe wrapper for `geteuid(2)`.
#[inline(always)]
pub fn geteuid() -> Uid {
    // SAFETY:
    // Trivially safe: `geteuid(2)` takes no arguments, accesses no memory, and
    // is documented to always succeed.
    unsafe { libc::geteuid() }
}
@ -151,6 +155,7 @@ pub fn geteuid() -> Uid {
/// Safe wrapper for `getegid(2)`.
#[inline(always)]
pub fn getegid() -> Gid {
    // SAFETY:
    // Trivially safe: `getegid(2)` takes no arguments, accesses no memory, and
    // is documented to always succeed.
    unsafe { libc::getegid() }
}
@ -176,6 +181,7 @@ pub fn flock<F: AsRawDescriptor>(file: &F, op: FlockOperation, nonblocking: bool
operation |= libc::LOCK_NB;
}
// SAFETY:
// Safe since we pass in a valid fd and flock operation, and check the return value.
syscall!(unsafe { libc::flock(file.as_raw_descriptor(), operation) }).map(|_| ())
}
@ -222,6 +228,7 @@ pub fn fallocate<F: AsRawDescriptor>(
len as libc::off64_t
};
// SAFETY:
// Safe since we pass in a valid fd and fallocate mode, validate offset and len,
// and check the return value.
syscall!(unsafe { libc::fallocate64(file.as_raw_descriptor(), mode.into(), offset, len) })
@ -232,10 +239,12 @@ pub fn fallocate<F: AsRawDescriptor>(
pub fn fstat<F: AsRawDescriptor>(f: &F) -> Result<libc::stat64> {
    let mut stat_buf = MaybeUninit::<libc::stat64>::zeroed();
    let fd = f.as_raw_descriptor();
    // SAFETY:
    // Safe because the kernel writes only into `stat_buf`, which is sized for
    // a `stat64`, and the return value is checked.
    syscall!(unsafe { libc::fstat64(fd, stat_buf.as_mut_ptr()) })?;
    // SAFETY:
    // Safe because fstat64 returned success, so the kernel guarantees the
    // struct is now fully initialized.
    Ok(unsafe { stat_buf.assume_init() })
}
@ -252,7 +261,7 @@ ioctl_io_nr!(BLKDISCARD, BLOCK_IO_TYPE, 119);
/// Discards the given range of a block file.
pub fn discard_block<F: AsRawDescriptor>(file: &F, offset: u64, len: u64) -> Result<()> {
let range: [u64; 2] = [offset, len];
// # Safety
// SAFETY:
// Safe because
// - we check the return value.
// - ioctl(BLKDISCARD) does not hold the descriptor after the call.
@ -287,6 +296,7 @@ impl AsRawPid for std::process::Child {
pub fn wait_for_pid<A: AsRawPid>(pid: A, options: c_int) -> Result<(Option<Pid>, ExitStatus)> {
let pid = pid.as_raw_pid();
let mut status: c_int = 1;
// SAFETY:
// Safe because status is owned and the error is checked.
let ret = unsafe { libc::waitpid(pid, &mut status, options) };
if ret < 0 {
@ -324,6 +334,7 @@ pub fn wait_for_pid<A: AsRawPid>(pid: A, options: c_int) -> Result<(Option<Pid>,
/// }
/// ```
pub fn reap_child() -> Result<Pid> {
// SAFETY:
// Safe because we pass in no memory, prevent blocking with WNOHANG, and check for error.
let ret = unsafe { waitpid(-1, ptr::null_mut(), WNOHANG) };
if ret == -1 {
@ -338,6 +349,7 @@ pub fn reap_child() -> Result<Pid> {
/// On success, this kills all processes in the current process group, including the current
/// process, meaning this will not return. This is equivalent to a call to `kill(0, SIGKILL)`.
pub fn kill_process_group() -> Result<()> {
// SAFETY: Safe because pid is 'self group' and return value doesn't matter.
unsafe { kill(0, SIGKILL) }?;
// Kill succeeded, so this process never reaches here.
unreachable!();
@ -349,12 +361,14 @@ pub fn kill_process_group() -> Result<()> {
pub fn pipe(close_on_exec: bool) -> Result<(File, File)> {
let flags = if close_on_exec { O_CLOEXEC } else { 0 };
let mut pipe_fds = [-1; 2];
// SAFETY:
// Safe because pipe2 will only write 2 element array of i32 to the given pointer, and we check
// for error.
let ret = unsafe { pipe2(&mut pipe_fds[0], flags) };
if ret == -1 {
errno_result()
} else {
// SAFETY:
// Safe because both fds must be valid for pipe2 to have returned successfully and we have
// exclusive ownership of them.
Ok(unsafe {
@ -370,6 +384,7 @@ pub fn pipe(close_on_exec: bool) -> Result<(File, File)> {
///
/// Returns the new size of the pipe or an error if the OS fails to set the pipe size.
pub fn set_pipe_size(fd: RawFd, size: usize) -> Result<usize> {
    // SAFETY:
    // Safe because fcntl with the `F_SETPIPE_SZ` command takes an integer
    // argument and doesn't touch memory.
    let new_size = syscall!(unsafe { fcntl(fd, libc::F_SETPIPE_SZ, size as c_int) })?;
    Ok(new_size as usize)
}
@ -450,12 +465,14 @@ pub fn validate_raw_descriptor(raw_descriptor: RawDescriptor) -> Result<RawDescr
pub fn validate_raw_fd(raw_fd: RawFd) -> Result<RawFd> {
// Checking that close-on-exec isn't set helps filter out FDs that were opened by
// crosvm as all crosvm FDs are close on exec.
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let flags = unsafe { libc::fcntl(raw_fd, libc::F_GETFD) };
if flags < 0 || (flags & libc::FD_CLOEXEC) != 0 {
return Err(Error::new(libc::EBADF));
}
// SAFETY:
// Duplicate the fd to ensure that we don't accidentally close an fd previously
// opened by another subsystem. Safe because this doesn't modify any memory and
// we check the return value.
@ -476,6 +493,7 @@ pub fn poll_in<F: AsRawDescriptor>(fd: &F) -> bool {
events: libc::POLLIN,
revents: 0,
};
// SAFETY:
// Safe because we give a valid pointer to a list (of 1) FD and check the return value.
let ret = unsafe { libc::poll(&mut fds, 1, 0) };
// An error probably indicates an invalid FD, or an FD that can't be polled. Returning false in
@ -515,6 +533,7 @@ pub fn safe_descriptor_from_path<P: AsRef<Path>>(path: P) -> Result<Option<SafeD
.ok_or_else(|| Error::new(EINVAL))?;
let validated_fd = validate_raw_fd(raw_descriptor)?;
Ok(Some(
// SAFETY:
// Safe because nothing else has access to validated_fd after this call.
unsafe { SafeDescriptor::from_raw_descriptor(validated_fd) },
))
@ -543,9 +562,11 @@ pub fn open_file_or_duplicate<P: AsRef<Path>>(path: P, options: &OpenOptions) ->
pub fn max_open_files() -> Result<u64> {
let mut buf = mem::MaybeUninit::<libc::rlimit64>::zeroed();
// SAFETY:
// Safe because this will only modify `buf` and we check the return value.
let res = unsafe { libc::prlimit64(0, libc::RLIMIT_NOFILE, ptr::null(), buf.as_mut_ptr()) };
if res == 0 {
// SAFETY:
// Safe because the kernel guarantees that the struct is fully initialized.
let limit = unsafe { buf.assume_init() };
Ok(limit.rlim_max)
@ -624,6 +645,7 @@ impl sched_attr {
}
pub fn sched_setattr(pid: Pid, attr: &mut sched_attr, flags: u32) -> Result<()> {
// SAFETY: Safe because all the args are valid and the return value is checked.
let ret = unsafe {
libc::syscall(
libc::SYS_sched_setattr,

View file

@ -143,6 +143,7 @@ impl UnixSeqpacketListener {
///
/// The returned socket has the close-on-exec flag set.
pub fn accept(&self) -> io::Result<UnixSeqpacket> {
// SAFETY:
// Safe because we own this fd and the kernel will not write to null pointers.
match unsafe {
libc::accept4(
@ -154,11 +155,11 @@ impl UnixSeqpacketListener {
} {
-1 => Err(io::Error::last_os_error()),
fd => {
// Safe because we checked the return value of accept. Therefore, the return value
// must be a valid socket.
Ok(UnixSeqpacket::from(unsafe {
SafeDescriptor::from_raw_descriptor(fd)
}))
Ok(UnixSeqpacket::from(
// SAFETY: Safe because we checked the return value of accept. Therefore, the
// return value must be a valid socket.
unsafe { SafeDescriptor::from_raw_descriptor(fd) },
))
}
}
}

View file

@ -179,6 +179,7 @@ impl AsRawDescriptor for NetlinkGenericSocket {
impl NetlinkGenericSocket {
/// Create and bind a new `NETLINK_GENERIC` socket.
pub fn new(nl_groups: u32) -> Result<Self> {
// SAFETY:
// Safe because we check the return value and convert the raw fd into a SafeDescriptor.
let sock = unsafe {
let fd = libc::socket(
@ -193,12 +194,14 @@ impl NetlinkGenericSocket {
SafeDescriptor::from_raw_descriptor(fd)
};
// SAFETY:
// This MaybeUninit dance is needed because sockaddr_nl has a private padding field and
// doesn't implement Default. Safe because all 0s is valid data for sockaddr_nl.
let mut sa = unsafe { MaybeUninit::<libc::sockaddr_nl>::zeroed().assume_init() };
sa.nl_family = libc::AF_NETLINK as libc::sa_family_t;
sa.nl_groups = nl_groups;
// SAFETY:
// Safe because we pass a descriptor that we own and valid pointer/size for sockaddr.
unsafe {
let res = libc::bind(
@ -223,6 +226,7 @@ impl NetlinkGenericSocket {
.map_err(|_| Error::new(EINVAL))?;
let allocation = LayoutAllocation::uninitialized(layout);
// SAFETY:
// Safe because we pass a valid, owned socket fd and a valid pointer/size for the buffer.
let bytes_read = unsafe {
let res = libc::recv(self.sock.as_raw_fd(), allocation.as_ptr(), buf_size, 0);
@ -252,6 +256,7 @@ impl NetlinkGenericSocket {
.unwrap();
let mut allocation = LayoutAllocation::zeroed(layout);
// SAFETY:
// Safe because the data in allocation was initialized up to `buf_size` and is
// sufficiently aligned.
let data = unsafe { allocation.as_mut_slice(buf_size) };
@ -288,6 +293,7 @@ impl NetlinkGenericSocket {
let payload_end = payload_start + family_name.len();
data[payload_start..payload_end].copy_from_slice(family_name.as_bytes());
// SAFETY:
// Safe because we pass a valid, owned socket fd and a valid pointer/size for the buffer.
unsafe {
let res = libc::send(
@ -430,6 +436,7 @@ pub struct NetlinkGenericRead {
impl NetlinkGenericRead {
pub fn iter(&self) -> NetlinkMessageIter {
// SAFETY:
// Safe because the data in allocation was initialized up to `self.len` by `recv()` and is
// sufficiently aligned.
let data = unsafe { &self.allocation.as_slice(self.len) };

View file

@ -61,12 +61,15 @@ pub struct EventContext<T> {
impl<T: EventToken> EventContext<T> {
/// Creates a new `EventContext`.
pub fn new() -> Result<EventContext<T>> {
// SAFETY:
// Safe because we check the return value.
let epoll_fd = unsafe { epoll_create1(EPOLL_CLOEXEC) };
if epoll_fd < 0 {
return errno_result();
}
Ok(EventContext {
// SAFETY:
// Safe because epoll_fd is valid.
epoll_ctx: unsafe { File::from_raw_descriptor(epoll_fd) },
tokens: PhantomData,
})
@ -122,6 +125,7 @@ impl<T: EventToken> EventContext<T> {
events: event_type.into(),
u64: token.as_raw_token(),
};
// SAFETY:
// Safe because we give a valid epoll FD and FD to watch, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
@ -145,6 +149,7 @@ impl<T: EventToken> EventContext<T> {
events: event_type.into(),
u64: token.as_raw_token(),
};
// SAFETY:
// Safe because we give a valid epoll FD and FD to modify, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
@ -169,6 +174,7 @@ impl<T: EventToken> EventContext<T> {
/// Failure to do so will cause the `wait` method to always return immediately, causing ~100%
/// CPU load.
pub fn delete(&self, fd: &dyn AsRawDescriptor) -> Result<()> {
// SAFETY:
// Safe because we give a valid epoll FD and FD to stop watching. Then we check the return
// value.
let ret = unsafe {
@ -203,12 +209,12 @@ impl<T: EventToken> EventContext<T> {
/// This may return earlier than `timeout` with zero events if the duration indicated exceeds
/// system limits.
pub fn wait_timeout(&self, timeout: Duration) -> Result<SmallVec<[TriggeredEvent<T>; 16]>> {
// SAFETY:
// `MaybeUnint<T>` has the same layout as plain `T` (`epoll_event` in our case).
// We submit an uninitialized array to the `epoll_wait` system call, which returns how many
// elements it initialized, and then we convert only the initialized `MaybeUnint` values
// into `epoll_event` structures after the call.
let mut epoll_events: [MaybeUninit<epoll_event>; EVENT_CONTEXT_MAX_EVENTS] =
// SAFETY:
// `MaybeUnint<T>` has the same layout as plain `T` (`epoll_event` in our case).
// We submit an uninitialized array to the `epoll_wait` system call, which returns how many
// elements it initialized, and then we convert only the initialized `MaybeUnint` values
// into `epoll_event` structures after the call.
unsafe { MaybeUninit::uninit().assume_init() };
let timeout_millis = if timeout.as_secs() as i64 == i64::max_value() {
@ -227,6 +233,7 @@ impl<T: EventToken> EventContext<T> {
};
let ret = {
let max_events = epoll_events.len() as c_int;
// SAFETY:
// Safe because we give an epoll context and a properly sized epoll_events array
// pointer, which we trust the kernel to fill in properly. The `transmute` is safe,
// since `MaybeUnint<T>` has the same layout as `T`, and the `epoll_wait` syscall will

View file

@ -13,6 +13,7 @@ pub fn set_rt_prio_limit(limit: u64) -> Result<()> {
rlim_cur: limit,
rlim_max: limit,
};
// SAFETY:
// Safe because the kernel doesn't modify memory that is accessible to the process here.
let res = unsafe { libc::setrlimit64(libc::RLIMIT_RTPRIO, &rt_limit_arg) };
@ -25,13 +26,15 @@ pub fn set_rt_prio_limit(limit: u64) -> Result<()> {
/// Sets the current thread to be scheduled using the round robin real time class with `priority`.
pub fn set_rt_round_robin(priority: i32) -> Result<()> {
// SAFETY:
// Safe because sched_param only contains primitive types for which zero
// initialization is valid.
let mut sched_param: libc::sched_param = unsafe { MaybeUninit::zeroed().assume_init() };
sched_param.sched_priority = priority;
// Safe because the kernel doesn't modify memory that is accessible to the process here.
let res =
// SAFETY:
// Safe because the kernel doesn't modify memory that is accessible to the process here.
unsafe { libc::pthread_setschedparam(libc::pthread_self(), libc::SCHED_RR, &sched_param) };
if res != 0 {

View file

@ -98,6 +98,7 @@ where
let tz = std::env::var("TZ").unwrap_or_default();
// SAFETY:
// Safe because the program is still single threaded.
// We own the jail object and nobody else will try to reuse it.
let pid = match unsafe { jail.fork(Some(&keep_rds)) }? {
@ -119,6 +120,7 @@ where
[..std::cmp::min(MAX_THREAD_LABEL_LEN, debug_label.len())];
match CString::new(debug_label_trimmed) {
Ok(thread_name) => {
// SAFETY:
// Safe because thread_name is a valid pointer and setting name of this
// thread should be safe.
let _ = unsafe {
@ -150,6 +152,7 @@ where
None => "process.rs: no debug label".to_owned(),
},
// Can't use safe wrapper because jail crate depends on base
// SAFETY:
// Safe because it's only doing a read within bound checked by static assert
unsafe {*(&jail as *const Minijail as *const usize)}
);

View file

@ -26,8 +26,10 @@ struct CpuSet(cpu_set_t);
impl CpuSet {
pub fn new() -> CpuSet {
// SAFETY:
// cpu_set_t is a C struct and can be safely initialized with zeroed memory.
let mut cpuset: cpu_set_t = unsafe { mem::MaybeUninit::zeroed().assume_init() };
// SAFETY:
// Safe because we pass a valid cpuset pointer.
unsafe { CPU_ZERO(&mut cpuset) };
CpuSet(cpuset)
@ -36,6 +38,7 @@ impl CpuSet {
pub fn to_cpus(&self) -> Vec<usize> {
let mut cpus = Vec::new();
for i in 0..(CPU_SETSIZE as usize) {
// SAFETY: Safe because `i` and `self.0` are valid.
if unsafe { CPU_ISSET(i, &self.0) } {
cpus.push(i);
}
@ -48,6 +51,7 @@ impl FromIterator<usize> for CpuSet {
fn from_iter<I: IntoIterator<Item = usize>>(cpus: I) -> Self {
let mut cpuset = CpuSet::new();
for cpu in cpus {
// SAFETY:
// Safe because we pass a valid cpu index and cpuset.0 is a valid pointer.
unsafe { CPU_SET(cpu, &mut cpuset.0) };
}
@ -78,6 +82,7 @@ pub fn set_cpu_affinity<I: IntoIterator<Item = usize>>(cpus: I) -> Result<()> {
})
.collect::<Result<CpuSet>>()?;
// SAFETY:
// Safe because we pass 0 for the current thread, and cpuset is a valid pointer and only
// used for the duration of this call.
crate::syscall!(unsafe { sched_setaffinity(0, mem::size_of_val(&cpuset), &cpuset) })?;
@ -88,6 +93,7 @@ pub fn set_cpu_affinity<I: IntoIterator<Item = usize>>(cpus: I) -> Result<()> {
pub fn get_cpu_affinity() -> Result<Vec<usize>> {
let mut cpu_set = CpuSet::new();
// SAFETY:
// Safe because we pass 0 for the current thread, and cpu_set.0 is a valid pointer and only
// used for the duration of this call.
crate::syscall!(unsafe { sched_getaffinity(0, mem::size_of_val(&cpu_set.0), &mut cpu_set.0) })?;
@ -115,6 +121,7 @@ pub fn enable_core_scheduling() -> Result<()> {
PIDTYPE_PGID,
}
// SAFETY: Safe because we check the return value to prctl.
let ret = unsafe {
prctl(
PR_SCHED_CORE,

View file

@ -40,6 +40,8 @@ use crate::SharedMemory;
const MFD_CLOEXEC: c_uint = 0x0001;
const MFD_NOEXEC_SEAL: c_uint = 0x0008;
// SAFETY: It is caller's responsibility to ensure the args are valid and check the
// return value of the function.
unsafe fn memfd_create(name: *const c_char, flags: c_uint) -> c_int {
syscall(SYS_memfd_create as c_long, name, flags) as c_int
}
@ -165,15 +167,19 @@ impl PlatformSharedMemory for SharedMemory {
}
let shm_name = debug_name.as_ptr() as *const c_char;
// SAFETY:
// The following are safe because we give a valid C string and check the
// results of the memfd_create call.
let fd = unsafe { memfd_create(shm_name, flags) };
if fd < 0 {
return errno_result();
}
// SAFETY: Safe because fd is valid.
let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(fd) };
// Set the size of the memfd.
// SAFETY: Safe because we check the return value to ftruncate64 and all the args to the
// function are valid.
let ret = unsafe { ftruncate64(descriptor.as_raw_descriptor(), size as off64_t) };
if ret < 0 {
return errno_result();
@ -219,6 +225,8 @@ impl SharedMemoryLinux for SharedMemory {
}
fn get_seals(&self) -> Result<MemfdSeals> {
// SAFETY: Safe because we check the return value to fcntl and all the args to the
// function are valid.
let ret = unsafe { fcntl(self.descriptor.as_raw_descriptor(), F_GET_SEALS) };
if ret < 0 {
return errno_result();
@ -227,6 +235,8 @@ impl SharedMemoryLinux for SharedMemory {
}
fn add_seals(&mut self, seals: MemfdSeals) -> Result<()> {
// SAFETY: Safe because we check the return value to fcntl and all the args to the
// function are valid.
let ret = unsafe { fcntl(self.descriptor.as_raw_descriptor(), F_ADD_SEALS, seals) };
if ret < 0 {
return errno_result();

View file

@ -277,12 +277,14 @@ extern "C" {
/// Returns the minimum (inclusive) real-time signal number.
#[allow(non_snake_case)]
pub fn SIGRTMIN() -> c_int {
// SAFETY: trivially safe
unsafe { __libc_current_sigrtmin() }
}
/// Returns the maximum (inclusive) real-time signal number.
#[allow(non_snake_case)]
pub fn SIGRTMAX() -> c_int {
// SAFETY: trivially safe
unsafe { __libc_current_sigrtmax() }
}
@ -311,11 +313,13 @@ pub unsafe fn register_signal_handler(num: c_int, handler: extern "C" fn(c_int))
/// Resets the signal handler of signum `num` back to the default.
pub fn clear_signal_handler(num: c_int) -> Result<()> {
// SAFETY:
// Safe because sigaction is owned and expected to be initialized ot zeros.
let mut sigact: sigaction = unsafe { mem::zeroed() };
sigact.sa_flags = SA_RESTART;
sigact.sa_sigaction = SIG_DFL;
// SAFETY:
// Safe because sigact is owned, and this is restoring the default signal handler.
let ret = unsafe { sigaction(num, &sigact, null_mut()) };
if ret < 0 {
@ -345,9 +349,11 @@ pub unsafe fn register_rt_signal_handler(num: c_int, handler: extern "C" fn(c_in
///
/// This is a helper function used when we want to manipulate signals.
pub fn create_sigset(signals: &[c_int]) -> Result<sigset_t> {
// SAFETY:
// sigset will actually be initialized by sigemptyset below.
let mut sigset: sigset_t = unsafe { mem::zeroed() };
// SAFETY:
// Safe - return value is checked.
let ret = unsafe { sigemptyset(&mut sigset) };
if ret < 0 {
@ -355,6 +361,7 @@ pub fn create_sigset(signals: &[c_int]) -> Result<sigset_t> {
}
for signal in signals {
// SAFETY:
// Safe - return value is checked.
let ret = unsafe { sigaddset(&mut sigset, *signal) };
if ret < 0 {
@ -373,6 +380,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option<Duration>) -> Result<c
match timeout {
Some(timeout) => {
let ts = duration_to_timespec(timeout);
// SAFETY:
// Safe - return value is checked.
let ret = handle_eintr_errno!(unsafe { sigtimedwait(&sigset, null_mut(), &ts) });
if ret < 0 {
@ -383,6 +391,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option<Duration>) -> Result<c
}
None => {
let mut ret: c_int = 0;
// SAFETY: Safe because args are valid and the return value is checked.
let err = handle_eintr_rc!(unsafe { sigwait(&sigset, &mut ret as *mut c_int) });
if err != 0 {
Err(ErrnoError::new(err))
@ -397,6 +406,7 @@ pub fn wait_for_signal(signals: &[c_int], timeout: Option<Duration>) -> Result<c
pub fn get_blocked_signals() -> SignalResult<Vec<c_int>> {
let mut mask = Vec::new();
// SAFETY:
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
@ -422,6 +432,7 @@ pub fn get_blocked_signals() -> SignalResult<Vec<c_int>> {
pub fn block_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// SAFETY:
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
@ -447,6 +458,7 @@ pub fn block_signal(num: c_int) -> SignalResult<()> {
pub fn unblock_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// SAFETY:
// Safe - return value is checked.
let ret = unsafe { pthread_sigmask(SIG_UNBLOCK, &sigset, null_mut()) };
if ret < 0 {
@ -460,6 +472,7 @@ pub fn clear_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
while {
// SAFETY:
// This is safe as we are rigorously checking return values
// of libc calls.
unsafe {
@ -535,6 +548,7 @@ pub unsafe trait Killable {
return Err(ErrnoError::new(EINVAL));
}
// SAFETY:
// Safe because we ensure we are using a valid pthread handle, a valid signal number, and
// check the return result.
let ret = unsafe { pthread_kill(self.pthread_handle(), num) };
@ -545,6 +559,7 @@ pub unsafe trait Killable {
}
}
// SAFETY:
// Safe because we fulfill our contract of returning a genuine pthread handle.
unsafe impl<T> Killable for JoinHandle<T> {
fn pthread_handle(&self) -> pthread_t {

View file

@ -67,6 +67,7 @@ impl SignalFd {
pub fn new(signal: c_int) -> Result<SignalFd> {
let sigset = signal::create_sigset(&[signal]).map_err(Error::CreateSigset)?;
// SAFETY:
// This is safe as we check the return value and know that fd is valid.
let fd = unsafe { signalfd(-1, &sigset, SFD_CLOEXEC | SFD_NONBLOCK) };
if fd < 0 {
@ -76,6 +77,7 @@ impl SignalFd {
// Mask out the normal handler for the signal.
signal::block_signal(signal).map_err(Error::CreateBlockSignal)?;
// SAFETY:
// This is safe because we checked fd for success and know the
// kernel gave us an fd that we own.
unsafe {
@ -88,10 +90,12 @@ impl SignalFd {
/// Read a siginfo struct from the signalfd, if available.
pub fn read(&self) -> Result<Option<signalfd_siginfo>> {
// SAFETY:
// signalfd_siginfo doesn't have a default, so just zero it.
let mut siginfo: signalfd_siginfo = unsafe { mem::zeroed() };
let siginfo_size = mem::size_of::<signalfd_siginfo>();
// SAFETY:
// This read is safe since we've got the space allocated for a
// single signalfd_siginfo, and that's exactly how much we're
// reading. Handling of EINTR is not required since SFD_NONBLOCK
@ -166,6 +170,7 @@ mod tests {
let sigid = SIGRTMIN() + 1;
let sigrt_fd = SignalFd::new(sigid).unwrap();
// SAFETY: Safe because sigid is valid and return value is checked.
let ret = unsafe { raise(sigid) };
assert_eq!(ret, 0);
@ -178,6 +183,7 @@ mod tests {
let sigid = SIGRTMIN() + 2;
let sigrt_fd = SignalFd::new(sigid).unwrap();
// SAFETY: Safe because sigset and sigid are valid and return value is checked.
unsafe {
let mut sigset: sigset_t = mem::zeroed();
pthread_sigmask(0, null(), &mut sigset as *mut sigset_t);
@ -187,6 +193,7 @@ mod tests {
mem::drop(sigrt_fd);
// The signal should no longer be masked.
// SAFETY: Safe because sigset and sigid are valid and return value is checked.
unsafe {
let mut sigset: sigset_t = mem::zeroed();
pthread_sigmask(0, null(), &mut sigset as *mut sigset_t);

View file

@ -24,20 +24,26 @@ use crate::unix::add_fd_flags;
use crate::unix::clear_fd_flags;
fn modify_mode<F: FnOnce(&mut termios)>(fd: RawFd, f: F) -> Result<()> {
// Safety:
// Safe because we check the return value of isatty.
if unsafe { isatty(fd) } != 1 {
return Ok(());
}
// Safety:
// The following pair are safe because termios gets totally overwritten by tcgetattr and we
// check the return result.
let mut termios: termios = unsafe { zeroed() };
// Safety:
// The following pair are safe because termios gets totally overwritten by tcgetattr and we
// check the return result.
let ret = unsafe { tcgetattr(fd, &mut termios as *mut _) };
if ret < 0 {
return errno_result();
}
let mut new_termios = termios;
f(&mut new_termios);
// SAFETY:
// Safe because the syscall will only read the extent of termios and we check the return result.
let ret = unsafe { tcsetattr(fd, TCSANOW, &new_termios as *const _) };
if ret < 0 {
@ -47,6 +53,8 @@ fn modify_mode<F: FnOnce(&mut termios)>(fd: RawFd, f: F) -> Result<()> {
Ok(())
}
/// # Safety
///
/// Safe only when the FD given is valid and reading the fd will have no Rust safety implications.
unsafe fn read_raw(fd: RawFd, out: &mut [u8]) -> Result<usize> {
let ret = read(fd, out.as_mut_ptr() as *mut _, out.len());
@ -63,6 +71,7 @@ unsafe fn read_raw(fd: RawFd, out: &mut [u8]) -> Result<usize> {
/// around stdin that the stdlib usually uses. If other code is using stdin, it is undefined who
/// will get the underlying bytes.
pub fn read_raw_stdin(out: &mut [u8]) -> Result<usize> {
// SAFETY:
// Safe because reading from stdin shouldn't have any safety implications.
unsafe { read_raw(STDIN_FILENO, out) }
}
@ -99,6 +108,7 @@ pub unsafe trait Terminal {
}
}
// # SAFETY:
// Safe because we return a genuine terminal fd that never changes and shares our lifetime.
unsafe impl Terminal for Stdin {
fn tty_fd(&self) -> RawFd {

View file

@ -37,14 +37,16 @@ impl Timer {
/// Creates a new timerfd. The timer is initally disarmed and must be armed by calling
/// `reset`.
pub fn new() -> Result<Timer> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC) };
if ret < 0 {
return errno_result();
}
// Safe because we uniquely own the file descriptor.
Ok(Timer {
// SAFETY:
// Safe because we uniquely own the file descriptor.
handle: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
interval: None,
})
@ -61,6 +63,7 @@ impl Timer {
it_value: duration_to_timespec(dur.unwrap_or_default()),
};
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_settime(self.as_raw_descriptor(), 0, &spec, ptr::null_mut()) };
if ret < 0 {
@ -87,6 +90,7 @@ impl TimerTrait for Timer {
revents: 0,
};
// SAFETY:
// Safe because this only modifies |pfd| and we check the return value
let ret = handle_eintr_errno!(unsafe {
libc::ppoll(
@ -113,6 +117,7 @@ impl TimerTrait for Timer {
fn mark_waited(&mut self) -> Result<bool> {
let mut count = 0u64;
// SAFETY:
// The timerfd is in non-blocking mode, so this should return immediately.
let ret = unsafe {
libc::read(
@ -134,9 +139,11 @@ impl TimerTrait for Timer {
}
fn resolution(&self) -> Result<Duration> {
// SAFETY:
// Safe because we are zero-initializing a struct with only primitive member fields.
let mut res: libc::timespec = unsafe { mem::zeroed() };
// SAFETY:
// Safe because it only modifies a local struct and we check the return value.
let ret = unsafe { clock_getres(CLOCK_MONOTONIC, &mut res) };

View file

@ -214,6 +214,7 @@ pub struct VsockSocket {
impl VsockSocket {
pub fn new() -> io::Result<Self> {
// SAFETY: trivially safe
let fd = unsafe { libc::socket(libc::AF_VSOCK, libc::SOCK_STREAM | libc::SOCK_CLOEXEC, 0) };
if fd < 0 {
Err(io::Error::last_os_error())
@ -237,6 +238,7 @@ impl VsockSocket {
..Default::default()
};
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe {
libc::bind(
@ -265,6 +267,7 @@ impl VsockSocket {
..Default::default()
};
// SAFETY:
// Safe because this just connects a vsock socket, and the return value is checked.
let ret = unsafe {
libc::connect(
@ -282,6 +285,7 @@ impl VsockSocket {
}
pub fn listen(self) -> io::Result<VsockListener> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { libc::listen(self.fd, 1) };
if ret < 0 {
@ -295,8 +299,9 @@ impl VsockSocket {
pub fn local_port(&self) -> io::Result<u32> {
let mut svm: sockaddr_vm = Default::default();
// Safe because we give a valid pointer for addrlen and check the length.
let mut addrlen = size_of::<sockaddr_vm>() as socklen_t;
// SAFETY:
// Safe because we give a valid pointer for addrlen and check the length.
let ret = unsafe {
// Get the socket address that was actually bound.
libc::getsockname(
@ -317,6 +322,7 @@ impl VsockSocket {
}
pub fn try_clone(&self) -> io::Result<Self> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let dup_fd = unsafe { libc::fcntl(self.fd, libc::F_DUPFD_CLOEXEC, 0) };
if dup_fd < 0 {
@ -327,6 +333,7 @@ impl VsockSocket {
}
pub fn set_nonblocking(&mut self, nonblocking: bool) -> io::Result<()> {
// SAFETY:
// Safe because the fd is valid and owned by this stream.
unsafe { set_nonblocking(self.fd, nonblocking) }
}
@ -348,6 +355,7 @@ impl AsRawFd for VsockSocket {
impl Drop for VsockSocket {
fn drop(&mut self) {
// SAFETY:
// Safe because this doesn't modify any memory and we are the only
// owner of the file descriptor.
unsafe { libc::close(self.fd) };
@ -382,6 +390,7 @@ impl VsockStream {
impl io::Read for VsockStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// SAFETY:
// Safe because this will only modify the contents of |buf| and we check the return value.
let ret = unsafe {
libc::read(
@ -400,6 +409,7 @@ impl io::Read for VsockStream {
impl io::Write for VsockStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe {
libc::write(
@ -459,8 +469,9 @@ impl VsockListener {
pub fn accept(&self) -> io::Result<(VsockStream, SocketAddr)> {
let mut svm: sockaddr_vm = Default::default();
// Safe because this will only modify |svm| and we check the return value.
let mut socklen: socklen_t = size_of::<sockaddr_vm>() as socklen_t;
// SAFETY:
// Safe because this will only modify |svm| and we check the return value.
let fd = unsafe {
libc::accept4(
self.sock.as_raw_fd(),

View file

@ -43,6 +43,7 @@ pub fn clone_descriptor(descriptor: &dyn AsRawDescriptor) -> Result<RawDescripto
/// `fd`. The cloned fd will have the `FD_CLOEXEC` flag set but will not share any other file
/// descriptor flags with `fd`.
fn clone_fd(fd: &dyn AsRawFd) -> Result<RawFd> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_DUPFD_CLOEXEC, 0) };
if ret < 0 {
@ -60,6 +61,7 @@ pub fn clear_descriptor_cloexec<A: AsRawDescriptor>(fd_owner: &A) -> Result<()>
/// Clears CLOEXEC flag on fd
fn clear_fd_cloexec<A: AsRawFd>(fd_owner: &A) -> Result<()> {
let fd = fd_owner.as_raw_fd();
// SAFETY:
// Safe because fd is read only.
let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) };
if flags == -1 {
@ -67,6 +69,7 @@ fn clear_fd_cloexec<A: AsRawFd>(fd_owner: &A) -> Result<()> {
}
let masked_flags = flags & !libc::FD_CLOEXEC;
// SAFETY:
// Safe because this has no side effect(s) on the current process.
if masked_flags != flags && unsafe { libc::fcntl(fd, libc::F_SETFD, masked_flags) } == -1 {
errno_result()
@ -77,6 +80,8 @@ fn clear_fd_cloexec<A: AsRawFd>(fd_owner: &A) -> Result<()> {
impl Drop for SafeDescriptor {
fn drop(&mut self) {
// SAFETY:
// Safe because descriptor is valid.
let _ = unsafe { libc::close(self.descriptor) };
}
}
@ -101,6 +106,7 @@ impl SafeDescriptor {
/// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will
/// share the same underlying count within the kernel.
pub fn try_clone(&self) -> Result<SafeDescriptor> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let descriptor = unsafe { libc::fcntl(self.descriptor, libc::F_DUPFD_CLOEXEC, 0) };
if descriptor < 0 {
@ -113,6 +119,7 @@ impl SafeDescriptor {
impl From<SafeDescriptor> for File {
fn from(s: SafeDescriptor) -> File {
// SAFETY:
// Safe because we own the SafeDescriptor at this point.
unsafe { File::from_raw_fd(s.into_raw_descriptor()) }
}
@ -120,6 +127,7 @@ impl From<SafeDescriptor> for File {
impl From<SafeDescriptor> for TcpListener {
fn from(s: SafeDescriptor) -> Self {
// SAFETY:
// Safe because we own the SafeDescriptor at this point.
unsafe { Self::from_raw_fd(s.into_raw_descriptor()) }
}
@ -127,6 +135,7 @@ impl From<SafeDescriptor> for TcpListener {
impl From<SafeDescriptor> for TcpStream {
fn from(s: SafeDescriptor) -> Self {
// SAFETY:
// Safe because we own the SafeDescriptor at this point.
unsafe { Self::from_raw_fd(s.into_raw_descriptor()) }
}
@ -134,6 +143,7 @@ impl From<SafeDescriptor> for TcpStream {
impl From<SafeDescriptor> for UnixStream {
fn from(s: SafeDescriptor) -> Self {
// SAFETY:
// Safe because we own the SafeDescriptor at this point.
unsafe { Self::from_raw_fd(s.into_raw_descriptor()) }
}

View file

@ -16,17 +16,24 @@ use crate::syscall;
///
/// Returns an error if the OS indicates the flags can't be retrieved.
fn get_fd_flags(fd: RawFd) -> Result<c_int> {
// Safe because no third parameter is expected and we check the return result.
syscall!(unsafe { fcntl(fd, F_GETFL) })
syscall!(
// SAFETY:
// Safe because no third parameter is expected and we check the return result.
unsafe { fcntl(fd, F_GETFL) }
)
}
/// Sets the file flags set for the given `RawFD`.
///
/// Returns an error if the OS indicates the flags can't be retrieved.
fn set_fd_flags(fd: RawFd, flags: c_int) -> Result<()> {
// Safe because we supply the third parameter and we check the return result.
// fcntlt is trusted not to modify the memory of the calling process.
syscall!(unsafe { fcntl(fd, F_SETFL, flags) }).map(|_| ())
syscall!(
// SAFETY:
// Safe because we supply the third parameter and we check the return result.
// fcntlt is trusted not to modify the memory of the calling process.
unsafe { fcntl(fd, F_SETFL, flags) }
)
.map(|_| ())
}
/// Performs a logical OR of the given flags with the FD's flags, setting the given bits for the

View file

@ -31,6 +31,7 @@ macro_rules! volatile_impl {
($ty:ty) => {
impl FileReadWriteVolatile for $ty {
fn read_volatile(&mut self, slice: $crate::VolatileSlice) -> std::io::Result<usize> {
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
@ -58,6 +59,7 @@ macro_rules! volatile_impl {
return Ok(0);
}
// SAFETY:
// Safe because only bytes inside the buffers are accessed and the kernel is
// expected to handle arbitrary memory for I/O.
let ret = unsafe {
@ -75,6 +77,7 @@ macro_rules! volatile_impl {
}
fn write_volatile(&mut self, slice: $crate::VolatileSlice) -> std::io::Result<usize> {
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
@ -102,6 +105,7 @@ macro_rules! volatile_impl {
return Ok(0);
}
// SAFETY:
// Safe because only bytes inside the buffers are accessed and the kernel is
// expected to handle arbitrary memory for I/O.
let ret = unsafe {
@ -130,6 +134,7 @@ macro_rules! volatile_at_impl {
slice: $crate::VolatileSlice,
offset: u64,
) -> std::io::Result<usize> {
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
@ -160,6 +165,7 @@ macro_rules! volatile_at_impl {
return Ok(0);
}
// SAFETY:
// Safe because only bytes inside the buffers are accessed and the kernel is
// expected to handle arbitrary memory for I/O.
let ret = unsafe {
@ -182,6 +188,7 @@ macro_rules! volatile_at_impl {
slice: $crate::VolatileSlice,
offset: u64,
) -> std::io::Result<usize> {
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
@ -212,6 +219,7 @@ macro_rules! volatile_at_impl {
return Ok(0);
}
// SAFETY:
// Safe because only bytes inside the buffers are accessed and the kernel is
// expected to handle arbitrary memory for I/O.
let ret = unsafe {

View file

@ -189,6 +189,7 @@ mod tests {
libc::__error()
}
// SAFETY: trivially safe
unsafe {
*errno_location() = e;
}

View file

@ -81,9 +81,11 @@ pub(in crate::sys) fn socket(
sock_type: c_int,
protocol: c_int,
) -> io::Result<SafeDescriptor> {
// SAFETY:
// Safe socket initialization since we handle the returned error.
match unsafe { libc::socket(domain, sock_type, protocol) } {
-1 => Err(io::Error::last_os_error()),
// SAFETY:
// Safe because we own the file descriptor.
fd => Ok(unsafe { SafeDescriptor::from_raw_descriptor(fd) }),
}
@ -95,16 +97,20 @@ pub(in crate::sys) fn socketpair(
protocol: c_int,
) -> io::Result<(SafeDescriptor, SafeDescriptor)> {
let mut fds = [0, 0];
// SAFETY:
// Safe because we give enough space to store all the fds and we check the return value.
match unsafe { libc::socketpair(domain, sock_type, protocol, fds.as_mut_ptr()) } {
-1 => Err(io::Error::last_os_error()),
// Safe because we own the file descriptors.
_ => Ok(unsafe {
(
SafeDescriptor::from_raw_descriptor(fds[0]),
SafeDescriptor::from_raw_descriptor(fds[1]),
)
}),
_ => Ok(
// SAFETY:
// Safe because we own the file descriptors.
unsafe {
(
SafeDescriptor::from_raw_descriptor(fds[0]),
SafeDescriptor::from_raw_descriptor(fds[1]),
)
},
),
}
}
@ -130,6 +136,7 @@ impl TcpSocket {
let ret = match sockaddr {
SocketAddr::V4(a) => {
let sin = sockaddrv4_to_lib_c(&a);
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
unsafe {
libc::bind(
@ -141,6 +148,7 @@ impl TcpSocket {
}
SocketAddr::V6(a) => {
let sin6 = sockaddrv6_to_lib_c(&a);
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
unsafe {
libc::bind(
@ -169,6 +177,7 @@ impl TcpSocket {
let ret = match sockaddr {
SocketAddr::V4(a) => {
let sin = sockaddrv4_to_lib_c(&a);
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
unsafe {
libc::connect(
@ -180,6 +189,7 @@ impl TcpSocket {
}
SocketAddr::V6(a) => {
let sin6 = sockaddrv6_to_lib_c(&a);
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
unsafe {
libc::connect(
@ -200,6 +210,7 @@ impl TcpSocket {
}
pub fn listen(self) -> io::Result<TcpListener> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { libc::listen(self.as_raw_descriptor(), 1) };
if ret < 0 {
@ -216,8 +227,9 @@ impl TcpSocket {
InetVersion::V4 => {
let mut sin = sockaddrv4_to_lib_c(&SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0));
// Safe because we give a valid pointer for addrlen and check the length.
let mut addrlen = size_of::<sockaddr_in>() as socklen_t;
// SAFETY:
// Safe because we give a valid pointer for addrlen and check the length.
let ret = unsafe {
// Get the socket address that was actually bound.
libc::getsockname(
@ -244,8 +256,9 @@ impl TcpSocket {
0,
));
// Safe because we give a valid pointer for addrlen and check the length.
let mut addrlen = size_of::<sockaddr_in6>() as socklen_t;
// SAFETY:
// Safe because we give a valid pointer for addrlen and check the length.
let ret = unsafe {
// Get the socket address that was actually bound.
libc::getsockname(
@ -279,6 +292,7 @@ pub(in crate::sys) fn sun_path_offset() -> usize {
// Prefer 0 to null() so that we do not need to subtract from the `sub_path` pointer.
#[allow(clippy::zero_ptr)]
let addr = 0 as *const libc::sockaddr_un;
// SAFETY:
// Safe because we only use the dereference to create a pointer to the desired field in
// calculating the offset.
unsafe { &(*addr).sun_path as *const _ as usize }
@ -302,6 +316,7 @@ impl UnixSeqpacket {
pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let descriptor = socket(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0)?;
let (addr, len) = sockaddr_un(path.as_ref())?;
// SAFETY:
// Safe connect since we handle the error and use the right length generated from
// `sockaddr_un`.
unsafe {
@ -325,6 +340,8 @@ impl UnixSeqpacket {
/// Gets the number of bytes that can be read from this socket without blocking.
pub fn get_readable_bytes(&self) -> io::Result<usize> {
let mut byte_count = 0i32;
// SAFETY:
// Safe because self has valid raw descriptor and return value are checked.
let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONREAD, &mut byte_count) };
if ret < 0 {
Err(io::Error::last_os_error())
@ -345,6 +362,7 @@ impl UnixSeqpacket {
#[cfg(debug_assertions)]
let buf = &mut 0 as *mut _ as *mut _;
// SAFETY:
// This form of recvfrom doesn't modify any data because all null pointers are used. We only
// use the return value and check for errors on an FD owned by this structure.
let ret = unsafe {
@ -375,6 +393,7 @@ impl UnixSeqpacket {
/// # Errors
/// Returns error when `libc::write` failed.
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
// SAFETY:
// Safe since we make sure the input `count` == `buf.len()` and handle the returned error.
unsafe {
let ret = libc::write(
@ -401,6 +420,7 @@ impl UnixSeqpacket {
/// # Errors
/// Returns error when `libc::read` failed.
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
// SAFETY:
// Safe since we make sure the input `count` == `buf.len()` and handle the returned error.
unsafe {
let ret = libc::read(
@ -466,6 +486,7 @@ impl UnixSeqpacket {
tv_usec: 0,
},
};
// SAFETY:
// Safe because we own the fd, and the length of the pointer's data is the same as the
// passed in length parameter. The level argument is valid, the kind is assumed to be valid,
// and the return value is checked.
@ -498,6 +519,7 @@ impl UnixSeqpacket {
/// Sets the blocking mode for this socket.
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as libc::c_int;
// SAFETY:
// Safe because the return value is checked, and this ioctl call sets the nonblocking mode
// and does not continue holding the file descriptor after the call.
let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONBIO, &mut nonblocking) };
@ -575,6 +597,7 @@ impl UnixSeqpacketListener {
.expect("fd should be an integer");
let mut result: c_int = 0;
let mut result_len = size_of::<c_int>() as libc::socklen_t;
// SAFETY: Safe because fd and other args are valid and the return value is checked.
let ret = unsafe {
libc::getsockopt(
fd,
@ -593,6 +616,7 @@ impl UnixSeqpacketListener {
"specified descriptor is not a listening socket",
));
}
// SAFETY:
// Safe because we validated the socket file descriptor.
let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(fd) };
return Ok(UnixSeqpacketListener {
@ -604,6 +628,7 @@ impl UnixSeqpacketListener {
let descriptor = socket(libc::AF_UNIX, libc::SOCK_SEQPACKET, 0)?;
let (addr, len) = sockaddr_un(path.as_ref())?;
// SAFETY:
// Safe connect since we handle the error and use the right length generated from
// `sockaddr_un`.
unsafe {
@ -638,6 +663,7 @@ impl UnixSeqpacketListener {
let elapsed = Instant::now().saturating_duration_since(start);
let remaining = timeout.checked_sub(elapsed).unwrap_or(Duration::ZERO);
let cur_timeout_ms = i32::try_from(remaining.as_millis()).unwrap_or(i32::MAX);
// SAFETY:
// Safe because we give a valid pointer to a list (of 1) FD and we check
// the return value.
match unsafe { libc::poll(&mut fds, 1, cur_timeout_ms) }.cmp(&0) {
@ -665,6 +691,7 @@ impl UnixSeqpacketListener {
- &addr.sun_family as *const _ as usize)
as libc::socklen_t;
let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
// SAFETY:
// Safe because the length given matches the length of the data of the given pointer, and we
// check the return value.
let ret = unsafe {
@ -699,6 +726,7 @@ impl UnixSeqpacketListener {
/// Sets the blocking mode for this socket.
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as libc::c_int;
// SAFETY:
// Safe because the return value is checked, and this ioctl call sets the nonblocking mode
// and does not continue holding the file descriptor after the call.
let ret = unsafe { libc::ioctl(self.as_raw_descriptor(), libc::FIONBIO, &mut nonblocking) };

View file

@ -100,6 +100,7 @@ impl CmsgBuffer {
} else {
CmsgBuffer::Heap(
vec![
// SAFETY:
// Safe because cmsghdr only contains primitive types for
// which zero initialization is valid.
unsafe { MaybeUninit::<cmsghdr>::zeroed().assume_init() };
@ -125,6 +126,7 @@ fn raw_sendmsg(fd: RawFd, iovec: &[iovec], out_fds: &[RawFd]) -> io::Result<usiz
let cmsg_capacity = CMSG_SPACE(size_of_val(out_fds));
let mut cmsg_buffer = CmsgBuffer::with_capacity(cmsg_capacity);
// SAFETY:
// msghdr on musl has private __pad1 and __pad2 fields that cannot be initialized.
// Safe because msghdr only contains primitive types for which zero
// initialization is valid.
@ -133,6 +135,7 @@ fn raw_sendmsg(fd: RawFd, iovec: &[iovec], out_fds: &[RawFd]) -> io::Result<usiz
msg.msg_iovlen = iovec.len().try_into().unwrap();
if !out_fds.is_empty() {
// SAFETY:
// msghdr on musl has an extra __pad1 field, initialize the whole struct to zero.
// Safe because cmsghdr only contains primitive types for which zero
// initialization is valid.
@ -140,9 +143,12 @@ fn raw_sendmsg(fd: RawFd, iovec: &[iovec], out_fds: &[RawFd]) -> io::Result<usiz
cmsg.cmsg_len = CMSG_LEN(size_of_val(out_fds)).try_into().unwrap();
cmsg.cmsg_level = SOL_SOCKET;
cmsg.cmsg_type = SCM_RIGHTS;
// SAFETY: See call specific comments within unsafe block.
unsafe {
// SAFETY:
// Safe because cmsg_buffer was allocated to be large enough to contain cmsghdr.
write_unaligned(cmsg_buffer.as_mut_ptr(), cmsg);
// SAFETY:
// Safe because the cmsg_buffer was allocated to be large enough to hold out_fds.len()
// file descriptors.
copy_nonoverlapping(
@ -156,6 +162,7 @@ fn raw_sendmsg(fd: RawFd, iovec: &[iovec], out_fds: &[RawFd]) -> io::Result<usiz
msg.msg_controllen = cmsg_capacity.try_into().unwrap();
}
// SAFETY:
// Safe because the msghdr was properly constructed from valid (or null) pointers of the
// indicated length and we check the return value.
let write_count = unsafe { sendmsg(fd, &msg, 0) };
@ -178,6 +185,7 @@ fn raw_recvmsg(
let cmsg_capacity = CMSG_SPACE(max_fds * size_of::<RawFd>());
let mut cmsg_buffer = CmsgBuffer::with_capacity(cmsg_capacity);
// SAFETY:
// msghdr on musl has private __pad1 and __pad2 fields that cannot be initialized.
// Safe because msghdr only contains primitive types for which zero
// initialization is valid.
@ -190,6 +198,7 @@ fn raw_recvmsg(
msg.msg_controllen = cmsg_capacity.try_into().unwrap();
}
// SAFETY:
// Safe because the msghdr was properly constructed from valid (or null) pointers of the
// indicated length and we check the return value.
let total_read = unsafe { recvmsg(fd, &mut msg, 0) };
@ -205,6 +214,7 @@ fn raw_recvmsg(
let mut cmsg_ptr = msg.msg_control as *mut cmsghdr;
let mut in_fds: Vec<SafeDescriptor> = Vec::with_capacity(max_fds);
while !cmsg_ptr.is_null() {
// SAFETY:
// Safe because we checked that cmsg_ptr was non-null, and the loop is constructed such that
// that only happens when there is at least sizeof(cmsghdr) space after the pointer to read.
let cmsg = unsafe { (cmsg_ptr as *mut cmsghdr).read_unaligned() };
@ -378,6 +388,7 @@ pub unsafe trait AsIobuf: Sized {
fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec];
}
// SAFETY:
// Safe because there are no other mutable references to the memory described by `IoSlice` and it is
// guaranteed to be ABI-compatible with `iovec`.
unsafe impl<'a> AsIobuf for IoSlice<'a> {
@ -389,16 +400,19 @@ unsafe impl<'a> AsIobuf for IoSlice<'a> {
}
fn as_iobuf_slice(bufs: &[Self]) -> &[iovec] {
// SAFETY:
// Safe because `IoSlice` is guaranteed to be ABI-compatible with `iovec`.
unsafe { slice::from_raw_parts(bufs.as_ptr() as *const iovec, bufs.len()) }
}
fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec] {
// SAFETY:
// Safe because `IoSlice` is guaranteed to be ABI-compatible with `iovec`.
unsafe { slice::from_raw_parts_mut(bufs.as_mut_ptr() as *mut iovec, bufs.len()) }
}
}
// SAFETY:
// Safe because there are no other references to the memory described by `IoSliceMut` and it is
// guaranteed to be ABI-compatible with `iovec`.
unsafe impl<'a> AsIobuf for IoSliceMut<'a> {
@ -410,16 +424,19 @@ unsafe impl<'a> AsIobuf for IoSliceMut<'a> {
}
fn as_iobuf_slice(bufs: &[Self]) -> &[iovec] {
// SAFETY:
// Safe because `IoSliceMut` is guaranteed to be ABI-compatible with `iovec`.
unsafe { slice::from_raw_parts(bufs.as_ptr() as *const iovec, bufs.len()) }
}
fn as_iobuf_mut_slice(bufs: &mut [Self]) -> &mut [iovec] {
// SAFETY:
// Safe because `IoSliceMut` is guaranteed to be ABI-compatible with `iovec`.
unsafe { slice::from_raw_parts_mut(bufs.as_mut_ptr() as *mut iovec, bufs.len()) }
}
}
// SAFETY:
// Safe because volatile slices are only ever accessed with other volatile interfaces and the
// pointer and size are guaranteed to be accurate.
unsafe impl<'a> AsIobuf for VolatileSlice<'a> {
@ -455,6 +472,7 @@ mod tests {
($len:literal) => {
assert_eq!(
CMSG_SPACE(size_of::<[RawFd; $len]>()) as libc::c_uint,
// SAFETY: trivially safe
unsafe { libc::CMSG_SPACE(size_of::<[RawFd; $len]>() as libc::c_uint) }
);
};
@ -530,6 +548,7 @@ mod tests {
assert_ne!(file.as_raw_fd(), s2.as_raw_descriptor());
assert_ne!(file.as_raw_fd(), evt.as_raw_descriptor());
// SAFETY: trivially safe
file.write_all(unsafe { from_raw_parts(&1203u64 as *const u64 as *const u8, 8) })
.expect("failed to write to sent fd");
@ -564,6 +583,7 @@ mod tests {
let mut file = File::from(files.swap_remove(0));
// SAFETY: trivially safe
file.write_all(unsafe { from_raw_parts(&1203u64 as *const u64 as *const u8, 8) })
.expect("failed to write to sent fd");

View file

@ -91,6 +91,7 @@ impl StreamChannel {
// (see sys::decode_error_kind) on Windows, so we preserve this behavior on POSIX even
// though one could argue ErrorKind::UnexpectedEof is a closer match to the true error.
SocketType::Message(sock) => {
// SAFETY:
// Safe because buf is valid, we pass buf's size to recv to bound the return
// length, and we check the return code.
let retval = unsafe {

View file

@ -12,6 +12,7 @@ use crate::Result;
/// Safe wrapper for `sysconf(_SC_IOV_MAX)`.
///
/// Returns the maximum number of `iovec` entries accepted by vectored I/O
/// syscalls on this host.
#[inline(always)]
pub fn iov_max() -> usize {
    // SAFETY: sysconf(3) only reads a system configuration value; the call
    // takes no pointers and touches no shared state, so it is trivially sound.
    let max = unsafe { sysconf(_SC_IOV_MAX) };
    max as usize
}
@ -19,6 +20,7 @@ pub fn iov_max() -> usize {
/// Safe wrapper for `sysconf(_SC_PAGESIZE)`.
///
/// Returns the size in bytes of a memory page on this host.
#[inline(always)]
pub fn pagesize() -> usize {
    // SAFETY: sysconf(3) only reads a system configuration value; the call
    // takes no pointers and touches no shared state, so it is trivially sound.
    let page = unsafe { sysconf(_SC_PAGESIZE) };
    page as usize
}
@ -26,6 +28,7 @@ pub fn pagesize() -> usize {
/// Returns the number of online logical cores on the system.
///
/// NOTE(review): `_SC_NPROCESSORS_CONF` reports the number of *configured*
/// processors; `_SC_NPROCESSORS_ONLN` is the *online* count. The doc comment
/// and the flag disagree — confirm which is intended before relying on this
/// during CPU hotplug.
#[inline(always)]
pub fn number_of_logical_cores() -> Result<usize> {
    // SAFETY: `_SC_NPROCESSORS_CONF` is a sysconf name supported by the host,
    // and the call passes no pointers, so it cannot violate memory safety.
    let cores = unsafe { sysconf(_SC_NPROCESSORS_CONF) };
    Ok(cores as usize)
}

View file

@ -25,6 +25,7 @@ impl Console {
impl Read for Console {
fn read(&mut self, out: &mut [u8]) -> Result<usize> {
let mut num_of_bytes_read: u32 = 0;
// SAFETY:
// Safe because `out` is guaranteed to be a valid mutable array
// and `num_of_bytes_read` is a valid u32.
let res = unsafe {

View file

@ -46,6 +46,7 @@ impl PartialEq for SafeDescriptor {
impl Drop for SafeDescriptor {
    /// Closes the wrapped handle when the `SafeDescriptor` goes out of scope.
    fn drop(&mut self) {
        // SAFETY: this drop runs exactly once for the wrapped handle, so the
        // close cannot be a double-close. The BOOL result is deliberately
        // discarded: there is no reasonable recovery from a failed close here.
        let _ = unsafe { CloseHandle(self.descriptor) };
    }
}
@ -61,11 +62,13 @@ static mut KERNELBASE_LIBRARY: MaybeUninit<HMODULE> = MaybeUninit::uninit();
fn compare_object_handles(first: RawHandle, second: RawHandle) -> bool {
KERNELBASE_INIT.call_once(|| {
// SAFETY: trivially safe
unsafe {
*KERNELBASE_LIBRARY.as_mut_ptr() =
libloaderapi::LoadLibraryW(win32_wide_string("Kernelbase").as_ptr());
};
});
// SAFETY: the return value is checked.
let handle = unsafe { KERNELBASE_LIBRARY.assume_init() };
if handle.is_null() {
return first == second;
@ -73,11 +76,13 @@ fn compare_object_handles(first: RawHandle, second: RawHandle) -> bool {
let addr = CString::new("CompareObjectHandles").unwrap();
let addr_ptr = addr.as_ptr();
// SAFETY: the return value is checked.
let symbol = unsafe { libloaderapi::GetProcAddress(handle, addr_ptr) };
if symbol.is_null() {
return first == second;
}
// SAFETY: trivially safe
let func = unsafe {
std::mem::transmute::<
*mut winapi::shared::minwindef::__some_function,
@ -102,6 +107,7 @@ impl SafeDescriptor {
/// Clones this descriptor, internally creating a new descriptor. The new SafeDescriptor will
/// share the same underlying count within the kernel.
pub fn try_clone(&self) -> Result<SafeDescriptor> {
// SAFETY:
// Safe because `duplicate_handle` will return a valid handle, or at the very least error
// out.
Ok(unsafe {
@ -110,15 +116,19 @@ impl SafeDescriptor {
}
}
// SAFETY:
// On Windows, RawHandles are represented by raw pointers but are not used as such in
// rust code, and are therefore safe to send between threads.
unsafe impl Send for SafeDescriptor {}
// SAFETY: See comments for impl Send
unsafe impl Sync for SafeDescriptor {}
// SAFETY:
// On Windows, RawHandles are represented by raw pointers but are opaque to the
// userspace and cannot be dereferenced by rust code, and are therefore safe to
// send between threads.
unsafe impl Send for Descriptor {}
// SAFETY: See comments for impl Send
unsafe impl Sync for Descriptor {}
macro_rules! AsRawDescriptor {
@ -134,6 +144,7 @@ macro_rules! AsRawDescriptor {
macro_rules! FromRawDescriptor {
($name:ident) => {
impl FromRawDescriptor for $name {
// SAFETY: It is caller's responsibility to ensure that the descriptor is valid.
unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self {
return $name::from_raw_handle(descriptor);
}
@ -171,6 +182,7 @@ fn clone_equality() {
use crate::Event;
let evt = Event::new().unwrap();
// SAFETY: Given evt is created above and is valid.
let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(evt.into_raw_descriptor()) };
assert_eq!(descriptor, descriptor);
@ -181,6 +193,7 @@ fn clone_equality() {
);
let evt2 = Event::new().unwrap();
// SAFETY: Given evt2 is created above and is valid.
let another = unsafe { SafeDescriptor::from_raw_descriptor(evt2.into_raw_descriptor()) };
assert_ne!(descriptor, another);

View file

@ -73,6 +73,7 @@ impl EventExt for Event {
impl PlatformEvent {
pub fn new_with_manual_reset(manual_reset: bool) -> Result<PlatformEvent> {
// SAFETY: Safe because return value is checked.
let handle = unsafe {
CreateEventA(
SecurityAttributes::new_with_security_descriptor(
@ -89,12 +90,15 @@ impl PlatformEvent {
return errno_result();
}
Ok(PlatformEvent {
event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) },
event_handle:
// SAFETY: Safe because the descriptor is valid.
unsafe { SafeDescriptor::from_raw_descriptor(handle) },
})
}
pub fn create_event_with_name(name: &str) -> Result<PlatformEvent> {
let event_str = CString::new(String::from(name)).unwrap();
// SAFETY: Safe because return value is checked.
let handle = unsafe {
CreateEventA(
SecurityAttributes::new_with_security_descriptor(
@ -111,7 +115,9 @@ impl PlatformEvent {
return errno_result();
}
Ok(PlatformEvent {
event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) },
event_handle:
// SAFETY: Safe because the descriptor is valid.
unsafe { SafeDescriptor::from_raw_descriptor(handle) },
})
}
@ -122,17 +128,21 @@ impl PlatformEvent {
pub fn open(name: &str) -> Result<PlatformEvent> {
let event_str = CString::new(String::from(name)).unwrap();
// SAFETY: Safe because return value is checked.
let handle = unsafe { OpenEventA(EVENT_MODIFY_STATE, FALSE, event_str.as_ptr()) };
if handle.is_null() {
return errno_result();
}
Ok(PlatformEvent {
event_handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) },
event_handle:
// SAFETY: Safe because the descriptor is valid.
unsafe { SafeDescriptor::from_raw_descriptor(handle) },
})
}
/// See `Event::signal`.
pub fn signal(&self) -> Result<()> {
// SAFETY: Safe because the descriptor is valid.
let event_result = unsafe { SetEvent(self.event_handle.as_raw_descriptor()) };
if event_result == 0 {
return errno_result();
@ -141,6 +151,7 @@ impl PlatformEvent {
}
pub fn reset(&self) -> Result<()> {
// SAFETY: Safe because the descriptor is valid.
let res = unsafe { ResetEvent(self.event_handle.as_raw_descriptor()) };
if res == 0 {
errno_result()
@ -156,6 +167,7 @@ impl PlatformEvent {
None => INFINITE,
};
// SAFETY:
// Safe because we pass an event object handle owned by this PlatformEvent.
let wait_result = match unsafe {
WaitForSingleObject(self.event_handle.as_raw_descriptor(), milliseconds)
@ -189,6 +201,7 @@ impl PlatformEvent {
pub fn try_clone(&self) -> Result<PlatformEvent> {
let mut event_clone: HANDLE = MaybeUninit::uninit().as_mut_ptr();
// SAFETY: Safe because return value is checked.
let duplicate_result = unsafe {
DuplicateHandle(
GetCurrentProcess(),
@ -203,7 +216,10 @@ impl PlatformEvent {
if duplicate_result == 0 {
return errno_result();
}
Ok(unsafe { PlatformEvent::from_raw_descriptor(event_clone) })
Ok(
// SAFETY: Safe because the descriptor is valid.
unsafe { PlatformEvent::from_raw_descriptor(event_clone) },
)
}
}
@ -214,6 +230,7 @@ impl AsRawDescriptor for PlatformEvent {
}
impl FromRawDescriptor for PlatformEvent {
// SAFETY: Safe because the descriptor is expected to be valid.
unsafe fn from_raw_descriptor(descriptor: RawDescriptor) -> Self {
PlatformEvent {
event_handle: SafeDescriptor::from_raw_descriptor(descriptor),
@ -245,10 +262,12 @@ impl From<SafeDescriptor> for PlatformEvent {
}
}
// SAFETY:
// PlatformEvent is safe for send & Sync despite containing a raw handle to its
// file mapping object. As long as the instance to PlatformEvent stays alive, this
// pointer will be a valid handle.
unsafe impl Send for PlatformEvent {}
// SAFETY: See comments for impl Send
unsafe impl Sync for PlatformEvent {}
#[cfg(test)]
@ -277,10 +296,12 @@ mod tests {
evt.signal().unwrap();
// Wait for the notification.
// SAFETY: Safe because return value is checked.
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), INFINITE) };
assert_eq!(result, WAIT_OBJECT_0);
// The notification should have reset since we already received it.
// SAFETY: Safe because return value is checked.
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) };
assert_eq!(result, WAIT_TIMEOUT);
}
@ -291,15 +312,18 @@ mod tests {
evt.signal().unwrap();
// Wait for the notification.
// SAFETY: Safe because return value is checked.
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), INFINITE) };
assert_eq!(result, WAIT_OBJECT_0);
// The notification should still be active because read wasn't called.
// SAFETY: Safe because return value is checked.
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) };
assert_eq!(result, WAIT_OBJECT_0);
// Read and ensure the notification has cleared.
evt.wait().expect("Failed to read event.");
// SAFETY: Safe because return value is checked.
let result = unsafe { WaitForSingleObject(evt.as_raw_descriptor(), 0) };
assert_eq!(result, WAIT_TIMEOUT);
}

View file

@ -15,9 +15,10 @@ use crate::WriteZeroesAt;
impl FileReadWriteVolatile for File {
fn read_volatile(&mut self, slice: VolatileSlice) -> Result<usize> {
let mut bytes = 0;
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let mut bytes = 0;
let ret = unsafe {
winapi::um::fileapi::ReadFile(
self.as_raw_descriptor(),
@ -55,9 +56,10 @@ impl FileReadWriteVolatile for File {
}
fn write_volatile(&mut self, slice: VolatileSlice) -> Result<usize> {
let mut bytes = 0;
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let mut bytes = 0;
let ret = unsafe {
winapi::um::fileapi::WriteFile(
self.as_raw_descriptor(),
@ -101,10 +103,11 @@ impl FileReadWriteAtVolatile for File {
// The unix implementation uses pread, which doesn't modify the file
// pointer. Windows doesn't have an option for that, unfortunately.
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let mut bytes = 0;
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
let mut overlapped: winapi::um::minwinbase::OVERLAPPED = std::mem::zeroed();
overlapped.u.s_mut().Offset = offset as u32;
@ -149,10 +152,11 @@ impl FileReadWriteAtVolatile for File {
// The unix implementation uses pwrite, which doesn't modify the file
// pointer. Windows doesn't have an option for that, unfortunately.
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let mut bytes = 0;
// SAFETY:
// Safe because only bytes inside the slice are accessed and the kernel is expected
// to handle arbitrary memory for I/O.
let ret = unsafe {
let mut overlapped: winapi::um::minwinbase::OVERLAPPED = std::mem::zeroed();
overlapped.u.s_mut().Offset = offset as u32;

View file

@ -34,6 +34,7 @@ pub fn open_file_or_duplicate<P: AsRef<Path>>(path: P, options: &OpenOptions) ->
/// # Safety
/// handle *must* be File. We accept all AsRawDescriptors for convenience.
pub fn set_sparse_file<T: AsRawDescriptor>(handle: &T) -> io::Result<()> {
// SAFETY:
// Safe because we check the return value and handle is guaranteed to be a
// valid file handle by the caller.
let result = unsafe {
@ -60,7 +61,7 @@ struct FileAllocatedRangeBuffer {
/// # Safety
/// Within this scope it is not possible to use LARGE_INTEGER as something else.
fn large_integer_as_u64(lint: &LARGE_INTEGER) -> u64 {
// # Safety
// SAFETY:
// Safe because we use LARGE_INTEGER only as i64 or as u64 within this scope.
unsafe { *lint.QuadPart() as u64 }
}
@ -90,7 +91,7 @@ pub fn get_allocated_ranges<T: AsRawDescriptor>(descriptor: &T) -> Result<Vec<Ra
let mut ranges = vec![];
let mut file_size = *LargeInteger::new(0);
// # Safety
// SAFETY:
// Safe because we check return value.
unsafe {
let failed = GetFileSizeEx(descriptor.as_raw_descriptor(), &mut file_size);
@ -114,7 +115,7 @@ pub fn get_allocated_ranges<T: AsRawDescriptor>(descriptor: &T) -> Result<Vec<Ra
loop {
let mut bytes_ret: u32 = 0;
// # Safety
// SAFETY:
// Safe because we return error on failure and all references have
// bounded lifetime.
// If the `alloc_ranges` buffer is smaller than the actual allocated ranges,

View file

@ -12,6 +12,7 @@ use crate::Result;
/// only when the emulator is in the foreground, and will persist only until the next user
/// interaction with the window
pub fn give_foregrounding_permission(process_id: DWORD) -> Result<()> {
// SAFETY:
// Safe because this API does not modify memory, and process_id remains in scope for
// the duration of the call.
match unsafe { AllowSetForegroundWindow(process_id) } {

View file

@ -372,6 +372,7 @@ mod tests {
f.sync_all().expect("Failed to sync all.");
// read the compression status
// SAFETY: safe because return value is checked.
let ecode = unsafe {
super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed)
};
@ -396,6 +397,7 @@ mod tests {
// https://github.com/rust-lang/rust/blob/master/src/libstd/sys/windows/fs.rs#L260
// For now I'm just going to leave this test as-is.
//
// SAFETY: safe because return value is checked.
let f = unsafe {
File::from_raw_handle(CreateFileW(
to_u16s(file_path).unwrap().as_ptr(),
@ -410,6 +412,7 @@ mod tests {
};
let ecode =
// SAFETY: safe because return value is checked.
unsafe { super::super::ioctl::ioctl_with_ref(&f, FSCTL_SET_COMPRESSION, &compressed) };
assert_eq!(ecode, 0);
@ -418,6 +421,7 @@ mod tests {
// is writing anything to the compressed pointer.
compressed = 0;
// SAFETY: safe because return value is checked.
let ecode = unsafe {
super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed)
};
@ -462,6 +466,7 @@ mod tests {
// https://github.com/rust-lang/rust/blob/master/src/libstd/sys/windows/fs.rs#L260
// For now I'm just going to leave this test as-is.
//
// SAFETY: safe because return value is checked.
let f = unsafe {
File::from_raw_handle(CreateFileW(
to_u16s(file_path).unwrap().as_ptr(),
@ -477,6 +482,7 @@ mod tests {
// now we call ioctl_with_val, which isn't particularly any more helpful than
// ioctl_with_ref except for the cases where the input is only a word long
// SAFETY: safe because return value is checked.
let ecode = unsafe {
super::super::ioctl::ioctl_with_val(&f, FSCTL_SET_COMPRESSION, compressed.into())
};
@ -487,6 +493,7 @@ mod tests {
// is writing anything to the compressed pointer.
compressed = 0;
// SAFETY: safe because return value is checked.
let ecode = unsafe {
super::super::ioctl::ioctl_with_mut_ref(&f, FSCTL_GET_COMPRESSION, &mut compressed)
};

View file

@ -41,6 +41,7 @@ impl dyn MappedRegion {
pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
validate_includes_range(self.size(), offset, size)?;
// SAFETY:
// Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
// are correct, and we've validated that `offset`..`offset+size` is in the range owned by
// this `MappedRegion`.
@ -68,11 +69,13 @@ pub struct MemoryMapping {
pub(crate) size: usize,
}
// SAFETY:
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
// SAFETY: See comments for impl Send
unsafe impl Sync for MemoryMapping {}
impl MemoryMapping {
@ -123,6 +126,9 @@ impl MemoryMapping {
}
}
// SAFETY:
// Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
// be unmapped until it's Dropped.
unsafe impl MappedRegion for MemoryMapping {
fn as_ptr(&self) -> *mut u8 {
self.addr as *mut u8
@ -171,6 +177,7 @@ impl<'a> MemoryMappingBuilder<'a> {
// handle for it first. That handle is then provided to Self::wrap, which
// performs the actual mmap (creating a mapped view).
//
// SAFETY:
// Safe because self.descriptor is guaranteed to be a valid handle.
let mapping_handle = unsafe {
create_file_mapping(
@ -182,6 +189,7 @@ impl<'a> MemoryMappingBuilder<'a> {
}
.map_err(Error::StdSyscallFailed)?;
// SAFETY:
// The above comment block is why the SafeDescriptor wrap is safe.
Some(unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) })
} else {
@ -219,6 +227,7 @@ impl<'a> MemoryMappingBuilder<'a> {
file_descriptor: Option<&'a dyn AsRawDescriptor>,
) -> Result<CrateMemoryMapping> {
let file_descriptor = match file_descriptor {
// SAFETY:
// Safe because `duplicate_handle` will return a handle or at least error out.
Some(descriptor) => unsafe {
Some(SafeDescriptor::from_raw_descriptor(
@ -280,6 +289,7 @@ mod tests {
let shm = SharedMemory::new("test", 1028).unwrap();
let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
let s = m.get_slice(2, 3).unwrap();
// SAFETY: trivially safe
assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
}

View file

@ -46,6 +46,7 @@ impl MemoryMapping {
/// * `size` - Size of memory region in bytes.
/// * `prot` - Protection (e.g. readable/writable) of the memory region.
pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
// SAFETY:
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
unsafe { MemoryMapping::try_mmap(None, size, prot.into(), None) }
@ -64,6 +65,7 @@ impl MemoryMapping {
file_handle: RawDescriptor,
size: usize,
) -> Result<MemoryMapping> {
// SAFETY:
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
unsafe {
@ -89,6 +91,7 @@ impl MemoryMapping {
offset: u64,
prot: Protection,
) -> Result<MemoryMapping> {
// SAFETY:
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
unsafe {
@ -198,6 +201,7 @@ impl MemoryMapping {
/// Calls FlushViewOfFile on the mapped memory range, ensuring all changes that would
/// be written to disk are written immediately
pub fn msync(&self) -> Result<()> {
// SAFETY:
// Safe because self can only be created as a successful memory mapping
unsafe {
if FlushViewOfFile(self.addr, self.size) == 0 {
@ -210,6 +214,7 @@ impl MemoryMapping {
impl Drop for MemoryMapping {
fn drop(&mut self) {
// SAFETY:
// This is safe because we MapViewOfFile the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
@ -240,6 +245,7 @@ mod tests {
#[test]
fn map_invalid_fd() {
// SAFETY: trivially safe to create an invalid File.
let descriptor = unsafe { std::fs::File::from_raw_descriptor(ptr::null_mut()) };
let res = MemoryMappingBuilder::new(1024)
.from_file(&descriptor)

View file

@ -29,7 +29,7 @@ pub struct MultiProcessMutex {
impl MultiProcessMutex {
pub fn new() -> Result<Self> {
// Trivially safe (no memory passed, error checked).
// SAFETY: Trivially safe (no memory passed, error checked).
//
// Note that we intentionally make this handle uninheritable by default via the mutex attrs.
let lock_handle = unsafe {
@ -44,6 +44,7 @@ impl MultiProcessMutex {
Err(Error::last())
} else {
Ok(Self {
// SAFETY:
// Safe because the handle is valid & we own it exclusively.
lock: unsafe { SafeDescriptor::from_raw_descriptor(lock_handle) },
})
@ -63,6 +64,7 @@ impl MultiProcessMutex {
/// Tries to lock the mutex, returning a RAII guard similar to std::sync::Mutex if we obtained
/// the lock within the timeout.
pub fn try_lock(&self, timeout_ms: u32) -> Option<MultiProcessMutexGuard> {
// SAFETY:
// Safe because the mutex handle is guaranteed to exist.
match unsafe { WaitForSingleObject(self.lock.as_raw_descriptor(), timeout_ms) } {
WAIT_OBJECT_0 => Some(MultiProcessMutexGuard { lock: &self.lock }),
@ -93,6 +95,7 @@ pub struct MultiProcessMutexGuard<'a> {
impl<'a> Drop for MultiProcessMutexGuard<'a> {
fn drop(&mut self) {
// SAFETY: We own the descriptor and it is expected to be valid.
if unsafe { ReleaseMutex(self.lock.as_raw_descriptor()) } == 0 {
panic!("Failed to unlock mutex: {:?}.", Error::last())
}

View file

@ -137,6 +137,7 @@ impl OverlappedWrapper {
}
}
// SAFETY:
// Safe because all of the contained fields may be safely sent to another thread.
unsafe impl Send for OverlappedWrapper {}
@ -242,6 +243,7 @@ impl From<&BlockingMode> for DWORD {
}
/// Sets the handle state for a named pipe in a rust friendly way.
/// # Safety
/// This is safe if the pipe handle is open.
unsafe fn set_named_pipe_handle_state(
pipe_handle: RawDescriptor,
@ -377,9 +379,10 @@ pub fn create_server_pipe(
// This sets flags so there will be an error if >1 instance (server end)
// of this pipe name is opened because we expect exactly one.
// SAFETY:
// Safe because security attributes are valid, pipe_name is valid C string,
// and we're checking the return code
let server_handle = unsafe {
// Safe because security attributes are valid, pipe_name is valid C string,
// and we're checking the return code
CreateNamedPipeA(
c_pipe_name.as_ptr(),
/* dwOpenMode= */
@ -405,6 +408,7 @@ pub fn create_server_pipe(
if server_handle == INVALID_HANDLE_VALUE {
Err(io::Error::last_os_error())
} else {
// SAFETY: Safe because server_handle is valid.
unsafe {
Ok(PipeConnection {
handle: SafeDescriptor::from_raw_descriptor(server_handle),
@ -448,12 +452,14 @@ pub fn create_client_pipe(
let mut client_mode = framing_mode.to_readmode() | DWORD::from(blocking_mode);
// SAFETY:
// Safe because client_handle's open() call did not return an error.
unsafe {
set_named_pipe_handle_state(client_handle, &mut client_mode)?;
}
Ok(PipeConnection {
// SAFETY:
// Safe because client_handle is valid
handle: unsafe { SafeDescriptor::from_raw_descriptor(client_handle) },
framing_mode: *framing_mode,
@ -604,6 +610,7 @@ impl PipeConnection {
overlapped_wrapper: &mut OverlappedWrapper,
exit_event: &Event,
) -> Result<()> {
// SAFETY:
// Safe because we are providing a valid buffer slice and also providing a valid
// overlapped struct.
match unsafe { self.read_overlapped(buf, overlapped_wrapper) } {
@ -665,6 +672,7 @@ impl PipeConnection {
pub fn get_available_byte_count(&self) -> io::Result<u32> {
let mut total_bytes_avail: DWORD = 0;
// SAFETY:
// Safe because the underlying pipe handle is guaranteed to be open, and the output values
// live at valid memory locations.
fail_if_zero!(unsafe {
@ -736,6 +744,7 @@ impl PipeConnection {
buf: &[T],
overlapped: Option<&mut OVERLAPPED>,
) -> Result<usize> {
// SAFETY:
// Safe because buf points to memory valid until the write completes and we pass a valid
// length for that memory.
unsafe {
@ -753,6 +762,7 @@ impl PipeConnection {
let mut client_mode = DWORD::from(blocking_mode) | self.framing_mode.to_readmode();
self.blocking_mode = *blocking_mode;
// SAFETY:
// Safe because the pipe has not been closed (it is managed by this object).
unsafe { set_named_pipe_handle_state(self.handle.as_raw_descriptor(), &mut client_mode) }
}
@ -827,6 +837,7 @@ impl PipeConnection {
overlapped_wrapper: &mut OverlappedWrapper,
should_block: bool,
) -> Result<()> {
// SAFETY:
// Safe because the handle is valid and we're checking the return
// code according to the documentation
//
@ -917,6 +928,7 @@ impl PipeConnection {
));
}
let mut size_transferred = 0;
// SAFETY:
// Safe as long as `overlapped_struct` isn't copied and also contains a valid event.
// Also the named pipe handle must created with `FILE_FLAG_OVERLAPPED`.
fail_if_zero!(unsafe {
@ -934,12 +946,15 @@ impl PipeConnection {
/// Cancels I/O Operations in the current process. Since `lpOverlapped` is null, this will
/// cancel all I/O requests for the file handle passed in.
pub fn cancel_io(&mut self) -> Result<()> {
fail_if_zero!(unsafe {
CancelIoEx(
self.handle.as_raw_descriptor(),
/* lpOverlapped= */ std::ptr::null_mut(),
)
});
fail_if_zero!(
// SAFETY: descriptor is valid and the return value is checked.
unsafe {
CancelIoEx(
self.handle.as_raw_descriptor(),
/* lpOverlapped= */ std::ptr::null_mut(),
)
}
);
Ok(())
}
@ -979,6 +994,7 @@ impl PipeConnection {
/// call this if you are sure the client is reading the
/// data!
pub fn flush_data_blocking(&self) -> Result<()> {
// SAFETY:
// Safe because the only buffers interacted with are
// outside of Rust memory
fail_if_zero!(unsafe { FlushFileBuffers(self.as_raw_descriptor()) });
@ -987,6 +1003,7 @@ impl PipeConnection {
/// For a server pipe, disconnect all clients, discarding any buffered data.
pub fn disconnect_clients(&self) -> Result<()> {
// SAFETY:
// Safe because we own the handle passed in and know it will remain valid for the duration
// of the call. Discarded buffers are not managed by rust.
fail_if_zero!(unsafe { DisconnectNamedPipe(self.as_raw_descriptor()) });
@ -1006,11 +1023,14 @@ impl IntoRawDescriptor for PipeConnection {
}
}
// SAFETY: Send safety is ensured by inner fields.
unsafe impl Send for PipeConnection {}
// SAFETY: Sync safety is ensured by inner fields.
unsafe impl Sync for PipeConnection {}
impl io::Read for PipeConnection {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// SAFETY:
// This is safe because PipeConnection::read is always safe for u8
unsafe { PipeConnection::read(self, buf) }
}
@ -1118,16 +1138,16 @@ impl MultiPartMessagePipe {
Ok(())
}
/// # Safety
/// `buf` and `overlapped_wrapper` will be in use for the duration of
/// the overlapped operation. These must not be reused and must live until
/// after `get_overlapped_result()` has been called which is done right
/// after this call.
fn write_overlapped_blocking_message_internal<T: PipeSendable>(
pipe: &mut PipeConnection,
buf: &[T],
overlapped_wrapper: &mut OverlappedWrapper,
) -> Result<()> {
// Safety:
// `buf` and `overlapped_wrapper` will be in use for the duration of
// the overlapped operation. These must not be reused and must live until
// after `get_overlapped_result()` has been called which is done right
// after this call.
unsafe {
pipe.write_overlapped(buf, overlapped_wrapper)?;
}
@ -1229,6 +1249,7 @@ mod tests {
let (p1, p2) = pair(&FramingMode::Byte, &BlockingMode::Wait, 0).unwrap();
// Test both forward and reverse direction since the underlying APIs are a bit asymmetrical
// SAFETY: trivially safe with pipe created and return value checked.
unsafe {
for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() {
println!("{}", dir);
@ -1284,6 +1305,7 @@ mod tests {
let (p1, p2) = pair(&FramingMode::Message, &BlockingMode::Wait, 0).unwrap();
// Test both forward and reverse direction since the underlying APIs are a bit asymmetrical
// SAFETY: trivially safe with pipe created and return value checked.
unsafe {
for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() {
println!("{}", dir);
@ -1310,6 +1332,7 @@ mod tests {
let mut recv_buffer: [u8; 1] = [0; 1];
// Test both forward and reverse direction since the underlying APIs are a bit asymmetrical
// SAFETY: trivially safe with PipeConnection created and return value checked.
unsafe {
for (dir, sender, receiver) in [("1 -> 2", &p1, &p2), ("2 -> 1", &p2, &p1)].iter() {
println!("{}", dir);
@ -1362,6 +1385,7 @@ mod tests {
)
.unwrap();
// SAFETY:
// Safe because `read_overlapped` can be called since overlapped struct is created.
unsafe {
let mut p1_overlapped_wrapper =
@ -1419,9 +1443,9 @@ mod tests {
let res = unsafe { p1.write_overlapped(&data, &mut overlapped_wrapper) };
assert!(res.is_ok());
// SAFETY: safe because we know the unsafe re-use of overlapped wrapper
// will error out.
let res =
// SAFETY: safe because we know the unsafe re-use of overlapped wrapper
// will error out.
unsafe { p2.write_overlapped(&[75, 77, 54, 82, 76, 65], &mut overlapped_wrapper) };
assert!(res.is_err());

View file

@ -34,6 +34,7 @@ static mut NT_LIBRARY: MaybeUninit<HMODULE> = MaybeUninit::uninit();
#[inline]
fn init_ntdll() -> Result<HINSTANCE> {
NT_INIT.call_once(|| {
// SAFETY: return value is checked.
unsafe {
*NT_LIBRARY.as_mut_ptr() =
libloaderapi::LoadLibraryW(win32_wide_string("ntdll").as_ptr());
@ -44,6 +45,7 @@ fn init_ntdll() -> Result<HINSTANCE> {
};
});
// SAFETY: NT_LIBRARY initialized above.
let handle = unsafe { NT_LIBRARY.assume_init() };
if handle.is_null() {
Err(Error::from(io::Error::new(
@ -56,6 +58,7 @@ fn init_ntdll() -> Result<HINSTANCE> {
}
fn get_symbol(handle: HMODULE, proc_name: &str) -> Result<*mut minwindef::__some_function> {
// SAFETY: return value is checked.
let symbol = unsafe { libloaderapi::GetProcAddress(handle, win32_string(proc_name).as_ptr()) };
if symbol.is_null() {
Err(Error::last())
@ -68,6 +71,7 @@ fn get_symbol(handle: HMODULE, proc_name: &str) -> Result<*mut minwindef::__some
pub fn nt_query_timer_resolution() -> Result<(Duration, Duration)> {
let handle = init_ntdll()?;
// SAFETY: trivially safe
let func = unsafe {
std::mem::transmute::<
*mut minwindef::__some_function,
@ -99,6 +103,7 @@ pub fn nt_query_timer_resolution() -> Result<(Duration, Duration)> {
pub fn nt_set_timer_resolution(resolution: Duration) -> Result<()> {
let handle = init_ntdll()?;
// SAFETY: trivially safe
let func = unsafe {
std::mem::transmute::<
*mut minwindef::__some_function,
@ -150,10 +155,11 @@ pub fn set_time_period(res: Duration, begin: bool) -> Result<()> {
panic!("time(Begin|End)Period does not support values above u32::MAX.",);
}
// Trivially safe. Note that the casts are safe because we know res is within u32's range.
let ret = if begin {
// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range.
unsafe { timeBeginPeriod(res.as_millis() as u32) }
} else {
// SAFETY: Trivially safe. Note that the casts are safe because we know res is within u32's range.
unsafe { timeEndPeriod(res.as_millis() as u32) }
};
if ret != TIMERR_NOERROR {

View file

@ -16,6 +16,7 @@ use super::errno_result;
use super::Result;
pub fn set_audio_thread_priority() -> Result<SafeMultimediaHandle> {
// SAFETY:
// Safe because we know Pro Audio is part of windows and we down task_index.
let multimedia_handle = unsafe {
let mut task_index: u32 = 0;
@ -28,6 +29,7 @@ pub fn set_audio_thread_priority() -> Result<SafeMultimediaHandle> {
if multimedia_handle.is_null() {
warn!(
"Failed to set audio thread to Pro Audio. Error: {}",
// SAFETY: trivially safe
unsafe { GetLastError() }
);
errno_result()
@ -38,6 +40,7 @@ pub fn set_audio_thread_priority() -> Result<SafeMultimediaHandle> {
pub fn set_thread_priority(thread_priority: i32) -> Result<()> {
let res =
// SAFETY:
// Safe because priority level value is valid and a valid thread handle will be passed in
unsafe { SetThreadPriority(GetCurrentThread(), thread_priority) };
if res == 0 {
@ -53,13 +56,16 @@ pub struct SafeMultimediaHandle {
impl Drop for SafeMultimediaHandle {
fn drop(&mut self) {
// SAFETY:
// Safe because we `multimedia_handle` is defined in the same thread and is created in the
// function above. `multimedia_handle` needs be created from `AvSetMmThreadCharacteristicsA`.
// This will also drop the `mulitmedia_handle`.
if unsafe { AvRevertMmThreadCharacteristics(self.multimedia_handle) } == FALSE {
warn!("Failed to revert audio thread. Error: {}", unsafe {
GetLastError()
});
warn!(
"Failed to revert audio thread. Error: {}",
// SAFETY: trivially safe
unsafe { GetLastError() }
);
}
}
}
@ -77,6 +83,7 @@ mod test {
#[test]
#[ignore]
fn test_mm_handle_is_dropped() {
// SAFETY:
// Safe because the only the only unsafe functions called are to get the thread
// priority.
unsafe {

View file

@ -37,6 +37,7 @@ pub(crate) fn file_punch_hole(handle: &File, offset: u64, length: u64) -> io::Re
BeyondFinalZero: *end_offset,
};
// SAFETY:
// Safe because we check the return value and all values should be set
let result = unsafe { super::ioctl::ioctl_with_ref(handle, FSCTL_SET_ZERO_DATA, &zero_data) };

View file

@ -37,6 +37,7 @@ pub fn set_cpu_affinity<I: IntoIterator<Item = usize>>(cpus: I) -> Result<usize>
}
pub fn set_cpu_affinity_mask(affinity_mask: usize) -> Result<usize> {
// SAFETY: trivially safe as return value is checked.
let res: usize = unsafe {
let thread_handle = GetCurrentThread();
SetThreadAffinityMask(thread_handle, affinity_mask)
@ -63,6 +64,7 @@ mod tests {
fn cpu_affinity() {
let mut process_affinity_mask: usize = 0;
let mut system_affinity_mask: usize = 0;
// SAFETY: trivially safe as return value is checked.
let res = unsafe {
GetProcessAffinityMask(
GetCurrentProcess(),

View file

@ -15,13 +15,15 @@ use crate::SharedMemory;
impl PlatformSharedMemory for SharedMemory {
fn new(_debug_name: &CStr, size: u64) -> Result<SharedMemory> {
// Safe because we do not provide a handle.
let mapping_handle =
// SAFETY:
// Safe because we do not provide a handle.
unsafe { create_file_mapping(None, size, PAGE_EXECUTE_READWRITE, None) }
.map_err(super::Error::from)?;
// Safe because we have exclusive ownership of mapping_handle & it is valid.
Self::from_safe_descriptor(
// SAFETY:
// Safe because we have exclusive ownership of mapping_handle & it is valid.
unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) },
size,
)

View file

@ -203,11 +203,10 @@ impl StreamChannel {
// could stall readers.)
let _read_lock = self.read_lock.lock();
let res = unsafe {
// Safe because no partial reads are possible, and the underlying code bounds the
// read by buf's size.
self.pipe_conn.read(buf)
};
// SAFETY:
// Safe because no partial reads are possible, and the underlying code bounds the
// read by buf's size.
let res = unsafe { self.pipe_conn.read(buf) };
// The entire goal of this complex section is to avoid the need for shared memory between
// each channel end to synchronize the notification state. It is very subtle, modify with

View file

@ -26,8 +26,10 @@ use crate::syslog::Log;
use crate::syslog::Syslog;
use crate::RawDescriptor;
// SAFETY:
// On windows RawDescriptor is !Sync + !Send, but also on windows we don't do anything with them
unsafe impl Sync for crate::syslog::State {}
// SAFETY: See comments for impl Sync
unsafe impl Send for crate::syslog::State {}
pub struct PlatformSyslog {}

View file

@ -19,6 +19,7 @@ struct SystemInfo {
}
static SYSTEM_INFO: Lazy<SystemInfo> = Lazy::new(|| {
// SAFETY:
// Safe because this is a universally available call on modern Windows systems.
let sysinfo = unsafe {
let mut sysinfo = MaybeUninit::<SYSTEM_INFO>::uninit();
@ -51,6 +52,7 @@ pub fn allocation_granularity() -> u64 {
/// Cross-platform wrapper around getting the current process id.
#[inline(always)]
pub fn getpid() -> Pid {
// SAFETY:
// Safe because we only use the return value.
unsafe { GetCurrentProcessId() }
}

View file

@ -35,6 +35,7 @@ pub unsafe trait Terminal {
let descriptor = self.terminal_descriptor();
let mut orig_mode = 0;
// SAFETY:
// Safe because we provide a valid descriptor and pointer and we check the return result.
if unsafe { GetConsoleMode(descriptor, &mut orig_mode) } == 0 {
return Err(Error::last());
@ -43,6 +44,7 @@ pub unsafe trait Terminal {
let new_mode = (orig_mode | ENABLE_VIRTUAL_TERMINAL_INPUT)
& !(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT);
// SAFETY:
// Safe because the syscall will only read the extent of mode and we check the return result.
if unsafe { SetConsoleMode(descriptor, new_mode) } == 0 {
return Err(Error::last());
@ -53,6 +55,7 @@ pub unsafe trait Terminal {
/// Set this terminal's mode to a previous state returned by `set_raw_mode()`.
fn restore_mode(&self, mode: DWORD) -> Result<()> {
// SAFETY:
// Safe because the syscall will only read the extent of mode and we check the return result.
if unsafe { SetConsoleMode(self.terminal_descriptor(), mode) } == 0 {
Err(Error::last())
@ -62,6 +65,7 @@ pub unsafe trait Terminal {
}
}
// SAFETY:
// Safe because we return a genuine terminal descriptor that never changes and shares our lifetime.
unsafe impl Terminal for Stdin {
fn terminal_descriptor(&self) -> RawDescriptor {

View file

@ -38,6 +38,7 @@ impl Timer {
/// `reset`. Note that this timer MAY wake/trigger early due to limitations on
/// SetWaitableTimer (see <https://github.com/rust-lang/rust/issues/43376>).
pub fn new() -> Result<Timer> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let handle = unsafe {
CreateWaitableTimerA(
@ -59,8 +60,9 @@ impl Timer {
return errno_result();
}
// Safe because we uniquely own the file descriptor.
Ok(Timer {
// SAFETY:
// Safe because we uniquely own the file descriptor.
handle: unsafe { SafeDescriptor::from_raw_descriptor(handle) },
interval: None,
})
@ -100,6 +102,7 @@ impl TimerTrait for Timer {
None => 0,
};
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe {
SetWaitableTimer(
@ -119,6 +122,7 @@ impl TimerTrait for Timer {
}
fn wait(&mut self) -> Result<()> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { WaitForSingleObject(self.as_raw_descriptor(), INFINITE) };
@ -137,6 +141,7 @@ impl TimerTrait for Timer {
}
fn clear(&mut self) -> Result<()> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { CancelWaitableTimer(self.as_raw_descriptor()) };

View file

@ -198,6 +198,8 @@ impl<T: EventToken> EventContext<T> {
// which always populates the list.
return Err(Error::new(ERROR_INVALID_PARAMETER));
}
// SAFETY: raw handles array is expected to contain valid handles and the return value of
// the function is checked.
let result = unsafe {
WaitForMultipleObjects(
raw_handles_list.len() as DWORD,
@ -254,14 +256,18 @@ impl<T: EventToken> EventContext<T> {
if handles_offset >= handles_len {
break;
}
event_index = (unsafe {
WaitForMultipleObjects(
(raw_handles_list.len() - handles_offset) as DWORD,
raw_handles_list[handles_offset..].as_ptr(),
FALSE, // return when one event is signaled
0, /* instantaneous timeout */
)
} - WAIT_OBJECT_0) as usize;
event_index = (
// SAFETY: raw handles array is expected to contain valid handles and the
// return value of the function is checked.
unsafe {
WaitForMultipleObjects(
(raw_handles_list.len() - handles_offset) as DWORD,
raw_handles_list[handles_offset..].as_ptr(),
FALSE, // return when one event is signaled
0, /* instantaneous timeout */
)
} - WAIT_OBJECT_0
) as usize;
if event_index >= (handles_len - handles_offset) {
// This indicates a failure condition, as return values greater than the length

View file

@ -122,6 +122,7 @@ impl<'a> VolatileSlice<'a> {
pub fn as_iobufs<'mem, 'slice>(
iovs: &'slice [VolatileSlice<'mem>],
) -> &'slice [IoBufMut<'mem>] {
// SAFETY:
// Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
}
@ -131,6 +132,7 @@ impl<'a> VolatileSlice<'a> {
pub fn as_iobufs_mut<'mem, 'slice>(
iovs: &'slice mut [VolatileSlice<'mem>],
) -> &'slice mut [IoBufMut<'mem>] {
// SAFETY:
// Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBufMut, iovs.len()) }
}
@ -149,6 +151,7 @@ impl<'a> VolatileSlice<'a> {
.checked_sub(count)
.ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;
// SAFETY:
// Safe because the memory has the same lifetime and points to a subset of the memory of the
// original slice.
unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
@ -174,6 +177,7 @@ impl<'a> VolatileSlice<'a> {
},
)?;
// SAFETY:
// Safe because we have verified that the new memory is a subset of the original slice.
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
}
@ -196,6 +200,7 @@ impl<'a> VolatileSlice<'a> {
/// # Ok(())
/// # }
pub fn write_bytes(&self, value: u8) {
// SAFETY:
// Safe because the memory is valid and needs only byte alignment.
unsafe {
write_bytes(self.as_mut_ptr(), value, self.size());
@ -230,6 +235,7 @@ impl<'a> VolatileSlice<'a> {
{
let mut addr = self.as_mut_ptr() as *const u8;
for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
// SAFETY: Safe because buf is valid, aligned to type `T` and is initialized.
unsafe {
*v = read_volatile(addr as *const T);
addr = addr.add(size_of::<T>());
@ -253,6 +259,7 @@ impl<'a> VolatileSlice<'a> {
/// # }
/// ```
pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
// SAFETY: Safe because slice is valid and is byte aligned.
unsafe {
copy(
self.as_mut_ptr() as *const u8,
@ -293,6 +300,7 @@ impl<'a> VolatileSlice<'a> {
{
let mut addr = self.as_mut_ptr();
for v in buf.iter().take(self.size() / size_of::<T>()) {
// SAFETY: Safe because buf is valid, aligned to type `T` and is mutable.
unsafe {
write_volatile(
addr as *mut T,
@ -318,11 +326,11 @@ impl<'a> VolatileSlice<'a> {
let aligned_tail_addr = tail_addr & !MASK_4BIT;
// Check 16 bytes at once. The addresses should be 16 bytes aligned for better performance.
// SAFETY: Each aligned_addr is within VolatileSlice
if (aligned_head_addr..aligned_tail_addr)
.step_by(16)
.any(|aligned_addr| unsafe { *(aligned_addr as *const u128) } != 0)
{
if (aligned_head_addr..aligned_tail_addr).step_by(16).any(
|aligned_addr|
// SAFETY: Each aligned_addr is within VolatileSlice
unsafe { *(aligned_addr as *const u128) } != 0,
) {
return false;
}
@ -346,7 +354,7 @@ impl<'a> VolatileSlice<'a> {
///
/// This checks byte by byte.
///
/// ## Safety
/// # Safety
///
/// * `head_addr` <= `tail_addr`
/// * Bytes between `head_addr` and `tail_addr` is valid to access.
@ -417,7 +425,10 @@ mod tests {
},
)?;
Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
Ok(
// SAFETY: trivially safe
unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) },
)
}
}

View file

@ -52,10 +52,12 @@ fn safe_descriptor_from_path_none() {
#[test]
#[allow(clippy::eq_op)]
fn clone_equality() {
// SAFETY: Safe because return value is checked.
let ret = unsafe { libc::eventfd(0, 0) };
if ret < 0 {
panic!("failed to create eventfd");
}
// SAFETY: Safe because ret is valid and return value is checked.
let descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
assert_eq!(descriptor, descriptor);
@ -65,10 +67,13 @@ fn clone_equality() {
descriptor.try_clone().expect("failed to clone eventfd")
);
// SAFETY: Safe because return value is checked.
let ret = unsafe { libc::eventfd(0, 0) };
if ret < 0 {
panic!("failed to create eventfd");
}
// SAFETY: Safe because ret is valid and return value is checked.
let another = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
assert_ne!(descriptor, another);

View file

@ -36,6 +36,7 @@ fn unix_seqpacket_listener_from_fd() {
UnixSeqpacketListener::bind(&socket_path).expect("failed to create UnixSeqpacketListener"),
);
// UnixSeqpacketListener should succeed on a valid listening descriptor.
// SAFETY: Safe because `listener` is valid and the return value is checked.
let good_dup = UnixSeqpacketListener::bind(format!("/proc/self/fd/{}", unsafe {
libc::dup(listener.as_raw_descriptor())
}));
@ -46,6 +47,7 @@ fn unix_seqpacket_listener_from_fd() {
assert!(good_dup_path.is_err());
// UnixSeqpacketListener must fail on an existing non-listener socket.
let s1 = UnixSeqpacket::connect(socket_path.as_path()).expect("UnixSeqpacket::connect failed");
// SAFETY: Safe because `s1` is valid and the return value is checked.
let bad_dup = UnixSeqpacketListener::bind(format!("/proc/self/fd/{}", unsafe {
libc::dup(s1.as_raw_descriptor())
}));

View file

@ -37,9 +37,9 @@ fn test_serialize_tube_new() {
let msg_descriptors = msg_serialize.into_descriptors();
// Deserialize the Tube
let msg_descriptors_safe = msg_descriptors
.into_iter()
.map(|v| unsafe { SafeDescriptor::from_raw_descriptor(v) });
let msg_descriptors_safe = msg_descriptors.into_iter().map(|v|
// SAFETY: Safe because `v` is a valid descriptor
unsafe { SafeDescriptor::from_raw_descriptor(v) });
let tube_deserialized: Tube =
deserialize_with_descriptors(|| serde_json::from_slice(&serialized), msg_descriptors_safe)
.unwrap();

View file

@ -52,6 +52,7 @@ mod test {
assert_eq!(thread_comm, thread_name + "\n");
// SAFETY: child pid is expected to be valid and we wait on the child
unsafe { libc::kill(child.pid, libc::SIGKILL) };
child.wait().unwrap();
}
@ -75,6 +76,7 @@ mod test {
assert_eq!(thread_comm, "123456789012345\n");
// SAFETY: child pid is expected to be valid and we wait on the child
unsafe { libc::kill(child.pid, libc::SIGKILL) };
child.wait().unwrap();
}

View file

@ -135,9 +135,9 @@ fn test_serialize_tube_pair() {
let msg_descriptors = msg_serialize.into_descriptors();
// Deserialize the Tube
let msg_descriptors_safe = msg_descriptors
.into_iter()
.map(|v| unsafe { SafeDescriptor::from_raw_descriptor(v) });
let msg_descriptors_safe = msg_descriptors.into_iter().map(|v|
// SAFETY: `v` is expected to be valid
unsafe { SafeDescriptor::from_raw_descriptor(v) });
let tube_deserialized: Tube =
deserialize_with_descriptors(|| serde_json::from_slice(&serialized), msg_descriptors_safe)
.unwrap();

View file

@ -70,8 +70,9 @@ pub fn common_child_setup(args: CommonChildStartupArgs) -> anyhow::Result<ChildL
..Default::default()
};
if let Some(log_file_descriptor) = args.syslog_file {
// Safe because we are taking ownership of a SafeDescriptor.
let log_file =
// SAFETY:
// Safe because we are taking ownership of a SafeDescriptor.
unsafe { File::from_raw_descriptor(log_file_descriptor.into_raw_descriptor()) };
cfg.pipe = Some(Box::new(log_file));
cfg.log_args.stderr = false;

View file

@ -19,7 +19,9 @@
//! assert!(b == 3);
//! assert!(l == 3);
//!
//! // SAFETY: trivially safe
//! let b_trans: u32 = unsafe { std::mem::transmute(b) };
//! // SAFETY: trivially safe
//! let l_trans: u32 = unsafe { std::mem::transmute(l) };
//!
//! #[cfg(target_endian = "little")]
@ -153,6 +155,7 @@ mod tests {
let v = 0x0123456789ABCDEF as $old_type;
let endian_v: $new_type = From::from(v);
let endian_into: $old_type = endian_v.into();
// SAFETY: trivially safe
let endian_transmute: $old_type = unsafe { transmute(endian_v) };
if $native {

View file

@ -149,6 +149,7 @@ where
/// mut_entries_slice instead.
pub fn entries_slice(&self) -> &[S] {
let valid_length = self.get_valid_len();
// SAFETY:
// Safe because the length has been validated.
unsafe { self.entries[0].get_slice(valid_length) }
}
@ -157,6 +158,7 @@ where
pub fn mut_entries_slice(&mut self) -> &mut [S] {
let valid_length = self.get_valid_len();
self.entries[0].set_len(valid_length);
// SAFETY:
// Safe because the length has been validated.
unsafe { self.entries[0].get_mut_slice(valid_length) }
}

View file

@ -30,6 +30,7 @@ impl ArcWake for Waker {
fn wake_by_ref(arc_self: &Arc<Self>) {
let state = arc_self.0.swap(WOKEN, Ordering::Release);
if state == WAITING {
// SAFETY:
// The thread hasn't already been woken up so wake it up now. Safe because this doesn't
// modify any memory and we check the return value.
let res = unsafe {
@ -71,6 +72,7 @@ pub fn block_on<F: Future>(f: F) -> F::Output {
let state = thread_waker.0.swap(WAITING, Ordering::Acquire);
if state == WAITING {
// SAFETY:
// If we weren't already woken up then wait until we are. Safe because this doesn't
// modify any memory and we check the return value.
let res = unsafe {

View file

@ -201,6 +201,7 @@ impl VecIoWrapper {
}
}
// SAFETY:
// Safe to implement BackingMemory as the vec is only accessible inside the wrapper and these iovecs
// are the only thing allowed to modify it. Nothing else can get a reference to the vec until all
// iovecs are dropped because they borrow Self. Nothing can borrow the owned inner vec until self
@ -208,6 +209,7 @@ impl VecIoWrapper {
unsafe impl BackingMemory for VecIoWrapper {
fn get_volatile_slice(&self, mem_range: MemRegion) -> Result<VolatileSlice<'_>> {
self.check_addrs(&mem_range)?;
// SAFETY:
// Safe because the mem_range range is valid in the backing memory as checked above.
unsafe {
Ok(VolatileSlice::from_raw_parts(

View file

@ -189,6 +189,7 @@ impl Condvar {
oldstate = self.state.load(Ordering::Relaxed);
}
// SAFETY:
// Safe because the spin lock guarantees exclusive access and the reference does not escape
// this function.
let mu = unsafe { &mut *self.mu.get() };
@ -200,6 +201,7 @@ impl Condvar {
_ => panic!("Attempting to use Condvar with more than one RwLock at the same time"),
}
// SAFETY:
// Safe because the spin lock guarantees exclusive access.
unsafe { (*self.waiters.get()).push_back(waiter) };
@ -241,12 +243,14 @@ impl Condvar {
oldstate = self.state.load(Ordering::Relaxed);
}
// SAFETY:
// Safe because the spin lock guarantees exclusive access and the reference does not escape
// this function.
let waiters = unsafe { &mut *self.waiters.get() };
let wake_list = get_wake_list(waiters);
let newstate = if waiters.is_empty() {
// SAFETY:
// Also clear the rwlock associated with this Condvar since there are no longer any
// waiters. Safe because the spin lock guarantees exclusive access.
unsafe { *self.mu.get() = 0 };
@ -299,9 +303,11 @@ impl Condvar {
oldstate = self.state.load(Ordering::Relaxed);
}
// SAFETY:
// Safe because the spin lock guarantees exclusive access to `self.waiters`.
let wake_list = unsafe { (*self.waiters.get()).take() };
// SAFETY:
// Clear the rwlock associated with this Condvar since there are no longer any waiters. Safe
// because we the spin lock guarantees exclusive access.
unsafe { *self.mu.get() = 0 };
@ -337,6 +343,7 @@ impl Condvar {
oldstate = self.state.load(Ordering::Relaxed);
}
// SAFETY:
// Safe because the spin lock provides exclusive access and the reference does not escape
// this function.
let waiters = unsafe { &mut *self.waiters.get() };
@ -344,6 +351,7 @@ impl Condvar {
let waiting_for = waiter.is_waiting_for();
// Don't drop the old waiter now as we're still holding the spin lock.
let old_waiter = if waiter.is_linked() && waiting_for == WaitingFor::Condvar {
// SAFETY:
// Safe because we know that the waiter is still linked and is waiting for the Condvar,
// which guarantees that it is still in `self.waiters`.
let mut cursor = unsafe { waiters.cursor_mut_from_ptr(waiter as *const Waiter) };
@ -361,6 +369,7 @@ impl Condvar {
};
let set_on_release = if waiters.is_empty() {
// SAFETY:
// Clear the rwlock associated with this Condvar since there are no longer any waiters. Safe
// because we the spin lock guarantees exclusive access.
unsafe { *self.mu.get() = 0 };
@ -381,7 +390,11 @@ impl Condvar {
}
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Send for Condvar {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Sync for Condvar {}
impl Default for Condvar {
@ -446,6 +459,7 @@ fn get_wake_list(waiters: &mut WaiterList) -> WaiterList {
fn cancel_waiter(cv: usize, waiter: &Waiter, wake_next: bool) {
let condvar = cv as *const Condvar;
// SAFETY:
// Safe because the thread that owns the waiter being canceled must also own a reference to the
// Condvar, which guarantees that this pointer is valid.
unsafe { (*condvar).cancel_waiter(waiter, wake_next) }
@ -640,6 +654,7 @@ mod test {
while *count == 0 {
count = cv.wait_read(count).await;
}
// SAFETY: Safe because count is valid and is byte aligned.
let _ = unsafe { ptr::read_volatile(&*count as *const usize) };
}

View file

@ -305,15 +305,19 @@ impl RawRwLock {
{
let mut set_on_release = 0;
// Safe because we have acquired the spin lock and it provides exclusive
// access to the waiter queue.
if wait_count < LONG_WAIT_THRESHOLD {
// Add the waiter to the back of the queue.
// SAFETY:
// Safe because we have acquired the spin lock and it provides exclusive
// access to the waiter queue.
unsafe { (*self.waiters.get()).push_back(w.clone()) };
} else {
// This waiter has gone through the queue too many times. Put it in the
// front of the queue and block all other threads from acquiring the lock
// until this one has acquired it at least once.
// SAFETY:
// Safe because we have acquired the spin lock and it provides exclusive
// access to the waiter queue.
unsafe { (*self.waiters.get()).push_front(w.clone()) };
// Set the LONG_WAIT bit to prevent all other threads from acquiring the
@ -459,6 +463,7 @@ impl RawRwLock {
// to be cleared.
let mut clear = SPINLOCK;
// SAFETY:
// Safe because the spinlock guarantees exclusive access to the waiter list and
// the reference does not escape this function.
let waiters = unsafe { &mut *self.waiters.get() };
@ -530,6 +535,7 @@ impl RawRwLock {
oldstate = self.state.load(Ordering::Relaxed);
}
// SAFETY:
// Safe because the spin lock provides exclusive access and the reference does not escape
// this function.
let waiters = unsafe { &mut *self.waiters.get() };
@ -557,6 +563,7 @@ impl RawRwLock {
// Don't drop the old waiter while holding the spin lock.
let old_waiter = if waiter.is_linked() && waiting_for == WaitingFor::Mutex {
// SAFETY:
// We know that the waiter is still linked and is waiting for the rwlock, which
// guarantees that it is still linked into `self.waiters`.
let mut cursor = unsafe { waiters.cursor_mut_from_ptr(waiter as *const Waiter) };
@ -613,12 +620,17 @@ impl RawRwLock {
}
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Send for RawRwLock {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Sync for RawRwLock {}
fn cancel_waiter(raw: usize, waiter: &Waiter, wake_next: bool) {
let raw_rwlock = raw as *const RawRwLock;
// SAFETY:
// Safe because the thread that owns the waiter that is being canceled must also own a reference
// to the rwlock, which ensures that this pointer is valid.
unsafe { (*raw_rwlock).cancel_waiter(waiter, wake_next) }
@ -728,9 +740,10 @@ impl<T: ?Sized> RwLock<T> {
pub async fn lock(&self) -> RwLockWriteGuard<'_, T> {
self.raw.lock().await;
// Safe because we have exclusive access to `self.value`.
RwLockWriteGuard {
mu: self,
// SAFETY:
// Safe because we have exclusive access to `self.value`.
value: unsafe { &mut *self.value.get() },
}
}
@ -750,9 +763,10 @@ impl<T: ?Sized> RwLock<T> {
pub async fn read_lock(&self) -> RwLockReadGuard<'_, T> {
self.raw.read_lock().await;
// Safe because we have shared read-only access to `self.value`.
RwLockReadGuard {
mu: self,
// SAFETY:
// Safe because we have shared read-only access to `self.value`.
value: unsafe { &*self.value.get() },
}
}
@ -762,9 +776,10 @@ impl<T: ?Sized> RwLock<T> {
pub(crate) async fn lock_from_cv(&self) -> RwLockWriteGuard<'_, T> {
self.raw.lock_slow::<Exclusive>(DESIGNATED_WAKER, 0).await;
// Safe because we have exclusive access to `self.value`.
RwLockWriteGuard {
mu: self,
// SAFETY:
// Safe because we have exclusive access to `self.value`.
value: unsafe { &mut *self.value.get() },
}
}
@ -778,9 +793,10 @@ impl<T: ?Sized> RwLock<T> {
.lock_slow::<Shared>(DESIGNATED_WAKER, WRITER_WAITING)
.await;
// Safe because we have exclusive access to `self.value`.
RwLockReadGuard {
mu: self,
// SAFETY:
// Safe because we have exclusive access to `self.value`.
value: unsafe { &*self.value.get() },
}
}
@ -796,13 +812,18 @@ impl<T: ?Sized> RwLock<T> {
}
pub fn get_mut(&mut self) -> &mut T {
// SAFETY:
// Safe because the compiler statically guarantees that are no other references to `self`.
// This is also why we don't need to acquire the lock first.
unsafe { &mut *self.value.get() }
}
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<T: ?Sized + Send> Sync for RwLock<T> {}
impl<T: ?Sized + Default> Default for RwLock<T> {

View file

@ -70,6 +70,8 @@ impl<T: ?Sized> SpinLock<T> {
hint::spin_loop();
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
SpinLockGuard {
lock: self,
value: unsafe { &mut *self.value.get() },
@ -84,13 +86,18 @@ impl<T: ?Sized> SpinLock<T> {
/// Returns a mutable reference to the contained value. This method doesn't perform any locking
/// as the compiler will statically guarantee that there are no other references to `self`.
pub fn get_mut(&mut self) -> &mut T {
// SAFETY:
// Safe because the compiler can statically guarantee that there are no other references to
// `self`. This is also why we don't need to acquire the lock.
unsafe { &mut *self.value.get() }
}
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
impl<T: ?Sized + Default> Default for SpinLock<T> {

View file

@ -52,15 +52,19 @@ impl DefaultLinkOps for AtomicLink {
const NEW: Self::Ops = AtomicLinkOps;
}
// SAFETY:
// Safe because the only way to mutate `AtomicLink` is via the `LinkedListOps` trait whose methods
// are all unsafe and require that the caller has first called `acquire_link` (and had it return
// true) to use them safely.
unsafe impl Send for AtomicLink {}
// SAFETY: See safety comment for impl Send
unsafe impl Sync for AtomicLink {}
#[derive(Copy, Clone, Default)]
pub struct AtomicLinkOps;
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl LinkOps for AtomicLinkOps {
type LinkPtr = NonNull<AtomicLink>;
@ -73,6 +77,8 @@ unsafe impl LinkOps for AtomicLinkOps {
}
}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl LinkedListOps for AtomicLinkOps {
unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
*ptr.as_ref().next.get()

View file

@ -108,8 +108,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
mut vec: Vec<u8>,
) -> AsyncResult<(usize, Vec<u8>)> {
loop {
// Safe because this will only modify `vec` and we check the return value.
let res = if let Some(offset) = file_offset {
// SAFETY:
// Safe because this will only modify `vec` and we check the return value.
unsafe {
libc::pread64(
self.registered_source.duped_fd.as_raw_fd(),
@ -119,6 +120,8 @@ impl<F: AsRawDescriptor> PollSource<F> {
)
}
} else {
// SAFETY:
// Safe because this will only modify `vec` and we check the return value.
unsafe {
libc::read(
self.registered_source.duped_fd.as_raw_fd(),
@ -158,9 +161,10 @@ impl<F: AsRawDescriptor> PollSource<F> {
.collect::<Vec<VolatileSlice>>();
loop {
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
let res = if let Some(offset) = file_offset {
// SAFETY:
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
unsafe {
libc::preadv64(
self.registered_source.duped_fd.as_raw_fd(),
@ -170,6 +174,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
)
}
} else {
// SAFETY:
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
unsafe {
libc::readv(
self.registered_source.duped_fd.as_raw_fd(),
@ -213,8 +220,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
vec: Vec<u8>,
) -> AsyncResult<(usize, Vec<u8>)> {
loop {
// Safe because this will not modify any memory and we check the return value.
let res = if let Some(offset) = file_offset {
// SAFETY:
// Safe because this will not modify any memory and we check the return value.
unsafe {
libc::pwrite64(
self.registered_source.duped_fd.as_raw_fd(),
@ -224,6 +232,8 @@ impl<F: AsRawDescriptor> PollSource<F> {
)
}
} else {
// SAFETY:
// Safe because this will not modify any memory and we check the return value.
unsafe {
libc::write(
self.registered_source.duped_fd.as_raw_fd(),
@ -264,9 +274,10 @@ impl<F: AsRawDescriptor> PollSource<F> {
.collect::<Vec<VolatileSlice>>();
loop {
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
let res = if let Some(offset) = file_offset {
// SAFETY:
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
unsafe {
libc::pwritev64(
self.registered_source.duped_fd.as_raw_fd(),
@ -276,6 +287,9 @@ impl<F: AsRawDescriptor> PollSource<F> {
)
}
} else {
// SAFETY:
// Safe because we trust the kernel not to write path the length given and the length is
// guaranteed to be valid from the pointer by io_slice_mut.
unsafe {
libc::writev(
self.registered_source.duped_fd.as_raw_fd(),
@ -302,8 +316,11 @@ impl<F: AsRawDescriptor> PollSource<F> {
}
}
/// # Safety
///
/// Sync all completed write operations to the backing storage.
pub async fn fsync(&self) -> AsyncResult<()> {
// SAFETY: the duped_fd is valid and return value is checked.
let ret = unsafe { libc::fsync(self.registered_source.duped_fd.as_raw_fd()) };
if ret == 0 {
Ok(())
@ -344,6 +361,7 @@ impl<F: AsRawDescriptor> PollSource<F> {
/// Sync all data of completed write operations to the backing storage, avoiding updating extra
/// metadata.
pub async fn fdatasync(&self) -> AsyncResult<()> {
// SAFETY: the duped_fd is valid and return value is checked.
let ret = unsafe { libc::fdatasync(self.registered_source.duped_fd.as_raw_fd()) };
if ret == 0 {
Ok(())

View file

@ -158,15 +158,18 @@ impl From<Error> for io::Error {
static IS_URING_STABLE: Lazy<bool> = Lazy::new(|| {
let mut utsname = MaybeUninit::zeroed();
// SAFETY:
// Safe because this will only modify `utsname` and we check the return value.
let res = unsafe { libc::uname(utsname.as_mut_ptr()) };
if res < 0 {
return false;
}
// SAFETY:
// Safe because the kernel has initialized `utsname`.
let utsname = unsafe { utsname.assume_init() };
// SAFETY:
// Safe because the pointer is valid and the kernel guarantees that this is a valid C string.
let release = unsafe { CStr::from_ptr(utsname.release.as_ptr()) };
@ -423,11 +426,10 @@ impl UringReactor {
raw: &Arc<RawExecutor<UringReactor>>,
fd: &F,
) -> Result<RegisteredSource> {
let duped_fd = unsafe {
// Safe because duplicating an FD doesn't affect memory safety, and the dup'd FD
// will only be added to the poll loop.
File::from_raw_fd(dup_fd(fd.as_raw_descriptor())?)
};
// SAFETY:
// Safe because duplicating an FD doesn't affect memory safety, and the dup'd FD
// will only be added to the poll loop.
let duped_fd = unsafe { File::from_raw_fd(dup_fd(fd.as_raw_descriptor())?) };
Ok(RegisteredSource {
tag: self
@ -555,6 +557,7 @@ impl UringReactor {
let vslice = mem
.get_volatile_slice(mem_range)
.map_err(|_| Error::InvalidOffset)?;
// SAFETY:
// Safe because we guarantee that the memory pointed to by `iovecs` lives until the
// transaction is complete and the completion has been returned from `wait()`.
Ok(unsafe { IoBufMut::from_raw_parts(vslice.as_mut_ptr(), vslice.size()) })
@ -572,10 +575,11 @@ impl UringReactor {
let entry = ring.ops.vacant_entry();
let next_op_token = entry.key();
// SAFETY:
// Safe because all the addresses are within the Memory that an Arc is kept for the
// duration to ensure the memory is valid while the kernel accesses it.
// Tested by `dont_drop_backing_mem_read` unit test.
unsafe {
// Safe because all the addresses are within the Memory that an Arc is kept for the
// duration to ensure the memory is valid while the kernel accesses it.
// Tested by `dont_drop_backing_mem_read` unit test.
self.ctx
.add_readv(
iovecs,
@ -609,6 +613,7 @@ impl UringReactor {
let vslice = mem
.get_volatile_slice(mem_range)
.map_err(|_| Error::InvalidOffset)?;
// SAFETY:
// Safe because we guarantee that the memory pointed to by `iovecs` lives until the
// transaction is complete and the completion has been returned from `wait()`.
Ok(unsafe { IoBufMut::from_raw_parts(vslice.as_mut_ptr(), vslice.size()) })
@ -626,10 +631,11 @@ impl UringReactor {
let entry = ring.ops.vacant_entry();
let next_op_token = entry.key();
// SAFETY:
// Safe because all the addresses are within the Memory that an Arc is kept for the
// duration to ensure the memory is valid while the kernel accesses it.
// Tested by `dont_drop_backing_mem_write` unit test.
unsafe {
// Safe because all the addresses are within the Memory that an Arc is kept for the
// duration to ensure the memory is valid while the kernel accesses it.
// Tested by `dont_drop_backing_mem_write` unit test.
self.ctx
.add_writev(
iovecs,
@ -800,6 +806,7 @@ impl Drop for UringReactor {
}
}
// SAFETY:
// Used to dup the FDs passed to the executor so there is a guarantee they aren't closed while
// waiting in TLS to be added to the main polling context.
unsafe fn dup_fd(fd: RawFd) -> Result<RawFd> {

View file

@ -36,13 +36,14 @@ impl EventAsync {
descriptor: &dyn AsRawDescriptor,
ex: &Executor,
) -> AsyncResult<EventAsync> {
// Safe because:
// a) the underlying Event should be validated by the caller.
// b) we do NOT take ownership of the underlying Event. If we did that would cause an early
// free (and later a double free @ the end of this scope). This is why we have to wrap
// it in ManuallyDrop.
// c) we own the clone that is produced exclusively, so it is safe to take ownership of it.
Self::new_without_reset(
// SAFETY:
// Safe because:
// a) the underlying Event should be validated by the caller.
// b) we do NOT take ownership of the underlying Event. If we did that would cause an early
// free (and later a double free @ the end of this scope). This is why we have to wrap
// it in ManuallyDrop.
// c) we own the clone that is produced exclusively, so it is safe to take ownership of it.
unsafe {
ManuallyDrop::new(Event::from_raw_descriptor(descriptor.as_raw_descriptor()))
}

View file

@ -99,6 +99,7 @@ impl HandleWrapper {
pub fn cancel_sync_io<T>(&mut self, ret: T) -> T {
for handle in &self.handles {
// There isn't much we can do if cancel fails.
// SAFETY: trivially safe
if unsafe { CancelIoEx(handle.as_raw_descriptor(), null_mut()) } == 0 {
warn!(
"Cancel IO for handle:{:?} failed with {}",
@ -188,6 +189,7 @@ impl<F: AsRawDescriptor> Drop for HandleSource<F> {
}
fn get_thread_file(descriptors: Vec<Descriptor>) -> ManuallyDrop<File> {
// SAFETY: trivially safe
// Safe because all callers must exit *before* these handles will be closed (guaranteed by
// HandleSource's Drop impl.).
unsafe {

View file

@ -57,7 +57,7 @@ struct Port {
inner: RawDescriptor,
}
// # Safety
// SAFETY:
// Safe because the Port is dropped before IoCompletionPort goes out of scope
unsafe impl Send for Port {}
@ -90,8 +90,10 @@ unsafe fn get_completion_status(
) -> io::Result<CompletionPacket> {
let mut bytes_transferred = 0;
let mut completion_key = 0;
// SAFETY: trivially safe
let mut overlapped: *mut OVERLAPPED = unsafe { std::mem::zeroed() };
// SAFETY:
// Safe because:
// 1. Memory of pointers passed is stack allocated and lives as long as the syscall.
// 2. We check the error so we don't use invalid output values (e.g. overlapped).
@ -133,7 +135,7 @@ unsafe fn get_completion_status(
unsafe fn poll(port: RawDescriptor) -> Result<Vec<CompletionPacket>> {
let mut completion_packets = vec![];
completion_packets.push(
// Safety: caller has ensured that the handle is valid and is for io completion port
// SAFETY: caller has ensured that the handle is valid and is for io completion port
unsafe {
get_completion_status(port, INFINITE)
.map_err(|e| Error::IocpOperationFailed(SysError::from(e)))?
@ -146,8 +148,9 @@ unsafe fn poll(port: RawDescriptor) -> Result<Vec<CompletionPacket>> {
// get detailed error information for each of the returned overlapped IO operations without
// calling GetOverlappedResult. If we have to do that, then it's cheaper to just get each
// completion packet individually.
// Safety: caller has ensured that the handle is valid and is for io completion port
while completion_packets.len() < ENTRIES_PER_POLL {
// SAFETY:
// Safety: caller has ensured that the handle is valid and is for io completion port
match unsafe { get_completion_status(port, 0) } {
Ok(pkt) => {
completion_packets.push(pkt);
@ -168,7 +171,7 @@ fn iocp_waiter_thread(
) -> Result<()> {
let port = port.lock();
loop {
// Safety: caller has ensured that the handle is valid and is for io completion port
// SAFETY: caller has ensured that the handle is valid and is for io completion port
let packets = unsafe { poll(port.inner)? };
if !packets.is_empty() {
{
@ -265,6 +268,7 @@ impl IoCompletionPort {
/// Posts a completion packet to the IO completion port.
pub fn post_status(&self, bytes_transferred: u32, completion_key: usize) -> Result<()> {
// SAFETY:
// Safe because the IOCP handle is valid.
let res = unsafe {
PostQueuedCompletionStatus(
@ -296,11 +300,12 @@ impl IoCompletionPort {
let mut overlapped_entries: SmallVec<[OVERLAPPED_ENTRY; ENTRIES_PER_POLL]> =
smallvec!(OVERLAPPED_ENTRY::default(); ENTRIES_PER_POLL);
let mut entries_removed: ULONG = 0;
// SAFETY:
// Safe because:
// 1. IOCP is guaranteed to exist by self.
// 2. Memory of pointers passed is stack allocated and lives as long as the syscall.
// 3. We check the error so we don't use invalid output values (e.g. overlapped).
let mut entries_removed: ULONG = 0;
let success = unsafe {
GetQueuedCompletionStatusEx(
self.port.as_raw_descriptor(),
@ -352,7 +357,7 @@ impl IoCompletionPort {
/// Waits for completion events to arrive & returns the completion keys.
pub fn poll_unthreaded(&self) -> Result<SmallVec<[CompletionPacket; ENTRIES_PER_POLL]>> {
// Safety: safe because port is in scope for the duration of the call.
// SAFETY: safe because port is in scope for the duration of the call.
let packets = unsafe { poll(self.port.as_raw_descriptor())? };
let mut completion_packets = SmallVec::with_capacity(ENTRIES_PER_POLL);
for pkt in packets {
@ -398,6 +403,7 @@ impl IoCompletionPort {
}
let mut bytes_transferred = 0;
// SAFETY: trivially safe with return value checked
let success = unsafe {
GetOverlappedResult(
entry.lpCompletionKey as RawDescriptor,
@ -442,10 +448,11 @@ fn create_iocp(
None => null_mut(),
};
// Safe because:
// 1. The file handle is open because we have a reference to it.
// 2. The existing IOCP (if applicable) is valid.
let port =
// SAFETY:
// Safe because:
// 1. The file handle is open because we have a reference to it.
// 2. The existing IOCP (if applicable) is valid.
unsafe { CreateIoCompletionPort(raw_file, raw_existing_iocp, completion_key, concurrency) };
if port.is_null() {
@ -455,6 +462,7 @@ fn create_iocp(
if existing_iocp.is_some() {
Ok(None)
} else {
// SAFETY:
// Safe because:
// 1. We are creating a new IOCP.
// 2. We exclusively own the handle.
@ -502,6 +510,8 @@ mod tests {
iocp.register_descriptor(&f).unwrap();
let buf = [0u8; 16];
// SAFETY: Safe given file is valid, buffers are allocated and initialized and return value
// is checked.
unsafe {
base::windows::write_file(&f, buf.as_ptr(), buf.len(), Some(&mut overlapped)).unwrap()
};
@ -526,6 +536,8 @@ mod tests {
iocp.register_descriptor(&f).unwrap();
let buf = [0u8; 16];
// SAFETY: Safe given file is valid, buffers are allocated and initialized and return value
// is checked.
unsafe {
base::windows::write_file(&f, buf.as_ptr(), buf.len(), Some(&mut overlapped)).unwrap()
};

View file

@ -106,6 +106,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
}
}
/// SAFETY:
/// Safety requirements:
/// Same as base::windows::read_file.
unsafe fn read(
@ -119,6 +120,7 @@ unsafe fn read(
.map_err(|e| AsyncError::OverlappedSource(Error::StdIoReadError(e)))
}
/// SAFETY:
/// Safety requirements:
/// Same as base::windows::write_file.
unsafe fn write(
@ -147,6 +149,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
let overlapped = create_overlapped(file_offset, None);
let mut overlapped_op = self.reg_source.register_overlapped_operation(overlapped)?;
// SAFETY:
// Safe because we pass a pointer to a valid vec and that same vector's length.
unsafe {
read(
@ -192,6 +195,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
AsyncError::OverlappedSource(Error::BackingMemoryVolatileSliceFetchFailed(e))
})?;
// SAFETY:
// Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to.
unsafe {
read(
@ -235,6 +239,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
let overlapped = create_overlapped(file_offset, None);
let mut overlapped_op = self.reg_source.register_overlapped_operation(overlapped)?;
// SAFETY:
// Safe because we pass a pointer to a valid vec and that same vector's length.
unsafe {
write(
@ -281,6 +286,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
AsyncError::OverlappedSource(Error::BackingMemoryVolatileSliceFetchFailed(e))
})?;
// SAFETY:
// Safe because we're passing a volatile slice (valid ptr), and the size of the memory region it refers to.
unsafe {
write(
@ -313,6 +319,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
),
)));
}
// SAFETY:
// Safe because self.source lives as long as file.
let file = ManuallyDrop::new(unsafe {
File::from_raw_descriptor(self.source.as_raw_descriptor())
@ -335,6 +342,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
),
)));
}
// SAFETY:
// Safe because self.source lives as long as file.
let mut file = ManuallyDrop::new(unsafe {
File::from_raw_descriptor(self.source.as_raw_descriptor())
@ -348,6 +356,7 @@ impl<F: AsRawDescriptor> OverlappedSource<F> {
/// Sync all completed write operations to the backing storage.
pub async fn fsync(&self) -> AsyncResult<()> {
// SAFETY:
// Safe because self.source lives at least as long as the blocking pool thread. Note that
// if the blocking pool stalls and shutdown fails, the thread could outlive the file;
// however, this would mean things are already badly broken and we have a similar risk in

View file

@ -90,6 +90,7 @@ where
let mut inner = self.inner.lock();
match inner.wait_state {
WaitState::New => {
// SAFETY:
// Safe because:
// a) the callback only runs when WaitForHandle is alive (we cancel it on
// drop).
@ -128,6 +129,7 @@ where
WaitState::Woken => {
inner.wait_state = WaitState::Finished;
// SAFETY:
// Safe because:
// a) we know a wait was registered and hasn't been unregistered yet.
// b) the callback is not queued because we set WT_EXECUTEONLYONCE, and we know
@ -161,13 +163,14 @@ where
(inner.wait_state, inner.wait_object)
};
// Safe because self.descriptor is valid in any state except New or Finished.
//
// Note: this method call is critical for supplying the safety guarantee relied upon by
// wait_for_handle_waker. Upon return, it ensures that wait_for_handle_waker is not running
// and won't be scheduled again, which makes it safe to drop self.inner_for_callback
// (wait_for_handle_waker has a non owning pointer to self.inner_for_callback).
if current_state != WaitState::New && current_state != WaitState::Finished {
// SAFETY:
// Safe because self.descriptor is valid in any state except New or Finished.
//
// Note: this method call is critical for supplying the safety guarantee relied upon by
// wait_for_handle_waker. Upon return, it ensures that wait_for_handle_waker is not running
// and won't be scheduled again, which makes it safe to drop self.inner_for_callback
// (wait_for_handle_waker has a non owning pointer to self.inner_for_callback).
unsafe { unregister_wait(wait_object) }
}
}

View file

@ -62,6 +62,8 @@ unsafe fn drop_weak_raw<W: WeakWake>(data: *const ()) {
}
pub(crate) fn new_waker<W: WeakWake>(w: Weak<W>) -> Waker {
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
Waker::from_raw(RawWaker::new(
w.into_raw() as *const (),

View file

@ -46,6 +46,7 @@ impl StaticString {
// Safety: pointers are safe to send between threads.
unsafe impl Send for StaticString {}
// SAFETY:
// Safe to share across threads, because `register` is protected by a lock and strings inserted
// are never removed.
unsafe impl Sync for StaticString {}

View file

@ -460,6 +460,7 @@ pub unsafe extern "C" fn crosvm_client_usb_attach(
if let Ok(UsbControlResult::Ok { port }) = do_usb_attach(socket_path, dev_path) {
if !out_port.is_null() {
// SAFETY: trivially safe
unsafe { *out_port = port };
}
true
@ -595,8 +596,11 @@ pub unsafe extern "C" fn crosvm_client_modify_battery(
if battery_type.is_null() || property.is_null() || target.is_null() {
return false;
}
// SAFETY: trivially safe
let battery_type = unsafe { CStr::from_ptr(battery_type) };
// SAFETY: trivially safe
let property = unsafe { CStr::from_ptr(property) };
// SAFETY: trivially safe
let target = unsafe { CStr::from_ptr(target) };
do_modify_battery(

View file

@ -162,6 +162,7 @@ fn proto_error_to_int(e: protobuf::Error) -> c_int {
}
fn fd_cast<F: FromRawFd>(f: File) -> F {
// SAFETY:
// Safe because we are transferring unique ownership.
unsafe { F::from_raw_fd(f.into_raw_fd()) }
}
@ -533,14 +534,20 @@ impl crosvm {
match route.kind {
CROSVM_IRQ_ROUTE_IRQCHIP => {
let irqchip = entry.mut_irqchip();
// SAFETY:
// Safe because route.kind indicates which union field is valid.
irqchip.irqchip = unsafe { route.route.irqchip }.irqchip;
// SAFETY:
// Safe because route.kind indicates which union field is valid.
irqchip.pin = unsafe { route.route.irqchip }.pin;
}
CROSVM_IRQ_ROUTE_MSI => {
let msi = entry.mut_msi();
// SAFETY:
// Safe because route.kind indicates which union field is valid.
msi.address = unsafe { route.route.msi }.address;
// SAFETY:
// Safe because route.kind indicates which union field is valid.
msi.data = unsafe { route.route.msi }.data;
}
_ => return Err(EINVAL),

View file

@ -105,12 +105,14 @@ impl KvmKernelIrqChip {
};
dist_attr.attr = dist_attr_attr;
// SAFETY:
// Safe because we allocated the struct that's being passed in
let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &cpu_redist_attr) };
if ret != 0 {
return errno_result();
}
// SAFETY:
// Safe because we allocated the struct that's being passed in
let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &dist_attr) };
if ret != 0 {
@ -126,6 +128,7 @@ impl KvmKernelIrqChip {
addr: nr_irqs_ptr as u64,
flags: 0,
};
// SAFETY:
// Safe because we allocated the struct that's being passed in
let ret = unsafe { ioctl_with_ref(&vgic, KVM_SET_DEVICE_ATTR(), &nr_irqs_attr) };
if ret != 0 {
@ -178,6 +181,7 @@ impl IrqChipAArch64 for KvmKernelIrqChip {
flags: 0,
};
// SAFETY:
// Safe because we allocated the struct that's being passed in
let ret = unsafe { ioctl_with_ref(&self.vgic, KVM_SET_DEVICE_ATTR(), &init_gic_attr) };
if ret != 0 {

View file

@ -361,22 +361,25 @@ fn gfn_to_dtt_pte(
mem.get_host_address(GuestAddress(pt_gpa + index))
.context(Error::GetDTTEntry)?
} else {
} else if gfn > dtt_iter.gfn {
// SAFETY:
// Safe because we checked that dtt_iter.ptr is valid and that the dtt_pte
// for gfn lies on the same dtt page as the dtt_pte for dtt_iter.gfn, which
// means the calculated ptr will point to the same page as dtt_iter.ptr
if gfn > dtt_iter.gfn {
unsafe {
dtt_iter
.ptr
.add(mem::size_of::<AtomicU32>() * (gfn - dtt_iter.gfn) as usize)
}
} else {
unsafe {
dtt_iter
.ptr
.sub(mem::size_of::<AtomicU32>() * (dtt_iter.gfn - gfn) as usize)
}
unsafe {
dtt_iter
.ptr
.add(mem::size_of::<AtomicU32>() * (gfn - dtt_iter.gfn) as usize)
}
} else {
// SAFETY:
// Safe because we checked that dtt_iter.ptr is valid and that the dtt_pte
// for gfn lies on the same dtt page as the dtt_pte for dtt_iter.gfn, which
// means the calculated ptr will point to the same page as dtt_iter.ptr
unsafe {
dtt_iter
.ptr
.sub(mem::size_of::<AtomicU32>() * (dtt_iter.gfn - gfn) as usize)
}
};
@ -403,6 +406,7 @@ fn pin_page(
.get_host_address_range(GuestAddress(gpa), PAGE_SIZE_4K as usize)
.context("failed to get host address")? as u64;
// SAFETY:
// Safe because ptr is valid and guaranteed by the gfn_to_dtt_pte.
// Test PINNED flag
if (unsafe { (*leaf_entry).load(Ordering::Relaxed) } & DTTE_PINNED_FLAG) != 0 {
@ -410,9 +414,11 @@ fn pin_page(
return Ok(());
}
// SAFETY:
// Safe because the gpa is valid from the gfn_to_dtt_pte and the host_addr
// is guaranteed by MemoryMapping interface.
if unsafe { vfio_map(vfio_container, gpa, PAGE_SIZE_4K, host_addr) } {
// SAFETY:
// Safe because ptr is valid and guaranteed by the gfn_to_dtt_pte.
// set PINNED flag
unsafe { (*leaf_entry).fetch_or(DTTE_PINNED_FLAG, Ordering::SeqCst) };
@ -467,6 +473,7 @@ fn unpin_page(
};
if force {
// SAFETY:
// Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte.
// This case is for balloon to evict pages so these pages should
// already been locked by balloon and no device driver in VM is
@ -475,6 +482,7 @@ fn unpin_page(
unsafe { (*leaf_entry).fetch_and(!DTTE_ACCESSED_FLAG, Ordering::SeqCst) };
}
// SAFETY:
// Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte.
if let Err(entry) = unsafe {
(*leaf_entry).compare_exchange(DTTE_PINNED_FLAG, 0, Ordering::SeqCst, Ordering::SeqCst)
@ -488,6 +496,7 @@ fn unpin_page(
UnpinResult::NotPinned
} else {
if !force {
// SAFETY:
// Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte.
// The ACCESSED_FLAG is set by the guest if guest requires DMA map for
// this page. It represents whether or not this page is touched by the
@ -526,6 +535,7 @@ fn unpin_page(
if vfio_unmap(vfio_container, gpa, PAGE_SIZE_4K) {
UnpinResult::Unpinned
} else {
// SAFETY:
// Safe because leaf_entry is valid and guaranteed by the gfn_to_dtt_pte.
// make sure the pinned flag is set
unsafe { (*leaf_entry).fetch_or(DTTE_PINNED_FLAG, Ordering::SeqCst) };

View file

@ -272,6 +272,7 @@ impl VfioPlatformDevice {
Err(_e) => break,
};
let host = mmap.as_ptr() as u64;
// SAFETY:
// Safe because the given guest_map_start is valid guest bar address. and
// the host pointer is correct and valid guaranteed by MemoryMapping interface.
match unsafe {

View file

@ -291,6 +291,7 @@ impl ChildProcIntf {
if let Some(swap_device_uffd_sender) = swap_device_uffd_sender {
if let Err(e) = swap_device_uffd_sender.on_process_forked() {
error!("failed to SwapController::on_process_forked: {:?}", e);
// SAFETY:
// exit() is trivially safe.
unsafe { libc::exit(1) };
}
@ -305,6 +306,7 @@ impl ChildProcIntf {
// TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly
// defined.
//
// SAFETY:
// exit() is trivially safe.
// ! Never returns
unsafe { libc::exit(0) };

View file

@ -256,6 +256,8 @@ mod tests {
)
.unwrap();
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
// Check that serial output is sent to the pipe
device.write(serial_bus_address(DATA), &[b'T']);

View file

@ -23,6 +23,7 @@ pub use calibrate::*;
pub use cpuid::*;
fn rdtsc_safe() -> u64 {
// SAFETY:
// Safe because _rdtsc takes no arguments
unsafe { _rdtsc() }
}

View file

@ -450,6 +450,7 @@ mod tests {
}
fn rdtsc_frequency_higher_than_u32() -> u64 {
// SAFETY: trivially safe
unsafe { _rdtsc() }.wrapping_mul(1000)
}
@ -471,6 +472,7 @@ mod tests {
fn test_offset_identification_core_0() {
fn rdtsc_with_core_0_offset_by_100_000() -> u64 {
let mut id = 0u32;
// SAFETY: trivially safe
let mut value = unsafe { __rdtscp(&mut id as *mut u32) };
if id == 0 {
value += 100_000;
@ -513,6 +515,7 @@ mod tests {
fn test_offset_identification_core_1() {
fn rdtsc_with_core_1_offset_by_100_000() -> u64 {
let mut id = 0u32;
// SAFETY: trivially safe
let mut value = unsafe { __rdtscp(&mut id as *mut u32) };
if id == 1 {
value += 100_000;

View file

@ -17,6 +17,7 @@ pub type CpuidCountFn = unsafe fn(u32, u32) -> CpuidResult;
/// combination. `std::arch::x86_64::__cpuid_count` may be used to provide the CPUID information
/// from the host.
pub fn tsc_frequency_cpuid(cpuid_count: CpuidCountFn) -> Option<hypervisor::CpuIdEntry> {
// SAFETY:
// Safe because we pass 0 and 0 for this call and the host supports the `cpuid` instruction.
let result = unsafe { cpuid_count(0, 0) };
if result.eax < 0x15 {
@ -35,6 +36,7 @@ pub fn tsc_frequency_cpuid(cpuid_count: CpuidCountFn) -> Option<hypervisor::CpuI
edx: 0,
},
};
// SAFETY:
// Safe because we pass 0 and 0 for this call and the host supports the `cpuid` instruction.
tsc_freq.cpuid = unsafe { cpuid_count(tsc_freq.function, tsc_freq.index) };
@ -44,6 +46,7 @@ pub fn tsc_frequency_cpuid(cpuid_count: CpuidCountFn) -> Option<hypervisor::CpuI
// The core crystal frequency is missing. Old kernels (<5.3) don't try to derive it from the
// CPU base clock speed. Here, we essentially implement
// https://lore.kernel.org/patchwork/patch/1064690/ so that old kernels can calibrate TSC.
// SAFETY:
// Safe because the host supports `cpuid` instruction.
let cpu_clock = unsafe {
// 0x16 is the base clock frequency leaf.

View file

@ -724,25 +724,45 @@ pub unsafe trait TrbCast: FromBytes + AsBytes + TypedTrb {
}
}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for Trb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for NormalTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for SetupStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for DataStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for StatusStageTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for IsochTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for LinkTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for EventDataTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for NoopTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for DisableSlotCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for AddressDeviceCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ConfigureEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for EvaluateContextCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ResetEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for StopEndpointCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for SetTRDequeuePointerCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for ResetDeviceCommandTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for TransferEventTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for CommandCompletionEventTrb {}
// SAFETY: see safety comments for TrbCast
unsafe impl TrbCast for PortStatusChangeEventTrb {}
#[bitfield]

View file

@ -218,6 +218,7 @@ impl KvmVfioPviommu {
addr: 0,
};
// SAFETY:
// Safe as we are the owner of vfio_dev_attr, which is valid.
let ret = unsafe {
ioctl_with_ref(
@ -248,6 +249,7 @@ impl KvmVfioPviommu {
vsid,
};
// SAFETY:
// Safe as we are the owner of device and config which are valid, and we verify the return
// value.
let ret = unsafe { ioctl_with_ref(self, kvm_sys::KVM_PVIOMMU_SET_CONFIG, &config) };
@ -280,6 +282,7 @@ impl KvmVfioPviommu {
addr: addr_of_mut!(info) as usize as u64,
};
// SAFETY:
// Safe as we are the owner of vfio_dev_attr, which is valid.
let ret = unsafe {
ioctl_with_ref(
@ -344,6 +347,7 @@ impl VfioContainer {
// Construct a VfioContainer from an exist container file.
pub fn new_from_container(container: File) -> Result<Self> {
// SAFETY:
// Safe as file is vfio container descriptor and ioctl is defined by kernel.
let version = unsafe { ioctl(&container, VFIO_GET_API_VERSION()) };
if version as u8 != VFIO_API_VERSION {
@ -362,12 +366,14 @@ impl VfioContainer {
}
fn check_extension(&self, val: IommuType) -> bool {
// SAFETY:
// Safe as file is vfio container and make sure val is valid.
let ret = unsafe { ioctl_with_val(self, VFIO_CHECK_EXTENSION(), val as c_ulong) };
ret != 0
}
fn set_iommu(&mut self, val: IommuType) -> i32 {
// SAFETY:
// Safe as file is vfio container and make sure val is valid.
unsafe { ioctl_with_val(self, VFIO_SET_IOMMU(), val as c_ulong) }
}
@ -455,6 +461,7 @@ impl VfioContainer {
..Default::default()
};
// SAFETY:
// Safe as file is vfio container, dma_unmap is constructed by us, and
// we check the return value
let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_UNMAP_DMA(), &mut dma_unmap) };
@ -485,6 +492,7 @@ impl VfioContainer {
..Default::default()
};
// SAFETY:
// Safe as file is vfio container, iommu_info has valid values,
// and we check the return value
let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_GET_INFO(), &mut iommu_info) };
@ -516,6 +524,7 @@ impl VfioContainer {
..Default::default()
};
// SAFETY:
// Safe as file is vfio container, iommu_info_argsz has valid values,
// and we check the return value
let ret = unsafe { ioctl_with_mut_ref(self, VFIO_IOMMU_GET_INFO(), &mut iommu_info_argsz) };
@ -531,14 +540,16 @@ impl VfioContainer {
iommu_info_argsz.argsz as usize - mem::size_of::<vfio_iommu_type1_info>(),
);
iommu_info[0].argsz = iommu_info_argsz.argsz;
// Safe as file is vfio container, iommu_info has valid values,
// and we check the return value
let ret =
// SAFETY:
// Safe as file is vfio container, iommu_info has valid values,
// and we check the return value
unsafe { ioctl_with_mut_ptr(self, VFIO_IOMMU_GET_INFO(), iommu_info.as_mut_ptr()) };
if ret != 0 {
return Err(VfioError::IommuGetInfo(get_error()));
}
// SAFETY:
// Safe because we initialized iommu_info with enough space, u8 has less strict
// alignment, and since it will no longer be mutated.
let info_bytes = unsafe {
@ -622,6 +633,7 @@ impl VfioContainer {
IommuDevType::CoIommu | IommuDevType::PkvmPviommu | IommuDevType::VirtioIommu => {}
IommuDevType::NoIommu => {
for region in vm.get_memory().regions() {
// SAFETY:
// Safe because the guest regions are guaranteed not to overlap
unsafe {
self.vfio_dma_map(
@ -691,6 +703,8 @@ impl VfioContainer {
}
pub fn clone_as_raw_descriptor(&self) -> Result<RawDescriptor> {
// SAFETY: this call is safe because it doesn't modify any memory and we
// check the return value.
let raw_descriptor = unsafe { libc::dup(self.container.as_raw_descriptor()) };
if raw_descriptor < 0 {
Err(VfioError::ContainerDupError)
@ -729,8 +743,9 @@ impl VfioGroup {
argsz: mem::size_of::<vfio_group_status>() as u32,
flags: 0,
};
// Safe as we are the owner of group_file and group_status which are valid value.
let mut ret =
// SAFETY:
// Safe as we are the owner of group_file and group_status which are valid value.
unsafe { ioctl_with_mut_ref(&group_file, VFIO_GROUP_GET_STATUS(), &mut group_status) };
if ret < 0 {
return Err(VfioError::GetGroupStatus(get_error()));
@ -740,9 +755,10 @@ impl VfioGroup {
return Err(VfioError::GroupViable);
}
let container_raw_descriptor = container.as_raw_descriptor();
// SAFETY:
// Safe as we are the owner of group_file and container_raw_descriptor which are valid value,
// and we verify the ret value
let container_raw_descriptor = container.as_raw_descriptor();
ret = unsafe {
ioctl_with_ref(
&group_file,
@ -796,6 +812,7 @@ impl VfioGroup {
},
};
// SAFETY:
// Safe as we are the owner of vfio_dev_descriptor and vfio_dev_attr which are valid value,
// and we verify the return value.
if 0 != unsafe {
@ -815,12 +832,14 @@ impl VfioGroup {
let path: CString = CString::new(name.as_bytes()).expect("CString::new() failed");
let path_ptr = path.as_ptr();
// SAFETY:
// Safe as we are the owner of self and path_ptr which are valid value.
let ret = unsafe { ioctl_with_ptr(self, VFIO_GROUP_GET_DEVICE_FD(), path_ptr) };
if ret < 0 {
return Err(VfioError::GroupGetDeviceFD(get_error()));
}
// SAFETY:
// Safe as ret is valid descriptor
Ok(unsafe { File::from_raw_descriptor(ret) })
}
@ -1177,6 +1196,7 @@ impl VfioDevice {
let mut device_feature = vec_with_array_field::<vfio_device_feature, u8>(0);
device_feature[0].argsz = mem::size_of::<vfio_device_feature>() as u32;
device_feature[0].flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY;
// SAFETY:
// Safe as we are the owner of self and power_management which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) };
if ret < 0 {
@ -1197,8 +1217,9 @@ impl VfioDevice {
device_feature[0].argsz = (mem::size_of::<vfio_device_feature>() + payload_size) as u32;
device_feature[0].flags =
VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
// SAFETY:
// Safe as we know vfio_device_low_power_entry_with_wakeup has two 32-bit int fields
unsafe {
// Safe as we know vfio_device_low_power_entry_with_wakeup has two 32-bit int fields
device_feature[0]
.data
.as_mut_slice(payload_size)
@ -1207,6 +1228,7 @@ impl VfioDevice {
.as_slice(),
);
}
// SAFETY:
// Safe as we are the owner of self and power_management which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) };
if ret < 0 {
@ -1221,6 +1243,7 @@ impl VfioDevice {
let mut device_feature = vec_with_array_field::<vfio_device_feature, u8>(0);
device_feature[0].argsz = mem::size_of::<vfio_device_feature>() as u32;
device_feature[0].flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_LOW_POWER_EXIT;
// SAFETY:
// Safe as we are the owner of self and power_management which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_FEATURE(), &device_feature[0]) };
if ret < 0 {
@ -1236,15 +1259,18 @@ impl VfioDevice {
let mut dsm = vec_with_array_field::<vfio_acpi_dsm, u8>(count);
dsm[0].argsz = (mem::size_of::<vfio_acpi_dsm>() + mem::size_of_val(args)) as u32;
dsm[0].padding = 0;
// SAFETY:
// Safe as we allocated enough space to hold args
unsafe {
dsm[0].args.as_mut_slice(count).clone_from_slice(args);
}
// SAFETY:
// Safe as we are the owner of self and dsm which are valid value
let ret = unsafe { ioctl_with_mut_ref(&self.dev, VFIO_DEVICE_ACPI_DSM(), &mut dsm[0]) };
if ret < 0 {
Err(VfioError::VfioAcpiDsm(get_error()))
} else {
// SAFETY:
// Safe as we allocated enough space to hold args
let res = unsafe { dsm[0].args.as_slice(count) };
Ok(res.to_vec())
@ -1267,10 +1293,12 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = count as u32;
// SAFETY:
// It is safe as enough space is reserved through vec_with_array_field(u32)<count>.
let data = unsafe { irq_set[0].data.as_mut_slice(count * u32_size) };
data.copy_from_slice(&acpi_notification_eventfd.as_raw_descriptor().to_ne_bytes()[..]);
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1289,6 +1317,7 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = 0;
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1310,10 +1339,12 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = 1;
// SAFETY:
// It is safe as enough space is reserved through vec_with_array_field(u32)<count>.
let data = unsafe { irq_set[0].data.as_mut_slice(u32_size) };
data.copy_from_slice(&val.to_ne_bytes()[..]);
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1345,6 +1376,7 @@ impl VfioDevice {
irq_set[0].start = subindex;
irq_set[0].count = count as u32;
// SAFETY:
// irq_set.data could be none, bool or descriptor according to flags, so irq_set.data
// is u8 default, here irq_set.data is descriptor as u32, so 4 default u8 are combined
// together as u32. It is safe as enough space is reserved through
@ -1359,6 +1391,7 @@ impl VfioDevice {
data = right;
}
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1386,6 +1419,7 @@ impl VfioDevice {
irq_set[0].count = 1;
{
// SAFETY:
// irq_set.data could be none, bool or descriptor according to flags, so irq_set.data is
// u8 default, here irq_set.data is descriptor as u32, so 4 default u8 are combined
// together as u32. It is safe as enough space is reserved through
@ -1394,6 +1428,7 @@ impl VfioDevice {
descriptors.copy_from_slice(&descriptor.as_raw_descriptor().to_le_bytes()[..]);
}
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1412,6 +1447,7 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = 0;
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1430,6 +1466,7 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = 1;
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1448,6 +1485,7 @@ impl VfioDevice {
irq_set[0].start = 0;
irq_set[0].count = 1;
// SAFETY:
// Safe as we are the owner of self and irq_set which are valid value
let ret = unsafe { ioctl_with_ref(&self.dev, VFIO_DEVICE_SET_IRQS(), &irq_set[0]) };
if ret < 0 {
@ -1467,6 +1505,7 @@ impl VfioDevice {
..Default::default()
};
// SAFETY:
// Safe as we are the owner of device_file and dev_info which are valid value,
// and we verify the return value.
let ret = unsafe { ioctl_with_mut_ref(device_file, VFIO_DEVICE_GET_INFO(), &mut dev_info) };
@ -1504,6 +1543,7 @@ impl VfioDevice {
index: i,
count: 0,
};
// SAFETY:
// Safe as we are the owner of dev and irq_info which are valid value,
// and we verify the return value.
let ret = unsafe {
@ -1539,9 +1579,10 @@ impl VfioDevice {
size: 0,
offset: 0,
};
// Safe as we are the owner of dev and reg_info which are valid value,
// and we verify the return value.
let ret =
// SAFETY:
// Safe as we are the owner of dev and reg_info which are valid value,
// and we verify the return value.
unsafe { ioctl_with_mut_ref(dev, VFIO_DEVICE_GET_REGION_INFO(), &mut reg_info) };
if ret < 0 {
continue;
@ -1559,6 +1600,7 @@ impl VfioDevice {
region_with_cap[0].region_info.cap_offset = 0;
region_with_cap[0].region_info.size = 0;
region_with_cap[0].region_info.offset = 0;
// SAFETY:
// Safe as we are the owner of dev and region_info which are valid value,
// and we verify the return value.
let ret = unsafe {
@ -1593,27 +1635,33 @@ impl VfioDevice {
if offset + cap_header_sz > region_info_sz {
break;
}
// SAFETY:
// Safe, as cap_header struct is in this function allocated region_with_cap
// vec.
let cap_ptr = unsafe { info_ptr.offset(offset as isize) };
// SAFETY:
// Safe, as cap_header struct is in this function allocated region_with_cap
// vec.
let cap_header = unsafe { &*(cap_ptr as *const vfio_info_cap_header) };
if cap_header.id as u32 == VFIO_REGION_INFO_CAP_SPARSE_MMAP {
if offset + mmap_cap_sz > region_info_sz {
break;
}
// cap_ptr is vfio_region_info_cap_sparse_mmap here
// Safe, this vfio_region_info_cap_sparse_mmap is in this function allocated
// region_with_cap vec.
let sparse_mmap =
// SAFETY:
// Safe, this vfio_region_info_cap_sparse_mmap is in this function
// allocated region_with_cap vec.
unsafe { &*(cap_ptr as *const vfio_region_info_cap_sparse_mmap) };
let area_num = sparse_mmap.nr_areas;
if offset + mmap_cap_sz + area_num * mmap_area_sz > region_info_sz {
break;
}
// Safe, these vfio_region_sparse_mmap_area are in this function allocated
// region_with_cap vec.
let areas =
// SAFETY:
// Safe, these vfio_region_sparse_mmap_area are in this function allocated
// region_with_cap vec.
unsafe { sparse_mmap.areas.as_slice(sparse_mmap.nr_areas as usize) };
for area in areas.iter() {
mmaps.push(*area);
@ -1623,9 +1671,10 @@ impl VfioDevice {
break;
}
// cap_ptr is vfio_region_info_cap_type here
// Safe, this vfio_region_info_cap_type is in this function allocated
// region_with_cap vec
let cap_type_info =
// SAFETY:
// Safe, this vfio_region_info_cap_type is in this function allocated
// region_with_cap vec
unsafe { &*(cap_ptr as *const vfio_region_info_cap_type) };
cap_info = Some((cap_type_info.type_, cap_type_info.subtype));
@ -1776,10 +1825,12 @@ impl VfioDevice {
/// Reads a value from the specified `VfioRegionAddr.addr` + `offset`.
pub fn region_read_from_addr<T: FromBytes>(&self, addr: &VfioRegionAddr, offset: u64) -> T {
let mut val = mem::MaybeUninit::zeroed();
// Safe because we have zero-initialized `size_of::<T>()` bytes.
let buf =
// SAFETY:
// Safe because we have zero-initialized `size_of::<T>()` bytes.
unsafe { slice::from_raw_parts_mut(val.as_mut_ptr() as *mut u8, mem::size_of::<T>()) };
self.region_read(addr.index, buf, addr.addr + offset);
// SAFETY:
// Safe because any bit pattern is valid for a type that implements FromBytes.
unsafe { val.assume_init() }
}

View file

@ -738,6 +738,7 @@ impl io::Write for Writer {
}
let count = cmp::min(rem.len(), b.size());
// SAFETY:
// Safe because we have already verified that `vs` points to valid memory.
unsafe {
copy_nonoverlapping(rem.as_ptr(), b.as_mut_ptr(), count);

View file

@ -113,6 +113,7 @@ pub struct Caps(cap_t);
impl Caps {
/// Get the capabilities for the current thread.
pub fn for_current_thread() -> io::Result<Caps> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let caps = unsafe { cap_get_proc() };
if caps.is_null() {
@ -124,6 +125,7 @@ impl Caps {
/// Update the capabilities described by `self` by setting or clearing `caps` in `set`.
pub fn update(&mut self, caps: &[Capability], set: Set, value: Value) -> io::Result<()> {
// SAFETY:
// Safe because this only modifies the memory pointed to by `self.0` and we check the return
// value.
let ret = unsafe {
@ -146,6 +148,7 @@ impl Caps {
/// Apply the capabilities described by `self` to the current thread.
pub fn apply(&self) -> io::Result<()> {
// SAFETY: trivially safe
if unsafe { cap_set_proc(self.0) } == 0 {
Ok(())
} else {
@ -156,6 +159,7 @@ impl Caps {
impl Drop for Caps {
fn drop(&mut self) {
// SAFETY: cap_t is allocated from `Self`
unsafe {
cap_free(self.0);
}

View file

@ -301,6 +301,7 @@ macro_rules! scoped_cred {
impl Drop for $name {
fn drop(&mut self) {
// SAFETY: trivially safe
let res = unsafe { libc::syscall($syscall_nr, -1, self.old, -1) };
if res < 0 {
error!(
@ -337,6 +338,8 @@ thread_local! {
// SAFETY: both calls take no parameters and only return an integer value. The kernel also
// guarantees that they can never fail.
static THREAD_EUID: libc::uid_t = unsafe { libc::syscall(SYS_GETEUID) as libc::uid_t };
// SAFETY: both calls take no parameters and only return an integer value. The kernel also
// guarantees that they can never fail.
static THREAD_EGID: libc::gid_t = unsafe { libc::syscall(SYS_GETEGID) as libc::gid_t };
}
@ -1106,14 +1109,18 @@ impl PassthroughFs {
) -> io::Result<(Option<Handle>, OpenOptions)> {
let open_flags = self.update_open_flags(flags as i32);
let fd_open = syscall!(unsafe {
libc::openat64(
parent_data.as_raw_descriptor(),
name.as_ptr(),
(open_flags | libc::O_CLOEXEC) & !(libc::O_NOFOLLOW | libc::O_DIRECT),
)
})?;
let fd_open = syscall!(
// SAFETY: return value is checked.
unsafe {
libc::openat64(
parent_data.as_raw_descriptor(),
name.as_ptr(),
(open_flags | libc::O_CLOEXEC) & !(libc::O_NOFOLLOW | libc::O_DIRECT),
)
}
)?;
// SAFETY: fd_open is valid
let file_open = unsafe { File::from_raw_descriptor(fd_open) };
let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
let data = HandleData {
@ -1265,8 +1272,8 @@ impl PassthroughFs {
let policy_size = cmp::min(arg.policy_size, size_of::<fscrypt_policy>() as u64);
arg.policy_size = policy_size;
// SAFETY: the kernel will only write to `arg` and we check the return value.
let res =
// SAFETY: the kernel will only write to `arg` and we check the return value.
unsafe { ioctl_with_mut_ptr(&*data, FS_IOC_GET_ENCRYPTION_POLICY_EX(), &mut arg) };
if res < 0 {
Ok(IoctlReply::Done(Err(io::Error::last_os_error())))
@ -1594,9 +1601,9 @@ impl PassthroughFs {
if res < 0 {
Ok(IoctlReply::Done(Err(io::Error::last_os_error())))
} else {
// SAFETY: this value was initialized by us already and then overwritten by the kernel.
// TODO: Replace with `MaybeUninit::slice_as_ptr` once it is stabilized.
let digest_size =
// SAFETY: this value was initialized by us already and then overwritten by the kernel.
// TODO: Replace with `MaybeUninit::slice_as_ptr` once it is stabilized.
unsafe { addr_of!((*(buf.as_ptr() as *const fsverity_digest)).digest_size).read() };
let outlen = size_of::<fsverity_digest>() as u32 + u32::from(digest_size);
@ -1608,16 +1615,16 @@ impl PassthroughFs {
))));
}
// SAFETY: any bit pattern is valid for `MaybeUninit<u8>` and `fsverity_digest` doesn't
// contain any references.
let buf: [MaybeUninit<u8>; ROUNDED_LEN * size_of::<fsverity_digest>()] =
// SAFETY: any bit pattern is valid for `MaybeUninit<u8>` and `fsverity_digest`
// doesn't contain any references.
unsafe { mem::transmute(buf) };
// SAFETY: Casting to `*const [u8]` is safe because the kernel guarantees that the first
// `outlen` bytes of `buf` are initialized and `MaybeUninit<u8>` is guaranteed to have
// the same layout as `u8`.
// TODO: Replace with `MaybeUninit::slice_assume_init_ref` once it is stabilized.
let buf =
// SAFETY: Casting to `*const [u8]` is safe because the kernel guarantees that the
// first `outlen` bytes of `buf` are initialized and `MaybeUninit<u8>` is guaranteed
// to have the same layout as `u8`.
// TODO: Replace with `MaybeUninit::slice_assume_init_ref` once it is stabilized.
unsafe { &*(&buf[..outlen as usize] as *const [MaybeUninit<u8>] as *const [u8]) };
Ok(IoctlReply::Done(Ok(buf.to_vec())))
}
@ -2301,12 +2308,15 @@ impl FileSystem for PassthroughFs {
}
if valid.contains(SetattrValid::SIZE) {
// SAFETY: this doesn't modify any memory and we check the return value.
syscall!(match data {
Data::Handle(_, fd) => unsafe { libc::ftruncate64(fd, attr.st_size) },
Data::Handle(_, fd) => {
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe { libc::ftruncate64(fd, attr.st_size) }
}
_ => {
// There is no `ftruncateat` so we need to get a new fd and truncate it.
let f = self.open_inode(&inode_data, libc::O_NONBLOCK | libc::O_RDWR)?;
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe { libc::ftruncate64(f.as_raw_descriptor(), attr.st_size) }
}
})?;
@ -2542,6 +2552,7 @@ impl FileSystem for PassthroughFs {
self.find_handle(handle, inode)?
};
// SAFETY:
// Since this method is called whenever an fd is closed in the client, we can emulate that
// behavior by doing the same thing (dup-ing the fd and then immediately closing it). Safe
// because this doesn't modify any memory and we check the return values.
@ -2664,8 +2675,8 @@ impl FileSystem for PassthroughFs {
let path = CString::new(format!("self/fd/{}", file.0.as_raw_descriptor()))
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
// SAFETY: this doesn't modify any memory and we check the return value.
syscall!(self.with_proc_chdir(|| {
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe {
libc::setxattr(
path.as_ptr(),
@ -2677,17 +2688,19 @@ impl FileSystem for PassthroughFs {
}
}))?;
} else {
// For regular files and directories, we can just use fsetxattr.
// SAFETY: this doesn't modify any memory and we check the return value.
syscall!(unsafe {
libc::fsetxattr(
file.0.as_raw_descriptor(),
name.as_ptr(),
value.as_ptr() as *const libc::c_void,
value.len() as libc::size_t,
flags as c_int,
)
})?;
syscall!(
// For regular files and directories, we can just use fsetxattr.
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe {
libc::fsetxattr(
file.0.as_raw_descriptor(),
name.as_ptr(),
value.as_ptr() as *const libc::c_void,
value.len() as libc::size_t,
flags as c_int,
)
}
)?;
}
Ok(())
@ -2788,14 +2801,15 @@ impl FileSystem for PassthroughFs {
let path = CString::new(format!("self/fd/{}", file.0.as_raw_descriptor()))
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
// SAFETY: this doesn't modify any memory and we check the return value.
syscall!(
self.with_proc_chdir(|| unsafe { libc::removexattr(path.as_ptr(), name.as_ptr()) })
)?;
syscall!(self.with_proc_chdir(||
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe { libc::removexattr(path.as_ptr(), name.as_ptr()) }))?;
} else {
// For regular files and directories, we can just use fremovexattr.
// SAFETY: this doesn't modify any memory and we check the return value.
syscall!(unsafe { libc::fremovexattr(file.0.as_raw_descriptor(), name.as_ptr()) })?;
syscall!(
// SAFETY: this doesn't modify any memory and we check the return value.
unsafe { libc::fremovexattr(file.0.as_raw_descriptor(), name.as_ptr()) }
)?;
}
Ok(())
@ -2967,17 +2981,21 @@ impl FileSystem for PassthroughFs {
let src = src_data.as_raw_descriptor();
let dst = dst_data.as_raw_descriptor();
Ok(syscall!(unsafe {
libc::syscall(
libc::SYS_copy_file_range,
src,
&offset_src,
dst,
&offset_dst,
length,
flags,
)
})? as usize)
Ok(syscall!(
// SAFETY: this call is safe because it doesn't modify any memory and we
// check the return value.
unsafe {
libc::syscall(
libc::SYS_copy_file_range,
src,
&offset_src,
dst,
&offset_dst,
length,
flags,
)
}
)? as usize)
}
fn set_up_mapping<M: Mapper>(
@ -3137,6 +3155,8 @@ mod tests {
// SAFETY: both calls take no parameters and only return an integer value. The kernel also
// guarantees that they can never fail.
let uid = unsafe { libc::syscall(SYS_GETEUID) as libc::uid_t };
// SAFETY: both calls take no parameters and only return an integer value. The kernel also
// guarantees that they can never fail.
let gid = unsafe { libc::syscall(SYS_GETEGID) as libc::gid_t };
let pid = std::process::id() as libc::pid_t;
Context { uid, gid, pid }
@ -3265,18 +3285,23 @@ mod tests {
let p = PassthroughFs::new("tag", cfg).expect("Failed to create PassthroughFs");
// Selinux shouldn't get overwritten.
// SAFETY: trivially safe
let selinux = unsafe { CStr::from_bytes_with_nul_unchecked(b"security.selinux\0") };
assert_eq!(p.rewrite_xattr_name(selinux).to_bytes(), selinux.to_bytes());
// user, trusted, and system should not be changed either.
// SAFETY: trivially safe
let user = unsafe { CStr::from_bytes_with_nul_unchecked(b"user.foobar\0") };
assert_eq!(p.rewrite_xattr_name(user).to_bytes(), user.to_bytes());
// SAFETY: trivially safe
let trusted = unsafe { CStr::from_bytes_with_nul_unchecked(b"trusted.foobar\0") };
assert_eq!(p.rewrite_xattr_name(trusted).to_bytes(), trusted.to_bytes());
// SAFETY: trivially safe
let system = unsafe { CStr::from_bytes_with_nul_unchecked(b"system.foobar\0") };
assert_eq!(p.rewrite_xattr_name(system).to_bytes(), system.to_bytes());
// sehash should be re-written.
// SAFETY: trivially safe
let sehash = unsafe { CStr::from_bytes_with_nul_unchecked(b"security.sehash\0") };
assert_eq!(
p.rewrite_xattr_name(sehash).to_bytes(),

View file

@ -32,12 +32,14 @@ pub struct ReadDir<P> {
impl<P: DerefMut<Target = [u8]>> ReadDir<P> {
pub fn new<D: AsRawDescriptor>(dir: &D, offset: libc::off64_t, mut buf: P) -> io::Result<Self> {
// SAFETY:
// Safe because this doesn't modify any memory and we check the return value.
let res = unsafe { libc::lseek64(dir.as_raw_descriptor(), offset, libc::SEEK_SET) };
if res < 0 {
return Err(io::Error::last_os_error());
}
// SAFETY:
// Safe because the kernel guarantees that it will only write to `buf` and we check the
// return value.
let res = unsafe {
@ -117,6 +119,7 @@ fn strip_padding(b: &[u8]) -> &CStr {
.position(|&c| c == 0)
.expect("`b` doesn't contain any nul bytes");
// SAFETY:
// Safe because we are creating this string with the first nul-byte we found so we can
// guarantee that it is nul-terminated and doesn't contain any interior nuls.
unsafe { CStr::from_bytes_with_nul_unchecked(&b[..pos + 1]) }

View file

@ -188,20 +188,27 @@ impl<F: FileSystem + Sync> Worker<F> {
// cases.
const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2;
// Safe because this doesn't modify any memory and we check the return value.
let mut securebits = syscall!(unsafe { libc::prctl(libc::PR_GET_SECUREBITS) })
.map_err(Error::GetSecurebits)?;
let mut securebits = syscall!(
// SAFETY: Safe because this doesn't modify any memory and we check the return value.
unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }
)
.map_err(Error::GetSecurebits)?;
securebits |= SECBIT_NO_SETUID_FIXUP;
// Safe because this doesn't modify any memory and we check the return value.
syscall!(unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) })
.map_err(Error::SetSecurebits)?;
syscall!(
// SAFETY: Safe because this doesn't modify any memory and we check the return value.
unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }
)
.map_err(Error::SetSecurebits)?;
// To avoid extra locking, unshare filesystem attributes from parent. This includes the
// current working directory and umask.
// Safe because this doesn't modify any memory and we check the return value.
syscall!(unsafe { libc::unshare(libc::CLONE_FS) }).map_err(Error::UnshareFromParent)?;
syscall!(
// SAFETY: Safe because this doesn't modify any memory and we check the return value.
unsafe { libc::unshare(libc::CLONE_FS) }
)
.map_err(Error::UnshareFromParent)?;
#[derive(EventToken)]
enum Token {

Some files were not shown because too many files have changed in this diff Show more