devices: block: add bounds checks

As reported by the Firecracker team, the block device model doesn't
reject an I/O request that starts before the end of the disk but
extends beyond it.  For writes to disks backed by raw files, such a
request could unintentionally extend the size of the disk.

Add bounds checks to the request execution path to catch these
out-of-bounds I/Os and fail them.  While we're here, fix a few other
minor issues: only seek for read and write requests (the 'sector' field
of the request should be ignored for flush, write zeroes, and discard),
and check for overflow when performing the shifts to convert from
sectors to bytes.
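
As a rough illustration of the intended check (the names below are
illustrative only, not the identifiers used in the patch, and a checked
multiply stands in for the checked shift the patch uses):

  // Minimal sketch of the bounds/overflow check described above.
  const SECTOR_SIZE: u64 = 512;

  /// Returns true iff the byte range implied by `sector` and `len` lies
  /// entirely within a disk of `disk_size` bytes, using overflow-checked math.
  fn request_in_bounds(sector: u64, len: u64, disk_size: u64) -> bool {
      // Sector-to-byte conversion; None means the multiplication overflowed u64.
      let offset = match sector.checked_mul(SECTOR_SIZE) {
          Some(o) => o,
          None => return false,
      };
      // The request must end at or before the end of the disk.
      match offset.checked_add(len) {
          Some(end) => end <= disk_size,
          None => false,
      }
  }

  fn main() {
      let disk_size = 0x1000; // 8 sectors of 512 bytes
      assert!(request_in_bounds(7, 512, disk_size)); // last valid sector
      assert!(!request_in_bounds(7, 1024, disk_size)); // extends past the end
      assert!(!request_in_bounds(u64::MAX, 512, disk_size)); // conversion overflows
  }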

BUG=chromium:927393
TEST=cargo test -p devices block

Change-Id: I0dd19299d03a4f0716093091f173a5c507529963
Signed-off-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/1448852
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
Daniel Verkamp 2019-01-31 10:20:30 -08:00 committed by chrome-bot
parent c14f2ec270
commit 5329be3634


@@ -210,6 +210,7 @@ enum ExecuteError {
     ReadOnly {
         request_type: RequestType,
     },
+    OutOfRange,
     Unsupported(u32),
 }
@@ -223,6 +224,7 @@ impl ExecuteError {
             ExecuteError::Write { .. } => VIRTIO_BLK_S_IOERR,
             ExecuteError::DiscardWriteZeroes { .. } => VIRTIO_BLK_S_IOERR,
             ExecuteError::ReadOnly { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::OutOfRange { .. } => VIRTIO_BLK_S_IOERR,
             ExecuteError::Unsupported(_) => VIRTIO_BLK_S_UNSUPP,
         }
     }
@@ -373,6 +375,7 @@ impl Request {
         &self,
         read_only: bool,
         disk: &mut T,
+        disk_size: u64,
         flush_timer: &mut TimerFd,
         flush_timer_armed: &mut bool,
         mem: &GuestMemory,
@@ -386,13 +389,35 @@ impl Request {
             });
         }
 
-        disk.seek(SeekFrom::Start(self.sector << SECTOR_SHIFT))
-            .map_err(|e| ExecuteError::Seek {
-                ioerr: e,
-                sector: self.sector,
-            })?;
+        /// Check that a request accesses only data within the disk's current size.
+        /// All parameters are in units of bytes.
+        fn check_range(
+            io_start: u64,
+            io_length: u64,
+            disk_size: u64,
+        ) -> result::Result<(), ExecuteError> {
+            let io_end = io_start
+                .checked_add(io_length)
+                .ok_or(ExecuteError::OutOfRange)?;
+            if io_end > disk_size {
+                Err(ExecuteError::OutOfRange)
+            } else {
+                Ok(())
+            }
+        }
+
         match self.request_type {
             RequestType::In => {
+                let offset = self
+                    .sector
+                    .checked_shl(u32::from(SECTOR_SHIFT))
+                    .ok_or(ExecuteError::OutOfRange)?;
+                check_range(offset, u64::from(self.data_len), disk_size)?;
+                disk.seek(SeekFrom::Start(offset))
+                    .map_err(|e| ExecuteError::Seek {
+                        ioerr: e,
+                        sector: self.sector,
+                    })?;
                 mem.read_to_memory(self.data_addr, disk, self.data_len as usize)
                     .map_err(|e| ExecuteError::Read {
                         addr: self.data_addr,
@@ -403,6 +428,16 @@ impl Request {
                 return Ok(self.data_len);
             }
             RequestType::Out => {
+                let offset = self
+                    .sector
+                    .checked_shl(u32::from(SECTOR_SHIFT))
+                    .ok_or(ExecuteError::OutOfRange)?;
+                check_range(offset, u64::from(self.data_len), disk_size)?;
+                disk.seek(SeekFrom::Start(offset))
+                    .map_err(|e| ExecuteError::Seek {
+                        ioerr: e,
+                        sector: self.sector,
+                    })?;
                 mem.write_from_memory(self.data_addr, disk, self.data_len as usize)
                     .map_err(|e| ExecuteError::Write {
                         addr: self.data_addr,
@@ -438,23 +473,29 @@ impl Request {
                         });
                     }
 
+                    let offset = sector
+                        .checked_shl(u32::from(SECTOR_SHIFT))
+                        .ok_or(ExecuteError::OutOfRange)?;
+                    let length = u64::from(num_sectors)
+                        .checked_shl(u32::from(SECTOR_SHIFT))
+                        .ok_or(ExecuteError::OutOfRange)?;
+                    check_range(offset, length, disk_size)?;
+
                     if self.request_type == RequestType::Discard {
                         // Since Discard is just a hint and some filesystems may not implement
                         // FALLOC_FL_PUNCH_HOLE, ignore punch_hole errors.
-                        let _ = disk.punch_hole(
-                            sector << SECTOR_SHIFT,
-                            (num_sectors as u64) << SECTOR_SHIFT,
-                        );
+                        let _ = disk.punch_hole(offset, length);
                     } else {
-                        disk.seek(SeekFrom::Start(sector << SECTOR_SHIFT))
+                        disk.seek(SeekFrom::Start(offset))
                             .map_err(|e| ExecuteError::Seek { ioerr: e, sector })?;
-                        disk.write_zeroes((num_sectors as usize) << SECTOR_SHIFT)
-                            .map_err(|e| ExecuteError::DiscardWriteZeroes {
-                                ioerr: Some(e),
-                                sector,
-                                num_sectors,
-                                flags,
-                            })?;
+                        disk.write_zeroes(length as usize).map_err(|e| {
+                            ExecuteError::DiscardWriteZeroes {
+                                ioerr: Some(e),
+                                sector,
+                                num_sectors,
+                                flags,
+                            }
+                        })?;
                     }
                 }
             }
@@ -489,6 +530,8 @@ impl<T: DiskFile> Worker<T> {
     ) -> bool {
         let queue = &mut self.queues[queue_index];
 
+        let disk_size = self.disk_size.lock();
+
         let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
         let mut used_count = 0;
         for avail_desc in queue.iter(&self.mem) {
@@ -498,6 +541,7 @@ impl<T: DiskFile> Worker<T> {
                     let status = match request.execute(
                         self.read_only,
                         &mut self.disk_image,
+                        *disk_size,
                         flush_timer,
                         flush_timer_armed,
                         &self.mem,
@@ -854,7 +898,7 @@ impl<T: 'static + AsRawFd + DiskFile + Send> VirtioDevice for Block<T> {
 
 #[cfg(test)]
 mod tests {
-    use std::fs::File;
+    use std::fs::{File, OpenOptions};
     use std::path::PathBuf;
 
     use sys_util::TempDir;
@@ -903,4 +947,87 @@ mod tests {
             assert_eq!(0x100000220, b.features());
         }
     }
+
+    #[test]
+    fn read_last_sector() {
+        let tempdir = TempDir::new("/tmp/block_read_test").unwrap();
+        let mut path = PathBuf::from(tempdir.as_path().unwrap());
+        path.push("disk_image");
+        let mut f = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .open(&path)
+            .unwrap();
+        let disk_size = 0x1000;
+        f.set_len(disk_size).unwrap();
+
+        let mem = GuestMemory::new(&[(GuestAddress(0u64), 4 * 1024 * 1024)])
+            .expect("Creating guest memory failed.");
+
+        let req = Request {
+            request_type: RequestType::In,
+            sector: 7, // Disk is 8 sectors long, so this is the last valid sector.
+            data_addr: GuestAddress(0x1000),
+            data_len: 512, // Read 1 sector of data.
+            status_addr: GuestAddress(0),
+            discard_write_zeroes_seg: None,
+        };
+
+        let mut flush_timer = TimerFd::new().expect("failed to create flush_timer");
+        let mut flush_timer_armed = false;
+
+        assert_eq!(
+            512,
+            req.execute(
+                false,
+                &mut f,
+                disk_size,
+                &mut flush_timer,
+                &mut flush_timer_armed,
+                &mem
+            )
+            .expect("execute failed"),
+        );
+    }
+
+    #[test]
+    fn read_beyond_last_sector() {
+        let tempdir = TempDir::new("/tmp/block_read_test").unwrap();
+        let mut path = PathBuf::from(tempdir.as_path().unwrap());
+        path.push("disk_image");
+        let mut f = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .open(&path)
+            .unwrap();
+        let disk_size = 0x1000;
+        f.set_len(disk_size).unwrap();
+
+        let mem = GuestMemory::new(&[(GuestAddress(0u64), 4 * 1024 * 1024)])
+            .expect("Creating guest memory failed.");
+
+        let req = Request {
+            request_type: RequestType::In,
+            sector: 7, // Disk is 8 sectors long, so this is the last valid sector.
+            data_addr: GuestAddress(0x1000),
+            data_len: 512 * 2, // Read 2 sectors of data (overlap the end of the disk).
+            status_addr: GuestAddress(0),
+            discard_write_zeroes_seg: None,
+        };
+
+        let mut flush_timer = TimerFd::new().expect("failed to create flush_timer");
+        let mut flush_timer_armed = false;
+
+        req.execute(
+            false,
+            &mut f,
+            disk_size,
+            &mut flush_timer,
+            &mut flush_timer_armed,
+            &mem,
+        )
+        .expect_err("execute was supposed to fail");
+    }
 }