acpi: vGPE: support direct SCI interrupt passthrough

In order to allow handling physical GPEs in the guest, implement
forwarding of physical SCI interrupts from the host to the guest.

It uses an eventfd-based mechanism similar to how we normally forward
other level-triggered interrupts. The difference is that SCI trigger
events from the kernel are not injected directly into the irqchip. In
order to support injecting both physical and virtual SCI interrupts
(so that some GPEs can be handled as physical while other GPEs are
emulated), the SCI trigger event is intercepted by ACPIPMResource,
which injects it into the irqchip via another eventfd, the same
eventfd that is used for injecting virtual SCI interrupts.

Similarly, the resample event for a forwarded physical SCI is received
via the same eventfd as for virtual SCIs and is then forwarded back to
the kernel.
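
For illustration, a minimal sketch of the trigger half of that relay,
assuming crosvm's base::Event read/write API; the standalone function
and the name relay_sci are hypothetical, the real logic lives in the
ACPIPMResource worker in the diff below:

    use base::Event;

    // Hypothetical sketch: consume one physical SCI trigger from the
    // kernel and inject it through the same eventfd that carries
    // virtual SCIs; the resample path flows in the opposite direction.
    fn relay_sci(kernel_trigger: &Event, sci_evt: &Event) -> base::Result<()> {
        let _ = kernel_trigger.read()?; // clear the kernel's trigger event
        sci_evt.write(1)?; // inject the SCI into the irqchip path
        Ok(())
    }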

BUG=b:205072342
TEST=see CL:3492224

Change-Id: I480a3000d69305aabc777e193d3453c476d2dbbd
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3492221
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Dmytro Maluka <dmy@semihalf.com>
Reviewed-by: Dmitry Torokhov <dtor@chromium.org>
Commit-Queue: Tomasz Nowicki <tnowicki@google.com>
Author: Dmytro Maluka, 2022-02-25 15:47:16 +00:00 (committed by Commit Bot)
parent 87a1a84366
commit 121e33246b
4 changed files with 65 additions and 4 deletions

--- a/Cargo.toml
+++ b/Cargo.toml
@@ -115,7 +115,7 @@ chromeos = ["base/chromeos", "audio_cras", "devices/chromeos"]
 composite-disk = ["protos/composite-disk", "protobuf", "disk/composite-disk"]
 default = ["audio", "gpu", "usb"]
 default-no-sandbox = []
-direct = ["devices/direct"]
+direct = ["devices/direct", "x86_64/direct"]
 gdb = ["gdbstub", "gdbstub_arch", "arch/gdb", "vm_control/gdb", "x86_64/gdb"]
 gfxstream = ["devices/gfxstream"]
 gpu = ["devices/gpu"]

--- a/devices/src/acpi.rs
+++ b/devices/src/acpi.rs
@@ -37,6 +37,8 @@ struct GpeResource {
 pub struct ACPIPMResource {
     sci_evt: Event,
     sci_evt_resample: Event,
+    #[cfg(feature = "direct")]
+    sci_direct_evt: Option<(Event, Event)>,
     kill_evt: Option<Event>,
     worker_thread: Option<thread::JoinHandle<()>>,
     suspend_evt: Event,
@@ -51,6 +53,7 @@ impl ACPIPMResource {
     pub fn new(
         sci_evt: Event,
         sci_evt_resample: Event,
+        #[cfg(feature = "direct")] sci_direct_evt: Option<(Event, Event)>,
         suspend_evt: Event,
         exit_evt: Event,
     ) -> ACPIPMResource {
@@ -67,6 +70,8 @@ impl ACPIPMResource {
         ACPIPMResource {
             sci_evt,
             sci_evt_resample,
+            #[cfg(feature = "direct")]
+            sci_direct_evt,
             kill_evt: None,
             worker_thread: None,
             suspend_evt,
@@ -94,10 +99,28 @@ impl ACPIPMResource {
         let pm1 = self.pm1.clone();
         let gpe0 = self.gpe0.clone();
 
+        #[cfg(feature = "direct")]
+        let sci_direct_evt = if let Some((trigger, resample)) = &self.sci_direct_evt {
+            Some((
+                trigger.try_clone().expect("failed to clone event"),
+                resample.try_clone().expect("failed to clone event"),
+            ))
+        } else {
+            None
+        };
+
         let worker_result = thread::Builder::new()
             .name("ACPI PM worker".to_string())
             .spawn(move || {
-                if let Err(e) = run_worker(sci_resample, kill_evt, sci_evt, pm1, gpe0) {
+                if let Err(e) = run_worker(
+                    sci_resample,
+                    kill_evt,
+                    sci_evt,
+                    pm1,
+                    gpe0,
+                    #[cfg(feature = "direct")]
+                    sci_direct_evt,
+                ) {
                     error!("{}", e);
                 }
             });
@@ -115,10 +138,13 @@ fn run_worker(
     sci_evt: Event,
     pm1: Arc<Mutex<Pm1Resource>>,
     gpe0: Arc<Mutex<GpeResource>>,
+    #[cfg(feature = "direct")] sci_direct_evt: Option<(Event, Event)>,
 ) -> Result<(), ACPIPMError> {
     #[derive(PollToken)]
     enum Token {
         InterruptResample,
+        #[cfg(feature = "direct")]
+        InterruptTriggerDirect,
         Kill,
     }
 
@@ -128,6 +154,16 @@ fn run_worker(
     ])
     .map_err(ACPIPMError::CreateWaitContext)?;
 
+    #[cfg(feature = "direct")]
+    if let Some((ref trigger, _)) = sci_direct_evt {
+        wait_ctx
+            .add(trigger, Token::InterruptTriggerDirect)
+            .map_err(ACPIPMError::CreateWaitContext)?;
+    }
+
+    #[cfg(feature = "direct")]
+    let mut pending_sci_direct_resample: Option<&Event> = None;
+
     loop {
         let events = wait_ctx.wait().map_err(ACPIPMError::WaitError)?;
         for event in events.iter().filter(|e| e.is_readable) {
@@ -135,10 +171,28 @@ fn run_worker(
                 Token::InterruptResample => {
                     let _ = sci_resample.read();
 
+                    #[cfg(feature = "direct")]
+                    if let Some(resample) = pending_sci_direct_resample.take() {
+                        if let Err(e) = resample.write(1) {
+                            error!("ACPIPM: failed to resample sci event: {}", e);
+                        }
+                    }
+
                     // Re-trigger SCI if PM1 or GPE status is still not cleared.
                     pm1.lock().trigger_sci(&sci_evt);
                     gpe0.lock().trigger_sci(&sci_evt);
                 }
+                #[cfg(feature = "direct")]
+                Token::InterruptTriggerDirect => {
+                    if let Some((ref trigger, ref resample)) = sci_direct_evt {
+                        let _ = trigger.read();
+
+                        if let Err(e) = sci_evt.write(1) {
+                            error!("ACPIPM: failed to trigger sci event: {}", e);
+                        }
+
+                        pending_sci_direct_resample = Some(resample);
+                    }
+                }
                 Token::Kill => return Ok(()),
             }
         }
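
The pending resample bookkeeping above is the key design point: the
host kernel is only asked to re-arm the GPE after the guest has
acknowledged the injected SCI, preserving level-triggered semantics.
A condensed, hypothetical restatement of that ordering, with invented
names and the same base::Event API:

    use base::Event;

    // Hypothetical sketch of the worker's direct-SCI state machine.
    struct DirectSciRelay<'a> {
        kernel_trigger: &'a Event,  // signaled by the kernel on a physical SCI
        kernel_resample: &'a Event, // written back to re-arm the host GPE
        sci_evt: &'a Event,         // shared with the virtual SCI path
        pending_resample: bool,
    }

    impl<'a> DirectSciRelay<'a> {
        // Kernel signaled a physical SCI: inject it into the guest.
        fn on_kernel_trigger(&mut self) {
            let _ = self.kernel_trigger.read();
            let _ = self.sci_evt.write(1);
            self.pending_resample = true; // defer re-arm until guest EOI
        }

        // Guest EOIed the SCI and the irqchip signaled resample.
        fn on_guest_resample(&mut self) {
            if self.pending_resample {
                self.pending_resample = false;
                let _ = self.kernel_resample.write(1); // re-arm the host GPE
            }
        }
    }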

--- a/x86_64/Cargo.toml
+++ b/x86_64/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
 
 [features]
 gdb = ["gdbstub_arch", "arch/gdb"]
+direct = []
 
 [dependencies]
 arch = { path = "../arch" }

--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -1299,8 +1299,14 @@ impl X8664arch {
         irq_chip
             .register_irq_event(sci_irq, &pm_sci_evt, Some(&pm_sci_evt_resample))
             .map_err(Error::RegisterIrqfd)?;
-        let mut pmresource =
-            devices::ACPIPMResource::new(pm_sci_evt, pm_sci_evt_resample, suspend_evt, exit_evt);
+        let mut pmresource = devices::ACPIPMResource::new(
+            pm_sci_evt,
+            pm_sci_evt_resample,
+            #[cfg(feature = "direct")]
+            None,
+            suspend_evt,
+            exit_evt,
+        );
 
         pmresource.to_aml_bytes(&mut amls);
         pmresource.start();
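
Note that x86_64 passes None for sci_direct_evt here, so direct SCI
forwarding stays inert until a caller supplies an event pair (the
wiring is exercised by the CL referenced in TEST= above). As a
hypothetical sketch of what such a caller might look like, assuming
base::Event::new and the constructor signature added in this commit:

    // Hypothetical caller-side wiring, not part of this commit: create
    // the trigger/resample pair the host kernel side would use, and
    // pass it instead of None.
    use base::Event;

    let sci_trigger = Event::new().expect("failed to create event");
    let sci_resample = Event::new().expect("failed to create event");
    let mut pmresource = devices::ACPIPMResource::new(
        pm_sci_evt,
        pm_sci_evt_resample,
        #[cfg(feature = "direct")]
        Some((sci_trigger, sci_resample)),
        suspend_evt,
        exit_evt,
    );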