devices: pcie: add pcie port structure

Since all PCIe ports (root port, upstream port, downstream port)
have similar PCI config space layouts and handling logic, they can
share a common code base for simplicity. This patch adds that shared
PciePort structure, which the PCIe root port and the PCIe
upstream/downstream ports can build on later.
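As an illustration only (not part of this change), a port type built
on top of this shared code might look roughly like the sketch below.
The PcieRootPort name, the device ID, and the delegation pattern are
assumptions; PciePort::new and its read_config/write_config methods
are the ones added here:

    pub struct PcieRootPort {
        // Shared root/upstream/downstream port logic lives here.
        pcie_port: PciePort,
    }

    impl PcieRootPort {
        pub fn new(secondary_bus_num: u8, slot_implemented: bool) -> Self {
            PcieRootPort {
                // 0x3420 is an illustrative device ID; a real port would
                // pick its own. Root ports sit on primary bus 0.
                pcie_port: PciePort::new(
                    0x3420,
                    "PcieRootPort".to_string(),
                    0,
                    secondary_bus_num,
                    slot_implemented,
                ),
            }
        }

        // Config accesses are simply delegated to the shared PciePort.
        pub fn read_config(&self, reg_idx: usize, data: &mut u32) {
            self.pcie_port.read_config(reg_idx, data);
        }

        pub fn write_config(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
            self.pcie_port.write_config(reg_idx, offset, data);
        }
    }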

BUG=b:199986018
TEST=./tools/presubmit

Change-Id: I23cfaf561432ed2cb977d0d2a020fc05370bb4b0
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3692429
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
// Copyright 2022 The ChromiumOS Authors.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::str::FromStr;
use std::sync::Arc;
use crate::pci::pci_configuration::PciCapabilityID;
use crate::pci::{MsiConfig, PciAddress, PciDeviceError};
use crate::pci::pcie::pci_bridge::PciBridgeBusRange;
use crate::pci::pcie::pcie_device::PmcConfig;
use crate::pci::pcie::pcie_host::PcieHostPort;
use crate::pci::pcie::*;
use base::warn;
use data_model::DataInit;
use resources::{Alloc, SystemAllocator};
use sync::Mutex;
// reserve 8MB memory window
const PCIE_BR_MEM_SIZE: u64 = 0x80_0000;
// reserve 64MB prefetch window
const PCIE_BR_PREF_MEM_SIZE: u64 = 0x400_0000;
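/// Common emulated state and config-space handling shared by PCIe root ports
/// and upstream/downstream switch ports.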
pub struct PciePort {
device_id: u16,
debug_label: String,
pci_address: Option<PciAddress>,
bus_range: PciBridgeBusRange,
pcie_host: Option<PcieHostPort>,
pcie_cap_reg_idx: Option<usize>,
pmc_cap_reg_idx: Option<usize>,
msi_config: Option<Arc<Mutex<MsiConfig>>>,
pmc_config: PmcConfig,
slot_control: Option<u16>,
slot_status: u16,
root_control: u16,
root_status: u32,
hp_interrupt_pending: bool,
pme_pending_request_id: Option<PciAddress>,
prepare_hotplug: bool,
removed_downstream_valid: bool,
}
impl PciePort {
/// Constructs a new PCIe port.
pub fn new(
device_id: u16,
debug_label: String,
primary_bus_num: u8,
secondary_bus_num: u8,
slot_implemented: bool,
) -> Self {
let bus_range = PciBridgeBusRange {
primary: primary_bus_num,
secondary: secondary_bus_num,
subordinate: secondary_bus_num,
};
PciePort {
device_id,
debug_label,
pci_address: None,
bus_range,
pcie_host: None,
pcie_cap_reg_idx: None,
pmc_cap_reg_idx: None,
msi_config: None,
pmc_config: PmcConfig::new(),
slot_control: if slot_implemented {
Some(PCIE_SLTCTL_PIC_OFF | PCIE_SLTCTL_AIC_OFF)
} else {
None
},
slot_status: 0,
root_control: 0,
root_status: 0,
hp_interrupt_pending: false,
pme_pending_request_id: None,
prepare_hotplug: false,
removed_downstream_valid: false,
}
}
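/// Constructs a PCIe port backed by a physical host port, inheriting its
/// device ID, name, and bus range.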
pub fn new_from_host(pcie_host: PcieHostPort, slot_implemented: bool) -> Self {
let bus_range = pcie_host.get_bus_range();
PciePort {
device_id: pcie_host.read_device_id(),
debug_label: pcie_host.host_name(),
pci_address: None,
bus_range,
pcie_host: Some(pcie_host),
pcie_cap_reg_idx: None,
pmc_cap_reg_idx: None,
msi_config: None,
pmc_config: PmcConfig::new(),
slot_control: if slot_implemented {
Some(PCIE_SLTCTL_PIC_OFF | PCIE_SLTCTL_AIC_OFF)
} else {
None
},
slot_status: 0,
root_control: 0,
root_status: 0,
hp_interrupt_pending: false,
pme_pending_request_id: None,
prepare_hotplug: false,
removed_downstream_valid: false,
}
}
pub fn get_device_id(&self) -> u16 {
self.device_id
}
pub fn debug_label(&self) -> String {
self.debug_label.clone()
}
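/// Allocates this port's PCI address: for a host-backed port the host's own
/// address is reserved, otherwise a new address is allocated on the primary bus.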
pub fn allocate_address(
&mut self,
resources: &mut SystemAllocator,
) -> std::result::Result<PciAddress, PciDeviceError> {
if self.pci_address.is_none() {
match &self.pcie_host {
Some(host) => {
let address = PciAddress::from_str(&host.host_name())
.map_err(|e| PciDeviceError::PciAddressParseFailure(host.host_name(), e))?;
if resources.reserve_pci(
Alloc::PciBar {
bus: address.bus,
dev: address.dev,
func: address.func,
bar: 0,
},
host.host_name(),
) {
self.pci_address = Some(address);
} else {
self.pci_address = None;
}
}
None => match resources.allocate_pci(self.bus_range.primary, self.debug_label()) {
Some(Alloc::PciBar {
bus,
dev,
func,
bar: _,
}) => self.pci_address = Some(PciAddress { bus, dev, func }),
_ => self.pci_address = None,
},
}
}
self.pci_address.ok_or(PciDeviceError::PciAllocationFailed)
}
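// Handles reads of the emulated PCIe capability registers (slot and root
// control/status).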
fn read_pcie_cap(&self, offset: usize, data: &mut u32) {
if offset == PCIE_SLTCTL_OFFSET {
*data = ((self.slot_status as u32) << 16) | (self.get_slot_control() as u32);
} else if offset == PCIE_ROOTCTL_OFFSET {
*data = self.root_control as u32;
} else if offset == PCIE_ROOTSTA_OFFSET {
*data = self.root_status;
}
}
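// Handles writes to the emulated PCIe capability registers, updating slot and
// root state and raising the corresponding interrupts.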
fn write_pcie_cap(&mut self, offset: usize, data: &[u8]) {
self.removed_downstream_valid = false;
match offset {
PCIE_SLTCTL_OFFSET => {
let value = match u16::from_slice(data) {
Some(&v) => v,
None => {
warn!("write SLTCTL isn't word, len: {}", data.len());
return;
}
};
// If the slot is populated and the power indicator is turned off,
// the downstream devices will be detached.
let old_control = self.get_slot_control();
match self.slot_control.as_mut() {
Some(v) => *v = value,
None => return,
}
if (self.slot_status & PCIE_SLTSTA_PDS != 0)
&& (value & PCIE_SLTCTL_PIC_OFF == PCIE_SLTCTL_PIC_OFF)
&& (old_control & PCIE_SLTCTL_PIC_OFF != PCIE_SLTCTL_PIC_OFF)
{
self.removed_downstream_valid = true;
self.slot_status &= !PCIE_SLTSTA_PDS;
self.slot_status |= PCIE_SLTSTA_PDC;
self.trigger_hp_interrupt();
}
if old_control != value {
// send a Command Completed event
self.slot_status |= PCIE_SLTSTA_CC;
self.trigger_cc_interrupt();
}
}
PCIE_SLTSTA_OFFSET => {
if self.slot_control.is_none() {
return;
}
let value = match u16::from_slice(data) {
Some(v) => *v,
None => {
warn!("write SLTSTA isn't word, len: {}", data.len());
return;
}
};
if value & PCIE_SLTSTA_ABP != 0 {
self.slot_status &= !PCIE_SLTSTA_ABP;
}
if value & PCIE_SLTSTA_PFD != 0 {
self.slot_status &= !PCIE_SLTSTA_PFD;
}
if value & PCIE_SLTSTA_PDC != 0 {
self.slot_status &= !PCIE_SLTSTA_PDC;
}
if value & PCIE_SLTSTA_CC != 0 {
self.slot_status &= !PCIE_SLTSTA_CC;
}
if value & PCIE_SLTSTA_DLLSC != 0 {
self.slot_status &= !PCIE_SLTSTA_DLLSC;
}
}
PCIE_ROOTCTL_OFFSET => match u16::from_slice(data) {
Some(v) => self.root_control = *v,
None => warn!("write root control isn't word, len: {}", data.len()),
},
PCIE_ROOTSTA_OFFSET => match u32::from_slice(data) {
Some(v) => {
if *v & PCIE_ROOTSTA_PME_STATUS != 0 {
if let Some(request_id) = self.pme_pending_request_id {
self.root_status &= !PCIE_ROOTSTA_PME_PENDING;
let req_id = ((request_id.bus as u32) << 8)
| ((request_id.dev as u32) << 3)
| (request_id.func as u32);
self.root_status &= !PCIE_ROOTSTA_PME_REQ_ID_MASK;
self.root_status |= req_id;
self.root_status |= PCIE_ROOTSTA_PME_STATUS;
self.pme_pending_request_id = None;
self.trigger_pme_interrupt();
} else {
self.root_status &= !PCIE_ROOTSTA_PME_STATUS;
if self.hp_interrupt_pending {
self.hp_interrupt_pending = false;
self.trigger_hp_interrupt();
}
}
}
}
None => warn!("write root status isn't dword, len: {}", data.len()),
},
_ => (),
}
}
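/// Reads a 32-bit config register. Emulated PCIe and PMC capability state is
/// filled in first; the host port, if present, may then update the value.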
pub fn read_config(&self, reg_idx: usize, data: &mut u32) {
if let Some(pcie_cap_reg_idx) = self.pcie_cap_reg_idx {
if reg_idx >= pcie_cap_reg_idx && reg_idx < pcie_cap_reg_idx + (PCIE_CAP_LEN / 4) {
let offset = (reg_idx - pcie_cap_reg_idx) * 4;
self.read_pcie_cap(offset, data);
}
}
if let Some(pmc_cap_reg_idx) = self.pmc_cap_reg_idx {
if reg_idx == pmc_cap_reg_idx + PMC_CAP_CONTROL_STATE_OFFSET {
self.pmc_config.read(data);
}
}
if let Some(host) = &self.pcie_host {
host.read_config(reg_idx, data);
}
}
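/// Handles a config register write, updating the emulated PCIe and PMC
/// capability state and forwarding the write to the host port if present.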
pub fn write_config(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
if let Some(pcie_cap_reg_idx) = self.pcie_cap_reg_idx {
if reg_idx >= pcie_cap_reg_idx && reg_idx < pcie_cap_reg_idx + (PCIE_CAP_LEN / 4) {
let delta = ((reg_idx - pcie_cap_reg_idx) * 4) + offset as usize;
self.write_pcie_cap(delta, data);
}
}
if let Some(pmc_cap_reg_idx) = self.pmc_cap_reg_idx {
if reg_idx == pmc_cap_reg_idx + PMC_CAP_CONTROL_STATE_OFFSET {
let old_status = self.pmc_config.get_power_status();
self.pmc_config.write(offset, data);
let new_status = self.pmc_config.get_power_status();
if old_status == PciDevicePower::D3
&& new_status == PciDevicePower::D0
&& self.prepare_hotplug
{
if let Some(host) = self.pcie_host.as_mut() {
host.hotplug_probe();
self.prepare_hotplug = false;
}
}
}
}
if let Some(host) = self.pcie_host.as_mut() {
host.write_config(reg_idx, offset, data);
}
}
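/// Records where the PCIe and power-management capabilities live in config
/// space so that later config accesses can be routed to the emulated registers.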
pub fn set_capability_reg_idx(&mut self, id: PciCapabilityID, reg_idx: usize) {
match id {
PciCapabilityID::PciExpress => self.pcie_cap_reg_idx = Some(reg_idx),
PciCapabilityID::PowerManagement => self.pmc_cap_reg_idx = Some(reg_idx),
_ => (),
}
}
pub fn get_bus_range(&self) -> Option<PciBridgeBusRange> {
Some(self.bus_range)
}
pub fn get_bridge_window_size(&self) -> (u64, u64) {
if let Some(host) = &self.pcie_host {
host.get_bridge_window_size()
} else {
(PCIE_BR_MEM_SIZE, PCIE_BR_PREF_MEM_SIZE)
}
}
fn get_slot_control(&self) -> u16 {
if let Some(slot_control) = self.slot_control {
return slot_control;
}
0
}
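/// Stores a shared reference to the MSI configuration used to trigger this
/// port's interrupts.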
pub fn clone_interrupt(&mut self, msi_config: Arc<Mutex<MsiConfig>>) {
self.msi_config = Some(msi_config);
}
pub fn hotplug_implemented(&self) -> bool {
self.slot_control.is_some()
}
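// Fires an MSI if the guest has enabled MSI on this port.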
fn trigger_interrupt(&self) {
if let Some(msi_config) = &self.msi_config {
let msi_config = msi_config.lock();
if msi_config.is_msi_enabled() {
msi_config.trigger()
}
}
}
fn trigger_cc_interrupt(&self) {
if (self.get_slot_control() & PCIE_SLTCTL_CCIE) != 0
&& (self.slot_status & PCIE_SLTSTA_CC) != 0
{
self.trigger_interrupt()
}
}
fn trigger_hp_interrupt(&self) {
let slot_control = self.get_slot_control();
if (slot_control & PCIE_SLTCTL_HPIE) != 0
&& (self.slot_status & slot_control & (PCIE_SLTCTL_ABPE | PCIE_SLTCTL_PDCE)) != 0
{
self.trigger_interrupt()
}
}
fn trigger_pme_interrupt(&self) {
if (self.root_control & PCIE_ROOTCTL_PME_ENABLE) != 0
&& (self.root_status & PCIE_ROOTSTA_PME_STATUS) != 0
{
self.trigger_interrupt()
}
}
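/// Raises a PME for this port. If a PME is already reported in the root
/// status, the new event is recorded as pending; otherwise the requester ID
/// is filled in and a PME interrupt is triggered.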
pub fn inject_pme(&mut self) {
if (self.root_status & PCIE_ROOTSTA_PME_STATUS) != 0 {
self.root_status |= PCIE_ROOTSTA_PME_PENDING;
self.pme_pending_request_id = self.pci_address;
} else {
let request_id = self.pci_address.unwrap();
let req_id = ((request_id.bus as u32) << 8)
| ((request_id.dev as u32) << 3)
| (request_id.func as u32);
self.root_status &= !PCIE_ROOTSTA_PME_REQ_ID_MASK;
self.root_status |= req_id;
self.pme_pending_request_id = None;
self.root_status |= PCIE_ROOTSTA_PME_STATUS;
self.trigger_pme_interrupt();
}
}
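/// Notifies the guest of a hotplug event, either via a PME (when the power
/// management state requires it) or via a hotplug interrupt.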
pub fn trigger_hp_or_pme_interrupt(&mut self) {
if self.pmc_config.should_trigger_pme() {
self.hp_interrupt_pending = true;
self.inject_pme();
} else {
self.trigger_hp_interrupt();
}
}
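/// Returns true if this port is backed by a physical host port.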
pub fn is_host(&self) -> bool {
self.pcie_host.is_some()
}
pub fn hot_unplug(&mut self) {
if let Some(host) = self.pcie_host.as_mut() {
host.hot_unplug()
}
}
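/// If this port implements a slot and `host_addr` falls within its downstream
/// bus range (or the port is purely virtual), returns the secondary bus number.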
pub fn is_match(&self, host_addr: PciAddress) -> Option<u8> {
let _ = self.slot_control?;
if (host_addr.bus >= self.bus_range.secondary
&& host_addr.bus <= self.bus_range.subordinate)
|| self.pcie_host.is_none()
{
Some(self.bus_range.secondary)
} else {
None
}
}
pub fn removed_downstream_valid(&self) -> bool {
self.removed_downstream_valid
}
pub fn set_slot_status(&mut self, flag: u16) {
self.slot_status |= flag;
}
pub fn should_trigger_pme(&mut self) -> bool {
self.pmc_config.should_trigger_pme()
}
pub fn prepare_hotplug(&mut self) {
self.prepare_hotplug = true;
}
}