Mirror of https://github.com/salsa-rs/salsa.git, synced 2025-02-03 18:47:53 +00:00.
Commit b272cc1321: 20 changed files with 797 additions and 451 deletions.
@@ -223,8 +223,8 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
        // For input queries, we need `set_foo` etc
        if let QueryStorage::Input = query.storage {
            let set_fn_name = Ident::new(&format!("set_{}", fn_name), fn_name.span());
            let set_constant_fn_name =
                Ident::new(&format!("set_constant_{}", fn_name), fn_name.span());
            let set_with_durability_fn_name =
                Ident::new(&format!("set_{}_with_durability", fn_name), fn_name.span());

            let set_fn_docs = format!(
                "
@@ -259,7 +259,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream

                # [doc = #set_constant_fn_docs]
                fn #set_constant_fn_name(&mut self, #(#key_names: #keys,)* value__: #value);
                fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability);
            });

            query_fn_definitions.extend(quote! {
@@ -267,8 +267,8 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
                    <Self as salsa::plumbing::GetQueryTable<#qt>>::get_query_table_mut(self).set((#(#key_names),*), value__)
                }

                fn #set_constant_fn_name(&mut self, #(#key_names: #keys,)* value__: #value) {
                    <Self as salsa::plumbing::GetQueryTable<#qt>>::get_query_table_mut(self).set_constant((#(#key_names),*), value__)
                fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability) {
                    <Self as salsa::plumbing::GetQueryTable<#qt>>::get_query_table_mut(self).set_with_durability((#(#key_names),*), value__, durability__)
                }
            });
        }
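For illustration, here is roughly what these generated methods look like from the caller's side. The query group, trait name, `InputsStorage` identifier, and `source_text` query below are made up for the example, and the attribute syntax is approximate for this era of salsa; only the `set_*` / `set_*_with_durability` naming pattern comes from the macro code above.

#[salsa::query_group(InputsStorage)]
trait Inputs: salsa::Database {
    #[salsa::input]
    fn source_text(&self, name: String) -> String;
}

fn edit(db: &mut impl Inputs) {
    // Plain setter: defaults to Durability::LOW (see the change to `set` in src/lib.rs).
    db.set_source_text("main.rs".to_string(), "fn main() {}".to_string());
    // New in this change: choose the durability explicitly.
    db.set_source_text_with_durability(
        "core.rs".to_string(),
        "...".to_string(),
        salsa::Durability::HIGH,
    );
}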
|
14
src/debug.rs
14
src/debug.rs
|
@@ -1,6 +1,7 @@
//! Debugging APIs: these are meant for use when unit-testing or
//! debugging your application but aren't ordinarily needed.

use crate::durability::Durability;
use crate::plumbing;
use crate::plumbing::QueryStorageOps;
use crate::Query;

@@ -17,10 +18,11 @@ pub trait DebugQueryTable {
    /// Value of this query.
    type Value;

    /// True if salsa thinks that the value for `key` is a
    /// **constant**, meaning that it can never change, no matter what
    /// values the inputs take on from this point.
    fn is_constant(&self, key: Self::Key) -> bool;
    /// Returns a lower bound on the durability for the given key.
    /// This is typically the minimum durability of all values that
    /// the query accessed, but we may return a lower durability in
    /// some cases.
    fn durability(&self, key: Self::Key) -> Durability;

    /// Get the (current) set of the entries in the query table.
    fn entries<C>(&self) -> C
@@ -56,8 +58,8 @@ where
    type Key = Q::Key;
    type Value = Q::Value;

    fn is_constant(&self, key: Q::Key) -> bool {
        self.storage.is_constant(self.db, &key)
    fn durability(&self, key: Q::Key) -> Durability {
        self.storage.durability(self.db, &key)
    }

    fn entries<C>(&self) -> C
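A rough sketch of how a test might use the new method, assuming `table` is the `QueryTable` for some query and `key` is one of its keys (both placeholders); the `DebugQueryTable::durability` signature itself is the one defined above.

use salsa::debug::DebugQueryTable;
use salsa::Durability;

// Previously one would assert `table.is_constant(key)`; now the test can
// compare against a durability level instead:
assert!(table.durability(key) >= Durability::HIGH);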
@@ -1,4 +1,4 @@
use crate::runtime::Revision;
use crate::revision::Revision;
use crate::Database;
use std::fmt::Debug;
use std::hash::Hasher;
|
@ -1,4 +1,5 @@
|
|||
use crate::debug::TableEntry;
|
||||
use crate::durability::Durability;
|
||||
use crate::lru::Lru;
|
||||
use crate::plumbing::CycleDetected;
|
||||
use crate::plumbing::HasQueryGroup;
|
||||
|
@ -122,8 +123,6 @@ where
|
|||
.or_insert_with(|| Arc::new(Slot::new(key.clone())))
|
||||
.clone()
|
||||
}
|
||||
|
||||
fn remove_lru(&self) {}
|
||||
}
|
||||
|
||||
impl<DB, Q, MP> QueryStorageOps<DB, Q> for DerivedStorage<DB, Q, MP>
|
||||
|
@ -134,19 +133,24 @@ where
|
|||
{
|
||||
fn try_fetch(&self, db: &DB, key: &Q::Key) -> Result<Q::Value, CycleDetected> {
|
||||
let slot = self.slot(key);
|
||||
let StampedValue { value, changed_at } = slot.read(db)?;
|
||||
let StampedValue {
|
||||
value,
|
||||
durability,
|
||||
changed_at,
|
||||
} = slot.read(db)?;
|
||||
|
||||
if let Some(evicted) = self.lru_list.record_use(&slot) {
|
||||
evicted.evict();
|
||||
}
|
||||
|
||||
db.salsa_runtime().report_query_read(slot, changed_at);
|
||||
db.salsa_runtime()
|
||||
.report_query_read(slot, durability, changed_at);
|
||||
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn is_constant(&self, db: &DB, key: &Q::Key) -> bool {
|
||||
self.slot(key).is_constant(db)
|
||||
fn durability(&self, db: &DB, key: &Q::Key) -> Durability {
|
||||
self.slot(key).durability(db)
|
||||
}
|
||||
|
||||
fn entries<C>(&self, _db: &DB) -> C
|
||||
|
|
|
@ -2,15 +2,15 @@ use crate::debug::TableEntry;
|
|||
use crate::dependency::DatabaseSlot;
|
||||
use crate::dependency::Dependency;
|
||||
use crate::derived::MemoizationPolicy;
|
||||
use crate::durability::Durability;
|
||||
use crate::lru::LruIndex;
|
||||
use crate::lru::LruNode;
|
||||
use crate::plumbing::CycleDetected;
|
||||
use crate::plumbing::GetQueryTable;
|
||||
use crate::plumbing::HasQueryGroup;
|
||||
use crate::plumbing::QueryFunction;
|
||||
use crate::runtime::ChangedAt;
|
||||
use crate::revision::Revision;
|
||||
use crate::runtime::FxIndexSet;
|
||||
use crate::runtime::Revision;
|
||||
use crate::runtime::Runtime;
|
||||
use crate::runtime::RuntimeId;
|
||||
use crate::runtime::StampedValue;
|
||||
|
@ -72,6 +72,9 @@ where
|
|||
/// Last revision when the memoized value was observed to change.
|
||||
changed_at: Revision,
|
||||
|
||||
/// Minimum durability of the inputs to this query.
|
||||
durability: Durability,
|
||||
|
||||
/// The inputs that went into our query, if we are tracking them.
|
||||
inputs: MemoInputs<DB>,
|
||||
}
|
||||
|
@ -79,15 +82,15 @@ where
|
|||
/// An insertion-order-preserving set of queries. Used to track the
|
||||
/// inputs accessed during query execution.
|
||||
pub(super) enum MemoInputs<DB: Database> {
|
||||
// No inputs
|
||||
Constant,
|
||||
|
||||
// Non-empty set of inputs fully known
|
||||
/// Non-empty set of inputs, fully known
|
||||
Tracked {
|
||||
inputs: Arc<FxIndexSet<Dependency<DB>>>,
|
||||
},
|
||||
|
||||
// Unknown quantity of inputs
|
||||
/// Empty set of inputs, fully known.
|
||||
NoInputs,
|
||||
|
||||
/// Unknown quantity of inputs
|
||||
Untracked,
|
||||
}
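For intuition, the three tracked-ness categories correspond to three kinds of derived queries. The query bodies below are hypothetical examples (including the `MyDatabase` trait and `base_value` query), not code from this crate.

// MemoInputs::NoInputs: reads nothing at all, so it can never be invalidated.
fn answer(_db: &dyn MyDatabase) -> u32 {
    42
}

// MemoInputs::Tracked: reads other queries, which are recorded as inputs.
fn doubled(db: &dyn MyDatabase) -> u32 {
    db.base_value() * 2
}

// A query that reads something salsa cannot see (the clock, the file system,
// a random number) ends up as MemoInputs::Untracked and must be treated as
// potentially dirty in every new revision.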
|
||||
|
||||
|
@ -168,7 +171,8 @@ where
|
|||
}
|
||||
};
|
||||
|
||||
let mut panic_guard = PanicGuard::new(self.database_key(db), self, old_memo, runtime);
|
||||
let database_key = self.database_key(db);
|
||||
let mut panic_guard = PanicGuard::new(&database_key, self, old_memo, runtime);
|
||||
|
||||
// If we have an old-value, it *may* now be stale, since there
|
||||
// has been a new revision since the last time we checked. So,
|
||||
|
@ -181,7 +185,7 @@ where
|
|||
db.salsa_event(|| Event {
|
||||
runtime_id: runtime.id(),
|
||||
kind: EventKind::DidValidateMemoizedValue {
|
||||
database_key: self.database_key(db),
|
||||
database_key: database_key.clone(),
|
||||
},
|
||||
});
|
||||
|
||||
|
@ -193,7 +197,6 @@ where
|
|||
|
||||
// Query was not previously executed, or value is potentially
|
||||
// stale, or value is absent. Let's execute!
|
||||
let database_key = self.database_key(db);
|
||||
let mut result = runtime.execute_query_implementation(db, &database_key, || {
|
||||
info!("{:?}: executing query", self);
|
||||
|
||||
|
@ -215,20 +218,27 @@ where
|
|||
// old value.
|
||||
if let Some(old_memo) = &panic_guard.memo {
|
||||
if let Some(old_value) = &old_memo.value {
|
||||
if MP::memoized_value_eq(&old_value, &result.value) {
|
||||
// Careful: if the value became less durable than it
|
||||
// used to be, that is a "breaking change" that our
|
||||
// consumers must be aware of. Becoming *more* durable
|
||||
// is not. See the test `constant_to_non_constant`.
|
||||
if result.durability >= old_memo.durability
|
||||
&& MP::memoized_value_eq(&old_value, &result.value)
|
||||
{
|
||||
debug!(
|
||||
"read_upgrade({:?}): value is equal, back-dating to {:?}",
|
||||
self, old_memo.changed_at,
|
||||
);
|
||||
|
||||
assert!(old_memo.changed_at <= result.changed_at.revision);
|
||||
result.changed_at.revision = old_memo.changed_at;
|
||||
assert!(old_memo.changed_at <= result.changed_at);
|
||||
result.changed_at = old_memo.changed_at;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let new_value = StampedValue {
|
||||
value: result.value,
|
||||
durability: result.durability,
|
||||
changed_at: result.changed_at,
|
||||
};
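The back-dating rule introduced above can be summarized as a small predicate. This is only a sketch of the logic, not code from the diff; `u64` stands in for the crate's `Revision` type.

use salsa::Durability;
type Revision = u64; // stand-in for the crate's Revision type

fn backdated_changed_at(
    old_changed_at: Revision,
    old_durability: Durability,
    new_changed_at: Revision,
    new_durability: Durability,
    values_equal: bool,
) -> Revision {
    // Becoming *less* durable is a breaking change for consumers, so a new
    // result is only back-dated when the value is unchanged *and* at least
    // as durable as the old memo.
    if values_equal && new_durability >= old_durability {
        old_changed_at
    } else {
        new_changed_at
    }
}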
|
||||
|
||||
|
@ -239,24 +249,17 @@ where
|
|||
};
|
||||
|
||||
debug!(
|
||||
"read_upgrade({:?}): result.changed_at={:?}, result.dependencies = {:#?}",
|
||||
self, result.changed_at, result.dependencies,
|
||||
"read_upgrade({:?}): result.changed_at={:?}, \
|
||||
result.durability={:?}, result.dependencies = {:#?}",
|
||||
self, result.changed_at, result.durability, result.dependencies,
|
||||
);
|
||||
|
||||
let inputs = match result.dependencies {
|
||||
None => MemoInputs::Untracked,
|
||||
|
||||
Some(dependencies) => {
|
||||
// If all things that we read were constants, then
|
||||
// we don't need to track our inputs: our value
|
||||
// can never be invalidated.
|
||||
//
|
||||
// If OTOH we read at least *some* non-constant
|
||||
// inputs, then we do track our inputs (even the
|
||||
// constants), so that if we run the GC, we know
|
||||
// which constants we looked at.
|
||||
if dependencies.is_empty() || result.changed_at.is_constant {
|
||||
MemoInputs::Constant
|
||||
if dependencies.is_empty() {
|
||||
MemoInputs::NoInputs
|
||||
} else {
|
||||
MemoInputs::Tracked {
|
||||
inputs: Arc::new(dependencies),
|
||||
|
@ -264,11 +267,14 @@ where
|
|||
}
|
||||
}
|
||||
};
|
||||
debug!("read_upgrade({:?}): inputs={:?}", self, inputs);
|
||||
|
||||
panic_guard.memo = Some(Memo {
|
||||
value,
|
||||
changed_at: result.changed_at.revision,
|
||||
changed_at: result.changed_at,
|
||||
verified_at: revision_now,
|
||||
inputs,
|
||||
durability: result.durability,
|
||||
});
|
||||
|
||||
panic_guard.proceed(&new_value);
|
||||
|
@ -334,15 +340,26 @@ where
|
|||
}
|
||||
|
||||
QueryState::Memoized(memo) => {
|
||||
debug!("{:?}: found memoized value", self);
|
||||
debug!(
|
||||
"{:?}: found memoized value, verified_at={:?}, changed_at={:?}",
|
||||
self, memo.verified_at, memo.changed_at,
|
||||
);
|
||||
|
||||
if let Some(value) = memo.probe_memoized_value(revision_now) {
|
||||
info!(
|
||||
"{:?}: returning memoized value changed at {:?}",
|
||||
self, value.changed_at
|
||||
);
|
||||
if let Some(value) = &memo.value {
|
||||
if memo.verified_at == revision_now {
|
||||
let value = StampedValue {
|
||||
durability: memo.durability,
|
||||
changed_at: memo.changed_at,
|
||||
value: value.clone(),
|
||||
};
|
||||
|
||||
return ProbeState::UpToDate(Ok(value));
|
||||
info!(
|
||||
"{:?}: returning memoized value changed at {:?}",
|
||||
self, value.changed_at
|
||||
);
|
||||
|
||||
return ProbeState::UpToDate(Ok(value));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -350,11 +367,17 @@ where
|
|||
ProbeState::StaleOrAbsent(state)
|
||||
}
|
||||
|
||||
pub(super) fn is_constant(&self, _db: &DB) -> bool {
|
||||
pub(super) fn durability(&self, db: &DB) -> Durability {
|
||||
match &*self.state.read() {
|
||||
QueryState::NotComputed => false,
|
||||
QueryState::NotComputed => Durability::LOW,
|
||||
QueryState::InProgress { .. } => panic!("query in progress"),
|
||||
QueryState::Memoized(memo) => memo.inputs.is_constant(),
|
||||
QueryState::Memoized(memo) => {
|
||||
if memo.check_durability(db) {
|
||||
memo.durability
|
||||
} else {
|
||||
Durability::LOW
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -503,7 +526,7 @@ where
|
|||
Q: QueryFunction<DB>,
|
||||
MP: MemoizationPolicy<DB, Q>,
|
||||
{
|
||||
database_key: DB::DatabaseKey,
|
||||
database_key: &'me DB::DatabaseKey,
|
||||
slot: &'me Slot<DB, Q, MP>,
|
||||
memo: Option<Memo<DB, Q>>,
|
||||
runtime: &'me Runtime<DB>,
|
||||
|
@ -516,7 +539,7 @@ where
|
|||
MP: MemoizationPolicy<DB, Q>,
|
||||
{
|
||||
fn new(
|
||||
database_key: DB::DatabaseKey,
|
||||
database_key: &'me DB::DatabaseKey,
|
||||
slot: &'me Slot<DB, Q, MP>,
|
||||
memo: Option<Memo<DB, Q>>,
|
||||
runtime: &'me Runtime<DB>,
|
||||
|
@ -605,28 +628,32 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
impl<DB: Database> MemoInputs<DB> {
|
||||
fn is_constant(&self) -> bool {
|
||||
if let MemoInputs::Constant = self {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<DB, Q> Memo<DB, Q>
|
||||
where
|
||||
Q: QueryFunction<DB>,
|
||||
DB: Database + HasQueryGroup<Q::Group>,
|
||||
{
|
||||
/// True if this memo is known not to have changed based on its durability.
|
||||
fn check_durability(&self, db: &DB) -> bool {
|
||||
let last_changed = db.salsa_runtime().last_changed_revision(self.durability);
|
||||
debug!(
|
||||
"check_durability(last_changed={:?} <= verified_at={:?}) = {:?}",
|
||||
last_changed,
|
||||
self.verified_at,
|
||||
last_changed <= self.verified_at,
|
||||
);
|
||||
last_changed <= self.verified_at
|
||||
}
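A worked instance of this check, with made-up revision numbers: suppose the memo has `Durability::HIGH` and was last verified in revision 5. The sketch below uses `u64` in place of `Revision`.

fn still_valid(last_changed: u64, verified_at: u64) -> bool {
    // Mirrors `check_durability`: nothing at this durability level changed
    // since the memo was last verified.
    last_changed <= verified_at
}

fn main() {
    // HIGH-durability data last changed in revision 2: the memo is still
    // valid and its inputs need not be traced.
    assert!(still_valid(2, 5));
    // After a HIGH-durability write in revision 8, the inputs must be traced.
    assert!(!still_valid(8, 5));
}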
|
||||
|
||||
fn validate_memoized_value(
|
||||
&mut self,
|
||||
db: &DB,
|
||||
revision_now: Revision,
|
||||
) -> Option<StampedValue<Q::Value>> {
|
||||
// If we don't have a memoized value, nothing to validate.
|
||||
let value = self.value.as_ref()?;
|
||||
if self.value.is_none() {
|
||||
return None;
|
||||
}
|
||||
|
||||
assert!(self.verified_at != revision_now);
|
||||
let verified_at = self.verified_at;
|
||||
|
@ -637,15 +664,18 @@ where
|
|||
self.inputs,
|
||||
);
|
||||
|
||||
let is_constant = match &mut self.inputs {
|
||||
if self.check_durability(db) {
|
||||
return Some(self.mark_value_as_verified(revision_now));
|
||||
}
|
||||
|
||||
match &self.inputs {
|
||||
// We can't validate values that had untracked inputs; just have to
|
||||
// re-execute.
|
||||
MemoInputs::Untracked { .. } => {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Constant: no changed input
|
||||
MemoInputs::Constant => true,
|
||||
MemoInputs::NoInputs => {}
|
||||
|
||||
// Check whether any of our inputs changed since the
|
||||
// **last point where we were verified** (not since we
|
||||
|
@ -671,46 +701,24 @@ where
|
|||
|
||||
return None;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
self.verified_at = revision_now;
|
||||
Some(StampedValue {
|
||||
changed_at: ChangedAt {
|
||||
is_constant,
|
||||
revision: self.changed_at,
|
||||
},
|
||||
value: value.clone(),
|
||||
})
|
||||
Some(self.mark_value_as_verified(revision_now))
|
||||
}
|
||||
|
||||
/// Returns the memoized value *if* it is known to be up to date in the given revision.
|
||||
fn probe_memoized_value(&self, revision_now: Revision) -> Option<StampedValue<Q::Value>> {
|
||||
let value = self.value.as_ref()?;
|
||||
fn mark_value_as_verified(&mut self, revision_now: Revision) -> StampedValue<Q::Value> {
|
||||
let value = match &self.value {
|
||||
Some(v) => v.clone(),
|
||||
None => panic!("invoked `verify_value` without a value!"),
|
||||
};
|
||||
self.verified_at = revision_now;
|
||||
|
||||
debug!(
|
||||
"probe_memoized_value(verified_at={:?}, changed_at={:?})",
|
||||
self.verified_at, self.changed_at,
|
||||
);
|
||||
|
||||
if self.verified_at == revision_now {
|
||||
let is_constant = match self.inputs {
|
||||
MemoInputs::Constant => true,
|
||||
_ => false,
|
||||
};
|
||||
|
||||
return Some(StampedValue {
|
||||
changed_at: ChangedAt {
|
||||
is_constant,
|
||||
revision: self.changed_at,
|
||||
},
|
||||
value: value.clone(),
|
||||
});
|
||||
StampedValue {
|
||||
durability: self.durability,
|
||||
changed_at: self.changed_at,
|
||||
value,
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn has_untracked_input(&self) -> bool {
|
||||
|
@ -735,10 +743,10 @@ where
|
|||
impl<DB: Database> std::fmt::Debug for MemoInputs<DB> {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
MemoInputs::Constant => fmt.debug_struct("Constant").finish(),
|
||||
MemoInputs::Tracked { inputs } => {
|
||||
fmt.debug_struct("Tracked").field("inputs", inputs).finish()
|
||||
}
|
||||
MemoInputs::NoInputs => fmt.debug_struct("NoInputs").finish(),
|
||||
MemoInputs::Untracked => fmt.debug_struct("Untracked").finish(),
|
||||
}
|
||||
}
|
||||
|
@ -804,7 +812,7 @@ where
|
|||
std::mem::drop(state);
|
||||
|
||||
let value = rx.recv().unwrap_or_else(|_| db.on_propagated_panic());
|
||||
return value.changed_at.changed_since(revision);
|
||||
return value.changed_at > revision;
|
||||
}
|
||||
|
||||
// Consider a cycle to have changed.
|
||||
|
@ -825,62 +833,74 @@ where
|
|||
return memo.changed_at > revision;
|
||||
}
|
||||
|
||||
let inputs = match &memo.inputs {
|
||||
MemoInputs::Untracked => {
|
||||
// we don't know the full set of
|
||||
// inputs, so if there is a new
|
||||
// revision, we must assume it is
|
||||
// dirty
|
||||
debug!(
|
||||
"maybe_changed_since({:?}: true since untracked inputs",
|
||||
self,
|
||||
);
|
||||
return true;
|
||||
}
|
||||
let maybe_changed;
|
||||
|
||||
MemoInputs::Constant => None,
|
||||
|
||||
MemoInputs::Tracked { inputs } => {
|
||||
// At this point, the value may be dirty (we have
|
||||
// to check the database-keys). If we have a cached
|
||||
// value, we'll just fall back to invoking `read`,
|
||||
// which will do that checking (and a bit more) --
|
||||
// note that we skip the "pure read" part as we
|
||||
// already know the result.
|
||||
assert!(inputs.len() > 0);
|
||||
if memo.value.is_some() {
|
||||
std::mem::drop(state);
|
||||
return match self.read_upgrade(db, revision_now) {
|
||||
Ok(v) => {
|
||||
debug!(
|
||||
"maybe_changed_since({:?}: {:?} since (recomputed) value changed at {:?}",
|
||||
self,
|
||||
v.changed_at.changed_since(revision),
|
||||
v.changed_at,
|
||||
);
|
||||
v.changed_at.changed_since(revision)
|
||||
}
|
||||
Err(CycleDetected) => true,
|
||||
};
|
||||
// If we only depended on constants, and no constant has been
|
||||
// modified since then, we cannot have changed; no need to
|
||||
// trace our inputs.
|
||||
if memo.check_durability(db) {
|
||||
std::mem::drop(state);
|
||||
maybe_changed = false;
|
||||
} else {
|
||||
match &memo.inputs {
|
||||
MemoInputs::Untracked => {
|
||||
// we don't know the full set of
|
||||
// inputs, so if there is a new
|
||||
// revision, we must assume it is
|
||||
// dirty
|
||||
debug!(
|
||||
"maybe_changed_since({:?}: true since untracked inputs",
|
||||
self,
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
Some(inputs.clone())
|
||||
MemoInputs::NoInputs => {
|
||||
std::mem::drop(state);
|
||||
maybe_changed = false;
|
||||
}
|
||||
|
||||
MemoInputs::Tracked { inputs } => {
|
||||
// At this point, the value may be dirty (we have
|
||||
// to check the database-keys). If we have a cached
|
||||
// value, we'll just fall back to invoking `read`,
|
||||
// which will do that checking (and a bit more) --
|
||||
// note that we skip the "pure read" part as we
|
||||
// already know the result.
|
||||
assert!(inputs.len() > 0);
|
||||
if memo.value.is_some() {
|
||||
std::mem::drop(state);
|
||||
return match self.read_upgrade(db, revision_now) {
|
||||
Ok(v) => {
|
||||
debug!(
|
||||
"maybe_changed_since({:?}: {:?} since (recomputed) value changed at {:?}",
|
||||
self,
|
||||
v.changed_at > revision,
|
||||
v.changed_at,
|
||||
);
|
||||
v.changed_at > revision
|
||||
}
|
||||
Err(CycleDetected) => true,
|
||||
};
|
||||
}
|
||||
|
||||
let inputs = inputs.clone();
|
||||
|
||||
// We have a **tracked set of inputs**
|
||||
// (found in `database_keys`) that need to
|
||||
// be validated.
|
||||
std::mem::drop(state);
|
||||
|
||||
// Iterate the inputs and see if any have maybe changed.
|
||||
maybe_changed = inputs
|
||||
.iter()
|
||||
.filter(|input| input.maybe_changed_since(db, revision))
|
||||
.inspect(|input| debug!("{:?}: input `{:?}` may have changed", self, input))
|
||||
.next()
|
||||
.is_some();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// We have a **tracked set of inputs**
|
||||
// (found in `database_keys`) that need to
|
||||
// be validated.
|
||||
std::mem::drop(state);
|
||||
|
||||
// Iterate the inputs and see if any have maybe changed.
|
||||
let maybe_changed = inputs
|
||||
.iter()
|
||||
.flat_map(|inputs| inputs.iter())
|
||||
.filter(|input| input.maybe_changed_since(db, revision))
|
||||
.inspect(|input| debug!("{:?}: input `{:?}` may have changed", self, input))
|
||||
.next()
|
||||
.is_some();
|
||||
}
|
||||
|
||||
// Either way, we have to update our entry.
|
||||
//
|
||||
|
|
49
src/durability.rs
Normal file
49
src/durability.rs
Normal file
|
@@ -0,0 +1,49 @@
/// Describes how likely a value is to change -- how "durable" it is.
/// By default, inputs have `Durability::LOW` and interned values have
/// `Durability::HIGH`. But inputs can be explicitly set with other
/// durabilities.
///
/// We use durabilities to optimize the work of "revalidating" a query
/// after some input has changed. Ordinarily, in a new revision,
/// queries have to trace all their inputs back to the base inputs to
/// determine if any of those inputs have changed. But if we know that
/// the only changes were to inputs of low durability (the common
/// case), and we know that the query only used inputs of medium
/// durability or higher, then we can skip that enumeration.
///
/// Typically, one assigns low durabilities to inputs that the user is
/// frequently editing. Medium or high durabilities are used for
/// configuration, the source from library crates, or other things
/// that are unlikely to be edited.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Durability(u8);

impl Durability {
    /// Low durability: things that change frequently.
    ///
    /// Example: part of the crate being edited
    pub const LOW: Durability = Durability(0);

    /// Medium durability: things that change sometimes, but rarely.
    ///
    /// Example: a Cargo.toml file
    pub const MEDIUM: Durability = Durability(1);

    /// High durability: things that are not expected to change under
    /// common usage.
    ///
    /// Example: the standard library or something from crates.io
    pub const HIGH: Durability = Durability(2);

    /// The maximum possible durability; equivalent to HIGH but
    /// "conceptually" distinct (i.e., if we add more durability
    /// levels, this could change).
    pub(crate) const MAX: Durability = Self::HIGH;

    /// Number of durability levels.
    pub(crate) const LEN: usize = 3;

    pub(crate) fn index(self) -> usize {
        self.0 as usize
    }
}
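Because `Durability` derives `PartialOrd`/`Ord`, the levels compare in the expected way, and a query's overall durability is simply the minimum over everything it reads (see `ActiveQuery::add_read` later in this diff). A small self-contained illustration, assuming the re-exported `salsa::Durability`:

use salsa::Durability;

fn query_durability(reads: &[Durability]) -> Durability {
    // Start at the highest level and take the minimum over all inputs read.
    reads.iter().copied().fold(Durability::HIGH, std::cmp::min)
}

fn main() {
    assert!(Durability::LOW < Durability::MEDIUM);
    assert!(Durability::MEDIUM < Durability::HIGH);
    assert_eq!(
        query_durability(&[Durability::HIGH, Durability::LOW]),
        Durability::LOW
    );
}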
src/input.rs (159 changed lines)
@ -1,11 +1,11 @@
|
|||
use crate::debug::TableEntry;
|
||||
use crate::dependency::DatabaseSlot;
|
||||
use crate::durability::Durability;
|
||||
use crate::plumbing::CycleDetected;
|
||||
use crate::plumbing::InputQueryStorageOps;
|
||||
use crate::plumbing::QueryStorageMassOps;
|
||||
use crate::plumbing::QueryStorageOps;
|
||||
use crate::runtime::ChangedAt;
|
||||
use crate::runtime::Revision;
|
||||
use crate::revision::Revision;
|
||||
use crate::runtime::StampedValue;
|
||||
use crate::Database;
|
||||
use crate::Event;
|
||||
|
@ -59,8 +59,6 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
struct IsConstant(bool);
|
||||
|
||||
impl<DB, Q> InputStorage<DB, Q>
|
||||
where
|
||||
Q: Query<DB>,
|
||||
|
@ -69,69 +67,6 @@ where
|
|||
fn slot(&self, key: &Q::Key) -> Option<Arc<Slot<DB, Q>>> {
|
||||
self.slots.read().get(key).cloned()
|
||||
}
|
||||
|
||||
fn set_common(
|
||||
&self,
|
||||
db: &DB,
|
||||
key: &Q::Key,
|
||||
database_key: &DB::DatabaseKey,
|
||||
value: Q::Value,
|
||||
is_constant: IsConstant,
|
||||
) {
|
||||
// The value is changing, so even if we are setting this to a
|
||||
// constant, we still need a new revision.
|
||||
//
|
||||
// CAREFUL: This will block until the global revision lock can
|
||||
// be acquired. If there are still queries executing, they may
|
||||
// need to read from this input. Therefore, we wait to acquire
|
||||
// the lock on `map` until we also hold the global query write
|
||||
// lock.
|
||||
db.salsa_runtime().with_incremented_revision(|next_revision| {
|
||||
let mut slots = self.slots.write();
|
||||
|
||||
db.salsa_event(|| Event {
|
||||
runtime_id: db.salsa_runtime().id(),
|
||||
kind: EventKind::WillChangeInputValue {
|
||||
database_key: database_key.clone(),
|
||||
},
|
||||
});
|
||||
|
||||
// Do this *after* we acquire the lock, so that we are not
|
||||
// racing with somebody else to modify this same cell.
|
||||
// (Otherwise, someone else might write a *newer* revision
|
||||
// into the same cell while we block on the lock.)
|
||||
let changed_at = ChangedAt {
|
||||
is_constant: is_constant.0,
|
||||
revision: next_revision,
|
||||
};
|
||||
|
||||
let stamped_value = StampedValue { value, changed_at };
|
||||
|
||||
match slots.entry(key.clone()) {
|
||||
Entry::Occupied(entry) => {
|
||||
let mut slot_stamped_value = entry.get().stamped_value.write();
|
||||
|
||||
assert!(
|
||||
!slot_stamped_value.changed_at.is_constant,
|
||||
"modifying `{:?}({:?})`, which was previously marked as constant (old value `{:?}`, new value `{:?}`)",
|
||||
Q::default(),
|
||||
entry.key(),
|
||||
slot_stamped_value.value,
|
||||
stamped_value.value,
|
||||
);
|
||||
|
||||
*slot_stamped_value = stamped_value;
|
||||
}
|
||||
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert(Arc::new(Slot {
|
||||
key: key.clone(),
|
||||
stamped_value: RwLock::new(stamped_value),
|
||||
}));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl<DB, Q> QueryStorageOps<DB, Q> for InputStorage<DB, Q>
|
||||
|
@ -145,17 +80,23 @@ where
|
|||
None => panic!("no value set for {:?}({:?})", Q::default(), key),
|
||||
};
|
||||
|
||||
let StampedValue { value, changed_at } = slot.stamped_value.read().clone();
|
||||
let StampedValue {
|
||||
value,
|
||||
durability,
|
||||
changed_at,
|
||||
} = slot.stamped_value.read().clone();
|
||||
|
||||
db.salsa_runtime().report_query_read(slot, changed_at);
|
||||
db.salsa_runtime()
|
||||
.report_query_read(slot, durability, changed_at);
|
||||
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn is_constant(&self, _db: &DB, key: &Q::Key) -> bool {
|
||||
self.slot(key)
|
||||
.map(|slot| slot.stamped_value.read().changed_at.is_constant)
|
||||
.unwrap_or(false)
|
||||
fn durability(&self, _db: &DB, key: &Q::Key) -> Durability {
|
||||
match self.slot(key) {
|
||||
Some(slot) => slot.stamped_value.read().durability,
|
||||
None => panic!("no value set for {:?}({:?})", Q::default(), key),
|
||||
}
|
||||
}
|
||||
|
||||
fn entries<C>(&self, _db: &DB) -> C
|
||||
|
@ -188,16 +129,72 @@ where
|
|||
Q: Query<DB>,
|
||||
DB: Database,
|
||||
{
|
||||
fn set(&self, db: &DB, key: &Q::Key, database_key: &DB::DatabaseKey, value: Q::Value) {
|
||||
log::debug!("{:?}({:?}) = {:?}", Q::default(), key, value);
|
||||
fn set(
|
||||
&self,
|
||||
db: &DB,
|
||||
key: &Q::Key,
|
||||
database_key: &DB::DatabaseKey,
|
||||
value: Q::Value,
|
||||
durability: Durability,
|
||||
) {
|
||||
log::debug!(
|
||||
"{:?}({:?}) = {:?} ({:?})",
|
||||
Q::default(),
|
||||
key,
|
||||
value,
|
||||
durability
|
||||
);
|
||||
|
||||
self.set_common(db, key, database_key, value, IsConstant(false))
|
||||
}
|
||||
// The value is changing, so we need a new revision (*). We also
|
||||
// need to update the 'last changed' revision by invoking
|
||||
// `guard.mark_durability_as_changed`.
|
||||
//
|
||||
// CAREFUL: This will block until the global revision lock can
|
||||
// be acquired. If there are still queries executing, they may
|
||||
// need to read from this input. Therefore, we wait to acquire
|
||||
// the lock on `map` until we also hold the global query write
|
||||
// lock.
|
||||
//
|
||||
// (*) Technically, since you can't presently access an input
|
||||
// for a non-existent key, and you can't enumerate the set of
|
||||
// keys, we only need a new revision if the key used to
|
||||
// exist. But we may add such methods in the future and this
|
||||
// case doesn't generally seem worth optimizing for.
|
||||
db.salsa_runtime().with_incremented_revision(|guard| {
|
||||
let mut slots = self.slots.write();
|
||||
|
||||
fn set_constant(&self, db: &DB, key: &Q::Key, database_key: &DB::DatabaseKey, value: Q::Value) {
|
||||
log::debug!("{:?}({:?}) = {:?}", Q::default(), key, value);
|
||||
db.salsa_event(|| Event {
|
||||
runtime_id: db.salsa_runtime().id(),
|
||||
kind: EventKind::WillChangeInputValue {
|
||||
database_key: database_key.clone(),
|
||||
},
|
||||
});
|
||||
|
||||
self.set_common(db, key, database_key, value, IsConstant(true))
|
||||
// Do this *after* we acquire the lock, so that we are not
|
||||
// racing with somebody else to modify this same cell.
|
||||
// (Otherwise, someone else might write a *newer* revision
|
||||
// into the same cell while we block on the lock.)
|
||||
let stamped_value = StampedValue {
|
||||
value,
|
||||
durability,
|
||||
changed_at: guard.new_revision(),
|
||||
};
|
||||
|
||||
match slots.entry(key.clone()) {
|
||||
Entry::Occupied(entry) => {
|
||||
let mut slot_stamped_value = entry.get().stamped_value.write();
|
||||
guard.mark_durability_as_changed(slot_stamped_value.durability);
|
||||
*slot_stamped_value = stamped_value;
|
||||
}
|
||||
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert(Arc::new(Slot {
|
||||
key: key.clone(),
|
||||
stamped_value: RwLock::new(stamped_value),
|
||||
}));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -220,7 +217,7 @@ where
|
|||
|
||||
debug!("maybe_changed_since: changed_at = {:?}", changed_at);
|
||||
|
||||
changed_at.changed_since(revision)
|
||||
changed_at > revision
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
use crate::debug::TableEntry;
|
||||
use crate::dependency::DatabaseSlot;
|
||||
use crate::durability::Durability;
|
||||
use crate::intern_id::InternId;
|
||||
use crate::plumbing::CycleDetected;
|
||||
use crate::plumbing::HasQueryGroup;
|
||||
use crate::plumbing::QueryStorageMassOps;
|
||||
use crate::plumbing::QueryStorageOps;
|
||||
use crate::runtime::ChangedAt;
|
||||
use crate::runtime::Revision;
|
||||
use crate::revision::Revision;
|
||||
use crate::Query;
|
||||
use crate::{Database, DiscardIf, SweepStrategy};
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
|
@ -18,6 +18,8 @@ use std::fmt::Debug;
|
|||
use std::hash::Hash;
|
||||
use std::sync::Arc;
|
||||
|
||||
const INTERN_DURABILITY: Durability = Durability::HIGH;
|
||||
|
||||
/// Handles storage where the value is 'derived' by executing a
|
||||
/// function (in contrast to "inputs").
|
||||
pub struct InternedStorage<DB, Q>
|
||||
|
@ -323,18 +325,13 @@ where
|
|||
let slot = self.intern_index(db, key);
|
||||
let changed_at = slot.interned_at;
|
||||
let index = slot.index;
|
||||
db.salsa_runtime().report_query_read(
|
||||
slot,
|
||||
ChangedAt {
|
||||
is_constant: false,
|
||||
revision: changed_at,
|
||||
},
|
||||
);
|
||||
db.salsa_runtime()
|
||||
.report_query_read(slot, INTERN_DURABILITY, changed_at);
|
||||
Ok(<Q::Value>::from_intern_id(index))
|
||||
}
|
||||
|
||||
fn is_constant(&self, _db: &DB, _key: &Q::Key) -> bool {
|
||||
false
|
||||
fn durability(&self, _db: &DB, _key: &Q::Key) -> Durability {
|
||||
INTERN_DURABILITY
|
||||
}
|
||||
|
||||
fn entries<C>(&self, _db: &DB) -> C
|
||||
|
@ -360,6 +357,7 @@ where
|
|||
{
|
||||
fn sweep(&self, db: &DB, strategy: SweepStrategy) {
|
||||
let mut tables = self.tables.write();
|
||||
let last_changed = db.salsa_runtime().last_changed_revision(INTERN_DURABILITY);
|
||||
let revision_now = db.salsa_runtime().current_revision();
|
||||
let InternTables {
|
||||
map,
|
||||
|
@ -384,7 +382,7 @@ where
|
|||
// they are removed and also be forced to re-execute.
|
||||
DiscardIf::Always | DiscardIf::Outdated => match &values[intern_index.as_usize()] {
|
||||
InternValue::Present { slot, .. } => {
|
||||
if slot.try_collect(revision_now) {
|
||||
if slot.try_collect(last_changed, revision_now) {
|
||||
values[intern_index.as_usize()] =
|
||||
InternValue::Free { next: *first_free };
|
||||
*first_free = Some(*intern_index);
|
||||
|
@ -427,17 +425,15 @@ where
|
|||
let group_storage = <DB as HasQueryGroup<Q::Group>>::group_storage(db);
|
||||
let interned_storage = IQ::query_storage(group_storage);
|
||||
let slot = interned_storage.lookup_value(db, index);
|
||||
let changed_at = ChangedAt {
|
||||
is_constant: false,
|
||||
revision: slot.interned_at,
|
||||
};
|
||||
let value = slot.value.clone();
|
||||
db.salsa_runtime().report_query_read(slot, changed_at);
|
||||
let interned_at = slot.interned_at;
|
||||
db.salsa_runtime()
|
||||
.report_query_read(slot, INTERN_DURABILITY, interned_at);
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn is_constant(&self, _db: &DB, _key: &Q::Key) -> bool {
|
||||
false
|
||||
fn durability(&self, _db: &DB, _key: &Q::Key) -> Durability {
|
||||
INTERN_DURABILITY
|
||||
}
|
||||
|
||||
fn entries<C>(&self, db: &DB) -> C
|
||||
|
@ -504,12 +500,14 @@ impl<K> Slot<K> {
|
|||
}
|
||||
|
||||
/// Invoked during sweeping to try and collect this slot. Fails if
|
||||
/// the slot has been accessed in the current revision. Note that
|
||||
/// this access could be racing with the attempt to collect (in
|
||||
/// the slot has been accessed since the intern durability last
|
||||
/// changed, because in that case there may be outstanding
|
||||
/// references that are still considered valid. Note that this
|
||||
/// access could be racing with the attempt to collect (in
|
||||
/// particular, when verifying dependencies).
|
||||
fn try_collect(&self, revision_now: Revision) -> bool {
|
||||
fn try_collect(&self, last_changed: Revision, revision_now: Revision) -> bool {
|
||||
let accessed_at = self.accessed_at.load().unwrap();
|
||||
if accessed_at < revision_now {
|
||||
if accessed_at < last_changed {
|
||||
match self.accessed_at.compare_exchange(Some(accessed_at), None) {
|
||||
Ok(_) => true,
|
||||
Err(r) => {
|
||||
|
|
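A worked example of the new collection rule, with made-up revisions; `u64` stands in for `Revision` and the helper is only a sketch of the comparison in `try_collect` above.

fn collectible(accessed_at: u64, last_changed_high: u64) -> bool {
    // A slot may be freed only if it has not been accessed since the last
    // revision in which HIGH-durability (intern) data changed; otherwise
    // outstanding memos may still treat the interned id as valid.
    accessed_at < last_changed_high
}

fn main() {
    // Accessed in R4, last HIGH-durability write in R6: collectible.
    assert!(collectible(4, 6));
    // Accessed in R4, last HIGH-durability write in R3: must be kept.
    assert!(!collectible(4, 3));
}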
src/lib.rs (11 changed lines)
@ -1,6 +1,5 @@
|
|||
#![warn(rust_2018_idioms)]
|
||||
#![warn(missing_docs)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! The salsa crate is a crate for incremental recomputation. It
|
||||
//! permits you to define a "database" of queries with both inputs and
|
||||
|
@ -11,10 +10,12 @@
|
|||
mod dependency;
|
||||
mod derived;
|
||||
mod doctest;
|
||||
mod durability;
|
||||
mod input;
|
||||
mod intern_id;
|
||||
mod interned;
|
||||
mod lru;
|
||||
mod revision;
|
||||
mod runtime;
|
||||
|
||||
pub mod debug;
|
||||
|
@ -32,6 +33,7 @@ use derive_new::new;
|
|||
use std::fmt::{self, Debug};
|
||||
use std::hash::Hash;
|
||||
|
||||
pub use crate::durability::Durability;
|
||||
pub use crate::intern_id::InternId;
|
||||
pub use crate::interned::InternKey;
|
||||
pub use crate::runtime::Runtime;
|
||||
|
@ -525,8 +527,7 @@ where
|
|||
where
|
||||
Q::Storage: plumbing::InputQueryStorageOps<DB, Q>,
|
||||
{
|
||||
self.storage
|
||||
.set(self.db, &key, &self.database_key(&key), value);
|
||||
self.set_with_durability(key, value, Durability::LOW);
|
||||
}
|
||||
|
||||
/// Assign a value to an "input query", with the additional
|
||||
|
@ -537,12 +538,12 @@ where
|
|||
/// and cancellation on [the `query_mut` method].
|
||||
///
|
||||
/// [the `query_mut` method]: trait.Database#method.query_mut
|
||||
pub fn set_constant(&self, key: Q::Key, value: Q::Value)
|
||||
pub fn set_with_durability(&self, key: Q::Key, value: Q::Value, durability: Durability)
|
||||
where
|
||||
Q::Storage: plumbing::InputQueryStorageOps<DB, Q>,
|
||||
{
|
||||
self.storage
|
||||
.set_constant(self.db, &key, &self.database_key(&key), value);
|
||||
.set(self.db, &key, &self.database_key(&key), value, durability);
|
||||
}
|
||||
|
||||
/// Sets the size of LRU cache of values for this query table.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#![allow(missing_docs)]
|
||||
|
||||
use crate::debug::TableEntry;
|
||||
use crate::durability::Durability;
|
||||
use crate::Database;
|
||||
use crate::Query;
|
||||
use crate::QueryTable;
|
||||
|
@ -14,7 +15,7 @@ pub use crate::derived::MemoizedStorage;
|
|||
pub use crate::input::InputStorage;
|
||||
pub use crate::interned::InternedStorage;
|
||||
pub use crate::interned::LookupInternedStorage;
|
||||
pub use crate::runtime::Revision;
|
||||
pub use crate::revision::Revision;
|
||||
|
||||
pub struct CycleDetected;
|
||||
|
||||
|
@ -147,8 +148,8 @@ where
|
|||
/// itself.
|
||||
fn try_fetch(&self, db: &DB, key: &Q::Key) -> Result<Q::Value, CycleDetected>;
|
||||
|
||||
/// Check if `key` is (currently) believed to be a constant.
|
||||
fn is_constant(&self, db: &DB, key: &Q::Key) -> bool;
|
||||
/// Returns the durability associated with a given key.
|
||||
fn durability(&self, db: &DB, key: &Q::Key) -> Durability;
|
||||
|
||||
/// Get the (current) set of the entries in the query storage
|
||||
fn entries<C>(&self, db: &DB) -> C
|
||||
|
@ -164,14 +165,13 @@ where
|
|||
DB: Database,
|
||||
Q: Query<DB>,
|
||||
{
|
||||
fn set(&self, db: &DB, key: &Q::Key, database_key: &DB::DatabaseKey, new_value: Q::Value);
|
||||
|
||||
fn set_constant(
|
||||
fn set(
|
||||
&self,
|
||||
db: &DB,
|
||||
key: &Q::Key,
|
||||
database_key: &DB::DatabaseKey,
|
||||
new_value: Q::Value,
|
||||
durability: Durability,
|
||||
);
|
||||
}
|
||||
|
||||
|
|
70
src/revision.rs
Normal file
70
src/revision.rs
Normal file
|
@@ -0,0 +1,70 @@
use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering};

/// Value of the initial revision, as a u64. We don't use 0
/// because we want to use a `NonZeroU64`.
const START_U64: u64 = 1;

/// A unique identifier for the current version of the database; each
/// time an input is changed, the revision number is incremented.
/// `Revision` is used internally to track which values may need to be
/// recomputed, but is not something you should have to interact with
/// directly as a user of salsa.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Revision {
    generation: NonZeroU64,
}

impl Revision {
    pub(crate) fn start() -> Self {
        Self::from(START_U64)
    }

    pub(crate) fn from(g: u64) -> Self {
        Self {
            generation: NonZeroU64::new(g).unwrap(),
        }
    }

    pub(crate) fn next(self) -> Revision {
        Self::from(self.generation.get() + 1)
    }

    fn as_u64(self) -> u64 {
        self.generation.get()
    }
}

impl std::fmt::Debug for Revision {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(fmt, "R{}", self.generation)
    }
}

#[derive(Debug)]
pub(crate) struct AtomicRevision {
    data: AtomicU64,
}

impl AtomicRevision {
    pub(crate) fn start() -> Self {
        Self {
            data: AtomicU64::new(START_U64),
        }
    }

    pub(crate) fn load(&self) -> Revision {
        Revision::from(self.data.load(Ordering::SeqCst))
    }

    pub(crate) fn store(&self, r: Revision) {
        self.data.store(r.as_u64(), Ordering::SeqCst);
    }

    /// Increment by 1, returning previous value.
    pub(crate) fn fetch_then_increment(&self) -> Revision {
        let v = self.data.fetch_add(1, Ordering::SeqCst);
        assert!(v != u64::max_value(), "revision overflow");
        Revision::from(v)
    }
}
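Wrapping a `NonZeroU64` (and starting the counter at 1) lets `Option<Revision>` stay the size of a plain `u64`, which is convenient for places that store an optional revision, such as the interned slot's `accessed_at` cell. A tiny self-contained check of that layout guarantee:

use std::num::NonZeroU64;

fn main() {
    // The "niche" left by forbidding zero is used to encode `None`.
    assert_eq!(
        std::mem::size_of::<Option<NonZeroU64>>(),
        std::mem::size_of::<u64>()
    );
}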
src/runtime.rs (272 changed lines)
@ -1,5 +1,7 @@
|
|||
use crate::dependency::DatabaseSlot;
|
||||
use crate::dependency::Dependency;
|
||||
use crate::durability::Durability;
|
||||
use crate::revision::{AtomicRevision, Revision};
|
||||
use crate::{Database, Event, EventKind, SweepStrategy};
|
||||
use lock_api::{RawRwLock, RawRwLockRecursive};
|
||||
use log::debug;
|
||||
|
@ -8,8 +10,7 @@ use rustc_hash::{FxHashMap, FxHasher};
|
|||
use smallvec::SmallVec;
|
||||
use std::fmt::Write;
|
||||
use std::hash::BuildHasherDefault;
|
||||
use std::num::NonZeroU64;
|
||||
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub(crate) type FxIndexSet<K> = indexmap::IndexSet<K, BuildHasherDefault<FxHasher>>;
|
||||
|
@ -115,18 +116,37 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
/// Indicates that some input to the system has changed and hence
|
||||
/// that memoized values **may** be invalidated. This cannot be
|
||||
/// invoked while query computation is in progress.
|
||||
/// A "synthetic write" causes the system to act *as though* some
|
||||
/// input of durability `durability` has changed. This is mostly
|
||||
/// useful for profiling scenarios, but it also has interactions
|
||||
/// with garbage collection. In general, a synthetic write to
|
||||
/// durability level D will cause the system to fully trace all
|
||||
/// queries of durability level D and below. When running a GC, then:
|
||||
///
|
||||
/// As a user of the system, you would not normally invoke this
|
||||
/// method directly. Instead, you would use "input" queries and
|
||||
/// invoke their `set` method. But it can be useful if you have a
|
||||
/// "volatile" input that you must poll from time to time; in that
|
||||
/// case, you can wrap the input with a "no-storage" query and
|
||||
/// invoke this method from time to time.
|
||||
pub fn next_revision(&self) {
|
||||
self.with_incremented_revision(|_| ());
|
||||
/// - Synthetic writes will cause more derived values to be
|
||||
/// *retained*. This is because derived values are only
|
||||
/// retained if they are traced, and a synthetic write can cause
|
||||
/// more things to be traced.
|
||||
/// - Synthetic writes can cause more interned values to be
|
||||
/// *collected*. This is because interned values can only be
|
||||
/// collected if they were not yet traced in the current
|
||||
/// revision. Therefore, if you issue a synthetic write, execute
|
||||
/// some query Q, and then start collecting interned values, you
|
||||
/// will be able to recycle interned values not used in Q.
|
||||
///
|
||||
/// In general, then, one can do a "full GC" that retains only
|
||||
/// those things that are used by some query Q by (a) doing a
|
||||
/// synthetic write at `Durability::HIGH`, (b) executing the query
|
||||
/// Q and then (c) doing a sweep.
|
||||
///
|
||||
/// **WARNING:** Just like an ordinary write, this method triggers
|
||||
/// cancellation. If you invoke it while a snapshot exists, it
|
||||
/// will block until that snapshot is dropped -- if that snapshot
|
||||
/// is owned by the current thread, this could trigger deadlock.
|
||||
pub fn synthetic_write(&self, durability: Durability) {
|
||||
self.with_incremented_revision(|guard| {
|
||||
guard.mark_durability_as_changed(durability);
|
||||
});
|
||||
}
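A hedged sketch of the "full GC" recipe described in the comment above. The `DatabaseImpl` type and `compute` query are placeholders, and the sweep call assumes the usual `SweepStrategy` API; only `synthetic_write` and `Durability::HIGH` come from this diff.

fn full_gc(db: &mut DatabaseImpl) {
    // (a) act as though a HIGH-durability input changed, so subsequent reads
    //     re-trace even high-durability dependencies;
    db.salsa_runtime().synthetic_write(salsa::Durability::HIGH);
    // (b) run the query whose dependencies should be retained;
    db.compute(5);
    // (c) sweep: values not traced in (b) are candidates for collection.
    db.sweep_all(salsa::SweepStrategy::default());
}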
|
||||
|
||||
/// Default implementation for `Database::sweep_all`.
|
||||
|
@ -155,13 +175,25 @@ where
|
|||
/// Read current value of the revision counter.
|
||||
#[inline]
|
||||
pub(crate) fn current_revision(&self) -> Revision {
|
||||
Revision::from(self.shared_state.revision.load(Ordering::SeqCst))
|
||||
self.shared_state.revisions[0].load()
|
||||
}
|
||||
|
||||
/// The revision in which values with durability `d` may have last
|
||||
/// changed. For D0, this is just the current revision. But for
|
||||
/// higher levels of durability, this value may lag behind the
|
||||
/// current revision. If we encounter a value of durability Di,
|
||||
/// then, we can check this function to get a "bound" on when the
|
||||
/// value may have changed, which allows us to skip walking its
|
||||
/// dependencies.
|
||||
#[inline]
|
||||
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
|
||||
self.shared_state.revisions[d.index()].load()
|
||||
}
|
||||
|
||||
/// Read current value of the pending revision counter.
|
||||
#[inline]
|
||||
fn pending_revision(&self) -> Revision {
|
||||
Revision::from(self.shared_state.pending_revision.load(Ordering::SeqCst))
|
||||
self.shared_state.pending_revision.load()
|
||||
}
|
||||
|
||||
/// Check if the current revision is canceled. If this method ever
|
||||
|
@ -262,7 +294,10 @@ where
|
|||
/// Note that, given our writer model, we can assume that only one
|
||||
/// thread is attempting to increment the global revision at a
|
||||
/// time.
|
||||
pub(crate) fn with_incremented_revision<R>(&self, op: impl FnOnce(Revision) -> R) -> R {
|
||||
pub(crate) fn with_incremented_revision<R>(
|
||||
&self,
|
||||
op: impl FnOnce(&DatabaseWriteLockGuard<'_, DB>) -> R,
|
||||
) -> R {
|
||||
log::debug!("increment_revision()");
|
||||
|
||||
if !self.permits_increment() {
|
||||
|
@ -271,23 +306,22 @@ where
|
|||
|
||||
// Set the `pending_revision` field so that people
|
||||
// know current revision is canceled.
|
||||
let current_revision = self
|
||||
.shared_state
|
||||
.pending_revision
|
||||
.fetch_add(1, Ordering::SeqCst);
|
||||
assert!(current_revision != u64::max_value(), "revision overflow");
|
||||
let current_revision = self.shared_state.pending_revision.fetch_then_increment();
|
||||
|
||||
// To modify the revision, we need the lock.
|
||||
let _lock = self.shared_state.query_lock.write();
|
||||
|
||||
let old_revision = self.shared_state.revision.fetch_add(1, Ordering::SeqCst);
|
||||
let old_revision = self.shared_state.revisions[0].fetch_then_increment();
|
||||
assert_eq!(current_revision, old_revision);
|
||||
|
||||
let new_revision = Revision::from(current_revision + 1);
|
||||
let new_revision = current_revision.next();
|
||||
|
||||
debug!("increment_revision: incremented to {:?}", new_revision);
|
||||
|
||||
op(new_revision)
|
||||
op(&DatabaseWriteLockGuard {
|
||||
runtime: self,
|
||||
new_revision,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn permits_increment(&self) -> bool {
|
||||
|
@ -310,7 +344,8 @@ where
|
|||
});
|
||||
|
||||
// Push the active query onto the stack.
|
||||
let active_query = self.local_state.push_query(database_key);
|
||||
let max_durability = Durability::MAX;
|
||||
let active_query = self.local_state.push_query(database_key, max_durability);
|
||||
|
||||
// Execute user's code, accumulating inputs etc.
|
||||
let value = execute();
|
||||
|
@ -319,11 +354,13 @@ where
|
|||
let ActiveQuery {
|
||||
dependencies,
|
||||
changed_at,
|
||||
durability,
|
||||
..
|
||||
} = active_query.complete();
|
||||
|
||||
ComputedQueryResult {
|
||||
value,
|
||||
durability,
|
||||
changed_at,
|
||||
dependencies,
|
||||
}
|
||||
|
@ -340,10 +377,12 @@ where
|
|||
pub(crate) fn report_query_read<'hack>(
|
||||
&self,
|
||||
database_slot: Arc<dyn DatabaseSlot<DB> + 'hack>,
|
||||
changed_at: ChangedAt,
|
||||
durability: Durability,
|
||||
changed_at: Revision,
|
||||
) {
|
||||
let dependency = Dependency::new(database_slot);
|
||||
self.local_state.report_query_read(dependency, changed_at);
|
||||
self.local_state
|
||||
.report_query_read(dependency, durability, changed_at);
|
||||
}
|
||||
|
||||
/// Reports that the query depends on some state unknown to salsa.
|
||||
|
@ -402,6 +441,38 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
/// Temporary guard that indicates that the database write-lock is
|
||||
/// held. You can get one of these by invoking
|
||||
/// `with_incremented_revision`. It gives access to the new revision
|
||||
/// and a few other operations that only make sense to do while an
|
||||
/// update is happening.
|
||||
pub(crate) struct DatabaseWriteLockGuard<'db, DB>
|
||||
where
|
||||
DB: Database,
|
||||
{
|
||||
runtime: &'db Runtime<DB>,
|
||||
new_revision: Revision,
|
||||
}
|
||||
|
||||
impl<DB> DatabaseWriteLockGuard<'_, DB>
|
||||
where
|
||||
DB: Database,
|
||||
{
|
||||
pub(crate) fn new_revision(&self) -> Revision {
|
||||
self.new_revision
|
||||
}
|
||||
|
||||
/// Indicates that this update modified an input marked as
|
||||
/// "constant". This will force re-evaluation of anything that was
|
||||
/// dependent on constants (which otherwise might not get
|
||||
/// re-evaluated).
|
||||
pub(crate) fn mark_durability_as_changed(&self, d: Durability) {
|
||||
for rev in &self.runtime.shared_state.revisions[1..=d.index()] {
|
||||
rev.store(self.new_revision);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// State that will be common to all threads (when we support multiple threads)
|
||||
struct SharedState<DB: Database> {
|
||||
storage: DB::DatabaseStorage,
|
||||
|
@ -419,22 +490,40 @@ struct SharedState<DB: Database> {
|
|||
/// to ensure a higher-level consistency property.
|
||||
query_lock: RwLock<()>,
|
||||
|
||||
/// Stores the current revision. This is an `AtomicU64` because
|
||||
/// it may be *read* at any point without holding the
|
||||
/// `query_lock`. Updates, however, require the `query_lock` to be
|
||||
/// acquired. (See `query_lock` for details.)
|
||||
revision: AtomicU64,
|
||||
|
||||
/// This is typically equal to `revision` -- set to `revision+1`
|
||||
/// when a new revision is pending (which implies that the current
|
||||
/// revision is canceled).
|
||||
pending_revision: AtomicU64,
|
||||
pending_revision: AtomicRevision,
|
||||
|
||||
/// Stores the "last change" revision for values of each duration.
|
||||
/// This vector is always of length at least 1 (for Durability 0)
|
||||
/// but its total length depends on the number of durations. The
|
||||
/// element at index 0 is special as it represents the "current
|
||||
/// revision". In general, we have the invariant that revisions
|
||||
/// in here are *declining* -- that is, `revisions[i] >=
|
||||
/// revisions[i + 1]`, for all `i`. This is because when you
|
||||
/// modify a value with durability D, that implies that values
|
||||
/// with durability less than D may have changed too.
|
||||
revisions: Vec<AtomicRevision>,
|
||||
|
||||
/// The dependency graph tracks which runtimes are blocked on one
|
||||
/// another, waiting for queries to terminate.
|
||||
dependency_graph: Mutex<DependencyGraph<DB>>,
|
||||
}
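A self-contained model of the invariant described above, using plain integers instead of `AtomicRevision` and only the three current durability levels; this is an illustration, not the crate's code.

struct Revisions {
    // Index 0 is the current revision; higher indices lag behind it,
    // so last_changed[i] >= last_changed[i + 1] always holds.
    last_changed: [u64; 3], // [current / LOW, MEDIUM, HIGH]
}

impl Revisions {
    // A write to an input of durability `d` creates a new revision and
    // records that every level up to and including `d` changed in it.
    fn write(&mut self, d: usize) -> u64 {
        let new = self.last_changed[0] + 1;
        for level in 0..=d {
            self.last_changed[level] = new;
        }
        new
    }
}

fn main() {
    let mut r = Revisions { last_changed: [1, 1, 1] };
    r.write(0); // LOW write: only the current revision moves
    assert_eq!(r.last_changed, [2, 1, 1]);
    r.write(2); // HIGH write: all levels move
    assert_eq!(r.last_changed, [3, 3, 3]);
}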
|
||||
|
||||
impl<DB: Database> SharedState<DB> {
|
||||
fn with_durabilities(durabilities: usize) -> Self {
|
||||
SharedState {
|
||||
next_id: AtomicUsize::new(1),
|
||||
storage: Default::default(),
|
||||
query_lock: Default::default(),
|
||||
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
|
||||
pending_revision: AtomicRevision::start(),
|
||||
dependency_graph: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<DB> std::panic::RefUnwindSafe for SharedState<DB>
|
||||
where
|
||||
DB: Database,
|
||||
|
@ -444,14 +533,7 @@ where
|
|||
|
||||
impl<DB: Database> Default for SharedState<DB> {
|
||||
fn default() -> Self {
|
||||
SharedState {
|
||||
next_id: AtomicUsize::new(1),
|
||||
storage: Default::default(),
|
||||
query_lock: Default::default(),
|
||||
revision: AtomicU64::new(1),
|
||||
pending_revision: AtomicU64::new(1),
|
||||
dependency_graph: Default::default(),
|
||||
}
|
||||
Self::with_durabilities(Durability::LEN)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -469,7 +551,7 @@ where
|
|||
};
|
||||
fmt.debug_struct("SharedState")
|
||||
.field("query_lock", &query_lock)
|
||||
.field("revision", &self.revision)
|
||||
.field("revisions", &self.revisions)
|
||||
.field("pending_revision", &self.pending_revision)
|
||||
.finish()
|
||||
}
|
||||
|
@ -479,11 +561,12 @@ struct ActiveQuery<DB: Database> {
|
|||
/// What query is executing
|
||||
database_key: DB::DatabaseKey,
|
||||
|
||||
/// Maximum revision of all inputs thus far;
|
||||
/// we also track if all inputs have been constant.
|
||||
///
|
||||
/// If we see an untracked input, this is not terribly relevant.
|
||||
changed_at: ChangedAt,
|
||||
/// Minimum durability of inputs observed so far.
|
||||
durability: Durability,
|
||||
|
||||
/// Maximum revision of all inputs observed. If we observe an
|
||||
/// untracked read, this will be set to the most recent revision.
|
||||
changed_at: Revision,
|
||||
|
||||
/// Set of subqueries that were accessed thus far, or `None` if
|
||||
/// there was an untracked read.
|
||||
|
@ -494,12 +577,12 @@ pub(crate) struct ComputedQueryResult<DB: Database, V> {
|
|||
/// Final value produced
|
||||
pub(crate) value: V,
|
||||
|
||||
/// Maximum revision of all inputs observed; `is_constant` is true
|
||||
/// if all inputs were constants.
|
||||
///
|
||||
/// If we observe an untracked read, this will be set to a
|
||||
/// non-constant value that changed in the most recent revision.
|
||||
pub(crate) changed_at: ChangedAt,
|
||||
/// Minimum durability of inputs observed so far.
|
||||
pub(crate) durability: Durability,
|
||||
|
||||
/// Maximum revision of all inputs observed. If we observe an
|
||||
/// untracked read, this will be set to the most recent revision.
|
||||
pub(crate) changed_at: Revision,
|
||||
|
||||
/// Complete set of subqueries that were accessed, or `None` if
|
||||
/// there was an untracked read.
|
||||
|
@ -507,39 +590,32 @@ pub(crate) struct ComputedQueryResult<DB: Database, V> {
|
|||
}
|
||||
|
||||
impl<DB: Database> ActiveQuery<DB> {
|
||||
fn new(database_key: DB::DatabaseKey) -> Self {
|
||||
fn new(database_key: DB::DatabaseKey, max_durability: Durability) -> Self {
|
||||
ActiveQuery {
|
||||
database_key,
|
||||
changed_at: ChangedAt {
|
||||
is_constant: true,
|
||||
revision: Revision::start(),
|
||||
},
|
||||
durability: max_durability,
|
||||
changed_at: Revision::start(),
|
||||
dependencies: Some(FxIndexSet::default()),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_read(&mut self, dependency: Dependency<DB>, changed_at: ChangedAt) {
|
||||
let ChangedAt {
|
||||
is_constant,
|
||||
revision,
|
||||
} = changed_at;
|
||||
|
||||
fn add_read(&mut self, dependency: Dependency<DB>, durability: Durability, revision: Revision) {
|
||||
if let Some(set) = &mut self.dependencies {
|
||||
set.insert(dependency);
|
||||
}
|
||||
|
||||
self.changed_at.is_constant &= is_constant;
|
||||
self.changed_at.revision = self.changed_at.revision.max(revision);
|
||||
self.durability = self.durability.min(durability);
|
||||
self.changed_at = self.changed_at.max(revision);
|
||||
}
|
||||
|
||||
fn add_untracked_read(&mut self, changed_at: Revision) {
|
||||
self.dependencies = None;
|
||||
self.changed_at.is_constant = false;
|
||||
self.changed_at.revision = changed_at;
|
||||
self.durability = Durability::LOW;
|
||||
self.changed_at = changed_at;
|
||||
}
|
||||
|
||||
fn add_anon_read(&mut self, changed_at: Revision) {
|
||||
self.changed_at.revision = self.changed_at.revision.max(changed_at);
|
||||
self.changed_at = self.changed_at.max(changed_at);
|
||||
}
|
||||
}

@ -551,63 +627,11 @@ pub struct RuntimeId {
    counter: usize,
}

/// A unique identifier for the current version of the database; each
/// time an input is changed, the revision number is incremented.
/// `Revision` is used internally to track which values may need to be
/// recomputed, but not something you should have to interact with
/// directly as a user of salsa.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Revision {
    generation: NonZeroU64,
}

impl Revision {
    fn start() -> Self {
        Self::from(1)
    }

    fn from(g: u64) -> Self {
        Self {
            generation: NonZeroU64::new(g).unwrap(),
        }
    }

    fn next(self) -> Revision {
        Self::from(self.generation.get() + 1)
    }
}

impl std::fmt::Debug for Revision {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(fmt, "R{}", self.generation)
    }
}

/// Records when a stamped value changed.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct ChangedAt {
    // Will this value ever change again?
    pub(crate) is_constant: bool,

    // At which revision did this value last change? (If this value is
    // the value of a constant input, this indicates when it became
    // constant.)
    pub(crate) revision: Revision,
}

impl ChangedAt {
    /// True if a value stored with this `ChangedAt` stamp has
    /// changed after `revision`. This is invoked by query storage
    /// when its dependents ask whether the value has changed.
    pub(crate) fn changed_since(self, revision: Revision) -> bool {
        self.revision > revision
    }
}

#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
    pub(crate) value: V,
    pub(crate) changed_at: ChangedAt,
    pub(crate) durability: Durability,
    pub(crate) changed_at: Revision,
}
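A `StampedValue` now records both how durable a value is and the revision in which it last changed. The payoff of carrying durability is that a consumer can sometimes skip dependency verification entirely: if nothing of at least that durability has been written since the value was last verified, it cannot have changed. The sketch below illustrates that idea under simplified assumptions (a plain array of last-changed revisions per durability level); it is not salsa's actual verification code, whose bookkeeping lives in the runtime and is more involved.

// Simplified stand-ins; salsa's real types and revision bookkeeping are richer.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Durability {
    Low,
    Medium,
    High,
}

type Revision = u64;

struct Runtime {
    /// `last_changed[d]` is the last revision in which an input with
    /// durability >= `d` was written.
    last_changed: [Revision; 3],
}

impl Runtime {
    /// A memo with the given durability, last verified at `verified_at`, is
    /// still known to be valid if no input it could possibly depend on has
    /// been written since; its dependencies then need not be re-checked.
    fn still_valid(&self, durability: Durability, verified_at: Revision) -> bool {
        self.last_changed[durability as usize] <= verified_at
    }
}

fn main() {
    // Low-durability inputs were last written in revision 7, medium in 4, high in 2.
    let runtime = Runtime { last_changed: [7, 4, 2] };

    // High- and medium-durability memos verified at revision 5 are still valid ...
    assert!(runtime.still_valid(Durability::High, 5));
    assert!(runtime.still_valid(Durability::Medium, 5));
    // ... while a low-durability memo verified at the same revision is not.
    assert!(!runtime.still_valid(Durability::Low, 5));
    println!("durability-based shortcutting works as sketched");
}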

struct DependencyGraph<DB: Database> {

@ -1,6 +1,6 @@
use crate::dependency::Dependency;
use crate::durability::Durability;
use crate::runtime::ActiveQuery;
use crate::runtime::ChangedAt;
use crate::runtime::Revision;
use crate::Database;
use std::cell::Ref;

@ -29,9 +29,13 @@ impl<DB: Database> Default for LocalState<DB> {
}

impl<DB: Database> LocalState<DB> {
    pub(super) fn push_query(&self, database_key: &DB::DatabaseKey) -> ActiveQueryGuard<'_, DB> {
    pub(super) fn push_query(
        &self,
        database_key: &DB::DatabaseKey,
        max_durability: Durability,
    ) -> ActiveQueryGuard<'_, DB> {
        let mut query_stack = self.query_stack.borrow_mut();
        query_stack.push(ActiveQuery::new(database_key.clone()));
        query_stack.push(ActiveQuery::new(database_key.clone(), max_durability));
        ActiveQueryGuard {
            local_state: self,
            push_len: query_stack.len(),

@ -58,9 +62,14 @@ impl<DB: Database> LocalState<DB> {
        .map(|active_query| active_query.database_key.clone())
    }

    pub(super) fn report_query_read(&self, dependency: Dependency<DB>, changed_at: ChangedAt) {
    pub(super) fn report_query_read(
        &self,
        dependency: Dependency<DB>,
        durability: Durability,
        changed_at: Revision,
    ) {
        if let Some(top_query) = self.query_stack.borrow_mut().last_mut() {
            top_query.add_read(dependency, changed_at);
            top_query.add_read(dependency, durability, changed_at);
        }
    }

@ -1,17 +1,17 @@
use crate::db;
use crate::group::*;
use salsa::debug::DebugQueryTable;
use salsa::{Database, SweepStrategy};
use salsa::{Database, Durability, SweepStrategy};

#[test]
fn compute_one() {
fn compute_one_write_low() {
    let mut db = db::DatabaseImpl::default();

    // Will compute fibonacci(5)
    db.set_use_triangular(5, false);
    db.compute(5);

    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);

    assert_keys! {
        db,

@ -38,6 +38,44 @@ fn compute_one() {
    }
}

#[test]
fn compute_one_write_high() {
    let mut db = db::DatabaseImpl::default();

    // Will compute fibonacci(5)
    db.set_use_triangular(5, false);
    db.compute(5);

    // Doing a synthetic write with durability *high* means that we
    // will revalidate the things `compute(5)` uses, and hence they
    // are not discarded.
    db.salsa_runtime().synthetic_write(Durability::HIGH);

    assert_keys! {
        db,
        TriangularQuery => (),
        FibonacciQuery => (0, 1, 2, 3, 4, 5),
        ComputeQuery => (5),
        UseTriangularQuery => (5),
        MinQuery => (),
        MaxQuery => (),
    }

    // Memoized, but will compute fibonacci(5) again
    db.compute(5);
    db.sweep_all(SweepStrategy::discard_outdated());

    assert_keys! {
        db,
        TriangularQuery => (),
        FibonacciQuery => (0, 1, 2, 3, 4, 5),
        ComputeQuery => (5),
        UseTriangularQuery => (5),
        MinQuery => (),
        MaxQuery => (),
    }
}

#[test]
fn compute_switch() {
    let mut db = db::DatabaseImpl::default();

@ -78,7 +116,7 @@ fn compute_switch() {
    }

    // Now run `compute` *again* in next revision.
    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);
    assert_eq!(db.compute(5), 15);
    db.sweep_all(SweepStrategy::discard_outdated());

@ -107,7 +145,7 @@ fn compute_all() {
    db.set_max(6);

    db.compute_all();
    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);
    db.compute_all();
    db.sweep_all(SweepStrategy::discard_outdated());

@ -1,7 +1,7 @@
use crate::db;
use crate::group::{FibonacciQuery, GcDatabase};
use salsa::debug::DebugQueryTable;
use salsa::{Database, SweepStrategy};
use salsa::{Database, Durability, SweepStrategy};

#[test]
fn sweep_default() {

@ -12,7 +12,7 @@ fn sweep_default() {
    let k: Vec<_> = db.query(FibonacciQuery).entries();
    assert_eq!(k.len(), 6);

    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);

    db.fibonacci(5);
    db.fibonacci(3);

@ -1,5 +1,6 @@
use crate::db;
use salsa::{Database, InternId, SweepStrategy};
use salsa::debug::DebugQueryTable;
use salsa::{Database, Durability, InternId, SweepStrategy};

/// Query group for tests for how interned keys interact with GC.
#[salsa::query_group(Intern)]

@ -73,13 +74,13 @@ fn discard_during_same_revision() {
/// exercises precisely that scenario.
#[test]
fn discard_outdated() {
    let mut db = db::DatabaseImpl::default();
    let db = db::DatabaseImpl::default();

    let foo_from_rev0 = db.repeat_intern1("foo");
    let bar_from_rev0 = db.repeat_intern1("bar");

    // Trigger a new revision.
    db.set_dummy(());
    db.salsa_runtime().synthetic_write(Durability::HIGH);

    // In this revision, we use "bar".
    let bar_from_rev1 = db.repeat_intern1("bar");

@ -110,3 +111,84 @@ fn discard_outdated() {
    assert_eq!(db.lookup_intern_str(bar_from_rev1), "bar");
    assert_eq!(db.lookup_intern_str(baz_from_rev1), "baz");
}

/// Variation on `discard_during_same_revision` --- here we show that
/// a synthetic write of level LOW isn't enough to collect interned
/// keys (which are considered durability HIGH).
#[test]
fn discard_durability_after_synthetic_write_low() {
    let db = db::DatabaseImpl::default();

    // This will assign index 0 for "foo".
    let foo1a = db.repeat_intern1("foo");
    assert_eq!(
        Durability::HIGH,
        db.query(RepeatIntern1Query).durability("foo")
    );

    // Trigger a new revision.
    db.salsa_runtime().synthetic_write(Durability::LOW);

    // If we are not careful, this would remove the interned key for
    // "foo".
    db.query(InternStrQuery).sweep(
        SweepStrategy::default()
            .discard_everything()
            .sweep_all_revisions(),
    );

    // This would then reuse index 0 for "bar".
    let bar1 = db.intern_str("bar");

    // And here we would assign index *1* to "foo".
    let foo2 = db.repeat_intern2("foo");

    // But we would still have a cached result with the value 0 and
    // with high durability, so we can reuse it. That gives an
    // inconsistent result.
    let foo1b = db.repeat_intern1("foo");

    assert_ne!(foo2, bar1);
    assert_eq!(foo1a, foo1b);
    assert_eq!(foo1b, foo2);
}

/// Variation on previous test in which we do a synthetic write to
/// `Durability::HIGH`.
#[test]
fn discard_durability_after_synthetic_write_high() {
    let db = db::DatabaseImpl::default();

    // This will assign index 0 for "foo".
    let foo1a = db.repeat_intern1("foo");
    assert_eq!(
        Durability::HIGH,
        db.query(RepeatIntern1Query).durability("foo")
    );

    // Trigger a new revision -- marking even high-durability values as having changed.
    db.salsa_runtime().synthetic_write(Durability::HIGH);

    // We are now able to collect the interned key for "foo".
    db.query(InternStrQuery).sweep(
        SweepStrategy::default()
            .discard_everything()
            .sweep_all_revisions(),
    );

    // So we can reuse index 0 for "bar".
    let bar1 = db.intern_str("bar");

    // And here we assign index *1* to "foo".
    let foo2 = db.repeat_intern2("foo");
    let foo1b = db.repeat_intern1("foo");

    // Thus foo1a (from before the synthetic write) and foo1b (from
    // after) are different.
    assert_ne!(foo1a, foo1b);

    // But the things that come after the synthetic write are
    // consistent.
    assert_ne!(foo2, bar1);
    assert_eq!(foo1b, foo2);
}

@ -1,7 +1,7 @@
use crate::db;
use crate::group::{FibonacciQuery, GcDatabase};
use salsa::debug::DebugQueryTable;
use salsa::{Database, SweepStrategy};
use salsa::{Database, Durability, SweepStrategy};

// For constant values (like `fibonacci`), we only keep the values
// that were used in the latest revision, not the sub-values that

@ -31,7 +31,7 @@ fn two_rev_nothing() {
    let k: Vec<_> = db.query(FibonacciQuery).entries();
    assert_eq!(k.len(), 6);

    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);

    // Nothing was used in this revision, so
    // everything gets collected.

@ -50,7 +50,7 @@ fn two_rev_one_use() {
    let k: Vec<_> = db.query(FibonacciQuery).entries();
    assert_eq!(k.len(), 6);

    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);

    db.fibonacci(5);

@ -73,7 +73,7 @@ fn two_rev_two_uses() {
    let k: Vec<_> = db.query(FibonacciQuery).entries();
    assert_eq!(k.len(), 6);

    db.salsa_runtime().next_revision();
    db.salsa_runtime().synthetic_write(Durability::LOW);

    db.fibonacci(5);
    db.fibonacci(3);

@ -1,50 +1,64 @@
use crate::implementation::{TestContext, TestContextImpl};
use salsa::debug::DebugQueryTable;
use salsa::Database;
use salsa::{Database, Durability};

#[salsa::query_group(Constants)]
pub(crate) trait ConstantsDatabase: TestContext {
    #[salsa::input]
    fn input(&self, key: char) -> usize;

    fn add(&self, keys: (char, char)) -> usize;
    fn add(&self, key1: char, key2: char) -> usize;

    fn add3(&self, key1: char, key2: char, key3: char) -> usize;
}

fn add(db: &impl ConstantsDatabase, (key1, key2): (char, char)) -> usize {
fn add(db: &impl ConstantsDatabase, key1: char, key2: char) -> usize {
    db.log().add(format!("add({}, {})", key1, key2));
    db.input(key1) + db.input(key2)
}

fn add3(db: &impl ConstantsDatabase, key1: char, key2: char, key3: char) -> usize {
    db.log().add(format!("add3({}, {}, {})", key1, key2, key3));
    db.add(key1, key2) + db.input(key3)
}

// Test we can assign a constant and things will be correctly
// recomputed afterwards.
#[test]
#[should_panic]
fn invalidate_constant() {
    let db = &mut TestContextImpl::default();
    db.set_constant_input('a', 44);
    db.set_constant_input('a', 66);
    db.set_input_with_durability('a', 44, Durability::HIGH);
    db.set_input_with_durability('b', 22, Durability::HIGH);
    assert_eq!(db.add('a', 'b'), 66);

    db.set_input_with_durability('a', 66, Durability::HIGH);
    assert_eq!(db.add('a', 'b'), 88);
}

#[test]
#[should_panic]
fn invalidate_constant_1() {
    let db = &mut TestContextImpl::default();

    // Not constant:
    db.set_input('a', 44);
    assert_eq!(db.add('a', 'a'), 88);

    // Becomes constant:
    db.set_constant_input('a', 44);
    db.set_input_with_durability('a', 44, Durability::HIGH);
    assert_eq!(db.add('a', 'a'), 88);

    // Invalidates:
    db.set_constant_input('a', 66);
    db.set_input_with_durability('a', 33, Durability::HIGH);
    assert_eq!(db.add('a', 'a'), 66);
}

/// Test that invoking `set` on a constant is an error, even if you
/// don't change the value.
// Test cases where we assign same value to 'a' after declaring it a
// constant.
#[test]
#[should_panic]
fn set_after_constant_same_value() {
    let db = &mut TestContextImpl::default();
    db.set_constant_input('a', 44);
    db.set_input_with_durability('a', 44, Durability::HIGH);
    db.set_input_with_durability('a', 44, Durability::HIGH);
    db.set_input('a', 44);
}

@ -54,28 +68,28 @@ fn not_constant() {

    db.set_input('a', 22);
    db.set_input('b', 44);
    assert_eq!(db.add(('a', 'b')), 66);
    assert!(!db.query(AddQuery).is_constant(('a', 'b')));
    assert_eq!(db.add('a', 'b'), 66);
    assert_eq!(Durability::LOW, db.query(AddQuery).durability(('a', 'b')));
}

#[test]
fn is_constant() {
fn durability() {
    let db = &mut TestContextImpl::default();

    db.set_constant_input('a', 22);
    db.set_constant_input('b', 44);
    assert_eq!(db.add(('a', 'b')), 66);
    assert!(db.query(AddQuery).is_constant(('a', 'b')));
    db.set_input_with_durability('a', 22, Durability::HIGH);
    db.set_input_with_durability('b', 44, Durability::HIGH);
    assert_eq!(db.add('a', 'b'), 66);
    assert_eq!(Durability::HIGH, db.query(AddQuery).durability(('a', 'b')));
}

#[test]
fn mixed_constant() {
    let db = &mut TestContextImpl::default();

    db.set_constant_input('a', 22);
    db.set_input_with_durability('a', 22, Durability::HIGH);
    db.set_input('b', 44);
    assert_eq!(db.add(('a', 'b')), 66);
    assert!(!db.query(AddQuery).is_constant(('a', 'b')));
    assert_eq!(db.add('a', 'b'), 66);
    assert_eq!(Durability::LOW, db.query(AddQuery).durability(('a', 'b')));
}

#[test]
@ -84,14 +98,51 @@ fn becomes_constant_with_change() {

    db.set_input('a', 22);
    db.set_input('b', 44);
    assert_eq!(db.add(('a', 'b')), 66);
    assert!(!db.query(AddQuery).is_constant(('a', 'b')));
    assert_eq!(db.add('a', 'b'), 66);
    assert_eq!(Durability::LOW, db.query(AddQuery).durability(('a', 'b')));

    db.set_constant_input('a', 23);
    assert_eq!(db.add(('a', 'b')), 67);
    assert!(!db.query(AddQuery).is_constant(('a', 'b')));
    db.set_input_with_durability('a', 23, Durability::HIGH);
    assert_eq!(db.add('a', 'b'), 67);
    assert_eq!(Durability::LOW, db.query(AddQuery).durability(('a', 'b')));

    db.set_constant_input('b', 45);
    assert_eq!(db.add(('a', 'b')), 68);
    assert!(db.query(AddQuery).is_constant(('a', 'b')));
    db.set_input_with_durability('b', 45, Durability::HIGH);
    assert_eq!(db.add('a', 'b'), 68);
    assert_eq!(Durability::HIGH, db.query(AddQuery).durability(('a', 'b')));

    db.set_input_with_durability('b', 45, Durability::MEDIUM);
    assert_eq!(db.add('a', 'b'), 68);
    assert_eq!(
        Durability::MEDIUM,
        db.query(AddQuery).durability(('a', 'b'))
    );
}

// Test a subtle case in which an input changes from constant to
// non-constant, but its value doesn't change. If we're not careful,
// this can cause us to incorrectly consider derived values as still
// being constant.
#[test]
fn constant_to_non_constant() {
    let db = &mut TestContextImpl::default();

    db.set_input_with_durability('a', 11, Durability::HIGH);
    db.set_input_with_durability('b', 22, Durability::HIGH);
    db.set_input_with_durability('c', 33, Durability::HIGH);

    // Here, `add3` invokes `add`, which yields 33. Both calls are
    // constant.
    assert_eq!(db.add3('a', 'b', 'c'), 66);

    db.set_input('a', 11);

    // Here, `add3` invokes `add`, which *still* yields 33, but which
    // is no longer constant. Since the value didn't change, we might
    // preserve `add3` unchanged, not noticing that it is no longer
    // constant.
    assert_eq!(db.add3('a', 'b', 'c'), 66);

    // In that case, we would not get the correct result here, when
    // 'a' changes *again*.
    db.set_input('a', 22);
    assert_eq!(db.add3('a', 'b', 'c'), 77);
}
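The failure mode this test guards against is easy to reproduce in any memoization scheme that only refreshes a memo when its value changes. The sketch below uses a hypothetical, simplified memo type (not salsa's actual memo code) to show the rule the test enforces: when a recomputation produces the same value, the old `changed_at` may be kept ("backdating"), but the durability observed by the new computation must replace the old one unconditionally.

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Durability {
    Low,
    High,
}

type Revision = u64;

#[derive(Debug)]
struct Memo {
    value: usize,
    durability: Durability,
    changed_at: Revision,
}

impl Memo {
    /// Refresh this memo after recomputation in revision `now`.
    fn refresh(&mut self, new_value: usize, new_durability: Durability, now: Revision) {
        if new_value != self.value {
            self.value = new_value;
            self.changed_at = now;
        }
        // Even when the value is unchanged, the durability must be replaced by
        // whatever the new computation observed; otherwise a derived value
        // that now depends on a low-durability input would still look constant.
        self.durability = new_durability;
    }
}

fn main() {
    let mut memo = Memo { value: 33, durability: Durability::High, changed_at: 1 };

    // Recomputed in revision 2: same value, but it now reads a LOW input.
    memo.refresh(33, Durability::Low, 2);

    assert_eq!(memo.changed_at, 1); // backdated, since the value did not change
    assert_eq!(memo.durability, Durability::Low); // but no longer high durability
    println!("{:?}", memo);
}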

@ -1,5 +1,5 @@
use crate::implementation::{TestContext, TestContextImpl};
use salsa::Database;
use salsa::{Database, Durability};

#[salsa::query_group(MemoizedVolatile)]
pub(crate) trait MemoizedVolatileContext: TestContext {

@ -58,7 +58,7 @@ fn revalidate() {

    // Second generation: volatile will change (to 1) but memoized1
    // will not (still 0, as 1/2 = 0)
    query.salsa_runtime().next_revision();
    query.salsa_runtime().synthetic_write(Durability::LOW);
    query.memoized2();
    query.assert_log(&["Memoized1 invoked", "Volatile invoked"]);
    query.memoized2();

@ -67,7 +67,7 @@ fn revalidate() {
    // Third generation: volatile will change (to 2) and memoized1
    // will too (to 1). Therefore, after validating that Memoized1
    // changed, we now invoke Memoized2.
    query.salsa_runtime().next_revision();
    query.salsa_runtime().synthetic_write(Durability::LOW);

    query.memoized2();
    query.assert_log(&["Memoized1 invoked", "Volatile invoked", "Memoized2 invoked"]);

@ -3,6 +3,7 @@
use crate::implementation::DatabaseImpl;
use crate::queries::Database;
use salsa::Database as _Database;
use salsa::Durability;

#[test]
fn memoized_twice() {

@ -19,7 +20,7 @@ fn volatile_twice() {
    let v2 = db.volatile(); // volatiles are cached, so 2nd read returns the same
    assert_eq!(v1, v2);

    db.salsa_runtime().next_revision(); // clears volatile caches
    db.salsa_runtime().synthetic_write(Durability::LOW); // clears volatile caches

    let v3 = db.volatile(); // will re-increment the counter
    let v4 = db.volatile(); // second call will be cached

@ -39,7 +40,7 @@ fn intermingled() {
    assert_eq!(v1, v3);
    assert_eq!(v2, v4);

    db.salsa_runtime().next_revision(); // clears volatile caches
    db.salsa_runtime().synthetic_write(Durability::LOW); // clears volatile caches

    let v5 = db.memoized(); // re-executes volatile, caches new result
    let v6 = db.memoized(); // re-use cached result