Merge pull request #150 from nikomatsakis/salsa-interned

add interned keys to salsa
Niko Matsakis 2019-03-27 05:16:03 -04:00 committed by GitHub
commit 3925337a3c
11 changed files with 1031 additions and 29 deletions
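
For orientation, here is a minimal sketch of the user-facing API this PR adds, mirroring `tests/interned.rs` further down (the trait, database, and query names are illustrative):

#[salsa::query_group(InternStorage)]
trait Intern {
    /// An interned query: the key is stored once and a stable `u32`
    /// handle (or any type implementing `salsa::InternKey`) is returned.
    #[salsa::interned]
    fn intern_str(&self, x: String) -> u32;
}

#[salsa::database(InternStorage)]
#[derive(Default)]
struct Db {
    runtime: salsa::Runtime<Db>,
}

impl salsa::Database for Db {
    fn salsa_runtime(&self) -> &salsa::Runtime<Db> {
        &self.runtime
    }
}

fn main() {
    let db = Db::default();
    let a = db.intern_str("hello".to_string());
    let b = db.intern_str("hello".to_string());
    assert_eq!(a, b); // interning the same key twice yields the same handle
    // The macro also generates a reverse-lookup query automatically:
    assert_eq!(db.lookup_intern_str(a), "hello".to_string());
}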


@ -15,5 +15,5 @@ proc-macro = true
heck = "0.3"
proc-macro2 = "0.4"
quote = "0.6"
syn = { version = "0.15", features = ["full", "extra-traits"] }
syn = { version = "0.15.29", features = ["full", "extra-traits"] }


@ -3,7 +3,7 @@ use heck::CamelCase;
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::ToTokens;
use syn::{parse_macro_input, FnArg, Ident, ItemTrait, ReturnType, TraitItem};
use syn::{parse_macro_input, parse_quote, FnArg, Ident, ItemTrait, ReturnType, TraitItem, Type};
/// Implementation for the `#[salsa::query_group]` decorator.
pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream {
@ -60,6 +60,10 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
storage = QueryStorage::Input;
num_storages += 1;
}
"interned" => {
storage = QueryStorage::Interned;
num_storages += 1;
}
"invoke" => {
invoke = Some(parse_macro_input!(tts as Parenthesized<syn::Path>).0);
}
@ -106,15 +110,56 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
),
};
// For `#[salsa::interned]` keys, we create a "lookup key" automatically.
//
// For a query like:
//
// fn foo(&self, x: Key1, y: Key2) -> u32
//
// we would create
//
// fn lookup_foo(&self, x: u32) -> (Key1, Key2)
let lookup_query = if let QueryStorage::Interned = storage {
let lookup_query_type = Ident::new(
&format!(
"{}LookupQuery",
method.sig.ident.to_string().to_camel_case()
),
Span::call_site(),
);
let lookup_fn_name = Ident::new(
&format!("lookup_{}", method.sig.ident.to_string()),
method.sig.ident.span(),
);
let keys = &keys;
let lookup_value: Type = parse_quote!((#(#keys),*));
let lookup_keys = vec![value.clone()];
Some(Query {
query_type: lookup_query_type,
fn_name: lookup_fn_name,
attrs: vec![], // FIXME -- some automatically generated docs on this method?
storage: QueryStorage::InternedLookup {
intern_query_type: query_type.clone(),
},
keys: lookup_keys,
value: lookup_value,
invoke: None,
})
} else {
None
};
queries.push(Query {
query_type,
fn_name: method.sig.ident.clone(),
fn_name: method.sig.ident,
attrs,
storage,
keys,
value,
invoke,
});
queries.extend(lookup_query);
}
_ => (),
}
@ -245,6 +290,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
impl<DB__> salsa::plumbing::QueryGroup<DB__> for #group_struct
where
DB__: #trait_name,
DB__: salsa::plumbing::HasQueryGroup<#group_struct>,
DB__: salsa::Database,
{
type GroupStorage = #group_storage<DB__>;
@ -270,15 +316,19 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
for query in &queries {
let fn_name = &query.fn_name;
let qt = &query.query_type;
let storage = Ident::new(
match query.storage {
QueryStorage::Memoized => "MemoizedStorage",
QueryStorage::Volatile => "VolatileStorage",
QueryStorage::Dependencies => "DependencyStorage",
QueryStorage::Input => "InputStorage",
},
Span::call_site(),
);
let db = quote! {DB};
let storage = match &query.storage {
QueryStorage::Memoized => quote!(salsa::plumbing::MemoizedStorage<#db, Self>),
QueryStorage::Volatile => quote!(salsa::plumbing::VolatileStorage<#db, Self>),
QueryStorage::Dependencies => quote!(salsa::plumbing::DependencyStorage<#db, Self>),
QueryStorage::Input => quote!(salsa::plumbing::InputStorage<#db, Self>),
QueryStorage::Interned => quote!(salsa::plumbing::InternedStorage<#db, Self>),
QueryStorage::InternedLookup { intern_query_type } => {
quote!(salsa::plumbing::LookupInternedStorage<#db, Self, #intern_query_type>)
}
};
let keys = &query.keys;
let value = &query.value;
@ -287,16 +337,17 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
#[derive(Default, Debug)]
#trait_vis struct #qt;
impl<DB> salsa::Query<DB> for #qt
impl<#db> salsa::Query<#db> for #qt
where
DB: #trait_name,
DB: salsa::plumbing::HasQueryGroup<#group_struct>,
DB: salsa::Database,
{
type Key = (#(#keys),*);
type Value = #value;
type Storage = salsa::plumbing::#storage<DB, Self>;
type Storage = #storage;
type Group = #group_struct;
type GroupStorage = #group_storage<DB>;
type GroupStorage = #group_storage<#db>;
type GroupKey = #group_key;
fn query_storage(group_storage: &Self::GroupStorage) -> &Self::Storage {
@ -309,8 +360,8 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
}
});
// Implement the QueryFunction trait for all queries except inputs.
if query.storage != QueryStorage::Input {
// Implement the QueryFunction trait for queries which need it.
if query.storage.needs_query_function() {
let span = query.fn_name.span();
let key_names: &Vec<_> = &(0..query.keys.len())
.map(|i| Ident::new(&format!("key{}", i), Span::call_site()))
@ -328,6 +379,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
impl<DB> salsa::plumbing::QueryFunction<DB> for #qt
where
DB: #trait_name,
DB: salsa::plumbing::HasQueryGroup<#group_struct>,
DB: salsa::Database,
{
fn execute(db: &DB, #key_pattern: <Self as salsa::Query<DB>>::Key)
@ -379,6 +431,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
#trait_vis struct #group_storage<DB__>
where
DB__: #trait_name,
DB__: salsa::plumbing::HasQueryGroup<#group_struct>,
DB__: salsa::Database,
{
#storage_fields
@ -387,6 +440,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream
impl<DB__> Default for #group_storage<DB__>
where
DB__: #trait_name,
DB__: salsa::plumbing::HasQueryGroup<#group_struct>,
DB__: salsa::Database,
{
#[inline]
@ -440,10 +494,23 @@ struct Query {
invoke: Option<syn::Path>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq)]
enum QueryStorage {
Memoized,
Volatile,
Dependencies,
Input,
Interned,
InternedLookup { intern_query_type: Ident },
}
impl QueryStorage {
fn needs_query_function(&self) -> bool {
match self {
QueryStorage::Input | QueryStorage::Interned | QueryStorage::InternedLookup { .. } => {
false
}
QueryStorage::Memoized | QueryStorage::Volatile | QueryStorage::Dependencies => true,
}
}
}
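
As the comment in the macro above explains, every `#[salsa::interned]` query gets a companion lookup query whose key and value are swapped. A hedged sketch of the pairing (query names are illustrative; with a single key the tuple degenerates to that key's type):

#[salsa::query_group(PathInterner)]
trait PathDatabase {
    // What the user writes: intern a pair of keys into a `u32` handle.
    #[salsa::interned]
    fn intern_path(&self, krate: String, name: String) -> u32;

    // What the macro generates alongside it (shown as a comment only,
    // since writing it out by hand would clash with the generated item):
    //
    //     fn lookup_intern_path(&self, key: u32) -> (String, String);
}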


@ -942,15 +942,6 @@ where
fn sweep(&self, db: &DB, strategy: SweepStrategy) {
let mut map_write = self.map.write();
let revision_now = db.salsa_runtime().current_revision();
match (strategy.discard_if, strategy.discard_what) {
(DiscardIf::Always, DiscardWhat::Everything) => {
debug!("sweep({:?}): clearing the table", Q::default());
map_write.clear();
return;
},
(DiscardIf::Never, _) | (_, DiscardWhat::Nothing) => return,
_ => {}
}
map_write.retain(|key, query_state| {
match query_state {
// Leave stuff that is currently being computed -- the
@ -972,6 +963,19 @@ where
revision_now
);
// Check if this memo read something "untracked"
// -- meaning non-deterministic. In this case, we
// can only collect "outdated" data that wasn't
// used in the current revision. This is because
// if we collected something from the current
// revision, we might wind up re-executing the
// query later in the revision and getting a
// distinct result.
let is_volatile = match memo.inputs {
MemoInputs::Untracked => true,
_ => false,
};
// Since we don't acquire a query lock in this
// method, it *is* possible for the revision to
// change while we are executing. However, it is
@ -982,7 +986,19 @@ where
assert!(memo.verified_at <= revision_now);
match strategy.discard_if {
DiscardIf::Never => unreachable!(),
// If we are only discarding outdated things,
// and this is not outdated, keep it.
DiscardIf::Outdated if memo.verified_at == revision_now => true,
// As explained on the `is_volatile` variable
// definition, if this is a volatile entry, we
// can't discard it unless it is outdated.
DiscardIf::Always if is_volatile && memo.verified_at == revision_now => {
true
}
// Otherwise, we can discard -- discard whatever the user requested.
DiscardIf::Outdated | DiscardIf::Always => match strategy.discard_what {
DiscardWhat::Nothing => unreachable!(),
DiscardWhat::Values => {

src/interned.rs (new file, 641 lines)

@ -0,0 +1,641 @@
use crate::debug::TableEntry;
use crate::plumbing::CycleDetected;
use crate::plumbing::HasQueryGroup;
use crate::plumbing::QueryStorageMassOps;
use crate::plumbing::QueryStorageOps;
use crate::runtime::ChangedAt;
use crate::runtime::Revision;
use crate::runtime::StampedValue;
use crate::Query;
use crate::{Database, DiscardIf, SweepStrategy};
use parking_lot::RwLock;
use rustc_hash::FxHashMap;
use std::collections::hash_map::Entry;
use std::convert::From;
use std::hash::Hash;
/// Handles storage for `#[salsa::interned]` queries, which map each
/// key to a stable integer index that can later be used to look the
/// key back up.
pub struct InternedStorage<DB, Q>
where
Q: Query<DB>,
Q::Value: InternKey,
DB: Database,
{
tables: RwLock<InternTables<Q::Key>>,
}
/// Storage for looking up interned things (the reverse direction of `InternedStorage`).
pub struct LookupInternedStorage<DB, Q, IQ>
where
Q: Query<DB>,
Q::Key: InternKey,
Q::Value: Eq + Hash,
IQ: Query<
DB,
Key = Q::Value,
Value = Q::Key,
Group = Q::Group,
GroupStorage = Q::GroupStorage,
GroupKey = Q::GroupKey,
>,
DB: Database,
{
phantom: std::marker::PhantomData<(DB, Q, IQ)>,
}
struct InternTables<K> {
/// Map from the key to the corresponding intern-index.
map: FxHashMap<K, InternIndex>,
/// For each valid intern-index, stores the interned value. When
/// an interned value is GC'd, the entry is set to
/// `InternValue::Free` with the next free item.
values: Vec<InternValue<K>>,
/// Index of the first free intern-index, if any.
first_free: Option<InternIndex>,
}
/// Trait implemented for the "key" that results from a
/// `#[salsa::interned]` query. This is basically meant to be a
/// "newtype"'d `u32`.
pub trait InternKey {
/// Create an instance of the intern-key from a `u32` value.
fn from_u32(v: u32) -> Self;
/// Extract the `u32` with which the intern-key was created.
fn as_u32(&self) -> u32;
}
impl InternKey for u32 {
fn from_u32(v: u32) -> Self {
v
}
fn as_u32(&self) -> u32 {
*self
}
}
impl InternKey for usize {
fn from_u32(v: u32) -> Self {
v as usize
}
fn as_u32(&self) -> u32 {
assert!(*self <= (std::u32::MAX as usize));
*self as u32
}
}
/// Newtype indicating an index into the intern table.
///
/// NB. In some cases, `InternIndex` values come directly from the
/// user and hence they are not 'trusted' to be valid or in-bounds.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct InternIndex {
index: u32,
}
impl InternIndex {
fn index(self) -> usize {
self.index as usize
}
}
impl From<usize> for InternIndex {
fn from(v: usize) -> Self {
InternIndex { index: v.as_u32() }
}
}
impl<T> From<&T> for InternIndex
where
T: InternKey,
{
fn from(v: &T) -> Self {
InternIndex { index: v.as_u32() }
}
}
enum InternValue<K> {
/// The value has not been gc'd.
Present {
value: K,
/// When was this intern'd?
///
/// (This informs the "changed-at" result)
interned_at: Revision,
/// When was it accessed?
///
/// (This informs the garbage collector)
accessed_at: Revision,
},
/// Free-list entry -- `next` is the next free index, if any.
Free { next: Option<InternIndex> },
}
impl<DB, Q> std::panic::RefUnwindSafe for InternedStorage<DB, Q>
where
Q: Query<DB>,
DB: Database,
Q::Key: std::panic::RefUnwindSafe,
Q::Value: InternKey,
Q::Value: std::panic::RefUnwindSafe,
{
}
impl<DB, Q> Default for InternedStorage<DB, Q>
where
Q: Query<DB>,
Q::Key: Eq + Hash,
Q::Value: InternKey,
DB: Database,
{
fn default() -> Self {
InternedStorage {
tables: RwLock::new(InternTables::default()),
}
}
}
impl<DB, Q, IQ> Default for LookupInternedStorage<DB, Q, IQ>
where
Q: Query<DB>,
Q::Key: InternKey,
Q::Value: Eq + Hash,
IQ: Query<
DB,
Key = Q::Value,
Value = Q::Key,
Group = Q::Group,
GroupStorage = Q::GroupStorage,
GroupKey = Q::GroupKey,
>,
DB: Database,
{
fn default() -> Self {
LookupInternedStorage {
phantom: std::marker::PhantomData,
}
}
}
impl<K> Default for InternTables<K>
where
K: Eq + Hash,
{
fn default() -> Self {
Self {
map: Default::default(),
values: Default::default(),
first_free: Default::default(),
}
}
}
impl<DB, Q> InternedStorage<DB, Q>
where
Q: Query<DB>,
Q::Key: Eq + Hash + Clone,
Q::Value: InternKey,
DB: Database,
{
fn intern_index(&self, db: &DB, key: &Q::Key) -> StampedValue<InternIndex> {
if let Some(i) = self.intern_check(db, key) {
return i;
}
let owned_key1 = key.to_owned();
let owned_key2 = owned_key1.clone();
let revision_now = db.salsa_runtime().current_revision();
let mut tables = self.tables.write();
let tables = &mut *tables;
let entry = match tables.map.entry(owned_key1) {
Entry::Vacant(entry) => entry,
Entry::Occupied(entry) => {
// Somebody inserted this key while we were waiting
// for the write lock.
let index = *entry.get();
match &tables.values[index.index()] {
InternValue::Present {
value,
interned_at,
accessed_at,
} => {
debug_assert_eq!(owned_key2, *value);
debug_assert_eq!(*accessed_at, revision_now);
return StampedValue {
value: index,
changed_at: ChangedAt {
is_constant: false,
revision: *interned_at,
},
};
}
InternValue::Free { .. } => {
panic!("key {:?} should be present but is not", key,);
}
}
}
};
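// Allocate an intern index: reuse the head of the free list if there is
// one, otherwise append a fresh slot to `values`.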
let index = match tables.first_free {
None => {
let index = InternIndex::from(tables.values.len());
tables.values.push(InternValue::Present {
value: owned_key2,
interned_at: revision_now,
accessed_at: revision_now,
});
index
}
Some(i) => {
let next_free = match &tables.values[i.index()] {
InternValue::Free { next } => *next,
InternValue::Present { value, .. } => {
panic!(
"index {:?} was supposed to be free but contains {:?}",
i, value
);
}
};
tables.values[i.index()] = InternValue::Present {
value: owned_key2,
interned_at: revision_now,
accessed_at: revision_now,
};
tables.first_free = next_free;
i
}
};
entry.insert(index);
StampedValue {
value: index,
changed_at: ChangedAt {
is_constant: false,
revision: revision_now,
},
}
}
fn intern_check(&self, db: &DB, key: &Q::Key) -> Option<StampedValue<InternIndex>> {
let revision_now = db.salsa_runtime().current_revision();
// First, check with only the read lock: if the key is already interned
// and was accessed in this revision, we can return right away.
{
let tables = self.tables.read();
let &index = tables.map.get(key)?;
match &tables.values[index.index()] {
InternValue::Present {
interned_at,
accessed_at,
..
} => {
if *accessed_at == revision_now {
return Some(StampedValue {
value: index,
changed_at: ChangedAt {
is_constant: false,
revision: *interned_at,
},
});
}
}
InternValue::Free { .. } => {
panic!(
"key {:?} maps to index {:?} is free but should not be",
key, index
);
}
}
}
// Next, take the write lock so that `accessed_at` can be bumped to the
// current revision.
let mut tables = self.tables.write();
let &index = tables.map.get(key)?;
match &mut tables.values[index.index()] {
InternValue::Present {
interned_at,
accessed_at,
..
} => {
*accessed_at = revision_now;
return Some(StampedValue {
value: index,
changed_at: ChangedAt {
is_constant: false,
revision: *interned_at,
},
});
}
InternValue::Free { .. } => {
panic!(
"key {:?} maps to index {:?} is free but should not be",
key, index
);
}
}
}
/// Given an index, looks up its value and applies `op` to it, updating
/// the `accessed_at` revision if necessary.
fn lookup_value<R>(
&self,
db: &DB,
index: InternIndex,
op: impl FnOnce(&Q::Key) -> R,
) -> StampedValue<R> {
let index = index.index();
let revision_now = db.salsa_runtime().current_revision();
{
let tables = self.tables.read();
debug_assert!(
index < tables.values.len(),
"interned key ``{:?}({})` is out of bounds",
Q::default(),
index,
);
match &tables.values[index] {
InternValue::Present {
accessed_at,
interned_at,
value,
} => {
if *accessed_at == revision_now {
return StampedValue {
value: op(value),
changed_at: ChangedAt {
is_constant: false,
revision: *interned_at,
},
};
}
}
InternValue::Free { .. } => panic!(
"interned key `{:?}({})` has been garbage collected",
Q::default(),
index,
),
}
}
let mut tables = self.tables.write();
match &mut tables.values[index] {
InternValue::Present {
accessed_at,
interned_at,
value,
} => {
*accessed_at = revision_now;
return StampedValue {
value: op(value),
changed_at: ChangedAt {
is_constant: false,
revision: *interned_at,
},
};
}
InternValue::Free { .. } => panic!(
"interned key `{:?}({})` has been garbage collected",
Q::default(),
index,
),
}
}
}
impl<DB, Q> QueryStorageOps<DB, Q> for InternedStorage<DB, Q>
where
Q: Query<DB>,
Q::Value: InternKey,
DB: Database,
{
fn try_fetch(
&self,
db: &DB,
key: &Q::Key,
database_key: &DB::DatabaseKey,
) -> Result<Q::Value, CycleDetected> {
let StampedValue { value, changed_at } = self.intern_index(db, key);
db.salsa_runtime()
.report_query_read(database_key, changed_at);
Ok(<Q::Value>::from_u32(value.index))
}
fn maybe_changed_since(
&self,
db: &DB,
revision: Revision,
key: &Q::Key,
_database_key: &DB::DatabaseKey,
) -> bool {
match self.intern_check(db, key) {
Some(StampedValue {
value: _,
changed_at,
}) => changed_at.changed_since(revision),
None => true,
}
}
fn is_constant(&self, _db: &DB, _key: &Q::Key) -> bool {
false
}
fn entries<C>(&self, _db: &DB) -> C
where
C: std::iter::FromIterator<TableEntry<Q::Key, Q::Value>>,
{
let tables = self.tables.read();
tables
.map
.iter()
.map(|(key, index)| {
TableEntry::new(key.clone(), Some(<Q::Value>::from_u32(index.index)))
})
.collect()
}
}
impl<DB, Q> QueryStorageMassOps<DB> for InternedStorage<DB, Q>
where
Q: Query<DB>,
Q::Value: InternKey,
DB: Database,
{
fn sweep(&self, db: &DB, strategy: SweepStrategy) {
let mut tables = self.tables.write();
let revision_now = db.salsa_runtime().current_revision();
let InternTables {
map,
values,
first_free,
} = &mut *tables;
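// Walk the interning map: unless the strategy is `DiscardIf::Never`, free
// every entry that was not accessed in the current revision and thread its
// slot onto the free list.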
map.retain(|key, intern_index| {
let discard = match strategy.discard_if {
DiscardIf::Never => false,
// NB: Interned queries *never* discard keys unless they
// are outdated, regardless of the sweep strategy. This is
// because interned queries are not deterministic;
// if we were to remove a value from the current revision,
// and the query were later executed again, it would not necessarily
// produce the same intern key the second time. This would wreak
// havoc. See the test `discard_during_same_revision` for an example.
//
// Keys that have not (yet) been accessed during this
// revision don't have this problem. Anything
// dependent on them would regard itself as dirty if
// they are removed and also be forced to re-execute.
DiscardIf::Always | DiscardIf::Outdated => match values[intern_index.index()] {
InternValue::Present { accessed_at, .. } => accessed_at < revision_now,
InternValue::Free { .. } => {
panic!(
"key {:?} maps to index {:?} which is free",
key, intern_index
);
}
},
};
if discard {
values[intern_index.index()] = InternValue::Free { next: *first_free };
*first_free = Some(*intern_index);
}
!discard
});
}
}
impl<DB, Q, IQ> QueryStorageOps<DB, Q> for LookupInternedStorage<DB, Q, IQ>
where
Q: Query<DB>,
Q::Key: InternKey,
Q::Value: Eq + Hash,
IQ: Query<
DB,
Key = Q::Value,
Value = Q::Key,
Storage = InternedStorage<DB, IQ>,
Group = Q::Group,
GroupStorage = Q::GroupStorage,
GroupKey = Q::GroupKey,
>,
DB: Database + HasQueryGroup<Q::Group>,
{
fn try_fetch(
&self,
db: &DB,
key: &Q::Key,
database_key: &DB::DatabaseKey,
) -> Result<Q::Value, CycleDetected> {
let index = InternIndex::from(key);
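// The lookup is answered by the companion interning query's (`IQ`) storage,
// mapping the intern key back to the value it was interned from.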
let group_storage = <DB as HasQueryGroup<Q::Group>>::group_storage(db);
let interned_storage = IQ::query_storage(group_storage);
let StampedValue { value, changed_at } =
interned_storage.lookup_value(db, index, Clone::clone);
db.salsa_runtime()
.report_query_read(database_key, changed_at);
Ok(value)
}
fn maybe_changed_since(
&self,
db: &DB,
revision: Revision,
key: &Q::Key,
_database_key: &DB::DatabaseKey,
) -> bool {
let index = InternIndex::from(key);
// NB. This will **panic** if `key` has been removed from the
// map, whereas you might expect it to return true in that
// event. But I think this is ok. You have to ask yourself,
// where did this (invalid) key K come from? There are two
// options:
//
// ## Some query Q1 obtained the key K by interning a value:
//
// In that case, Q1 has a prior input that computes K. This
// input must be invalid and hence Q1 must be considered to
// have changed, so it shouldn't be asking if we have changed.
//
// ## Some query Q1 was given K as an input:
//
// In that case, the query Q1 must be invoked (ultimately) by
// some query Q2 that computed K. This implies that K must be
// the result of *some* valid interning call, and therefore
// that it should be a valid key now (and not pointing at a
// free slot or out of bounds).
let group_storage = <DB as HasQueryGroup<Q::Group>>::group_storage(db);
let interned_storage = IQ::query_storage(group_storage);
let StampedValue {
value: (),
changed_at,
} = interned_storage.lookup_value(db, index, |_| ());
changed_at.changed_since(revision)
}
fn is_constant(&self, _db: &DB, _key: &Q::Key) -> bool {
false
}
fn entries<C>(&self, db: &DB) -> C
where
C: std::iter::FromIterator<TableEntry<Q::Key, Q::Value>>,
{
let group_storage = <DB as HasQueryGroup<Q::Group>>::group_storage(db);
let interned_storage = IQ::query_storage(group_storage);
let tables = interned_storage.tables.read();
tables
.map
.iter()
.map(|(key, index)| TableEntry::new(<Q::Key>::from_u32(index.index), Some(key.clone())))
.collect()
}
}
impl<DB, Q, IQ> QueryStorageMassOps<DB> for LookupInternedStorage<DB, Q, IQ>
where
Q: Query<DB>,
Q::Key: InternKey,
Q::Value: Eq + Hash,
IQ: Query<
DB,
Key = Q::Value,
Value = Q::Key,
Group = Q::Group,
GroupStorage = Q::GroupStorage,
GroupKey = Q::GroupKey,
>,
DB: Database,
{
fn sweep(&self, _db: &DB, _strategy: SweepStrategy) {}
}
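
The `InternKey` trait above is typically implemented for a newtype wrapper around `u32` rather than used with a bare integer; a minimal sketch (the same pattern appears in `tests/interned.rs` below):

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct NameId(u32);

impl salsa::InternKey for NameId {
    fn from_u32(v: u32) -> Self {
        NameId(v)
    }

    fn as_u32(&self) -> u32 {
        self.0
    }
}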


@ -10,6 +10,7 @@
mod derived;
mod input;
mod interned;
mod runtime;
pub mod debug;
@ -26,6 +27,7 @@ use derive_new::new;
use std::fmt::{self, Debug};
use std::hash::Hash;
pub use crate::interned::InternKey;
pub use crate::runtime::Runtime;
pub use crate::runtime::RuntimeId;
@ -406,7 +408,7 @@ pub trait Query<DB: Database>: Debug + Default + Sized + 'static {
type Value: Clone + Debug;
/// Internal struct storing the values for the query.
type Storage: plumbing::QueryStorageOps<DB, Self> + Send + Sync;
type Storage: plumbing::QueryStorageOps<DB, Self>;
/// Associate query group struct.
type Group: plumbing::QueryGroup<


@ -13,6 +13,8 @@ pub use crate::derived::DependencyStorage;
pub use crate::derived::MemoizedStorage;
pub use crate::derived::VolatileStorage;
pub use crate::input::InputStorage;
pub use crate::interned::InternedStorage;
pub use crate::interned::LookupInternedStorage;
pub use crate::runtime::Revision;
pub struct CycleDetected;


@ -1,7 +1,9 @@
use crate::group;
use crate::interned;
use crate::log::{HasLog, Log};
use crate::volatile_tests;
#[salsa::database(group::Gc)]
#[salsa::database(group::Gc, interned::Intern, volatile_tests::Volatile)]
#[derive(Default)]
pub(crate) struct DatabaseImpl {
runtime: salsa::Runtime<DatabaseImpl>,

tests/gc/interned.rs (new file, 112 lines)

@ -0,0 +1,112 @@
use crate::db;
use salsa::{Database, SweepStrategy};
/// Query group for testing how interned keys interact with GC.
#[salsa::query_group(Intern)]
pub(crate) trait InternDatabase {
/// A dummy input that can be used to trigger a new revision.
#[salsa::input]
fn dummy(&self) -> ();
/// Underlying interning query.
#[salsa::interned]
fn intern_str(&self, x: &'static str) -> u32;
/// This just executes the intern query and returns the result.
fn repeat_intern1(&self, x: &'static str) -> u32;
/// Same as `repeat_intern1`. =)
fn repeat_intern2(&self, x: &'static str) -> u32;
}
fn repeat_intern1(db: &impl InternDatabase, x: &'static str) -> u32 {
db.intern_str(x)
}
fn repeat_intern2(db: &impl InternDatabase, x: &'static str) -> u32 {
db.intern_str(x)
}
/// This test highlights the difference between *interned queries* and
/// other non-input queries -- in particular, their results are not
/// *deterministic*. Therefore, we cannot GC values that were created
/// in the current revision; that might cause us to re-execute the
/// query twice on the same key during the same revision, which could
/// yield different results each time, wreaking havoc. This test
/// exercises precisely that scenario.
#[test]
fn discard_during_same_revision() {
let db = db::DatabaseImpl::default();
// This will assign index 0 for "foo".
let foo1a = db.repeat_intern1("foo");
// If we are not careful, this would remove the interned key for
// "foo".
db.query(InternStrQuery).sweep(
SweepStrategy::default()
.discard_everything()
.sweep_all_revisions(),
);
// This would then reuse index 0 for "bar".
let bar1 = db.intern_str("bar");
// And here we would assign index *1* to "foo".
let foo2 = db.repeat_intern2("foo");
// But we would still have a cached result, *from the same
// revision*, with the value 0. So that's inconsistent.
let foo1b = db.repeat_intern1("foo");
assert_ne!(foo2, bar1);
assert_eq!(foo1a, foo1b);
assert_eq!(foo1b, foo2);
}
/// In contrast to the previous test, keys interned in *earlier*
/// revisions can be collected: when sweeping with
/// `SweepStrategy::discard_outdated()`, any key that was not accessed
/// in the current revision is freed and its index reused, while keys
/// that were accessed this revision are retained. This test exercises
/// precisely that scenario.
#[test]
fn discard_outdated() {
let mut db = db::DatabaseImpl::default();
let foo_from_rev0 = db.repeat_intern1("foo");
let bar_from_rev0 = db.repeat_intern1("bar");
// Trigger a new revision.
db.set_dummy(());
// In this revision, we use "bar".
let bar_from_rev1 = db.repeat_intern1("bar");
// This should collect "foo".
db.sweep_all(SweepStrategy::discard_outdated());
// This should be the same as before the GC, as bar
// is not outdated.
let bar2_from_rev1 = db.repeat_intern1("bar");
// This should re-use the index of "foo".
let baz_from_rev1 = db.repeat_intern1("baz");
// This should assign the next index to "foo".
let foo_from_rev1 = db.repeat_intern1("foo");
assert_eq!(bar_from_rev0, bar_from_rev1);
assert_eq!(bar_from_rev0, bar2_from_rev1);
assert_eq!(foo_from_rev0, baz_from_rev1);
assert_ne!(foo_from_rev0, foo_from_rev1);
assert_ne!(foo_from_rev1, bar_from_rev1);
assert_ne!(foo_from_rev1, baz_from_rev1);
assert_eq!(db.lookup_intern_str(foo_from_rev1), "foo");
assert_eq!(db.lookup_intern_str(bar_from_rev1), "bar");
assert_eq!(db.lookup_intern_str(baz_from_rev1), "baz");
}


@ -13,5 +13,7 @@ mod db;
mod derived_tests;
mod discard_values;
mod group;
mod interned;
mod log;
mod shallow_constant_tests;
mod volatile_tests;

tests/gc/volatile_tests.rs (new file, 72 lines)

@ -0,0 +1,72 @@
use crate::db;
use salsa::{Database, SweepStrategy};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
/// Query group for testing how volatile queries interact with GC.
#[salsa::query_group(Volatile)]
pub(crate) trait VolatileDatabase {
#[salsa::input]
fn atomic_cell(&self) -> Arc<AtomicUsize>;
/// Underlying volatile query.
#[salsa::volatile]
fn volatile(&self) -> usize;
/// This just executes the volatile query and returns the result.
fn repeat1(&self) -> usize;
/// Same as `repeat1`. =)
fn repeat2(&self) -> usize;
}
fn volatile(db: &impl VolatileDatabase) -> usize {
db.atomic_cell().load(Ordering::SeqCst)
}
fn repeat1(db: &impl VolatileDatabase) -> usize {
db.volatile()
}
fn repeat2(db: &impl VolatileDatabase) -> usize {
db.volatile()
}
#[test]
fn consistency_no_gc() {
let mut db = db::DatabaseImpl::default();
let cell = Arc::new(AtomicUsize::new(22));
db.set_atomic_cell(cell.clone());
let v1 = db.repeat1();
cell.store(23, Ordering::SeqCst);
let v2 = db.repeat2();
assert_eq!(v1, v2);
}
#[test]
fn consistency_with_gc() {
let mut db = db::DatabaseImpl::default();
let cell = Arc::new(AtomicUsize::new(22));
db.set_atomic_cell(cell.clone());
let v1 = db.repeat1();
cell.store(23, Ordering::SeqCst);
db.query(VolatileQuery).sweep(
SweepStrategy::default()
.discard_everything()
.sweep_all_revisions(),
);
let v2 = db.repeat2();
assert_eq!(v1, v2);
}

tests/interned.rs (new file, 86 lines)

@ -0,0 +1,86 @@
//! Test basic `#[salsa::interned]` query functionality and reverse lookup.
#[salsa::database(InternStorage)]
#[derive(Default)]
struct Database {
runtime: salsa::Runtime<Database>,
}
impl salsa::Database for Database {
fn salsa_runtime(&self) -> &salsa::Runtime<Database> {
&self.runtime
}
}
#[salsa::query_group(InternStorage)]
trait Intern {
#[salsa::interned]
fn intern1(&self, x: String) -> u32;
#[salsa::interned]
fn intern2(&self, x: String, y: String) -> u32;
#[salsa::interned]
fn intern_key(&self, x: String) -> InternKey;
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct InternKey(u32);
impl salsa::InternKey for InternKey {
fn from_u32(v: u32) -> Self {
InternKey(v)
}
fn as_u32(&self) -> u32 {
self.0
}
}
#[test]
fn test_intern1() {
let db = Database::default();
let foo0 = db.intern1(format!("foo"));
let bar0 = db.intern1(format!("bar"));
let foo1 = db.intern1(format!("foo"));
let bar1 = db.intern1(format!("bar"));
assert_eq!(foo0, foo1);
assert_eq!(bar0, bar1);
assert_ne!(foo0, bar0);
assert_eq!(format!("foo"), db.lookup_intern1(foo0));
assert_eq!(format!("bar"), db.lookup_intern1(bar0));
}
#[test]
fn test_intern2() {
let db = Database::default();
let foo0 = db.intern2(format!("x"), format!("foo"));
let bar0 = db.intern2(format!("x"), format!("bar"));
let foo1 = db.intern2(format!("x"), format!("foo"));
let bar1 = db.intern2(format!("x"), format!("bar"));
assert_eq!(foo0, foo1);
assert_eq!(bar0, bar1);
assert_ne!(foo0, bar0);
assert_eq!((format!("x"), format!("foo")), db.lookup_intern2(foo0));
assert_eq!((format!("x"), format!("bar")), db.lookup_intern2(bar0));
}
#[test]
fn test_intern_key() {
let db = Database::default();
let foo0 = db.intern_key(format!("foo"));
let bar0 = db.intern_key(format!("bar"));
let foo1 = db.intern_key(format!("foo"));
let bar1 = db.intern_key(format!("bar"));
assert_eq!(foo0, foo1);
assert_eq!(bar0, bar1);
assert_ne!(foo0, bar0);
assert_eq!(format!("foo"), db.lookup_intern_key(foo0));
assert_eq!(format!("bar"), db.lookup_intern_key(bar0));
}