mirror of
https://github.com/salsa-rs/salsa.git
synced 2025-01-13 00:40:22 +00:00
fix some typos
This commit is contained in:
parent
5ce4662b81
commit
18507f2ff7
10 changed files with 12 additions and 12 deletions
|
@ -263,7 +263,7 @@ let w2 = Word::new(db, "bar".to_string());
|
|||
let w3 = Word::new(db, "foo".to_string());
|
||||
```
|
||||
|
||||
When you create two interned structs with the same field values, you are guaranted to get back the same integer id. So here, we know that `assert_eq!(w1, w3)` is true and `assert_ne!(w1, w2)`.
|
||||
When you create two interned structs with the same field values, you are guaranteed to get back the same integer id. So here, we know that `assert_eq!(w1, w3)` is true and `assert_ne!(w1, w2)`.
|
||||
|
||||
You can access the fields of an interned struct using a getter, like `word.text(db)`. These getters respect the `#[return_ref]` annotation. Like tracked structs, the fields of interned structs are immutable.
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ Then open a PR with a subject line that starts with "RFC:".
|
|||
|
||||
## RFC vs Implementation
|
||||
|
||||
The RFC can be in its own PR, or it can also includ work on the implementation
|
||||
The RFC can be in its own PR, or it can also include work on the implementation
|
||||
together, whatever works best for you.
|
||||
|
||||
## Does my change need an RFC?
|
||||
|
|
|
@ -40,7 +40,7 @@ See:
|
|||
## Cancellation
|
||||
|
||||
Queries that are no longer needed due to concurrent writes or changes in dependencies are cancelled
|
||||
by Salsa. Each accesss of an intermediate query is a potential cancellation point. cancellation is
|
||||
by Salsa. Each access of an intermediate query is a potential cancellation point. Cancellation is
|
||||
implemented via panicking, and Salsa internals are intended to be panic-safe.
|
||||
|
||||
If you have a query that contains a long loop which does not execute any intermediate queries,
|
||||
|
|
|
@ -28,7 +28,7 @@ In addition to the struct itself, we must add an impl of `salsa::Database`:
|
|||
{{#include ../../../calc-example/calc/src/db.rs:db_impl}}
|
||||
```
|
||||
|
||||
## Impementing the `salsa::ParallelDatabase` trait
|
||||
## Implementing the `salsa::ParallelDatabase` trait
|
||||
|
||||
If you want to permit accessing your database from multiple threads at once, then you also need to implement the `ParallelDatabase` trait:
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ fn tracked_fn(args: Args, item_fn: syn::ItemFn) -> syn::Result<TokenStream> {
|
|||
if requires_interning(&item_fn) {
|
||||
return Err(syn::Error::new(
|
||||
s.span(),
|
||||
"tracked functon takes too many argments to have its value set with `specify`",
|
||||
"tracked function takes too many arguments to have its value set with `specify`",
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -606,7 +606,7 @@ fn accumulated_fn(
|
|||
/// * the name of the database argument
|
||||
/// * the name(s) of the key arguments
|
||||
fn fn_args(item_fn: &syn::ItemFn) -> syn::Result<(proc_macro2::Ident, Vec<proc_macro2::Ident>)> {
|
||||
// Check that we have no receiver and that all argments have names
|
||||
// Check that we have no receiver and that all arguments have names
|
||||
if item_fn.sig.inputs.is_empty() {
|
||||
return Err(syn::Error::new(
|
||||
item_fn.sig.span(),
|
||||
|
|
|
@ -84,7 +84,7 @@ pub trait Configuration {
|
|||
type SalsaStruct: for<'db> SalsaStructInDb<DynDb<'db, Self>>;
|
||||
|
||||
/// What key is used to index the memo. Typically a salsa struct id,
|
||||
/// but if this memoized function has multiple argments it will be a `salsa::Id`
|
||||
/// but if this memoized function has multiple arguments it will be a `salsa::Id`
|
||||
/// that results from interning those arguments.
|
||||
type Key: AsId;
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ where
|
|||
|
||||
/// Specify the value for `key` but do not record it as an output.
|
||||
/// This is used for the value fields declared on a tracked struct.
|
||||
/// They are different from other calls to specify beacuse we KNOW they will be given a value by construction,
|
||||
/// They are different from other calls to specify because we KNOW they will be given a value by construction,
|
||||
/// so recording them as an explicit output (and checking them for validity, etc) is pure overhead.
|
||||
pub fn specify_field<'db>(&self, db: &'db DynDb<'db, C>, key: C::Key, value: C::Value)
|
||||
where
|
||||
|
|
|
@ -159,7 +159,7 @@ impl Runtime {
|
|||
/// entity table with the index `entity_index`. Has the following effects:
|
||||
///
|
||||
/// * Add a query read on `DatabaseKeyIndex::for_table(entity_index)`
|
||||
/// * Indentify a unique disambiguator for the hash within the current query,
|
||||
/// * Identify a unique disambiguator for the hash within the current query,
|
||||
/// adding the hash to the current query's disambiguator table.
|
||||
/// * Return that hash + id of the current query.
|
||||
pub(crate) fn disambiguate_entity(
|
||||
|
|
|
@ -136,7 +136,7 @@ where
|
|||
return;
|
||||
}
|
||||
|
||||
// Otherwise, wait until some other storage entites have dropped.
|
||||
// Otherwise, wait until some other storage entities have dropped.
|
||||
// We create a mutex here because the cvar api requires it, but we
|
||||
// don't really need one as the data being protected is actually
|
||||
// the jars above.
|
||||
|
|
|
@ -463,12 +463,12 @@ pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> {
|
|||
/// Name of the query method (e.g., `foo`)
|
||||
const QUERY_NAME: &'static str;
|
||||
|
||||
/// Extact storage for this query from the storage for its group.
|
||||
/// Extract storage for this query from the storage for its group.
|
||||
fn query_storage<'a>(
|
||||
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
|
||||
) -> &'a Arc<Self::Storage>;
|
||||
|
||||
/// Extact storage for this query from the storage for its group.
|
||||
/// Extract storage for this query from the storage for its group.
|
||||
fn query_storage_mut<'a>(
|
||||
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
|
||||
) -> &'a Arc<Self::Storage>;
|
||||
|
|
Loading…
Reference in a new issue