@@ -1,567 +1,3 @@
-#![doc = include_str!("../README.md")]
-
 pub mod entity;
-mod error;
-mod meta;
-pub mod model;
-pub mod query;
 pub mod schema;
-
-use entity::Entity;
-use meta::Metaschema;
-
-pub use microrm_macros::{make_index, Entity, Modelable};
-
-pub use error::Error;
-pub use query::{build::CompareOp, QueryInterface, WithID};
-pub use schema::Schema;
-
-pub mod prelude {
-    pub use crate::query::{Filterable, Resolvable, Settable};
-}
-
-use prelude::*;
-
-#[macro_export]
-macro_rules! value_list {
-    ( $( $element:expr ),* ) => {
-        [ $( &($element) as &dyn $crate::model::Modelable ),* ]
-    }
-}
-
-// no need to show the re-exports in the documentation
-#[doc(hidden)]
-pub mod re_export {
-    pub use lazy_static;
-    pub use serde;
-    pub use serde_json;
-    pub use sqlite;
-}
-
-#[derive(Debug)]
-pub enum DBError {
-    ConnectFailure,
-    EarlyFailure(sqlite::Error),
-    NoSchema,
-    DifferentSchema,
-    DropFailure,
-    CreateFailure,
-    SanityCheckFailure,
-    InternalFailure(crate::Error),
-}
-
-impl From<crate::Error> for DBError {
-    fn from(err: crate::Error) -> Self {
-        Self::InternalFailure(err)
-    }
-}
-
-#[derive(PartialEq, Debug)]
-pub enum CreateMode {
-    /// The database must exist and have a valid schema already
-    MustExist,
-    /// It's fine if the database doesn't exist, but it must have a valid schema if it does
-    AllowNewDatabase,
-    /// Nuke the contents if need be, just get the database
-    AllowSchemaUpdate,
-}
-
-impl std::fmt::Display for DBError {
-    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
-        fmt.write_fmt(format_args!("Database error: {:?}", self))
-    }
-}
-
-impl std::error::Error for DBError {}
-
-/// SQLite database connection
-pub struct DB {
-    conn: sqlite::Connection,
-    schema_hash: String,
-    schema: schema::Schema,
-}
-
-impl DB {
-    pub fn new(schema: schema::Schema, path: &str, mode: CreateMode) -> Result<Self, DBError> {
-        Self::from_connection(
-            sqlite::Connection::open(path).map_err(|_| DBError::ConnectFailure)?,
-            schema,
-            mode,
-        )
-    }
-
-    /// Mostly for use in tests, but may be useful in some applications as well.
-    pub fn new_in_memory(schema: schema::Schema) -> Result<Self, DBError> {
-        Self::from_connection(
-            sqlite::Connection::open(":memory:").map_err(|_| DBError::ConnectFailure)?,
-            schema,
-            CreateMode::AllowNewDatabase,
-        )
-    }
-
-    /// Get a query interface for this DB connection
-    pub fn query_interface(&self) -> query::QueryInterface {
-        query::QueryInterface::new(self)
-    }
-
-    pub fn recreate_schema(&self) -> Result<(), DBError> {
-        self.create_schema()
-    }
-
-    fn from_connection(
-        conn: sqlite::Connection,
-        schema: schema::Schema,
-        mode: CreateMode,
-    ) -> Result<Self, DBError> {
-        let sig = Self::calculate_schema_hash(&schema);
-        let ret = Self {
-            conn,
-            schema_hash: sig,
-            schema: schema.add::<meta::Metaschema>(),
-        };
-        ret.check_schema(mode)?;
-        Ok(ret)
-    }
-
-    fn calculate_schema_hash(schema: &schema::Schema) -> String {
-        use sha2::Digest;
-
-        let mut hasher = sha2::Sha256::new();
-        schema
-            .drop()
-            .iter()
-            .map(|sql| hasher.update(sql.as_bytes()))
-            .count();
-        schema
-            .create()
-            .iter()
-            .map(|sql| hasher.update(sql.as_bytes()))
-            .count();
-
-        base64::encode(hasher.finalize())
-    }
-
-    fn check_schema(&self, mode: CreateMode) -> Result<(), DBError> {
-        let mut has_metaschema = false;
-        self.conn
-            .iterate(
-                format!(
-                    "SELECT * FROM \"sqlite_master\" WHERE \"type\"='table' AND \"name\"='{}'",
-                    Metaschema::table_name()
-                ),
-                |_row| {
-                    has_metaschema = true;
-                    true
-                },
-            )
-            .map_err(DBError::EarlyFailure)?;
-
-        if !has_metaschema && mode != CreateMode::MustExist {
-            return self.create_schema();
-        } else if !has_metaschema && mode == CreateMode::MustExist {
-            return Err(DBError::NoSchema);
-        }
-
-        let qi = query::QueryInterface::new(self);
-        let hash: Option<WithID<Metaschema>> =
-            qi.get().by(meta::Metaschema::Key, "schema_hash").one()?;
-
-        if hash.is_none() {
-            if mode == CreateMode::MustExist {
-                return Err(DBError::NoSchema);
-            }
-            return self.create_schema();
-        } else if hash.unwrap().value != self.schema_hash {
-            if mode != CreateMode::AllowSchemaUpdate {
-                return Err(DBError::DifferentSchema);
-            }
-            self.drop_schema()?;
-            return self.create_schema();
-        }
-
-        Ok(())
-    }
-
-    fn drop_schema(&self) -> Result<(), DBError> {
-        for ds in self.schema.drop() {
-            self.conn.execute(ds).map_err(|_| DBError::DropFailure)?;
-        }
-        Ok(())
-    }
-
-    fn create_schema(&self) -> Result<(), DBError> {
-        for cs in self.schema.create() {
-            self.conn.execute(cs).map_err(|_| DBError::CreateFailure)?;
-        }
-
-        let qi = query::QueryInterface::new(self);
-
-        let add_result = qi.add(&meta::Metaschema {
-            key: "schema_hash".to_string(),
-            value: self.schema_hash.clone(),
-        });
-
-        assert!(add_result.is_ok());
-
-        let sanity_check = qi.get().by(meta::Metaschema::Key, "schema_hash").one();
-        assert!(sanity_check.is_ok() && sanity_check.as_ref().unwrap().is_some());
-        assert_eq!(sanity_check.unwrap().unwrap().value, self.schema_hash);
-
-        Ok(())
-    }
-}
-
-/// Add support for multi-threading to a `DB`.
-///
-/// This is a thread-local cache that carefully maintains the property that no
-/// element of the cache will ever be accessed in any way from another thread. The only
-/// way to maintain this property is to leak all data, so this is best used
-/// in lightly-threaded programs (or at least a context where threads are long-lived).
-/// All cached values are assumed to use interior mutability where needed to maintain state.
-///
-/// This approach ensures that all items can live for the provided lifetime `'l`.
-pub struct DBPool<'a> {
-    // normally DB is not Send because the raw sqlite ptr is not Send
-    // however we assume sqlite is operating in serialized mode, which means
-    // that it is in fact both `Send` and `Sync`
-    db: &'a DB,
-    // we carefully maintain the invariant here that only the thread with the given `ThreadId`
-    // accesses the QueryInterface part of the pair, which means that despite the fact that
-    // QueryInterface is neither Send nor Sync can be dismissed in this Send and Sync container
-    qi: std::sync::RwLock<Vec<(std::thread::ThreadId, &'a QueryInterface<'a>)>>,
-}
-
-impl<'a> DBPool<'a> {
-    pub fn new(db: &'a DB) -> Self {
-        Self {
-            db,
-            qi: std::sync::RwLock::new(Vec::new()),
-        }
-    }
-
-    /// Get a query interface from this DB pool for the current thread
-    pub fn query_interface(&self) -> &query::QueryInterface<'a> {
-        let guard = self.qi.read().expect("Couldn't acquire read lock");
-        let current_id = std::thread::current().id();
-        if let Some(res) = guard
-            .iter()
-            .find_map(|x| if x.0 == current_id { Some(x.1) } else { None })
-        {
-            return res;
-        }
-
-        drop(guard);
-        let mut guard = self.qi.write().expect("Couldn't acquire write lock");
-        guard.push((current_id, Box::leak(Box::new(self.db.query_interface()))));
-        drop(guard);
-
-        self.query_interface()
-    }
-}
-
-/// We carefully implement `DBPool` so that it is `Send`.
-unsafe impl<'a> Send for DBPool<'a> {}
-/// We carefully implement `DBPool` so that it is `Sync`.
-unsafe impl<'a> Sync for DBPool<'a> {}
-
-#[cfg(test)]
-mod pool_test {
-    trait IsSend: Send {}
-    impl IsSend for super::DB {}
-    impl<'a> IsSend for super::DBPool<'a> {}
-    // we make sure that DBPool is send / sync safe
-    trait IsSendAndSync: Send + Sync {}
-    impl<'a> IsSendAndSync for super::DBPool<'a> {}
-}
-
-#[cfg(test)]
-mod test_support {
-    // use crate::prelude::*;
-
-    #[derive(Debug, crate::Entity, serde::Serialize, serde::Deserialize)]
-    #[microrm_internal]
-    pub struct KVStore {
-        pub key: String,
-        pub value: String,
-    }
-
-    // pub const SCHEMA : crate::Schema = crate::Schema::new().entity::<KVStore>();
-
-    pub fn random_filename() -> std::path::PathBuf {
-        use rand::prelude::Distribution;
-        let dist = rand::distributions::Uniform::new('a', 'z');
-        let mut db_filename = std::env::temp_dir();
-        let mut rng = rand::thread_rng();
-        db_filename.push(format!("microrm-{}.db", (0..16).map(|_| dist.sample(&mut rng)).collect::<String>()));
-        db_filename
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use crate::prelude::*;
-
-    use super::DB;
-
-    #[derive(serde::Serialize, serde::Deserialize, crate::Entity)]
-    #[microrm_internal]
-    pub struct S1 {
-        an_id: i32,
-    }
-
-    fn simple_schema() -> crate::Schema {
-        crate::Schema::new().add::<S1>()
-    }
-
-    #[test]
-    fn in_memory_schema() {
-        let _db = DB::new_in_memory(simple_schema());
-        drop(_db);
-    }
-
-    #[derive(serde::Serialize, serde::Deserialize, crate::Entity)]
-    #[microrm_internal]
-    pub struct S2 {
-        #[microrm_foreign]
-        parent_id: S1ID,
-    }
-
-    #[test]
-    fn simple_foreign_key() {
-        let db = DB::new_in_memory(crate::Schema::new().add::<S1>().add::<S2>())
-            .expect("Can't connect to in-memory DB");
-        let qi = db.query_interface();
-
-        let id = qi.add(&S1 { an_id: -1 }).expect("Can't add S1");
-        let child_id = qi.add(&S2 { parent_id: id }).expect("Can't add S2");
-
-        qi.get()
-            .by(S2::ID, &child_id)
-            .one()
-            .expect("Can't get S2 instance");
-    }
-
-    microrm_macros::make_index_internal!(S2ParentIndex, S2::ParentId);
-}
-
-#[cfg(test)]
-mod test2 {
-    use crate::prelude::*;
-
-    #[derive(Debug, crate::Entity, serde::Serialize, serde::Deserialize)]
-    #[microrm_internal]
-    pub struct KVStore {
-        pub key: String,
-        pub value: String,
-    }
-
-    // the !KVStoreIndex here means a type representing a unique index named KVStoreIndex
-    microrm_macros::make_index_internal!(!KVStoreIndex, KVStore::Key);
-
-    #[test]
-    fn dump_test() {
-        let schema = crate::Schema::new()
-            .add::<KVStore>()
-            .index::<KVStoreIndex>();
-
-        // dump the schema in case you want to inspect it manually
-        for create_sql in schema.create() {
-            println!("{};", create_sql);
-        }
-
-        let db = crate::DB::new_in_memory(schema).unwrap();
-        let qi = db.query_interface();
-
-        qi.add(&KVStore {
-            key: "a_key".to_string(),
-            value: "a_value".to_string(),
-        })
-        .unwrap();
-
-        // because KVStoreIndex indexes key, this is a logarithmic lookup
-        let qr = qi.get().by(KVStore::Key, "a_key").one();
-
-        assert_eq!(qr.is_ok(), true);
-        assert_eq!(qr.as_ref().unwrap().is_some(), true);
-        assert_eq!(qr.as_ref().unwrap().as_ref().unwrap().key, "a_key");
-        assert_eq!(qr.as_ref().unwrap().as_ref().unwrap().value, "a_value");
-    }
-}
-
-#[cfg(test)]
-mod delete_test {
-    use crate::prelude::*;
-
-    #[derive(Debug, crate::Entity, serde::Serialize, serde::Deserialize)]
-    #[microrm_internal]
-    pub struct KVStore {
-        pub key: String,
-        pub value: String,
-    }
-
-    #[test]
-    fn delete_test() {
-        let schema = crate::Schema::new().entity::<KVStore>();
-
-        let db = crate::DB::new_in_memory(schema).unwrap();
-        let qi = db.query_interface();
-
-        qi.add(&KVStore {
-            key: "a".to_string(),
-            value: "a_value".to_string(),
-        })
-        .unwrap();
-
-        let insert_two = || {
-            qi.add(&KVStore {
-                key: "a".to_string(),
-                value: "a_value".to_string(),
-            })
-            .unwrap();
-
-            qi.add(&KVStore {
-                key: "a".to_string(),
-                value: "another_value".to_string(),
-            })
-            .unwrap();
-        };
-
-        assert!(qi.get().by(KVStore::Key, "a").one().is_ok());
-        assert!(qi.delete().by(KVStore::Key, "a").exec().is_ok());
-        assert!(qi.get().by(KVStore::Key, "a").one().unwrap().is_none());
-
-        insert_two();
-
-        let all = qi.get().by(KVStore::Key, "a").all();
-        assert!(all.is_ok());
-        assert_eq!(all.unwrap().len(), 2);
-
-        assert!(qi.delete().by(KVStore::Key, "b").exec().is_ok());
-
-        let all = qi.get().by(KVStore::Key, "a").all();
-        assert!(all.is_ok());
-        assert_eq!(all.unwrap().len(), 2);
-
-        assert!(qi
-            .delete()
-            .by(KVStore::Key, &"a")
-            .by(KVStore::Value, &"another_value")
-            .exec()
-            .is_ok());
-
-        let one = qi.get().by(KVStore::Key, "a").one().unwrap();
-        assert!(one.is_some());
-        assert_eq!(one.unwrap().value, "a_value");
-    }
-}
-
-#[cfg(test)]
-mod datatypes {
-    use crate::prelude::*;
-
-    #[derive(crate::Entity,serde::Serialize,serde::Deserialize,PartialEq,Debug)]
-    #[microrm_internal]
-    pub struct ValueStore {
-        pub b: bool,
-        pub i_8: i8,
-        pub u_8: u8,
-        pub i_16: i16,
-        pub u_16: u16,
-        pub i_32: i32,
-        pub u_32: u32,
-        pub i_64: i64,
-        pub u_64: u64,
-        pub s: String,
-        pub f_64: f64,
-    }
-
-
-    #[test]
-    fn store_load_datatypes() {
-        let schema = crate::Schema::new().entity::<ValueStore>();
-        let db = crate::DB::new_in_memory(schema).unwrap();
-
-        let test_values = ValueStore {
-            b: false,
-            i_8: 42i8,
-            u_8: 142u8,
-            i_16: 320i16,
-            u_16: 20000u16,
-            i_32: 1i32 << 20,
-            u_32: 3u32 << 30,
-            i_64: 1i64 << 40,
-            u_64: 3u64 << 62,
-            s: "this is a test".to_string(),
-            // e**pi
-            f_64: 23.140692632779263f64
-        };
-
-        let id = db.query_interface().add(&test_values).expect("failed to add ValueStore");
-
-        let all = db.query_interface().get().by_id(&id).all().expect("failed to get by id");
-        assert_eq!(all.len(), 1);
-        assert_eq!(all[0].as_ref(), &test_values);
-    }
-}
-
-#[cfg(test)]
-mod disk_tests {
-
-    use crate::prelude::*;
-
-    #[test]
-    fn store_and_load_kv() {
-        let path = crate::test_support::random_filename();
-        let path_str = path.clone().into_os_string().into_string().unwrap();
-
-        {
-            let schema = crate::Schema::new().entity::<crate::test_support::KVStore>();
-            let db = crate::DB::new(schema, &path_str, crate::CreateMode::AllowNewDatabase).unwrap();
-
-            db.query_interface().add(&crate::test_support::KVStore {
-                key: "key".into(),
-                value: "val".into()
-            }).expect("couldn't add");
-        }
-
-        {
-            let schema = crate::Schema::new().entity::<crate::test_support::KVStore>();
-            let db = crate::DB::new(schema, &path_str, crate::CreateMode::MustExist).unwrap();
-
-            let all = db.query_interface().get::<crate::test_support::KVStore>().all().expect("couldn't get all kv");
-
-            assert_eq!(all.len(), 1);
-        }
-
-        std::fs::remove_file(path).expect("Couldn't remove temporary file!");
-    }
-
-    #[test]
-    fn store_and_load_kv_dbp() {
-        let path = crate::test_support::random_filename();
-        let path_str = path.clone().into_os_string().into_string().unwrap();
-
-        {
-            let schema = crate::Schema::new().entity::<crate::test_support::KVStore>();
-            let db = crate::DB::new(schema, &path_str, crate::CreateMode::AllowNewDatabase).unwrap();
-            let dbp = crate::DBPool::new(&db);
-
-            dbp.query_interface().add(&crate::test_support::KVStore {
-                key: "key".into(),
-                value: "val".into()
-            }).expect("couldn't add");
-        }
-
-        {
-            let schema = crate::Schema::new().entity::<crate::test_support::KVStore>();
-            let db = crate::DB::new(schema, &path_str, crate::CreateMode::MustExist).unwrap();
-            let dbp = crate::DBPool::new(&db);
-
-            let all = dbp.query_interface().get::<crate::test_support::KVStore>().all().expect("couldn't get all kv");
-
-            assert_eq!(all.len(), 1);
-        }
-
-        std::fs::remove_file(path).expect("Couldn't remove temporary file!");
-    }
-}
+pub mod db;
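
For orientation: the hunk above removes the old monolithic lib.rs (the DB, DBError, CreateMode, and DBPool machinery plus its tests), leaving only the new `pub mod db;` declaration. The sketch below is reconstructed from the deleted test modules to show how the removed API surface was used; the `microrm::` paths and the omission of the crate-internal `#[microrm_internal]` attribute are assumptions about the public crate surface, and this hunk does not show where these items live after the move into the `db` module.

// Illustrative sketch only, mirroring the deleted test modules above.
use microrm::prelude::*;

#[derive(Debug, microrm::Entity, serde::Serialize, serde::Deserialize)]
pub struct KVStore {
    pub key: String,
    pub value: String,
}

fn main() {
    // Build a schema with one entity and open an in-memory database.
    let schema = microrm::Schema::new().entity::<KVStore>();
    let db = microrm::DB::new_in_memory(schema).expect("can't open in-memory DB");
    let qi = db.query_interface();

    // Insert a row, then read it back by column value.
    qi.add(&KVStore {
        key: "a_key".to_string(),
        value: "a_value".to_string(),
    })
    .expect("can't insert");

    let found = qi
        .get()
        .by(KVStore::Key, "a_key")
        .one()
        .expect("query failed")
        .expect("no row found");
    assert_eq!(found.value, "a_value");
}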