Added redb and moved workspaces to use the database; workspaces now use tags instead of environments.

This commit is contained in:
xyroscar
2025-11-26 16:38:11 -08:00
parent ce75694ffb
commit 0d23ffcaec
16 changed files with 1430 additions and 228 deletions

View File

@@ -0,0 +1,130 @@
//! Database initialization and management
use std::path::PathBuf;
use std::sync::Arc;
use directories::ProjectDirs;
use redb::Database as RedbDatabase;
use super::error::{DbError, DbResult};
use super::tables::*;
/// Main database wrapper
pub struct Database {
db: Arc<RedbDatabase>,
}
impl Database {
/// Create or open the database at the default application data directory
pub fn open() -> DbResult<Self> {
let path = Self::get_db_path()?;
// Ensure parent directory exists
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)?;
}
let db = RedbDatabase::create(&path)?;
let database = Self { db: Arc::new(db) };
// Initialize tables
database.init_tables()?;
Ok(database)
}
/// Open database at a specific path (useful for testing)
#[allow(dead_code)]
pub fn open_at(path: PathBuf) -> DbResult<Self> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)?;
}
let db = RedbDatabase::create(&path)?;
let database = Self { db: Arc::new(db) };
database.init_tables()?;
Ok(database)
}
/// Get the default database path
fn get_db_path() -> DbResult<PathBuf> {
let proj_dirs = ProjectDirs::from("com", "xyroscar", "resona")
.ok_or_else(|| DbError::Io(std::io::Error::new(
std::io::ErrorKind::NotFound,
"Could not determine application data directory",
)))?;
Ok(proj_dirs.data_dir().join("resona.redb"))
}
/// Initialize all tables
fn init_tables(&self) -> DbResult<()> {
let write_txn = self.db.begin_write()?;
// Create main tables
write_txn.open_table(WORKSPACES)?;
write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
write_txn.open_table(COLLECTIONS)?;
write_txn.open_table(REQUESTS)?;
write_txn.open_table(VARIABLES)?;
write_txn.open_table(APP_SETTINGS)?;
// Create index tables
write_txn.open_table(COLLECTIONS_BY_WORKSPACE)?;
write_txn.open_table(REQUESTS_BY_COLLECTION)?;
write_txn.open_table(REQUESTS_BY_WORKSPACE)?;
write_txn.open_table(VARIABLES_BY_SCOPE)?;
write_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
write_txn.commit()?;
Ok(())
}
/// Get a reference to the underlying redb database
#[allow(dead_code)]
pub fn inner(&self) -> &RedbDatabase {
&self.db
}
/// Begin a read transaction
pub fn begin_read(&self) -> DbResult<redb::ReadTransaction> {
Ok(self.db.begin_read()?)
}
/// Begin a write transaction
pub fn begin_write(&self) -> DbResult<redb::WriteTransaction> {
Ok(self.db.begin_write()?)
}
}
impl Clone for Database {
    /// Cloning is cheap: only the `Arc` handle is duplicated, so every clone
    /// shares the same underlying redb database.
    fn clone(&self) -> Self {
        let shared = Arc::clone(&self.db);
        Self { db: shared }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::env::temp_dir;
#[test]
fn test_database_creation() {
let path = temp_dir().join("resona_test.redb");
let _ = std::fs::remove_file(&path); // Clean up any previous test
let db = Database::open_at(path.clone()).expect("Failed to create database");
// Verify tables exist by attempting to read from them
let read_txn = db.begin_read().expect("Failed to begin read transaction");
let _ = read_txn.open_table(WORKSPACES).expect("Workspaces table should exist");
// Clean up
drop(db);
let _ = std::fs::remove_file(&path);
}
}

39
src-tauri/src/db/error.rs Normal file
View File

@@ -0,0 +1,39 @@
//! Database error types
use thiserror::Error;
#[derive(Error, Debug)]
pub enum DbError {
#[error("Database error: {0}")]
Database(#[from] redb::DatabaseError),
#[error("Storage error: {0}")]
Storage(#[from] redb::StorageError),
#[error("Table error: {0}")]
Table(#[from] redb::TableError),
#[error("Transaction error: {0}")]
Transaction(#[from] redb::TransactionError),
#[error("Commit error: {0}")]
Commit(#[from] redb::CommitError),
#[error("Not found: {0}")]
NotFound(String),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
}
pub type DbResult<T> = Result<T, DbError>;
// Implement conversion to tauri::Error for command returns
impl From<DbError> for String {
fn from(err: DbError) -> Self {
err.to_string()
}
}

11
src-tauri/src/db/mod.rs Normal file
View File

@@ -0,0 +1,11 @@
//! Database module for Resona
//!
//! This module handles all database operations using redb as the storage backend.
mod database;
mod error;
mod tables;
pub use database::Database;
pub use error::{DbError, DbResult};
pub use tables::*;

View File

@@ -0,0 +1,47 @@
//! Table definitions for redb
//!
//! All tables are defined here as constants for consistent access across the application.
use redb::TableDefinition;
/// Workspaces table: workspace_id -> workspace JSON
pub const WORKSPACES: TableDefinition<&str, &str> = TableDefinition::new("workspaces");
/// Workspace sync groups table: sync_group_id -> sync_group JSON
pub const WORKSPACE_SYNC_GROUPS: TableDefinition<&str, &str> =
TableDefinition::new("workspace_sync_groups");
/// Collections table: collection_id -> collection JSON
pub const COLLECTIONS: TableDefinition<&str, &str> = TableDefinition::new("collections");
/// Requests table: request_id -> request JSON
pub const REQUESTS: TableDefinition<&str, &str> = TableDefinition::new("requests");
/// Variables table: variable_id -> variable JSON
pub const VARIABLES: TableDefinition<&str, &str> = TableDefinition::new("variables");
/// App settings table: "settings" -> settings JSON (single row)
pub const APP_SETTINGS: TableDefinition<&str, &str> = TableDefinition::new("app_settings");
// Index tables for efficient lookups
/// Collections by workspace index: workspace_id -> collection_ids JSON array
pub const COLLECTIONS_BY_WORKSPACE: TableDefinition<&str, &str> =
TableDefinition::new("idx_collections_by_workspace");
/// Requests by collection index: collection_id -> request_ids JSON array
pub const REQUESTS_BY_COLLECTION: TableDefinition<&str, &str> =
TableDefinition::new("idx_requests_by_collection");
/// Requests by workspace (standalone) index: workspace_id -> request_ids JSON array
pub const REQUESTS_BY_WORKSPACE: TableDefinition<&str, &str> =
TableDefinition::new("idx_requests_by_workspace");
/// Variables by scope index: scope_key -> variable_ids JSON array
/// scope_key format: "global", "workspace:{id}", "collection:{id}", "request:{id}"
pub const VARIABLES_BY_SCOPE: TableDefinition<&str, &str> =
TableDefinition::new("idx_variables_by_scope");
/// Workspaces by sync group index: sync_group_id -> workspace_ids JSON array
pub const WORKSPACES_BY_SYNC_GROUP: TableDefinition<&str, &str> =
TableDefinition::new("idx_workspaces_by_sync_group");

View File

@@ -1,14 +1,44 @@
// Learn more about Tauri commands at https://tauri.app/develop/calling-rust/
#[tauri::command]
/// Demo command: build a greeting for `name` and return it to the frontend.
fn greet(name: &str) -> String {
    let mut message = String::from("Hello, ");
    message.push_str(name);
    message.push_str("! You've been greeted from Rust!");
    message
}
// Resona - API Client Application
mod db;
mod workspaces;
use db::Database;
// Re-export workspace commands for generate_handler macro
use workspaces::{
add_workspace_to_sync_group, create_sync_group, create_workspace, delete_sync_group,
delete_workspace, get_sync_group, get_sync_group_for_workspace, get_sync_groups,
get_workspace, get_workspaces, get_workspaces_by_sync_group, remove_workspace_from_sync_group,
update_sync_group, update_workspace,
};
#[cfg_attr(mobile, tauri::mobile_entry_point)]
/// Application entry point: open the database, register it as managed
/// state, wire up all commands, and start the Tauri event loop.
pub fn run() {
    // Initialize the database before building the app so every command can
    // access it via `State<Database>`.
    let db = Database::open().expect("Failed to initialize database");
    tauri::Builder::default()
        .plugin(tauri_plugin_opener::init())
        .manage(db)
        // BUG FIX: `invoke_handler` was called twice; the second call
        // replaces the first, which silently dropped the `greet` command.
        // All commands are now registered in a single handler list.
        .invoke_handler(tauri::generate_handler![
            greet,
            // Workspace commands
            get_workspaces,
            get_workspace,
            create_workspace,
            update_workspace,
            delete_workspace,
            // Sync group commands
            get_sync_groups,
            get_sync_group,
            get_sync_group_for_workspace,
            create_sync_group,
            update_sync_group,
            delete_sync_group,
            get_workspaces_by_sync_group,
            add_workspace_to_sync_group,
            remove_workspace_from_sync_group,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

View File

@@ -0,0 +1,143 @@
//! Tauri commands for operations on workspace
use tauri::State;
use crate::db::Database;
use super::types::{
CreateSyncGroupInput, CreateWorkspaceInput, UpdateSyncGroupInput, UpdateWorkspaceInput,
Workspace, WorkspaceSyncGroup,
};
use super::workspace::WorkspaceService;
/// Get all workspaces, sorted by name.
#[tauri::command]
pub fn get_workspaces(db: State<Database>) -> Result<Vec<Workspace>, String> {
    WorkspaceService::new(db.inner().clone())
        .get_all()
        .map_err(|e| e.to_string())
}

/// Get a workspace by ID.
#[tauri::command]
pub fn get_workspace(db: State<Database>, id: String) -> Result<Workspace, String> {
    WorkspaceService::new(db.inner().clone())
        .get(&id)
        .map_err(|e| e.to_string())
}

/// Create a new workspace from the given input.
#[tauri::command]
pub fn create_workspace(
    db: State<Database>,
    input: CreateWorkspaceInput,
) -> Result<Workspace, String> {
    WorkspaceService::new(db.inner().clone())
        .create(input)
        .map_err(|e| e.to_string())
}

/// Update an existing workspace; only the fields present in `input` change.
#[tauri::command]
pub fn update_workspace(
    db: State<Database>,
    input: UpdateWorkspaceInput,
) -> Result<Workspace, String> {
    WorkspaceService::new(db.inner().clone())
        .update(input)
        .map_err(|e| e.to_string())
}

/// Delete a workspace by ID.
#[tauri::command]
pub fn delete_workspace(db: State<Database>, id: String) -> Result<(), String> {
    WorkspaceService::new(db.inner().clone())
        .delete(&id)
        .map_err(|e| e.to_string())
}
/// Get all sync groups.
#[tauri::command]
pub fn get_sync_groups(db: State<Database>) -> Result<Vec<WorkspaceSyncGroup>, String> {
    WorkspaceService::new(db.inner().clone())
        .get_all_sync_groups()
        .map_err(|e| e.to_string())
}

/// Get a sync group by ID.
#[tauri::command]
pub fn get_sync_group(db: State<Database>, id: String) -> Result<WorkspaceSyncGroup, String> {
    WorkspaceService::new(db.inner().clone())
        .get_sync_group(&id)
        .map_err(|e| e.to_string())
}

/// Get the sync group a workspace belongs to, if any.
#[tauri::command]
pub fn get_sync_group_for_workspace(
    db: State<Database>,
    workspace_id: String,
) -> Result<Option<WorkspaceSyncGroup>, String> {
    WorkspaceService::new(db.inner().clone())
        .get_sync_group_for_workspace(&workspace_id)
        .map_err(|e| e.to_string())
}

/// Create a new sync group and link the listed workspaces to it.
#[tauri::command]
pub fn create_sync_group(
    db: State<Database>,
    input: CreateSyncGroupInput,
) -> Result<WorkspaceSyncGroup, String> {
    WorkspaceService::new(db.inner().clone())
        .create_sync_group(input)
        .map_err(|e| e.to_string())
}

/// Update an existing sync group; only the fields present in `input` change.
#[tauri::command]
pub fn update_sync_group(
    db: State<Database>,
    input: UpdateSyncGroupInput,
) -> Result<WorkspaceSyncGroup, String> {
    WorkspaceService::new(db.inner().clone())
        .update_sync_group(input)
        .map_err(|e| e.to_string())
}

/// Delete a sync group and unlink its workspaces.
#[tauri::command]
pub fn delete_sync_group(db: State<Database>, id: String) -> Result<(), String> {
    WorkspaceService::new(db.inner().clone())
        .delete_sync_group(&id)
        .map_err(|e| e.to_string())
}
/// List the workspaces belonging to a sync group.
#[tauri::command]
pub fn get_workspaces_by_sync_group(
    db: State<Database>,
    sync_group_id: String,
) -> Result<Vec<Workspace>, String> {
    WorkspaceService::new(db.inner().clone())
        .get_workspaces_by_sync_group(&sync_group_id)
        .map_err(|e| e.to_string())
}

/// Add a workspace to a sync group (idempotent if already a member).
#[tauri::command]
pub fn add_workspace_to_sync_group(
    db: State<Database>,
    sync_group_id: String,
    workspace_id: String,
) -> Result<(), String> {
    WorkspaceService::new(db.inner().clone())
        .add_workspace_to_sync_group(&sync_group_id, &workspace_id)
        .map_err(|e| e.to_string())
}

/// Remove a workspace from a sync group.
#[tauri::command]
pub fn remove_workspace_from_sync_group(
    db: State<Database>,
    sync_group_id: String,
    workspace_id: String,
) -> Result<(), String> {
    WorkspaceService::new(db.inner().clone())
        .remove_workspace_from_sync_group(&sync_group_id, &workspace_id)
        .map_err(|e| e.to_string())
}

View File

@@ -0,0 +1,21 @@
//! Workspaces module
//!
//! Handles workspace management including CRUD operations and sync groups.
mod commands;
mod types;
mod workspace;
// Re-export commands for use in lib.rs
pub use commands::*;
// Re-export types for external use (frontend bindings)
#[allow(unused_imports)]
pub use types::{
CreateSyncGroupInput, CreateWorkspaceInput, UpdateSyncGroupInput, UpdateWorkspaceInput,
Workspace, WorkspaceSyncGroup,
};
// WorkspaceService is used internally by commands
#[allow(unused_imports)]
pub(crate) use workspace::WorkspaceService;

View File

@@ -0,0 +1,97 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct Workspace {
pub id: String,
pub name: String,
pub description: String,
#[serde(default)]
pub tags: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_group_id: Option<String>,
#[serde(default = "Utc::now")]
pub created_at: DateTime<Utc>,
#[serde(default = "Utc::now")]
pub updated_at: DateTime<Utc>,
}
impl Workspace {
    /// Build a fresh workspace: random UUID, no tags, no sync group, and
    /// identical created/updated timestamps.
    pub fn new(name: String, description: String) -> Self {
        let timestamp = Utc::now();
        Self {
            id: Uuid::new_v4().to_string(),
            sync_group_id: None,
            tags: Vec::new(),
            created_at: timestamp,
            updated_at: timestamp,
            name,
            description,
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateWorkspaceInput {
pub name: String,
pub description: String,
#[serde(default)]
pub tags: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateWorkspaceInput {
pub id: String,
pub name: Option<String>,
pub description: Option<String>,
pub tags: Option<Vec<String>>,
pub sync_group_id: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkspaceSyncGroup {
pub id: String,
pub name: String,
pub workspace_ids: Vec<String>,
pub synced_variable_names: Vec<String>,
pub sync_secrets: bool,
#[serde(default = "Utc::now")]
pub created_at: DateTime<Utc>,
#[serde(default = "Utc::now")]
pub updated_at: DateTime<Utc>,
}
impl WorkspaceSyncGroup {
    /// Build a fresh sync group with a random UUID, the given members, no
    /// synced variables, secrets disabled, and matching timestamps.
    pub fn new(name: String, workspace_ids: Vec<String>) -> Self {
        let timestamp = Utc::now();
        Self {
            id: Uuid::new_v4().to_string(),
            synced_variable_names: Vec::new(),
            sync_secrets: false,
            created_at: timestamp,
            updated_at: timestamp,
            name,
            workspace_ids,
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateSyncGroupInput {
pub name: String,
pub workspace_ids: Vec<String>,
#[serde(default)]
pub synced_variable_names: Vec<String>,
#[serde(default)]
pub sync_secrets: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateSyncGroupInput {
pub id: String,
pub name: Option<String>,
pub synced_variable_names: Option<Vec<String>>,
pub sync_secrets: Option<bool>,
}

View File

@@ -0,0 +1,572 @@
use chrono::Utc;
use redb::ReadableTable;
use crate::db::{
Database, DbError, DbResult, WORKSPACES, WORKSPACE_SYNC_GROUPS, WORKSPACES_BY_SYNC_GROUP,
};
use super::types::{
CreateSyncGroupInput, CreateWorkspaceInput, UpdateSyncGroupInput, UpdateWorkspaceInput,
Workspace, WorkspaceSyncGroup,
};
pub struct WorkspaceService {
db: Database,
}
impl WorkspaceService {
pub fn new(db: Database) -> Self {
Self { db }
}
pub fn get_all(&self) -> DbResult<Vec<Workspace>> {
let read_txn = self.db.begin_read()?;
let table = read_txn.open_table(WORKSPACES)?;
let mut workspaces = Vec::new();
for entry in table.iter()? {
let (_, value) = entry?;
let workspace: Workspace = serde_json::from_str(value.value())
.map_err(|e| DbError::Serialization(e.to_string()))?;
workspaces.push(workspace);
}
workspaces.sort_by(|a, b| a.name.cmp(&b.name));
Ok(workspaces)
}
pub fn get(&self, id: &str) -> DbResult<Workspace> {
let read_txn = self.db.begin_read()?;
let table = read_txn.open_table(WORKSPACES)?;
let value = table
.get(id)?
.ok_or_else(|| DbError::NotFound(format!("Workspace not found: {}", id)))?;
let workspace: Workspace = serde_json::from_str(value.value())
.map_err(|e| DbError::Serialization(e.to_string()))?;
Ok(workspace)
}
pub fn create(&self, input: CreateWorkspaceInput) -> DbResult<Workspace> {
let mut workspace = Workspace::new(input.name, input.description);
workspace.tags = input.tags;
let json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let write_txn = self.db.begin_write()?;
{
let mut table = write_txn.open_table(WORKSPACES)?;
table.insert(workspace.id.as_str(), json.as_str())?;
}
write_txn.commit()?;
Ok(workspace)
}
pub fn update(&self, input: UpdateWorkspaceInput) -> DbResult<Workspace> {
let mut workspace = self.get(&input.id)?;
if let Some(name) = input.name {
workspace.name = name;
}
if let Some(description) = input.description {
workspace.description = description;
}
if let Some(tags) = input.tags {
workspace.tags = tags;
}
if let Some(sync_group_id) = input.sync_group_id {
workspace.sync_group_id = Some(sync_group_id);
}
workspace.updated_at = Utc::now();
// Write back
let json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let write_txn = self.db.begin_write()?;
{
let mut table = write_txn.open_table(WORKSPACES)?;
table.insert(workspace.id.as_str(), json.as_str())?;
}
write_txn.commit()?;
Ok(workspace)
}
/// Delete a workspace
pub fn delete(&self, id: &str) -> DbResult<()> {
// First get the workspace to check sync_group_id
let workspace = self.get(id)?;
let sync_group_id = workspace.sync_group_id.clone();
let write_txn = self.db.begin_write()?;
// Remove from workspaces table
{
let mut table = write_txn.open_table(WORKSPACES)?;
table.remove(id)?;
}
// Remove from sync group index if applicable
if let Some(group_id) = sync_group_id {
self.remove_from_sync_index(&write_txn, &group_id, id)?;
}
write_txn.commit()?;
Ok(())
}
/// Helper to remove a workspace ID from the sync group index
fn remove_from_sync_index(
&self,
write_txn: &redb::WriteTransaction,
group_id: &str,
workspace_id: &str,
) -> DbResult<()> {
let mut idx_table = write_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
// Read current IDs
let ids_json = match idx_table.get(group_id)? {
Some(value) => value.value().to_string(),
None => return Ok(()),
};
let mut ids: Vec<String> = serde_json::from_str(&ids_json)
.map_err(|e| DbError::Serialization(e.to_string()))?;
ids.retain(|i| i != workspace_id);
let new_json = serde_json::to_string(&ids)
.map_err(|e| DbError::Serialization(e.to_string()))?;
idx_table.insert(group_id, new_json.as_str())?;
Ok(())
}
/// Helper to add a workspace ID to the sync group index
fn add_to_sync_index(
&self,
write_txn: &redb::WriteTransaction,
group_id: &str,
workspace_id: &str,
) -> DbResult<()> {
let mut idx_table = write_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
// Read current IDs or start with empty
let ids_json = match idx_table.get(group_id)? {
Some(value) => value.value().to_string(),
None => "[]".to_string(),
};
let mut ids: Vec<String> = serde_json::from_str(&ids_json)
.map_err(|e| DbError::Serialization(e.to_string()))?;
if !ids.contains(&workspace_id.to_string()) {
ids.push(workspace_id.to_string());
}
let new_json = serde_json::to_string(&ids)
.map_err(|e| DbError::Serialization(e.to_string()))?;
idx_table.insert(group_id, new_json.as_str())?;
Ok(())
}
// ==================== Sync Group Operations ====================
/// Get all sync groups
pub fn get_all_sync_groups(&self) -> DbResult<Vec<WorkspaceSyncGroup>> {
let read_txn = self.db.begin_read()?;
let table = read_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
let mut groups = Vec::new();
for entry in table.iter()? {
let (_, value) = entry?;
let group: WorkspaceSyncGroup = serde_json::from_str(value.value())
.map_err(|e| DbError::Serialization(e.to_string()))?;
groups.push(group);
}
Ok(groups)
}
/// Get a sync group by ID
pub fn get_sync_group(&self, id: &str) -> DbResult<WorkspaceSyncGroup> {
let read_txn = self.db.begin_read()?;
let table = read_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
let value = table
.get(id)?
.ok_or_else(|| DbError::NotFound(format!("Sync group not found: {}", id)))?;
let group: WorkspaceSyncGroup = serde_json::from_str(value.value())
.map_err(|e| DbError::Serialization(e.to_string()))?;
Ok(group)
}
/// Get sync group for a workspace
pub fn get_sync_group_for_workspace(
&self,
workspace_id: &str,
) -> DbResult<Option<WorkspaceSyncGroup>> {
let workspace = self.get(workspace_id)?;
match workspace.sync_group_id {
Some(group_id) => Ok(Some(self.get_sync_group(&group_id)?)),
None => Ok(None),
}
}
/// Create a new sync group
pub fn create_sync_group(&self, input: CreateSyncGroupInput) -> DbResult<WorkspaceSyncGroup> {
let mut group = WorkspaceSyncGroup::new(input.name, input.workspace_ids.clone());
group.synced_variable_names = input.synced_variable_names;
group.sync_secrets = input.sync_secrets;
let json = serde_json::to_string(&group)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let write_txn = self.db.begin_write()?;
// Insert sync group
{
let mut table = write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
table.insert(group.id.as_str(), json.as_str())?;
}
// Update index
{
let mut idx_table = write_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
let ids_json = serde_json::to_string(&input.workspace_ids)
.map_err(|e| DbError::Serialization(e.to_string()))?;
idx_table.insert(group.id.as_str(), ids_json.as_str())?;
}
// Update each workspace's sync_group_id
{
let mut ws_table = write_txn.open_table(WORKSPACES)?;
for ws_id in &input.workspace_ids {
// Read workspace
let ws_json = match ws_table.get(ws_id.as_str())? {
Some(value) => value.value().to_string(),
None => continue,
};
let mut workspace: Workspace = serde_json::from_str(&ws_json)
.map_err(|e| DbError::Serialization(e.to_string()))?;
workspace.sync_group_id = Some(group.id.clone());
workspace.updated_at = Utc::now();
let new_ws_json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
ws_table.insert(ws_id.as_str(), new_ws_json.as_str())?;
}
}
write_txn.commit()?;
Ok(group)
}
/// Update a sync group
pub fn update_sync_group(&self, input: UpdateSyncGroupInput) -> DbResult<WorkspaceSyncGroup> {
// Read existing
let mut group = self.get_sync_group(&input.id)?;
// Apply updates
if let Some(name) = input.name {
group.name = name;
}
if let Some(synced_variable_names) = input.synced_variable_names {
group.synced_variable_names = synced_variable_names;
}
if let Some(sync_secrets) = input.sync_secrets {
group.sync_secrets = sync_secrets;
}
group.updated_at = Utc::now();
// Write back
let json = serde_json::to_string(&group)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let write_txn = self.db.begin_write()?;
{
let mut table = write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
table.insert(group.id.as_str(), json.as_str())?;
}
write_txn.commit()?;
Ok(group)
}
/// Delete a sync group
pub fn delete_sync_group(&self, id: &str) -> DbResult<()> {
// Get the sync group to find associated workspaces
let group = self.get_sync_group(id)?;
let workspace_ids = group.workspace_ids.clone();
let write_txn = self.db.begin_write()?;
// Remove sync_group_id from all associated workspaces
{
let mut ws_table = write_txn.open_table(WORKSPACES)?;
for ws_id in &workspace_ids {
let ws_json = match ws_table.get(ws_id.as_str())? {
Some(value) => value.value().to_string(),
None => continue,
};
let mut workspace: Workspace = serde_json::from_str(&ws_json)
.map_err(|e| DbError::Serialization(e.to_string()))?;
workspace.sync_group_id = None;
workspace.updated_at = Utc::now();
let new_ws_json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
ws_table.insert(ws_id.as_str(), new_ws_json.as_str())?;
}
}
// Remove from sync groups table
{
let mut table = write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
table.remove(id)?;
}
// Remove from index
{
let mut idx_table = write_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
idx_table.remove(id)?;
}
write_txn.commit()?;
Ok(())
}
/// Get workspaces by sync group
pub fn get_workspaces_by_sync_group(&self, sync_group_id: &str) -> DbResult<Vec<Workspace>> {
let read_txn = self.db.begin_read()?;
let idx_table = read_txn.open_table(WORKSPACES_BY_SYNC_GROUP)?;
let workspace_ids: Vec<String> = match idx_table.get(sync_group_id)? {
Some(value) => serde_json::from_str(value.value())
.map_err(|e| DbError::Serialization(e.to_string()))?,
None => return Ok(Vec::new()),
};
drop(idx_table);
drop(read_txn);
let mut workspaces = Vec::new();
for ws_id in workspace_ids {
if let Ok(workspace) = self.get(&ws_id) {
workspaces.push(workspace);
}
}
Ok(workspaces)
}
/// Add a workspace to a sync group
pub fn add_workspace_to_sync_group(
&self,
sync_group_id: &str,
workspace_id: &str,
) -> DbResult<()> {
// Read existing data
let mut group = self.get_sync_group(sync_group_id)?;
let mut workspace = self.get(workspace_id)?;
// Update in memory
if !group.workspace_ids.contains(&workspace_id.to_string()) {
group.workspace_ids.push(workspace_id.to_string());
group.updated_at = Utc::now();
}
workspace.sync_group_id = Some(sync_group_id.to_string());
workspace.updated_at = Utc::now();
// Serialize
let group_json = serde_json::to_string(&group)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let ws_json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
// Write all changes
let write_txn = self.db.begin_write()?;
{
let mut table = write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
table.insert(sync_group_id, group_json.as_str())?;
}
self.add_to_sync_index(&write_txn, sync_group_id, workspace_id)?;
{
let mut ws_table = write_txn.open_table(WORKSPACES)?;
ws_table.insert(workspace_id, ws_json.as_str())?;
}
write_txn.commit()?;
Ok(())
}
/// Remove a workspace from a sync group
pub fn remove_workspace_from_sync_group(
&self,
sync_group_id: &str,
workspace_id: &str,
) -> DbResult<()> {
// Read existing data
let mut group = self.get_sync_group(sync_group_id)?;
let mut workspace = self.get(workspace_id)?;
// Update in memory
group.workspace_ids.retain(|id| id != workspace_id);
group.updated_at = Utc::now();
workspace.sync_group_id = None;
workspace.updated_at = Utc::now();
// Serialize
let group_json = serde_json::to_string(&group)
.map_err(|e| DbError::Serialization(e.to_string()))?;
let ws_json = serde_json::to_string(&workspace)
.map_err(|e| DbError::Serialization(e.to_string()))?;
// Write all changes
let write_txn = self.db.begin_write()?;
{
let mut table = write_txn.open_table(WORKSPACE_SYNC_GROUPS)?;
table.insert(sync_group_id, group_json.as_str())?;
}
self.remove_from_sync_index(&write_txn, sync_group_id, workspace_id)?;
{
let mut ws_table = write_txn.open_table(WORKSPACES)?;
ws_table.insert(workspace_id, ws_json.as_str())?;
}
write_txn.commit()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env::temp_dir;
fn create_test_db() -> Database {
let path = temp_dir().join(format!("resona_test_{}.redb", uuid::Uuid::new_v4()));
Database::open_at(path).expect("Failed to create test database")
}
#[test]
fn test_workspace_crud() {
let db = create_test_db();
let service = WorkspaceService::new(db);
let workspace = service
.create(CreateWorkspaceInput {
name: "Test Workspace".to_string(),
description: "A test workspace".to_string(),
tags: vec!["Development".to_string()],
})
.expect("Failed to create workspace");
assert_eq!(workspace.name, "Test Workspace");
assert_eq!(workspace.tags, vec!["Development".to_string()]);
let fetched = service.get(&workspace.id).expect("Failed to get workspace");
assert_eq!(fetched.id, workspace.id);
let updated = service
.update(UpdateWorkspaceInput {
id: workspace.id.clone(),
name: Some("Updated Workspace".to_string()),
description: None,
tags: None,
sync_group_id: None,
})
.expect("Failed to update workspace");
assert_eq!(updated.name, "Updated Workspace");
assert_eq!(updated.description, "A test workspace");
let all = service.get_all().expect("Failed to get all workspaces");
assert_eq!(all.len(), 1);
service
.delete(&workspace.id)
.expect("Failed to delete workspace");
let all = service.get_all().expect("Failed to get all workspaces");
assert_eq!(all.len(), 0);
}
#[test]
fn test_sync_groups() {
let db = create_test_db();
let service = WorkspaceService::new(db);
let ws1 = service
.create(CreateWorkspaceInput {
name: "Workspace 1".to_string(),
description: "First workspace".to_string(),
tags: vec!["Development".to_string()],
})
.expect("Failed to create workspace 1");
let ws2 = service
.create(CreateWorkspaceInput {
name: "Workspace 2".to_string(),
description: "Second workspace".to_string(),
tags: vec!["Production".to_string()],
})
.expect("Failed to create workspace 2");
// Create sync group
let group = service
.create_sync_group(CreateSyncGroupInput {
name: "Test Sync Group".to_string(),
workspace_ids: vec![ws1.id.clone(), ws2.id.clone()],
synced_variable_names: vec!["API_KEY".to_string()],
sync_secrets: false,
})
.expect("Failed to create sync group");
// Verify workspaces are linked
let ws1_updated = service.get(&ws1.id).expect("Failed to get workspace 1");
assert_eq!(ws1_updated.sync_group_id, Some(group.id.clone()));
// Get workspaces by sync group
let grouped = service
.get_workspaces_by_sync_group(&group.id)
.expect("Failed to get workspaces by sync group");
assert_eq!(grouped.len(), 2);
// Delete sync group
service
.delete_sync_group(&group.id)
.expect("Failed to delete sync group");
// Verify workspaces are unlinked
let ws1_final = service.get(&ws1.id).expect("Failed to get workspace 1");
assert_eq!(ws1_final.sync_group_id, None);
}
}