refactor: reorganize project into monorepo with frontend scaffolding
Convert project from single backend to monorepo structure with separate frontend (Vue 3 + TypeScript + Vite) and backend directories. Updates all configuration files and build system to support both workspaces. Ref: T007 (specs/001-modbus-relay-control)
This commit is contained in:
1
backend/.env.example
Normal file
1
backend/.env.example
Normal file
@@ -0,0 +1 @@
|
||||
DATABASE_URL=sqlite://db.sqlite
|
||||
6
backend/.tarpaulin.ci.toml
Normal file
6
backend/.tarpaulin.ci.toml
Normal file
@@ -0,0 +1,6 @@
|
||||
[all]
|
||||
out = ["Xml"]
|
||||
target-dir = "coverage"
|
||||
output-dir = "coverage"
|
||||
fail-under = 60
|
||||
exclude-files = ["target/*"]
|
||||
7
backend/.tarpaulin.local.toml
Normal file
7
backend/.tarpaulin.local.toml
Normal file
@@ -0,0 +1,7 @@
|
||||
[all]
|
||||
out = ["Html", "Lcov"]
|
||||
skip-clean = true
|
||||
target-dir = "coverage"
|
||||
output-dir = "coverage"
|
||||
fail-under = 60
|
||||
exclude-files = ["target/*", "private/*"]
|
||||
38
backend/Cargo.toml
Normal file
38
backend/Cargo.toml
Normal file
@@ -0,0 +1,38 @@
|
||||
[package]
|
||||
name = "sta"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
publish = false
|
||||
authors = ["Lucien Cartier-Tilet <lucien@phundrak.com>"]
|
||||
license = "AGPL-3.0-only"
|
||||
|
||||
[lib]
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
path = "src/main.rs"
|
||||
name = "sta"
|
||||
|
||||
[dependencies]
|
||||
async-trait = "0.1.89"
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
config = { version = "0.15.19", features = ["yaml"] }
|
||||
dotenvy = "0.15.7"
|
||||
governor = "0.10.4"
|
||||
mockall = "0.14.0"
|
||||
poem = { version = "3.1.12", default-features = false, features = ["csrf", "rustls", "test"] }
|
||||
poem-openapi = { version = "5.1.16", features = ["chrono", "swagger-ui"] }
|
||||
serde = "1.0.228"
|
||||
serde_json = "1.0.148"
|
||||
sqlx = { version = "0.8.6", features = ["runtime-tokio", "sqlite", "derive", "migrate"] }
|
||||
thiserror = "2.0.17"
|
||||
tokio = { version = "1.48.0", features = ["macros", "rt-multi-thread"] }
|
||||
tokio-modbus = { version = "0.17.0", default-features = false, features = ["tcp"] }
|
||||
tracing = "0.1.44"
|
||||
tracing-subscriber = { version = "0.3.22", features = ["fmt", "std", "env-filter", "registry", "json", "tracing-log"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.15.0"
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tarpaulin_include)'] }
|
||||
84
backend/bacon.toml
Normal file
84
backend/bacon.toml
Normal file
@@ -0,0 +1,84 @@
|
||||
# This is a configuration file for the bacon tool
|
||||
#
|
||||
# Bacon repository: https://github.com/Canop/bacon
|
||||
# Complete help on configuration: https://dystroy.org/bacon/config/
|
||||
# You can also check bacon's own bacon.toml file
|
||||
# as an example: https://github.com/Canop/bacon/blob/main/bacon.toml
|
||||
|
||||
default_job = "clippy-all"
|
||||
|
||||
[jobs.check]
|
||||
command = ["cargo", "check", "--color", "always"]
|
||||
need_stdout = false
|
||||
|
||||
[jobs.check-all]
|
||||
command = ["cargo", "check", "--all-targets", "--color", "always"]
|
||||
need_stdout = false
|
||||
|
||||
# Run clippy on the default target
|
||||
[jobs.clippy]
|
||||
command = [
|
||||
"cargo", "clippy",
|
||||
"--color", "always",
|
||||
]
|
||||
need_stdout = false
|
||||
|
||||
[jobs.clippy-all]
|
||||
command = [
|
||||
"cargo", "clippy",
|
||||
"--all-targets",
|
||||
"--color", "always",
|
||||
]
|
||||
need_stdout = false
|
||||
|
||||
[jobs.test]
|
||||
command = [
|
||||
"cargo", "test", "--color", "always",
|
||||
"--", "--color", "always", # see https://github.com/Canop/bacon/issues/124
|
||||
]
|
||||
need_stdout = true
|
||||
|
||||
[jobs.doc]
|
||||
command = ["cargo", "doc", "--color", "always", "--no-deps"]
|
||||
need_stdout = false
|
||||
|
||||
# If the doc compiles, then it opens in your browser and bacon switches
|
||||
# to the previous job
|
||||
[jobs.doc-open]
|
||||
command = ["cargo", "doc", "--color", "always", "--no-deps", "--open"]
|
||||
need_stdout = false
|
||||
on_success = "back" # so that we don't open the browser at each change
|
||||
|
||||
# You can run your application and have the result displayed in bacon,
|
||||
# *if* it makes sense for this crate.
|
||||
# Don't forget the `--color always` part or the errors won't be
|
||||
# properly parsed.
|
||||
# If your program never stops (eg a server), you may set `background`
|
||||
# to false to have the cargo run output immediately displayed instead
|
||||
# of waiting for program's end.
|
||||
[jobs.run]
|
||||
command = [
|
||||
"cargo", "run",
|
||||
"--color", "always",
|
||||
# put launch parameters for your program behind a `--` separator
|
||||
]
|
||||
need_stdout = true
|
||||
allow_warnings = true
|
||||
background = true
|
||||
|
||||
# This parameterized job runs the example of your choice, as soon
|
||||
# as the code compiles.
|
||||
# Call it as
|
||||
# bacon ex -- my-example
|
||||
[jobs.ex]
|
||||
command = ["cargo", "run", "--color", "always", "--example"]
|
||||
need_stdout = true
|
||||
allow_warnings = true
|
||||
|
||||
# You may define here keybindings that would be specific to
|
||||
# a project, for example a shortcut to launch a specific job.
|
||||
# Shortcuts to internal functions (scrolling, toggling, etc.)
|
||||
# should go in your personal global prefs.toml file instead.
|
||||
[keybindings]
|
||||
# alt-m = "job:my-job"
|
||||
c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target
|
||||
5
backend/build.rs
Normal file
5
backend/build.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
// generated by `sqlx migrate build-script`
|
||||
fn main() {
|
||||
// trigger recompilation when a new migration is added
|
||||
println!("cargo:rerun-if-changed=../migrations");
|
||||
}
|
||||
17
backend/settings/base.yaml
Normal file
17
backend/settings/base.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
application:
|
||||
port: 3100
|
||||
version: "0.1.0"
|
||||
|
||||
rate_limit:
|
||||
enabled: true
|
||||
burst_size: 10
|
||||
per_seconds: 60
|
||||
|
||||
modbus:
|
||||
host: "192.168.0.200"
|
||||
port: 502
|
||||
slave_id: 0
|
||||
timeout_secs: 5
|
||||
|
||||
relay:
|
||||
label_max_length: 8
|
||||
8
backend/settings/development.yaml
Normal file
8
backend/settings/development.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
frontend_url: http://localhost:3000
|
||||
debug: true
|
||||
|
||||
application:
|
||||
protocol: http
|
||||
host: 127.0.0.1
|
||||
base_url: http://127.0.0.1:3100
|
||||
name: "sta-dev"
|
||||
8
backend/settings/production.yaml
Normal file
8
backend/settings/production.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
debug: false
|
||||
frontend_url: ""
|
||||
|
||||
application:
|
||||
name: "sta-prod"
|
||||
protocol: https
|
||||
host: 0.0.0.0
|
||||
base_url: ""
|
||||
60
backend/src/application/mod.rs
Normal file
60
backend/src/application/mod.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
//! Application layer - Use cases and orchestration logic
|
||||
//!
|
||||
//! This module implements business use cases by coordinating domain entities and
|
||||
//! infrastructure services. It contains the application logic that orchestrates
|
||||
//! domain behavior without implementing domain rules directly.
|
||||
//!
|
||||
//! # Architecture Principles
|
||||
//!
|
||||
//! - **Depends on Domain layer**: Uses domain entities, value objects, and traits
|
||||
//! - **Framework-independent**: No dependencies on HTTP, database, or other infrastructure
|
||||
//! - **Use case driven**: Each module represents a specific business use case
|
||||
//! - **Testable in isolation**: Can be tested with mock infrastructure implementations
|
||||
//!
|
||||
//! # Planned Submodules
|
||||
//!
|
||||
//! - `relay`: Relay control use cases
|
||||
//! - `get_status`: Retrieve current state of one or all relays
|
||||
//! - `toggle_relay`: Switch relay on/off with validation
|
||||
//! - `bulk_control`: Control multiple relays (all on, all off, pattern)
|
||||
//! - `update_label`: Manage relay labels with persistence
|
||||
//! - `get_health`: Check device health and connectivity
|
||||
//!
|
||||
//! # Use Case Pattern
|
||||
//!
|
||||
//! Each use case follows this pattern:
|
||||
//! 1. Accept domain types as input (validated at boundary)
|
||||
//! 2. Orchestrate domain entities and services
|
||||
//! 3. Return domain types or application-specific results
|
||||
//! 4. Depend on traits (RelayController, RelayLabelRepository), not concrete types
|
||||
//!
|
||||
//! # Example Use Case Structure
|
||||
//!
|
||||
//! ```rust,ignore
|
||||
//! pub struct ToggleRelay {
|
||||
//! controller: Arc<dyn RelayController>,
|
||||
//! repository: Arc<dyn RelayLabelRepository>,
|
||||
//! }
|
||||
//!
|
||||
//! impl ToggleRelay {
|
||||
//! pub async fn execute(&self, relay_id: RelayId) -> Result<Relay, ApplicationError> {
|
||||
//! // 1. Read current state
|
||||
//! let current = self.controller.read_relay_state(relay_id).await?;
|
||||
//!
|
||||
//! // 2. Toggle state (domain logic)
|
||||
//! let new_state = current.toggle();
|
||||
//!
|
||||
//! // 3. Write new state
|
||||
//! self.controller.write_relay_state(relay_id, new_state).await?;
|
||||
//!
|
||||
//! // 4. Return updated relay
|
||||
//! Ok(Relay::new(relay_id, new_state))
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # References
|
||||
//!
|
||||
//! - Architecture: `specs/constitution.md` - Hexagonal Architecture principles
|
||||
//! - Use cases: `specs/001-modbus-relay-control/plan.md` - Implementation plan
|
||||
//! - Domain types: [`crate::domain`] - Domain entities and value objects
|
||||
38
backend/src/domain/mod.rs
Normal file
38
backend/src/domain/mod.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
//! Domain layer - Pure business logic with no external dependencies
|
||||
//!
|
||||
//! This module contains the core business domain for the StA relay control system.
|
||||
//! It follows **Domain-Driven Design** principles with rich domain models and clear
|
||||
//! ubiquitous language.
|
||||
//!
|
||||
//! # Architecture Principles
|
||||
//!
|
||||
//! - **No external dependencies**: Domain layer depends only on Rust standard library
|
||||
//! - **Inward-pointing dependencies**: Infrastructure/Application depend on Domain, never reverse
|
||||
//! - **Rich domain models**: Entities and value objects encapsulate business rules
|
||||
//! - **Ubiquitous language**: Code reflects real-world relay control domain concepts
|
||||
//!
|
||||
//! # Planned Submodules
|
||||
//!
|
||||
//! - `relay`: Core relay domain (RelayId, RelayState, RelayLabel, Relay entity)
|
||||
//! - Value objects with validation (newtypes following TyDD principles)
|
||||
//! - Domain entities (Relay, RelayCollection)
|
||||
//! - Repository traits (RelayLabelRepository)
|
||||
//! - Controller traits (RelayController)
|
||||
//! - Domain errors (DomainError, ValidationError)
|
||||
//!
|
||||
//! # Type-Driven Development (TyDD)
|
||||
//!
|
||||
//! Domain types follow "make illegal states unrepresentable" principle:
|
||||
//! - `RelayId`: Newtype wrapping u8, validated to 1..=8 range
|
||||
//! - `RelayLabel`: String wrapper, validated max 50 chars
|
||||
//! - `RelayState`: Enum (On, Off) - no invalid states possible
|
||||
//!
|
||||
//! See `specs/001-modbus-relay-control/types-design.md` for complete type design.
|
||||
//!
|
||||
//! # References
|
||||
//!
|
||||
//! - Architecture: `specs/constitution.md` - Hexagonal Architecture principles
|
||||
//! - Type design: `specs/001-modbus-relay-control/types-design.md`
|
||||
//! - Domain specification: `specs/001-modbus-relay-control/spec.md`
|
||||
|
||||
pub mod relay;
|
||||
9
backend/src/domain/relay/mod.rs
Normal file
9
backend/src/domain/relay/mod.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
//! Relay domain module.
|
||||
//!
|
||||
//! This module contains the core domain logic for relay control and management,
|
||||
//! including relay types, repository abstractions, and business rules.
|
||||
|
||||
/// Repository trait and error types for relay persistence.
|
||||
pub mod repository;
|
||||
/// Domain types for relay identification and control.
|
||||
pub mod types;
|
||||
15
backend/src/domain/relay/repository.rs
Normal file
15
backend/src/domain/relay/repository.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
use super::types::RelayId;
|
||||
|
||||
/// Errors that can occur during repository operations.
|
||||
///
|
||||
/// This enum provides structured error handling for all data persistence
|
||||
/// operations related to relay management.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum RepositoryError {
|
||||
/// A database operation failed with the given error message.
|
||||
#[error("Database error: {0}")]
|
||||
DatabaseError(String),
|
||||
/// The requested relay was not found in the repository.
|
||||
#[error("Relay not found: {0}")]
|
||||
NotFound(RelayId),
|
||||
}
|
||||
14
backend/src/domain/relay/types.rs
Normal file
14
backend/src/domain/relay/types.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
/// Unique identifier for a relay in the system.
|
||||
///
|
||||
/// Uses the newtype pattern to provide type safety and prevent mixing relay IDs
|
||||
/// with other numeric values. Valid values range from 0-255, corresponding to
|
||||
/// individual relay channels in the Modbus controller.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
#[repr(transparent)]
|
||||
pub struct RelayId(u8);
|
||||
|
||||
impl std::fmt::Display for RelayId {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
78
backend/src/infrastructure/mod.rs
Normal file
78
backend/src/infrastructure/mod.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
//! Infrastructure layer - External integrations and adapters
|
||||
//!
|
||||
//! This module implements the technical infrastructure required by the application,
|
||||
//! including external system integrations, persistence, and communication protocols.
|
||||
//! All infrastructure depends on domain/application layers through trait implementations.
|
||||
//!
|
||||
//! # Architecture Principles
|
||||
//!
|
||||
//! - **Implements domain traits**: Provides concrete implementations of domain interfaces
|
||||
//! - **Depends inward**: Depends on domain/application, never the reverse
|
||||
//! - **Substitutable**: Different implementations can be swapped without changing domain
|
||||
//! - **Framework-specific**: Contains framework and library dependencies (Modbus, SQLx, etc.)
|
||||
//!
|
||||
//! # Planned Submodules
|
||||
//!
|
||||
//! ## `modbus` - Modbus RTU over TCP Integration
|
||||
//!
|
||||
//! - `client`: ModbusRelayController implementation using tokio-modbus
|
||||
//! - `mock`: MockRelayController for testing without hardware
|
||||
//! - `config`: Modbus connection configuration
|
||||
//! - `connection`: Connection pool and health management
|
||||
//!
|
||||
//! Implements: [`domain::relay::controller::RelayController`](crate::domain)
|
||||
//!
|
||||
//! ## `persistence` - SQLite Database with SQLx
|
||||
//!
|
||||
//! - `sqlite_repository`: SqliteRelayLabelRepository implementation
|
||||
//! - `schema.sql`: Database schema with relay_labels table
|
||||
//! - `migrations`: Database migration scripts (if using sqlx-cli)
|
||||
//!
|
||||
//! Implements: [`domain::relay::repository::RelayLabelRepository`](crate::domain)
|
||||
//!
|
||||
//! # Technology Stack
|
||||
//!
|
||||
//! - **Modbus**: `tokio-modbus` 0.17.0 for async Modbus RTU over TCP
|
||||
//! - **Persistence**: `sqlx` 0.8 for compile-time verified SQLite queries
|
||||
//! - **Async Runtime**: `tokio` 1.48 (shared with main application)
|
||||
//! - **Testing**: `mockall` 0.13 for mock implementations
|
||||
//!
|
||||
//! # Implementation Pattern
|
||||
//!
|
||||
//! Each infrastructure adapter:
|
||||
//! 1. Implements domain-defined trait (e.g., `RelayController`)
|
||||
//! 2. Translates domain types to/from external formats
|
||||
//! 3. Handles connection management and retries
|
||||
//! 4. Provides error translation (external errors → domain errors)
|
||||
//!
|
||||
//! # Example: Modbus Controller
|
||||
//!
|
||||
//! ```rust,ignore
|
||||
//! pub struct ModbusRelayController {
|
||||
//! client: Arc<Mutex<ModbusClient>>,
|
||||
//! config: ModbusConfig,
|
||||
//! }
|
||||
//!
|
||||
//! #[async_trait]
|
||||
//! impl RelayController for ModbusRelayController {
|
||||
//! async fn read_relay_state(&self, id: RelayId) -> Result<RelayState, ControllerError> {
|
||||
//! let address = self.config.base_address + id.value() as u16;
|
||||
//! let mut client = self.client.lock().await;
|
||||
//!
|
||||
//! // Read coil from Modbus device
|
||||
//! let result = client.read_coils(address, 1).await
|
||||
//! .map_err(|e| ControllerError::CommunicationError(e.to_string()))?;
|
||||
//!
|
||||
//! // Translate to domain type
|
||||
//! Ok(if result[0] { RelayState::On } else { RelayState::Off })
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # References
|
||||
//!
|
||||
//! - Architecture: `specs/constitution.md` - Dependency Inversion Principle
|
||||
//! - Implementation: `specs/001-modbus-relay-control/plan.md` - Infrastructure tasks
|
||||
//! - Modbus docs: `docs/Modbus_POE_ETH_Relay.md` - Hardware protocol specification
|
||||
|
||||
pub mod persistence;
|
||||
7
backend/src/infrastructure/persistence/mod.rs
Normal file
7
backend/src/infrastructure/persistence/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
//! Persistence layer implementations.
|
||||
//!
|
||||
//! This module contains the concrete implementations of repository traits
|
||||
//! for data persistence, including SQLite-based storage for relay labels.
|
||||
|
||||
/// `SQLite` repository implementation for relay labels.
|
||||
pub mod sqlite_repository;
|
||||
64
backend/src/infrastructure/persistence/sqlite_repository.rs
Normal file
64
backend/src/infrastructure/persistence/sqlite_repository.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use sqlx::SqlitePool;
|
||||
|
||||
use crate::domain::relay::repository::RepositoryError;
|
||||
|
||||
/// `SQLite` implementation of the relay label repository.
|
||||
///
|
||||
/// This repository manages persistent storage of relay labels using `SQLite`,
|
||||
/// with automatic schema migrations via `SQLx`.
|
||||
pub struct SqliteRelayLabelRepository {
|
||||
/// The `SQLite` connection pool for database operations.
|
||||
pool: SqlitePool,
|
||||
}
|
||||
|
||||
impl SqliteRelayLabelRepository {
|
||||
/// Creates a new `SQLite` relay label repository.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `db_path` - The `SQLite` database path or connection string (e.g., `"sqlite://data.db"`)
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns `RepositoryError::DatabaseError` if the connection fails or migrations cannot be applied.
|
||||
pub async fn new(db_path: &str) -> Result<Self, RepositoryError> {
|
||||
let pool = SqlitePool::connect(db_path)
|
||||
.await
|
||||
.map_err(|e| RepositoryError::DatabaseError(e.to_string()))?;
|
||||
let repo = Self { pool };
|
||||
repo.run_migrations().await?;
|
||||
Ok(repo)
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying connection pool.
|
||||
///
|
||||
/// This is primarily used for testing to verify schema and constraints.
|
||||
#[must_use]
|
||||
pub const fn pool(&self) -> &SqlitePool {
|
||||
&self.pool
|
||||
}
|
||||
|
||||
/// Creates a new in-memory `SQLite` relay label repository.
|
||||
///
|
||||
/// This is useful for testing and ephemeral data storage.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns `RepositoryError::DatabaseError` if the in-memory database cannot be created.
|
||||
pub async fn in_memory() -> Result<Self, RepositoryError> {
|
||||
Self::new("sqlite::memory:").await
|
||||
}
|
||||
|
||||
/// Runs all pending database migrations.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns `RepositoryError::DatabaseError` if migrations fail to apply.
|
||||
async fn run_migrations(&self) -> Result<(), RepositoryError> {
|
||||
sqlx::migrate!("../migrations/")
|
||||
.run(&self.pool)
|
||||
.await
|
||||
.map_err(|e| RepositoryError::DatabaseError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
143
backend/src/lib.rs
Normal file
143
backend/src/lib.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
//! Backend API server for `StA` (Smart Temperature & Appliance Control)
|
||||
//!
|
||||
//! `StA` is a web-based Modbus relay control system that provides `RESTful` API access
|
||||
//! to 8-channel relay devices. The system eliminates the need for specialized Modbus
|
||||
//! software, enabling browser-based relay control for automation and remote management.
|
||||
//!
|
||||
//! # Architecture
|
||||
//!
|
||||
//! This crate follows **Hexagonal Architecture** (Clean Architecture) with strict
|
||||
//! layer separation and inward-pointing dependencies:
|
||||
//!
|
||||
//! - **[`domain`]**: Pure business logic with no external dependencies (relay entities, value objects)
|
||||
//! - **[`application`]**: Use cases and orchestration logic (relay control, label management)
|
||||
//! - **[`infrastructure`]**: External integrations (Modbus TCP, `SQLite` persistence)
|
||||
//! - **[`presentation`]**: API contracts and DTOs (not yet used - see [`route`] for current API)
|
||||
//!
|
||||
//! Traditional modules (will be migrated to hexagonal layers):
|
||||
//! - **[`route`]**: HTTP API endpoints (will move to `presentation`)
|
||||
//! - **[`middleware`]**: Custom middleware (rate limiting, CORS)
|
||||
//! - **[`settings`]**: Configuration management from YAML + env vars
|
||||
//! - **[`startup`]**: Application builder and server configuration
|
||||
//! - **[`telemetry`]**: Logging and tracing setup
|
||||
//!
|
||||
//! # Current Features
|
||||
//!
|
||||
//! - Health check endpoints
|
||||
//! - Application metadata endpoints
|
||||
//! - Rate limiting middleware
|
||||
//! - CORS support
|
||||
//! - `OpenAPI` documentation
|
||||
//!
|
||||
//! # Planned Features (001-modbus-relay-control)
|
||||
//!
|
||||
//! - Modbus RTU over TCP communication with 8-channel relay devices
|
||||
//! - Real-time relay status monitoring
|
||||
//! - Individual relay control (on/off toggle)
|
||||
//! - Bulk relay operations (all on, all off)
|
||||
//! - Persistent relay labels (`SQLite` with `SQLx`)
|
||||
//! - Device health monitoring
|
||||
//!
|
||||
//! See `specs/001-modbus-relay-control/` for detailed specification.
|
||||
|
||||
#![deny(clippy::all)]
|
||||
#![deny(clippy::pedantic)]
|
||||
#![deny(clippy::nursery)]
|
||||
#![warn(missing_docs)]
|
||||
#![allow(clippy::unused_async)]
|
||||
|
||||
/// Custom middleware implementations
|
||||
pub mod middleware;
|
||||
/// API route handlers and endpoints
|
||||
pub mod route;
|
||||
/// Application configuration settings
|
||||
pub mod settings;
|
||||
/// Application startup and server configuration
|
||||
pub mod startup;
|
||||
/// Logging and tracing setup
|
||||
pub mod telemetry;
|
||||
|
||||
/// Domain layer - Pure business logic with no external dependencies
|
||||
///
|
||||
/// Contains domain entities, value objects, and business rules for the relay
|
||||
/// control system. This layer has no dependencies on frameworks or infrastructure.
|
||||
///
|
||||
/// See `specs/constitution.md` for hexagonal architecture principles.
|
||||
pub mod domain;
|
||||
|
||||
/// Application layer - Use cases and orchestration logic
|
||||
///
|
||||
/// Coordinates domain entities to implement business use cases such as relay
|
||||
/// control, label management, and device health monitoring.
|
||||
pub mod application;
|
||||
|
||||
/// Infrastructure layer - External integrations and adapters
|
||||
///
|
||||
/// Implements interfaces defined in domain/application layers for external
|
||||
/// systems: Modbus TCP communication, SQLite persistence, HTTP clients.
|
||||
pub mod infrastructure;
|
||||
|
||||
/// Presentation layer - API contracts and DTOs
|
||||
///
|
||||
/// Defines data transfer objects and API request/response types. Currently
|
||||
/// unused - API handlers are in [`route`] module (legacy structure).
|
||||
pub mod presentation;
|
||||
|
||||
type MaybeListener = Option<poem::listener::TcpListener<String>>;
|
||||
|
||||
fn prepare(listener: MaybeListener) -> startup::Application {
|
||||
dotenvy::dotenv().ok();
|
||||
let settings = settings::Settings::new().expect("Failed to read settings");
|
||||
if !cfg!(test) {
|
||||
let subscriber = telemetry::get_subscriber(settings.debug);
|
||||
telemetry::init_subscriber(subscriber);
|
||||
}
|
||||
tracing::event!(
|
||||
target: "backend",
|
||||
tracing::Level::DEBUG,
|
||||
"Using these settings: {:?}",
|
||||
settings
|
||||
);
|
||||
let application = startup::Application::build(settings, listener);
|
||||
tracing::event!(
|
||||
target: "backend",
|
||||
tracing::Level::INFO,
|
||||
"Listening on http://{}:{}/",
|
||||
application.host(),
|
||||
application.port()
|
||||
);
|
||||
tracing::event!(
|
||||
target: "backend",
|
||||
tracing::Level::INFO,
|
||||
"Documentation available at http://{}:{}/",
|
||||
application.host(),
|
||||
application.port()
|
||||
);
|
||||
application
|
||||
}
|
||||
|
||||
/// Runs the application with the specified TCP listener.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a `std::io::Error` if the server fails to start or encounters
|
||||
/// an I/O error during runtime (e.g., port already in use, network issues).
|
||||
#[cfg(not(tarpaulin_include))]
|
||||
pub async fn run(listener: MaybeListener) -> Result<(), std::io::Error> {
|
||||
let application = prepare(listener);
|
||||
application.make_app().run().await
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn make_random_tcp_listener() -> poem::listener::TcpListener<String> {
|
||||
let tcp_listener =
|
||||
std::net::TcpListener::bind("127.0.0.1:0").expect("Failed to bind a random TCP listener");
|
||||
let port = tcp_listener.local_addr().unwrap().port();
|
||||
poem::listener::TcpListener::bind(format!("127.0.0.1:{port}"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn get_test_app() -> startup::App {
|
||||
let tcp_listener = make_random_tcp_listener();
|
||||
prepare(Some(tcp_listener)).make_app().into()
|
||||
}
|
||||
7
backend/src/main.rs
Normal file
7
backend/src/main.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
//! Backend server entry point.
|
||||
|
||||
#[cfg(not(tarpaulin_include))]
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), std::io::Error> {
|
||||
sta::run(None).await
|
||||
}
|
||||
5
backend/src/middleware/mod.rs
Normal file
5
backend/src/middleware/mod.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
//! Custom middleware for the application.
|
||||
//!
|
||||
//! This module contains custom middleware implementations including rate limiting.
|
||||
|
||||
pub mod rate_limit;
|
||||
208
backend/src/middleware/rate_limit.rs
Normal file
208
backend/src/middleware/rate_limit.rs
Normal file
@@ -0,0 +1,208 @@
|
||||
//! Rate limiting middleware using the governor crate.
|
||||
//!
|
||||
//! This middleware implements per-IP rate limiting using the Generic Cell Rate
|
||||
//! Algorithm (GCRA) via the governor crate. It stores rate limiters in memory
|
||||
//! without requiring external dependencies like Redis.
|
||||
|
||||
use std::{net::IpAddr, num::NonZeroU32, sync::Arc, time::Duration};
|
||||
|
||||
use governor::{
|
||||
Quota, RateLimiter,
|
||||
clock::DefaultClock,
|
||||
state::{InMemoryState, NotKeyed},
|
||||
};
|
||||
use poem::{Endpoint, Error, IntoResponse, Middleware, Request, Response, Result};
|
||||
|
||||
/// Rate limiting configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RateLimitConfig {
|
||||
/// Maximum number of requests allowed in the time window (burst size).
|
||||
pub burst_size: u32,
|
||||
/// Time window in seconds for rate limiting.
|
||||
pub per_seconds: u64,
|
||||
}
|
||||
|
||||
impl RateLimitConfig {
|
||||
/// Creates a new rate limit configuration.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `burst_size` - Maximum number of requests allowed in the time window
|
||||
/// * `per_seconds` - Time window in seconds
|
||||
#[must_use]
|
||||
pub const fn new(burst_size: u32, per_seconds: u64) -> Self {
|
||||
Self {
|
||||
burst_size,
|
||||
per_seconds,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a rate limiter from this configuration.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `burst_size` is zero.
|
||||
#[must_use]
|
||||
pub fn create_limiter(&self) -> RateLimiter<NotKeyed, InMemoryState, DefaultClock> {
|
||||
let quota = Quota::with_period(Duration::from_secs(self.per_seconds))
|
||||
.expect("Failed to create quota")
|
||||
.allow_burst(NonZeroU32::new(self.burst_size).expect("Burst size must be non-zero"));
|
||||
RateLimiter::direct(quota)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RateLimitConfig {
|
||||
fn default() -> Self {
|
||||
// Default: 10 requests per second with burst of 20
|
||||
Self::new(20, 1)
|
||||
}
|
||||
}
|
||||
|
||||
/// Middleware for rate limiting based on IP address.
|
||||
pub struct RateLimit {
|
||||
limiter: Arc<RateLimiter<NotKeyed, InMemoryState, DefaultClock>>,
|
||||
}
|
||||
|
||||
impl RateLimit {
|
||||
/// Creates a new rate limiting middleware with the given configuration.
|
||||
#[must_use]
|
||||
pub fn new(config: &RateLimitConfig) -> Self {
|
||||
Self {
|
||||
limiter: Arc::new(config.create_limiter()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Endpoint> Middleware<E> for RateLimit {
|
||||
type Output = RateLimitEndpoint<E>;
|
||||
|
||||
fn transform(&self, ep: E) -> Self::Output {
|
||||
RateLimitEndpoint {
|
||||
endpoint: ep,
|
||||
limiter: self.limiter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The endpoint wrapper that performs rate limiting checks.
|
||||
pub struct RateLimitEndpoint<E> {
|
||||
endpoint: E,
|
||||
limiter: Arc<RateLimiter<NotKeyed, InMemoryState, DefaultClock>>,
|
||||
}
|
||||
|
||||
impl<E: Endpoint> Endpoint for RateLimitEndpoint<E> {
|
||||
type Output = Response;
|
||||
|
||||
async fn call(&self, req: Request) -> Result<Self::Output> {
|
||||
// Check rate limit
|
||||
if self.limiter.check().is_err() {
|
||||
let client_ip = Self::get_client_ip(&req)
|
||||
.map_or_else(|| "unknown".to_string(), |ip| ip.to_string());
|
||||
|
||||
tracing::event!(
|
||||
target: "backend::middleware::rate_limit",
|
||||
tracing::Level::WARN,
|
||||
client_ip = %client_ip,
|
||||
"Rate limit exceeded"
|
||||
);
|
||||
|
||||
return Err(Error::from_status(
|
||||
poem::http::StatusCode::TOO_MANY_REQUESTS,
|
||||
));
|
||||
}
|
||||
|
||||
// Process the request
|
||||
let response = self.endpoint.call(req).await;
|
||||
response.map(IntoResponse::into_response)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> RateLimitEndpoint<E> {
|
||||
/// Extracts the client IP address from the request.
|
||||
fn get_client_ip(req: &Request) -> Option<IpAddr> {
|
||||
req.remote_addr()
|
||||
.as_socket_addr()
|
||||
.map(std::net::SocketAddr::ip)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for `RateLimitConfig` and end-to-end tests for the
    //! `RateLimit` middleware through a real poem route.
    use super::*;

    #[test]
    fn rate_limit_config_new() {
        let config = RateLimitConfig::new(10, 60);
        assert_eq!(config.burst_size, 10);
        assert_eq!(config.per_seconds, 60);
    }

    #[test]
    fn rate_limit_config_default() {
        let config = RateLimitConfig::default();
        assert_eq!(config.burst_size, 20);
        assert_eq!(config.per_seconds, 1);
    }

    #[test]
    fn rate_limit_config_creates_limiter() {
        // Burst of 5 per second: the burst is consumable immediately.
        let config = RateLimitConfig::new(5, 1);
        let limiter = config.create_limiter();

        // First 5 requests should succeed
        for _ in 0..5 {
            assert!(limiter.check().is_ok());
        }

        // 6th request should fail
        assert!(limiter.check().is_err());
    }

    #[tokio::test]
    async fn rate_limit_middleware_allows_within_limit() {
        use poem::{EndpointExt, Route, handler, test::TestClient};

        #[handler]
        async fn index() -> String {
            "Hello".to_string()
        }

        // 60-second window so the quota cannot refill mid-test.
        let config = RateLimitConfig::new(5, 60);
        let app = Route::new()
            .at("/", poem::get(index))
            .with(RateLimit::new(&config));
        let cli = TestClient::new(app);

        // First 5 requests should succeed
        for _ in 0..5 {
            let response = cli.get("/").send().await;
            response.assert_status_is_ok();
        }
    }

    #[tokio::test]
    async fn rate_limit_middleware_blocks_over_limit() {
        use poem::{EndpointExt, Route, handler, test::TestClient};

        #[handler]
        async fn index() -> String {
            "Hello".to_string()
        }

        let config = RateLimitConfig::new(3, 60);
        let app = Route::new()
            .at("/", poem::get(index))
            .with(RateLimit::new(&config));
        let cli = TestClient::new(app);

        // First 3 requests should succeed
        for _ in 0..3 {
            let response = cli.get("/").send().await;
            response.assert_status_is_ok();
        }

        // 4th request should be rate limited
        let response = cli.get("/").send().await;
        response.assert_status(poem::http::StatusCode::TOO_MANY_REQUESTS);
    }
}
|
||||
96
backend/src/presentation/mod.rs
Normal file
96
backend/src/presentation/mod.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
//! Presentation layer - API contracts and DTOs
|
||||
//!
|
||||
//! This module defines data transfer objects (DTOs) and API request/response types
|
||||
//! that form the public API contract. It translates between domain types and wire
|
||||
//! formats (JSON) for HTTP communication.
|
||||
//!
|
||||
//! # Current Status
|
||||
//!
|
||||
//! **Currently unused** - API handlers are currently in [`crate::route`] module using
|
||||
//! legacy structure. This module is prepared for future migration to proper hexagonal
|
||||
//! architecture with clear presentation layer separation.
|
||||
//!
|
||||
//! # Architecture Principles
|
||||
//!
|
||||
//! - **API-first design**: Define contracts before implementation
|
||||
//! - **DTO pattern**: Separate domain entities from API representations
|
||||
//! - **Validation at boundary**: Parse and validate input before domain layer
|
||||
//! - **OpenAPI integration**: Generate documentation from code
|
||||
//!
|
||||
//! # Planned Submodules
|
||||
//!
|
||||
//! ## `dto` - Data Transfer Objects
|
||||
//!
|
||||
//! - `relay_dto`: RelayResponse, RelayStatusResponse, BulkControlRequest
|
||||
//! - `label_dto`: UpdateLabelRequest, LabelResponse
|
||||
//! - `health_dto`: HealthResponse, DeviceStatusResponse
|
||||
//! - `error_dto`: ApiError, ValidationError (user-facing errors)
|
||||
//!
|
||||
//! ## `mapper` - Domain ↔ DTO Conversions
|
||||
//!
|
||||
//! - `relay_mapper`: Convert between Relay entity and RelayResponse
|
||||
//! - `error_mapper`: Translate domain errors to HTTP status codes
|
||||
//!
|
||||
//! ## `validator` - Input Validation
|
||||
//!
|
||||
//! - Request validation before domain layer
|
||||
//! - Parse DTOs into domain types (parse, don't validate principle)
|
||||
//!
|
||||
//! # DTO Design Pattern
|
||||
//!
|
||||
//! DTOs are separate from domain types to:
|
||||
//! 1. **Prevent domain leakage**: Domain types stay internal
|
||||
//! 2. **Enable versioning**: API can evolve without changing domain
|
||||
//! 3. **Control serialization**: Explicit JSON representation
|
||||
//! 4. **Validate at boundary**: Convert raw input to validated domain types
|
||||
//!
|
||||
//! # Example DTO Structure
|
||||
//!
|
||||
//! ```rust,ignore
|
||||
//! /// API response for relay status
|
||||
//! #[derive(Serialize, Deserialize, Object)]
|
||||
//! pub struct RelayResponse {
|
||||
//! /// Relay ID (1-8)
|
||||
//! pub id: u8,
|
||||
//! /// Current state ("on" or "off")
|
||||
//! pub state: String,
|
||||
//! /// Optional custom label
|
||||
//! pub label: Option<String>,
|
||||
//! }
|
||||
//!
|
||||
//! impl From<Relay> for RelayResponse {
|
||||
//! fn from(relay: Relay) -> Self {
|
||||
//! Self {
|
||||
//! id: relay.id().value(),
|
||||
//! state: relay.state().to_string(),
|
||||
//! label: relay.label().map(|l| l.to_string()),
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl TryFrom<RelayResponse> for Relay {
|
||||
//! type Error = ValidationError;
|
||||
//!
|
||||
//! fn try_from(dto: RelayResponse) -> Result<Self, Self::Error> {
|
||||
//! let id = RelayId::new(dto.id)?;
|
||||
//! let state = RelayState::from_str(&dto.state)?;
|
||||
//! let label = dto.label.map(RelayLabel::new).transpose()?;
|
||||
//! Ok(Relay::new(id, state).with_label(label))
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # Migration Plan
|
||||
//!
|
||||
//! Current API in [`crate::route`] will be migrated to this layer:
|
||||
//! 1. Define DTOs for all API endpoints
|
||||
//! 2. Implement domain ↔ DTO mappers
|
||||
//! 3. Move API handlers to use DTOs
|
||||
//! 4. Generate OpenAPI specs from DTOs
|
||||
//! 5. Remove direct domain type exposure
|
||||
//!
|
||||
//! # References
|
||||
//!
|
||||
//! - Architecture: `specs/constitution.md` - API-First Design principle
|
||||
//! - API design: `specs/001-modbus-relay-control/plan.md` - Presentation layer tasks
|
||||
//! - Domain types: [`crate::domain`] - Types to be wrapped in DTOs
|
||||
38
backend/src/route/health.rs
Normal file
38
backend/src/route/health.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
//! Health check endpoint for monitoring service availability.
|
||||
|
||||
use poem_openapi::{ApiResponse, OpenApi};
|
||||
|
||||
use super::ApiCategory;
|
||||
|
||||
#[derive(ApiResponse)]
// Response contract for GET /health.
enum HealthResponse {
    /// Success
    #[oai(status = 200)]
    Ok,
    /// Too Many Requests - rate limit exceeded
    // Never constructed by this handler: the 429 is produced by the
    // rate-limiting middleware. The variant exists only so the status code
    // appears in the generated OpenAPI spec, hence `allow(dead_code)`.
    #[oai(status = 429)]
    #[allow(dead_code)]
    TooManyRequests,
}
|
||||
|
||||
/// Health check API for monitoring service availability.
#[derive(Default, Clone)]
pub struct HealthApi;

#[OpenApi(tag = "ApiCategory::Health")]
impl HealthApi {
    // GET /health — unconditionally answers 200 with an empty body.
    #[oai(path = "/health", method = "get")]
    async fn ping(&self) -> HealthResponse {
        tracing::event!(target: "backend::health", tracing::Level::DEBUG, "Accessing health-check endpoint");
        HealthResponse::Ok
    }
}
|
||||
|
||||
// Consistency fix: every other module (meta, settings, startup, telemetry)
// keeps its tests inside a `#[cfg(test)] mod tests`; this one was a bare
// top-level test function.
#[cfg(test)]
mod tests {
    /// End-to-end check that `GET /api/health` responds 200 with an empty body.
    #[tokio::test]
    async fn health_check_works() {
        let app = crate::get_test_app();
        let cli = poem::test::TestClient::new(app);
        let resp = cli.get("/api/health").send().await;
        resp.assert_status_is_ok();
        resp.assert_text("").await;
    }
}
|
||||
86
backend/src/route/meta.rs
Normal file
86
backend/src/route/meta.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
//! Application metadata endpoint for retrieving version and name information.
|
||||
|
||||
use poem::Result;
|
||||
use poem_openapi::{ApiResponse, Object, OpenApi, payload::Json};
|
||||
|
||||
use super::ApiCategory;
|
||||
use crate::settings::ApplicationSettings;
|
||||
|
||||
#[derive(Object, Debug, Clone, serde::Serialize, serde::Deserialize)]
// JSON payload returned by GET /meta; built from MetaApi's cached fields.
struct Meta {
    // Application version string.
    version: String,
    // Application name.
    name: String,
}
|
||||
|
||||
impl From<&MetaApi> for Meta {
    /// Copies the API's cached name and version into the wire-format payload.
    fn from(value: &MetaApi) -> Self {
        Self {
            version: value.version.clone(),
            name: value.name.clone(),
        }
    }
}
|
||||
|
||||
#[derive(ApiResponse)]
// Response contract for GET /meta.
enum MetaResponse {
    /// Success
    #[oai(status = 200)]
    Meta(Json<Meta>),
    /// Too Many Requests - rate limit exceeded
    // Never constructed by this handler: the 429 comes from the rate-limiting
    // middleware. Declared only so the status appears in the OpenAPI spec.
    #[oai(status = 429)]
    #[allow(dead_code)]
    TooManyRequests,
}
|
||||
|
||||
/// API for retrieving application metadata (name and version).
#[derive(Clone)]
pub struct MetaApi {
    // Cached at construction from ApplicationSettings (see the From impl).
    name: String,
    version: String,
}
|
||||
|
||||
impl From<&ApplicationSettings> for MetaApi {
    /// Caches the configured application name and version for later responses.
    fn from(value: &ApplicationSettings) -> Self {
        Self {
            name: value.name.clone(),
            version: value.version.clone(),
        }
    }
}
|
||||
|
||||
#[OpenApi(tag = "ApiCategory::Meta")]
impl MetaApi {
    // GET /meta — returns the cached name/version as JSON.
    #[oai(path = "/meta", method = "get")]
    async fn meta(&self) -> Result<MetaResponse> {
        tracing::event!(target: "backend::meta", tracing::Level::DEBUG, "Accessing meta endpoint");
        Ok(MetaResponse::Meta(Json(self.into())))
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! End-to-end tests for GET /api/meta through the full test application.

    #[tokio::test]
    async fn meta_endpoint_returns_correct_data() {
        let app = crate::get_test_app();
        let cli = poem::test::TestClient::new(app);
        let resp = cli.get("/api/meta").send().await;
        resp.assert_status_is_ok();

        let json_value: serde_json::Value = resp.json().await.value().deserialize();

        // Only field presence is asserted; the values depend on settings.
        assert!(
            json_value.get("version").is_some(),
            "Response should have version field"
        );
        assert!(
            json_value.get("name").is_some(),
            "Response should have name field"
        );
    }

    #[tokio::test]
    async fn meta_endpoint_returns_200_status() {
        let app = crate::get_test_app();
        let cli = poem::test::TestClient::new(app);
        let resp = cli.get("/api/meta").send().await;
        resp.assert_status_is_ok();
    }
}
|
||||
37
backend/src/route/mod.rs
Normal file
37
backend/src/route/mod.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
//! API route handlers for the backend server.
|
||||
//!
|
||||
//! This module contains all the HTTP endpoint handlers organized by functionality:
|
||||
//! - Health checks
|
||||
//! - Application metadata
|
||||
|
||||
use poem_openapi::Tags;
|
||||
|
||||
mod health;
|
||||
mod meta;
|
||||
|
||||
use crate::settings::Settings;
|
||||
|
||||
#[derive(Tags)]
// OpenAPI tags used to group endpoints in the generated documentation.
enum ApiCategory {
    // Health-check endpoints.
    Health,
    // Application-metadata endpoints.
    Meta,
}
|
||||
|
||||
/// Container bundling every endpoint implementation exposed by the server.
pub(crate) struct Api {
    // Health-check endpoints.
    health: health::HealthApi,
    // Application-metadata endpoints.
    meta: meta::MetaApi,
}
|
||||
|
||||
impl From<&Settings> for Api {
|
||||
fn from(value: &Settings) -> Self {
|
||||
let health = health::HealthApi;
|
||||
let meta = meta::MetaApi::from(&value.application);
|
||||
Self { health, meta }
|
||||
}
|
||||
}
|
||||
|
||||
impl Api {
    /// Consumes the container and returns the endpoint implementations as a
    /// tuple, in the shape expected by `OpenApiService::new`.
    pub fn apis(self) -> (health::HealthApi, meta::MetaApi) {
        (self.health, self.meta)
    }
}
|
||||
339
backend/src/settings.rs
Normal file
339
backend/src/settings.rs
Normal file
@@ -0,0 +1,339 @@
|
||||
//! Application configuration settings.
|
||||
//!
|
||||
//! This module provides configuration structures that can be loaded from:
|
||||
//! - YAML configuration files (base.yaml and environment-specific files)
|
||||
//! - Environment variables (prefixed with APP__)
|
||||
//!
|
||||
//! Settings include application details, Modbus connection parameters, relay configuration,
|
||||
//! rate limiting, and environment settings.
|
||||
|
||||
/// Application configuration settings.
///
/// Loads configuration from YAML files and environment variables.
#[derive(Debug, serde::Deserialize, Clone, Default)]
pub struct Settings {
    /// Application-specific settings (name, version, host, port, etc.)
    pub application: ApplicationSettings,
    /// Debug mode flag
    pub debug: bool,
    /// Frontend URL for CORS configuration
    pub frontend_url: String,
    /// Rate limiting configuration
    // `serde(default)` makes this whole section optional in the YAML files;
    // missing values fall back to `RateLimitSettings::default()`.
    #[serde(default)]
    pub rate_limit: RateLimitSettings,
    /// Modbus configuration
    pub modbus: ModbusSettings,
    /// Relay configuration
    pub relay: RelaySettings,
}
|
||||
|
||||
impl Settings {
    /// Creates a new `Settings` instance by loading configuration from files and environment variables.
    ///
    /// # Errors
    ///
    /// Returns a `config::ConfigError` if:
    /// - Configuration files cannot be read or parsed
    /// - Required configuration values are missing
    /// - Configuration values cannot be deserialized into the expected types
    ///
    /// # Panics
    ///
    /// Panics if:
    /// - The current directory cannot be determined
    /// - The `APP_ENVIRONMENT` variable contains an invalid value (not "dev", "development", "prod", or "production")
    pub fn new() -> Result<Self, config::ConfigError> {
        // Use CARGO_MANIFEST_DIR to reliably locate settings regardless of where cargo is run from
        let base_path = std::env::var("CARGO_MANIFEST_DIR").map_or_else(
            // Fallback to current_dir for non-cargo builds
            |_| std::env::current_dir().expect("Failed to determine the current directory"),
            std::path::PathBuf::from,
        );
        println!("Reading settings from directory {}", base_path.display());
        let settings_directory = base_path.join("settings");
        // APP_ENVIRONMENT selects which override file is loaded; defaults to dev.
        let environment: Environment = std::env::var("APP_ENVIRONMENT")
            .unwrap_or_else(|_| "dev".into())
            .try_into()
            .expect("Failed to parse APP_ENVIRONMENT");
        let environment_filename = format!("{environment}.yaml");
        // Sources added later override earlier ones: the environment-specific
        // file overrides base.yaml, and APP__-prefixed environment variables
        // (e.g. APP__APPLICATION__PORT) override both.
        let settings = config::Config::builder()
            .add_source(config::File::from(settings_directory.join("base.yaml")))
            .add_source(config::File::from(
                settings_directory.join(environment_filename),
            ))
            .add_source(
                config::Environment::with_prefix("APP")
                    .prefix_separator("__")
                    .separator("__"),
            )
            .build()?;
        settings.try_deserialize()
    }
}
|
||||
|
||||
/// Application-specific configuration settings.
///
/// Populated from the `application` section of the YAML settings files.
// NOTE: the derived `Default` yields empty strings and port 0; real values
// are expected to come from configuration.
#[derive(Debug, serde::Deserialize, Clone, Default)]
pub struct ApplicationSettings {
    /// Application name
    pub name: String,
    /// Application version
    pub version: String,
    /// Port to bind to
    pub port: u16,
    /// Host address to bind to
    pub host: String,
    /// Base URL of the application
    pub base_url: String,
    /// Protocol (http or https)
    pub protocol: String,
}
|
||||
|
||||
/// Application environment.
#[derive(Debug, PartialEq, Eq, Default)]
pub enum Environment {
    /// Development environment
    #[default]
    Development,
    /// Production environment
    Production,
}

impl std::fmt::Display for Environment {
    // The display name doubles as the settings-file stem
    // (`development.yaml` / `production.yaml`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let self_str = match self {
            Self::Development => "development",
            Self::Production => "production",
        };
        write!(f, "{self_str}")
    }
}

impl TryFrom<String> for Environment {
    type Error = String;

    // Delegates to the &str implementation.
    fn try_from(value: String) -> Result<Self, Self::Error> {
        Self::try_from(value.as_str())
    }
}

impl TryFrom<&str> for Environment {
    type Error = String;

    /// Parses an environment name case-insensitively, accepting the short
    /// aliases `dev` and `prod`.
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        match value.to_lowercase().as_str() {
            "development" | "dev" => Ok(Self::Development),
            "production" | "prod" => Ok(Self::Production),
            other => Err(format!(
                "{other} is not a supported environment. Use either `development` or `production`"
            )),
        }
    }
}
|
||||
|
||||
/// Rate limiting configuration.
#[derive(Debug, serde::Deserialize, Clone)]
pub struct RateLimitSettings {
    /// Whether rate limiting is enabled
    #[serde(default = "default_rate_limit_enabled")]
    pub enabled: bool,
    /// Maximum number of requests allowed in the time window (burst size)
    #[serde(default = "default_burst_size")]
    pub burst_size: u32,
    /// Time window in seconds for rate limiting
    #[serde(default = "default_per_seconds")]
    pub per_seconds: u64,
}

impl Default for RateLimitSettings {
    // Reuses the same default_* functions as the serde attributes so the
    // hand-written Default and partial deserialization can never diverge.
    fn default() -> Self {
        Self {
            enabled: default_rate_limit_enabled(),
            burst_size: default_burst_size(),
            per_seconds: default_per_seconds(),
        }
    }
}

// Serde default helpers: enabled, 100 requests per 60-second window.
const fn default_rate_limit_enabled() -> bool {
    true
}

const fn default_burst_size() -> u32 {
    100
}

const fn default_per_seconds() -> u64 {
    60
}
|
||||
|
||||
/// Modbus TCP connection configuration.
///
/// Configures the connection parameters for communicating with the Modbus relay device
/// using Modbus RTU over TCP protocol.
#[derive(Debug, serde::Deserialize, Clone)]
pub struct ModbusSettings {
    /// IP address or hostname of the Modbus device
    pub host: String,
    /// TCP port for Modbus communication (standard Modbus TCP port is 502)
    pub port: u16,
    /// Modbus slave/device ID (unit identifier)
    pub slave_id: u8,
    /// Operation timeout in seconds
    pub timeout_secs: u8,
}

impl Default for ModbusSettings {
    // Defaults: device at 192.168.0.200:502, unit id 0, 5-second timeout.
    fn default() -> Self {
        Self {
            host: "192.168.0.200".to_string(),
            port: 502,
            slave_id: 0,
            timeout_secs: 5,
        }
    }
}
|
||||
|
||||
/// Relay control configuration.
///
/// Configures parameters for relay management and labeling.
#[derive(Debug, serde::Deserialize, Clone)]
pub struct RelaySettings {
    /// Maximum length for custom relay labels (in characters)
    pub label_max_length: u8,
}

impl Default for RelaySettings {
    // Default label length cap: 8 characters.
    fn default() -> Self {
        Self {
            label_max_length: 8,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for `Environment` parsing/formatting and for
    //! `RateLimitSettings` defaults and (partial) deserialization.
    use super::*;

    #[test]
    fn environment_display_development() {
        let env = Environment::Development;
        assert_eq!(env.to_string(), "development");
    }

    #[test]
    fn environment_display_production() {
        let env = Environment::Production;
        assert_eq!(env.to_string(), "production");
    }

    // Parsing is case-insensitive and accepts the `dev` alias.
    #[test]
    fn environment_from_str_development() {
        assert_eq!(
            Environment::try_from("development").unwrap(),
            Environment::Development
        );
        assert_eq!(
            Environment::try_from("dev").unwrap(),
            Environment::Development
        );
        assert_eq!(
            Environment::try_from("Development").unwrap(),
            Environment::Development
        );
        assert_eq!(
            Environment::try_from("DEV").unwrap(),
            Environment::Development
        );
    }

    // Parsing is case-insensitive and accepts the `prod` alias.
    #[test]
    fn environment_from_str_production() {
        assert_eq!(
            Environment::try_from("production").unwrap(),
            Environment::Production
        );
        assert_eq!(
            Environment::try_from("prod").unwrap(),
            Environment::Production
        );
        assert_eq!(
            Environment::try_from("Production").unwrap(),
            Environment::Production
        );
        assert_eq!(
            Environment::try_from("PROD").unwrap(),
            Environment::Production
        );
    }

    #[test]
    fn environment_from_str_invalid() {
        let result = Environment::try_from("invalid");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("not a supported environment"));
    }

    #[test]
    fn environment_from_string_development() {
        assert_eq!(
            Environment::try_from("development".to_string()).unwrap(),
            Environment::Development
        );
    }

    #[test]
    fn environment_from_string_production() {
        assert_eq!(
            Environment::try_from("production".to_string()).unwrap(),
            Environment::Production
        );
    }

    #[test]
    fn environment_from_string_invalid() {
        let result = Environment::try_from("invalid".to_string());
        assert!(result.is_err());
    }

    #[test]
    fn environment_default_is_development() {
        let env = Environment::default();
        assert_eq!(env, Environment::Development);
    }

    #[test]
    fn rate_limit_settings_default() {
        let settings = RateLimitSettings::default();
        assert!(settings.enabled);
        assert_eq!(settings.burst_size, 100);
        assert_eq!(settings.per_seconds, 60);
    }

    #[test]
    fn rate_limit_settings_deserialize_full() {
        let json = r#"{"enabled": true, "burst_size": 50, "per_seconds": 30}"#;
        let settings: RateLimitSettings = serde_json::from_str(json).unwrap();
        assert!(settings.enabled);
        assert_eq!(settings.burst_size, 50);
        assert_eq!(settings.per_seconds, 30);
    }

    // Missing fields fall back to the serde default functions.
    #[test]
    fn rate_limit_settings_deserialize_partial() {
        let json = r#"{"enabled": false}"#;
        let settings: RateLimitSettings = serde_json::from_str(json).unwrap();
        assert!(!settings.enabled);
        assert_eq!(settings.burst_size, 100); // default
        assert_eq!(settings.per_seconds, 60); // default
    }

    #[test]
    fn rate_limit_settings_deserialize_empty() {
        let json = "{}";
        let settings: RateLimitSettings = serde_json::from_str(json).unwrap();
        assert!(settings.enabled); // default
        assert_eq!(settings.burst_size, 100); // default
        assert_eq!(settings.per_seconds, 60); // default
    }
}
|
||||
229
backend/src/startup.rs
Normal file
229
backend/src/startup.rs
Normal file
@@ -0,0 +1,229 @@
|
||||
//! Application startup and server configuration.
|
||||
//!
|
||||
//! This module handles:
|
||||
//! - Building the application with routes and middleware
|
||||
//! - Setting up the OpenAPI service and Swagger UI
|
||||
//! - Configuring CORS
|
||||
//! - Starting the HTTP server
|
||||
|
||||
use poem::middleware::{AddDataEndpoint, Cors, CorsEndpoint};
|
||||
use poem::{EndpointExt, Route};
|
||||
use poem_openapi::OpenApiService;
|
||||
|
||||
use crate::{
|
||||
middleware::rate_limit::{RateLimit, RateLimitConfig},
|
||||
route::Api,
|
||||
settings::Settings,
|
||||
};
|
||||
|
||||
use crate::middleware::rate_limit::RateLimitEndpoint;
|
||||
|
||||
// Concrete server type: a poem TCP server whose listener cannot fail to be produced.
type Server = poem::Server<poem::listener::TcpListener<String>, std::convert::Infallible>;
/// The configured application with rate limiting, CORS, and settings data.
// The alias spells out the exact middleware nesting order applied in
// `From<Application> for RunnableApplication`.
pub type App = AddDataEndpoint<CorsEndpoint<RateLimitEndpoint<Route>>, Settings>;
|
||||
|
||||
/// Application builder that holds the server configuration before running.
pub struct Application {
    // The underlying poem server (listener already chosen, not yet running).
    server: Server,
    // Bare route tree, before rate limiting/CORS/data middleware are applied.
    app: poem::Route,
    // Configured bind host, kept for introspection via `host()`.
    host: String,
    // Configured bind port, kept for introspection via `port()`.
    port: u16,
    // Full settings, consumed when converting into a `RunnableApplication`.
    settings: Settings,
}
|
||||
|
||||
/// A fully configured application ready to run.
pub struct RunnableApplication {
    // The server that will drive the endpoint.
    server: Server,
    // The fully wrapped endpoint (rate limiting + CORS + settings data).
    app: App,
}

impl RunnableApplication {
    /// Runs the application server.
    ///
    /// # Errors
    ///
    /// Returns a `std::io::Error` if the server fails to start or encounters
    /// an I/O error during runtime (e.g. port already in use, network issues).
    pub async fn run(self) -> Result<(), std::io::Error> {
        self.server.run(self.app).await
    }
}
|
||||
|
||||
impl From<RunnableApplication> for App {
|
||||
fn from(value: RunnableApplication) -> Self {
|
||||
value.app
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Application> for RunnableApplication {
    fn from(value: Application) -> Self {
        // Configure rate limiting based on settings
        let rate_limit_config = if value.settings.rate_limit.enabled {
            tracing::event!(
                target: "backend::startup",
                tracing::Level::INFO,
                burst_size = value.settings.rate_limit.burst_size,
                per_seconds = value.settings.rate_limit.per_seconds,
                "Rate limiting enabled"
            );
            RateLimitConfig::new(
                value.settings.rate_limit.burst_size,
                value.settings.rate_limit.per_seconds,
            )
        } else {
            tracing::event!(
                target: "backend::startup",
                tracing::Level::INFO,
                "Rate limiting disabled (using very high limits)"
            );
            // Use very high limits to effectively disable rate limiting
            // (the middleware is still installed so the `App` type stays the same).
            RateLimitConfig::new(u32::MAX, 1)
        };

        // Middleware order must match the `App` type alias: rate limiting
        // wraps the routes, CORS wraps that, then settings are attached as data.
        let app = value
            .app
            .with(RateLimit::new(&rate_limit_config))
            .with(Cors::new())
            .data(value.settings);

        let server = value.server;
        Self { server, app }
    }
}
|
||||
|
||||
impl Application {
|
||||
fn setup_app(settings: &Settings) -> poem::Route {
|
||||
let api_service = OpenApiService::new(
|
||||
Api::from(settings).apis(),
|
||||
settings.application.clone().name,
|
||||
settings.application.clone().version,
|
||||
)
|
||||
.url_prefix("/api");
|
||||
let ui = api_service.swagger_ui();
|
||||
poem::Route::new()
|
||||
.nest("/api", api_service.clone())
|
||||
.nest("/specs", api_service.spec_endpoint_yaml())
|
||||
.nest("/", ui)
|
||||
}
|
||||
|
||||
fn setup_server(
|
||||
settings: &Settings,
|
||||
tcp_listener: Option<poem::listener::TcpListener<String>>,
|
||||
) -> Server {
|
||||
let tcp_listener = tcp_listener.unwrap_or_else(|| {
|
||||
let address = format!(
|
||||
"{}:{}",
|
||||
settings.application.host, settings.application.port
|
||||
);
|
||||
poem::listener::TcpListener::bind(address)
|
||||
});
|
||||
poem::Server::new(tcp_listener)
|
||||
}
|
||||
|
||||
/// Builds a new application with the given settings and optional TCP listener.
|
||||
///
|
||||
/// If no listener is provided, one will be created based on the settings.
|
||||
#[must_use]
|
||||
pub fn build(
|
||||
settings: Settings,
|
||||
tcp_listener: Option<poem::listener::TcpListener<String>>,
|
||||
) -> Self {
|
||||
let port = settings.application.port;
|
||||
let host = settings.application.clone().host;
|
||||
let app = Self::setup_app(&settings);
|
||||
let server = Self::setup_server(&settings, tcp_listener);
|
||||
Self {
|
||||
server,
|
||||
app,
|
||||
host,
|
||||
port,
|
||||
settings,
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts the application into a runnable application.
|
||||
#[must_use]
|
||||
pub fn make_app(self) -> RunnableApplication {
|
||||
self.into()
|
||||
}
|
||||
|
||||
/// Returns the host address the application is configured to bind to.
|
||||
#[must_use]
|
||||
pub fn host(&self) -> String {
|
||||
self.host.clone()
|
||||
}
|
||||
|
||||
/// Returns the port the application is configured to bind to.
|
||||
#[must_use]
|
||||
pub const fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Tests for `Application::build` and its host/port accessors.
    use super::*;

    // Minimal hand-built settings (rate limiting disabled) so the tests do not
    // depend on the settings/ directory or on environment variables.
    fn create_test_settings() -> Settings {
        Settings {
            application: crate::settings::ApplicationSettings {
                name: "test-app".to_string(),
                version: "1.0.0".to_string(),
                port: 8080,
                host: "127.0.0.1".to_string(),
                base_url: "http://localhost:8080".to_string(),
                protocol: "http".to_string(),
            },
            debug: false,
            frontend_url: "http://localhost:3000".to_string(),
            rate_limit: crate::settings::RateLimitSettings {
                enabled: false,
                burst_size: 100,
                per_seconds: 60,
            },
            modbus: crate::settings::ModbusSettings::default(),
            relay: crate::settings::RelaySettings::default(),
        }
    }

    #[test]
    fn application_build_and_host() {
        let settings = create_test_settings();
        let app = Application::build(settings.clone(), None);
        assert_eq!(app.host(), settings.application.host);
    }

    #[test]
    fn application_build_and_port() {
        let settings = create_test_settings();
        let app = Application::build(settings, None);
        assert_eq!(app.port(), 8080);
    }

    #[test]
    fn application_host_returns_correct_value() {
        let settings = create_test_settings();
        let app = Application::build(settings, None);
        assert_eq!(app.host(), "127.0.0.1");
    }

    #[test]
    fn application_port_returns_correct_value() {
        let settings = create_test_settings();
        let app = Application::build(settings, None);
        assert_eq!(app.port(), 8080);
    }

    #[test]
    fn application_with_custom_listener() {
        let settings = create_test_settings();
        // Grab an OS-assigned free port number to hand to the poem listener.
        // NOTE(review): the std listener stays bound for the duration of the
        // test; this relies on the poem listener not binding eagerly — confirm.
        let tcp_listener =
            std::net::TcpListener::bind("127.0.0.1:0").expect("Failed to bind random port");
        let port = tcp_listener.local_addr().unwrap().port();
        let listener = poem::listener::TcpListener::bind(format!("127.0.0.1:{port}"));

        let app = Application::build(settings, Some(listener));
        // host()/port() report the configured settings, not the custom listener.
        assert_eq!(app.host(), "127.0.0.1");
        assert_eq!(app.port(), 8080);
    }
}
|
||||
69
backend/src/telemetry.rs
Normal file
69
backend/src/telemetry.rs
Normal file
@@ -0,0 +1,69 @@
|
||||
//! Logging and tracing configuration.
|
||||
//!
|
||||
//! This module provides utilities for setting up structured logging using the tracing crate.
|
||||
//! Supports both pretty-printed logs for development and JSON logs for production.
|
||||
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
|
||||
/// Creates a tracing subscriber configured for the given debug mode.
///
/// In debug mode, logs are pretty-printed to stdout.
/// In production mode, logs are output as JSON.
#[must_use]
pub fn get_subscriber(debug: bool) -> impl tracing::Subscriber + Send + Sync {
    // Default level: `debug` when debugging, `info` otherwise; an explicit
    // RUST_LOG-style env filter takes precedence when set.
    let env_filter = if debug { "debug" } else { "info" }.to_string();
    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(env_filter));
    // Pretty stdout layer is attached unconditionally.
    let stdout_log = tracing_subscriber::fmt::layer().pretty();
    let subscriber = tracing_subscriber::Registry::default()
        .with(env_filter)
        .with(stdout_log);
    // NOTE(review): in non-debug mode a JSON layer is added *in addition to*
    // the pretty layer above, so each event is emitted twice to stdout —
    // confirm this double output is intended.
    let json_log = if debug {
        None
    } else {
        Some(tracing_subscriber::fmt::layer().json())
    };
    subscriber.with(json_log)
}
|
||||
|
||||
/// Initializes the global tracing subscriber.
///
/// May only succeed once per process: `set_global_default` fails on every
/// subsequent call, which this function turns into a panic.
///
/// # Panics
///
/// Panics if:
/// - A global subscriber has already been set
/// - The subscriber cannot be set as the global default
pub fn init_subscriber(subscriber: impl tracing::Subscriber + Send + Sync) {
    tracing::subscriber::set_global_default(subscriber).expect("Failed to set subscriber");
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Smoke tests: subscriber construction must not panic in either mode.
    //! Note these deliberately do not call `init_subscriber`, which may only
    //! be invoked once per process.
    use super::*;

    #[test]
    fn get_subscriber_debug_mode() {
        let subscriber = get_subscriber(true);
        // If we can create the subscriber without panicking, the test passes
        // We can't easily inspect the subscriber's internals, but we can verify it's created
        let _ = subscriber;
    }

    #[test]
    fn get_subscriber_production_mode() {
        let subscriber = get_subscriber(false);
        // If we can create the subscriber without panicking, the test passes
        let _ = subscriber;
    }

    #[test]
    fn get_subscriber_creates_valid_subscriber() {
        // Test both debug and non-debug modes create valid subscribers
        let debug_subscriber = get_subscriber(true);
        let prod_subscriber = get_subscriber(false);

        // Basic smoke test - if these are created without panicking, they're valid
        let _ = debug_subscriber;
        let _ = prod_subscriber;
    }
}
|
||||
266
backend/tests/sqlite_repository_test.rs
Normal file
266
backend/tests/sqlite_repository_test.rs
Normal file
@@ -0,0 +1,266 @@
|
||||
//! Integration tests for `SqliteRelayLabelRepository`.
|
||||
//!
|
||||
//! These tests verify that the SQLite repository correctly:
|
||||
//! - Creates an in-memory database
|
||||
//! - Applies schema migrations
|
||||
//! - Validates table structure and constraints
|
||||
|
||||
use sta::domain::relay::repository::RepositoryError;
|
||||
use sta::infrastructure::persistence::sqlite_repository::SqliteRelayLabelRepository;
|
||||
|
||||
/// Test that `in_memory()` successfully creates an in-memory database.
|
||||
///
|
||||
/// **T006 Requirement**: `SqliteRelayLabelRepository::in_memory()` creates in-memory DB with schema
|
||||
#[tokio::test]
|
||||
async fn test_in_memory_creates_database() {
|
||||
let result = SqliteRelayLabelRepository::in_memory().await;
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create in-memory database: {:?}",
|
||||
result.err()
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that the schema migration creates the `RelayLabels` table.
|
||||
///
|
||||
/// **T006 Requirement**: Verify schema is applied correctly
|
||||
#[tokio::test]
|
||||
async fn test_in_memory_applies_schema() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Verify the table exists by querying it
|
||||
let result: Result<(String,), sqlx::Error> =
|
||||
sqlx::query_as("SELECT name FROM sqlite_master WHERE type='table' AND name='RelayLabels'")
|
||||
.fetch_one(repo.pool())
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"RelayLabels table should exist after migration"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that the `RelayLabels` table has the correct schema.
|
||||
///
|
||||
/// **T006 Requirement**: Verify table structure matches migration
|
||||
#[tokio::test]
|
||||
async fn test_relay_labels_table_structure() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Query table info to verify column structure
|
||||
let columns: Vec<(String, String)> =
|
||||
sqlx::query_as("SELECT name, type FROM pragma_table_info('RelayLabels') ORDER BY cid")
|
||||
.fetch_all(repo.pool())
|
||||
.await
|
||||
.expect("Failed to query table structure");
|
||||
|
||||
assert_eq!(columns.len(), 2, "RelayLabels table should have 2 columns");
|
||||
|
||||
// Verify relay_id column
|
||||
assert_eq!(columns[0].0, "relay_id", "First column should be relay_id");
|
||||
assert_eq!(columns[0].1, "INTEGER", "relay_id should be INTEGER");
|
||||
|
||||
// Verify label column
|
||||
assert_eq!(columns[1].0, "label", "Second column should be label");
|
||||
assert_eq!(columns[1].1, "TEXT", "label should be TEXT");
|
||||
}
|
||||
|
||||
/// Test that `relay_id` is the primary key.
|
||||
///
|
||||
/// **T006 Requirement**: Verify primary key constraint
|
||||
#[tokio::test]
|
||||
async fn test_relay_id_primary_key() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Insert first row with relay_id = 1
|
||||
let insert1: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (1, 'Test')")
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(insert1.is_ok(), "First insert should succeed");
|
||||
|
||||
// Try to insert duplicate relay_id = 1
|
||||
let insert2: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (1, 'Duplicate')")
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
insert2.is_err(),
|
||||
"Duplicate relay_id should fail due to PRIMARY KEY constraint"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that `relay_id` must be between 1 and 8.
|
||||
///
|
||||
/// **T006 Requirement**: Verify CHECK constraint on relay_id range
|
||||
#[tokio::test]
|
||||
async fn test_relay_id_range_constraint() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Valid range: 1-8 should succeed
|
||||
for id in 1..=8 {
|
||||
let result: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (?, ?)")
|
||||
.bind(id)
|
||||
.bind(format!("Relay {}", id))
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"relay_id {} should be valid (range 1-8)",
|
||||
id
|
||||
);
|
||||
}
|
||||
|
||||
// Below valid range: 0 should fail
|
||||
let result_below: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (0, 'Invalid')")
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result_below.is_err(),
|
||||
"relay_id = 0 should fail CHECK constraint"
|
||||
);
|
||||
|
||||
// Above valid range: 9 should fail
|
||||
let result_above: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (9, 'Invalid')")
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result_above.is_err(),
|
||||
"relay_id = 9 should fail CHECK constraint"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that `label` cannot exceed 50 characters.
|
||||
///
|
||||
/// **T006 Requirement**: Verify CHECK constraint on label length
|
||||
#[tokio::test]
|
||||
async fn test_label_length_constraint() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Valid length: 50 characters should succeed
|
||||
let label_50 = "A".repeat(50);
|
||||
let result_valid: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (1, ?)")
|
||||
.bind(&label_50)
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result_valid.is_ok(),
|
||||
"Label with 50 characters should be valid"
|
||||
);
|
||||
|
||||
// Invalid length: 51 characters should fail
|
||||
let label_51 = "B".repeat(51);
|
||||
let result_invalid: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (2, ?)")
|
||||
.bind(&label_51)
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result_invalid.is_err(),
|
||||
"Label with 51 characters should fail CHECK constraint"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that `label` cannot be NULL.
|
||||
///
|
||||
/// **T006 Requirement**: Verify NOT NULL constraint on label
|
||||
#[tokio::test]
|
||||
async fn test_label_not_null_constraint() {
|
||||
let repo = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create in-memory database");
|
||||
|
||||
// Attempt to insert NULL label
|
||||
let result: Result<sqlx::sqlite::SqliteQueryResult, sqlx::Error> =
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (1, NULL)")
|
||||
.execute(repo.pool())
|
||||
.await;
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"NULL label should fail NOT NULL constraint"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that multiple in-memory repositories are isolated.
|
||||
///
|
||||
/// **T006 Requirement**: Verify in-memory instances are independent
|
||||
#[tokio::test]
|
||||
async fn test_multiple_in_memory_instances_isolated() {
|
||||
let repo1 = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create first in-memory database");
|
||||
|
||||
let repo2 = SqliteRelayLabelRepository::in_memory()
|
||||
.await
|
||||
.expect("Failed to create second in-memory database");
|
||||
|
||||
// Insert data into repo1
|
||||
sqlx::query("INSERT INTO RelayLabels (relay_id, label) VALUES (1, 'Repo1')")
|
||||
.execute(repo1.pool())
|
||||
.await
|
||||
.expect("Failed to insert into repo1");
|
||||
|
||||
// Verify repo2 is empty (no data from repo1)
|
||||
let count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM RelayLabels")
|
||||
.fetch_one(repo2.pool())
|
||||
.await
|
||||
.expect("Failed to query repo2");
|
||||
|
||||
assert_eq!(
|
||||
count.0, 0,
|
||||
"Second in-memory instance should be isolated from first"
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that `new()` with file path creates a persistent database.
|
||||
///
|
||||
/// **T006 Requirement**: Verify file-based database creation
|
||||
#[tokio::test]
|
||||
async fn test_new_creates_file_database() {
|
||||
let temp_db = tempfile::NamedTempFile::new().expect("Failed to create temp file");
|
||||
let db_path = format!("sqlite://{}", temp_db.path().to_str().unwrap());
|
||||
|
||||
let result = SqliteRelayLabelRepository::new(&db_path).await;
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create file-based database: {:?}",
|
||||
result.err()
|
||||
);
|
||||
|
||||
// Verify the file exists and has content
|
||||
let metadata = std::fs::metadata(temp_db.path()).expect("Database file should exist");
|
||||
assert!(metadata.len() > 0, "Database file should not be empty");
|
||||
}
|
||||
|
||||
/// Test that `new()` with invalid path returns error.
|
||||
///
|
||||
/// **T006 Requirement**: Verify error handling for invalid paths
|
||||
#[tokio::test]
|
||||
async fn test_new_invalid_path_returns_error() {
|
||||
let result =
|
||||
SqliteRelayLabelRepository::new("sqlite:///invalid/path/that/does/not/exist/db.sqlite")
|
||||
.await;
|
||||
|
||||
assert!(result.is_err(), "Invalid database path should return error");
|
||||
|
||||
match result {
|
||||
Err(RepositoryError::DatabaseError(_)) => {
|
||||
// Expected error type
|
||||
}
|
||||
_ => panic!("Expected RepositoryError::DatabaseError for invalid path"),
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user