feat(application): HealthMonitor service and hardware integration test
Add HealthMonitor service for tracking system health status with comprehensive state transition logic and thread-safe operations. Includes 16 unit tests covering all functionality including concurrent access scenarios. Add optional Modbus hardware integration tests with 7 test cases for real device testing. Tests are marked as ignored and can be run with `cargo test -- --ignored`. Ref: T034, T039, T040 (specs/001-modbus-relay-control/tasks.org)
This commit is contained in:
331
backend/src/application/health/health_monitor.rs
Normal file
331
backend/src/application/health/health_monitor.rs
Normal file
@@ -0,0 +1,331 @@
|
||||
//! Health monitoring service for tracking system health status.
|
||||
//!
|
||||
//! The `HealthMonitor` service tracks the health status of the Modbus relay controller
|
||||
//! by monitoring consecutive errors and transitions between healthy, degraded, and unhealthy states.
|
||||
//! This service implements the health monitoring requirements from FR-020 and FR-021.
|
||||
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::domain::health::HealthStatus;
|
||||
|
||||
/// Health monitor service for tracking system health status.
///
/// The `HealthMonitor` service maintains the current health status and provides
/// methods to track successes and failures, transitioning between states according
/// to the business rules defined in the domain layer.
///
/// Cloning a `HealthMonitor` is cheap and intentional: clones share the same
/// underlying status through the `Arc`, so every clone observes and mutates
/// the same state (this is what makes it usable across `tokio::spawn` tasks).
#[derive(Debug, Clone)]
pub struct HealthMonitor {
    /// Current health status, protected by a mutex for thread-safe access.
    /// An async (`tokio::sync`) mutex is used so callers lock with `.await`
    /// instead of blocking the executor thread.
    current_status: Arc<Mutex<HealthStatus>>,
}
|
||||
|
||||
impl HealthMonitor {
|
||||
/// Creates a new `HealthMonitor` with initial `Healthy` status.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self::with_initial_status(HealthStatus::Healthy)
|
||||
}
|
||||
|
||||
/// Creates a new `HealthMonitor` with the specified initial status.
|
||||
#[must_use]
|
||||
pub fn with_initial_status(initial_status: HealthStatus) -> Self {
|
||||
Self {
|
||||
current_status: Arc::new(Mutex::new(initial_status)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Records a successful operation, potentially transitioning to `Healthy` status.
|
||||
///
|
||||
/// This method transitions the health status according to the following rules:
|
||||
/// - If currently `Healthy`: remains `Healthy`
|
||||
/// - If currently `Degraded`: transitions to `Healthy` (recovery)
|
||||
/// - If currently `Unhealthy`: transitions to `Healthy` (recovery)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The new health status after recording the success.
|
||||
pub async fn track_success(&self) -> HealthStatus {
|
||||
let mut status = self.current_status.lock().await;
|
||||
let new_status = status.clone().record_success();
|
||||
*status = new_status.clone();
|
||||
new_status
|
||||
}
|
||||
|
||||
/// Records a failed operation, potentially transitioning to `Degraded` or `Unhealthy` status.
|
||||
///
|
||||
/// This method transitions the health status according to the following rules:
|
||||
/// - If currently `Healthy`: transitions to `Degraded` with 1 consecutive error
|
||||
/// - If currently `Degraded`: increments consecutive error count
|
||||
/// - If currently `Unhealthy`: remains `Unhealthy`
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The new health status after recording the failure.
|
||||
pub async fn track_failure(&self) -> HealthStatus {
|
||||
let mut status = self.current_status.lock().await;
|
||||
let new_status = status.clone().record_error();
|
||||
*status = new_status.clone();
|
||||
new_status
|
||||
}
|
||||
|
||||
/// Marks the system as unhealthy with the specified reason.
|
||||
///
|
||||
/// This method immediately transitions to `Unhealthy` status regardless of
|
||||
/// the current status, providing a way to explicitly mark critical failures.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `reason`: Human-readable description of the failure reason.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The new `Unhealthy` health status.
|
||||
pub async fn mark_unhealthy(&self, reason: impl Into<String>) -> HealthStatus {
|
||||
let mut status = self.current_status.lock().await;
|
||||
let new_status = status.clone().mark_unhealthy(reason);
|
||||
*status = new_status.clone();
|
||||
new_status
|
||||
}
|
||||
|
||||
/// Gets the current health status without modifying it.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The current health status.
|
||||
pub async fn get_status(&self) -> HealthStatus {
|
||||
let status = self.current_status.lock().await;
|
||||
status.clone()
|
||||
}
|
||||
|
||||
/// Checks if the system is currently healthy.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `true` if the current status is `Healthy`, `false` otherwise.
|
||||
pub async fn is_healthy(&self) -> bool {
|
||||
let status = self.current_status.lock().await;
|
||||
status.is_healthy()
|
||||
}
|
||||
|
||||
/// Checks if the system is currently degraded.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `true` if the current status is `Degraded`, `false` otherwise.
|
||||
pub async fn is_degraded(&self) -> bool {
|
||||
let status = self.current_status.lock().await;
|
||||
status.is_degraded()
|
||||
}
|
||||
|
||||
/// Checks if the system is currently unhealthy.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `true` if the current status is `Unhealthy`, `false` otherwise.
|
||||
pub async fn is_unhealthy(&self) -> bool {
|
||||
let status = self.current_status.lock().await;
|
||||
status.is_unhealthy()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HealthMonitor {
    /// Equivalent to [`HealthMonitor::new`]: starts in the `Healthy` state.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_health_monitor_initial_state() {
        let monitor = HealthMonitor::new();
        let status = monitor.get_status().await;
        assert!(status.is_healthy());
    }

    #[tokio::test]
    async fn test_health_monitor_with_initial_status() {
        let initial_status = HealthStatus::degraded(3);
        let monitor = HealthMonitor::with_initial_status(initial_status.clone());
        let status = monitor.get_status().await;
        assert_eq!(status, initial_status);
    }

    #[tokio::test]
    async fn test_track_success_from_healthy() {
        let monitor = HealthMonitor::new();
        let status = monitor.track_success().await;
        assert!(status.is_healthy());
    }

    #[tokio::test]
    async fn test_track_success_from_degraded() {
        let monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(5));
        let status = monitor.track_success().await;
        assert!(status.is_healthy());
    }

    #[tokio::test]
    async fn test_track_success_from_unhealthy() {
        let monitor = HealthMonitor::with_initial_status(HealthStatus::unhealthy("Test failure"));
        let status = monitor.track_success().await;
        assert!(status.is_healthy());
    }

    #[tokio::test]
    async fn test_track_failure_from_healthy() {
        let monitor = HealthMonitor::new();
        let status = monitor.track_failure().await;
        assert!(status.is_degraded());
        assert_eq!(status, HealthStatus::degraded(1));
    }

    #[tokio::test]
    async fn test_track_failure_from_degraded() {
        let monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(2));
        let status = monitor.track_failure().await;
        assert!(status.is_degraded());
        assert_eq!(status, HealthStatus::degraded(3));
    }

    #[tokio::test]
    async fn test_track_failure_from_unhealthy() {
        let monitor =
            HealthMonitor::with_initial_status(HealthStatus::unhealthy("Critical failure"));
        let status = monitor.track_failure().await;
        assert!(status.is_unhealthy());
        assert_eq!(status, HealthStatus::unhealthy("Critical failure"));
    }

    #[tokio::test]
    async fn test_mark_unhealthy() {
        let monitor = HealthMonitor::new();
        let status = monitor.mark_unhealthy("Device disconnected").await;
        assert!(status.is_unhealthy());
        assert_eq!(status, HealthStatus::unhealthy("Device disconnected"));
    }

    #[tokio::test]
    async fn test_mark_unhealthy_overwrites_previous() {
        let monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(3));
        let status = monitor.mark_unhealthy("New failure").await;
        assert!(status.is_unhealthy());
        assert_eq!(status, HealthStatus::unhealthy("New failure"));
    }

    #[tokio::test]
    async fn test_get_status() {
        let monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(2));
        let status = monitor.get_status().await;
        assert_eq!(status, HealthStatus::degraded(2));
    }

    #[tokio::test]
    async fn test_is_healthy() {
        let healthy_monitor = HealthMonitor::new();
        assert!(healthy_monitor.is_healthy().await);

        let degraded_monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(1));
        assert!(!degraded_monitor.is_healthy().await);

        let unhealthy_monitor =
            HealthMonitor::with_initial_status(HealthStatus::unhealthy("Failure"));
        assert!(!unhealthy_monitor.is_healthy().await);
    }

    #[tokio::test]
    async fn test_is_degraded() {
        let healthy_monitor = HealthMonitor::new();
        assert!(!healthy_monitor.is_degraded().await);

        let degraded_monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(1));
        assert!(degraded_monitor.is_degraded().await);

        let unhealthy_monitor =
            HealthMonitor::with_initial_status(HealthStatus::unhealthy("Failure"));
        assert!(!unhealthy_monitor.is_degraded().await);
    }

    #[tokio::test]
    async fn test_is_unhealthy() {
        let healthy_monitor = HealthMonitor::new();
        assert!(!healthy_monitor.is_unhealthy().await);

        let degraded_monitor = HealthMonitor::with_initial_status(HealthStatus::degraded(1));
        assert!(!degraded_monitor.is_unhealthy().await);

        let unhealthy_monitor =
            HealthMonitor::with_initial_status(HealthStatus::unhealthy("Failure"));
        assert!(unhealthy_monitor.is_unhealthy().await);
    }

    #[tokio::test]
    async fn test_state_transitions_sequence() {
        let monitor = HealthMonitor::new();

        // Start healthy
        assert!(monitor.is_healthy().await);

        // First failure -> Degraded with 1 error
        let status = monitor.track_failure().await;
        assert!(status.is_degraded());
        assert_eq!(status, HealthStatus::degraded(1));

        // Second failure -> Degraded with 2 errors
        let status = monitor.track_failure().await;
        assert_eq!(status, HealthStatus::degraded(2));

        // Third failure -> Degraded with 3 errors
        let status = monitor.track_failure().await;
        assert_eq!(status, HealthStatus::degraded(3));

        // Recovery -> Healthy
        let status = monitor.track_success().await;
        assert!(status.is_healthy());

        // Another failure -> Degraded with 1 error
        let status = monitor.track_failure().await;
        assert_eq!(status, HealthStatus::degraded(1));

        // Mark as unhealthy -> Unhealthy
        let status = monitor.mark_unhealthy("Critical error").await;
        assert!(status.is_unhealthy());

        // Recovery from unhealthy -> Healthy
        let status = monitor.track_success().await;
        assert!(status.is_healthy());
    }

    #[tokio::test]
    async fn test_concurrent_access() {
        let monitor = HealthMonitor::new();

        // Create multiple tasks that access the monitor concurrently.
        // We need to clone the monitor for each task since tokio::spawn requires
        // 'static; clones share the same underlying state via Arc.
        let monitor1 = monitor.clone();
        let monitor2 = monitor.clone();
        let monitor3 = monitor.clone();
        let monitor4 = monitor.clone();

        let task1 = tokio::spawn(async move { monitor1.track_failure().await });
        let task2 = tokio::spawn(async move { monitor2.track_failure().await });
        let task3 = tokio::spawn(async move { monitor3.track_success().await });
        let task4 = tokio::spawn(async move { monitor4.get_status().await });

        // Wait for all tasks to complete
        let (result1, result2, result3, result4) = tokio::join!(task1, task2, task3, task4);

        // All operations should complete without panicking
        result1.expect("Task should complete successfully");
        result2.expect("Task should complete successfully");
        result3.expect("Task should complete successfully");
        result4.expect("Task should complete successfully");

        // FIX: the original asserted `final_status.is_healthy()`, but the order
        // in which the spawned tasks acquire the lock is not specified, so a
        // failure task may run after the success task, leaving the monitor in
        // Degraded(1) or Degraded(2). Any interleaving of two failures and one
        // success starting from Healthy ends in Healthy or Degraded — never
        // Unhealthy — so that is what we assert.
        let final_status = monitor.get_status().await;
        assert!(
            final_status.is_healthy() || final_status.is_degraded(),
            "final status must reflect some interleaving of 2 failures and 1 success, got: {final_status:?}"
        );
    }
}
|
||||
6
backend/src/application/health/mod.rs
Normal file
6
backend/src/application/health/mod.rs
Normal file
@@ -0,0 +1,6 @@
|
||||
//! Health monitoring application layer.
|
||||
//!
|
||||
//! This module contains the health monitoring service that tracks the system's
|
||||
//! health status and manages state transitions between healthy, degraded, and unhealthy states.
|
||||
|
||||
pub mod health_monitor;
|
||||
@@ -11,6 +11,11 @@
|
||||
//! - **Use case driven**: Each module represents a specific business use case
|
||||
//! - **Testable in isolation**: Can be tested with mock infrastructure implementations
|
||||
//!
|
||||
//! # Submodules
|
||||
//!
|
||||
//! - `health`: Health monitoring service
|
||||
//! - `health_monitor`: Tracks system health status and state transitions
|
||||
//!
|
||||
//! # Planned Submodules
|
||||
//!
|
||||
//! - `relay`: Relay control use cases
|
||||
@@ -58,3 +63,5 @@
|
||||
//! - Architecture: `specs/constitution.md` - Hexagonal Architecture principles
|
||||
//! - Use cases: `specs/001-modbus-relay-control/plan.md` - Implementation plan
|
||||
//! - Domain types: [`crate::domain`] - Domain entities and value objects
|
||||
|
||||
pub mod health;
|
||||
|
||||
@@ -10,6 +10,10 @@ use super::*;
|
||||
mod t025a_connection_setup_tests {
|
||||
use super::*;
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// T025a Test 1: `new()` with valid config connects successfully
|
||||
///
|
||||
/// This test verifies that `ModbusRelayController::new()` can establish
|
||||
@@ -21,13 +25,10 @@ mod t025a_connection_setup_tests {
|
||||
#[ignore = "Requires running Modbus TCP server"]
|
||||
async fn test_new_with_valid_config_connects_successfully() {
|
||||
// Arrange: Use localhost test server
|
||||
let host = "127.0.0.1";
|
||||
let port = 5020; // Test Modbus TCP port
|
||||
let slave_id = 1;
|
||||
let timeout_secs = 5;
|
||||
|
||||
// Act: Attempt to create controller
|
||||
let result = ModbusRelayController::new(host, port, slave_id, timeout_secs).await;
|
||||
let result = ModbusRelayController::new(HOST, PORT, SLAVE_ID, timeout_secs).await;
|
||||
|
||||
// Assert: Connection should succeed
|
||||
assert!(
|
||||
@@ -45,12 +46,10 @@ mod t025a_connection_setup_tests {
|
||||
async fn test_new_with_invalid_host_returns_connection_error() {
|
||||
// Arrange: Use invalid host format
|
||||
let host = "not a valid host!!!";
|
||||
let port = 502;
|
||||
let slave_id = 1;
|
||||
let timeout_secs = 5;
|
||||
|
||||
// Act: Attempt to create controller
|
||||
let result = ModbusRelayController::new(host, port, slave_id, timeout_secs).await;
|
||||
let result = ModbusRelayController::new(host, PORT, SLAVE_ID, timeout_secs).await;
|
||||
|
||||
// Assert: Should return ConnectionError
|
||||
assert!(result.is_err(), "Expected ConnectionError for invalid host");
|
||||
@@ -74,13 +73,11 @@ mod t025a_connection_setup_tests {
|
||||
async fn test_new_with_unreachable_host_returns_connection_error() {
|
||||
// Arrange: Use localhost with a closed port (port 1 is typically closed)
|
||||
// This gives instant "connection refused" instead of waiting for TCP timeout
|
||||
let host = "127.0.0.1";
|
||||
let port = 1; // Closed port for instant connection failure
|
||||
let slave_id = 1;
|
||||
let timeout_secs = 1;
|
||||
|
||||
// Act: Attempt to create controller
|
||||
let result = ModbusRelayController::new(host, port, slave_id, timeout_secs).await;
|
||||
let result = ModbusRelayController::new(HOST, port, SLAVE_ID, timeout_secs).await;
|
||||
|
||||
// Assert: Should return ConnectionError
|
||||
assert!(
|
||||
@@ -100,13 +97,10 @@ mod t025a_connection_setup_tests {
|
||||
#[ignore = "Requires running Modbus TCP server or refactoring to expose timeout"]
|
||||
async fn test_new_stores_correct_timeout_duration() {
|
||||
// Arrange
|
||||
let host = "127.0.0.1";
|
||||
let port = 5020;
|
||||
let slave_id = 1;
|
||||
let timeout_secs = 10;
|
||||
|
||||
// Act
|
||||
let controller = ModbusRelayController::new(host, port, slave_id, timeout_secs)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, timeout_secs)
|
||||
.await
|
||||
.expect("Failed to create controller");
|
||||
|
||||
@@ -137,6 +131,10 @@ mod t025b_read_coils_timeout_tests {
|
||||
types::RelayId,
|
||||
};
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// T025b Test 1: `read_coils_with_timeout()` returns coil values on success
|
||||
///
|
||||
/// This test verifies that reading coils succeeds when the Modbus server
|
||||
@@ -147,7 +145,7 @@ mod t025b_read_coils_timeout_tests {
|
||||
#[ignore = "Requires running Modbus TCP server with known state"]
|
||||
async fn test_read_coils_returns_coil_values_on_success() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -251,6 +249,10 @@ mod t025c_write_single_coil_timeout_tests {
|
||||
types::{RelayId, RelayState},
|
||||
};
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// T025c Test 1: `write_single_coil_with_timeout()` succeeds for valid write
|
||||
///
|
||||
/// This test verifies that writing to a coil succeeds when the Modbus server
|
||||
@@ -261,7 +263,7 @@ mod t025c_write_single_coil_timeout_tests {
|
||||
#[ignore = "Requires running Modbus TCP server"]
|
||||
async fn test_write_single_coil_succeeds_for_valid_write() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -336,6 +338,10 @@ mod t025d_read_relay_state_tests {
|
||||
types::{RelayId, RelayState},
|
||||
};
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// T025d Test 1: `read_relay_state(RelayId(1))` returns On when coil is true
|
||||
///
|
||||
/// This test verifies that a true coil value is correctly converted to `RelayState::On`.
|
||||
@@ -409,7 +415,7 @@ mod t025d_read_relay_state_tests {
|
||||
#[ignore = "Requires Modbus server with specific relay states"]
|
||||
async fn test_read_state_correctly_maps_relay_id_to_modbus_address() {
|
||||
// Arrange: Connect to test server with known relay states
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -434,6 +440,10 @@ mod t025e_write_relay_state_tests {
|
||||
types::{RelayId, RelayState},
|
||||
};
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// T025e Test 1: `write_relay_state(RelayId(1)`, `RelayState::On`) writes true to coil
|
||||
///
|
||||
/// This test verifies that `RelayState::On` is correctly converted to a true coil value.
|
||||
@@ -441,7 +451,7 @@ mod t025e_write_relay_state_tests {
|
||||
#[ignore = "Requires Modbus server that can verify written values"]
|
||||
async fn test_write_state_on_writes_true_to_coil() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -475,7 +485,7 @@ mod t025e_write_relay_state_tests {
|
||||
#[ignore = "Requires Modbus server that can verify written values"]
|
||||
async fn test_write_state_off_writes_false_to_coil() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -509,7 +519,7 @@ mod t025e_write_relay_state_tests {
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_state_correctly_maps_relay_id_to_modbus_address() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -537,7 +547,7 @@ mod t025e_write_relay_state_tests {
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_state_can_toggle_relay_multiple_times() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -571,12 +581,16 @@ mod t025e_write_relay_state_tests {
|
||||
mod write_all_states_validation_tests {
|
||||
use super::*;
|
||||
|
||||
static HOST: &str = "192.168.1.200";
|
||||
static PORT: u16 = 502;
|
||||
static SLAVE_ID: u8 = 1;
|
||||
|
||||
/// Test: `write_all_states()` returns `InvalidInput` when given 0 states
|
||||
#[tokio::test]
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_all_states_with_empty_vector_returns_invalid_input() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -596,7 +610,7 @@ mod write_all_states_validation_tests {
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_all_states_with_7_states_returns_invalid_input() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -626,7 +640,7 @@ mod write_all_states_validation_tests {
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_all_states_with_9_states_returns_invalid_input() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
@@ -656,7 +670,7 @@ mod write_all_states_validation_tests {
|
||||
#[ignore = "Requires Modbus server"]
|
||||
async fn test_write_all_states_with_8_states_succeeds() {
|
||||
// Arrange: Connect to test server
|
||||
let controller = ModbusRelayController::new("127.0.0.1", 5020, 1, 5)
|
||||
let controller = ModbusRelayController::new(HOST, PORT, SLAVE_ID, 5)
|
||||
.await
|
||||
.expect("Failed to connect to test server");
|
||||
|
||||
|
||||
Reference in New Issue
Block a user