re-uploading work
This commit is contained in:
52
crates/core-timer-sensor/Cargo.toml
Normal file
52
crates/core-timer-sensor/Cargo.toml
Normal file
@@ -0,0 +1,52 @@
|
||||
[package]
name = "core-timer-sensor"
version = "0.1.0"
edition = "2021"
authors = ["Attune Contributors"]
description = "Standalone timer sensor runtime for Attune core pack"

# Binary name deliberately differs from the crate name; install docs refer to
# /usr/local/bin/attune-core-timer-sensor.
[[bin]]
name = "attune-core-timer-sensor"
path = "src/main.rs"

[dependencies]
# Async runtime
tokio = { version = "1.41", features = ["full"] }
async-trait = "0.1"

# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

# HTTP client
reqwest = { version = "0.12", features = ["json"] }

# Message queue
lapin = "2.3"
futures = "0.3"

# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }

# Error handling
anyhow = "1.0"
thiserror = "1.0"

# Time handling
chrono = { version = "0.4", features = ["serde"] }

# Cron scheduling
tokio-cron-scheduler = "0.15"

# CLI
clap = { version = "4.5", features = ["derive"] }

# Utilities
uuid = { version = "1.11", features = ["v4", "serde"] }
urlencoding = "2.1"
base64 = "0.21"

[dev-dependencies]
mockall = "0.13"
tempfile = "3.13"
|
||||
403
crates/core-timer-sensor/README.md
Normal file
403
crates/core-timer-sensor/README.md
Normal file
@@ -0,0 +1,403 @@
|
||||
# Attune Timer Sensor
|
||||
|
||||
A standalone sensor daemon for the Attune automation platform that monitors timer-based triggers and emits events. This sensor manages multiple concurrent timer schedules based on active rules.
|
||||
|
||||
## Overview
|
||||
|
||||
The timer sensor is a lightweight, event-driven process that:
|
||||
|
||||
- Listens for rule lifecycle events via RabbitMQ
|
||||
- Manages per-rule timer tasks dynamically
|
||||
- Emits events to the Attune API when timers fire
|
||||
- Supports interval-based, cron-based, and datetime-based timers
|
||||
- Authenticates using service account tokens
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Timer Sensor Process │
|
||||
│ │
|
||||
│ ┌────────────────┐ ┌──────────────────┐ │
|
||||
│ │ Rule Lifecycle │───▶│ Timer Manager │ │
|
||||
│ │ Listener │ │ │ │
|
||||
│ │ (RabbitMQ) │ │ ┌──────────────┐ │ │
|
||||
│ └────────────────┘ │ │ Rule 1 Timer │ │ │
|
||||
│ │ ├──────────────┤ │ │
|
||||
│ │ │ Rule 2 Timer │ │───┐ │
|
||||
│ │ ├──────────────┤ │ │ │
|
||||
│ │ │ Rule 3 Timer │ │ │ │
|
||||
│ │ └──────────────┘ │ │ │
|
||||
│ └──────────────────┘ │ │
|
||||
│ │ │
|
||||
│ ┌────────────────┐ │ │
|
||||
│ │ API Client │◀──────────────────────────┘ │
|
||||
│ │ (Create Events)│ │
|
||||
│ └────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│ ▲
|
||||
│ Events │ Rule Lifecycle
|
||||
▼ │ Messages
|
||||
┌─────────────────┐ ┌─────────────────┐
|
||||
│ Attune API │ │ RabbitMQ │
|
||||
└─────────────────┘ └─────────────────┘
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- **Per-Rule Timers**: Each rule gets its own independent timer task
|
||||
- **Dynamic Management**: Timers start/stop automatically based on rule lifecycle
|
||||
- **Multiple Timer Types**:
|
||||
- **Interval**: Fire every N seconds/minutes/hours/days
|
||||
- **Cron**: Fire based on cron expression (planned)
|
||||
- **DateTime**: Fire at a specific date/time
|
||||
- **Resilient**: Retries event creation with exponential backoff
|
||||
- **Secure**: Token-based authentication with trigger type restrictions
|
||||
- **Observable**: Structured JSON logging for monitoring
|
||||
|
||||
## Installation
|
||||
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
cargo build --release --package core-timer-sensor
|
||||
sudo cp target/release/attune-core-timer-sensor /usr/local/bin/
|
||||
```
|
||||
|
||||
### Using Cargo Install
|
||||
|
||||
```bash
|
||||
cargo install --path crates/core-timer-sensor
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The sensor requires the following environment variables:
|
||||
|
||||
| Variable | Required | Description | Example |
|
||||
|----------|----------|-------------|---------|
|
||||
| `ATTUNE_API_URL` | Yes | Base URL of the Attune API | `http://localhost:8080` |
|
||||
| `ATTUNE_API_TOKEN` | Yes | Service account token | `eyJhbGci...` |
|
||||
| `ATTUNE_SENSOR_REF` | Yes | Sensor reference (must be `core.timer`) | `core.timer` |
|
||||
| `ATTUNE_MQ_URL` | Yes | RabbitMQ connection URL | `amqp://localhost:5672` |
|
||||
| `ATTUNE_MQ_EXCHANGE` | No | RabbitMQ exchange name | `attune` (default) |
|
||||
| `ATTUNE_LOG_LEVEL` | No | Logging verbosity | `info` (default) |
|
||||
|
||||
### Example: Environment Variables
|
||||
|
||||
```bash
|
||||
export ATTUNE_API_URL="http://localhost:8080"
|
||||
export ATTUNE_API_TOKEN="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."
|
||||
export ATTUNE_SENSOR_REF="core.timer"
|
||||
export ATTUNE_MQ_URL="amqp://localhost:5672"
|
||||
export ATTUNE_LOG_LEVEL="info"
|
||||
|
||||
attune-core-timer-sensor
|
||||
```
|
||||
|
||||
### Example: stdin Configuration
|
||||
|
||||
```bash
|
||||
echo '{
|
||||
"api_url": "http://localhost:8080",
|
||||
"api_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
|
||||
"sensor_ref": "core.timer",
|
||||
"mq_url": "amqp://localhost:5672",
|
||||
"mq_exchange": "attune",
|
||||
"log_level": "info"
|
||||
}' | attune-core-timer-sensor --stdin-config
|
||||
```
|
||||
|
||||
## Service Account Setup
|
||||
|
||||
Before running the sensor, you need to create a service account with the appropriate permissions:
|
||||
|
||||
```bash
|
||||
# Create service account (requires admin token)
|
||||
curl -X POST http://localhost:8080/service-accounts \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "sensor:core.timer",
|
||||
"scope": "sensor",
|
||||
"description": "Timer sensor for interval-based triggers",
|
||||
"ttl_hours": 72,
|
||||
"metadata": {
|
||||
"trigger_types": ["core.timer"]
|
||||
}
|
||||
}'
|
||||
|
||||
# Response will include the token (save this - it's only shown once!)
|
||||
{
|
||||
"identity_id": 123,
|
||||
"name": "sensor:core.timer",
|
||||
"scope": "sensor",
|
||||
"token": "eyJhbGci...", # Use this as ATTUNE_API_TOKEN
|
||||
"expires_at": "2025-01-30T12:34:56Z" # 72 hours from now
|
||||
}
|
||||
```
|
||||
|
||||
**Important**:
|
||||
- The token is only displayed once. Store it securely!
|
||||
- Sensor tokens expire after 24-72 hours and must be rotated
|
||||
- Plan to rotate the token before expiration (set up monitoring/alerts)
|
||||
|
||||
## Timer Configuration
|
||||
|
||||
Rules using the `core.timer` trigger must provide configuration in `trigger_params`:
|
||||
|
||||
### Interval Timer
|
||||
|
||||
Fires every N units of time:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "interval",
|
||||
"interval": 30,
|
||||
"unit": "seconds" // "seconds", "minutes", "hours", "days"
|
||||
}
|
||||
```
|
||||
|
||||
Examples:
|
||||
- Every 5 seconds: `{"type": "interval", "interval": 5, "unit": "seconds"}`
|
||||
- Every 10 minutes: `{"type": "interval", "interval": 10, "unit": "minutes"}`
|
||||
- Every 1 hour: `{"type": "interval", "interval": 1, "unit": "hours"}`
|
||||
- Every 1 day: `{"type": "interval", "interval": 1, "unit": "days"}`
|
||||
|
||||
### DateTime Timer
|
||||
|
||||
Fires at a specific date/time (one-time):
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "date_time",
|
||||
"fire_at": "2025-01-27T15:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Cron Timer (Planned)
|
||||
|
||||
Fires based on cron expression:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "cron",
|
||||
"expression": "0 0 * * *" // Daily at midnight
|
||||
}
|
||||
```
|
||||
|
||||
**Note**: Cron timers are not yet implemented.
|
||||
|
||||
## Running the Sensor
|
||||
|
||||
### Development
|
||||
|
||||
```bash
|
||||
# Terminal 1: Start dependencies
|
||||
docker-compose up -d postgres rabbitmq
|
||||
|
||||
# Terminal 2: Start API
|
||||
cd crates/api
|
||||
cargo run
|
||||
|
||||
# Terminal 3: Start sensor
|
||||
export ATTUNE_API_URL="http://localhost:8080"
|
||||
export ATTUNE_API_TOKEN="your_sensor_token_here"
|
||||
export ATTUNE_SENSOR_REF="core.timer"
|
||||
export ATTUNE_MQ_URL="amqp://localhost:5672"
|
||||
|
||||
cargo run --package core-timer-sensor
|
||||
```
|
||||
|
||||
### Production (systemd)
|
||||
|
||||
Create a systemd service file at `/etc/systemd/system/attune-core-timer-sensor.service`:
|
||||
|
||||
```ini
|
||||
[Unit]
|
||||
Description=Attune Timer Sensor
|
||||
After=network.target rabbitmq-server.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=attune
|
||||
WorkingDirectory=/opt/attune
|
||||
ExecStart=/usr/local/bin/attune-core-timer-sensor
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
# Environment variables
|
||||
Environment="ATTUNE_API_URL=https://attune.example.com"
|
||||
Environment="ATTUNE_SENSOR_REF=core.timer"
|
||||
Environment="ATTUNE_MQ_URL=amqps://rabbitmq.example.com:5671"
|
||||
Environment="ATTUNE_LOG_LEVEL=info"
|
||||
|
||||
# Load token from file
|
||||
EnvironmentFile=/etc/attune/sensor-timer.env
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Create `/etc/attune/sensor-timer.env`:
|
||||
|
||||
```bash
|
||||
ATTUNE_API_TOKEN=eyJhbGci...
|
||||
```
|
||||
|
||||
Enable and start:
|
||||
|
||||
```bash
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable attune-core-timer-sensor
|
||||
sudo systemctl start attune-core-timer-sensor
|
||||
sudo systemctl status attune-core-timer-sensor
|
||||
```
|
||||
|
||||
**Token Rotation:**
|
||||
|
||||
Sensor tokens expire after 24-72 hours. To rotate:
|
||||
|
||||
```bash
|
||||
# 1. Create new service account token (via API)
|
||||
# 2. Update /etc/attune/sensor-timer.env with new token
|
||||
sudo nano /etc/attune/sensor-timer.env
|
||||
|
||||
# 3. Restart sensor
|
||||
sudo systemctl restart attune-core-timer-sensor
|
||||
```
|
||||
|
||||
Set up a cron job or monitoring alert to remind you to rotate tokens every 72 hours.
|
||||
|
||||
View logs:
|
||||
|
||||
```bash
|
||||
sudo journalctl -u attune-core-timer-sensor -f
|
||||
```
|
||||
|
||||
## Monitoring
|
||||
|
||||
### Logs
|
||||
|
||||
The sensor outputs structured JSON logs:
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2025-01-27T12:34:56Z",
|
||||
"level": "info",
|
||||
"message": "Timer fired for rule 123, created event 456",
|
||||
"rule_id": 123,
|
||||
"event_id": 456
|
||||
}
|
||||
```
|
||||
|
||||
### Health Checks
|
||||
|
||||
The sensor verifies API connectivity on startup. Monitor the logs for:
|
||||
|
||||
- `"API connectivity verified"` - Sensor connected successfully
|
||||
- `"Timer started for rule"` - Timer activated for a rule
|
||||
- `"Timer fired for rule"` - Event created by timer
|
||||
- `"Failed to create event"` - Event creation error (check token/permissions)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Invalid sensor_ref: expected 'core.timer'"
|
||||
|
||||
The `ATTUNE_SENSOR_REF` must be exactly `core.timer`. This sensor only handles timer triggers.
|
||||
|
||||
### "Failed to connect to Attune API"
|
||||
|
||||
- Verify `ATTUNE_API_URL` is correct and reachable
|
||||
- Check that the API service is running
|
||||
- Ensure no firewall blocking the connection
|
||||
|
||||
### "Insufficient permissions to create event for trigger ref 'core.timer'"
|
||||
|
||||
The service account token doesn't have permission to create timer events. Ensure the token's metadata includes `"trigger_types": ["core.timer"]`.
|
||||
|
||||
### "Failed to connect to RabbitMQ"
|
||||
|
||||
- Verify `ATTUNE_MQ_URL` is correct
|
||||
- Check that RabbitMQ is running
|
||||
- Ensure credentials are correct in the URL
|
||||
|
||||
### "Token expired"
|
||||
|
||||
The service account token has exceeded its TTL (24-72 hours). This is expected behavior.
|
||||
|
||||
**Solution:**
|
||||
1. Create a new service account token via API
|
||||
2. Update `ATTUNE_API_TOKEN` environment variable
|
||||
3. Restart the sensor
|
||||
|
||||
**Prevention:**
|
||||
- Set up monitoring to alert 6 hours before token expiration
|
||||
- Plan regular token rotation (every 72 hours maximum)
|
||||
|
||||
### Timer not firing
|
||||
|
||||
1. Check that the rule is enabled
|
||||
2. Verify the rule's `trigger_type` is `core.timer`
|
||||
3. Check the sensor logs for "Timer started for rule"
|
||||
4. Ensure `trigger_params` is valid JSON matching the timer config format
|
||||
|
||||
## Development
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
cargo test --package core-timer-sensor
|
||||
```
|
||||
|
||||
### Building
|
||||
|
||||
```bash
|
||||
# Debug build
|
||||
cargo build --package core-timer-sensor
|
||||
|
||||
# Release build
|
||||
cargo build --release --package core-timer-sensor
|
||||
```
|
||||
|
||||
### Code Structure
|
||||
|
||||
```
|
||||
crates/core-timer-sensor/
|
||||
├── src/
|
||||
│ ├── main.rs # Entry point, initialization
|
||||
│ ├── config.rs # Configuration loading (env/stdin)
|
||||
│ ├── api_client.rs # Attune API communication
|
||||
│ ├── timer_manager.rs # Per-rule timer task management
|
||||
│ ├── rule_listener.rs # RabbitMQ message consumer
|
||||
│ └── types.rs # Shared types and enums
|
||||
├── Cargo.toml
|
||||
└── README.md
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
When adding new timer types:
|
||||
|
||||
1. Add variant to `TimerConfig` enum in `types.rs`
|
||||
2. Implement spawn logic in `timer_manager.rs`
|
||||
3. Add tests for the new timer type
|
||||
4. Update this README with examples
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details.
|
||||
|
||||
## See Also
|
||||
|
||||
- [Sensor Interface Specification](../../docs/sensor-interface.md)
|
||||
- [Service Accounts Documentation](../../docs/service-accounts.md)
|
||||
- [Sensor Authentication Overview](../../docs/sensor-authentication-overview.md)
|
||||
381
crates/core-timer-sensor/src/api_client.rs
Normal file
381
crates/core-timer-sensor/src/api_client.rs
Normal file
@@ -0,0 +1,381 @@
|
||||
//! API Client for Attune Platform
|
||||
//!
|
||||
//! Provides methods for interacting with the Attune API, including:
|
||||
//! - Health checks
|
||||
//! - Event creation
|
||||
//! - Rule fetching
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use reqwest::{Client, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// API client for communicating with Attune
///
/// Cheap to clone: all state lives behind an `Arc`, so clones share the same
/// connection pool and the same (refreshable) token.
#[derive(Clone)]
pub struct ApiClient {
    inner: Arc<ApiClientInner>,
}
|
||||
|
||||
/// Shared state behind [`ApiClient`].
struct ApiClientInner {
    // API base URL; trailing slash is stripped by `ApiClient::new`.
    base_url: String,
    // Bearer token. RwLock so `refresh_token` can swap it while requests read it.
    token: RwLock<String>,
    // Pooled HTTP client (built with a 30s request timeout in `new`).
    client: Client,
}
|
||||
|
||||
/// Request to create an event
#[derive(Debug, Clone, Serialize)]
pub struct CreateEventRequest {
    /// Trigger reference the event belongs to (e.g. "core.timer").
    pub trigger_ref: String,
    /// Optional event payload; omitted from the JSON body when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub payload: Option<serde_json::Value>,
    /// Optional trigger configuration; omitted from the JSON body when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<serde_json::Value>,
    /// Optional trigger instance identifier (typically a rule id); omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub trigger_instance_id: Option<String>,
}
|
||||
|
||||
/// Response from creating an event
#[derive(Debug, Deserialize)]
pub struct CreateEventResponse {
    /// The API nests the created event under a `data` key.
    pub data: EventData,
}

/// Minimal event fields parsed back from the create-event response.
#[derive(Debug, Deserialize)]
pub struct EventData {
    /// Server-assigned event id.
    pub id: i64,
}
|
||||
|
||||
/// Response wrapper for API responses
///
/// Generic `{ "data": ... }` envelope used by list endpoints (see `fetch_rules`).
#[derive(Debug, Deserialize)]
pub struct ApiResponse<T> {
    pub data: T,
}
|
||||
|
||||
/// Rule information from API
#[derive(Debug, Clone, Deserialize)]
pub struct Rule {
    /// Rule id; used as the trigger instance id when emitting events.
    pub id: i64,
    /// Raw trigger configuration (timer type, interval, etc.) as opaque JSON.
    pub trigger_params: serde_json::Value,
    /// Whether the rule is currently active.
    pub enabled: bool,
}
|
||||
|
||||
/// Response from token refresh
#[derive(Debug, Deserialize)]
pub struct RefreshTokenResponse {
    /// The replacement bearer token.
    pub token: String,
    /// Expiry timestamp as returned by the API (kept as a string; only logged).
    pub expires_at: String,
}
|
||||
|
||||
impl ApiClient {
|
||||
/// Create a new API client
|
||||
pub fn new(base_url: String, token: String) -> Self {
|
||||
// Remove trailing slash from base URL if present
|
||||
let base_url = base_url.trim_end_matches('/').to_string();
|
||||
|
||||
let client = Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(30))
|
||||
.build()
|
||||
.expect("Failed to build HTTP client");
|
||||
|
||||
Self {
|
||||
inner: Arc::new(ApiClientInner {
|
||||
base_url,
|
||||
token: RwLock::new(token),
|
||||
client,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current token (for reading)
|
||||
pub async fn get_token(&self) -> String {
|
||||
self.inner.token.read().await.clone()
|
||||
}
|
||||
|
||||
/// Update the token (for refresh)
|
||||
async fn set_token(&self, new_token: String) {
|
||||
let mut token = self.inner.token.write().await;
|
||||
*token = new_token;
|
||||
}
|
||||
|
||||
/// Perform health check
|
||||
pub async fn health_check(&self) -> Result<()> {
|
||||
let url = format!("{}/health", self.inner.base_url);
|
||||
|
||||
debug!("Health check: GET {}", url);
|
||||
|
||||
let response = self
|
||||
.inner
|
||||
.client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send health check request")?;
|
||||
|
||||
if response.status().is_success() {
|
||||
info!("Health check succeeded");
|
||||
Ok(())
|
||||
} else {
|
||||
let status = response.status();
|
||||
let body = response
|
||||
.text()
|
||||
.await
|
||||
.unwrap_or_else(|_| "<unable to read response>".to_string());
|
||||
error!("Health check failed: {} - {}", status, body);
|
||||
Err(anyhow::anyhow!("Health check failed: {}", status))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an event
|
||||
pub async fn create_event(&self, request: CreateEventRequest) -> Result<i64> {
|
||||
let url = format!("{}/api/v1/events", self.inner.base_url);
|
||||
|
||||
debug!(
|
||||
"Creating event: POST {} (trigger_ref={})",
|
||||
url, request.trigger_ref
|
||||
);
|
||||
|
||||
let token = self.get_token().await;
|
||||
let response = self
|
||||
.inner
|
||||
.client
|
||||
.post(&url)
|
||||
.header("Authorization", format!("Bearer {}", token))
|
||||
.header("Content-Type", "application/json")
|
||||
.json(&request)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send create event request")?;
|
||||
|
||||
let status = response.status();
|
||||
|
||||
if status.is_success() {
|
||||
let event_response: CreateEventResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse create event response")?;
|
||||
|
||||
info!(
|
||||
"Event created successfully: id={}, trigger_ref={}",
|
||||
event_response.data.id, request.trigger_ref
|
||||
);
|
||||
|
||||
Ok(event_response.data.id)
|
||||
} else {
|
||||
let body = response
|
||||
.text()
|
||||
.await
|
||||
.unwrap_or_else(|_| "<unable to read response>".to_string());
|
||||
|
||||
error!("Failed to create event: {} - {}", status, body);
|
||||
|
||||
// Special handling for 403 Forbidden (trigger type not allowed)
|
||||
if status == StatusCode::FORBIDDEN {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Insufficient permissions to create event for trigger ref '{}'. \
|
||||
This sensor token may not be authorized for this trigger type.",
|
||||
request.trigger_ref
|
||||
));
|
||||
}
|
||||
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to create event: {} - {}",
|
||||
status,
|
||||
body
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch active rules for a specific trigger reference
|
||||
pub async fn fetch_rules(&self, trigger_ref: &str) -> Result<Vec<Rule>> {
|
||||
let url = format!(
|
||||
"{}/api/v1/triggers/{}/rules",
|
||||
self.inner.base_url,
|
||||
urlencoding::encode(trigger_ref)
|
||||
);
|
||||
|
||||
debug!("Fetching rules: GET {}", url);
|
||||
|
||||
let token = self.get_token().await;
|
||||
let response = self
|
||||
.inner
|
||||
.client
|
||||
.get(&url)
|
||||
.header("Authorization", format!("Bearer {}", token))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send fetch rules request")?;
|
||||
|
||||
let status = response.status();
|
||||
|
||||
if status.is_success() {
|
||||
let api_response: ApiResponse<Vec<Rule>> = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse fetch rules response")?;
|
||||
|
||||
info!(
|
||||
"Fetched {} rules for trigger ref {}",
|
||||
api_response.data.len(),
|
||||
trigger_ref
|
||||
);
|
||||
|
||||
Ok(api_response.data)
|
||||
} else {
|
||||
let body = response
|
||||
.text()
|
||||
.await
|
||||
.unwrap_or_else(|_| "<unable to read response>".to_string());
|
||||
|
||||
warn!("Failed to fetch rules: {} - {}", status, body);
|
||||
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to fetch rules: {} - {}",
|
||||
status,
|
||||
body
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Create event with retry logic
|
||||
pub async fn create_event_with_retry(&self, request: CreateEventRequest) -> Result<i64> {
|
||||
const MAX_RETRIES: u32 = 3;
|
||||
const INITIAL_BACKOFF_MS: u64 = 100;
|
||||
|
||||
let mut attempt = 0;
|
||||
let mut last_error = None;
|
||||
|
||||
while attempt < MAX_RETRIES {
|
||||
match self.create_event(request.clone()).await {
|
||||
Ok(event_id) => return Ok(event_id),
|
||||
Err(e) => {
|
||||
// Don't retry on 403 Forbidden (authorization error)
|
||||
if e.to_string().contains("Insufficient permissions") {
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
attempt += 1;
|
||||
last_error = Some(e);
|
||||
|
||||
if attempt < MAX_RETRIES {
|
||||
let backoff_ms = INITIAL_BACKOFF_MS * 2u64.pow(attempt - 1);
|
||||
warn!(
|
||||
"Event creation failed (attempt {}/{}), retrying in {}ms",
|
||||
attempt, MAX_RETRIES, backoff_ms
|
||||
);
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(backoff_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(last_error.unwrap_or_else(|| anyhow::anyhow!("Event creation failed after retries")))
|
||||
}
|
||||
|
||||
/// Refresh the current token
|
||||
pub async fn refresh_token(&self) -> Result<String> {
|
||||
let url = format!("{}/api/v1/auth/refresh", self.inner.base_url);
|
||||
|
||||
debug!("Refreshing token: POST {}", url);
|
||||
|
||||
let current_token = self.get_token().await;
|
||||
let response = self
|
||||
.inner
|
||||
.client
|
||||
.post(&url)
|
||||
.header("Authorization", format!("Bearer {}", current_token))
|
||||
.header("Content-Type", "application/json")
|
||||
.json(&serde_json::json!({}))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send token refresh request")?;
|
||||
|
||||
let status = response.status();
|
||||
|
||||
if status.is_success() {
|
||||
let refresh_response: RefreshTokenResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse token refresh response")?;
|
||||
|
||||
info!(
|
||||
"Token refreshed successfully, expires at: {}",
|
||||
refresh_response.expires_at
|
||||
);
|
||||
|
||||
// Update stored token
|
||||
self.set_token(refresh_response.token.clone()).await;
|
||||
|
||||
Ok(refresh_response.token)
|
||||
} else {
|
||||
let body = response
|
||||
.text()
|
||||
.await
|
||||
.unwrap_or_else(|_| "<unable to read response>".to_string());
|
||||
|
||||
error!("Failed to refresh token: {} - {}", status, body);
|
||||
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to refresh token: {} - {}",
|
||||
status,
|
||||
body
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CreateEventRequest {
|
||||
/// Create a new event request
|
||||
pub fn new(trigger_ref: String, payload: serde_json::Value) -> Self {
|
||||
Self {
|
||||
trigger_ref,
|
||||
payload: Some(payload),
|
||||
config: None,
|
||||
trigger_instance_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set trigger instance ID (typically rule_id)
|
||||
pub fn with_trigger_instance_id(mut self, id: String) -> Self {
|
||||
self.trigger_instance_id = Some(id);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_create_event_request() {
        let body = serde_json::json!({
            "timestamp": "2025-01-27T12:34:56Z",
            "scheduled_time": "2025-01-27T12:34:56Z"
        });

        let request = CreateEventRequest::new("core.timer".to_string(), body.clone());

        // Payload is stored; optional fields remain unset.
        assert_eq!(request.trigger_ref, "core.timer");
        assert_eq!(request.payload, Some(body));
        assert!(request.trigger_instance_id.is_none());
    }

    #[test]
    fn test_create_event_request_with_instance_id() {
        let request = CreateEventRequest::new(
            "core.timer".to_string(),
            serde_json::json!({ "timestamp": "2025-01-27T12:34:56Z" }),
        )
        .with_trigger_instance_id("rule_123".to_string());

        assert_eq!(request.trigger_instance_id, Some("rule_123".to_string()));
    }

    #[test]
    fn test_base_url_trailing_slash_removed() {
        let client = ApiClient::new("http://localhost:8080/".to_string(), "token".to_string());
        assert_eq!(client.inner.base_url, "http://localhost:8080");
    }
}
|
||||
200
crates/core-timer-sensor/src/config.rs
Normal file
200
crates/core-timer-sensor/src/config.rs
Normal file
@@ -0,0 +1,200 @@
|
||||
//! Configuration module for timer sensor
|
||||
//!
|
||||
//! Supports loading configuration from environment variables or stdin JSON.
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::io::Read;
|
||||
|
||||
/// Sensor configuration
///
/// Loaded either from environment variables ([`SensorConfig::from_env`]) or
/// from a JSON document on stdin ([`SensorConfig::from_stdin`]); call
/// [`SensorConfig::validate`] after loading.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SensorConfig {
    /// Base URL of the Attune API
    pub api_url: String,

    /// API token for authentication
    pub api_token: String,

    /// Sensor reference name (e.g., "core.timer")
    pub sensor_ref: String,

    /// RabbitMQ connection URL
    pub mq_url: String,

    /// RabbitMQ exchange name (default: "attune")
    #[serde(default = "default_exchange")]
    pub mq_exchange: String,

    /// Log level (default: "info")
    #[serde(default = "default_log_level")]
    pub log_level: String,
}
|
||||
|
||||
/// Serde default for `SensorConfig::mq_exchange`.
fn default_exchange() -> String {
    String::from("attune")
}
|
||||
|
||||
/// Serde default for `SensorConfig::log_level`.
fn default_log_level() -> String {
    String::from("info")
}
|
||||
|
||||
impl SensorConfig {
|
||||
/// Load configuration from environment variables
|
||||
pub fn from_env() -> Result<Self> {
|
||||
let api_url = std::env::var("ATTUNE_API_URL")
|
||||
.context("ATTUNE_API_URL environment variable is required")?;
|
||||
|
||||
let api_token = std::env::var("ATTUNE_API_TOKEN")
|
||||
.context("ATTUNE_API_TOKEN environment variable is required")?;
|
||||
|
||||
let sensor_ref = std::env::var("ATTUNE_SENSOR_REF")
|
||||
.context("ATTUNE_SENSOR_REF environment variable is required")?;
|
||||
|
||||
let mq_url = std::env::var("ATTUNE_MQ_URL")
|
||||
.context("ATTUNE_MQ_URL environment variable is required")?;
|
||||
|
||||
let mq_exchange =
|
||||
std::env::var("ATTUNE_MQ_EXCHANGE").unwrap_or_else(|_| default_exchange());
|
||||
|
||||
let log_level = std::env::var("ATTUNE_LOG_LEVEL").unwrap_or_else(|_| default_log_level());
|
||||
|
||||
Ok(Self {
|
||||
api_url,
|
||||
api_token,
|
||||
sensor_ref,
|
||||
mq_url,
|
||||
mq_exchange,
|
||||
log_level,
|
||||
})
|
||||
}
|
||||
|
||||
    /// Load configuration from stdin JSON
    ///
    /// Reads stdin to EOF and deserializes a `SensorConfig`; missing optional
    /// fields (`mq_exchange`, `log_level`) take their serde defaults.
    ///
    /// NOTE(review): this is an `async fn` but performs a blocking
    /// `std::io::stdin()` read. Presumably acceptable because it runs once at
    /// startup before timers spin up — confirm, or switch to `tokio::io::stdin`
    /// / `spawn_blocking` if it ever runs on a busy runtime.
    pub async fn from_stdin() -> Result<Self> {
        let mut buffer = String::new();
        std::io::stdin()
            .read_to_string(&mut buffer)
            .context("Failed to read configuration from stdin")?;

        serde_json::from_str(&buffer).context("Failed to parse JSON configuration from stdin")
    }
|
||||
|
||||
/// Validate configuration
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
if self.api_url.is_empty() {
|
||||
return Err(anyhow::anyhow!("api_url cannot be empty"));
|
||||
}
|
||||
|
||||
if self.api_token.is_empty() {
|
||||
return Err(anyhow::anyhow!("api_token cannot be empty"));
|
||||
}
|
||||
|
||||
if self.sensor_ref.is_empty() {
|
||||
return Err(anyhow::anyhow!("sensor_ref cannot be empty"));
|
||||
}
|
||||
|
||||
if self.mq_url.is_empty() {
|
||||
return Err(anyhow::anyhow!("mq_url cannot be empty"));
|
||||
}
|
||||
|
||||
if self.mq_exchange.is_empty() {
|
||||
return Err(anyhow::anyhow!("mq_exchange cannot be empty"));
|
||||
}
|
||||
|
||||
// Validate API URL format
|
||||
if !self.api_url.starts_with("http://") && !self.api_url.starts_with("https://") {
|
||||
return Err(anyhow::anyhow!(
|
||||
"api_url must start with http:// or https://"
|
||||
));
|
||||
}
|
||||
|
||||
// Validate MQ URL format
|
||||
if !self.mq_url.starts_with("amqp://") && !self.mq_url.starts_with("amqps://") {
|
||||
return Err(anyhow::anyhow!(
|
||||
"mq_url must start with amqp:// or amqps://"
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Fully-populated, valid config; validation tests override single fields.
    fn base_config() -> SensorConfig {
        SensorConfig {
            api_url: "http://localhost:8080".to_string(),
            api_token: "test_token".to_string(),
            sensor_ref: "core.timer".to_string(),
            mq_url: "amqp://localhost:5672".to_string(),
            mq_exchange: "attune".to_string(),
            log_level: "info".to_string(),
        }
    }

    #[test]
    fn test_config_validation() {
        assert!(base_config().validate().is_ok());
    }

    #[test]
    fn test_config_validation_invalid_api_url() {
        let mut config = base_config();
        config.api_url = "localhost:8080".to_string(); // Missing http://
        assert!(config.validate().is_err());
    }

    #[test]
    fn test_config_validation_invalid_mq_url() {
        let mut config = base_config();
        config.mq_url = "localhost:5672".to_string(); // Missing amqp://
        assert!(config.validate().is_err());
    }

    #[test]
    fn test_config_deserialization() {
        let json = r#"{
            "api_url": "http://localhost:8080",
            "api_token": "test_token",
            "sensor_ref": "core.timer",
            "mq_url": "amqp://localhost:5672"
        }"#;

        let config: SensorConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.api_url, "http://localhost:8080");
        assert_eq!(config.api_token, "test_token");
        assert_eq!(config.sensor_ref, "core.timer");
        assert_eq!(config.mq_url, "amqp://localhost:5672");
        assert_eq!(config.mq_exchange, "attune"); // Default
        assert_eq!(config.log_level, "info"); // Default
    }

    #[test]
    fn test_config_deserialization_with_optionals() {
        let json = r#"{
            "api_url": "http://localhost:8080",
            "api_token": "test_token",
            "sensor_ref": "core.timer",
            "mq_url": "amqp://localhost:5672",
            "mq_exchange": "custom",
            "log_level": "debug"
        }"#;

        let config: SensorConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.mq_exchange, "custom");
        assert_eq!(config.log_level, "debug");
    }
}
|
||||
145
crates/core-timer-sensor/src/main.rs
Normal file
145
crates/core-timer-sensor/src/main.rs
Normal file
@@ -0,0 +1,145 @@
|
||||
//! Attune Timer Sensor
|
||||
//!
|
||||
//! A standalone sensor daemon that monitors timer-based triggers and emits events
|
||||
//! to the Attune platform. Each timer sensor instance manages multiple timer schedules
|
||||
//! based on active rules.
|
||||
//!
|
||||
//! Configuration is provided via environment variables or stdin JSON:
|
||||
//! - ATTUNE_API_URL: Base URL of the Attune API
|
||||
//! - ATTUNE_API_TOKEN: Service account token for authentication
|
||||
//! - ATTUNE_SENSOR_REF: Reference name for this sensor (e.g., "core.timer")
|
||||
//! - ATTUNE_MQ_URL: RabbitMQ connection URL
|
||||
//! - ATTUNE_MQ_EXCHANGE: RabbitMQ exchange name (default: "attune")
|
||||
//! - ATTUNE_LOG_LEVEL: Logging verbosity (default: "info")
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::Parser;
|
||||
use tracing::{error, info};
|
||||
|
||||
mod api_client;
|
||||
mod config;
|
||||
mod rule_listener;
|
||||
mod timer_manager;
|
||||
mod token_refresh;
|
||||
mod types;
|
||||
|
||||
use config::SensorConfig;
|
||||
use rule_listener::RuleLifecycleListener;
|
||||
use timer_manager::TimerManager;
|
||||
use token_refresh::TokenRefreshManager;
|
||||
|
||||
/// Command-line arguments for the timer sensor binary, parsed by clap.
#[derive(Parser, Debug)]
#[command(name = "attune-core-timer-sensor")]
#[command(about = "Standalone timer sensor for Attune automation platform", long_about = None)]
struct Args {
    /// Log level (trace, debug, info, warn, error)
    #[arg(short, long, default_value = "info")]
    log_level: String,

    /// Read configuration from stdin as JSON instead of environment variables
    #[arg(long)]
    stdin_config: bool,
}
|
||||
|
||||
/// Process entry point.
///
/// Startup sequence: parse CLI args, initialize JSON tracing, load and
/// validate configuration (stdin or env), verify API connectivity, build the
/// timer manager and rule lifecycle listener, start the token refresher, and
/// then block on the listener until it stops. Shutdown is triggered by
/// Ctrl-C, which stops the timer manager from a spawned signal task.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();

    // Initialize tracing; an unparseable level falls back to INFO.
    let log_level = args.log_level.parse().unwrap_or(tracing::Level::INFO);

    tracing_subscriber::fmt()
        .with_max_level(log_level)
        .with_target(false)
        .with_thread_ids(true)
        .json()
        .init();

    info!("Starting Attune Timer Sensor");
    info!("Version: {}", env!("CARGO_PKG_VERSION"));

    // Load configuration from stdin JSON or environment variables.
    let config = if args.stdin_config {
        info!("Reading configuration from stdin");
        SensorConfig::from_stdin().await?
    } else {
        info!("Reading configuration from environment variables");
        SensorConfig::from_env()?
    };

    config.validate()?;
    info!(
        "Configuration loaded successfully: sensor_ref={}, api_url={}",
        config.sensor_ref, config.api_url
    );

    // Create API client
    let api_client = api_client::ApiClient::new(config.api_url.clone(), config.api_token.clone());

    // Verify API connectivity before committing to MQ setup.
    info!("Verifying API connectivity...");
    api_client
        .health_check()
        .await
        .context("Failed to connect to Attune API")?;
    info!("API connectivity verified");

    // Create timer manager
    let timer_manager = TimerManager::new(api_client.clone())
        .await
        .context("Failed to initialize timer manager")?;
    info!("Timer manager initialized");

    // Create rule lifecycle listener
    let listener = RuleLifecycleListener::new(
        config.mq_url.clone(),
        config.mq_exchange.clone(),
        config.sensor_ref.clone(),
        api_client.clone(),
        timer_manager.clone(),
    );

    info!("Rule lifecycle listener initialized");

    // Start token refresh manager (auto-refresh when 80% of TTL elapsed).
    // The returned handle is intentionally dropped: the refresh task runs
    // detached for the lifetime of the process.
    let refresh_manager = TokenRefreshManager::new(api_client.clone(), 0.8);
    let _refresh_handle = refresh_manager.start();
    info!("Token refresh manager started (will refresh at 80% of TTL)");

    // Set up graceful shutdown handler: on Ctrl-C, stop the timer manager
    // so no more events are emitted while the listener winds down.
    let timer_manager_clone = timer_manager.clone();
    let shutdown_signal = tokio::spawn(async move {
        match tokio::signal::ctrl_c().await {
            Ok(()) => {
                info!("Shutdown signal received");
                if let Err(e) = timer_manager_clone.shutdown().await {
                    error!("Error during timer manager shutdown: {}", e);
                }
            }
            Err(e) => {
                error!("Failed to listen for shutdown signal: {}", e);
            }
        }
    });

    // Start the listener (this will block until stopped)
    info!("Starting rule lifecycle listener...");
    match listener.start().await {
        Ok(()) => {
            info!("Rule lifecycle listener stopped gracefully");
        }
        Err(e) => {
            error!("Rule lifecycle listener error: {}", e);
            // NOTE(review): this early return exits without awaiting the
            // signal task or calling timer_manager.shutdown() — confirm the
            // scheduler tasks are safe to abandon on this path.
            return Err(e);
        }
    }

    // Wait for shutdown to complete
    let _ = shutdown_signal.await;

    // Ensure timer manager is fully shutdown (idempotent with the signal
    // task's shutdown call above).
    timer_manager.shutdown().await?;

    info!("Timer sensor has shut down gracefully");
    Ok(())
}
|
||||
340
crates/core-timer-sensor/src/rule_listener.rs
Normal file
340
crates/core-timer-sensor/src/rule_listener.rs
Normal file
@@ -0,0 +1,340 @@
|
||||
//! Rule Lifecycle Listener
|
||||
//!
|
||||
//! Listens for rule lifecycle events from RabbitMQ and manages timer instances
|
||||
//! accordingly. Handles RuleCreated, RuleEnabled, RuleDisabled, and RuleDeleted events.
|
||||
|
||||
use crate::api_client::ApiClient;
|
||||
use crate::timer_manager::TimerManager;
|
||||
use crate::types::{RuleLifecycleEvent, TimerConfig};
|
||||
use anyhow::{Context, Result};
|
||||
use futures::StreamExt;
|
||||
use lapin::{options::*, types::FieldTable, Channel, Connection, ConnectionProperties, Consumer};
|
||||
use serde_json::Value as JsonValue;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Rule lifecycle listener.
///
/// Owns everything needed to consume rule lifecycle messages from RabbitMQ
/// and translate them into timer start/stop calls on the [`TimerManager`].
pub struct RuleLifecycleListener {
    /// RabbitMQ connection URL (may embed credentials; masked before logging)
    mq_url: String,
    /// Topic exchange that rule lifecycle events are published to
    mq_exchange: String,
    /// Sensor reference used to derive the queue name ("sensor.<ref>")
    sensor_ref: String,
    /// API client used to fetch existing rules at startup
    api_client: ApiClient,
    /// Timer manager that starts/stops per-rule timers
    timer_manager: TimerManager,
}
|
||||
|
||||
impl RuleLifecycleListener {
    /// Create a new rule lifecycle listener.
    ///
    /// Construction is cheap: no connection is opened until [`start`] runs.
    pub fn new(
        mq_url: String,
        mq_exchange: String,
        sensor_ref: String,
        api_client: ApiClient,
        timer_manager: TimerManager,
    ) -> Self {
        Self {
            mq_url,
            mq_exchange,
            sensor_ref,
            api_client,
            timer_manager,
        }
    }

    /// Start listening for rule lifecycle events.
    ///
    /// Connects to RabbitMQ, declares the topic exchange and a sensor-specific
    /// durable queue, binds the queue to the four rule lifecycle routing keys,
    /// seeds timers from rules that already exist in the API, then consumes
    /// messages until the consumer stream ends. Consumes `self`; returns only
    /// when the consumer stops or a connection/setup step fails.
    pub async fn start(self) -> Result<()> {
        info!("Connecting to RabbitMQ: {}", mask_url(&self.mq_url));

        // Connect to RabbitMQ
        let connection = Connection::connect(&self.mq_url, ConnectionProperties::default())
            .await
            .context("Failed to connect to RabbitMQ")?;

        info!("Connected to RabbitMQ");

        // Create channel
        let channel = connection
            .create_channel()
            .await
            .context("Failed to create channel")?;

        info!("Created RabbitMQ channel");

        // Declare exchange (idempotent)
        channel
            .exchange_declare(
                &self.mq_exchange,
                lapin::ExchangeKind::Topic,
                ExchangeDeclareOptions {
                    durable: true,
                    ..Default::default()
                },
                FieldTable::default(),
            )
            .await
            .context("Failed to declare exchange")?;

        debug!("Exchange '{}' declared", self.mq_exchange);

        // Declare sensor-specific queue
        let queue_name = format!("sensor.{}", self.sensor_ref);
        channel
            .queue_declare(
                &queue_name,
                QueueDeclareOptions {
                    durable: true,
                    ..Default::default()
                },
                FieldTable::default(),
            )
            .await
            .context("Failed to declare queue")?;

        info!("Queue '{}' declared", queue_name);

        // Bind queue to exchange with routing keys for rule lifecycle events
        let routing_keys = vec![
            "rule.created",
            "rule.enabled",
            "rule.disabled",
            "rule.deleted",
        ];

        for routing_key in &routing_keys {
            channel
                .queue_bind(
                    &queue_name,
                    &self.mq_exchange,
                    routing_key,
                    QueueBindOptions::default(),
                    FieldTable::default(),
                )
                .await
                .with_context(|| {
                    format!("Failed to bind queue to routing key '{}'", routing_key)
                })?;

            info!(
                "Bound queue '{}' to exchange '{}' with routing key '{}'",
                queue_name, self.mq_exchange, routing_key
            );
        }

        // Load existing active rules from API so timers survive a sensor
        // restart.
        // NOTE(review): only 'core.intervaltimer' rules are fetched here,
        // while the message filter in consume_messages also accepts
        // 'core.timer' — confirm whether pre-existing 'core.timer' rules
        // should be seeded as well.
        info!("Fetching existing active rules for trigger 'core.intervaltimer'");
        match self.api_client.fetch_rules("core.intervaltimer").await {
            Ok(rules) => {
                info!("Found {} existing rules", rules.len());
                for rule in rules {
                    if rule.enabled {
                        // A failure to start one timer is logged but does not
                        // abort startup for the remaining rules.
                        if let Err(e) = self
                            .start_timer_from_params(rule.id, Some(rule.trigger_params))
                            .await
                        {
                            error!("Failed to start timer for rule {}: {}", rule.id, e);
                        }
                    }
                }
            }
            Err(e) => {
                warn!("Failed to fetch existing rules: {}", e);
                // Continue anyway - we'll handle new rules via messages
            }
        }

        // Start consuming messages (manual acknowledgement: no_ack = false)
        let consumer = channel
            .basic_consume(
                &queue_name,
                "sensor-timer-consumer",
                BasicConsumeOptions {
                    no_ack: false,
                    ..Default::default()
                },
                FieldTable::default(),
            )
            .await
            .context("Failed to create consumer")?;

        info!("Started consuming messages from queue '{}'", queue_name);

        // Process messages
        self.consume_messages(consumer, channel).await
    }

    /// Consume and process messages from the queue.
    ///
    /// Every successfully received delivery is acknowledged after processing,
    /// including deliveries that fail to parse or fail in `handle_event` —
    /// malformed or unprocessable messages are logged and dropped, never
    /// redelivered. The channel is taken by value (presumably so it stays
    /// alive for the consumer's lifetime — TODO confirm).
    async fn consume_messages(self, mut consumer: Consumer, _channel: Channel) -> Result<()> {
        while let Some(delivery) = consumer.next().await {
            match delivery {
                Ok(delivery) => {
                    let payload = String::from_utf8_lossy(&delivery.data);
                    debug!("Received message: {}", payload);

                    // Parse message as JSON
                    match serde_json::from_slice::<JsonValue>(&delivery.data) {
                        Ok(json_value) => {
                            // Try to parse as RuleLifecycleEvent
                            match serde_json::from_value::<RuleLifecycleEvent>(json_value.clone()) {
                                Ok(event) => {
                                    // Filter by trigger type - only process timer events (core.timer or core.intervaltimer)
                                    let trigger_type = event.trigger_type();
                                    if trigger_type == "core.timer"
                                        || trigger_type == "core.intervaltimer"
                                    {
                                        if let Err(e) = self.handle_event(event).await {
                                            error!("Failed to handle event: {}", e);
                                        }
                                    } else {
                                        debug!(
                                            "Ignoring event for trigger type '{}'",
                                            event.trigger_type()
                                        );
                                    }
                                }
                                Err(e) => {
                                    warn!("Failed to parse message as RuleLifecycleEvent: {}", e);
                                }
                            }
                        }
                        Err(e) => {
                            error!("Failed to parse message as JSON: {}", e);
                        }
                    }

                    // Acknowledge message (unconditionally, see doc above)
                    if let Err(e) = delivery.ack(BasicAckOptions::default()).await {
                        error!("Failed to acknowledge message: {}", e);
                    }
                }
                Err(e) => {
                    error!("Error receiving message: {}", e);
                    // Continue processing
                }
            }
        }

        info!("Message consumer stopped");
        Ok(())
    }

    /// Handle a rule lifecycle event.
    ///
    /// Created/enabled rules (when enabled) start a timer; disabled/deleted
    /// rules stop the corresponding timer.
    async fn handle_event(&self, event: RuleLifecycleEvent) -> Result<()> {
        match event {
            RuleLifecycleEvent::RuleCreated {
                rule_id,
                rule_ref,
                trigger_type,
                trigger_params,
                enabled,
                ..
            } => {
                info!(
                    "Handling RuleCreated: rule_id={}, ref={}, trigger={}, enabled={}",
                    rule_id, rule_ref, trigger_type, enabled
                );

                if enabled {
                    self.start_timer_from_params(rule_id, trigger_params)
                        .await?;
                } else {
                    info!("Rule {} is disabled, not starting timer", rule_id);
                }
            }
            RuleLifecycleEvent::RuleEnabled {
                rule_id,
                rule_ref,
                trigger_params,
                ..
            } => {
                info!(
                    "Handling RuleEnabled: rule_id={}, ref={}",
                    rule_id, rule_ref
                );

                self.start_timer_from_params(rule_id, trigger_params)
                    .await?;
            }
            RuleLifecycleEvent::RuleDisabled {
                rule_id, rule_ref, ..
            } => {
                info!(
                    "Handling RuleDisabled: rule_id={}, ref={}",
                    rule_id, rule_ref
                );

                self.timer_manager.stop_timer(rule_id).await;
            }
            RuleLifecycleEvent::RuleDeleted {
                rule_id, rule_ref, ..
            } => {
                info!(
                    "Handling RuleDeleted: rule_id={}, ref={}",
                    rule_id, rule_ref
                );

                self.timer_manager.stop_timer(rule_id).await;
            }
        }

        Ok(())
    }

    /// Start a timer from trigger parameters.
    ///
    /// Fails if `trigger_params` is absent or does not deserialize into a
    /// [`TimerConfig`]; otherwise delegates to the timer manager.
    async fn start_timer_from_params(
        &self,
        rule_id: i64,
        trigger_params: Option<JsonValue>,
    ) -> Result<()> {
        let params = trigger_params.ok_or_else(|| {
            anyhow::anyhow!("Timer trigger requires trigger_params but none provided")
        })?;

        let config: TimerConfig = serde_json::from_value(params)
            .context("Failed to parse trigger_params as TimerConfig")?;

        info!(
            "Starting timer for rule {} with config: {:?}",
            rule_id, config
        );

        self.timer_manager
            .start_timer(rule_id, config)
            .await
            .context("Failed to start timer")?;

        info!("Timer started successfully for rule {}", rule_id);

        Ok(())
    }
}
|
||||
|
||||
/// Mask credentials embedded in a connection string before logging.
///
/// For `scheme://user:pass@host/...` the user/password segment is replaced
/// with `***:***` while the scheme and host remain visible. URLs where the
/// credential and scheme markers cannot both be located are masked entirely.
fn mask_url(url: &str) -> String {
    match (url.find('@'), url.find("://")) {
        (Some(at_pos), Some(proto_end)) => {
            let scheme = &url[..proto_end + 3];
            let host_and_path = &url[at_pos..];
            format!("{}***:***{}", scheme, host_and_path)
        }
        // No recognizable credential/scheme structure: hide everything.
        _ => "***:***@***".to_string(),
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_mask_url() {
        // Credentials must be stripped while the host remains visible.
        let url = "amqp://user:password@localhost:5672/%2F";
        let masked = mask_url(url);
        assert!(!masked.contains("user"));
        assert!(!masked.contains("password"));
        assert!(masked.contains("@localhost"));
    }

    #[test]
    fn test_mask_url_no_credentials() {
        // A URL without '@' is masked entirely, host included.
        let url = "amqp://localhost:5672";
        let masked = mask_url(url);
        assert_eq!(masked, "***:***@***");
    }
}
|
||||
633
crates/core-timer-sensor/src/timer_manager.rs
Normal file
633
crates/core-timer-sensor/src/timer_manager.rs
Normal file
@@ -0,0 +1,633 @@
|
||||
//! Timer Manager
|
||||
//!
|
||||
//! Manages individual timer tasks for each rule, with support for:
|
||||
//! - Interval-based timers (fires every N seconds/minutes/hours/days)
|
||||
//! - Cron-based timers (fires based on cron expressions)
|
||||
//! - DateTime-based timers (fires once at a specific time)
|
||||
|
||||
use crate::api_client::{ApiClient, CreateEventRequest};
|
||||
use crate::types::{TimeUnit, TimerConfig};
|
||||
use anyhow::Result;
|
||||
use chrono::Utc;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tokio_cron_scheduler::{Job, JobScheduler};
|
||||
use tracing::{debug, error, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Timer manager for handling per-rule timers.
///
/// Cheap to clone: all state lives behind a shared `Arc`, so every clone
/// observes and mutates the same job table and scheduler.
#[derive(Clone)]
pub struct TimerManager {
    inner: Arc<TimerManagerInner>,
}
|
||||
|
||||
/// Shared state behind [`TimerManager`] clones.
struct TimerManagerInner {
    /// Map of rule_id -> job UUID in the scheduler
    active_jobs: RwLock<HashMap<i64, Uuid>>,
    /// Shared cron scheduler for all timer types (wrapped in Mutex for shutdown)
    scheduler: Mutex<JobScheduler>,
    /// API client for creating events
    api_client: ApiClient,
}
|
||||
|
||||
impl TimerManager {
    /// Create a new timer manager.
    ///
    /// The underlying `JobScheduler` is created and started immediately, so
    /// jobs added later via [`start_timer`] begin running as scheduled.
    pub async fn new(api_client: ApiClient) -> Result<Self> {
        let scheduler = JobScheduler::new().await?;

        // Start the scheduler
        scheduler.start().await?;

        Ok(Self {
            inner: Arc::new(TimerManagerInner {
                active_jobs: RwLock::new(HashMap::new()),
                scheduler: Mutex::new(scheduler),
                api_client,
            }),
        })
    }

    /// Start a timer for a rule.
    ///
    /// Any existing timer for the same rule is stopped first, so calling
    /// this twice replaces rather than duplicates the timer.
    pub async fn start_timer(&self, rule_id: i64, config: TimerConfig) -> Result<()> {
        // Stop existing timer if any
        self.stop_timer(rule_id).await;

        info!("Starting timer for rule {}: {:?}", rule_id, config);

        // Create appropriate job type
        let job = match &config {
            TimerConfig::Interval { interval, unit } => {
                self.create_interval_job(rule_id, *interval, *unit).await?
            }
            TimerConfig::Cron { expression } => {
                self.create_cron_job(rule_id, expression.clone()).await?
            }
            TimerConfig::DateTime { fire_at } => {
                self.create_datetime_job(rule_id, *fire_at).await?
            }
        };

        // Add job to scheduler and store UUID
        let job_uuid = self.inner.scheduler.lock().await.add(job).await?;
        self.inner
            .active_jobs
            .write()
            .await
            .insert(rule_id, job_uuid);

        info!(
            "Timer started for rule {} with job UUID {}",
            rule_id, job_uuid
        );

        Ok(())
    }

    /// Stop a timer for a rule.
    ///
    /// No-op (debug log only) when the rule has no active timer; a failed
    /// scheduler removal is logged but still drops the job from the map.
    pub async fn stop_timer(&self, rule_id: i64) {
        let mut active_jobs = self.inner.active_jobs.write().await;

        if let Some(job_uuid) = active_jobs.remove(&rule_id) {
            if let Err(e) = self.inner.scheduler.lock().await.remove(&job_uuid).await {
                warn!(
                    "Failed to remove job {} for rule {}: {}",
                    job_uuid, rule_id, e
                );
            } else {
                info!("Stopped timer for rule {}", rule_id);
            }
        } else {
            debug!("No timer found for rule {}", rule_id);
        }
    }

    /// Stop all timers, draining the rule -> job map.
    ///
    /// NOTE(review): the scheduler mutex is re-acquired once per job inside
    /// the loop — harmless, but the lock could be taken once before the loop.
    pub async fn stop_all(&self) {
        let mut active_jobs = self.inner.active_jobs.write().await;

        let count = active_jobs.len();
        for (rule_id, job_uuid) in active_jobs.drain() {
            if let Err(e) = self.inner.scheduler.lock().await.remove(&job_uuid).await {
                warn!(
                    "Failed to remove job {} for rule {}: {}",
                    job_uuid, rule_id, e
                );
            } else {
                debug!("Stopped timer for rule {}", rule_id);
            }
        }

        info!("Stopped {} timers", count);
    }

    /// Get count of active timers.
    #[allow(dead_code)]
    pub async fn timer_count(&self) -> usize {
        self.inner.active_jobs.read().await.len()
    }

    /// Shutdown the scheduler after stopping all timers.
    pub async fn shutdown(&self) -> Result<()> {
        info!("Shutting down timer manager");
        self.stop_all().await;
        self.inner.scheduler.lock().await.shutdown().await?;
        Ok(())
    }

    /// Create an interval-based job.
    ///
    /// The interval is normalized to seconds; a zero-length interval (after
    /// conversion) is rejected. Each firing creates an event via the API.
    async fn create_interval_job(
        &self,
        rule_id: i64,
        interval: u64,
        unit: TimeUnit,
    ) -> Result<Job> {
        // Normalize the configured interval to whole seconds.
        let interval_seconds = match unit {
            TimeUnit::Seconds => interval,
            TimeUnit::Minutes => interval * 60,
            TimeUnit::Hours => interval * 3600,
            TimeUnit::Days => interval * 86400,
        };

        if interval_seconds == 0 {
            return Err(anyhow::anyhow!("Interval must be greater than 0"));
        }

        let api_client = self.inner.api_client.clone();
        let duration = Duration::from_secs(interval_seconds);

        info!(
            "Creating interval job for rule {} (interval: {}s)",
            rule_id, interval_seconds
        );

        // Per-job firing counter, moved into the FnMut closure below and
        // incremented on every invocation.
        let mut execution_count = 0u64;

        let job = Job::new_repeated_async(duration, move |_uuid, _lock| {
            let api_client = api_client.clone();
            let rule_id = rule_id;
            execution_count += 1;
            let count = execution_count;
            let interval_secs = interval_seconds;

            Box::pin(async move {
                let now = Utc::now();

                // Create event payload matching intervaltimer output schema
                let payload = serde_json::json!({
                    "type": "interval",
                    "interval_seconds": interval_secs,
                    "fired_at": now.to_rfc3339(),
                    "execution_count": count,
                    "sensor_ref": "core.interval_timer_sensor",
                });

                // Create event via API
                let request = CreateEventRequest::new("core.intervaltimer".to_string(), payload)
                    .with_trigger_instance_id(format!("rule_{}", rule_id));

                match api_client.create_event_with_retry(request).await {
                    Ok(event_id) => {
                        info!(
                            "Interval timer fired for rule {} (count: {}), created event {}",
                            rule_id, count, event_id
                        );
                    }
                    Err(e) => {
                        // Event creation failure is logged only; the timer
                        // keeps firing on schedule.
                        error!(
                            "Failed to create event for rule {} interval timer: {}",
                            rule_id, e
                        );
                    }
                }
            })
        })?;

        Ok(job)
    }

    /// Create a cron-based job.
    ///
    /// The expression is validated by `Job::new_async`; an invalid cron
    /// string surfaces here as an error. Each firing creates an event that
    /// includes the next scheduled fire time when the scheduler can report it.
    async fn create_cron_job(&self, rule_id: i64, expression: String) -> Result<Job> {
        info!(
            "Creating cron job for rule {} with expression: {}",
            rule_id, expression
        );

        let api_client = self.inner.api_client.clone();
        let expr_clone = expression.clone();

        // Per-job firing counter (same pattern as the interval job).
        let mut execution_count = 0u64;

        let job = Job::new_async(&expression, move |uuid, mut lock| {
            let api_client = api_client.clone();
            let rule_id = rule_id;
            let expression = expr_clone.clone();
            execution_count += 1;
            let count = execution_count;

            Box::pin(async move {
                let now = Utc::now();

                // Get next scheduled time; "unknown" when the scheduler
                // has none or the query fails.
                let next_fire = match lock.next_tick_for_job(uuid).await {
                    Ok(Some(ts)) => ts.to_rfc3339(),
                    Ok(None) => "unknown".to_string(),
                    Err(e) => {
                        warn!("Failed to get next tick for cron job {}: {}", uuid, e);
                        "unknown".to_string()
                    }
                };

                // Create event payload matching crontimer output schema.
                // NOTE(review): "scheduled_at" is set to the actual fire
                // time, and "sensor_ref" reuses the interval sensor's
                // literal — confirm both against the crontimer schema.
                let payload = serde_json::json!({
                    "type": "cron",
                    "fired_at": now.to_rfc3339(),
                    "scheduled_at": now.to_rfc3339(),
                    "expression": expression,
                    "timezone": "UTC",
                    "next_fire_at": next_fire,
                    "execution_count": count,
                    "sensor_ref": "core.interval_timer_sensor",
                });

                // Create event via API
                let request = CreateEventRequest::new("core.crontimer".to_string(), payload)
                    .with_trigger_instance_id(format!("rule_{}", rule_id));

                match api_client.create_event_with_retry(request).await {
                    Ok(event_id) => {
                        info!(
                            "Cron timer fired for rule {} (count: {}), created event {}",
                            rule_id, count, event_id
                        );
                    }
                    Err(e) => {
                        error!(
                            "Failed to create event for rule {} cron timer: {}",
                            rule_id, e
                        );
                    }
                }
            })
        })?;

        Ok(job)
    }

    /// Create a datetime-based (one-shot) job.
    ///
    /// Rejects fire times that are not strictly in the future. The emitted
    /// event records the scheduled time and the observed firing delay.
    async fn create_datetime_job(
        &self,
        rule_id: i64,
        fire_at: chrono::DateTime<Utc>,
    ) -> Result<Job> {
        let now = Utc::now();

        if fire_at <= now {
            return Err(anyhow::anyhow!(
                "DateTime timer fire_at must be in the future"
            ));
        }

        let duration = (fire_at - now)
            .to_std()
            .map_err(|e| anyhow::anyhow!("Invalid duration: {}", e))?;

        info!(
            "Creating one-shot job for rule {} scheduled at {}",
            rule_id,
            fire_at.to_rfc3339()
        );

        let api_client = self.inner.api_client.clone();
        let scheduled_time = fire_at.to_rfc3339();

        let job = Job::new_one_shot_async(duration, move |_uuid, _lock| {
            let api_client = api_client.clone();
            let rule_id = rule_id;
            let scheduled_time = scheduled_time.clone();

            Box::pin(async move {
                let now = Utc::now();

                // Calculate delay between scheduled and actual fire time
                // (clamped to zero if the clock reads earlier than planned).
                let delay_ms = (now.timestamp_millis() - fire_at.timestamp_millis()).max(0);

                // Create event payload matching datetimetimer output schema
                let payload = serde_json::json!({
                    "type": "one_shot",
                    "fire_at": scheduled_time,
                    "fired_at": now.to_rfc3339(),
                    "timezone": "UTC",
                    "delay_ms": delay_ms,
                    "sensor_ref": "core.interval_timer_sensor",
                });

                // Create event via API
                let request = CreateEventRequest::new("core.datetimetimer".to_string(), payload)
                    .with_trigger_instance_id(format!("rule_{}", rule_id));

                match api_client.create_event_with_retry(request).await {
                    Ok(event_id) => {
                        info!(
                            "DateTime timer fired for rule {}, created event {}",
                            rule_id, event_id
                        );
                    }
                    Err(e) => {
                        error!(
                            "Failed to create event for rule {} datetime timer: {}",
                            rule_id, e
                        );
                    }
                }

                info!("One-shot timer completed for rule {}", rule_id);
            })
        })?;

        Ok(job)
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timer_manager_creation() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
assert_eq!(manager.timer_count().await, 0);
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timer_manager_start_stop() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
let config = TimerConfig::Interval {
|
||||
interval: 60,
|
||||
unit: TimeUnit::Seconds,
|
||||
};
|
||||
|
||||
// Start timer
|
||||
manager.start_timer(1, config).await.unwrap();
|
||||
assert_eq!(manager.timer_count().await, 1);
|
||||
|
||||
// Stop timer
|
||||
manager.stop_timer(1).await;
|
||||
assert_eq!(manager.timer_count().await, 0);
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timer_manager_stop_all() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
let config = TimerConfig::Interval {
|
||||
interval: 60,
|
||||
unit: TimeUnit::Seconds,
|
||||
};
|
||||
|
||||
// Start multiple timers
|
||||
manager.start_timer(1, config.clone()).await.unwrap();
|
||||
manager.start_timer(2, config.clone()).await.unwrap();
|
||||
manager.start_timer(3, config).await.unwrap();
|
||||
|
||||
assert_eq!(manager.timer_count().await, 3);
|
||||
|
||||
// Stop all
|
||||
manager.stop_all().await;
|
||||
assert_eq!(manager.timer_count().await, 0);
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_interval_timer_validation() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
let config = TimerConfig::Interval {
|
||||
interval: 0,
|
||||
unit: TimeUnit::Seconds,
|
||||
};
|
||||
|
||||
// Should fail with zero interval
|
||||
let result = manager.start_timer(1, config).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_datetime_timer_validation() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
// Create a datetime in the past
|
||||
let past = Utc::now() - chrono::Duration::seconds(60);
|
||||
let config = TimerConfig::DateTime { fire_at: past };
|
||||
|
||||
// Should fail with past datetime
|
||||
let result = manager.start_timer(1, config).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cron_timer_creation() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
// Valid cron expression: every minute
|
||||
let config = TimerConfig::Cron {
|
||||
expression: "0 * * * * *".to_string(),
|
||||
};
|
||||
|
||||
// Should succeed
|
||||
let result = manager.start_timer(1, config).await;
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(manager.timer_count().await, 1);
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cron_timer_invalid_expression() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
// Invalid cron expression
|
||||
let config = TimerConfig::Cron {
|
||||
expression: "invalid cron".to_string(),
|
||||
};
|
||||
|
||||
// Should fail with invalid expression
|
||||
let result = manager.start_timer(1, config).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timer_restart() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
let config1 = TimerConfig::Interval {
|
||||
interval: 60,
|
||||
unit: TimeUnit::Seconds,
|
||||
};
|
||||
|
||||
let config2 = TimerConfig::Interval {
|
||||
interval: 30,
|
||||
unit: TimeUnit::Seconds,
|
||||
};
|
||||
|
||||
// Start first timer
|
||||
manager.start_timer(1, config1).await.unwrap();
|
||||
assert_eq!(manager.timer_count().await, 1);
|
||||
|
||||
// Start second timer for same rule (should replace)
|
||||
manager.start_timer(1, config2).await.unwrap();
|
||||
assert_eq!(manager.timer_count().await, 1);
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
/// End-to-end smoke test over all three timer kinds: register one interval,
/// one cron, and one datetime timer under distinct rule ids, then stop them
/// one at a time and watch the active count fall to zero.
#[tokio::test]
async fn test_all_timer_types_comprehensive() {
    let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
    let manager = TimerManager::new(api_client).await.unwrap();

    // Test 1: Interval timer
    let interval_config = TimerConfig::Interval {
        interval: 5,
        unit: TimeUnit::Seconds,
    };
    manager.start_timer(100, interval_config).await.unwrap();

    // Test 2: Cron timer - every minute
    let cron_config = TimerConfig::Cron {
        expression: "0 * * * * *".to_string(),
    };
    manager.start_timer(200, cron_config).await.unwrap();

    // Test 3: DateTime timer - 2 seconds in the future
    let fire_time = Utc::now() + chrono::Duration::seconds(2);
    let datetime_config = TimerConfig::DateTime { fire_at: fire_time };
    manager.start_timer(300, datetime_config).await.unwrap();

    // Verify all three timers are active
    assert_eq!(manager.timer_count().await, 3);

    // Stop specific timers; each stop must remove exactly one entry.
    manager.stop_timer(100).await;
    assert_eq!(manager.timer_count().await, 2);

    manager.stop_timer(200).await;
    assert_eq!(manager.timer_count().await, 1);

    manager.stop_timer(300).await;
    assert_eq!(manager.timer_count().await, 0);

    manager.shutdown().await.unwrap();
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cron_various_expressions() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
// Test various valid cron expressions
|
||||
let expressions = vec![
|
||||
"0 0 * * * *", // Every hour
|
||||
"0 */15 * * * *", // Every 15 minutes
|
||||
"0 0 0 * * *", // Daily at midnight
|
||||
"0 0 9 * * 1-5", // Weekdays at 9 AM
|
||||
"0 30 8 * * *", // Every day at 8:30 AM
|
||||
];
|
||||
|
||||
for (i, expr) in expressions.iter().enumerate() {
|
||||
let config = TimerConfig::Cron {
|
||||
expression: expr.to_string(),
|
||||
};
|
||||
let result = manager.start_timer(i as i64 + 1, config).await;
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create cron job with expression: {}",
|
||||
expr
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(manager.timer_count().await, expressions.len());
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_datetime_timer_future_validation() {
|
||||
let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
|
||||
let manager = TimerManager::new(api_client).await.unwrap();
|
||||
|
||||
// Test various future times
|
||||
let one_second = Utc::now() + chrono::Duration::seconds(1);
|
||||
let one_minute = Utc::now() + chrono::Duration::minutes(1);
|
||||
let one_hour = Utc::now() + chrono::Duration::hours(1);
|
||||
|
||||
let config1 = TimerConfig::DateTime {
|
||||
fire_at: one_second,
|
||||
};
|
||||
assert!(manager.start_timer(1, config1).await.is_ok());
|
||||
|
||||
let config2 = TimerConfig::DateTime {
|
||||
fire_at: one_minute,
|
||||
};
|
||||
assert!(manager.start_timer(2, config2).await.is_ok());
|
||||
|
||||
let config3 = TimerConfig::DateTime { fire_at: one_hour };
|
||||
assert!(manager.start_timer(3, config3).await.is_ok());
|
||||
|
||||
assert_eq!(manager.timer_count().await, 3);
|
||||
|
||||
manager.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
/// A single rule id can be successively re-registered with a different timer
/// kind (interval -> cron -> datetime); each replacement keeps the active
/// count at exactly one.
#[tokio::test]
async fn test_mixed_timer_replacement() {
    let api_client = ApiClient::new("http://localhost:8080".to_string(), "token".to_string());
    let manager = TimerManager::new(api_client).await.unwrap();

    let rule_id = 42;

    // Start with interval timer
    let interval_config = TimerConfig::Interval {
        interval: 60,
        unit: TimeUnit::Seconds,
    };
    manager.start_timer(rule_id, interval_config).await.unwrap();
    assert_eq!(manager.timer_count().await, 1);

    // Replace with cron timer
    let cron_config = TimerConfig::Cron {
        expression: "0 0 * * * *".to_string(),
    };
    manager.start_timer(rule_id, cron_config).await.unwrap();
    assert_eq!(manager.timer_count().await, 1);

    // Replace with datetime timer
    let datetime_config = TimerConfig::DateTime {
        fire_at: Utc::now() + chrono::Duration::hours(1),
    };
    manager.start_timer(rule_id, datetime_config).await.unwrap();
    assert_eq!(manager.timer_count().await, 1);

    manager.shutdown().await.unwrap();
}
|
||||
}
|
||||
224
crates/core-timer-sensor/src/token_refresh.rs
Normal file
224
crates/core-timer-sensor/src/token_refresh.rs
Normal file
@@ -0,0 +1,224 @@
|
||||
//! Token Refresh Manager
|
||||
//!
|
||||
//! Automatically refreshes sensor tokens before they expire to enable
|
||||
//! zero-downtime operation without manual intervention.
|
||||
//!
|
||||
//! Refresh Strategy:
|
||||
//! - Token TTL: 90 days
|
||||
//! - Refresh threshold: 80% of TTL (72 days)
|
||||
//! - Check interval: 1 hour
|
||||
//! - Retry on failure: Exponential backoff (1min, 2min, 4min, 8min, max 1 hour)
|
||||
|
||||
use crate::api_client::ApiClient;
|
||||
use anyhow::Result;
|
||||
use base64::{engine::general_purpose, Engine as _};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::time::{sleep, Duration};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Token refresh manager.
///
/// Owns the [`ApiClient`] whose bearer token it keeps fresh. Created via
/// [`TokenRefreshManager::new`] and consumed by [`TokenRefreshManager::start`],
/// which moves it into a background tokio task.
pub struct TokenRefreshManager {
    // Client holding the current token; also used to perform the refresh call.
    api_client: ApiClient,
    // Fraction of the token's TTL after which a refresh is attempted
    // (e.g. 0.8 refreshes once 80% of the lifetime has elapsed).
    refresh_threshold: f64,
}
|
||||
|
||||
/// JWT claims for decoding token expiration.
///
/// Only the claims this manager reads are modeled. Every field carries
/// `#[serde(default)]`, so a payload missing a claim still deserializes —
/// with 0 / empty-string values rather than an error. Callers doing TTL math
/// on `exp`/`iat` must account for that.
#[derive(Debug, Serialize, Deserialize)]
struct JwtClaims {
    /// Expiration time (Unix seconds); 0 when the claim is absent.
    #[serde(default)]
    exp: i64,
    /// Issued-at time (Unix seconds); 0 when the claim is absent.
    #[serde(default)]
    iat: i64,
    /// Subject (sensor identity); empty when the claim is absent.
    #[serde(default)]
    sub: String,
}
|
||||
|
||||
impl TokenRefreshManager {
|
||||
/// Create a new token refresh manager
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `api_client` - API client with the current token
|
||||
/// * `refresh_threshold` - Percentage of TTL before refreshing (e.g., 0.8 for 80%)
|
||||
pub fn new(api_client: ApiClient, refresh_threshold: f64) -> Self {
|
||||
Self {
|
||||
api_client,
|
||||
refresh_threshold,
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the token refresh background task
|
||||
///
|
||||
/// This spawns a tokio task that:
|
||||
/// 1. Checks token expiration every hour
|
||||
/// 2. Refreshes when threshold reached (e.g., 80% of TTL)
|
||||
/// 3. Retries on failure with exponential backoff
|
||||
/// 4. Logs all refresh events
|
||||
pub fn start(self) -> tokio::task::JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
info!(
|
||||
"Token refresh manager started (threshold: {}%)",
|
||||
self.refresh_threshold * 100.0
|
||||
);
|
||||
|
||||
let mut retry_delay = Duration::from_secs(60); // Start with 1 minute
|
||||
let max_retry_delay = Duration::from_secs(3600); // Max 1 hour
|
||||
let check_interval = Duration::from_secs(3600); // Check every hour
|
||||
|
||||
loop {
|
||||
match self.check_and_refresh().await {
|
||||
Ok(RefreshStatus::Refreshed) => {
|
||||
info!("Token refresh successful");
|
||||
retry_delay = Duration::from_secs(60); // Reset retry delay
|
||||
sleep(check_interval).await;
|
||||
}
|
||||
Ok(RefreshStatus::NotNeeded) => {
|
||||
debug!("Token refresh not needed yet");
|
||||
retry_delay = Duration::from_secs(60); // Reset retry delay
|
||||
sleep(check_interval).await;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Token refresh failed: {}", e);
|
||||
warn!("Retrying token refresh in {:?}", retry_delay);
|
||||
sleep(retry_delay).await;
|
||||
|
||||
// Exponential backoff with max limit
|
||||
retry_delay = std::cmp::min(retry_delay * 2, max_retry_delay);
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Check if token needs refresh and refresh if necessary
|
||||
async fn check_and_refresh(&self) -> Result<RefreshStatus> {
|
||||
let token = self.api_client.get_token().await;
|
||||
|
||||
// Decode token to get expiration
|
||||
let claims = self.decode_token(&token)?;
|
||||
|
||||
let now = Utc::now().timestamp();
|
||||
let ttl = claims.exp - claims.iat;
|
||||
let refresh_at = claims.iat + ((ttl as f64) * self.refresh_threshold) as i64;
|
||||
|
||||
debug!(
|
||||
"Token check: iat={}, exp={}, ttl={}s, refresh_at={}, now={}",
|
||||
claims.iat, claims.exp, ttl, refresh_at, now
|
||||
);
|
||||
|
||||
if now >= refresh_at {
|
||||
let time_until_expiry = claims.exp - now;
|
||||
info!(
|
||||
"Token refresh threshold reached, refreshing (expires in {} seconds)",
|
||||
time_until_expiry
|
||||
);
|
||||
|
||||
// Refresh the token
|
||||
self.api_client.refresh_token().await?;
|
||||
|
||||
Ok(RefreshStatus::Refreshed)
|
||||
} else {
|
||||
let time_until_refresh = refresh_at - now;
|
||||
let time_until_expiry = claims.exp - now;
|
||||
|
||||
debug!(
|
||||
"Token still valid, refresh in {} seconds (expires in {} seconds)",
|
||||
time_until_refresh, time_until_expiry
|
||||
);
|
||||
|
||||
Ok(RefreshStatus::NotNeeded)
|
||||
}
|
||||
}
|
||||
|
||||
/// Decode JWT token to extract claims
|
||||
fn decode_token(&self, token: &str) -> Result<JwtClaims> {
|
||||
// JWT format: header.payload.signature
|
||||
let parts: Vec<&str> = token.split('.').collect();
|
||||
|
||||
if parts.len() != 3 {
|
||||
return Err(anyhow::anyhow!("Invalid JWT format: expected 3 parts"));
|
||||
}
|
||||
|
||||
// Decode base64 payload
|
||||
let payload = parts[1];
|
||||
let decoded = general_purpose::URL_SAFE_NO_PAD
|
||||
.decode(payload)
|
||||
.or_else(|_| general_purpose::STANDARD.decode(payload))
|
||||
.map_err(|e| anyhow::anyhow!("Failed to decode JWT payload: {}", e))?;
|
||||
|
||||
// Parse JSON
|
||||
let claims: JwtClaims = serde_json::from_slice(&decoded)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to parse JWT claims: {}", e))?;
|
||||
|
||||
Ok(claims)
|
||||
}
|
||||
|
||||
/// Get token expiration time
|
||||
#[allow(dead_code)]
|
||||
pub async fn get_token_expiration(&self) -> Result<DateTime<Utc>> {
|
||||
let token = self.api_client.get_token().await;
|
||||
let claims = self.decode_token(&token)?;
|
||||
|
||||
let expiration = DateTime::from_timestamp(claims.exp, 0)
|
||||
.ok_or_else(|| anyhow::anyhow!("Invalid expiration timestamp"))?;
|
||||
|
||||
Ok(expiration)
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of a refresh check.
///
/// Returned by `check_and_refresh` so the polling loop in `start` can
/// distinguish "work was done" from "nothing to do" without a separate flag.
#[derive(Debug)]
enum RefreshStatus {
    /// Token was refreshed
    Refreshed,
    /// Refresh not needed yet
    NotNeeded,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// decode_token reads `sub`, `iat`, and `exp` from a structurally valid
    /// JWT. The signature segment is a placeholder: decode_token never
    /// verifies signatures, only parses the payload.
    #[test]
    fn test_decode_valid_token() {
        // Valid JWT with exp and iat claims
        let token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJzZW5zb3I6Y29yZS50aW1lciIsImlhdCI6MTcwNjM1NjQ5NiwiZXhwIjoxNzE0MTMyNDk2fQ.signature";

        let manager = TokenRefreshManager::new(
            ApiClient::new("http://localhost:8080".to_string(), token.to_string()),
            0.8,
        );

        let claims = manager.decode_token(token).unwrap();
        assert_eq!(claims.iat, 1706356496);
        assert_eq!(claims.exp, 1714132496);
        assert_eq!(claims.sub, "sensor:core.timer");
    }

    /// A string that is not three dot-separated parts must fail to decode.
    #[test]
    fn test_decode_invalid_token() {
        let manager = TokenRefreshManager::new(
            ApiClient::new("http://localhost:8080".to_string(), "invalid".to_string()),
            0.8,
        );

        let result = manager.decode_token("invalid_token");
        assert!(result.is_err());
    }

    /// Sanity-check the refresh-at arithmetic mirrored in check_and_refresh.
    #[test]
    fn test_refresh_threshold_calculation() {
        // Token issued at epoch 1000, expires at 2000 (TTL = 1000)
        // Refresh threshold 80% = 800 seconds after issuance
        // Refresh at: 1000 + 800 = 1800

        let iat = 1000;
        let exp = 2000;
        let ttl = exp - iat;
        let threshold = 0.8;

        let refresh_at = iat + ((ttl as f64) * threshold) as i64;

        assert_eq!(refresh_at, 1800);
    }
}
|
||||
285
crates/core-timer-sensor/src/types.rs
Normal file
285
crates/core-timer-sensor/src/types.rs
Normal file
@@ -0,0 +1,285 @@
|
||||
//! Shared types for timer sensor
|
||||
//!
|
||||
//! Defines timer configurations and common data structures.
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Timer configuration for different timer types.
///
/// (De)serialized as internally-tagged JSON: the `type` field selects the
/// variant in snake_case ("interval", "cron", "date_time").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TimerConfig {
    /// Interval-based timer (fires every N seconds/minutes/hours)
    Interval {
        /// Number of units between fires
        interval: u64,
        /// Unit of time (seconds, minutes, hours, days); payloads that omit
        /// this field default to seconds via `default_unit`.
        #[serde(default = "default_unit")]
        unit: TimeUnit,
    },
    /// Cron-based timer (fires based on cron expression)
    Cron {
        /// Cron expression (e.g., "0 0 * * *")
        expression: String,
    },
    /// Date/time-based timer (fires at a specific time)
    DateTime {
        /// ISO 8601 timestamp to fire at
        fire_at: DateTime<Utc>,
    },
}
|
||||
|
||||
/// Serde default for `TimerConfig::Interval::unit`: an interval payload with
/// no explicit unit is interpreted as seconds.
fn default_unit() -> TimeUnit {
    TimeUnit::Seconds
}
|
||||
|
||||
/// Time unit for interval timers.
///
/// Serialized in lowercase ("seconds", "minutes", "hours", "days").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TimeUnit {
    Seconds,
    Minutes,
    Hours,
    Days,
}
|
||||
|
||||
impl TimerConfig {
|
||||
/// Calculate total interval in seconds
|
||||
#[allow(dead_code)]
|
||||
pub fn interval_seconds(&self) -> Option<u64> {
|
||||
match self {
|
||||
TimerConfig::Interval { interval, unit } => Some(match unit {
|
||||
TimeUnit::Seconds => *interval,
|
||||
TimeUnit::Minutes => interval * 60,
|
||||
TimeUnit::Hours => interval * 3600,
|
||||
TimeUnit::Days => interval * 86400,
|
||||
}),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the cron expression if this is a cron timer
|
||||
#[allow(dead_code)]
|
||||
pub fn cron_expression(&self) -> Option<&str> {
|
||||
match self {
|
||||
TimerConfig::Cron { expression } => Some(expression),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the fire time if this is a datetime timer
|
||||
#[allow(dead_code)]
|
||||
pub fn fire_time(&self) -> Option<DateTime<Utc>> {
|
||||
match self {
|
||||
TimerConfig::DateTime { fire_at } => Some(*fire_at),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Rule lifecycle event types.
///
/// Describes changes to a rule's lifecycle state. (De)serialized as
/// internally-tagged JSON: the `event_type` field names the variant in
/// PascalCase (e.g. "RuleCreated").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "event_type", rename_all = "PascalCase")]
pub enum RuleLifecycleEvent {
    /// A rule was created; `enabled` tells whether it is active immediately.
    RuleCreated {
        rule_id: i64,
        rule_ref: String,
        trigger_type: String,
        trigger_params: Option<serde_json::Value>,
        enabled: bool,
        timestamp: DateTime<Utc>,
    },
    /// An existing rule was enabled; carries the trigger params needed to
    /// (re)start its timer.
    RuleEnabled {
        rule_id: i64,
        rule_ref: String,
        trigger_type: String,
        trigger_params: Option<serde_json::Value>,
        timestamp: DateTime<Utc>,
    },
    /// An existing rule was disabled; no trigger params are included.
    RuleDisabled {
        rule_id: i64,
        rule_ref: String,
        trigger_type: String,
        timestamp: DateTime<Utc>,
    },
    /// A rule was deleted; no trigger params are included.
    RuleDeleted {
        rule_id: i64,
        rule_ref: String,
        trigger_type: String,
        timestamp: DateTime<Utc>,
    },
}
|
||||
|
||||
impl RuleLifecycleEvent {
|
||||
/// Get the rule ID from any event type
|
||||
#[allow(dead_code)]
|
||||
pub fn rule_id(&self) -> i64 {
|
||||
match self {
|
||||
RuleLifecycleEvent::RuleCreated { rule_id, .. }
|
||||
| RuleLifecycleEvent::RuleEnabled { rule_id, .. }
|
||||
| RuleLifecycleEvent::RuleDisabled { rule_id, .. }
|
||||
| RuleLifecycleEvent::RuleDeleted { rule_id, .. } => *rule_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the trigger type from any event type
|
||||
pub fn trigger_type(&self) -> &str {
|
||||
match self {
|
||||
RuleLifecycleEvent::RuleCreated { trigger_type, .. }
|
||||
| RuleLifecycleEvent::RuleEnabled { trigger_type, .. }
|
||||
| RuleLifecycleEvent::RuleDisabled { trigger_type, .. }
|
||||
| RuleLifecycleEvent::RuleDeleted { trigger_type, .. } => trigger_type,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get trigger params if available
|
||||
#[allow(dead_code)]
|
||||
pub fn trigger_params(&self) -> Option<&serde_json::Value> {
|
||||
match self {
|
||||
RuleLifecycleEvent::RuleCreated { trigger_params, .. }
|
||||
| RuleLifecycleEvent::RuleEnabled { trigger_params, .. } => trigger_params.as_ref(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if rule should be active (created and enabled, or explicitly enabled)
|
||||
#[allow(dead_code)]
|
||||
pub fn is_active(&self) -> bool {
|
||||
match self {
|
||||
RuleLifecycleEvent::RuleCreated { enabled, .. } => *enabled,
|
||||
RuleLifecycleEvent::RuleEnabled { .. } => true,
|
||||
RuleLifecycleEvent::RuleDisabled { .. } | RuleLifecycleEvent::RuleDeleted { .. } => {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// interval_seconds converts every supported unit to seconds.
    #[test]
    fn test_timer_config_interval_seconds() {
        let config = TimerConfig::Interval {
            interval: 5,
            unit: TimeUnit::Seconds,
        };
        assert_eq!(config.interval_seconds(), Some(5));

        let config = TimerConfig::Interval {
            interval: 2,
            unit: TimeUnit::Minutes,
        };
        assert_eq!(config.interval_seconds(), Some(120));

        let config = TimerConfig::Interval {
            interval: 1,
            unit: TimeUnit::Hours,
        };
        assert_eq!(config.interval_seconds(), Some(3600));

        let config = TimerConfig::Interval {
            interval: 1,
            unit: TimeUnit::Days,
        };
        assert_eq!(config.interval_seconds(), Some(86400));
    }

    /// Cron configs expose their expression and report no interval.
    #[test]
    fn test_timer_config_cron() {
        let config = TimerConfig::Cron {
            expression: "0 0 * * *".to_string(),
        };
        assert_eq!(config.cron_expression(), Some("0 0 * * *"));
        assert_eq!(config.interval_seconds(), None);
    }

    /// DateTime configs expose their fire time and report no interval.
    #[test]
    fn test_timer_config_datetime() {
        let fire_at = Utc::now();
        let config = TimerConfig::DateTime { fire_at };
        assert_eq!(config.fire_time(), Some(fire_at));
        assert_eq!(config.interval_seconds(), None);
    }

    /// Interval configs deserialize from `type`-tagged JSON.
    #[test]
    fn test_timer_config_deserialization_interval() {
        let json = r#"{
            "type": "interval",
            "interval": 30,
            "unit": "seconds"
        }"#;

        let config: TimerConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.interval_seconds(), Some(30));
    }

    /// An interval payload with no "unit" falls back to seconds
    /// (via the `default_unit` serde default).
    #[test]
    fn test_timer_config_deserialization_interval_default_unit() {
        let json = r#"{
            "type": "interval",
            "interval": 60
        }"#;

        let config: TimerConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.interval_seconds(), Some(60));
    }

    /// Cron configs deserialize from `type`-tagged JSON.
    #[test]
    fn test_timer_config_deserialization_cron() {
        let json = r#"{
            "type": "cron",
            "expression": "0 0 * * *"
        }"#;

        let config: TimerConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.cron_expression(), Some("0 0 * * *"));
    }

    /// rule_id() extracts the id from any event variant.
    #[test]
    fn test_rule_lifecycle_event_rule_id() {
        let event = RuleLifecycleEvent::RuleCreated {
            rule_id: 123,
            rule_ref: "test".to_string(),
            trigger_type: "core.timer".to_string(),
            trigger_params: None,
            enabled: true,
            timestamp: Utc::now(),
        };
        assert_eq!(event.rule_id(), 123);
    }

    /// trigger_type() extracts the trigger type from any event variant.
    #[test]
    fn test_rule_lifecycle_event_trigger_type() {
        let event = RuleLifecycleEvent::RuleEnabled {
            rule_id: 123,
            rule_ref: "test".to_string(),
            trigger_type: "core.timer".to_string(),
            trigger_params: None,
            timestamp: Utc::now(),
        };
        assert_eq!(event.trigger_type(), "core.timer");
    }

    /// is_active: Created honors its `enabled` flag; Disabled is inactive.
    #[test]
    fn test_rule_lifecycle_event_is_active() {
        let event = RuleLifecycleEvent::RuleCreated {
            rule_id: 123,
            rule_ref: "test".to_string(),
            trigger_type: "core.timer".to_string(),
            trigger_params: None,
            enabled: true,
            timestamp: Utc::now(),
        };
        assert!(event.is_active());

        let event = RuleLifecycleEvent::RuleDisabled {
            rule_id: 123,
            rule_ref: "test".to_string(),
            trigger_type: "core.timer".to_string(),
            timestamp: Utc::now(),
        };
        assert!(!event.is_active());
    }
}
|
||||
Reference in New Issue
Block a user