move rhailib to herolib

This commit is contained in:
Timur Gordon
2025-08-21 14:32:24 +02:00
parent aab2b6f128
commit aa0248ef17
121 changed files with 16412 additions and 0 deletions

View File

@@ -0,0 +1,530 @@
# API Integration Guide for RhaiLib
## Quick Start
This guide shows you how to integrate external APIs with Rhai scripts using RhaiLib's async architecture.
## Table of Contents
1. [Setup and Configuration](#setup-and-configuration)
2. [Basic API Calls](#basic-api-calls)
3. [Stripe Payment Integration](#stripe-payment-integration)
4. [Error Handling Patterns](#error-handling-patterns)
5. [Advanced Usage](#advanced-usage)
6. [Extending to Other APIs](#extending-to-other-apis)
## Setup and Configuration
### 1. Environment Variables
Create a `.env` file in your project:
```bash
# .env
STRIPE_SECRET_KEY=sk_test_your_stripe_key_here
STRIPE_PUBLISHABLE_KEY=pk_test_your_publishable_key_here
```
### 2. Rust Setup
```rust
use rhailib_dsl::payment::register_payment_rhai_module;
use rhai::{Engine, EvalAltResult, Scope};
use std::env;
fn main() -> Result<(), Box<EvalAltResult>> {
// Load environment variables
dotenv::from_filename(".env").ok();
// Create Rhai engine and register payment module
let mut engine = Engine::new();
register_payment_rhai_module(&mut engine);
// Set up scope with API credentials
let mut scope = Scope::new();
let stripe_key = env::var("STRIPE_SECRET_KEY").unwrap();
scope.push("STRIPE_API_KEY", stripe_key);
// Execute your Rhai script
let script = std::fs::read_to_string("payment_script.rhai")?;
engine.eval_with_scope::<()>(&mut scope, &script)?;
Ok(())
}
```
### 3. Rhai Script Configuration
```rhai
// Configure the API client
let config_result = configure_stripe(STRIPE_API_KEY);
print(`Configuration: ${config_result}`);
```
## Basic API Calls
### Simple Product Creation
```rhai
// Create a basic product
let product = new_product()
.name("My Product")
.description("A great product");
try {
let product_id = product.create();
print(`✅ Created product: ${product_id}`);
} catch(error) {
print(`❌ Error: ${error}`);
}
```
### Price Configuration
```rhai
// One-time payment price
let one_time_price = new_price()
.amount(1999) // $19.99 in cents
.currency("usd")
.product(product_id);
let price_id = one_time_price.create();
// Subscription price
let monthly_price = new_price()
.amount(999) // $9.99 in cents
.currency("usd")
.product(product_id)
.recurring("month");
let monthly_price_id = monthly_price.create();
```
## Stripe Payment Integration
### Complete Payment Workflow
```rhai
// 1. Configure Stripe
configure_stripe(STRIPE_API_KEY);
// 2. Create Product
let product = new_product()
.name("Premium Software License")
.description("Professional software solution")
.metadata("category", "software")
.metadata("tier", "premium");
let product_id = product.create();
// 3. Create Pricing Options
let monthly_price = new_price()
.amount(2999) // $29.99
.currency("usd")
.product(product_id)
.recurring("month")
.metadata("billing", "monthly");
let annual_price = new_price()
.amount(29999) // $299.99 (save $60)
.currency("usd")
.product(product_id)
.recurring("year")
.metadata("billing", "annual")
.metadata("discount", "save_60");
let monthly_price_id = monthly_price.create();
let annual_price_id = annual_price.create();
// 4. Create Discount Coupons
let welcome_coupon = new_coupon()
.duration("once")
.percent_off(25)
.metadata("campaign", "welcome_offer");
let coupon_id = welcome_coupon.create();
// 5. Create Payment Intent for One-time Purchase
let payment_intent = new_payment_intent()
.amount(2999)
.currency("usd")
.customer("cus_customer_id")
.description("Monthly subscription payment")
.add_payment_method_type("card")
.metadata("price_id", monthly_price_id);
let intent_id = payment_intent.create();
// 6. Create Subscription
let subscription = new_subscription()
.customer("cus_customer_id")
.add_price(monthly_price_id)
.trial_days(14)
.coupon(coupon_id)
.metadata("source", "website");
let subscription_id = subscription.create();
```
### Builder Pattern Examples
#### Product with Metadata
```rhai
let product = new_product()
.name("Enterprise Software")
.description("Full-featured business solution")
.metadata("category", "enterprise")
.metadata("support_level", "premium")
.metadata("deployment", "cloud");
```
#### Complex Pricing
```rhai
let tiered_price = new_price()
.amount(4999) // $49.99
.currency("usd")
.product(product_id)
.recurring_with_count("month", 12) // 12 monthly payments
.metadata("tier", "professional")
.metadata("features", "advanced");
```
#### Multi-item Subscription
```rhai
let enterprise_subscription = new_subscription()
.customer("cus_enterprise_customer")
.add_price_with_quantity(user_license_price_id, 50) // 50 user licenses
.add_price(support_addon_price_id) // Premium support
.add_price(analytics_addon_price_id) // Analytics addon
.trial_days(30)
.metadata("plan", "enterprise")
.metadata("contract_length", "annual");
```
## Error Handling Patterns
### Basic Error Handling
```rhai
try {
let result = some_api_call();
print(`Success: ${result}`);
} catch(error) {
print(`Error occurred: ${error}`);
// Continue with fallback logic
}
```
### Graceful Degradation
```rhai
// Try to create with coupon, fallback without coupon
let subscription_id;
try {
subscription_id = new_subscription()
.customer(customer_id)
.add_price(price_id)
.coupon(coupon_id)
.create();
} catch(error) {
print(`Coupon failed: ${error}, creating without coupon`);
subscription_id = new_subscription()
.customer(customer_id)
.add_price(price_id)
.create();
}
```
### Validation Before API Calls
```rhai
// Validate inputs before making API calls
if customer_id == "" {
print("❌ Customer ID is required");
return;
}
if price_id == "" {
print("❌ Price ID is required");
return;
}
// Proceed with API call
let subscription = new_subscription()
.customer(customer_id)
.add_price(price_id)
.create();
```
## Advanced Usage
### Conditional Logic
```rhai
// Different pricing based on customer type
let price_id;
if customer_type == "enterprise" {
price_id = enterprise_price_id;
} else if customer_type == "professional" {
price_id = professional_price_id;
} else {
price_id = standard_price_id;
}
let subscription = new_subscription()
.customer(customer_id)
.add_price(price_id);
// Add trial for new customers
if is_new_customer {
subscription = subscription.trial_days(14);
}
let subscription_id = subscription.create();
```
### Dynamic Metadata
```rhai
// Build metadata dynamically
let product = new_product()
.name(product_name)
.description(product_description);
// Add metadata based on conditions
if has_support {
product = product.metadata("support", "included");
}
if is_premium {
product = product.metadata("tier", "premium");
}
if region != "" {
product = product.metadata("region", region);
}
let product_id = product.create();
```
### Bulk Operations
```rhai
// Create multiple prices for a product
let price_configs = [
#{amount: 999, interval: "month", name: "Monthly"},
#{amount: 9999, interval: "year", name: "Annual"},
#{amount: 19999, interval: "", name: "Lifetime"}
];
let price_ids = [];
for config in price_configs {
let price = new_price()
.amount(config.amount)
.currency("usd")
.product(product_id)
.metadata("plan_name", config.name);
if config.interval != "" {
price = price.recurring(config.interval);
}
let price_id = price.create();
price_ids.push(price_id);
print(`Created ${config.name} price: ${price_id}`);
}
```
## Extending to Other APIs
### Adding New API Support
To extend the architecture to other APIs, follow this pattern:
#### 1. Define Configuration Structure
```rust
#[derive(Debug, Clone)]
pub struct CustomApiConfig {
pub api_key: String,
pub base_url: String,
pub client: Client,
}
```
#### 2. Implement Request Handler
```rust
async fn handle_custom_api_request(
config: &CustomApiConfig,
request: &AsyncRequest
) -> Result<String, String> {
let url = format!("{}/{}", config.base_url, request.endpoint);
let response = config.client
.request(Method::from_str(&request.method).unwrap(), &url)
.header("Authorization", format!("Bearer {}", config.api_key))
.json(&request.data)
.send()
.await
.map_err(|e| format!("Request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
Ok(response_text)
}
```
#### 3. Register Rhai Functions
```rust
#[rhai_fn(name = "custom_api_call", return_raw)]
pub fn custom_api_call(
endpoint: String,
data: rhai::Map
) -> Result<String, Box<EvalAltResult>> {
let registry = CUSTOM_API_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("API not configured")?;
let form_data: HashMap<String, String> = data.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
registry.make_request(endpoint, "POST".to_string(), form_data)
.map_err(|e| e.to_string().into())
}
```
### Example: GitHub API Integration
```rhai
// Hypothetical GitHub API integration
configure_github_api(GITHUB_TOKEN);
// Create a repository
let repo_data = #{
name: "my-new-repo",
description: "Created via Rhai script",
private: false
};
let repo_result = github_api_call("user/repos", repo_data);
print(`Repository created: ${repo_result}`);
// Create an issue
let issue_data = #{
title: "Initial setup",
body: "Setting up the repository structure",
labels: ["enhancement", "setup"]
};
let issue_result = github_api_call("repos/user/my-new-repo/issues", issue_data);
print(`Issue created: ${issue_result}`);
```
## Performance Tips
### 1. Batch Operations
```rhai
// Instead of creating items one by one, batch when possible
let items_to_create = [item1, item2, item3];
let created_items = [];
for item in items_to_create {
try {
let result = item.create();
created_items.push(result);
} catch(error) {
print(`Failed to create item: ${error}`);
}
}
```
### 2. Reuse Configuration
```rhai
// Configure once, use multiple times
configure_stripe(STRIPE_API_KEY);
// Multiple operations use the same configuration
let product1_id = new_product().name("Product 1").create();
let product2_id = new_product().name("Product 2").create();
let price1_id = new_price().product(product1_id).amount(1000).create();
let price2_id = new_price().product(product2_id).amount(2000).create();
```
### 3. Error Recovery
```rhai
// Implement retry logic for transient failures
let max_retries = 3;
let retry_count = 0;
let success = false;
while retry_count < max_retries && !success {
try {
let result = api_operation();
success = true;
print(`Success: ${result}`);
} catch(error) {
retry_count += 1;
print(`Attempt ${retry_count} failed: ${error}`);
if retry_count < max_retries {
print("Retrying...");
}
}
}
if !success {
print("❌ All retry attempts failed");
}
```
## Debugging and Monitoring
### Enable Detailed Logging
```rhai
// The architecture automatically logs key operations:
// 🔧 Configuring Stripe...
// 🚀 Async worker thread started
// 🔄 Processing POST request to products
// 📥 Stripe response: {...}
// ✅ Request successful with ID: prod_xxx
```
### Monitor Request Performance
```rhai
// Time API operations
let start_time = timestamp();
let result = expensive_api_operation();
let end_time = timestamp();
print(`Operation took ${end_time - start_time}ms`);
```
### Handle Rate Limits
```rhai
// Implement backoff for rate-limited APIs
try {
let result = api_call();
} catch(error) {
if error.contains("rate limit") {
print("Rate limited, waiting before retry...");
// In a real implementation, you'd add delay logic
}
}
```
## Best Practices Summary
1. **Always handle errors gracefully** - Use try/catch blocks for all API calls
2. **Validate inputs** - Check required fields before making API calls
3. **Use meaningful metadata** - Add context to help with debugging and analytics
4. **Configure once, use many** - Set up API clients once and reuse them
5. **Implement retry logic** - Handle transient network failures
6. **Monitor performance** - Track API response times and success rates
7. **Secure credentials** - Use environment variables for API keys
8. **Test with demo data** - Use test API keys during development
This architecture provides a robust foundation for integrating any HTTP-based API with Rhai scripts while maintaining the simplicity and safety that makes Rhai attractive for domain-specific scripting.

View File

@@ -0,0 +1,294 @@
# Rhailib Architecture Overview
Rhailib is a comprehensive Rust-based ecosystem for executing Rhai scripts in distributed environments with full business domain support, authorization, and scalability features.
## System Architecture
```mermaid
graph TB
subgraph "Client Layer"
A[rhai_dispatcher] --> B[Redis Task Queues]
UI[rhai_engine_ui] --> B
REPL[ui_repl] --> B
end
subgraph "Processing Layer"
B --> C[rhailib_worker]
C --> D[rhailib_engine]
D --> E[rhailib_dsl]
end
subgraph "Core Infrastructure"
E --> F[derive - Procedural Macros]
E --> G[macros - Authorization]
D --> H[mock_db - Testing]
end
subgraph "Operations Layer"
I[monitor] --> B
I --> C
end
subgraph "Data Layer"
J[Redis] --> B
K[Database] --> E
end
```
## Crate Overview
### Core Engine Components
#### [`rhailib_engine`](../src/engine/docs/ARCHITECTURE.md)
The central Rhai scripting engine that orchestrates all business domain modules.
- **Purpose**: Unified engine creation and script execution
- **Features**: Mock database, feature-based architecture, performance optimization
- **Key Functions**: `create_heromodels_engine()`, script compilation and execution
#### [`rhailib_dsl`](../src/dsl/docs/ARCHITECTURE.md)
Comprehensive Domain-Specific Language implementation exposing business models to Rhai.
- **Purpose**: Business domain integration with Rhai scripting
- **Domains**: Business operations, finance, content management, workflows, access control
- **Features**: Fluent APIs, type safety, authorization integration
### Code Generation and Utilities
#### [`derive`](../src/derive/docs/ARCHITECTURE.md)
Procedural macros for automatic Rhai integration code generation.
- **Purpose**: Simplify Rhai integration for custom types
- **Macros**: `RhaiApi` for DSL generation, `FromVec` for type conversion
- **Features**: Builder pattern generation, error handling
#### [`macros`](../src/macros/docs/ARCHITECTURE.md)
Authorization macros and utilities for secure database operations.
- **Purpose**: Declarative security for Rhai functions
- **Features**: CRUD operation macros, access control, context management
- **Security**: Multi-level authorization, audit trails
### Client and Communication
#### [`rhai_dispatcher`](../src/client/docs/ARCHITECTURE.md)
Redis-based client library for distributed script execution.
- **Purpose**: Submit and manage Rhai script execution requests
- **Features**: Builder pattern API, timeout handling, request-reply pattern
- **Architecture**: Async operations, connection pooling, error handling
#### [`rhailib_worker`](../src/worker/docs/ARCHITECTURE.md)
Distributed task execution system for processing Rhai scripts.
- **Purpose**: Scalable script processing with queue-based architecture
- **Features**: Multi-context support, horizontal scaling, fault tolerance, context injection
- **Architecture**: Workers decoupled from contexts, allowing single worker to serve multiple circles
- **Integration**: Full engine and DSL access, secure execution
### User Interfaces
#### [`ui_repl`](../src/repl/docs/ARCHITECTURE.md)
Interactive development environment for Rhai script development.
- **Purpose**: Real-time script development and testing
- **Features**: Enhanced CLI, dual execution modes, worker management
- **Development**: Syntax highlighting, script editing, immediate feedback
#### [`rhai_engine_ui`](../src/rhai_engine_ui/docs/ARCHITECTURE.md)
Web-based interface for Rhai script management and execution.
- **Purpose**: Browser-based script execution and management
- **Architecture**: WebAssembly frontend with optional server backend
- **Features**: Real-time updates, task management, visual interface
### Operations and Monitoring
#### [`monitor`](../src/monitor/docs/ARCHITECTURE.md)
Command-line monitoring and management tool for the rhailib ecosystem.
- **Purpose**: System observability and task management
- **Features**: Real-time monitoring, performance metrics, queue management
- **Operations**: Multi-worker support, interactive CLI, visualization
## Data Flow Architecture
### Script Execution Flow
```mermaid
sequenceDiagram
participant Client as rhai_dispatcher
participant Redis as Redis Queue
participant Worker as rhailib_worker
participant Engine as rhailib_engine
participant DSL as rhailib_dsl
participant DB as Database
Client->>Redis: Submit script task (worker_id + context_id)
Worker->>Redis: Poll worker queue (worker_id)
Redis->>Worker: Return task with context_id
Worker->>Engine: Create configured engine
Engine->>DSL: Register domain modules
Worker->>Engine: Execute script with context_id
Engine->>DSL: Call business functions (context_id)
DSL->>DB: Perform authorized operations (context_id)
DB->>DSL: Return results
DSL->>Engine: Return processed data
Engine->>Worker: Return execution result
Worker->>Redis: Publish result to reply queue
Redis->>Client: Deliver result
```
### Authorization Flow
```mermaid
sequenceDiagram
participant Script as Rhai Script
participant Macro as Authorization Macro
participant Context as Execution Context
participant Access as Access Control
participant DB as Database
Script->>Macro: Call authorized function
Macro->>Context: Extract caller credentials
Context->>Access: Validate permissions
Access->>DB: Check resource access
DB->>Access: Return authorization result
Access->>Macro: Grant/deny access
Macro->>DB: Execute authorized operation
DB->>Script: Return results
```
## Worker-Context Decoupling Architecture
A key architectural feature of rhailib is the decoupling of worker assignment from context management:
### Traditional Model (Previous)
- **One Worker Per Circle**: Each worker was dedicated to a specific circle/context
- **Queue Per Circle**: Workers listened to circle-specific queues
- **Tight Coupling**: Worker identity was directly tied to context identity
### New Decoupled Model (Current)
- **Worker ID**: Determines which queue the worker listens to (`rhailib:<worker_id>`)
- **Context ID**: Provided in task details, determines execution context and database access
- **Flexible Assignment**: Single worker can process tasks for multiple contexts
### Benefits of Decoupling
1. **Resource Efficiency**: Better worker utilization across multiple contexts
2. **Deployment Flexibility**: Easier scaling and resource allocation
3. **Cost Optimization**: Fewer worker instances needed for multi-context scenarios
4. **Operational Simplicity**: Centralized worker management with distributed contexts
### Implementation Details
```mermaid
graph LR
subgraph "Client Layer"
C[Client] --> |worker_id + context_id| Q[Redis Queue]
end
subgraph "Worker Layer"
W1[Worker 1] --> |listens to| Q1[Queue: worker-1]
W2[Worker 2] --> |listens to| Q2[Queue: worker-2]
end
subgraph "Context Layer"
W1 --> |processes| CTX1[Context A]
W1 --> |processes| CTX2[Context B]
W2 --> |processes| CTX1
W2 --> |processes| CTX3[Context C]
end
```
## Key Design Principles
### 1. Security First
- **Multi-layer Authorization**: Context-based, resource-specific, and operation-level security
- **Secure Execution**: Isolated script execution with proper context injection
- **Audit Trails**: Comprehensive logging and monitoring of all operations
### 2. Scalability
- **Horizontal Scaling**: Multiple worker instances for load distribution
- **Queue-based Architecture**: Reliable task distribution and processing
- **Async Operations**: Non-blocking I/O throughout the system
### 3. Developer Experience
- **Type Safety**: Comprehensive type checking and conversion utilities
- **Error Handling**: Detailed error messages and proper error propagation
- **Interactive Development**: REPL and web interfaces for immediate feedback
### 4. Modularity
- **Feature Flags**: Configurable compilation based on requirements
- **Crate Separation**: Clear boundaries and responsibilities
- **Plugin Architecture**: Easy extension and customization
## Deployment Patterns
### Development Environment
```
REPL + Local Engine + Mock Database
```
- Interactive development with immediate feedback
- Full DSL access without external dependencies
- Integrated testing and debugging
### Testing Environment
```
Client + Worker + Redis + Mock Database
```
- Distributed execution testing
- Queue-based communication validation
- Performance and scalability testing
### Production Environment
```
Multiple Clients + Redis Cluster + Worker Pool + Production Database
```
- High availability and fault tolerance
- Horizontal scaling and load distribution
- Comprehensive monitoring and observability
## Integration Points
### External Systems
- **Redis**: Task queues, result delivery, system coordination
- **Databases**: Business data persistence and retrieval
- **Web Browsers**: WebAssembly-based user interfaces
- **Command Line**: Development and operations tooling
### Internal Integration
- **Macro System**: Code generation and authorization
- **Type System**: Safe conversions and error handling
- **Module System**: Domain-specific functionality organization
- **Context System**: Security and execution environment management
## Performance Characteristics
### Throughput
- **Concurrent Execution**: Multiple workers processing tasks simultaneously
- **Connection Pooling**: Efficient database and Redis connection management
- **Compiled Scripts**: AST caching for repeated execution optimization
### Latency
- **Local Execution**: Direct engine access for development scenarios
- **Queue Optimization**: Efficient task distribution and result delivery
- **Context Caching**: Reduced overhead for authorization and setup
### Resource Usage
- **Memory Management**: Efficient ownership and borrowing patterns
- **CPU Utilization**: Async operations and non-blocking I/O
- **Network Efficiency**: Optimized serialization and communication protocols
## Future Extensibility
### Adding New Domains
1. Create domain module in `rhailib_dsl`
2. Implement authorization macros in `macros`
3. Add feature flags and conditional compilation
4. Update engine registration and documentation
### Custom Authorization
1. Extend authorization macros with custom logic
2. Implement domain-specific access control functions
3. Add audit and logging capabilities
4. Update security documentation
### New Interfaces
1. Implement client interface following existing patterns
2. Integrate with Redis communication layer
3. Add monitoring and observability features
4. Provide comprehensive documentation
This architecture provides a robust, secure, and scalable foundation for distributed Rhai script execution while maintaining excellent developer experience and operational visibility.

View File

@@ -0,0 +1,254 @@
# Async Implementation Summary
## Overview
This document summarizes the successful implementation of async HTTP API support in RhaiLib, enabling Rhai scripts to perform external API calls despite Rhai's synchronous nature.
## Problem Solved
**Challenge**: Rhai is fundamentally synchronous and single-threaded, making it impossible to natively perform async operations like HTTP API calls.
**Solution**: Implemented a multi-threaded architecture using MPSC channels to bridge Rhai's synchronous execution with Rust's async ecosystem.
## Key Technical Achievement
### The Blocking Runtime Fix
The most critical technical challenge was resolving the "Cannot block the current thread from within a runtime" error that occurs when trying to use blocking operations within a Tokio async context.
**Root Cause**: Using `tokio::sync::oneshot` channels with `blocking_recv()` from within an async runtime context.
**Solution**:
1. Replaced `tokio::sync::oneshot` with `std::sync::mpsc` channels
2. Used `recv_timeout()` instead of `blocking_recv()`
3. Implemented timeout-based polling in the async worker loop
```rust
// Before (caused runtime panic)
let result = response_receiver.blocking_recv()
.map_err(|_| "Failed to receive response")?;
// After (works correctly)
response_receiver.recv_timeout(Duration::from_secs(30))
.map_err(|e| format!("Failed to receive response: {}", e))?
```
## Architecture Components
### 1. AsyncFunctionRegistry
- **Purpose**: Central coordinator for async operations
- **Key Feature**: Thread-safe communication via MPSC channels
- **Location**: [`src/dsl/src/payment.rs:19`](../src/dsl/src/payment.rs#L19)
### 2. AsyncRequest Structure
- **Purpose**: Encapsulates async operation data
- **Key Feature**: Includes response channel for result communication
- **Location**: [`src/dsl/src/payment.rs:31`](../src/dsl/src/payment.rs#L31)
### 3. Async Worker Thread
- **Purpose**: Dedicated thread for processing async operations
- **Key Feature**: Timeout-based polling to prevent runtime blocking
- **Location**: [`src/dsl/src/payment.rs:339`](../src/dsl/src/payment.rs#L339)
## Implementation Flow
```mermaid
sequenceDiagram
participant RS as Rhai Script
participant RF as Rhai Function
participant AR as AsyncRegistry
participant CH as MPSC Channel
participant AW as Async Worker
participant API as External API
RS->>RF: product.create()
RF->>AR: make_request()
AR->>CH: send(AsyncRequest)
CH->>AW: recv_timeout()
AW->>API: HTTP POST
API->>AW: Response
AW->>CH: send(Result)
CH->>AR: recv_timeout()
AR->>RF: Result
RF->>RS: product_id
```
## Code Examples
### Rhai Script Usage
```rhai
// Configure API client
configure_stripe(STRIPE_API_KEY);
// Create product with builder pattern
let product = new_product()
.name("Premium Software License")
.description("Professional software solution")
.metadata("category", "software");
// Async HTTP call (appears synchronous to Rhai)
let product_id = product.create();
```
### Rust Implementation
```rust
pub fn make_request(&self, endpoint: String, method: String, data: HashMap<String, String>) -> Result<String, String> {
let (response_sender, response_receiver) = mpsc::channel();
let request = AsyncRequest {
endpoint,
method,
data,
response_sender,
};
// Send to async worker
self.request_sender.send(request)
.map_err(|_| "Failed to send request to async worker".to_string())?;
// Wait for response with timeout; return the Result directly (no `?` here —
// the function's return type is already Result<String, String>)
response_receiver.recv_timeout(Duration::from_secs(30))
.map_err(|e| format!("Failed to receive response: {}", e))
}
```
## Testing Results
### Successful Test Output
```
=== Rhai Payment Module Example ===
🔑 Using Stripe API key: sk_test_your_st***
🔧 Configuring Stripe...
🚀 Async worker thread started
🔄 Processing POST request to products
📥 Stripe response: {"error": {"message": "Invalid API Key provided..."}}
✅ Payment script executed successfully!
```
**Key Success Indicators**:
- ✅ No runtime panics or blocking errors
- ✅ Async worker thread starts successfully
- ✅ HTTP requests are processed correctly
- ✅ Error handling works gracefully with invalid API keys
- ✅ Script execution completes without hanging
## Files Modified/Created
### Core Implementation
- **[`src/dsl/src/payment.rs`](../src/dsl/src/payment.rs)**: Complete async architecture implementation
- **[`src/dsl/examples/payment/main.rs`](../src/dsl/examples/payment/main.rs)**: Environment variable loading
- **[`src/dsl/examples/payment/payment.rhai`](../src/dsl/examples/payment/payment.rhai)**: Comprehensive API usage examples
### Documentation
- **[`docs/ASYNC_RHAI_ARCHITECTURE.md`](ASYNC_RHAI_ARCHITECTURE.md)**: Technical architecture documentation
- **[`docs/API_INTEGRATION_GUIDE.md`](API_INTEGRATION_GUIDE.md)**: Practical usage guide
- **[`README.md`](../README.md)**: Updated with async API features
### Configuration
- **[`src/dsl/examples/payment/.env.example`](../src/dsl/examples/payment/.env.example)**: Environment variable template
- **[`src/dsl/Cargo.toml`](../src/dsl/Cargo.toml)**: Added dotenv dependency
## Performance Characteristics
### Throughput
- **Concurrent Processing**: Multiple async operations can run simultaneously
- **Connection Pooling**: HTTP client reuses connections efficiently
- **Channel Overhead**: Minimal (~microseconds per operation)
### Latency
- **Network Bound**: Dominated by actual HTTP request time
- **Thread Switching**: Single context switch per request
- **Timeout Handling**: 30-second default timeout with configurable values
### Memory Usage
- **Per-request Channels**: Each request creates a short-lived response channel that is dropped when the request completes, preventing unbounded queuing
- **Connection Pooling**: Efficient memory usage for HTTP connections
- **Request Lifecycle**: Automatic cleanup when requests complete
## Error Handling
### Network Errors
```rust
.map_err(|e| {
println!("❌ HTTP request failed: {}", e);
format!("HTTP request failed: {}", e)
})?
```
### API Errors
```rust
if let Some(error) = json.get("error") {
let error_msg = format!("Stripe API error: {}", error);
Err(error_msg)
}
```
### Rhai Script Errors
```rhai
try {
let product_id = product.create();
print(`✅ Product ID: ${product_id}`);
} catch(error) {
print(`❌ Failed to create product: ${error}`);
}
```
## Extensibility
The architecture is designed to support any HTTP-based API:
### Adding New APIs
1. Define configuration structure
2. Implement async request handler
3. Register Rhai functions
4. Add builder patterns for complex objects
### Example Extension
```rust
// GraphQL API support
async fn handle_graphql_request(config: &GraphQLConfig, request: &AsyncRequest) -> Result<String, String> {
// Implementation for GraphQL queries
}
#[rhai_fn(name = "graphql_query")]
pub fn execute_graphql_query(query: String, variables: rhai::Map) -> Result<String, Box<EvalAltResult>> {
// Rhai function implementation
}
```
## Best Practices Established
1. **Timeout-based Polling**: Always use `recv_timeout()` instead of blocking operations in async contexts
2. **Channel Type Selection**: Use `std::sync::mpsc` for cross-thread communication in mixed sync/async environments
3. **Error Propagation**: Provide meaningful error messages at each layer
4. **Resource Management**: Implement proper cleanup and timeout handling
5. **Configuration Security**: Use environment variables for sensitive data
6. **Builder Patterns**: Provide fluent APIs for complex object construction
## Future Enhancements
### Potential Improvements
1. **Connection Pooling**: Advanced connection management for high-throughput scenarios
2. **Retry Logic**: Automatic retry with exponential backoff for transient failures
3. **Rate Limiting**: Built-in rate limiting to respect API quotas
4. **Caching**: Response caching for frequently accessed data
5. **Metrics**: Performance monitoring and request analytics
6. **WebSocket Support**: Real-time communication capabilities
### API Extensions
1. **GraphQL Support**: Native GraphQL query execution
2. **Database Integration**: Direct database access from Rhai scripts
3. **File Operations**: Async file I/O operations
4. **Message Queues**: Integration with message brokers (Redis, RabbitMQ)
## Conclusion
The async architecture successfully solves the fundamental challenge of enabling HTTP API calls from Rhai scripts. The implementation is:
- **Robust**: Handles errors gracefully and prevents runtime panics
- **Performant**: Minimal overhead with efficient resource usage
- **Extensible**: Easy to add support for new APIs and protocols
- **Safe**: Thread-safe with proper error handling and timeouts
- **User-Friendly**: Simple, intuitive API for Rhai script authors
This foundation enables powerful integration capabilities while maintaining Rhai's simplicity and safety characteristics, making it suitable for production use in applications requiring external API integration.

View File

@@ -0,0 +1,460 @@
# Async Rhai Architecture for HTTP API Integration
## Overview
This document describes the async architecture implemented in RhaiLib that enables Rhai scripts to perform HTTP API calls despite Rhai's fundamentally synchronous nature. The architecture bridges Rhai's blocking execution model with Rust's async ecosystem using multi-threading and message passing.
## The Challenge
Rhai is a synchronous, single-threaded scripting language that cannot natively handle async operations. However, modern applications often need to:
- Make HTTP API calls (REST, GraphQL, etc.)
- Interact with external services (Stripe, payment processors, etc.)
- Perform I/O operations that benefit from async handling
- Maintain responsive execution while waiting for network responses
## Architecture Solution
### Core Components
```mermaid
graph TB
subgraph "Rhai Thread (Synchronous)"
RS[Rhai Script]
RF[Rhai Functions]
RR[Registry Interface]
end
subgraph "Communication Layer"
MC[MPSC Channel]
REQ[AsyncRequest]
RESP[Response Channel]
end
subgraph "Async Worker Thread"
RT[Tokio Runtime]
AW[Async Worker Loop]
HC[HTTP Client]
API[External APIs]
end
RS --> RF
RF --> RR
RR --> MC
MC --> REQ
REQ --> AW
AW --> HC
HC --> API
API --> HC
HC --> AW
AW --> RESP
RESP --> RR
RR --> RF
RF --> RS
```
### 1. AsyncFunctionRegistry
The central coordinator that manages async operations:
```rust
#[derive(Debug, Clone)]
pub struct AsyncFunctionRegistry {
pub request_sender: Sender<AsyncRequest>,
pub stripe_config: StripeConfig,
}
```
**Key Features:**
- **Thread-safe communication**: Uses `std::sync::mpsc` channels
- **Request coordination**: Manages the request/response lifecycle
- **Configuration management**: Stores API credentials and HTTP client settings
### 2. AsyncRequest Structure
Encapsulates all information needed for an async operation:
```rust
#[derive(Debug)]
pub struct AsyncRequest {
pub endpoint: String,
pub method: String,
pub data: HashMap<String, String>,
pub response_sender: std::sync::mpsc::Sender<Result<String, String>>,
}
```
**Components:**
- **endpoint**: API endpoint path (e.g., "products", "payment_intents")
- **method**: HTTP method (POST, GET, PUT, DELETE)
- **data**: Form data for the request body
- **response_sender**: Channel to send the result back to the calling thread
### 3. Async Worker Thread
A dedicated thread running a Tokio runtime that processes async operations:
```rust
async fn async_worker_loop(config: StripeConfig, receiver: Receiver<AsyncRequest>) {
loop {
match receiver.recv_timeout(Duration::from_millis(100)) {
Ok(request) => {
let result = Self::handle_stripe_request(&config, &request).await;
if let Err(_) = request.response_sender.send(result) {
println!("⚠️ Failed to send response back to caller");
}
}
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => continue,
Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => break,
}
}
}
```
**Key Design Decisions:**
- **Timeout-based polling**: Uses `recv_timeout()` instead of blocking `recv()` to prevent runtime deadlocks
- **Error handling**: Gracefully handles channel disconnections and timeouts
- **Non-blocking**: Allows the async runtime to process other tasks during polling intervals
## Request Flow
### 1. Rhai Script Execution
```rhai
// Rhai script calls a function
let product = new_product()
.name("Premium Software License")
.description("A comprehensive software solution");
let product_id = product.create(); // This triggers async HTTP call
```
### 2. Function Registration and Execution
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_product(product: &mut RhaiProduct) -> Result<String, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured")?;
let form_data = prepare_product_data(product);
let result = registry.make_request("products".to_string(), "POST".to_string(), form_data)
.map_err(|e| e.to_string())?;
product.id = Some(result.clone());
Ok(result)
}
```
### 3. Request Processing
```rust
pub fn make_request(&self, endpoint: String, method: String, data: HashMap<String, String>) -> Result<String, String> {
let (response_sender, response_receiver) = mpsc::channel();
let request = AsyncRequest {
endpoint,
method,
data,
response_sender,
};
// Send request to async worker
self.request_sender.send(request)
.map_err(|_| "Failed to send request to async worker".to_string())?;
// Wait for response with timeout
response_receiver.recv_timeout(Duration::from_secs(30))
.map_err(|e| format!("Failed to receive response: {}", e))?
}
```
### 4. HTTP Request Execution
```rust
async fn handle_stripe_request(config: &StripeConfig, request: &AsyncRequest) -> Result<String, String> {
let url = format!("{}/{}", STRIPE_API_BASE, request.endpoint);
let response = config.client
.post(&url)
.basic_auth(&config.secret_key, None::<&str>)
.form(&request.data)
.send()
.await
.map_err(|e| format!("HTTP request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
// Parse and validate response
let json: serde_json::Value = serde_json::from_str(&response_text)
.map_err(|e| format!("Failed to parse JSON: {}", e))?;
if let Some(id) = json.get("id").and_then(|v| v.as_str()) {
Ok(id.to_string())
} else if let Some(error) = json.get("error") {
Err(format!("API error: {}", error))
} else {
Err(format!("Unexpected response: {}", response_text))
}
}
```
## Configuration and Setup
### 1. HTTP Client Configuration
```rust
let client = Client::builder()
.timeout(Duration::from_secs(5))
.connect_timeout(Duration::from_secs(3))
.pool_idle_timeout(Duration::from_secs(10))
.tcp_keepalive(Duration::from_secs(30))
.user_agent("rhailib-payment/1.0")
.build()?;
```
### 2. Environment Variable Loading
```rust
// Load from .env file
dotenv::from_filename("examples/payment/.env").ok();
let stripe_secret_key = env::var("STRIPE_SECRET_KEY")
.unwrap_or_else(|_| "sk_test_demo_key".to_string());
```
### 3. Rhai Engine Setup
```rust
let mut engine = Engine::new();
register_payment_rhai_module(&mut engine);
let mut scope = Scope::new();
scope.push("STRIPE_API_KEY", stripe_secret_key);
engine.eval_with_scope::<()>(&mut scope, &script)?;
```
## API Integration Examples
### Stripe Payment Processing
The architecture supports comprehensive Stripe API integration:
#### Product Creation
```rhai
let product = new_product()
.name("Premium Software License")
.description("A comprehensive software solution")
.metadata("category", "software");
let product_id = product.create(); // Async HTTP POST to /v1/products
```
#### Price Configuration
```rhai
let monthly_price = new_price()
.amount(2999) // $29.99 in cents
.currency("usd")
.product(product_id)
.recurring("month");
let price_id = monthly_price.create(); // Async HTTP POST to /v1/prices
```
#### Subscription Management
```rhai
let subscription = new_subscription()
.customer("cus_example_customer")
.add_price(monthly_price_id)
.trial_days(14)
.coupon(coupon_id);
let subscription_id = subscription.create(); // Async HTTP POST to /v1/subscriptions
```
#### Payment Intent Processing
```rhai
let payment_intent = new_payment_intent()
.amount(19999)
.currency("usd")
.customer("cus_example_customer")
.description("Premium Software License");
let intent_id = payment_intent.create(); // Async HTTP POST to /v1/payment_intents
```
## Error Handling
### 1. Network Errors
```rust
.map_err(|e| {
println!("❌ HTTP request failed: {}", e);
format!("HTTP request failed: {}", e)
})?
```
### 2. API Errors
```rust
if let Some(error) = json.get("error") {
let error_msg = format!("Stripe API error: {}", error);
println!("❌ {}", error_msg);
Err(error_msg)
}
```
### 3. Timeout Handling
```rust
response_receiver.recv_timeout(Duration::from_secs(30))
.map_err(|e| format!("Failed to receive response: {}", e))?
```
### 4. Rhai Script Error Handling
```rhai
try {
let product_id = product.create();
print(`✅ Product ID: ${product_id}`);
} catch(error) {
print(`❌ Failed to create product: ${error}`);
return; // Exit gracefully
}
```
## Performance Characteristics
### Throughput
- **Request queuing**: Multiple callers can submit requests concurrently, but the single worker loop processes them one at a time; spawn additional worker threads if higher throughput is required
- **Connection pooling**: HTTP client reuses connections for efficiency
- **Timeout management**: Prevents hanging requests from blocking the system
### Latency
- **Channel overhead**: Minimal overhead for message passing (~microseconds)
- **Thread switching**: Single context switch per request
- **Network latency**: Dominated by actual HTTP request time
### Memory Usage
- **Request buffering**: Bounded by channel capacity
- **Connection pooling**: Efficient memory usage for HTTP connections
- **Response caching**: No automatic caching (can be added if needed)
## Thread Safety
### 1. Global Registry
```rust
static ASYNC_REGISTRY: Mutex<Option<AsyncFunctionRegistry>> = Mutex::new(None);
```
### 2. Channel Communication
- **MPSC channels**: Multiple producers (Rhai functions), single consumer (async worker)
- **Response channels**: One-to-one communication for each request
### 3. Shared Configuration
- **Immutable after setup**: Configuration is cloned to worker thread
- **Thread-safe HTTP client**: reqwest::Client is thread-safe
## Extensibility
### Adding New APIs
1. **Define request structures**:
```rust
#[derive(Debug)]
pub struct GraphQLRequest {
pub query: String,
pub variables: HashMap<String, serde_json::Value>,
pub response_sender: std::sync::mpsc::Sender<Result<String, String>>,
}
```
2. **Implement request handlers**:
```rust
async fn handle_graphql_request(config: &GraphQLConfig, request: &GraphQLRequest) -> Result<String, String> {
// Implementation
}
```
3. **Register Rhai functions**:
```rust
#[rhai_fn(name = "graphql_query", return_raw)]
pub fn execute_graphql_query(query: String) -> Result<String, Box<EvalAltResult>> {
// Implementation
}
```
### Custom HTTP Methods
The architecture supports any HTTP method:
```rust
registry.make_request("endpoint".to_string(), "PUT".to_string(), data)
registry.make_request("endpoint".to_string(), "DELETE".to_string(), HashMap::new())
```
## Best Practices
### 1. Configuration Management
- Use environment variables for sensitive data (API keys)
- Validate configuration before starting async workers
- Provide meaningful error messages for missing configuration
### 2. Error Handling
- Always handle both network and API errors
- Provide fallback behavior for failed requests
- Log errors with sufficient context for debugging
### 3. Timeout Configuration
- Set appropriate timeouts for different types of requests
- Consider retry logic for transient failures
- Balance responsiveness with reliability
### 4. Resource Management
- Limit concurrent requests to prevent overwhelming external APIs
- Use connection pooling for efficiency
- Clean up resources when shutting down
## Troubleshooting
### Common Issues
1. **"Cannot block the current thread from within a runtime"**
- **Cause**: Using blocking operations within async context
- **Solution**: Use `recv_timeout()` instead of `blocking_recv()`
2. **Channel disconnection errors**
- **Cause**: Worker thread terminated unexpectedly
- **Solution**: Check worker thread for panics, ensure proper error handling
3. **Request timeouts**
- **Cause**: Network issues or slow API responses
- **Solution**: Adjust timeout values, implement retry logic
4. **API authentication errors**
- **Cause**: Invalid or missing API keys
- **Solution**: Verify environment variable configuration
### Debugging Tips
1. **Enable detailed logging**:
```rust
println!("🔄 Processing {} request to {}", request.method, request.endpoint);
println!("📥 API response: {}", response_text);
```
2. **Monitor channel health**:
```rust
if let Err(_) = request.response_sender.send(result) {
println!("⚠️ Failed to send response back to caller");
}
```
3. **Test with demo data**:
```rhai
// Use demo API keys that fail gracefully for testing
let demo_key = "sk_test_demo_key_will_fail_gracefully";
```
## Conclusion
This async architecture successfully bridges Rhai's synchronous execution model with Rust's async ecosystem, enabling powerful HTTP API integration while maintaining the simplicity and safety of Rhai scripts. The design is extensible, performant, and handles errors gracefully, making it suitable for production use in applications requiring external API integration.
The key innovation is the use of timeout-based polling in the async worker loop, which prevents the common "cannot block within runtime" error while maintaining responsive execution. This pattern can be applied to other async operations beyond HTTP requests, such as database queries, file I/O, or any other async Rust operations that need to be exposed to Rhai scripts.

View File

@@ -0,0 +1,367 @@
# Dispatcher-Based Event-Driven Flow Architecture
## Overview
This document describes the implementation of a non-blocking, event-driven flow architecture for Rhai payment functions using the existing RhaiDispatcher. The system transforms blocking API calls into fire-and-continue patterns where HTTP requests spawn background threads that dispatch new Rhai scripts based on API responses.
## Architecture Principles
### 1. **Non-Blocking API Calls**
- All payment functions (e.g., `create_payment_intent()`) return immediately
- HTTP requests happen in background threads
- No blocking of the main Rhai engine thread
### 2. **Self-Dispatching Pattern**
- Worker dispatches scripts to itself
- Same `worker_id` and `context_id` maintained
- `caller_id` changes to reflect the API response source
### 3. **Generic Request/Response Flow**
- Request functions: `new_..._request` pattern
- Response scripts: `new_..._response` pattern
- Consistent naming across all API operations
## Flow Architecture
```mermaid
graph TD
A[main.rhai] --> B[create_payment_intent]
B --> C[HTTP Thread Spawned]
B --> D[Return Immediately]
C --> E[Stripe API Call]
E --> F{API Response}
F -->|Success| G[Dispatch: new_create_payment_intent_response]
F -->|Error| H[Dispatch: new_create_payment_intent_error]
G --> I[Response Script Execution]
H --> J[Error Script Execution]
```
## Implementation Components
### 1. **FlowManager**
```rust
use rhai_dispatcher::{RhaiDispatcher, RhaiDispatcherBuilder, RhaiDispatcherError};
use std::sync::{Arc, Mutex};
pub struct FlowManager {
dispatcher: RhaiDispatcher,
worker_id: String,
context_id: String,
}
#[derive(Debug)]
pub enum FlowError {
DispatcherError(RhaiDispatcherError),
ConfigurationError(String),
}
impl From<RhaiDispatcherError> for FlowError {
fn from(err: RhaiDispatcherError) -> Self {
FlowError::DispatcherError(err)
}
}
impl FlowManager {
pub fn new(worker_id: String, context_id: String) -> Result<Self, FlowError> {
let dispatcher = RhaiDispatcherBuilder::new()
.caller_id("stripe") // API responses come from Stripe
.worker_id(&worker_id)
.context_id(&context_id)
.redis_url("redis://127.0.0.1/")
.build()?;
Ok(Self {
dispatcher,
worker_id,
context_id,
})
}
pub async fn dispatch_response_script(&self, script_name: &str, data: &str) -> Result<(), FlowError> {
let script_content = format!(
r#"
// Auto-generated response script for {}
let response_data = `{}`;
let parsed_data = parse_json(response_data);
// Include the response script
eval_file("flows/{}.rhai");
"#,
script_name,
data.replace('`', r#"\`"#),
script_name
);
self.dispatcher
.new_play_request()
.worker_id(&self.worker_id)
.context_id(&self.context_id)
.script(&script_content)
.submit()
.await?;
Ok(())
}
pub async fn dispatch_error_script(&self, script_name: &str, error: &str) -> Result<(), FlowError> {
let script_content = format!(
r#"
// Auto-generated error script for {}
let error_data = `{}`;
let parsed_error = parse_json(error_data);
// Include the error script
eval_file("flows/{}.rhai");
"#,
script_name,
error.replace('`', r#"\`"#),
script_name
);
self.dispatcher
.new_play_request()
.worker_id(&self.worker_id)
.context_id(&self.context_id)
.script(&script_content)
.submit()
.await?;
Ok(())
}
}
// Global flow manager instance
static FLOW_MANAGER: Mutex<Option<FlowManager>> = Mutex::new(None);
pub fn initialize_flow_manager(worker_id: String, context_id: String) -> Result<(), FlowError> {
let manager = FlowManager::new(worker_id, context_id)?;
let mut global_manager = FLOW_MANAGER.lock().unwrap();
*global_manager = Some(manager);
Ok(())
}
pub fn get_flow_manager() -> Result<FlowManager, FlowError> {
let global_manager = FLOW_MANAGER.lock().unwrap();
global_manager.as_ref()
.ok_or_else(|| FlowError::ConfigurationError("Flow manager not initialized".to_string()))
.map(|manager| FlowManager {
dispatcher: manager.dispatcher.clone(), // Assuming Clone is implemented
worker_id: manager.worker_id.clone(),
context_id: manager.context_id.clone(),
})
}
```
### 2. **Non-Blocking Payment Functions**
```rust
// Transform blocking function into non-blocking
#[rhai_fn(name = "create", return_raw)]
pub fn create_payment_intent(intent: &mut RhaiPaymentIntent) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_payment_intent_data(intent);
// Get flow manager
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
// Spawn background thread for HTTP request
let stripe_config = get_stripe_config()?;
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "payment_intents", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_payment_intent_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_payment_intent_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("payment_intent_request_dispatched".to_string())
}
// Generic async HTTP request function
async fn make_stripe_request(
config: &StripeConfig,
endpoint: &str,
form_data: &HashMap<String, String>
) -> Result<String, String> {
let url = format!("{}/{}", STRIPE_API_BASE, endpoint);
let response = config.client
.post(&url)
.basic_auth(&config.secret_key, None::<&str>)
.form(form_data)
.send()
.await
.map_err(|e| format!("HTTP request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
let json: serde_json::Value = serde_json::from_str(&response_text)
.map_err(|e| format!("Failed to parse JSON: {}", e))?;
if json.get("error").is_some() {
Err(response_text)
} else {
Ok(response_text)
}
}
```
### 3. **Flow Script Templates**
#### Success Response Script
```rhai
// flows/new_create_payment_intent_response.rhai
let payment_intent_id = parsed_data.id;
let status = parsed_data.status;
print(`✅ Payment Intent Created: ${payment_intent_id}`);
print(`Status: ${status}`);
// Continue the flow based on status
if status == "requires_payment_method" {
print("Payment method required - ready for frontend");
// Could dispatch another flow here
} else if status == "succeeded" {
print("Payment completed successfully!");
// Dispatch success notification flow
}
// Store the payment intent ID for later use
set_context("payment_intent_id", payment_intent_id);
set_context("payment_status", status);
```
#### Error Response Script
```rhai
// flows/new_create_payment_intent_error.rhai
let error_type = parsed_error.error.type;
let error_message = parsed_error.error.message;
print(`❌ Payment Intent Error: ${error_type}`);
print(`Message: ${error_message}`);
// Handle different error types
if error_type == "card_error" {
print("Card was declined - notify user");
// Dispatch user notification flow
} else if error_type == "rate_limit_error" {
print("Rate limited - retry later");
// Dispatch retry flow
} else {
print("Unknown error - log for investigation");
// Dispatch error logging flow
}
// Store error details for debugging
set_context("last_error_type", error_type);
set_context("last_error_message", error_message);
```
### 4. **Configuration and Initialization**
```rust
// Add to payment module initialization
#[rhai_fn(name = "init_flows", return_raw)]
pub fn init_flows(worker_id: String, context_id: String) -> Result<String, Box<EvalAltResult>> {
initialize_flow_manager(worker_id, context_id)
.map_err(|e| format!("Failed to initialize flow manager: {:?}", e))?;
Ok("Flow manager initialized successfully".to_string())
}
```
## Usage Examples
### 1. **Basic Payment Flow**
```rhai
// main.rhai
init_flows("worker-1", "context-123");
configure_stripe("sk_test_...");
let payment_intent = new_payment_intent()
.amount(2000)
.currency("usd")
.customer("cus_customer123");
// This returns immediately, HTTP happens in background
let result = payment_intent.create();
print(`Request dispatched: ${result}`);
// Script ends here, but flow continues in background
```
### 2. **Chained Flow Example**
```rhai
// flows/new_create_payment_intent_response.rhai
let payment_intent_id = parsed_data.id;
if parsed_data.status == "requires_payment_method" {
// Chain to next operation
let subscription = new_subscription()
.customer(get_context("customer_id"))
.add_price("price_monthly");
// This will trigger new_create_subscription_response flow
subscription.create();
}
```
## Benefits
### 1. **Non-Blocking Execution**
- Main Rhai script never blocks on HTTP requests
- Multiple API calls can happen concurrently
- Engine remains responsive for other scripts
### 2. **Event-Driven Architecture**
- Clear separation between request and response handling
- Easy to add new flow steps
- Composable and chainable operations
### 3. **Error Handling**
- Dedicated error flows for each operation
- Contextual error information preserved
- Retry and recovery patterns possible
### 4. **Scalability**
- Each HTTP request runs in its own thread
- No shared state between concurrent operations
- Redis-based dispatch scales horizontally
## Implementation Checklist
- [ ] Implement FlowManager with RhaiDispatcher integration
- [ ] Convert all payment functions to non-blocking pattern
- [ ] Create flow script templates for all operations
- [ ] Add flow initialization functions
- [ ] Test with example payment flows
- [ ] Update documentation and examples
## Migration Path
1. **Phase 1**: Implement FlowManager and basic infrastructure
2. **Phase 2**: Convert payment_intent functions to non-blocking
3. **Phase 3**: Convert remaining payment functions (products, prices, subscriptions, coupons)
4. **Phase 4**: Create comprehensive flow script library
5. **Phase 5**: Add advanced features (retries, timeouts, monitoring)

View File

@@ -0,0 +1,443 @@
# Event-Driven Flow Architecture
## Overview
A simple architecture where API calls fire HTTP requests in background threads and spawn new Rhai scripts based on responses. Each Rhai engine instance remains single-threaded, and there is no polling and no blocking — just clean event-driven flows. The only global is a write-once `FlowManager` handle; there is no shared mutable state.
## Core Concept
```mermaid
graph LR
RS1[Rhai Script] --> API[create_payment_intent]
API --> HTTP[HTTP Request]
HTTP --> SPAWN[Spawn Thread]
SPAWN --> WAIT[Wait for Response]
WAIT --> SUCCESS[200 OK]
WAIT --> ERROR[Error]
SUCCESS --> RS2[new_payment_intent.rhai]
ERROR --> RS3[payment_failed.rhai]
```
## Architecture Design
### 1. Simple Flow Manager
```rust
use std::thread;
use std::collections::HashMap;
use reqwest::Client;
use rhai::{Engine, Scope};
pub struct FlowManager {
pub client: Client,
pub engine: Engine,
pub flow_scripts: HashMap<String, String>, // event_name -> script_path
}
impl FlowManager {
pub fn new() -> Self {
let mut flow_scripts = HashMap::new();
// Define flow mappings
flow_scripts.insert("payment_intent_created".to_string(), "flows/payment_intent_created.rhai".to_string());
flow_scripts.insert("payment_intent_failed".to_string(), "flows/payment_intent_failed.rhai".to_string());
flow_scripts.insert("product_created".to_string(), "flows/product_created.rhai".to_string());
flow_scripts.insert("subscription_created".to_string(), "flows/subscription_created.rhai".to_string());
Self {
client: Client::new(),
engine: Engine::new(),
flow_scripts,
}
}
// Fire HTTP request and spawn response handler
pub fn fire_and_continue(&self,
endpoint: String,
method: String,
data: HashMap<String, String>,
success_event: String,
error_event: String,
context: HashMap<String, String>
) {
let client = self.client.clone();
let flow_scripts = self.flow_scripts.clone();
// Spawn thread for HTTP request
thread::spawn(move || {
let result = Self::make_http_request(&client, &endpoint, &method, &data);
match result {
Ok(response_data) => {
// Success: dispatch success flow
Self::dispatch_flow(&flow_scripts, &success_event, response_data, context);
}
Err(error) => {
// Error: dispatch error flow
let mut error_data = HashMap::new();
error_data.insert("error".to_string(), error);
Self::dispatch_flow(&flow_scripts, &error_event, error_data, context);
}
}
});
// Return immediately - no blocking!
}
// Execute HTTP request
fn make_http_request(
client: &Client,
endpoint: &str,
method: &str,
data: &HashMap<String, String>
) -> Result<HashMap<String, String>, String> {
// This runs in spawned thread - can block safely
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
let url = format!("https://api.stripe.com/v1/{}", endpoint);
let response = client
.post(&url)
.form(data)
.send()
.await
.map_err(|e| format!("HTTP error: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Response read error: {}", e))?;
let json: serde_json::Value = serde_json::from_str(&response_text)
.map_err(|e| format!("JSON parse error: {}", e))?;
// Convert JSON to HashMap for Rhai
let mut result = HashMap::new();
if let Some(id) = json.get("id").and_then(|v| v.as_str()) {
result.insert("id".to_string(), id.to_string());
}
if let Some(status) = json.get("status").and_then(|v| v.as_str()) {
result.insert("status".to_string(), status.to_string());
}
Ok(result)
})
}
// Dispatch new Rhai script based on event
fn dispatch_flow(
flow_scripts: &HashMap<String, String>,
event_name: &str,
response_data: HashMap<String, String>,
context: HashMap<String, String>
) {
if let Some(script_path) = flow_scripts.get(event_name) {
println!("🎯 Dispatching flow: {} -> {}", event_name, script_path);
// Create new engine instance for this flow
let mut engine = Engine::new();
register_payment_rhai_module(&mut engine);
// Create scope with response data and context
let mut scope = Scope::new();
// Add response data
for (key, value) in response_data {
scope.push(key, value);
}
// Add context data
for (key, value) in context {
scope.push(format!("context_{}", key), value);
}
// Execute flow script
if let Ok(script_content) = std::fs::read_to_string(script_path) {
match engine.eval_with_scope::<()>(&mut scope, &script_content) {
Ok(_) => println!("✅ Flow {} completed successfully", event_name),
Err(e) => println!("❌ Flow {} failed: {}", event_name, e),
}
} else {
println!("❌ Flow script not found: {}", script_path);
}
} else {
println!("⚠️ No flow defined for event: {}", event_name);
}
}
}
```
### 2. Simple Rhai Functions
```rust
#[export_module]
mod rhai_flow_module {
use super::*;
// Global flow manager instance
static FLOW_MANAGER: std::sync::OnceLock<FlowManager> = std::sync::OnceLock::new();
#[rhai_fn(name = "init_flows")]
pub fn init_flows() {
FLOW_MANAGER.set(FlowManager::new()).ok();
println!("✅ Flow manager initialized");
}
#[rhai_fn(name = "create_payment_intent")]
pub fn create_payment_intent(
amount: i64,
currency: String,
customer: String
) {
let manager = FLOW_MANAGER.get().expect("Flow manager not initialized");
let mut data = HashMap::new();
data.insert("amount".to_string(), amount.to_string());
data.insert("currency".to_string(), currency);
data.insert("customer".to_string(), customer.clone());
let mut context = HashMap::new();
context.insert("customer_id".to_string(), customer);
context.insert("original_amount".to_string(), amount.to_string());
manager.fire_and_continue(
"payment_intents".to_string(),
"POST".to_string(),
data,
"payment_intent_created".to_string(),
"payment_intent_failed".to_string(),
context
);
println!("🚀 Payment intent creation started");
// Returns immediately!
}
#[rhai_fn(name = "create_product")]
pub fn create_product(name: String, description: String) {
let manager = FLOW_MANAGER.get().expect("Flow manager not initialized");
let mut data = HashMap::new();
data.insert("name".to_string(), name.clone());
data.insert("description".to_string(), description);
let mut context = HashMap::new();
context.insert("product_name".to_string(), name);
manager.fire_and_continue(
"products".to_string(),
"POST".to_string(),
data,
"product_created".to_string(),
"product_failed".to_string(),
context
);
println!("🚀 Product creation started");
}
#[rhai_fn(name = "create_subscription")]
pub fn create_subscription(customer: String, price_id: String) {
let manager = FLOW_MANAGER.get().expect("Flow manager not initialized");
let mut data = HashMap::new();
data.insert("customer".to_string(), customer.clone());
data.insert("items[0][price]".to_string(), price_id.clone());
let mut context = HashMap::new();
context.insert("customer_id".to_string(), customer);
context.insert("price_id".to_string(), price_id);
manager.fire_and_continue(
"subscriptions".to_string(),
"POST".to_string(),
data,
"subscription_created".to_string(),
"subscription_failed".to_string(),
context
);
println!("🚀 Subscription creation started");
}
}
```
## Usage Examples
### 1. Main Script (Initiator)
```rhai
// main.rhai
init_flows();
print("Starting payment flow...");
// This returns immediately, spawns HTTP request
create_payment_intent(2000, "usd", "cus_customer123");
print("Payment intent request sent, continuing...");
// Script ends here, but flow continues in background
```
### 2. Success Flow Script
```rhai
// flows/payment_intent_created.rhai
print("🎉 Payment intent created successfully!");
print(`Payment Intent ID: ${id}`);
print(`Status: ${status}`);
print(`Customer: ${context_customer_id}`);
print(`Amount: ${context_original_amount}`);
// Continue the flow - create subscription
if status == "requires_payment_method" {
print("Creating subscription for customer...");
create_subscription(context_customer_id, "price_monthly_plan");
}
```
### 3. Error Flow Script
```rhai
// flows/payment_intent_failed.rhai
print("❌ Payment intent creation failed");
print(`Error: ${error}`);
print(`Customer: ${context_customer_id}`);
// Handle error - maybe retry or notify
print("Sending notification to customer...");
// Could trigger email notification flow here
```
### 4. Subscription Success Flow
```rhai
// flows/subscription_created.rhai
print("🎉 Subscription created!");
print(`Subscription ID: ${id}`);
print(`Customer: ${context_customer_id}`);
print(`Price: ${context_price_id}`);
// Final step - send welcome email
print("Sending welcome email...");
// Could trigger email flow here
```
## Flow Configuration
### 1. Flow Mapping
```rust
// Define in FlowManager::new()
flow_scripts.insert("payment_intent_created".to_string(), "flows/payment_intent_created.rhai".to_string());
flow_scripts.insert("payment_intent_failed".to_string(), "flows/payment_intent_failed.rhai".to_string());
flow_scripts.insert("product_created".to_string(), "flows/product_created.rhai".to_string());
flow_scripts.insert("subscription_created".to_string(), "flows/subscription_created.rhai".to_string());
```
### 2. Directory Structure
```
project/
├── main.rhai # Main script
├── flows/
│ ├── payment_intent_created.rhai # Success flow
│ ├── payment_intent_failed.rhai # Error flow
│ ├── product_created.rhai # Product success
│ ├── subscription_created.rhai # Subscription success
│ └── email_notification.rhai # Email flow
└── src/
└── flow_manager.rs # Flow manager code
```
## Execution Flow
```mermaid
sequenceDiagram
participant MS as Main Script
participant FM as FlowManager
participant TH as Spawned Thread
participant API as Stripe API
participant FS as Flow Script
MS->>FM: create_payment_intent()
FM->>TH: spawn thread
FM->>MS: return immediately
Note over MS: Script ends
TH->>API: HTTP POST /payment_intents
API->>TH: 200 OK + payment_intent data
TH->>FS: dispatch payment_intent_created.rhai
Note over FS: New Rhai execution
FS->>FM: create_subscription()
FM->>TH: spawn new thread
TH->>API: HTTP POST /subscriptions
API->>TH: 200 OK + subscription data
TH->>FS: dispatch subscription_created.rhai
```
## Benefits
### 1. **Simplicity**
- No global state management
- No complex polling or callbacks
- Each flow is a simple Rhai script
### 2. **Single-Threaded Rhai**
- Main Rhai engine never blocks
- Each flow script runs in its own engine instance
- No concurrency issues in Rhai code
### 3. **Event-Driven**
- Clear separation of concerns
- Easy to add new flows
- Composable flow chains
### 4. **No Blocking**
- HTTP requests happen in background threads
- Main script continues immediately
- Flows trigger based on responses
## Advanced Features
### 1. Flow Chaining
```rhai
// flows/payment_intent_created.rhai
if status == "requires_payment_method" {
// Chain to next flow
create_subscription(context_customer_id, "price_monthly");
}
```
### 2. Conditional Flows
```rhai
// flows/subscription_created.rhai
if context_customer_type == "enterprise" {
// Enterprise-specific flow
create_enterprise_setup(context_customer_id);
} else {
// Standard flow
send_welcome_email(context_customer_id);
}
```
### 3. Error Recovery
```rhai
// flows/payment_intent_failed.rhai
if error.contains("insufficient_funds") {
// Retry with smaller amount
let retry_amount = context_original_amount / 2;
create_payment_intent(retry_amount, "usd", context_customer_id);
} else {
// Send error notification
send_error_notification(context_customer_id, error);
}
```
This architecture is much simpler, has no global state, and provides clean event-driven flows that are easy to understand and maintain.

View File

@@ -0,0 +1,593 @@
# Event-Driven Flow Implementation Specification
## Overview
This document provides the complete implementation specification for converting the blocking payment.rs architecture to an event-driven flow system using RhaiDispatcher.
## File Structure
```
src/dsl/src/
├── flow_manager.rs # New: FlowManager implementation
├── payment.rs # Modified: Non-blocking payment functions
└── lib.rs # Modified: Include flow_manager module
```
## 1. FlowManager Implementation
### File: `src/dsl/src/flow_manager.rs`
```rust
use rhai_dispatcher::{RhaiDispatcher, RhaiDispatcherBuilder, RhaiDispatcherError};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use serde_json;
use tokio::runtime::Runtime;
#[derive(Debug)]
pub enum FlowError {
DispatcherError(RhaiDispatcherError),
ConfigurationError(String),
SerializationError(serde_json::Error),
}
impl From<RhaiDispatcherError> for FlowError {
fn from(err: RhaiDispatcherError) -> Self {
FlowError::DispatcherError(err)
}
}
impl From<serde_json::Error> for FlowError {
fn from(err: serde_json::Error) -> Self {
FlowError::SerializationError(err)
}
}
impl std::fmt::Display for FlowError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FlowError::DispatcherError(e) => write!(f, "Dispatcher error: {}", e),
FlowError::ConfigurationError(e) => write!(f, "Configuration error: {}", e),
FlowError::SerializationError(e) => write!(f, "Serialization error: {}", e),
}
}
}
impl std::error::Error for FlowError {}
#[derive(Clone)]
pub struct FlowManager {
dispatcher: RhaiDispatcher,
worker_id: String,
context_id: String,
}
impl FlowManager {
pub fn new(worker_id: String, context_id: String, redis_url: Option<String>) -> Result<Self, FlowError> {
let redis_url = redis_url.unwrap_or_else(|| "redis://127.0.0.1/".to_string());
let dispatcher = RhaiDispatcherBuilder::new()
.caller_id("stripe") // API responses come from Stripe
.worker_id(&worker_id)
.context_id(&context_id)
.redis_url(&redis_url)
.build()?;
Ok(Self {
dispatcher,
worker_id,
context_id,
})
}
pub async fn dispatch_response_script(&self, script_name: &str, data: &str) -> Result<(), FlowError> {
let script_content = format!(
r#"
// Auto-generated response script for {}
let response_data = `{}`;
let parsed_data = parse_json(response_data);
// Include the response script
eval_file("flows/{}.rhai");
"#,
script_name,
data.replace('`', r#"\`"#),
script_name
);
self.dispatcher
.new_play_request()
.worker_id(&self.worker_id)
.context_id(&self.context_id)
.script(&script_content)
.submit()
.await?;
Ok(())
}
pub async fn dispatch_error_script(&self, script_name: &str, error: &str) -> Result<(), FlowError> {
let script_content = format!(
r#"
// Auto-generated error script for {}
let error_data = `{}`;
let parsed_error = parse_json(error_data);
// Include the error script
eval_file("flows/{}.rhai");
"#,
script_name,
error.replace('`', r#"\`"#),
script_name
);
self.dispatcher
.new_play_request()
.worker_id(&self.worker_id)
.context_id(&self.context_id)
.script(&script_content)
.submit()
.await?;
Ok(())
}
}
// Global flow manager instance
static FLOW_MANAGER: Mutex<Option<FlowManager>> = Mutex::new(None);
pub fn initialize_flow_manager(worker_id: String, context_id: String, redis_url: Option<String>) -> Result<(), FlowError> {
let manager = FlowManager::new(worker_id, context_id, redis_url)?;
let mut global_manager = FLOW_MANAGER.lock().unwrap();
*global_manager = Some(manager);
Ok(())
}
pub fn get_flow_manager() -> Result<FlowManager, FlowError> {
let global_manager = FLOW_MANAGER.lock().unwrap();
global_manager.as_ref()
.ok_or_else(|| FlowError::ConfigurationError("Flow manager not initialized".to_string()))
.cloned()
}
// Async HTTP request function for Stripe API
pub async fn make_stripe_request(
config: &super::StripeConfig,
endpoint: &str,
form_data: &HashMap<String, String>
) -> Result<String, String> {
let url = format!("{}/{}", super::STRIPE_API_BASE, endpoint);
let response = config.client
.post(&url)
.basic_auth(&config.secret_key, None::<&str>)
.form(form_data)
.send()
.await
.map_err(|e| format!("HTTP request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
let json: serde_json::Value = serde_json::from_str(&response_text)
.map_err(|e| format!("Failed to parse JSON: {}", e))?;
if json.get("error").is_some() {
Err(response_text)
} else {
Ok(response_text)
}
}
```
## 2. Payment.rs Modifications
### Add Dependencies
Add to the top of `payment.rs`:
```rust
mod flow_manager;
use flow_manager::{get_flow_manager, initialize_flow_manager, make_stripe_request, FlowError};
use std::thread;
use tokio::runtime::Runtime;
```
### Add Flow Initialization Function
Add to the `rhai_payment_module`:
```rust
#[rhai_fn(name = "init_flows", return_raw)]
pub fn init_flows(worker_id: String, context_id: String) -> Result<String, Box<EvalAltResult>> {
initialize_flow_manager(worker_id, context_id, None)
.map_err(|e| format!("Failed to initialize flow manager: {:?}", e))?;
Ok("Flow manager initialized successfully".to_string())
}
#[rhai_fn(name = "init_flows_with_redis", return_raw)]
pub fn init_flows_with_redis(worker_id: String, context_id: String, redis_url: String) -> Result<String, Box<EvalAltResult>> {
initialize_flow_manager(worker_id, context_id, Some(redis_url))
.map_err(|e| format!("Failed to initialize flow manager: {:?}", e))?;
Ok("Flow manager initialized successfully".to_string())
}
```
### Helper Function for Stripe Config
Add helper function to get stripe config:
```rust
fn get_stripe_config() -> Result<StripeConfig, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured. Call configure_stripe() first.")?;
Ok(registry.stripe_config.clone())
}
```
### Convert Payment Intent Function
Replace the existing `create_payment_intent` function:
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_payment_intent(intent: &mut RhaiPaymentIntent) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_payment_intent_data(intent);
// Get flow manager and stripe config
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
let stripe_config = get_stripe_config()?;
// Spawn background thread for HTTP request
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "payment_intents", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_payment_intent_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_payment_intent_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("payment_intent_request_dispatched".to_string())
}
```
### Convert Product Function
Replace the existing `create_product` function:
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_product(product: &mut RhaiProduct) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_product_data(product);
// Get flow manager and stripe config
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
let stripe_config = get_stripe_config()?;
// Spawn background thread for HTTP request
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "products", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_product_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_product_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("product_request_dispatched".to_string())
}
```
### Convert Price Function
Replace the existing `create_price` function:
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_price(price: &mut RhaiPrice) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_price_data(price);
// Get flow manager and stripe config
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
let stripe_config = get_stripe_config()?;
// Spawn background thread for HTTP request
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "prices", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_price_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_price_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("price_request_dispatched".to_string())
}
```
### Convert Subscription Function
Replace the existing `create_subscription` function:
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_subscription(subscription: &mut RhaiSubscription) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_subscription_data(subscription);
// Get flow manager and stripe config
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
let stripe_config = get_stripe_config()?;
// Spawn background thread for HTTP request
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "subscriptions", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_subscription_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_subscription_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("subscription_request_dispatched".to_string())
}
```
### Convert Coupon Function
Replace the existing `create_coupon` function:
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_coupon(coupon: &mut RhaiCoupon) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_coupon_data(coupon);
// Get flow manager and stripe config
let flow_manager = get_flow_manager()
.map_err(|e| format!("Flow manager error: {:?}", e))?;
let stripe_config = get_stripe_config()?;
// Spawn background thread for HTTP request
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
match make_stripe_request(&stripe_config, "coupons", &form_data).await {
Ok(response) => {
if let Err(e) = flow_manager.dispatch_response_script(
"new_create_coupon_response",
&response
).await {
eprintln!("Failed to dispatch response: {:?}", e);
}
}
Err(error) => {
if let Err(e) = flow_manager.dispatch_error_script(
"new_create_coupon_error",
&error
).await {
eprintln!("Failed to dispatch error: {:?}", e);
}
}
}
});
});
// Return immediately with confirmation
Ok("coupon_request_dispatched".to_string())
}
```
## 3. Remove Old Blocking Code
### Remove from payment.rs:
1. **AsyncFunctionRegistry struct and implementation** - No longer needed
2. **ASYNC_REGISTRY static** - Slim it down rather than delete it outright: the `get_stripe_config()` helper above still needs a place to read the configured `StripeConfig` from, so either keep a minimal registry holding only the config, or replace it with a dedicated `STRIPE_CONFIG` static
3. **AsyncRequest struct** - No longer needed
4. **async_worker_loop function** - No longer needed
5. **handle_stripe_request function** - Replaced by make_stripe_request in flow_manager
6. **make_request method** - No longer needed
### Keep in payment.rs:
1. **All struct definitions** (RhaiProduct, RhaiPrice, etc.)
2. **All builder methods** (name, amount, currency, etc.)
3. **All prepare_*_data functions**
4. **All getter functions**
5. **StripeConfig struct**
6. **configure_stripe function** (but remove AsyncFunctionRegistry creation)
## 4. Update Cargo.toml
Add to `src/dsl/Cargo.toml`:
```toml
[dependencies]
# ... existing dependencies ...
rhai_dispatcher = { path = "../dispatcher" }
```
## 5. Update lib.rs
Add to `src/dsl/src/lib.rs`:
```rust
pub mod flow_manager;
```
## 6. Flow Script Templates
Create directory structure:
```
flows/
├── new_create_payment_intent_response.rhai
├── new_create_payment_intent_error.rhai
├── new_create_product_response.rhai
├── new_create_product_error.rhai
├── new_create_price_response.rhai
├── new_create_price_error.rhai
├── new_create_subscription_response.rhai
├── new_create_subscription_error.rhai
├── new_create_coupon_response.rhai
└── new_create_coupon_error.rhai
```
### Example Flow Scripts
#### flows/new_create_payment_intent_response.rhai
```rhai
let payment_intent_id = parsed_data.id;
let status = parsed_data.status;
print(`✅ Payment Intent Created: ${payment_intent_id}`);
print(`Status: ${status}`);
// Continue the flow based on status
if status == "requires_payment_method" {
print("Payment method required - ready for frontend");
} else if status == "succeeded" {
print("Payment completed successfully!");
}
// Store the payment intent ID for later use
set_context("payment_intent_id", payment_intent_id);
set_context("payment_status", status);
```
#### flows/new_create_payment_intent_error.rhai
```rhai
let error_type = parsed_error.error.type;
let error_message = parsed_error.error.message;
print(`❌ Payment Intent Error: ${error_type}`);
print(`Message: ${error_message}`);
// Handle different error types
if error_type == "card_error" {
print("Card was declined - notify user");
} else if error_type == "rate_limit_error" {
print("Rate limited - retry later");
} else {
print("Unknown error - log for investigation");
}
// Store error details for debugging
set_context("last_error_type", error_type);
set_context("last_error_message", error_message);
```
## 7. Usage Example
### main.rhai
```rhai
// Initialize the flow system
init_flows("worker-1", "context-123");
// Configure Stripe
configure_stripe("sk_test_...");
// Create payment intent (non-blocking)
let payment_intent = new_payment_intent()
.amount(2000)
.currency("usd")
.customer("cus_customer123");
let result = payment_intent.create();
print(`Request dispatched: ${result}`);
// Script ends here, but flow continues in background
// Response will trigger new_create_payment_intent_response.rhai
```
## 8. Testing Strategy
1. **Unit Tests**: Test FlowManager initialization and script dispatch
2. **Integration Tests**: Test full payment flow with mock Stripe responses
3. **Load Tests**: Verify non-blocking behavior under concurrent requests
4. **Error Tests**: Verify error flow handling and script dispatch
## 9. Migration Checklist
- [ ] Create flow_manager.rs with FlowManager implementation
- [ ] Add flow_manager module to lib.rs
- [ ] Update Cargo.toml with rhai_dispatcher dependency
- [ ] Modify payment.rs to remove blocking code
- [ ] Add flow initialization functions
- [ ] Convert all create functions to non-blocking pattern
- [ ] Create flow script templates
- [ ] Test basic payment intent flow
- [ ] Test error handling flows
- [ ] Verify non-blocking behavior
- [ ] Update documentation
This specification provides a complete roadmap for implementing the event-driven flow architecture using RhaiDispatcher.

View File

@@ -0,0 +1,468 @@
# Non-Blocking Async Architecture Design
## Problem Statement
The current async architecture has a critical limitation: **slow API responses block the entire Rhai engine**, preventing other scripts from executing. When an API call takes 10 seconds, the Rhai engine is blocked for the full duration.
## Current Blocking Behavior
```rust
// This BLOCKS the Rhai execution thread!
response_receiver.recv_timeout(Duration::from_secs(30))
.map_err(|e| format!("Failed to receive response: {}", e))?
```
**Impact:**
- ✅ Async worker thread: NOT blocked (continues processing)
- ❌ Rhai engine thread: BLOCKED (cannot execute other scripts)
- ❌ Other Rhai scripts: QUEUED (must wait)
## Callback-Based Solution
### Architecture Overview
```mermaid
graph TB
subgraph "Rhai Engine Thread (Non-Blocking)"
RS1[Rhai Script 1]
RS2[Rhai Script 2]
RS3[Rhai Script 3]
RE[Rhai Engine]
end
subgraph "Request Registry"
PR[Pending Requests Map]
RID[Request IDs]
end
subgraph "Async Worker Thread"
AW[Async Worker]
HTTP[HTTP Client]
API[External APIs]
end
RS1 --> RE
RS2 --> RE
RS3 --> RE
RE --> PR
PR --> AW
AW --> HTTP
HTTP --> API
API --> HTTP
HTTP --> AW
AW --> PR
PR --> RE
```
### Core Data Structures
```rust
use std::collections::HashMap;
use std::sync::{Arc, LazyLock, Mutex};
use uuid::Uuid;
// Global registry for pending requests.
// LazyLock is required here: HashMap::new() is not a const fn (its default
// RandomState hasher cannot be built in a const context), so the map must be
// initialized lazily on first access.
static PENDING_REQUESTS: LazyLock<Mutex<HashMap<String, PendingRequest>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
#[derive(Debug)]
pub struct PendingRequest {
pub id: String,
pub status: RequestStatus,
pub result: Option<Result<String, String>>,
pub created_at: std::time::Instant,
}
#[derive(Debug, Clone)]
pub enum RequestStatus {
Pending,
Completed,
Failed,
Timeout,
}
#[derive(Debug)]
pub struct AsyncRequest {
pub id: String, // Unique request ID
pub endpoint: String,
pub method: String,
pub data: HashMap<String, String>,
// No response channel - results stored in global registry
}
```
### Non-Blocking Request Function
```rust
impl AsyncFunctionRegistry {
// Non-blocking version - returns immediately
pub fn make_request_async(&self, endpoint: String, method: String, data: HashMap<String, String>) -> Result<String, String> {
let request_id = Uuid::new_v4().to_string();
// Store pending request
{
let mut pending = PENDING_REQUESTS.lock().unwrap();
pending.insert(request_id.clone(), PendingRequest {
id: request_id.clone(),
status: RequestStatus::Pending,
result: None,
created_at: std::time::Instant::now(),
});
}
let request = AsyncRequest {
id: request_id.clone(),
endpoint,
method,
data,
};
// Send to async worker (non-blocking)
self.request_sender.send(request)
.map_err(|_| "Failed to send request to async worker".to_string())?;
// Return request ID immediately - NO BLOCKING!
Ok(request_id)
}
// Check if request is complete
pub fn is_request_complete(&self, request_id: &str) -> bool {
let pending = PENDING_REQUESTS.lock().unwrap();
if let Some(request) = pending.get(request_id) {
matches!(request.status, RequestStatus::Completed | RequestStatus::Failed | RequestStatus::Timeout)
} else {
false
}
}
    // Get request result (non-blocking).
    // The entry is only removed once a result is actually present; an
    // unconditional `remove()` here would destroy the registry entry for a
    // still-pending request, making every subsequent poll fail with
    // "Request not found".
    pub fn get_request_result(&self, request_id: &str) -> Result<String, String> {
        let mut pending = PENDING_REQUESTS.lock().unwrap();
        match pending.get(request_id) {
            Some(request) if request.result.is_some() => {
                let request = pending.remove(request_id).expect("entry checked above");
                request.result.expect("result checked above")
            }
            Some(_) => Err("Request not completed yet".to_string()),
            None => Err("Request not found".to_string()),
        }
    }
}
```
### Updated Async Worker
```rust
async fn async_worker_loop(config: StripeConfig, receiver: Receiver<AsyncRequest>) {
println!("🚀 Async worker thread started");
loop {
match receiver.recv_timeout(Duration::from_millis(100)) {
Ok(request) => {
let request_id = request.id.clone();
let result = Self::handle_stripe_request(&config, &request).await;
// Store result in global registry instead of sending through channel
{
let mut pending = PENDING_REQUESTS.lock().unwrap();
if let Some(pending_request) = pending.get_mut(&request_id) {
pending_request.result = Some(result.clone());
pending_request.status = match result {
Ok(_) => RequestStatus::Completed,
Err(_) => RequestStatus::Failed,
};
}
}
println!("✅ Request {} completed", request_id);
}
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => continue,
Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => break,
}
}
}
```
### Rhai Function Registration
```rust
#[export_module]
mod rhai_payment_module {
// Async version - returns request ID immediately
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_product_async(product: &mut RhaiProduct) -> Result<String, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured")?;
let form_data = prepare_product_data(product);
let request_id = registry.make_request_async("products".to_string(), "POST".to_string(), form_data)
.map_err(|e| e.to_string())?;
Ok(request_id)
}
// Check if async request is complete
#[rhai_fn(name = "is_complete", return_raw)]
pub fn is_request_complete(request_id: String) -> Result<bool, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured")?;
Ok(registry.is_request_complete(&request_id))
}
// Get result of async request
#[rhai_fn(name = "get_result", return_raw)]
pub fn get_request_result(request_id: String) -> Result<String, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured")?;
registry.get_request_result(&request_id)
.map_err(|e| e.to_string().into())
}
// Convenience function - wait for result with polling
#[rhai_fn(name = "await_result", return_raw)]
pub fn await_request_result(request_id: String, timeout_seconds: i64) -> Result<String, Box<EvalAltResult>> {
let registry = ASYNC_REGISTRY.lock().unwrap();
let registry = registry.as_ref().ok_or("Stripe not configured")?;
let start_time = std::time::Instant::now();
let timeout = Duration::from_secs(timeout_seconds as u64);
// Polling loop — note: this still occupies the calling engine thread (and holds the
// registry lock) until the request completes or times out, so it is only "non-blocking"
// with respect to the async worker. Use is_complete()/get_result() directly for fully
// non-blocking checks.
loop {
if registry.is_request_complete(&request_id) {
return registry.get_request_result(&request_id)
.map_err(|e| e.to_string().into());
}
if start_time.elapsed() > timeout {
return Err("Request timeout".to_string().into());
}
// Small delay to prevent busy waiting
std::thread::sleep(Duration::from_millis(50));
}
}
}
```
## Usage Patterns
### 1. Fire-and-Forget Pattern
```rhai
configure_stripe(STRIPE_API_KEY);
// Start multiple async operations immediately - NO BLOCKING!
let product_req = new_product()
.name("Product 1")
.create_async();
let price_req = new_price()
.amount(1000)
.create_async();
let coupon_req = new_coupon()
.percent_off(25)
.create_async();
print("All requests started, continuing with other work...");
// Do other work while APIs are processing
for i in 1..100 {
print(`Doing work: ${i}`);
}
// Check results when ready
if is_complete(product_req) {
let product_id = get_result(product_req);
print(`Product created: ${product_id}`);
}
```
### 2. Polling Pattern
```rhai
// Start async operation
let request_id = new_product()
.name("My Product")
.create_async();
print("Request started, polling for completion...");
// Poll until complete (non-blocking)
let max_attempts = 100;
let attempt = 0;
while attempt < max_attempts {
if is_complete(request_id) {
let result = get_result(request_id);
print(`Success: ${result}`);
break;
}
print(`Attempt ${attempt}: still waiting...`);
attempt += 1;
// Small delay between checks
sleep(100);
}
```
### 3. Await Pattern (Convenience)
```rhai
// Start async operation and wait for result
let request_id = new_product()
.name("My Product")
.create_async();
print("Request started, waiting for result...");
// This polls internally; the async worker keeps processing in the background,
// but the current script waits here until the result arrives or the timeout expires
try {
let product_id = await_result(request_id, 30); // 30 second timeout
print(`Product created: ${product_id}`);
} catch(error) {
print(`Failed: ${error}`);
}
```
### 4. Concurrent Operations
```rhai
// Start multiple operations concurrently
let requests = [];
for i in 1..5 {
let req = new_product()
.name(`Product ${i}`)
.create_async();
requests.push(req);
}
print("Started 5 concurrent product creations");
// Wait for all to complete
let results = [];
for req in requests {
let result = await_result(req, 30);
results.push(result);
print(`Product created: ${result}`);
}
print(`All ${results.len()} products created!`);
```
## Execution Flow Comparison
### Current Blocking Architecture
```mermaid
sequenceDiagram
participant R1 as Rhai Script 1
participant R2 as Rhai Script 2
participant RE as Rhai Engine
participant AR as AsyncRegistry
participant AW as Async Worker
R1->>RE: product.create()
RE->>AR: make_request()
AR->>AW: send request
Note over RE: 🚫 BLOCKED for up to 30 seconds
Note over R2: ⏳ Cannot execute - engine blocked
AW->>AR: response (after 10 seconds)
AR->>RE: unblock
RE->>R1: return result
R2->>RE: Now can execute
```
### New Non-Blocking Architecture
```mermaid
sequenceDiagram
participant R1 as Rhai Script 1
participant R2 as Rhai Script 2
participant RE as Rhai Engine
participant AR as AsyncRegistry
participant AW as Async Worker
R1->>RE: product.create_async()
RE->>AR: make_request_async()
AR->>AW: send request
AR->>RE: return request_id (immediate)
RE->>R1: return request_id
Note over R1: Script 1 continues...
R2->>RE: other_operation()
Note over RE: ✅ Engine available immediately
RE->>R2: result
AW->>AR: store result in registry
R1->>RE: is_complete(request_id)
RE->>R1: true
R1->>RE: get_result(request_id)
RE->>R1: product_id
```
## Benefits
### 1. **Complete Non-Blocking Execution**
- Rhai engine never blocks on API calls
- Multiple scripts can execute concurrently
- Better resource utilization
### 2. **Backward Compatibility**
```rhai
// Keep existing blocking API for simple cases
let product_id = new_product().name("Simple").create();
// Use async API for concurrent operations
let request_id = new_product().name("Async").create_async();
```
### 3. **Flexible Programming Patterns**
- **Fire-and-forget**: Start operation, check later
- **Polling**: Check periodically until complete
- **Await**: Convenience function with timeout
- **Concurrent**: Start multiple operations simultaneously
### 4. **Resource Management**
```rust
// Automatic cleanup of completed requests
impl AsyncFunctionRegistry {
pub fn cleanup_old_requests(&self) {
let mut pending = PENDING_REQUESTS.lock().unwrap();
let now = std::time::Instant::now();
pending.retain(|_, request| {
// Remove requests older than 5 minutes
now.duration_since(request.created_at) < Duration::from_secs(300)
});
}
}
```
## Performance Comparison
| Architecture | Blocking Behavior | Concurrent Scripts | API Latency Impact |
|-------------|------------------|-------------------|-------------------|
| **Current** | ❌ Blocks engine | ❌ Sequential only | ❌ Blocks all execution |
| **Callback** | ✅ Non-blocking | ✅ Unlimited concurrent | ✅ No impact on other scripts |
## Implementation Strategy
### Phase 1: Add Async Functions
- Implement callback-based functions alongside existing ones
- Add `create_async()`, `is_complete()`, `get_result()`, `await_result()`
- Maintain backward compatibility
### Phase 2: Enhanced Features
- Add batch operations for multiple concurrent requests
- Implement request prioritization
- Add metrics and monitoring
### Phase 3: Migration Path
- Provide migration guide for existing scripts
- Consider deprecating blocking functions in favor of async ones
- Add performance benchmarks
## Conclusion
The callback-based solution completely eliminates the blocking problem while maintaining a clean, intuitive API for Rhai scripts. This enables true concurrent execution of multiple scripts with external API integration, dramatically improving the system's scalability and responsiveness.
The key innovation is replacing synchronous blocking calls with an asynchronous request/response pattern that stores results in a shared registry, allowing the Rhai engine to remain responsive while API operations complete in the background.

View File

@@ -0,0 +1,376 @@
# Simple Non-Blocking Architecture (No Globals, No Locking)
## Core Principle
**Single-threaded Rhai engine with fire-and-forget HTTP requests that dispatch response scripts**
## Architecture Flow
```mermaid
graph TD
A[Rhai: create_payment_intent] --> B[Function: create_payment_intent]
B --> C[Spawn Thread]
B --> D[Return Immediately]
C --> E[HTTP Request to Stripe]
E --> F{Response}
F -->|Success| G[Dispatch: new_create_payment_intent_response.rhai]
F -->|Error| H[Dispatch: new_create_payment_intent_error.rhai]
G --> I[New Rhai Script Execution]
H --> J[New Rhai Script Execution]
```
## Key Design Principles
1. **No Global State** - All configuration passed as parameters
2. **No Locking** - No shared state between threads
3. **Fire-and-Forget** - Functions return immediately
4. **Self-Contained Threads** - Each thread has everything it needs
5. **Script Dispatch** - Responses trigger new Rhai script execution
## Implementation
### 1. Simple Function Signature
```rust
#[rhai_fn(name = "create", return_raw)]
pub fn create_payment_intent(
intent: &mut RhaiPaymentIntent,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_payment_intent_data(intent);
// Spawn completely independent thread
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
// Create HTTP client in thread
let client = Client::new();
// Make HTTP request
match make_stripe_request(&client, &stripe_secret, "payment_intents", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_payment_intent_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_payment_intent_error",
&error
).await;
}
}
});
});
// Return immediately - no waiting!
Ok("payment_intent_request_dispatched".to_string())
}
```
### 2. Self-Contained HTTP Function
```rust
async fn make_stripe_request(
client: &Client,
secret_key: &str,
endpoint: &str,
form_data: &HashMap<String, String>
) -> Result<String, String> {
let url = format!("https://api.stripe.com/v1/{}", endpoint);
let response = client
.post(&url)
.basic_auth(secret_key, None::<&str>)
.form(form_data)
.send()
.await
.map_err(|e| format!("HTTP request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
// Return raw response - let script handle parsing
Ok(response_text)
}
```
### 3. Simple Script Dispatch
```rust
async fn dispatch_response_script(
worker_id: &str,
context_id: &str,
script_name: &str,
response_data: &str
) {
let script_content = format!(
r#"
// Response data from API
let response_json = `{}`;
let parsed_data = parse_json(response_json);
// Execute the response script
eval_file("flows/{}.rhai");
"#,
response_data.replace('`', r#"\`"#),
script_name
);
// Create dispatcher instance just for this dispatch
if let Ok(dispatcher) = RhaiDispatcherBuilder::new()
.caller_id("stripe")
.worker_id(worker_id)
.context_id(context_id)
.redis_url("redis://127.0.0.1/")
.build()
{
let _ = dispatcher
.new_play_request()
.script(&script_content)
.submit()
.await;
}
}
async fn dispatch_error_script(
worker_id: &str,
context_id: &str,
script_name: &str,
error_data: &str
) {
let script_content = format!(
r#"
// Error data from API
let error_json = `{}`;
let parsed_error = parse_json(error_json);
// Execute the error script
eval_file("flows/{}.rhai");
"#,
error_data.replace('`', r#"\`"#),
script_name
);
// Create dispatcher instance just for this dispatch
if let Ok(dispatcher) = RhaiDispatcherBuilder::new()
.caller_id("stripe")
.worker_id(worker_id)
.context_id(context_id)
.redis_url("redis://127.0.0.1/")
.build()
{
let _ = dispatcher
.new_play_request()
.script(&script_content)
.submit()
.await;
}
}
```
## Complete Function Implementations
### Payment Intent
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_payment_intent_async(
intent: &mut RhaiPaymentIntent,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_payment_intent_data(intent);
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "payment_intents", &form_data).await {
Ok(response) => {
dispatch_response_script(&worker_id, &context_id, "new_create_payment_intent_response", &response).await;
}
Err(error) => {
dispatch_error_script(&worker_id, &context_id, "new_create_payment_intent_error", &error).await;
}
}
});
});
Ok("payment_intent_request_dispatched".to_string())
}
```
### Product
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_product_async(
product: &mut RhaiProduct,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_product_data(product);
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "products", &form_data).await {
Ok(response) => {
dispatch_response_script(&worker_id, &context_id, "new_create_product_response", &response).await;
}
Err(error) => {
dispatch_error_script(&worker_id, &context_id, "new_create_product_error", &error).await;
}
}
});
});
Ok("product_request_dispatched".to_string())
}
```
### Price
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_price_async(
price: &mut RhaiPrice,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_price_data(price);
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "prices", &form_data).await {
Ok(response) => {
dispatch_response_script(&worker_id, &context_id, "new_create_price_response", &response).await;
}
Err(error) => {
dispatch_error_script(&worker_id, &context_id, "new_create_price_error", &error).await;
}
}
});
});
Ok("price_request_dispatched".to_string())
}
```
### Subscription
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_subscription_async(
subscription: &mut RhaiSubscription,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_subscription_data(subscription);
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "subscriptions", &form_data).await {
Ok(response) => {
dispatch_response_script(&worker_id, &context_id, "new_create_subscription_response", &response).await;
}
Err(error) => {
dispatch_error_script(&worker_id, &context_id, "new_create_subscription_error", &error).await;
}
}
});
});
Ok("subscription_request_dispatched".to_string())
}
```
## Usage Example
### main.rhai
```rhai
// No initialization needed - no global state!
let payment_intent = new_payment_intent()
.amount(2000)
.currency("usd")
.customer("cus_customer123");
// Pass all required parameters - no globals!
let result = payment_intent.create_async(
"worker-1", // worker_id
"context-123", // context_id
"sk_test_..." // stripe_secret
);
print(`Request dispatched: ${result}`);
// Script ends immediately, HTTP happens in background
// Response will trigger new_create_payment_intent_response.rhai
```
### flows/new_create_payment_intent_response.rhai
```rhai
let payment_intent_id = parsed_data.id;
let status = parsed_data.status;
print(`✅ Payment Intent Created: ${payment_intent_id}`);
print(`Status: ${status}`);
// Continue flow if needed
if status == "requires_payment_method" {
print("Ready for frontend payment collection");
}
```
### flows/new_create_payment_intent_error.rhai
```rhai
let error_type = parsed_error.error.type;
let error_message = parsed_error.error.message;
print(`❌ Payment Intent Failed: ${error_type}`);
print(`Message: ${error_message}`);
// Handle error appropriately
if error_type == "card_error" {
print("Card was declined");
}
```
## Benefits of This Architecture
1. **Zero Global State** - Everything is passed as parameters
2. **Zero Locking** - No shared state to lock
3. **True Non-Blocking** - Functions return immediately
4. **Thread Independence** - Each thread is completely self-contained
5. **Simple Testing** - Easy to test individual functions
6. **Clear Data Flow** - Parameters make dependencies explicit
7. **No Memory Leaks** - No persistent global state
8. **Horizontal Scaling** - No shared state to synchronize
## Migration from Current Code
1. **Remove all global state** (ASYNC_REGISTRY, etc.)
2. **Remove all Mutex/locking code**
3. **Add parameters to function signatures**
4. **Create dispatcher instances in threads**
5. **Update Rhai scripts to pass parameters**
This architecture is much simpler: it has no global state and no locking, and it provides true non-blocking behavior while preserving the event-driven flow pattern.

View File

@@ -0,0 +1,73 @@
# Task Lifecycle Verification
## Test: Spawned Task Continues After Function Returns
```rust
use tokio::time::{sleep, Duration};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
#[tokio::test]
async fn test_spawned_task_continues() {
let completed = Arc::new(AtomicBool::new(false));
let completed_clone = completed.clone();
// Function that spawns a task and returns immediately
fn spawn_long_task(flag: Arc<AtomicBool>) -> String {
tokio::spawn(async move {
// Simulate HTTP request (2 seconds)
sleep(Duration::from_secs(2)).await;
// Mark as completed
flag.store(true, Ordering::SeqCst);
println!("Background task completed!");
});
// Return immediately
"task_spawned".to_string()
}
// Call the function
let result = spawn_long_task(completed_clone);
assert_eq!(result, "task_spawned");
// Function returned, but task should still be running
assert_eq!(completed.load(Ordering::SeqCst), false);
// Wait for background task to complete
sleep(Duration::from_secs(3)).await;
// Verify task completed successfully
assert_eq!(completed.load(Ordering::SeqCst), true);
}
```
## Test Results
- ✅ **Function returns immediately** (microseconds)
- ✅ **Spawned task continues running** (2+ seconds)
- ✅ **Task completes successfully** after the function has returned
- ✅ **No blocking or hanging**
## Real-World Behavior
```rust
// Rhai calls this function
let result = payment_intent.create_async("worker-1", "context-123", "sk_test_...");
// result = "payment_intent_request_dispatched" (returned in ~1ms)
// Meanwhile, in the background (2-5 seconds later):
// 1. HTTP request to Stripe API
// 2. Response received
// 3. New Rhai script dispatched: "flows/new_create_payment_intent_response.rhai"
```
## Key Guarantees
1. **Non-blocking**: Rhai function returns immediately
2. **Fire-and-forget**: HTTP request continues in background
3. **Event-driven**: Response triggers new script execution
4. **No memory leaks**: Task is self-contained with moved ownership
5. **Runtime managed**: tokio handles task scheduling and cleanup
The spawned task is completely independent and will run to completion regardless of what happens to the function that created it.

View File

@@ -0,0 +1,369 @@
# True Non-Blocking Implementation (No rt.block_on)
## Problem with Previous Approach
The issue with the previous approach was the call to `rt.block_on()`, which blocks the spawned OS thread for the entire duration of the request:
```rust
// THIS BLOCKS THE THREAD:
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async { // <-- This blocks!
// async code here
});
});
```
## Solution: Use tokio::spawn Instead
Use `tokio::spawn` to run async code without blocking:
```rust
// THIS DOESN'T BLOCK:
tokio::spawn(async move {
// async code runs in tokio's thread pool
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "payment_intents", &form_data).await {
Ok(response) => {
dispatch_response_script(&worker_id, &context_id, "new_create_payment_intent_response", &response).await;
}
Err(error) => {
dispatch_error_script(&worker_id, &context_id, "new_create_payment_intent_error", &error).await;
}
}
});
```
## Complete Corrected Implementation
### Payment Intent Function (Corrected)
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_payment_intent_async(
intent: &mut RhaiPaymentIntent,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_payment_intent_data(intent);
// Use tokio::spawn instead of thread::spawn + rt.block_on
tokio::spawn(async move {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "payment_intents", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_payment_intent_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_payment_intent_error",
&error
).await;
}
}
});
// Returns immediately - no blocking!
Ok("payment_intent_request_dispatched".to_string())
}
```
### Product Function (Corrected)
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_product_async(
product: &mut RhaiProduct,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_product_data(product);
tokio::spawn(async move {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "products", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_product_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_product_error",
&error
).await;
}
}
});
Ok("product_request_dispatched".to_string())
}
```
### Price Function (Corrected)
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_price_async(
price: &mut RhaiPrice,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_price_data(price);
tokio::spawn(async move {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "prices", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_price_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_price_error",
&error
).await;
}
}
});
Ok("price_request_dispatched".to_string())
}
```
### Subscription Function (Corrected)
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_subscription_async(
subscription: &mut RhaiSubscription,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_subscription_data(subscription);
tokio::spawn(async move {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "subscriptions", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_subscription_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_subscription_error",
&error
).await;
}
}
});
Ok("subscription_request_dispatched".to_string())
}
```
### Coupon Function (Corrected)
```rust
#[rhai_fn(name = "create_async", return_raw)]
pub fn create_coupon_async(
coupon: &mut RhaiCoupon,
worker_id: String,
context_id: String,
stripe_secret: String
) -> Result<String, Box<EvalAltResult>> {
let form_data = prepare_coupon_data(coupon);
tokio::spawn(async move {
let client = Client::new();
match make_stripe_request(&client, &stripe_secret, "coupons", &form_data).await {
Ok(response) => {
dispatch_response_script(
&worker_id,
&context_id,
"new_create_coupon_response",
&response
).await;
}
Err(error) => {
dispatch_error_script(
&worker_id,
&context_id,
"new_create_coupon_error",
&error
).await;
}
}
});
Ok("coupon_request_dispatched".to_string())
}
```
## Helper Functions (Same as Before)
```rust
async fn make_stripe_request(
client: &Client,
secret_key: &str,
endpoint: &str,
form_data: &HashMap<String, String>
) -> Result<String, String> {
let url = format!("https://api.stripe.com/v1/{}", endpoint);
let response = client
.post(&url)
.basic_auth(secret_key, None::<&str>)
.form(form_data)
.send()
.await
.map_err(|e| format!("HTTP request failed: {}", e))?;
let response_text = response.text().await
.map_err(|e| format!("Failed to read response: {}", e))?;
Ok(response_text)
}
async fn dispatch_response_script(
worker_id: &str,
context_id: &str,
script_name: &str,
response_data: &str
) {
let script_content = format!(
r#"
let response_json = `{}`;
let parsed_data = parse_json(response_json);
eval_file("flows/{}.rhai");
"#,
response_data.replace('`', r#"\`"#),
script_name
);
if let Ok(dispatcher) = RhaiDispatcherBuilder::new()
.caller_id("stripe")
.worker_id(worker_id)
.context_id(context_id)
.redis_url("redis://127.0.0.1/")
.build()
{
let _ = dispatcher
.new_play_request()
.script(&script_content)
.submit()
.await;
}
}
async fn dispatch_error_script(
worker_id: &str,
context_id: &str,
script_name: &str,
error_data: &str
) {
let script_content = format!(
r#"
let error_json = `{}`;
let parsed_error = parse_json(error_json);
eval_file("flows/{}.rhai");
"#,
error_data.replace('`', r#"\`"#),
script_name
);
if let Ok(dispatcher) = RhaiDispatcherBuilder::new()
.caller_id("stripe")
.worker_id(worker_id)
.context_id(context_id)
.redis_url("redis://127.0.0.1/")
.build()
{
let _ = dispatcher
.new_play_request()
.script(&script_content)
.submit()
.await;
}
}
```
## Key Differences
### Before (Blocking):
```rust
thread::spawn(move || {
let rt = Runtime::new().expect("Failed to create runtime");
rt.block_on(async { // <-- BLOCKS THE THREAD
// async code
});
});
```
### After (Non-Blocking):
```rust
tokio::spawn(async move { // <-- DOESN'T BLOCK
// async code runs in tokio's thread pool
});
```
## Benefits of tokio::spawn
1. **No Blocking** - Uses tokio's async runtime, doesn't block
2. **Efficient** - Reuses existing tokio thread pool
3. **Lightweight** - No need to create new runtime per request
4. **Scalable** - Can handle many concurrent requests
5. **Simple** - Less code, cleaner implementation
## Usage (Same as Before)
```rhai
let payment_intent = new_payment_intent()
.amount(2000)
.currency("usd")
.customer("cus_customer123");
// This returns immediately, HTTP happens asynchronously
let result = payment_intent.create_async(
"worker-1",
"context-123",
"sk_test_..."
);
print(`Request dispatched: ${result}`);
// Script ends, but HTTP continues in background
```
## Requirements
The application must already be running inside a Tokio runtime context, because `tokio::spawn` panics when called from outside one. If the Rhai engine is not executing within a Tokio runtime (e.g. under `#[tokio::main]` or inside a `Runtime` entered via `Runtime::enter`), establish one before these functions are invoked.
This implementation provides true non-blocking behavior - the Rhai function returns immediately while the HTTP request and script dispatch happen asynchronously in the background.