feat: Add process package to monorepo
Some checks are pending
Rhai Tests / Run Rhai Tests (push) Waiting to run
- Add `sal-process` package for cross-platform process management.
- Update workspace members in `Cargo.toml`.
- Mark process package as complete in `MONOREPO_CONVERSION_PLAN.md`.
- Remove license information from `mycelium` and `os` READMEs.
process/tests/rhai/04_real_world_scenarios.rhai (326 lines, normal file)
@@ -0,0 +1,326 @@
// Test script for real-world process scenarios

print("=== Real-World Process Scenarios Tests ===");
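// Helpers assumed to be provided by the sal-process Rhai module and the test
// runner: run_command(), the run(...) builder (.silent(), .ignore_error(),
// .execute()), process_list(), which(), timestamp(), and assert_true().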
// Test 1: System information gathering
print("\n--- Test 1: System Information Gathering ---");
let system_info = #{};

// Get current user
try {
    let whoami_result = run_command("whoami");
    if whoami_result.success {
        system_info.user = whoami_result.stdout.trim();
        print(`✓ Current user: ${system_info.user}`);
    }
} catch(e) {
    print("⚠ whoami command not available");
}

// Get current directory
try {
    let pwd_result = run_command("pwd");
    if pwd_result.success {
        system_info.pwd = pwd_result.stdout.trim();
        print(`✓ Current directory: ${system_info.pwd}`);
    }
} catch(e) {
    // Try Windows alternative
    try {
        let cd_result = run_command("cd");
        if cd_result.success {
            system_info.pwd = cd_result.stdout.trim();
            print(`✓ Current directory (Windows): ${system_info.pwd}`);
        }
    } catch(e2) {
        print("⚠ pwd/cd commands not available");
    }
}

assert_true(system_info.len() > 0, "Should gather at least some system information");
// Test 2: File system operations
print("\n--- Test 2: File System Operations ---");
let temp_file = "/tmp/sal_process_test.txt";
let temp_content = "SAL Process Test Content";

// Create a test file
let create_script = `
echo "${temp_content}" > ${temp_file}
`;

try {
    let create_result = run_command(create_script);
    if create_result.success {
        print("✓ Test file created successfully");

        // Read the file back
        let read_result = run_command(`cat ${temp_file}`);
        if read_result.success {
            assert_true(read_result.stdout.contains(temp_content), "File content should match");
            print("✓ Test file read successfully");
        }

        // Clean up
        let cleanup_result = run_command(`rm -f ${temp_file}`);
        if cleanup_result.success {
            print("✓ Test file cleaned up successfully");
        }
    }
} catch(e) {
    print("⚠ File system operations not available on this platform");
}
// Test 3: Process monitoring workflow
print("\n--- Test 3: Process Monitoring Workflow ---");
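// process_list("") is used here to list every running process; a non-empty
// argument is treated as a name pattern to filter on.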
let monitoring_workflow = || {
    // Get all processes
    let all_processes = process_list("");
    assert_true(all_processes.len() > 0, "Should find running processes");

    // Find processes with common names
    let common_patterns = ["init", "kernel", "system", "explorer", "winlogon"];
    let found_patterns = [];

    for pattern in common_patterns {
        let matches = process_list(pattern);
        if matches.len() > 0 {
            found_patterns.push(pattern);
        }
    }

    print(`✓ Process monitoring found patterns: ${found_patterns}`);
    return found_patterns.len() > 0;
};

assert_true(monitoring_workflow(), "Process monitoring workflow should succeed");
// Test 4: Command availability checking
print("\n--- Test 4: Command Availability Checking ---");
let essential_commands = ["echo"];
let optional_commands = ["git", "curl", "wget", "python", "node", "java"];

let available_commands = [];
let missing_commands = [];
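// which(cmd) is expected to return the resolved path of the command, or unit ()
// when it is not found on PATH, so `path != ()` is the availability check below.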
// Check essential commands
for cmd in essential_commands {
    let path = which(cmd);
    if path != () {
        available_commands.push(cmd);
    } else {
        missing_commands.push(cmd);
    }
}

// Check optional commands
for cmd in optional_commands {
    let path = which(cmd);
    if path != () {
        available_commands.push(cmd);
    }
}

assert_true(missing_commands.len() == 0, "All essential commands should be available");
print(`✓ Available commands: ${available_commands}`);
print(`✓ Command availability check completed`);
// Test 5: Batch processing simulation
print("\n--- Test 5: Batch Processing Simulation ---");
let batch_commands = [
    "echo 'Processing item 1'",
    "echo 'Processing item 2'",
    "echo 'Processing item 3'"
];

let batch_results = [];
let batch_success = true;
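// The run(cmd) builder is used here instead of run_command(): .silent() is assumed
// to suppress the command's output, and .execute() returns the same result shape
// (success, stdout) that the checks below rely on.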
for cmd in batch_commands {
    try {
        let result = run(cmd).silent().execute();
        batch_results.push(result);
        if !result.success {
            batch_success = false;
        }
    } catch(e) {
        batch_success = false;
        break;
    }
}

assert_true(batch_success, "Batch processing should succeed");
assert_true(batch_results.len() == batch_commands.len(), "Should process all batch items");
print(`✓ Batch processing completed: ${batch_results.len()} items`);
// Test 6: Environment variable handling
print("\n--- Test 6: Environment Variable Handling ---");
let env_test_script = `
export TEST_VAR="test_value"
echo "TEST_VAR=$TEST_VAR"
`;

try {
    let env_result = run_command(env_test_script);
    if env_result.success {
        assert_true(env_result.stdout.contains("TEST_VAR=test_value"), "Environment variable should be set");
        print("✓ Environment variable handling works");
    }
} catch(e) {
    print("⚠ Environment variable test not available");
}
// Test 7: Pipeline simulation
print("\n--- Test 7: Pipeline Simulation ---");
let pipeline_script = `
echo "line1
line2
line3" | grep "line2"
`;

try {
    let pipeline_result = run_command(pipeline_script);
    if pipeline_result.success {
        assert_true(pipeline_result.stdout.contains("line2"), "Pipeline should filter correctly");
        print("✓ Pipeline simulation works");
    }
} catch(e) {
    print("⚠ Pipeline simulation not available");
}
// Test 8: Error recovery workflow
print("\n--- Test 8: Error Recovery Workflow ---");
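// run_command() is expected to throw for a binary that does not exist, so the
// fallback path lives in the catch block rather than in a success check.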
let recovery_workflow = || {
    let primary_cmd = "nonexistent_primary_command";
    let fallback_cmd = "echo 'fallback executed'";

    // Try primary command
    try {
        let primary_result = run_command(primary_cmd);
        return primary_result.success;
    } catch(e) {
        // Primary failed, try fallback
        try {
            let fallback_result = run_command(fallback_cmd);
            return fallback_result.success && fallback_result.stdout.contains("fallback executed");
        } catch(e2) {
            return false;
        }
    }
};

assert_true(recovery_workflow(), "Error recovery workflow should succeed");
print("✓ Error recovery workflow works");
// Test 9: Resource monitoring
print("\n--- Test 9: Resource Monitoring ---");
let resource_monitoring = || {
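    // timestamp() is assumed to return milliseconds; the duration print and the
    // 10-second threshold below rely on that unit.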
    let start_time = timestamp();

    // Simulate resource-intensive operation
    let intensive_script = `
for i in $(seq 1 10); do
    echo "Processing $i"
done
`;

    try {
        let result = run(intensive_script).silent().execute();
        let end_time = timestamp();
        let duration = end_time - start_time;

        print(`✓ Resource monitoring: operation took ${duration}ms`);
        return result.success && duration < 10000; // Should complete within 10 seconds
    } catch(e) {
        return false;
    }
};

assert_true(resource_monitoring(), "Resource monitoring should work");
// Test 10: Cross-platform compatibility
print("\n--- Test 10: Cross-Platform Compatibility ---");
let cross_platform_test = || {
    // Test basic commands that should work everywhere
    let basic_commands = ["echo hello"];

    for cmd in basic_commands {
        try {
            let result = run_command(cmd);
            if !result.success {
                return false;
            }
        } catch(e) {
            return false;
        }
    }

    // Test platform detection
    let windows_detected = which("cmd") != ();
    let unix_detected = which("sh") != ();

    return windows_detected || unix_detected;
};

assert_true(cross_platform_test(), "Cross-platform compatibility should work");
print("✓ Cross-platform compatibility verified");
// Test 11: Complex workflow integration
print("\n--- Test 11: Complex Workflow Integration ---");
let complex_workflow = || {
    // Step 1: Check prerequisites
    let echo_available = which("echo") != ();
    if !echo_available {
        return false;
    }

    // Step 2: Execute main task
    let main_result = run("echo 'Complex workflow step'").silent().execute();
    if !main_result.success {
        return false;
    }

    // Step 3: Verify results
    let verify_result = run("echo 'Verification step'").silent().execute();
    if !verify_result.success {
        return false;
    }
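    // .ignore_error() is assumed to keep the builder from raising on a non-zero
    // exit status, so the cleanup step cannot fail the workflow.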
    // Step 4: Cleanup (always succeeds)
    let cleanup_result = run("echo 'Cleanup step'").ignore_error().silent().execute();

    return true;
};

assert_true(complex_workflow(), "Complex workflow integration should succeed");
print("✓ Complex workflow integration works");
// Test 12: Performance under load
print("\n--- Test 12: Performance Under Load ---");
let performance_test = || {
    let start_time = timestamp();
    let iterations = 5;
    let success_count = 0;

    for i in range(0, iterations) {
        try {
            let result = run(`echo "Iteration ${i}"`).silent().execute();
            if result.success {
                success_count += 1;
            }
        } catch(e) {
            // Continue with next iteration
        }
    }

    let end_time = timestamp();
    let duration = end_time - start_time;
    let avg_time = duration / iterations;

    print(`✓ Performance test: ${success_count}/${iterations} succeeded, avg ${avg_time}ms per operation`);
    return success_count == iterations && avg_time < 1000; // Each operation should be < 1 second
};

assert_true(performance_test(), "Performance under load should be acceptable");

print("\n=== All Real-World Scenarios Tests Passed! ===");